Index: vendor/clang/dist/lib/Basic/Targets.cpp
===================================================================
--- vendor/clang/dist/lib/Basic/Targets.cpp	(revision 292727)
+++ vendor/clang/dist/lib/Basic/Targets.cpp	(revision 292728)
@@ -1,7443 +1,7449 @@
//===--- Targets.cpp - Implement -arch option and targets -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements construction of a TargetInfo object from a
// target triple.
//
//===----------------------------------------------------------------------===//

#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetParser.h"
#include <algorithm>
#include <memory>

using namespace clang;

//===----------------------------------------------------------------------===//
//  Common code shared among targets.
//===----------------------------------------------------------------------===//

/// DefineStd - Define a macro name and standard variants.  For example if
/// MacroName is "unix", then this will define "__unix", "__unix__", and "unix"
/// when in GNU mode.
static void DefineStd(MacroBuilder &Builder, StringRef MacroName,
                      const LangOptions &Opts) {
  assert(MacroName[0] != '_' && "Identifier should be in the user's namespace");

  // If in GNU mode (e.g. -std=gnu99 but not -std=c99) define the raw identifier
  // in the user's namespace.
  if (Opts.GNUMode)
    Builder.defineMacro(MacroName);

  // Define __unix.
  Builder.defineMacro("__" + MacroName);

  // Define __unix__.
  Builder.defineMacro("__" + MacroName + "__");
}

static void defineCPUMacros(MacroBuilder &Builder, StringRef CPUName,
                            bool Tuning = true) {
  Builder.defineMacro("__" + CPUName);
  Builder.defineMacro("__" + CPUName + "__");
  if (Tuning)
    Builder.defineMacro("__tune_" + CPUName + "__");
}

//===----------------------------------------------------------------------===//
// Defines specific to certain operating systems.
//===----------------------------------------------------------------------===//

namespace {
template <typename TgtInfo>
class OSTargetInfo : public TgtInfo {
protected:
  virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
                            MacroBuilder &Builder) const = 0;

public:
  OSTargetInfo(const llvm::Triple &Triple) : TgtInfo(Triple) {}
  void getTargetDefines(const LangOptions &Opts,
                        MacroBuilder &Builder) const override {
    TgtInfo::getTargetDefines(Opts, Builder);
    getOSDefines(Opts, TgtInfo::getTriple(), Builder);
  }
};
} // end anonymous namespace

static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
                             const llvm::Triple &Triple, StringRef &PlatformName,
                             VersionTuple &PlatformMinVersion) {
  Builder.defineMacro("__APPLE_CC__", "6000");
  Builder.defineMacro("__APPLE__");
  Builder.defineMacro("OBJC_NEW_PROPERTIES");
  // AddressSanitizer doesn't play well with source fortification, which is on
  // by default on Darwin.
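  // Defining _FORTIFY_SOURCE to 0 disables the __*_chk fortified wrappers
  // that Darwin's headers would otherwise substitute for the plain string
  // and stdio functions.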
if (Opts.Sanitize.has(SanitizerKind::Address)) Builder.defineMacro("_FORTIFY_SOURCE", "0"); if (!Opts.ObjCAutoRefCount) { // __weak is always defined, for use in blocks and with objc pointers. Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))"); // Darwin defines __strong even in C mode (just to nothing). if (Opts.getGC() != LangOptions::NonGC) Builder.defineMacro("__strong", "__attribute__((objc_gc(strong)))"); else Builder.defineMacro("__strong", ""); // __unsafe_unretained is defined to nothing in non-ARC mode. We even // allow this in C, since one might have block pointers in structs that // are used in pure C code and in Objective-C ARC. Builder.defineMacro("__unsafe_unretained", ""); } if (Opts.Static) Builder.defineMacro("__STATIC__"); else Builder.defineMacro("__DYNAMIC__"); if (Opts.POSIXThreads) Builder.defineMacro("_REENTRANT"); // Get the platform type and version number from the triple. unsigned Maj, Min, Rev; if (Triple.isMacOSX()) { Triple.getMacOSXVersion(Maj, Min, Rev); PlatformName = "macosx"; } else { Triple.getOSVersion(Maj, Min, Rev); PlatformName = llvm::Triple::getOSTypeName(Triple.getOS()); } // If -target arch-pc-win32-macho option specified, we're // generating code for Win32 ABI. No need to emit // __ENVIRONMENT_XX_OS_VERSION_MIN_REQUIRED__. if (PlatformName == "win32") { PlatformMinVersion = VersionTuple(Maj, Min, Rev); return; } // Set the appropriate OS version define. if (Triple.isiOS()) { assert(Maj < 10 && Min < 100 && Rev < 100 && "Invalid version!"); char Str[6]; Str[0] = '0' + Maj; Str[1] = '0' + (Min / 10); Str[2] = '0' + (Min % 10); Str[3] = '0' + (Rev / 10); Str[4] = '0' + (Rev % 10); Str[5] = '\0'; Builder.defineMacro("__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__", Str); } else if (Triple.isMacOSX()) { // Note that the Driver allows versions which aren't representable in the // define (because we only get a single digit for the minor and micro // revision numbers). So, we limit them to the maximum representable // version. assert(Maj < 100 && Min < 100 && Rev < 100 && "Invalid version!"); char Str[7]; if (Maj < 10 || (Maj == 10 && Min < 10)) { Str[0] = '0' + (Maj / 10); Str[1] = '0' + (Maj % 10); Str[2] = '0' + std::min(Min, 9U); Str[3] = '0' + std::min(Rev, 9U); Str[4] = '\0'; } else { // Handle versions > 10.9. Str[0] = '0' + (Maj / 10); Str[1] = '0' + (Maj % 10); Str[2] = '0' + (Min / 10); Str[3] = '0' + (Min % 10); Str[4] = '0' + (Rev / 10); Str[5] = '0' + (Rev % 10); Str[6] = '\0'; } Builder.defineMacro("__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__", Str); } // Tell users about the kernel if there is one. if (Triple.isOSDarwin()) Builder.defineMacro("__MACH__"); PlatformMinVersion = VersionTuple(Maj, Min, Rev); } namespace { // CloudABI Target template class CloudABITargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { Builder.defineMacro("__CloudABI__"); Builder.defineMacro("__ELF__"); // CloudABI uses ISO/IEC 10646:2012 for wchar_t, char16_t and char32_t. 
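// The C standard gives __STDC_ISO_10646__ the form yyyymmL, naming the
// edition of ISO/IEC 10646 whose code points wchar_t can hold; 201206L is
// the June 2012 revision.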
Builder.defineMacro("__STDC_ISO_10646__", "201206L"); Builder.defineMacro("__STDC_UTF_16__"); Builder.defineMacro("__STDC_UTF_32__"); } public: CloudABITargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; } }; template class DarwinTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { getDarwinDefines(Builder, Opts, Triple, this->PlatformName, this->PlatformMinVersion); } public: DarwinTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->TLSSupported = Triple.isMacOSX() && !Triple.isMacOSXVersionLT(10, 7); this->MCountName = "\01mcount"; } std::string isValidSectionSpecifier(StringRef SR) const override { // Let MCSectionMachO validate this. StringRef Segment, Section; unsigned TAA, StubSize; bool HasTAA; return llvm::MCSectionMachO::ParseSectionSpecifier(SR, Segment, Section, TAA, HasTAA, StubSize); } const char *getStaticInitSectionSpecifier() const override { // FIXME: We should return 0 when building kexts. return "__TEXT,__StaticInit,regular,pure_instructions"; } /// Darwin does not support protected visibility. Darwin's "default" /// is very similar to ELF's "protected"; Darwin requires a "weak" /// attribute on declarations that can be dynamically replaced. bool hasProtectedVisibility() const override { return false; } }; // DragonFlyBSD Target template class DragonFlyBSDTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { // DragonFly defines; list based off of gcc output Builder.defineMacro("__DragonFly__"); Builder.defineMacro("__DragonFly_cc_version", "100001"); Builder.defineMacro("__ELF__"); Builder.defineMacro("__KPRINTF_ATTRIBUTE__"); Builder.defineMacro("__tune_i386__"); DefineStd(Builder, "unix", Opts); } public: DragonFlyBSDTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; switch (Triple.getArch()) { default: case llvm::Triple::x86: case llvm::Triple::x86_64: this->MCountName = ".mcount"; break; } } }; // FreeBSD Target template class FreeBSDTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { // FreeBSD defines; list based off of gcc output unsigned Release = Triple.getOSMajorVersion(); if (Release == 0U) Release = 8; Builder.defineMacro("__FreeBSD__", Twine(Release)); Builder.defineMacro("__FreeBSD_cc_version", Twine(Release * 100000U + 1U)); Builder.defineMacro("__KPRINTF_ATTRIBUTE__"); DefineStd(Builder, "unix", Opts); Builder.defineMacro("__ELF__"); // On FreeBSD, wchar_t contains the number of the code point as // used by the character set of the locale. These character sets are // not necessarily a superset of ASCII. // // FIXME: This is wrong; the macro refers to the numerical values // of wchar_t *literals*, which are not locale-dependent. However, // FreeBSD systems apparently depend on us getting this wrong, and // setting this to 1 is conforming even if all the basic source // character literals have the same encoding as char and wchar_t. 
Builder.defineMacro("__STDC_MB_MIGHT_NEQ_WC__", "1"); } public: FreeBSDTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; switch (Triple.getArch()) { default: case llvm::Triple::x86: case llvm::Triple::x86_64: this->MCountName = ".mcount"; break; case llvm::Triple::mips: case llvm::Triple::mipsel: case llvm::Triple::ppc: case llvm::Triple::ppc64: case llvm::Triple::ppc64le: this->MCountName = "_mcount"; break; case llvm::Triple::arm: this->MCountName = "__mcount"; break; } } }; // GNU/kFreeBSD Target template class KFreeBSDTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { // GNU/kFreeBSD defines; list based off of gcc output DefineStd(Builder, "unix", Opts); Builder.defineMacro("__FreeBSD_kernel__"); Builder.defineMacro("__GLIBC__"); Builder.defineMacro("__ELF__"); if (Opts.POSIXThreads) Builder.defineMacro("_REENTRANT"); if (Opts.CPlusPlus) Builder.defineMacro("_GNU_SOURCE"); } public: KFreeBSDTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; } }; // Minix Target template class MinixTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { // Minix defines Builder.defineMacro("__minix", "3"); Builder.defineMacro("_EM_WSIZE", "4"); Builder.defineMacro("_EM_PSIZE", "4"); Builder.defineMacro("_EM_SSIZE", "2"); Builder.defineMacro("_EM_LSIZE", "4"); Builder.defineMacro("_EM_FSIZE", "4"); Builder.defineMacro("_EM_DSIZE", "8"); Builder.defineMacro("__ELF__"); DefineStd(Builder, "unix", Opts); } public: MinixTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; } }; // Linux target template class LinuxTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { // Linux defines; list based off of gcc output DefineStd(Builder, "unix", Opts); DefineStd(Builder, "linux", Opts); Builder.defineMacro("__gnu_linux__"); Builder.defineMacro("__ELF__"); if (Triple.getEnvironment() == llvm::Triple::Android) { Builder.defineMacro("__ANDROID__", "1"); unsigned Maj, Min, Rev; Triple.getEnvironmentVersion(Maj, Min, Rev); this->PlatformName = "android"; this->PlatformMinVersion = VersionTuple(Maj, Min, Rev); } if (Opts.POSIXThreads) Builder.defineMacro("_REENTRANT"); if (Opts.CPlusPlus) Builder.defineMacro("_GNU_SOURCE"); } public: LinuxTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; this->WIntType = TargetInfo::UnsignedInt; switch (Triple.getArch()) { default: break; case llvm::Triple::ppc: case llvm::Triple::ppc64: case llvm::Triple::ppc64le: this->MCountName = "_mcount"; break; } } const char *getStaticInitSectionSpecifier() const override { return ".text.startup"; } }; // NetBSD Target template class NetBSDTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { // NetBSD defines; list based off of gcc output Builder.defineMacro("__NetBSD__"); Builder.defineMacro("__unix__"); Builder.defineMacro("__ELF__"); if (Opts.POSIXThreads) Builder.defineMacro("_POSIX_THREADS"); switch (Triple.getArch()) { default: break; case llvm::Triple::arm: case llvm::Triple::armeb: case llvm::Triple::thumb: case llvm::Triple::thumbeb: 
Builder.defineMacro("__ARM_DWARF_EH__"); break; } } public: NetBSDTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; this->MCountName = "_mcount"; } }; // OpenBSD Target template class OpenBSDTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { // OpenBSD defines; list based off of gcc output Builder.defineMacro("__OpenBSD__"); DefineStd(Builder, "unix", Opts); Builder.defineMacro("__ELF__"); if (Opts.POSIXThreads) Builder.defineMacro("_REENTRANT"); } public: OpenBSDTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; this->TLSSupported = false; switch (Triple.getArch()) { default: case llvm::Triple::x86: case llvm::Triple::x86_64: case llvm::Triple::arm: case llvm::Triple::sparc: this->MCountName = "__mcount"; break; case llvm::Triple::mips64: case llvm::Triple::mips64el: case llvm::Triple::ppc: case llvm::Triple::sparcv9: this->MCountName = "_mcount"; break; } } }; // Bitrig Target template class BitrigTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { // Bitrig defines; list based off of gcc output Builder.defineMacro("__Bitrig__"); DefineStd(Builder, "unix", Opts); Builder.defineMacro("__ELF__"); if (Opts.POSIXThreads) Builder.defineMacro("_REENTRANT"); switch (Triple.getArch()) { default: break; case llvm::Triple::arm: case llvm::Triple::armeb: case llvm::Triple::thumb: case llvm::Triple::thumbeb: Builder.defineMacro("__ARM_DWARF_EH__"); break; } } public: BitrigTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; this->MCountName = "__mcount"; } }; // PSP Target template class PSPTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { // PSP defines; list based on the output of the pspdev gcc toolchain. Builder.defineMacro("PSP"); Builder.defineMacro("_PSP"); Builder.defineMacro("__psp__"); Builder.defineMacro("__ELF__"); } public: PSPTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; } }; // PS3 PPU Target template class PS3PPUTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { // PS3 PPU defines. 
Builder.defineMacro("__PPC__"); Builder.defineMacro("__PPU__"); Builder.defineMacro("__CELLOS_LV2__"); Builder.defineMacro("__ELF__"); Builder.defineMacro("__LP32__"); Builder.defineMacro("_ARCH_PPC64"); Builder.defineMacro("__powerpc64__"); } public: PS3PPUTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; this->LongWidth = this->LongAlign = 32; this->PointerWidth = this->PointerAlign = 32; this->IntMaxType = TargetInfo::SignedLongLong; this->Int64Type = TargetInfo::SignedLongLong; this->SizeType = TargetInfo::UnsignedInt; this->DescriptionString = "E-m:e-p:32:32-i64:64-n32:64"; } }; template class PS4OSTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { Builder.defineMacro("__FreeBSD__", "9"); Builder.defineMacro("__FreeBSD_cc_version", "900001"); Builder.defineMacro("__KPRINTF_ATTRIBUTE__"); DefineStd(Builder, "unix", Opts); Builder.defineMacro("__ELF__"); Builder.defineMacro("__PS4__"); } public: PS4OSTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->WCharType = this->UnsignedShort; // On PS4, TLS variable cannot be aligned to more than 32 bytes (256 bits). this->MaxTLSAlign = 256; this->UserLabelPrefix = ""; switch (Triple.getArch()) { default: case llvm::Triple::x86_64: this->MCountName = ".mcount"; break; } } }; // Solaris target template class SolarisTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { DefineStd(Builder, "sun", Opts); DefineStd(Builder, "unix", Opts); Builder.defineMacro("__ELF__"); Builder.defineMacro("__svr4__"); Builder.defineMacro("__SVR4"); // Solaris headers require _XOPEN_SOURCE to be set to 600 for C99 and // newer, but to 500 for everything else. feature_test.h has a check to // ensure that you are not using C99 with an old version of X/Open or C89 // with a new version. if (Opts.C99) Builder.defineMacro("_XOPEN_SOURCE", "600"); else Builder.defineMacro("_XOPEN_SOURCE", "500"); if (Opts.CPlusPlus) Builder.defineMacro("__C99FEATURES__"); Builder.defineMacro("_LARGEFILE_SOURCE"); Builder.defineMacro("_LARGEFILE64_SOURCE"); Builder.defineMacro("__EXTENSIONS__"); Builder.defineMacro("_REENTRANT"); } public: SolarisTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; this->WCharType = this->SignedInt; // FIXME: WIntType should be SignedLong } }; // Windows target template class WindowsTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { Builder.defineMacro("_WIN32"); } void getVisualStudioDefines(const LangOptions &Opts, MacroBuilder &Builder) const { if (Opts.CPlusPlus) { if (Opts.RTTIData) Builder.defineMacro("_CPPRTTI"); if (Opts.CXXExceptions) Builder.defineMacro("_CPPUNWIND"); } if (!Opts.CharIsSigned) Builder.defineMacro("_CHAR_UNSIGNED"); // FIXME: POSIXThreads isn't exactly the option this should be defined for, // but it works for now. 
if (Opts.POSIXThreads) Builder.defineMacro("_MT"); if (Opts.MSCompatibilityVersion) { Builder.defineMacro("_MSC_VER", Twine(Opts.MSCompatibilityVersion / 100000)); Builder.defineMacro("_MSC_FULL_VER", Twine(Opts.MSCompatibilityVersion)); // FIXME We cannot encode the revision information into 32-bits Builder.defineMacro("_MSC_BUILD", Twine(1)); if (Opts.CPlusPlus11 && Opts.isCompatibleWithMSVC(LangOptions::MSVC2015)) Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", Twine(1)); } if (Opts.MicrosoftExt) { Builder.defineMacro("_MSC_EXTENSIONS"); if (Opts.CPlusPlus11) { Builder.defineMacro("_RVALUE_REFERENCES_V2_SUPPORTED"); Builder.defineMacro("_RVALUE_REFERENCES_SUPPORTED"); Builder.defineMacro("_NATIVE_NULLPTR_SUPPORTED"); } } Builder.defineMacro("_INTEGRAL_MAX_BITS", "64"); } public: WindowsTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) {} }; template class NaClTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { if (Opts.POSIXThreads) Builder.defineMacro("_REENTRANT"); if (Opts.CPlusPlus) Builder.defineMacro("_GNU_SOURCE"); DefineStd(Builder, "unix", Opts); Builder.defineMacro("__ELF__"); Builder.defineMacro("__native_client__"); } public: NaClTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; this->LongAlign = 32; this->LongWidth = 32; this->PointerAlign = 32; this->PointerWidth = 32; this->IntMaxType = TargetInfo::SignedLongLong; this->Int64Type = TargetInfo::SignedLongLong; this->DoubleAlign = 64; this->LongDoubleWidth = 64; this->LongDoubleAlign = 64; this->LongLongWidth = 64; this->LongLongAlign = 64; this->SizeType = TargetInfo::UnsignedInt; this->PtrDiffType = TargetInfo::SignedInt; this->IntPtrType = TargetInfo::SignedInt; // RegParmMax is inherited from the underlying architecture this->LongDoubleFormat = &llvm::APFloat::IEEEdouble; if (Triple.getArch() == llvm::Triple::arm) { // Handled in ARM's setABI(). } else if (Triple.getArch() == llvm::Triple::x86) { this->DescriptionString = "e-m:e-p:32:32-i64:64-n8:16:32-S128"; } else if (Triple.getArch() == llvm::Triple::x86_64) { this->DescriptionString = "e-m:e-p:32:32-i64:64-n8:16:32:64-S128"; } else if (Triple.getArch() == llvm::Triple::mipsel) { // Handled on mips' setDescriptionString. } else { assert(Triple.getArch() == llvm::Triple::le32); this->DescriptionString = "e-p:32:32-i64:64"; } } }; //===----------------------------------------------------------------------===// // Specific target implementations. //===----------------------------------------------------------------------===// // PPC abstract base class class PPCTargetInfo : public TargetInfo { static const Builtin::Info BuiltinInfo[]; static const char * const GCCRegNames[]; static const TargetInfo::GCCRegAlias GCCRegAliases[]; std::string CPU; // Target cpu features. bool HasVSX; bool HasP8Vector; bool HasP8Crypto; bool HasDirectMove; bool HasQPX; bool HasHTM; bool HasBPERMD; bool HasExtDiv; protected: std::string ABI; public: PPCTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple), HasVSX(false), HasP8Vector(false), HasP8Crypto(false), HasDirectMove(false), HasQPX(false), HasHTM(false), HasBPERMD(false), HasExtDiv(false) { BigEndian = (Triple.getArch() != llvm::Triple::ppc64le); SimdDefaultAlign = 128; LongDoubleWidth = LongDoubleAlign = 128; LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble; } /// \brief Flags for architecture specific defines. 
typedef enum { ArchDefineNone = 0, ArchDefineName = 1 << 0, // is substituted for arch name. ArchDefinePpcgr = 1 << 1, ArchDefinePpcsq = 1 << 2, ArchDefine440 = 1 << 3, ArchDefine603 = 1 << 4, ArchDefine604 = 1 << 5, ArchDefinePwr4 = 1 << 6, ArchDefinePwr5 = 1 << 7, ArchDefinePwr5x = 1 << 8, ArchDefinePwr6 = 1 << 9, ArchDefinePwr6x = 1 << 10, ArchDefinePwr7 = 1 << 11, ArchDefinePwr8 = 1 << 12, ArchDefineA2 = 1 << 13, ArchDefineA2q = 1 << 14 } ArchDefineTypes; // Note: GCC recognizes the following additional cpus: // 401, 403, 405, 405fp, 440fp, 464, 464fp, 476, 476fp, 505, 740, 801, // 821, 823, 8540, 8548, e300c2, e300c3, e500mc64, e6500, 860, cell, // titan, rs64. bool setCPU(const std::string &Name) override { bool CPUKnown = llvm::StringSwitch(Name) .Case("generic", true) .Case("440", true) .Case("450", true) .Case("601", true) .Case("602", true) .Case("603", true) .Case("603e", true) .Case("603ev", true) .Case("604", true) .Case("604e", true) .Case("620", true) .Case("630", true) .Case("g3", true) .Case("7400", true) .Case("g4", true) .Case("7450", true) .Case("g4+", true) .Case("750", true) .Case("970", true) .Case("g5", true) .Case("a2", true) .Case("a2q", true) .Case("e500mc", true) .Case("e5500", true) .Case("power3", true) .Case("pwr3", true) .Case("power4", true) .Case("pwr4", true) .Case("power5", true) .Case("pwr5", true) .Case("power5x", true) .Case("pwr5x", true) .Case("power6", true) .Case("pwr6", true) .Case("power6x", true) .Case("pwr6x", true) .Case("power7", true) .Case("pwr7", true) .Case("power8", true) .Case("pwr8", true) .Case("powerpc", true) .Case("ppc", true) .Case("powerpc64", true) .Case("ppc64", true) .Case("powerpc64le", true) .Case("ppc64le", true) .Default(false); if (CPUKnown) CPU = Name; return CPUKnown; } StringRef getABI() const override { return ABI; } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { Records = BuiltinInfo; NumRecords = clang::PPC::LastTSBuiltin-Builtin::FirstTSBuiltin; } bool isCLZForZeroUndef() const override { return false; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override; void getDefaultFeatures(llvm::StringMap &Features) const override; bool handleTargetFeatures(std::vector &Features, DiagnosticsEngine &Diags) override; bool hasFeature(StringRef Feature) const override; void setFeatureEnabled(llvm::StringMap &Features, StringRef Name, bool Enabled) const override; void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override; void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override; bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const override { switch (*Name) { default: return false; case 'O': // Zero break; case 'b': // Base register case 'f': // Floating point register Info.setAllowsRegister(); break; // FIXME: The following are added to allow parsing. // I just took a guess at what the actions should be. // Also, is more specific checking needed? I.e. specific registers? case 'd': // Floating point register (containing 64-bit value) case 'v': // Altivec vector register Info.setAllowsRegister(); break; case 'w': switch (Name[1]) { case 'd':// VSX vector register to hold vector double data case 'f':// VSX vector register to hold vector float data case 's':// VSX vector register to hold scalar float data case 'a':// Any VSX register case 'c':// An individual CR bit break; default: return false; } Info.setAllowsRegister(); Name++; // Skip over 'w'. 
break; case 'h': // `MQ', `CTR', or `LINK' register case 'q': // `MQ' register case 'c': // `CTR' register case 'l': // `LINK' register case 'x': // `CR' register (condition register) number 0 case 'y': // `CR' register (condition register) case 'z': // `XER[CA]' carry bit (part of the XER register) Info.setAllowsRegister(); break; case 'I': // Signed 16-bit constant case 'J': // Unsigned 16-bit constant shifted left 16 bits // (use `L' instead for SImode constants) case 'K': // Unsigned 16-bit constant case 'L': // Signed 16-bit constant shifted left 16 bits case 'M': // Constant larger than 31 case 'N': // Exact power of 2 case 'P': // Constant whose negation is a signed 16-bit constant case 'G': // Floating point constant that can be loaded into a // register with one instruction per word case 'H': // Integer/Floating point constant that can be loaded // into a register using three instructions break; case 'm': // Memory operand. Note that on PowerPC targets, m can // include addresses that update the base register. It // is therefore only safe to use `m' in an asm statement // if that asm statement accesses the operand exactly once. // The asm statement must also use `%U' as a // placeholder for the "update" flag in the corresponding // load or store instruction. For example: // asm ("st%U0 %1,%0" : "=m" (mem) : "r" (val)); // is correct but: // asm ("st %1,%0" : "=m" (mem) : "r" (val)); // is not. Use es rather than m if you don't want the base // register to be updated. case 'e': if (Name[1] != 's') return false; // es: A "stable" memory operand; that is, one which does not // include any automodification of the base register. Unlike // `m', this constraint can be used in asm statements that // might access the operand several times, or that might not // access it at all. Info.setAllowsMemory(); Name++; // Skip over 'e'. break; case 'Q': // Memory operand that is an offset from a register (it is // usually better to use `m' or `es' in asm statements) case 'Z': // Memory operand that is an indexed or indirect from a // register (it is usually better to use `m' or `es' in // asm statements) Info.setAllowsMemory(); Info.setAllowsRegister(); break; case 'R': // AIX TOC entry case 'a': // Address operand that is an indexed or indirect from a // register (`p' is preferable for asm statements) case 'S': // Constant suitable as a 64-bit mask operand case 'T': // Constant suitable as a 32-bit mask operand case 'U': // System V Release 4 small data area reference case 't': // AND masks that can be performed by two rldic{l, r} // instructions case 'W': // Vector constant that does not require memory case 'j': // Vector constant that is all zeros. break; // End FIXME. } return true; } std::string convertConstraint(const char *&Constraint) const override { std::string R; switch (*Constraint) { case 'e': case 'w': // Two-character constraint; add "^" hint for later parsing. 
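// e.g. the VSX constraint "wd" becomes "^wd" and the memory constraint
// "es" becomes "^es", distinguishing them from the one-letter constraints
// "w" and "e".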
R = std::string("^") + std::string(Constraint, 2); Constraint++; break; default: return TargetInfo::convertConstraint(Constraint); } return R; } const char *getClobbers() const override { return ""; } int getEHDataRegisterNumber(unsigned RegNo) const override { if (RegNo == 0) return 3; if (RegNo == 1) return 4; return -1; } bool hasSjLjLowering() const override { return true; } bool useFloat128ManglingForLongDouble() const override { return LongDoubleWidth == 128 && LongDoubleFormat == &llvm::APFloat::PPCDoubleDouble && getTriple().isOSBinFormatELF(); } }; const Builtin::Info PPCTargetInfo::BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\ ALL_LANGUAGES }, #include "clang/Basic/BuiltinsPPC.def" }; /// handleTargetFeatures - Perform initialization based on the user /// configured set of features. bool PPCTargetInfo::handleTargetFeatures(std::vector &Features, DiagnosticsEngine &Diags) { for (unsigned i = 0, e = Features.size(); i !=e; ++i) { // Ignore disabled features. if (Features[i][0] == '-') continue; StringRef Feature = StringRef(Features[i]).substr(1); if (Feature == "vsx") { HasVSX = true; continue; } if (Feature == "bpermd") { HasBPERMD = true; continue; } if (Feature == "extdiv") { HasExtDiv = true; continue; } if (Feature == "power8-vector") { HasP8Vector = true; continue; } if (Feature == "crypto") { HasP8Crypto = true; continue; } if (Feature == "direct-move") { HasDirectMove = true; continue; } if (Feature == "qpx") { HasQPX = true; continue; } if (Feature == "htm") { HasHTM = true; continue; } // TODO: Finish this list and add an assert that we've handled them // all. } if (!HasVSX && (HasP8Vector || HasDirectMove)) { if (HasP8Vector) Diags.Report(diag::err_opt_not_valid_with_opt) << "-mpower8-vector" << "-mno-vsx"; else if (HasDirectMove) Diags.Report(diag::err_opt_not_valid_with_opt) << "-mdirect-move" << "-mno-vsx"; return false; } return true; } /// PPCTargetInfo::getTargetDefines - Return a set of the PowerPC-specific /// #defines that are not tied to a specific subtarget. void PPCTargetInfo::getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const { // Target identification. Builder.defineMacro("__ppc__"); Builder.defineMacro("__PPC__"); Builder.defineMacro("_ARCH_PPC"); Builder.defineMacro("__powerpc__"); Builder.defineMacro("__POWERPC__"); if (PointerWidth == 64) { Builder.defineMacro("_ARCH_PPC64"); Builder.defineMacro("__powerpc64__"); Builder.defineMacro("__ppc64__"); Builder.defineMacro("__PPC64__"); } // Target properties. if (getTriple().getArch() == llvm::Triple::ppc64le) { Builder.defineMacro("_LITTLE_ENDIAN"); } else { if (getTriple().getOS() != llvm::Triple::NetBSD && getTriple().getOS() != llvm::Triple::OpenBSD) Builder.defineMacro("_BIG_ENDIAN"); } // ABI options. if (ABI == "elfv1" || ABI == "elfv1-qpx") Builder.defineMacro("_CALL_ELF", "1"); if (ABI == "elfv2") Builder.defineMacro("_CALL_ELF", "2"); // Subtarget options. Builder.defineMacro("__NATURAL_ALIGNMENT__"); Builder.defineMacro("__REGISTER_PREFIX__", ""); // FIXME: Should be controlled by command line option. if (LongDoubleWidth == 128) Builder.defineMacro("__LONG_DOUBLE_128__"); if (Opts.AltiVec) { Builder.defineMacro("__VEC__", "10206"); Builder.defineMacro("__ALTIVEC__"); } // CPU identification. 
ArchDefineTypes defs = (ArchDefineTypes)llvm::StringSwitch(CPU) .Case("440", ArchDefineName) .Case("450", ArchDefineName | ArchDefine440) .Case("601", ArchDefineName) .Case("602", ArchDefineName | ArchDefinePpcgr) .Case("603", ArchDefineName | ArchDefinePpcgr) .Case("603e", ArchDefineName | ArchDefine603 | ArchDefinePpcgr) .Case("603ev", ArchDefineName | ArchDefine603 | ArchDefinePpcgr) .Case("604", ArchDefineName | ArchDefinePpcgr) .Case("604e", ArchDefineName | ArchDefine604 | ArchDefinePpcgr) .Case("620", ArchDefineName | ArchDefinePpcgr) .Case("630", ArchDefineName | ArchDefinePpcgr) .Case("7400", ArchDefineName | ArchDefinePpcgr) .Case("7450", ArchDefineName | ArchDefinePpcgr) .Case("750", ArchDefineName | ArchDefinePpcgr) .Case("970", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("a2", ArchDefineA2) .Case("a2q", ArchDefineName | ArchDefineA2 | ArchDefineA2q) .Case("pwr3", ArchDefinePpcgr) .Case("pwr4", ArchDefineName | ArchDefinePpcgr | ArchDefinePpcsq) .Case("pwr5", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("pwr5x", ArchDefineName | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("pwr6", ArchDefineName | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("pwr6x", ArchDefineName | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("pwr7", ArchDefineName | ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("pwr8", ArchDefineName | ArchDefinePwr7 | ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("power3", ArchDefinePpcgr) .Case("power4", ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("power5", ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("power5x", ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("power6", ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("power6x", ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("power7", ArchDefinePwr7 | ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Case("power8", ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq) .Default(ArchDefineNone); if (defs & ArchDefineName) Builder.defineMacro(Twine("_ARCH_", StringRef(CPU).upper())); if (defs & ArchDefinePpcgr) Builder.defineMacro("_ARCH_PPCGR"); if (defs & ArchDefinePpcsq) Builder.defineMacro("_ARCH_PPCSQ"); if (defs & ArchDefine440) Builder.defineMacro("_ARCH_440"); if (defs & ArchDefine603) Builder.defineMacro("_ARCH_603"); if (defs & ArchDefine604) Builder.defineMacro("_ARCH_604"); if (defs & ArchDefinePwr4) Builder.defineMacro("_ARCH_PWR4"); if (defs & ArchDefinePwr5) Builder.defineMacro("_ARCH_PWR5"); if (defs & ArchDefinePwr5x) Builder.defineMacro("_ARCH_PWR5X"); if (defs & ArchDefinePwr6) Builder.defineMacro("_ARCH_PWR6"); if (defs & ArchDefinePwr6x) Builder.defineMacro("_ARCH_PWR6X"); if (defs & ArchDefinePwr7) Builder.defineMacro("_ARCH_PWR7"); if (defs & ArchDefinePwr8) Builder.defineMacro("_ARCH_PWR8"); if (defs & ArchDefineA2) 
Builder.defineMacro("_ARCH_A2"); if (defs & ArchDefineA2q) { Builder.defineMacro("_ARCH_A2Q"); Builder.defineMacro("_ARCH_QP"); } if (getTriple().getVendor() == llvm::Triple::BGQ) { Builder.defineMacro("__bg__"); Builder.defineMacro("__THW_BLUEGENE__"); Builder.defineMacro("__bgq__"); Builder.defineMacro("__TOS_BGQ__"); } if (HasVSX) Builder.defineMacro("__VSX__"); if (HasP8Vector) Builder.defineMacro("__POWER8_VECTOR__"); if (HasP8Crypto) Builder.defineMacro("__CRYPTO__"); if (HasHTM) Builder.defineMacro("__HTM__"); if (getTriple().getArch() == llvm::Triple::ppc64le || (defs & ArchDefinePwr8) || (CPU == "pwr8")) { Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1"); Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2"); Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4"); if (PointerWidth == 64) Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8"); } // FIXME: The following are not yet generated here by Clang, but are // generated by GCC: // // _SOFT_FLOAT_ // __RECIP_PRECISION__ // __APPLE_ALTIVEC__ // __RECIP__ // __RECIPF__ // __RSQRTE__ // __RSQRTEF__ // _SOFT_DOUBLE_ // __NO_LWSYNC__ // __HAVE_BSWAP__ // __LONGDOUBLE128 // __CMODEL_MEDIUM__ // __CMODEL_LARGE__ // _CALL_SYSV // _CALL_DARWIN // __NO_FPRS__ } void PPCTargetInfo::getDefaultFeatures(llvm::StringMap &Features) const { Features["altivec"] = llvm::StringSwitch(CPU) .Case("7400", true) .Case("g4", true) .Case("7450", true) .Case("g4+", true) .Case("970", true) .Case("g5", true) .Case("pwr6", true) .Case("pwr7", true) .Case("pwr8", true) .Case("ppc64", true) .Case("ppc64le", true) .Default(false); Features["qpx"] = (CPU == "a2q"); Features["crypto"] = llvm::StringSwitch(CPU) .Case("ppc64le", true) .Case("pwr8", true) .Default(false); Features["power8-vector"] = llvm::StringSwitch(CPU) .Case("ppc64le", true) .Case("pwr8", true) .Default(false); Features["bpermd"] = llvm::StringSwitch(CPU) .Case("ppc64le", true) .Case("pwr8", true) .Case("pwr7", true) .Default(false); Features["extdiv"] = llvm::StringSwitch(CPU) .Case("ppc64le", true) .Case("pwr8", true) .Case("pwr7", true) .Default(false); Features["direct-move"] = llvm::StringSwitch(CPU) .Case("ppc64le", true) .Case("pwr8", true) .Default(false); Features["vsx"] = llvm::StringSwitch(CPU) .Case("ppc64le", true) .Case("pwr8", true) .Case("pwr7", true) .Default(false); } bool PPCTargetInfo::hasFeature(StringRef Feature) const { return llvm::StringSwitch(Feature) .Case("powerpc", true) .Case("vsx", HasVSX) .Case("power8-vector", HasP8Vector) .Case("crypto", HasP8Crypto) .Case("direct-move", HasDirectMove) .Case("qpx", HasQPX) .Case("htm", HasHTM) .Case("bpermd", HasBPERMD) .Case("extdiv", HasExtDiv) .Default(false); } /* There is no clear way for the target to know which of the features in the final feature vector came from defaults and which are actually specified by the user. To that end, we use the fact that this function is not called on default features - only user specified ones. By the first time this function is called, the default features are populated. We then keep track of the features that the user specified so that we can ensure we do not override a user's request (only defaults). 
For example: -mcpu=pwr8 -mno-vsx (should disable vsx and everything that depends on it) -mcpu=pwr8 -mdirect-move -mno-vsx (should actually be diagnosed) NOTE: Do not call this from PPCTargetInfo::getDefaultFeatures */ void PPCTargetInfo::setFeatureEnabled(llvm::StringMap &Features, StringRef Name, bool Enabled) const { static llvm::StringMap ExplicitFeatures; ExplicitFeatures[Name] = Enabled; // At this point, -mno-vsx turns off the dependent features but we respect // the user's requests. if (!Enabled && Name == "vsx") { Features["direct-move"] = ExplicitFeatures["direct-move"]; Features["power8-vector"] = ExplicitFeatures["power8-vector"]; } if ((Enabled && Name == "power8-vector") || (Enabled && Name == "direct-move")) { if (ExplicitFeatures.find("vsx") == ExplicitFeatures.end()) { Features["vsx"] = true; } } Features[Name] = Enabled; } const char * const PPCTargetInfo::GCCRegNames[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", "mq", "lr", "ctr", "ap", "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", "xer", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "vrsave", "vscr", "spe_acc", "spefscr", "sfp" }; void PPCTargetInfo::getGCCRegNames(const char * const *&Names, unsigned &NumNames) const { Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } const TargetInfo::GCCRegAlias PPCTargetInfo::GCCRegAliases[] = { // While some of these aliases do map to different registers // they still share the same register name. 
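// Bare numbers ("0".."31") alias the GPRs, "fr0".."fr31" the FPRs, and
// "cc" the condition register cr0.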
{ { "0" }, "r0" }, { { "1"}, "r1" }, { { "2" }, "r2" }, { { "3" }, "r3" }, { { "4" }, "r4" }, { { "5" }, "r5" }, { { "6" }, "r6" }, { { "7" }, "r7" }, { { "8" }, "r8" }, { { "9" }, "r9" }, { { "10" }, "r10" }, { { "11" }, "r11" }, { { "12" }, "r12" }, { { "13" }, "r13" }, { { "14" }, "r14" }, { { "15" }, "r15" }, { { "16" }, "r16" }, { { "17" }, "r17" }, { { "18" }, "r18" }, { { "19" }, "r19" }, { { "20" }, "r20" }, { { "21" }, "r21" }, { { "22" }, "r22" }, { { "23" }, "r23" }, { { "24" }, "r24" }, { { "25" }, "r25" }, { { "26" }, "r26" }, { { "27" }, "r27" }, { { "28" }, "r28" }, { { "29" }, "r29" }, { { "30" }, "r30" }, { { "31" }, "r31" }, { { "fr0" }, "f0" }, { { "fr1" }, "f1" }, { { "fr2" }, "f2" }, { { "fr3" }, "f3" }, { { "fr4" }, "f4" }, { { "fr5" }, "f5" }, { { "fr6" }, "f6" }, { { "fr7" }, "f7" }, { { "fr8" }, "f8" }, { { "fr9" }, "f9" }, { { "fr10" }, "f10" }, { { "fr11" }, "f11" }, { { "fr12" }, "f12" }, { { "fr13" }, "f13" }, { { "fr14" }, "f14" }, { { "fr15" }, "f15" }, { { "fr16" }, "f16" }, { { "fr17" }, "f17" }, { { "fr18" }, "f18" }, { { "fr19" }, "f19" }, { { "fr20" }, "f20" }, { { "fr21" }, "f21" }, { { "fr22" }, "f22" }, { { "fr23" }, "f23" }, { { "fr24" }, "f24" }, { { "fr25" }, "f25" }, { { "fr26" }, "f26" }, { { "fr27" }, "f27" }, { { "fr28" }, "f28" }, { { "fr29" }, "f29" }, { { "fr30" }, "f30" }, { { "fr31" }, "f31" }, { { "cc" }, "cr0" }, }; void PPCTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const { Aliases = GCCRegAliases; NumAliases = llvm::array_lengthof(GCCRegAliases); } class PPC32TargetInfo : public PPCTargetInfo { public: PPC32TargetInfo(const llvm::Triple &Triple) : PPCTargetInfo(Triple) { DescriptionString = "E-m:e-p:32:32-i64:64-n32"; switch (getTriple().getOS()) { case llvm::Triple::Linux: case llvm::Triple::FreeBSD: case llvm::Triple::NetBSD: SizeType = UnsignedInt; PtrDiffType = SignedInt; IntPtrType = SignedInt; break; default: break; } if (getTriple().getOS() == llvm::Triple::FreeBSD) { LongDoubleWidth = LongDoubleAlign = 64; LongDoubleFormat = &llvm::APFloat::IEEEdouble; } // PPC32 supports atomics up to 4 bytes. MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32; } BuiltinVaListKind getBuiltinVaListKind() const override { // This is the ELF definition, and is overridden by the Darwin sub-target return TargetInfo::PowerABIBuiltinVaList; } }; // Note: ABI differences may eventually require us to have a separate // TargetInfo for little endian. class PPC64TargetInfo : public PPCTargetInfo { public: PPC64TargetInfo(const llvm::Triple &Triple) : PPCTargetInfo(Triple) { LongWidth = LongAlign = PointerWidth = PointerAlign = 64; IntMaxType = SignedLong; Int64Type = SignedLong; if ((Triple.getArch() == llvm::Triple::ppc64le)) { DescriptionString = "e-m:e-i64:64-n32:64"; ABI = "elfv2"; } else { DescriptionString = "E-m:e-i64:64-n32:64"; ABI = "elfv1"; } switch (getTriple().getOS()) { case llvm::Triple::FreeBSD: LongDoubleWidth = LongDoubleAlign = 64; LongDoubleFormat = &llvm::APFloat::IEEEdouble; break; case llvm::Triple::NetBSD: IntMaxType = SignedLongLong; Int64Type = SignedLongLong; break; default: break; } // PPC64 supports atomics up to 8 bytes. MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64; } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::CharPtrBuiltinVaList; } // PPC64 Linux-specific ABI options. 
bool setABI(const std::string &Name) override { if (Name == "elfv1" || Name == "elfv1-qpx" || Name == "elfv2") { ABI = Name; return true; } return false; } }; class DarwinPPC32TargetInfo : public DarwinTargetInfo { public: DarwinPPC32TargetInfo(const llvm::Triple &Triple) : DarwinTargetInfo(Triple) { HasAlignMac68kSupport = true; BoolWidth = BoolAlign = 32; //XXX support -mone-byte-bool? PtrDiffType = SignedInt; // for http://llvm.org/bugs/show_bug.cgi?id=15726 LongLongAlign = 32; SuitableAlign = 128; DescriptionString = "E-m:o-p:32:32-f64:32:64-n32"; } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::CharPtrBuiltinVaList; } }; class DarwinPPC64TargetInfo : public DarwinTargetInfo { public: DarwinPPC64TargetInfo(const llvm::Triple &Triple) : DarwinTargetInfo(Triple) { HasAlignMac68kSupport = true; SuitableAlign = 128; DescriptionString = "E-m:o-i64:64-n32:64"; } }; static const unsigned NVPTXAddrSpaceMap[] = { 1, // opencl_global 3, // opencl_local 4, // opencl_constant // FIXME: generic has to be added to the target 0, // opencl_generic 1, // cuda_device 4, // cuda_constant 3, // cuda_shared }; class NVPTXTargetInfo : public TargetInfo { static const char * const GCCRegNames[]; static const Builtin::Info BuiltinInfo[]; // The GPU profiles supported by the NVPTX backend enum GPUKind { GK_NONE, GK_SM20, GK_SM21, GK_SM30, GK_SM35, GK_SM37, } GPU; public: NVPTXTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) { BigEndian = false; TLSSupported = false; LongWidth = LongAlign = 64; AddrSpaceMap = &NVPTXAddrSpaceMap; UseAddrSpaceMapMangling = true; // Define available target features // These must be defined in sorted order! NoAsmVariants = true; // Set the default GPU to sm20 GPU = GK_SM20; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("__PTX__"); Builder.defineMacro("__NVPTX__"); if (Opts.CUDAIsDevice) { // Set __CUDA_ARCH__ for the GPU specified. std::string CUDAArchCode; switch (GPU) { case GK_SM20: CUDAArchCode = "200"; break; case GK_SM21: CUDAArchCode = "210"; break; case GK_SM30: CUDAArchCode = "300"; break; case GK_SM35: CUDAArchCode = "350"; break; case GK_SM37: CUDAArchCode = "370"; break; default: llvm_unreachable("Unhandled target CPU"); } Builder.defineMacro("__CUDA_ARCH__", CUDAArchCode); } } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { Records = BuiltinInfo; NumRecords = clang::NVPTX::LastTSBuiltin-Builtin::FirstTSBuiltin; } bool hasFeature(StringRef Feature) const override { return Feature == "ptx" || Feature == "nvptx"; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override; void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override { // No aliases. Aliases = nullptr; NumAliases = 0; } bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const override { switch (*Name) { default: return false; case 'c': case 'h': case 'r': case 'l': case 'f': case 'd': Info.setAllowsRegister(); return true; } } const char *getClobbers() const override { // FIXME: Is this really right? 
return ""; } BuiltinVaListKind getBuiltinVaListKind() const override { // FIXME: implement return TargetInfo::CharPtrBuiltinVaList; } bool setCPU(const std::string &Name) override { GPU = llvm::StringSwitch(Name) .Case("sm_20", GK_SM20) .Case("sm_21", GK_SM21) .Case("sm_30", GK_SM30) .Case("sm_35", GK_SM35) .Case("sm_37", GK_SM37) .Default(GK_NONE); return GPU != GK_NONE; } }; const Builtin::Info NVPTXTargetInfo::BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\ ALL_LANGUAGES }, #include "clang/Basic/BuiltinsNVPTX.def" }; const char * const NVPTXTargetInfo::GCCRegNames[] = { "r0" }; void NVPTXTargetInfo::getGCCRegNames(const char * const *&Names, unsigned &NumNames) const { Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } class NVPTX32TargetInfo : public NVPTXTargetInfo { public: NVPTX32TargetInfo(const llvm::Triple &Triple) : NVPTXTargetInfo(Triple) { PointerWidth = PointerAlign = 32; SizeType = TargetInfo::UnsignedInt; PtrDiffType = TargetInfo::SignedInt; IntPtrType = TargetInfo::SignedInt; DescriptionString = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64"; } }; class NVPTX64TargetInfo : public NVPTXTargetInfo { public: NVPTX64TargetInfo(const llvm::Triple &Triple) : NVPTXTargetInfo(Triple) { PointerWidth = PointerAlign = 64; SizeType = TargetInfo::UnsignedLong; PtrDiffType = TargetInfo::SignedLong; IntPtrType = TargetInfo::SignedLong; DescriptionString = "e-i64:64-v16:16-v32:32-n16:32:64"; } }; static const unsigned AMDGPUAddrSpaceMap[] = { 1, // opencl_global 3, // opencl_local 2, // opencl_constant 4, // opencl_generic 1, // cuda_device 2, // cuda_constant 3 // cuda_shared }; // If you edit the description strings, make sure you update // getPointerWidthV(). static const char *DescriptionStringR600 = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128" "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"; static const char *DescriptionStringR600DoubleOps = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128" "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"; static const char *DescriptionStringSI = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64" "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128" "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"; class AMDGPUTargetInfo : public TargetInfo { static const Builtin::Info BuiltinInfo[]; static const char * const GCCRegNames[]; /// \brief The GPU profiles supported by the AMDGPU target. 
enum GPUKind { GK_NONE, GK_R600, GK_R600_DOUBLE_OPS, GK_R700, GK_R700_DOUBLE_OPS, GK_EVERGREEN, GK_EVERGREEN_DOUBLE_OPS, GK_NORTHERN_ISLANDS, GK_CAYMAN, GK_SOUTHERN_ISLANDS, GK_SEA_ISLANDS, GK_VOLCANIC_ISLANDS } GPU; bool hasFP64:1; bool hasFMAF:1; bool hasLDEXPF:1; public: AMDGPUTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) { if (Triple.getArch() == llvm::Triple::amdgcn) { DescriptionString = DescriptionStringSI; GPU = GK_SOUTHERN_ISLANDS; hasFP64 = true; hasFMAF = true; hasLDEXPF = true; } else { DescriptionString = DescriptionStringR600; GPU = GK_R600; hasFP64 = false; hasFMAF = false; hasLDEXPF = false; } AddrSpaceMap = &AMDGPUAddrSpaceMap; UseAddrSpaceMapMangling = true; } uint64_t getPointerWidthV(unsigned AddrSpace) const override { if (GPU <= GK_CAYMAN) return 32; switch(AddrSpace) { default: return 64; case 0: case 3: case 5: return 32; } } const char * getClobbers() const override { return ""; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override; void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override { Aliases = nullptr; NumAliases = 0; } bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &info) const override { return true; } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { Records = BuiltinInfo; NumRecords = clang::AMDGPU::LastTSBuiltin - Builtin::FirstTSBuiltin; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("__R600__"); if (hasFMAF) Builder.defineMacro("__HAS_FMAF__"); if (hasLDEXPF) Builder.defineMacro("__HAS_LDEXPF__"); if (hasFP64 && Opts.OpenCL) { Builder.defineMacro("cl_khr_fp64"); } } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::CharPtrBuiltinVaList; } bool setCPU(const std::string &Name) override { GPU = llvm::StringSwitch(Name) .Case("r600" , GK_R600) .Case("rv610", GK_R600) .Case("rv620", GK_R600) .Case("rv630", GK_R600) .Case("rv635", GK_R600) .Case("rs780", GK_R600) .Case("rs880", GK_R600) .Case("rv670", GK_R600_DOUBLE_OPS) .Case("rv710", GK_R700) .Case("rv730", GK_R700) .Case("rv740", GK_R700_DOUBLE_OPS) .Case("rv770", GK_R700_DOUBLE_OPS) .Case("palm", GK_EVERGREEN) .Case("cedar", GK_EVERGREEN) .Case("sumo", GK_EVERGREEN) .Case("sumo2", GK_EVERGREEN) .Case("redwood", GK_EVERGREEN) .Case("juniper", GK_EVERGREEN) .Case("hemlock", GK_EVERGREEN_DOUBLE_OPS) .Case("cypress", GK_EVERGREEN_DOUBLE_OPS) .Case("barts", GK_NORTHERN_ISLANDS) .Case("turks", GK_NORTHERN_ISLANDS) .Case("caicos", GK_NORTHERN_ISLANDS) .Case("cayman", GK_CAYMAN) .Case("aruba", GK_CAYMAN) .Case("tahiti", GK_SOUTHERN_ISLANDS) .Case("pitcairn", GK_SOUTHERN_ISLANDS) .Case("verde", GK_SOUTHERN_ISLANDS) .Case("oland", GK_SOUTHERN_ISLANDS) .Case("hainan", GK_SOUTHERN_ISLANDS) .Case("bonaire", GK_SEA_ISLANDS) .Case("kabini", GK_SEA_ISLANDS) .Case("kaveri", GK_SEA_ISLANDS) .Case("hawaii", GK_SEA_ISLANDS) .Case("mullins", GK_SEA_ISLANDS) .Case("tonga", GK_VOLCANIC_ISLANDS) .Case("iceland", GK_VOLCANIC_ISLANDS) .Case("carrizo", GK_VOLCANIC_ISLANDS) .Default(GK_NONE); if (GPU == GK_NONE) { return false; } // Set the correct data layout switch (GPU) { case GK_NONE: case GK_R600: case GK_R700: case GK_EVERGREEN: case GK_NORTHERN_ISLANDS: DescriptionString = DescriptionStringR600; hasFP64 = false; hasFMAF = false; hasLDEXPF = false; break; case GK_R600_DOUBLE_OPS: case GK_R700_DOUBLE_OPS: case GK_EVERGREEN_DOUBLE_OPS: case GK_CAYMAN: DescriptionString = 
DescriptionStringR600DoubleOps; hasFP64 = true; hasFMAF = true; hasLDEXPF = false; break; case GK_SOUTHERN_ISLANDS: case GK_SEA_ISLANDS: case GK_VOLCANIC_ISLANDS: DescriptionString = DescriptionStringSI; hasFP64 = true; hasFMAF = true; hasLDEXPF = true; break; } return true; } }; const Builtin::Info AMDGPUTargetInfo::BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) \ { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #include "clang/Basic/BuiltinsAMDGPU.def" }; const char * const AMDGPUTargetInfo::GCCRegNames[] = { "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "v32", "v33", "v34", "v35", "v36", "v37", "v38", "v39", "v40", "v41", "v42", "v43", "v44", "v45", "v46", "v47", "v48", "v49", "v50", "v51", "v52", "v53", "v54", "v55", "v56", "v57", "v58", "v59", "v60", "v61", "v62", "v63", "v64", "v65", "v66", "v67", "v68", "v69", "v70", "v71", "v72", "v73", "v74", "v75", "v76", "v77", "v78", "v79", "v80", "v81", "v82", "v83", "v84", "v85", "v86", "v87", "v88", "v89", "v90", "v91", "v92", "v93", "v94", "v95", "v96", "v97", "v98", "v99", "v100", "v101", "v102", "v103", "v104", "v105", "v106", "v107", "v108", "v109", "v110", "v111", "v112", "v113", "v114", "v115", "v116", "v117", "v118", "v119", "v120", "v121", "v122", "v123", "v124", "v125", "v126", "v127", "v128", "v129", "v130", "v131", "v132", "v133", "v134", "v135", "v136", "v137", "v138", "v139", "v140", "v141", "v142", "v143", "v144", "v145", "v146", "v147", "v148", "v149", "v150", "v151", "v152", "v153", "v154", "v155", "v156", "v157", "v158", "v159", "v160", "v161", "v162", "v163", "v164", "v165", "v166", "v167", "v168", "v169", "v170", "v171", "v172", "v173", "v174", "v175", "v176", "v177", "v178", "v179", "v180", "v181", "v182", "v183", "v184", "v185", "v186", "v187", "v188", "v189", "v190", "v191", "v192", "v193", "v194", "v195", "v196", "v197", "v198", "v199", "v200", "v201", "v202", "v203", "v204", "v205", "v206", "v207", "v208", "v209", "v210", "v211", "v212", "v213", "v214", "v215", "v216", "v217", "v218", "v219", "v220", "v221", "v222", "v223", "v224", "v225", "v226", "v227", "v228", "v229", "v230", "v231", "v232", "v233", "v234", "v235", "v236", "v237", "v238", "v239", "v240", "v241", "v242", "v243", "v244", "v245", "v246", "v247", "v248", "v249", "v250", "v251", "v252", "v253", "v254", "v255", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", "s32", "s33", "s34", "s35", "s36", "s37", "s38", "s39", "s40", "s41", "s42", "s43", "s44", "s45", "s46", "s47", "s48", "s49", "s50", "s51", "s52", "s53", "s54", "s55", "s56", "s57", "s58", "s59", "s60", "s61", "s62", "s63", "s64", "s65", "s66", "s67", "s68", "s69", "s70", "s71", "s72", "s73", "s74", "s75", "s76", "s77", "s78", "s79", "s80", "s81", "s82", "s83", "s84", "s85", "s86", "s87", "s88", "s89", "s90", "s91", "s92", "s93", "s94", "s95", "s96", "s97", "s98", "s99", "s100", "s101", "s102", "s103", "s104", "s105", "s106", "s107", "s108", "s109", "s110", "s111", "s112", "s113", "s114", "s115", "s116", "s117", "s118", "s119", "s120", "s121", "s122", "s123", "s124", "s125", "s126", "s127" "exec", "vcc", "scc", "m0", "flat_scr", "exec_lo", "exec_hi", "vcc_lo", "vcc_hi", "flat_scr_lo", "flat_scr_hi" }; void AMDGPUTargetInfo::getGCCRegNames(const char * const *&Names, unsigned 
&NumNames) const { Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } // Namespace for x86 abstract base class const Builtin::Info BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\ ALL_LANGUAGES }, #include "clang/Basic/BuiltinsX86.def" }; static const char* const GCCRegNames[] = { "ax", "dx", "cx", "bx", "si", "di", "bp", "sp", "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)", "argp", "flags", "fpcr", "fpsr", "dirflag", "frame", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15", }; const TargetInfo::AddlRegName AddlRegNames[] = { { { "al", "ah", "eax", "rax" }, 0 }, { { "bl", "bh", "ebx", "rbx" }, 3 }, { { "cl", "ch", "ecx", "rcx" }, 2 }, { { "dl", "dh", "edx", "rdx" }, 1 }, { { "esi", "rsi" }, 4 }, { { "edi", "rdi" }, 5 }, { { "esp", "rsp" }, 7 }, { { "ebp", "rbp" }, 6 }, }; // X86 target abstract base class; x86-32 and x86-64 are very close, so // most of the implementation can be shared. class X86TargetInfo : public TargetInfo { enum X86SSEEnum { NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F } SSELevel; enum MMX3DNowEnum { NoMMX3DNow, MMX, AMD3DNow, AMD3DNowAthlon } MMX3DNowLevel; enum XOPEnum { NoXOP, SSE4A, FMA4, XOP } XOPLevel; bool HasAES; bool HasPCLMUL; bool HasLZCNT; bool HasRDRND; bool HasFSGSBASE; bool HasBMI; bool HasBMI2; bool HasPOPCNT; bool HasRTM; bool HasPRFCHW; bool HasRDSEED; bool HasADX; bool HasTBM; bool HasFMA; bool HasF16C; bool HasAVX512CD, HasAVX512ER, HasAVX512PF, HasAVX512DQ, HasAVX512BW, HasAVX512VL; bool HasSHA; bool HasCX16; /// \brief Enumeration of all of the X86 CPUs supported by Clang. /// /// Each enumeration represents a particular CPU supported by Clang. These /// loosely correspond to the options passed to '-march' or '-mtune' flags. enum CPUKind { CK_Generic, /// \name i386 /// i386-generation processors. //@{ CK_i386, //@} /// \name i486 /// i486-generation processors. //@{ CK_i486, CK_WinChipC6, CK_WinChip2, CK_C3, //@} /// \name i586 /// i586-generation processors, P5 microarchitecture based. //@{ CK_i586, CK_Pentium, CK_PentiumMMX, //@} /// \name i686 /// i686-generation processors, P6 / Pentium M microarchitecture based. //@{ CK_i686, CK_PentiumPro, CK_Pentium2, CK_Pentium3, CK_Pentium3M, CK_PentiumM, CK_C3_2, /// This enumerator is a bit odd, as GCC no longer accepts -march=yonah. /// Clang however has some logic to suport this. // FIXME: Warn, deprecate, and potentially remove this. CK_Yonah, //@} /// \name Netburst /// Netburst microarchitecture based processors. //@{ CK_Pentium4, CK_Pentium4M, CK_Prescott, CK_Nocona, //@} /// \name Core /// Core microarchitecture based processors. //@{ CK_Core2, /// This enumerator, like \see CK_Yonah, is a bit odd. It is another /// codename which GCC no longer accepts as an option to -march, but Clang /// has some logic for recognizing it. // FIXME: Warn, deprecate, and potentially remove this. CK_Penryn, //@} /// \name Atom /// Atom processors //@{ CK_Bonnell, CK_Silvermont, //@} /// \name Nehalem /// Nehalem microarchitecture based processors. 
CK_Nehalem, /// \name Westmere /// Westmere microarchitecture based processors. CK_Westmere, /// \name Sandy Bridge /// Sandy Bridge microarchitecture based processors. CK_SandyBridge, /// \name Ivy Bridge /// Ivy Bridge microarchitecture based processors. CK_IvyBridge, /// \name Haswell /// Haswell microarchitecture based processors. CK_Haswell, /// \name Broadwell /// Broadwell microarchitecture based processors. CK_Broadwell, /// \name Skylake /// Skylake microarchitecture based processors. CK_Skylake, /// \name Knights Landing /// Knights Landing processor. CK_KNL, /// \name K6 /// K6 architecture processors. //@{ CK_K6, CK_K6_2, CK_K6_3, //@} /// \name K7 /// K7 architecture processors. //@{ CK_Athlon, CK_AthlonThunderbird, CK_Athlon4, CK_AthlonXP, CK_AthlonMP, //@} /// \name K8 /// K8 architecture processors. //@{ CK_Athlon64, CK_Athlon64SSE3, CK_AthlonFX, CK_K8, CK_K8SSE3, CK_Opteron, CK_OpteronSSE3, CK_AMDFAM10, //@} /// \name Bobcat /// Bobcat architecture processors. //@{ CK_BTVER1, CK_BTVER2, //@} /// \name Bulldozer /// Bulldozer architecture processors. //@{ CK_BDVER1, CK_BDVER2, CK_BDVER3, CK_BDVER4, //@} /// This specification is deprecated and will be removed in the future. /// Users should prefer \see CK_K8. // FIXME: Warn on this when the CPU is set to it. //@{ CK_x86_64, //@} /// \name Geode /// Geode processors. //@{ CK_Geode //@} } CPU; enum FPMathKind { FP_Default, FP_SSE, FP_387 } FPMath; public: X86TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple), SSELevel(NoSSE), MMX3DNowLevel(NoMMX3DNow), XOPLevel(NoXOP), HasAES(false), HasPCLMUL(false), HasLZCNT(false), HasRDRND(false), HasFSGSBASE(false), HasBMI(false), HasBMI2(false), HasPOPCNT(false), HasRTM(false), HasPRFCHW(false), HasRDSEED(false), HasADX(false), HasTBM(false), HasFMA(false), HasF16C(false), HasAVX512CD(false), HasAVX512ER(false), HasAVX512PF(false), HasAVX512DQ(false), HasAVX512BW(false), HasAVX512VL(false), HasSHA(false), HasCX16(false), CPU(CK_Generic), FPMath(FP_Default) { BigEndian = false; LongDoubleFormat = &llvm::APFloat::x87DoubleExtended; } unsigned getFloatEvalMethod() const override { // X87 evaluates with 80 bits "long double" precision. return SSELevel == NoSSE ? 
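// Editorial note: the value returned by getFloatEvalMethod() above surfaces
// as the predefined macro __FLT_EVAL_METHOD__, i.e. FLT_EVAL_METHOD in
// <float.h>. A minimal sketch of the user-visible difference:
//
//   #include <float.h>
//   float mac(float a, float b, float c) {
//     // FLT_EVAL_METHOD == 2 (no SSE, x87): a*b+c is evaluated in long
//     // double precision and only rounded to float at the return.
//     // FLT_EVAL_METHOD == 0 (SSE): every operation rounds to float.
//     return a * b + c;
//   }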
2 : 0; } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { Records = BuiltinInfo; NumRecords = clang::X86::LastTSBuiltin - Builtin::FirstTSBuiltin; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override { Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override { Aliases = nullptr; NumAliases = 0; } void getGCCAddlRegNames(const AddlRegName *&Names, unsigned &NumNames) const override { Names = AddlRegNames; NumNames = llvm::array_lengthof(AddlRegNames); } bool validateCpuSupports(StringRef Name) const override; bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &info) const override; bool validateOutputSize(StringRef Constraint, unsigned Size) const override; bool validateInputSize(StringRef Constraint, unsigned Size) const override; virtual bool validateOperandSize(StringRef Constraint, unsigned Size) const; std::string convertConstraint(const char *&Constraint) const override; const char *getClobbers() const override { return "~{dirflag},~{fpsr},~{flags}"; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override; static void setSSELevel(llvm::StringMap<bool> &Features, X86SSEEnum Level, bool Enabled); static void setMMXLevel(llvm::StringMap<bool> &Features, MMX3DNowEnum Level, bool Enabled); static void setXOPLevel(llvm::StringMap<bool> &Features, XOPEnum Level, bool Enabled); void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name, bool Enabled) const override { setFeatureEnabledImpl(Features, Name, Enabled); } // This exists purely to cut down on the number of virtual calls in // getDefaultFeatures which calls this repeatedly. static void setFeatureEnabledImpl(llvm::StringMap<bool> &Features, StringRef Name, bool Enabled); void getDefaultFeatures(llvm::StringMap<bool> &Features) const override; bool hasFeature(StringRef Feature) const override; bool handleTargetFeatures(std::vector<std::string> &Features, DiagnosticsEngine &Diags) override; StringRef getABI() const override { if (getTriple().getArch() == llvm::Triple::x86_64 && SSELevel >= AVX512F) return "avx512"; else if (getTriple().getArch() == llvm::Triple::x86_64 && SSELevel >= AVX) return "avx"; else if (getTriple().getArch() == llvm::Triple::x86 && MMX3DNowLevel == NoMMX3DNow) return "no-mmx"; return ""; } bool setCPU(const std::string &Name) override { CPU = llvm::StringSwitch<CPUKind>(Name) .Case("i386", CK_i386) .Case("i486", CK_i486) .Case("winchip-c6", CK_WinChipC6) .Case("winchip2", CK_WinChip2) .Case("c3", CK_C3) .Case("i586", CK_i586) .Case("pentium", CK_Pentium) .Case("pentium-mmx", CK_PentiumMMX) .Case("i686", CK_i686) .Case("pentiumpro", CK_PentiumPro) .Case("pentium2", CK_Pentium2) .Case("pentium3", CK_Pentium3) .Case("pentium3m", CK_Pentium3M) .Case("pentium-m", CK_PentiumM) .Case("c3-2", CK_C3_2) .Case("yonah", CK_Yonah) .Case("pentium4", CK_Pentium4) .Case("pentium4m", CK_Pentium4M) .Case("prescott", CK_Prescott) .Case("nocona", CK_Nocona) .Case("core2", CK_Core2) .Case("penryn", CK_Penryn) .Case("bonnell", CK_Bonnell) .Case("atom", CK_Bonnell) // Legacy name. .Case("silvermont", CK_Silvermont) .Case("slm", CK_Silvermont) // Legacy name. .Case("nehalem", CK_Nehalem) .Case("corei7", CK_Nehalem) // Legacy name. .Case("westmere", CK_Westmere) .Case("sandybridge", CK_SandyBridge) .Case("corei7-avx", CK_SandyBridge) // Legacy name. .Case("ivybridge", CK_IvyBridge) .Case("core-avx-i", CK_IvyBridge) // Legacy name.
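// Editorial note: the StringSwitch above keeps a number of GCC-era spellings
// alive as silent aliases of the canonical CPU names, e.g. (illustrative
// command lines, not from the vendor source):
//
//   clang -march=corei7 ...      // accepted, same as -march=nehalem
//   clang -march=corei7-avx ...  // accepted, same as -march=sandybridge
//   clang -march=atom ...        // accepted, same as -march=bonnell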
.Case("haswell", CK_Haswell) .Case("core-avx2", CK_Haswell) // Legacy name. .Case("broadwell", CK_Broadwell) .Case("skylake", CK_Skylake) .Case("skx", CK_Skylake) // Legacy name. .Case("knl", CK_KNL) .Case("k6", CK_K6) .Case("k6-2", CK_K6_2) .Case("k6-3", CK_K6_3) .Case("athlon", CK_Athlon) .Case("athlon-tbird", CK_AthlonThunderbird) .Case("athlon-4", CK_Athlon4) .Case("athlon-xp", CK_AthlonXP) .Case("athlon-mp", CK_AthlonMP) .Case("athlon64", CK_Athlon64) .Case("athlon64-sse3", CK_Athlon64SSE3) .Case("athlon-fx", CK_AthlonFX) .Case("k8", CK_K8) .Case("k8-sse3", CK_K8SSE3) .Case("opteron", CK_Opteron) .Case("opteron-sse3", CK_OpteronSSE3) .Case("barcelona", CK_AMDFAM10) .Case("amdfam10", CK_AMDFAM10) .Case("btver1", CK_BTVER1) .Case("btver2", CK_BTVER2) .Case("bdver1", CK_BDVER1) .Case("bdver2", CK_BDVER2) .Case("bdver3", CK_BDVER3) .Case("bdver4", CK_BDVER4) .Case("x86-64", CK_x86_64) .Case("geode", CK_Geode) .Default(CK_Generic); // Perform any per-CPU checks necessary to determine if this CPU is // acceptable. // FIXME: This results in terrible diagnostics. Clang just says the CPU is // invalid without explaining *why*. switch (CPU) { case CK_Generic: // No processor selected! return false; case CK_i386: case CK_i486: case CK_WinChipC6: case CK_WinChip2: case CK_C3: case CK_i586: case CK_Pentium: case CK_PentiumMMX: case CK_i686: case CK_PentiumPro: case CK_Pentium2: case CK_Pentium3: case CK_Pentium3M: case CK_PentiumM: case CK_Yonah: case CK_C3_2: case CK_Pentium4: case CK_Pentium4M: case CK_Prescott: case CK_K6: case CK_K6_2: case CK_K6_3: case CK_Athlon: case CK_AthlonThunderbird: case CK_Athlon4: case CK_AthlonXP: case CK_AthlonMP: case CK_Geode: // Only accept certain architectures when compiling in 32-bit mode. if (getTriple().getArch() != llvm::Triple::x86) return false; // Fallthrough case CK_Nocona: case CK_Core2: case CK_Penryn: case CK_Bonnell: case CK_Silvermont: case CK_Nehalem: case CK_Westmere: case CK_SandyBridge: case CK_IvyBridge: case CK_Haswell: case CK_Broadwell: case CK_Skylake: case CK_KNL: case CK_Athlon64: case CK_Athlon64SSE3: case CK_AthlonFX: case CK_K8: case CK_K8SSE3: case CK_Opteron: case CK_OpteronSSE3: case CK_AMDFAM10: case CK_BTVER1: case CK_BTVER2: case CK_BDVER1: case CK_BDVER2: case CK_BDVER3: case CK_BDVER4: case CK_x86_64: return true; } llvm_unreachable("Unhandled CPU kind"); } bool setFPMath(StringRef Name) override; CallingConvCheckResult checkCallingConvention(CallingConv CC) const override { // We accept all non-ARM calling conventions return (CC == CC_X86ThisCall || CC == CC_X86FastCall || CC == CC_X86StdCall || CC == CC_X86VectorCall || CC == CC_C || CC == CC_X86Pascal || CC == CC_IntelOclBicc) ? CCCR_OK : CCCR_Warning; } CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override { return MT == CCMT_Member ? CC_X86ThisCall : CC_C; } bool hasSjLjLowering() const override { return true; } }; bool X86TargetInfo::setFPMath(StringRef Name) { if (Name == "387") { FPMath = FP_387; return true; } if (Name == "sse") { FPMath = FP_SSE; return true; } return false; } void X86TargetInfo::getDefaultFeatures(llvm::StringMap &Features) const { // FIXME: This *really* should not be here. // X86_64 always has SSE2. 
if (getTriple().getArch() == llvm::Triple::x86_64) setFeatureEnabledImpl(Features, "sse2", true); switch (CPU) { case CK_Generic: case CK_i386: case CK_i486: case CK_i586: case CK_Pentium: case CK_i686: case CK_PentiumPro: break; case CK_PentiumMMX: case CK_Pentium2: case CK_K6: case CK_WinChipC6: setFeatureEnabledImpl(Features, "mmx", true); break; case CK_Pentium3: case CK_Pentium3M: case CK_C3_2: setFeatureEnabledImpl(Features, "sse", true); break; case CK_PentiumM: case CK_Pentium4: case CK_Pentium4M: case CK_x86_64: setFeatureEnabledImpl(Features, "sse2", true); break; case CK_Yonah: case CK_Prescott: case CK_Nocona: setFeatureEnabledImpl(Features, "sse3", true); setFeatureEnabledImpl(Features, "cx16", true); break; case CK_Core2: case CK_Bonnell: setFeatureEnabledImpl(Features, "ssse3", true); setFeatureEnabledImpl(Features, "cx16", true); break; case CK_Penryn: setFeatureEnabledImpl(Features, "sse4.1", true); setFeatureEnabledImpl(Features, "cx16", true); break; case CK_Skylake: setFeatureEnabledImpl(Features, "avx512f", true); setFeatureEnabledImpl(Features, "avx512cd", true); setFeatureEnabledImpl(Features, "avx512dq", true); setFeatureEnabledImpl(Features, "avx512bw", true); setFeatureEnabledImpl(Features, "avx512vl", true); // FALLTHROUGH case CK_Broadwell: setFeatureEnabledImpl(Features, "rdseed", true); setFeatureEnabledImpl(Features, "adx", true); // FALLTHROUGH case CK_Haswell: setFeatureEnabledImpl(Features, "avx2", true); setFeatureEnabledImpl(Features, "lzcnt", true); setFeatureEnabledImpl(Features, "bmi", true); setFeatureEnabledImpl(Features, "bmi2", true); setFeatureEnabledImpl(Features, "rtm", true); setFeatureEnabledImpl(Features, "fma", true); // FALLTHROUGH case CK_IvyBridge: setFeatureEnabledImpl(Features, "rdrnd", true); setFeatureEnabledImpl(Features, "f16c", true); setFeatureEnabledImpl(Features, "fsgsbase", true); // FALLTHROUGH case CK_SandyBridge: setFeatureEnabledImpl(Features, "avx", true); // FALLTHROUGH case CK_Westmere: case CK_Silvermont: setFeatureEnabledImpl(Features, "aes", true); setFeatureEnabledImpl(Features, "pclmul", true); // FALLTHROUGH case CK_Nehalem: setFeatureEnabledImpl(Features, "sse4.2", true); setFeatureEnabledImpl(Features, "cx16", true); break; case CK_KNL: setFeatureEnabledImpl(Features, "avx512f", true); setFeatureEnabledImpl(Features, "avx512cd", true); setFeatureEnabledImpl(Features, "avx512er", true); setFeatureEnabledImpl(Features, "avx512pf", true); setFeatureEnabledImpl(Features, "rdseed", true); setFeatureEnabledImpl(Features, "adx", true); setFeatureEnabledImpl(Features, "lzcnt", true); setFeatureEnabledImpl(Features, "bmi", true); setFeatureEnabledImpl(Features, "bmi2", true); setFeatureEnabledImpl(Features, "rtm", true); setFeatureEnabledImpl(Features, "fma", true); setFeatureEnabledImpl(Features, "rdrnd", true); setFeatureEnabledImpl(Features, "f16c", true); setFeatureEnabledImpl(Features, "fsgsbase", true); setFeatureEnabledImpl(Features, "aes", true); setFeatureEnabledImpl(Features, "pclmul", true); setFeatureEnabledImpl(Features, "cx16", true); break; case CK_K6_2: case CK_K6_3: case CK_WinChip2: case CK_C3: setFeatureEnabledImpl(Features, "3dnow", true); break; case CK_Athlon: case CK_AthlonThunderbird: case CK_Geode: setFeatureEnabledImpl(Features, "3dnowa", true); break; case CK_Athlon4: case CK_AthlonXP: case CK_AthlonMP: setFeatureEnabledImpl(Features, "sse", true); setFeatureEnabledImpl(Features, "3dnowa", true); break; case CK_K8: case CK_Opteron: case CK_Athlon64: case CK_AthlonFX: 
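// Editorial note: because each Intel case above deliberately falls through
// to the previous generation, a CPU accumulates all older default features.
// Tracing the switch for CK_Haswell, for instance, yields roughly:
//
//   haswell:       avx2 lzcnt bmi bmi2 rtm fma   (own case)
//   + ivybridge:   rdrnd f16c fsgsbase
//   + sandybridge: avx
//   + westmere:    aes pclmul
//   + nehalem:     sse4.2 cx16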
setFeatureEnabledImpl(Features, "sse2", true); setFeatureEnabledImpl(Features, "3dnowa", true); break; case CK_AMDFAM10: setFeatureEnabledImpl(Features, "sse4a", true); setFeatureEnabledImpl(Features, "lzcnt", true); setFeatureEnabledImpl(Features, "popcnt", true); // FALLTHROUGH case CK_K8SSE3: case CK_OpteronSSE3: case CK_Athlon64SSE3: setFeatureEnabledImpl(Features, "sse3", true); setFeatureEnabledImpl(Features, "3dnowa", true); break; case CK_BTVER2: setFeatureEnabledImpl(Features, "avx", true); setFeatureEnabledImpl(Features, "aes", true); setFeatureEnabledImpl(Features, "pclmul", true); setFeatureEnabledImpl(Features, "bmi", true); setFeatureEnabledImpl(Features, "f16c", true); // FALLTHROUGH case CK_BTVER1: setFeatureEnabledImpl(Features, "ssse3", true); setFeatureEnabledImpl(Features, "sse4a", true); setFeatureEnabledImpl(Features, "lzcnt", true); setFeatureEnabledImpl(Features, "popcnt", true); setFeatureEnabledImpl(Features, "prfchw", true); setFeatureEnabledImpl(Features, "cx16", true); break; case CK_BDVER4: setFeatureEnabledImpl(Features, "avx2", true); setFeatureEnabledImpl(Features, "bmi2", true); // FALLTHROUGH case CK_BDVER3: setFeatureEnabledImpl(Features, "fsgsbase", true); // FALLTHROUGH case CK_BDVER2: setFeatureEnabledImpl(Features, "bmi", true); setFeatureEnabledImpl(Features, "fma", true); setFeatureEnabledImpl(Features, "f16c", true); setFeatureEnabledImpl(Features, "tbm", true); // FALLTHROUGH case CK_BDVER1: // xop implies avx, sse4a and fma4. setFeatureEnabledImpl(Features, "xop", true); setFeatureEnabledImpl(Features, "lzcnt", true); setFeatureEnabledImpl(Features, "aes", true); setFeatureEnabledImpl(Features, "pclmul", true); setFeatureEnabledImpl(Features, "prfchw", true); setFeatureEnabledImpl(Features, "cx16", true); break; } } void X86TargetInfo::setSSELevel(llvm::StringMap &Features, X86SSEEnum Level, bool Enabled) { if (Enabled) { switch (Level) { case AVX512F: Features["avx512f"] = true; case AVX2: Features["avx2"] = true; case AVX: Features["avx"] = true; case SSE42: Features["sse4.2"] = true; case SSE41: Features["sse4.1"] = true; case SSSE3: Features["ssse3"] = true; case SSE3: Features["sse3"] = true; case SSE2: Features["sse2"] = true; case SSE1: Features["sse"] = true; case NoSSE: break; } return; } switch (Level) { case NoSSE: case SSE1: Features["sse"] = false; case SSE2: Features["sse2"] = Features["pclmul"] = Features["aes"] = Features["sha"] = false; case SSE3: Features["sse3"] = false; setXOPLevel(Features, NoXOP, false); case SSSE3: Features["ssse3"] = false; case SSE41: Features["sse4.1"] = false; case SSE42: Features["sse4.2"] = false; case AVX: Features["fma"] = Features["avx"] = Features["f16c"] = false; setXOPLevel(Features, FMA4, false); case AVX2: Features["avx2"] = false; case AVX512F: Features["avx512f"] = Features["avx512cd"] = Features["avx512er"] = Features["avx512pf"] = Features["avx512dq"] = Features["avx512bw"] = Features["avx512vl"] = false; } } void X86TargetInfo::setMMXLevel(llvm::StringMap &Features, MMX3DNowEnum Level, bool Enabled) { if (Enabled) { switch (Level) { case AMD3DNowAthlon: Features["3dnowa"] = true; case AMD3DNow: Features["3dnow"] = true; case MMX: Features["mmx"] = true; case NoMMX3DNow: break; } return; } switch (Level) { case NoMMX3DNow: case MMX: Features["mmx"] = false; case AMD3DNow: Features["3dnow"] = false; case AMD3DNowAthlon: Features["3dnowa"] = false; } } void X86TargetInfo::setXOPLevel(llvm::StringMap &Features, XOPEnum Level, bool Enabled) { if (Enabled) { switch (Level) { case XOP: 
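// Editorial sketch of the cascade pattern shared by setSSELevel,
// setMMXLevel and setXOPLevel: enabling a level also enables everything
// below it, while disabling a level also disables everything above it,
// plus features that depend on it, e.g.:
//
//   setSSELevel(F, AVX, true);   // turns on avx, sse4.2, sse4.1, ssse3,
//                                // sse3, sse2, sse
//   setSSELevel(F, SSE2, false); // turns off sse2 and, transitively,
//                                // aes/pclmul/sha and all higher levels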
Features["xop"] = true; case FMA4: Features["fma4"] = true; setSSELevel(Features, AVX, true); case SSE4A: Features["sse4a"] = true; setSSELevel(Features, SSE3, true); case NoXOP: break; } return; } switch (Level) { case NoXOP: case SSE4A: Features["sse4a"] = false; case FMA4: Features["fma4"] = false; case XOP: Features["xop"] = false; } } void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap &Features, StringRef Name, bool Enabled) { // This is a bit of a hack to deal with the sse4 target feature when used // as part of the target attribute. We handle sse4 correctly everywhere // else. See below for more information on how we handle the sse4 options. if (Name != "sse4") Features[Name] = Enabled; if (Name == "mmx") { setMMXLevel(Features, MMX, Enabled); } else if (Name == "sse") { setSSELevel(Features, SSE1, Enabled); } else if (Name == "sse2") { setSSELevel(Features, SSE2, Enabled); } else if (Name == "sse3") { setSSELevel(Features, SSE3, Enabled); } else if (Name == "ssse3") { setSSELevel(Features, SSSE3, Enabled); } else if (Name == "sse4.2") { setSSELevel(Features, SSE42, Enabled); } else if (Name == "sse4.1") { setSSELevel(Features, SSE41, Enabled); } else if (Name == "3dnow") { setMMXLevel(Features, AMD3DNow, Enabled); } else if (Name == "3dnowa") { setMMXLevel(Features, AMD3DNowAthlon, Enabled); } else if (Name == "aes") { if (Enabled) setSSELevel(Features, SSE2, Enabled); } else if (Name == "pclmul") { if (Enabled) setSSELevel(Features, SSE2, Enabled); } else if (Name == "avx") { setSSELevel(Features, AVX, Enabled); } else if (Name == "avx2") { setSSELevel(Features, AVX2, Enabled); } else if (Name == "avx512f") { setSSELevel(Features, AVX512F, Enabled); } else if (Name == "avx512cd" || Name == "avx512er" || Name == "avx512pf" || Name == "avx512dq" || Name == "avx512bw" || Name == "avx512vl") { if (Enabled) setSSELevel(Features, AVX512F, Enabled); } else if (Name == "fma") { if (Enabled) setSSELevel(Features, AVX, Enabled); } else if (Name == "fma4") { setXOPLevel(Features, FMA4, Enabled); } else if (Name == "xop") { setXOPLevel(Features, XOP, Enabled); } else if (Name == "sse4a") { setXOPLevel(Features, SSE4A, Enabled); } else if (Name == "f16c") { if (Enabled) setSSELevel(Features, AVX, Enabled); } else if (Name == "sha") { if (Enabled) setSSELevel(Features, SSE2, Enabled); } else if (Name == "sse4") { // We can get here via the __target__ attribute since that's not controlled // via the -msse4/-mno-sse4 command line alias. Handle this the same way // here - turn on the sse4.2 if enabled, turn off the sse4.1 level if // disabled. if (Enabled) setSSELevel(Features, SSE42, Enabled); else setSSELevel(Features, SSE41, Enabled); } } /// handleTargetFeatures - Perform initialization based on the user /// configured set of features. bool X86TargetInfo::handleTargetFeatures(std::vector &Features, DiagnosticsEngine &Diags) { // Remember the maximum enabled sselevel. for (unsigned i = 0, e = Features.size(); i !=e; ++i) { // Ignore disabled features. 
if (Features[i][0] == '-') continue; StringRef Feature = StringRef(Features[i]).substr(1); if (Feature == "aes") { HasAES = true; continue; } if (Feature == "pclmul") { HasPCLMUL = true; continue; } if (Feature == "lzcnt") { HasLZCNT = true; continue; } if (Feature == "rdrnd") { HasRDRND = true; continue; } if (Feature == "fsgsbase") { HasFSGSBASE = true; continue; } if (Feature == "bmi") { HasBMI = true; continue; } if (Feature == "bmi2") { HasBMI2 = true; continue; } if (Feature == "popcnt") { HasPOPCNT = true; continue; } if (Feature == "rtm") { HasRTM = true; continue; } if (Feature == "prfchw") { HasPRFCHW = true; continue; } if (Feature == "rdseed") { HasRDSEED = true; continue; } if (Feature == "adx") { HasADX = true; continue; } if (Feature == "tbm") { HasTBM = true; continue; } if (Feature == "fma") { HasFMA = true; continue; } if (Feature == "f16c") { HasF16C = true; continue; } if (Feature == "avx512cd") { HasAVX512CD = true; continue; } if (Feature == "avx512er") { HasAVX512ER = true; continue; } if (Feature == "avx512pf") { HasAVX512PF = true; continue; } if (Feature == "avx512dq") { HasAVX512DQ = true; continue; } if (Feature == "avx512bw") { HasAVX512BW = true; continue; } if (Feature == "avx512vl") { HasAVX512VL = true; continue; } if (Feature == "sha") { HasSHA = true; continue; } if (Feature == "cx16") { HasCX16 = true; continue; } assert(Features[i][0] == '+' && "Invalid target feature!"); X86SSEEnum Level = llvm::StringSwitch(Feature) .Case("avx512f", AVX512F) .Case("avx2", AVX2) .Case("avx", AVX) .Case("sse4.2", SSE42) .Case("sse4.1", SSE41) .Case("ssse3", SSSE3) .Case("sse3", SSE3) .Case("sse2", SSE2) .Case("sse", SSE1) .Default(NoSSE); SSELevel = std::max(SSELevel, Level); MMX3DNowEnum ThreeDNowLevel = llvm::StringSwitch(Feature) .Case("3dnowa", AMD3DNowAthlon) .Case("3dnow", AMD3DNow) .Case("mmx", MMX) .Default(NoMMX3DNow); MMX3DNowLevel = std::max(MMX3DNowLevel, ThreeDNowLevel); XOPEnum XLevel = llvm::StringSwitch(Feature) .Case("xop", XOP) .Case("fma4", FMA4) .Case("sse4a", SSE4A) .Default(NoXOP); XOPLevel = std::max(XOPLevel, XLevel); } // Enable popcnt if sse4.2 is enabled and popcnt is not explicitly disabled. // Can't do this earlier because we need to be able to explicitly enable // popcnt and still disable sse4.2. if (!HasPOPCNT && SSELevel >= SSE42 && std::find(Features.begin(), Features.end(), "-popcnt") == Features.end()){ HasPOPCNT = true; Features.push_back("+popcnt"); } // Enable prfchw if 3DNow! is enabled and prfchw is not explicitly disabled. if (!HasPRFCHW && MMX3DNowLevel >= AMD3DNow && std::find(Features.begin(), Features.end(), "-prfchw") == Features.end()){ HasPRFCHW = true; Features.push_back("+prfchw"); } // LLVM doesn't have a separate switch for fpmath, so only accept it if it // matches the selected sse level. if (FPMath == FP_SSE && SSELevel < SSE1) { Diags.Report(diag::err_target_unsupported_fpmath) << "sse"; return false; } else if (FPMath == FP_387 && SSELevel >= SSE1) { Diags.Report(diag::err_target_unsupported_fpmath) << "387"; return false; } // Don't tell the backend if we're turning off mmx; it will end up disabling // SSE, which we don't want. // Additionally, if SSE is enabled and mmx is not explicitly disabled, // then enable MMX. std::vector::iterator it; it = std::find(Features.begin(), Features.end(), "-mmx"); if (it != Features.end()) Features.erase(it); else if (SSELevel > NoSSE) MMX3DNowLevel = std::max(MMX3DNowLevel, MMX); SimdDefaultAlign = (getABI() == "avx512") ? 512 : (getABI() == "avx") ? 
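// Editorial sketch of the implications resolved above, written as
// before/after feature vectors (assumed inputs on the left):
//
//   {"+sse4.2"}            -> {"+sse4.2", "+popcnt"}  // popcnt implied
//   {"+3dnow"}             -> {"+3dnow", "+prfchw"}   // prfchw implied
//   {"+sse4.2", "-popcnt"} -> unchanged               // explicit opt-out wins
//
// SimdDefaultAlign then tracks the widest vector ABI in bits: 512 for the
// "avx512" ABI, 256 for "avx", 128 otherwise.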
256 : 128; return true; } /// X86TargetInfo::getTargetDefines - Return the set of the X86-specific macro /// definitions for this particular subtarget. void X86TargetInfo::getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const { // Target identification. if (getTriple().getArch() == llvm::Triple::x86_64) { Builder.defineMacro("__amd64__"); Builder.defineMacro("__amd64"); Builder.defineMacro("__x86_64"); Builder.defineMacro("__x86_64__"); if (getTriple().getArchName() == "x86_64h") { Builder.defineMacro("__x86_64h"); Builder.defineMacro("__x86_64h__"); } } else { DefineStd(Builder, "i386", Opts); } // Subtarget options. // FIXME: We are hard-coding the tune parameters based on the CPU, but they // truly should be based on -mtune options. switch (CPU) { case CK_Generic: break; case CK_i386: // The rest are coming from the i386 define above. Builder.defineMacro("__tune_i386__"); break; case CK_i486: case CK_WinChipC6: case CK_WinChip2: case CK_C3: defineCPUMacros(Builder, "i486"); break; case CK_PentiumMMX: Builder.defineMacro("__pentium_mmx__"); Builder.defineMacro("__tune_pentium_mmx__"); // Fallthrough case CK_i586: case CK_Pentium: defineCPUMacros(Builder, "i586"); defineCPUMacros(Builder, "pentium"); break; case CK_Pentium3: case CK_Pentium3M: case CK_PentiumM: Builder.defineMacro("__tune_pentium3__"); // Fallthrough case CK_Pentium2: case CK_C3_2: Builder.defineMacro("__tune_pentium2__"); // Fallthrough case CK_PentiumPro: Builder.defineMacro("__tune_i686__"); Builder.defineMacro("__tune_pentiumpro__"); // Fallthrough case CK_i686: Builder.defineMacro("__i686"); Builder.defineMacro("__i686__"); // Strangely, __tune_i686__ isn't defined by GCC when CPU == i686. Builder.defineMacro("__pentiumpro"); Builder.defineMacro("__pentiumpro__"); break; case CK_Pentium4: case CK_Pentium4M: defineCPUMacros(Builder, "pentium4"); break; case CK_Yonah: case CK_Prescott: case CK_Nocona: defineCPUMacros(Builder, "nocona"); break; case CK_Core2: case CK_Penryn: defineCPUMacros(Builder, "core2"); break; case CK_Bonnell: defineCPUMacros(Builder, "atom"); break; case CK_Silvermont: defineCPUMacros(Builder, "slm"); break; case CK_Nehalem: case CK_Westmere: case CK_SandyBridge: case CK_IvyBridge: case CK_Haswell: case CK_Broadwell: // FIXME: Historically, we defined this legacy name, it would be nice to // remove it at some point. We've never exposed fine-grained names for // recent primary x86 CPUs, and we should keep it that way. defineCPUMacros(Builder, "corei7"); break; case CK_Skylake: // FIXME: Historically, we defined this legacy name, it would be nice to // remove it at some point. This is the only fine-grained CPU macro in the // main intel CPU line, and it would be better to not have these and force // people to use ISA macros. defineCPUMacros(Builder, "skx"); break; case CK_KNL: defineCPUMacros(Builder, "knl"); break; case CK_K6_2: Builder.defineMacro("__k6_2__"); Builder.defineMacro("__tune_k6_2__"); // Fallthrough case CK_K6_3: if (CPU != CK_K6_2) { // In case of fallthrough // FIXME: GCC may be enabling these in cases where some other k6 // architecture is specified but -m3dnow is explicitly provided. The // exact semantics need to be determined and emulated here. 
Builder.defineMacro("__k6_3__"); Builder.defineMacro("__tune_k6_3__"); } // Fallthrough case CK_K6: defineCPUMacros(Builder, "k6"); break; case CK_Athlon: case CK_AthlonThunderbird: case CK_Athlon4: case CK_AthlonXP: case CK_AthlonMP: defineCPUMacros(Builder, "athlon"); if (SSELevel != NoSSE) { Builder.defineMacro("__athlon_sse__"); Builder.defineMacro("__tune_athlon_sse__"); } break; case CK_K8: case CK_K8SSE3: case CK_x86_64: case CK_Opteron: case CK_OpteronSSE3: case CK_Athlon64: case CK_Athlon64SSE3: case CK_AthlonFX: defineCPUMacros(Builder, "k8"); break; case CK_AMDFAM10: defineCPUMacros(Builder, "amdfam10"); break; case CK_BTVER1: defineCPUMacros(Builder, "btver1"); break; case CK_BTVER2: defineCPUMacros(Builder, "btver2"); break; case CK_BDVER1: defineCPUMacros(Builder, "bdver1"); break; case CK_BDVER2: defineCPUMacros(Builder, "bdver2"); break; case CK_BDVER3: defineCPUMacros(Builder, "bdver3"); break; case CK_BDVER4: defineCPUMacros(Builder, "bdver4"); break; case CK_Geode: defineCPUMacros(Builder, "geode"); break; } // Target properties. Builder.defineMacro("__REGISTER_PREFIX__", ""); // Define __NO_MATH_INLINES on linux/x86 so that we don't get inline // functions in glibc header files that use FP Stack inline asm which the // backend can't deal with (PR879). Builder.defineMacro("__NO_MATH_INLINES"); if (HasAES) Builder.defineMacro("__AES__"); if (HasPCLMUL) Builder.defineMacro("__PCLMUL__"); if (HasLZCNT) Builder.defineMacro("__LZCNT__"); if (HasRDRND) Builder.defineMacro("__RDRND__"); if (HasFSGSBASE) Builder.defineMacro("__FSGSBASE__"); if (HasBMI) Builder.defineMacro("__BMI__"); if (HasBMI2) Builder.defineMacro("__BMI2__"); if (HasPOPCNT) Builder.defineMacro("__POPCNT__"); if (HasRTM) Builder.defineMacro("__RTM__"); if (HasPRFCHW) Builder.defineMacro("__PRFCHW__"); if (HasRDSEED) Builder.defineMacro("__RDSEED__"); if (HasADX) Builder.defineMacro("__ADX__"); if (HasTBM) Builder.defineMacro("__TBM__"); switch (XOPLevel) { case XOP: Builder.defineMacro("__XOP__"); case FMA4: Builder.defineMacro("__FMA4__"); case SSE4A: Builder.defineMacro("__SSE4A__"); case NoXOP: break; } if (HasFMA) Builder.defineMacro("__FMA__"); if (HasF16C) Builder.defineMacro("__F16C__"); if (HasAVX512CD) Builder.defineMacro("__AVX512CD__"); if (HasAVX512ER) Builder.defineMacro("__AVX512ER__"); if (HasAVX512PF) Builder.defineMacro("__AVX512PF__"); if (HasAVX512DQ) Builder.defineMacro("__AVX512DQ__"); if (HasAVX512BW) Builder.defineMacro("__AVX512BW__"); if (HasAVX512VL) Builder.defineMacro("__AVX512VL__"); if (HasSHA) Builder.defineMacro("__SHA__"); if (HasCX16) Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16"); // Each case falls through to the previous one here. switch (SSELevel) { case AVX512F: Builder.defineMacro("__AVX512F__"); case AVX2: Builder.defineMacro("__AVX2__"); case AVX: Builder.defineMacro("__AVX__"); case SSE42: Builder.defineMacro("__SSE4_2__"); case SSE41: Builder.defineMacro("__SSE4_1__"); case SSSE3: Builder.defineMacro("__SSSE3__"); case SSE3: Builder.defineMacro("__SSE3__"); case SSE2: Builder.defineMacro("__SSE2__"); Builder.defineMacro("__SSE2_MATH__"); // -mfp-math=sse always implied. case SSE1: Builder.defineMacro("__SSE__"); Builder.defineMacro("__SSE_MATH__"); // -mfp-math=sse always implied. 
case NoSSE: break; } if (Opts.MicrosoftExt && getTriple().getArch() == llvm::Triple::x86) { switch (SSELevel) { case AVX512F: case AVX2: case AVX: case SSE42: case SSE41: case SSSE3: case SSE3: case SSE2: Builder.defineMacro("_M_IX86_FP", Twine(2)); break; case SSE1: Builder.defineMacro("_M_IX86_FP", Twine(1)); break; default: Builder.defineMacro("_M_IX86_FP", Twine(0)); } } // Each case falls through to the previous one here. switch (MMX3DNowLevel) { case AMD3DNowAthlon: Builder.defineMacro("__3dNOW_A__"); case AMD3DNow: Builder.defineMacro("__3dNOW__"); case MMX: Builder.defineMacro("__MMX__"); case NoMMX3DNow: break; } if (CPU >= CK_i486) { Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1"); Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2"); Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4"); } if (CPU >= CK_i586) Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8"); } bool X86TargetInfo::hasFeature(StringRef Feature) const { return llvm::StringSwitch(Feature) .Case("aes", HasAES) .Case("avx", SSELevel >= AVX) .Case("avx2", SSELevel >= AVX2) .Case("avx512f", SSELevel >= AVX512F) .Case("avx512cd", HasAVX512CD) .Case("avx512er", HasAVX512ER) .Case("avx512pf", HasAVX512PF) .Case("avx512dq", HasAVX512DQ) .Case("avx512bw", HasAVX512BW) .Case("avx512vl", HasAVX512VL) .Case("bmi", HasBMI) .Case("bmi2", HasBMI2) .Case("cx16", HasCX16) .Case("f16c", HasF16C) .Case("fma", HasFMA) .Case("fma4", XOPLevel >= FMA4) .Case("fsgsbase", HasFSGSBASE) .Case("lzcnt", HasLZCNT) .Case("mm3dnow", MMX3DNowLevel >= AMD3DNow) .Case("mm3dnowa", MMX3DNowLevel >= AMD3DNowAthlon) .Case("mmx", MMX3DNowLevel >= MMX) .Case("pclmul", HasPCLMUL) .Case("popcnt", HasPOPCNT) .Case("prfchw", HasPRFCHW) .Case("rdrnd", HasRDRND) .Case("rdseed", HasRDSEED) .Case("rtm", HasRTM) .Case("sha", HasSHA) .Case("sse", SSELevel >= SSE1) .Case("sse2", SSELevel >= SSE2) .Case("sse3", SSELevel >= SSE3) .Case("ssse3", SSELevel >= SSSE3) .Case("sse4.1", SSELevel >= SSE41) .Case("sse4.2", SSELevel >= SSE42) .Case("sse4a", XOPLevel >= SSE4A) .Case("tbm", HasTBM) .Case("x86", true) .Case("x86_32", getTriple().getArch() == llvm::Triple::x86) .Case("x86_64", getTriple().getArch() == llvm::Triple::x86_64) .Case("xop", XOPLevel >= XOP) .Default(false); } // We can't use a generic validation scheme for the features accepted here // versus subtarget features accepted in the target attribute because the // bitfield structure that's initialized in the runtime only supports the // below currently rather than the full range of subtarget features. (See // X86TargetInfo::hasFeature for a somewhat comprehensive list). 
bool X86TargetInfo::validateCpuSupports(StringRef FeatureStr) const { return llvm::StringSwitch(FeatureStr) .Case("cmov", true) .Case("mmx", true) .Case("popcnt", true) .Case("sse", true) .Case("sse2", true) .Case("sse3", true) .Case("sse4.1", true) .Case("sse4.2", true) .Case("avx", true) .Case("avx2", true) .Case("sse4a", true) .Case("fma4", true) .Case("xop", true) .Case("fma", true) .Case("avx512f", true) .Case("bmi", true) .Case("bmi2", true) .Default(false); } bool X86TargetInfo::validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const { switch (*Name) { default: return false; case 'I': Info.setRequiresImmediate(0, 31); return true; case 'J': Info.setRequiresImmediate(0, 63); return true; case 'K': Info.setRequiresImmediate(-128, 127); return true; case 'L': // FIXME: properly analyze this constraint: // must be one of 0xff, 0xffff, or 0xffffffff return true; case 'M': Info.setRequiresImmediate(0, 3); return true; case 'N': Info.setRequiresImmediate(0, 255); return true; case 'O': Info.setRequiresImmediate(0, 127); return true; case 'Y': // first letter of a pair: switch (*(Name+1)) { default: return false; case '0': // First SSE register. case 't': // Any SSE register, when SSE2 is enabled. case 'i': // Any SSE register, when SSE2 and inter-unit moves enabled. case 'm': // any MMX register, when inter-unit moves enabled. break; // falls through to setAllowsRegister. } case 'f': // any x87 floating point stack register. // Constraint 'f' cannot be used for output operands. if (Info.ConstraintStr[0] == '=') return false; Info.setAllowsRegister(); return true; case 'a': // eax. case 'b': // ebx. case 'c': // ecx. case 'd': // edx. case 'S': // esi. case 'D': // edi. case 'A': // edx:eax. case 't': // top of floating point stack. case 'u': // second from top of floating point stack. case 'q': // Any register accessible as [r]l: a, b, c, and d. case 'y': // Any MMX register. case 'x': // Any SSE register. case 'Q': // Any register accessible as [r]h: a, b, c, and d. case 'R': // "Legacy" registers: ax, bx, cx, dx, di, si, sp, bp. case 'l': // "Index" registers: any general register that can be used as an // index in a base+index memory access. Info.setAllowsRegister(); return true; case 'C': // SSE floating point constant. case 'G': // x87 floating point constant. case 'e': // 32-bit signed integer constant for use with zero-extending // x86_64 instructions. case 'Z': // 32-bit unsigned integer constant for use with zero-extending // x86_64 instructions. return true; } } bool X86TargetInfo::validateOutputSize(StringRef Constraint, unsigned Size) const { // Strip off constraint modifiers. while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&') Constraint = Constraint.substr(1); return validateOperandSize(Constraint, Size); } bool X86TargetInfo::validateInputSize(StringRef Constraint, unsigned Size) const { return validateOperandSize(Constraint, Size); } bool X86TargetInfo::validateOperandSize(StringRef Constraint, unsigned Size) const { switch (Constraint[0]) { default: break; case 'y': return Size <= 64; case 'f': case 't': case 'u': return Size <= 128; case 'x': // 256-bit ymm registers can be used if target supports AVX. return Size <= (SSELevel >= AVX ? 
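// Editorial note: validateCpuSupports() above is the frontend gate for
// __builtin_cpu_supports(); only the strings in that list are accepted.
// Minimal sketch of the runtime-dispatch pattern it guards:
//
//   void fast_path(void);
//   void slow_path(void);
//   void dispatch(void) {
//     if (__builtin_cpu_supports("sse4.2"))
//       fast_path();
//     else
//       slow_path();
//   }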
256U : 128U); } return true; } std::string X86TargetInfo::convertConstraint(const char *&Constraint) const { switch (*Constraint) { case 'a': return std::string("{ax}"); case 'b': return std::string("{bx}"); case 'c': return std::string("{cx}"); case 'd': return std::string("{dx}"); case 'S': return std::string("{si}"); case 'D': return std::string("{di}"); case 'p': // address return std::string("im"); case 't': // top of floating point stack. return std::string("{st}"); case 'u': // second from top of floating point stack. return std::string("{st(1)}"); // second from top of floating point stack. default: return std::string(1, *Constraint); } } // X86-32 generic target class X86_32TargetInfo : public X86TargetInfo { public: X86_32TargetInfo(const llvm::Triple &Triple) : X86TargetInfo(Triple) { DoubleAlign = LongLongAlign = 32; LongDoubleWidth = 96; LongDoubleAlign = 32; SuitableAlign = 128; DescriptionString = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"; SizeType = UnsignedInt; PtrDiffType = SignedInt; IntPtrType = SignedInt; RegParmMax = 3; // Use fpret for all types. RealTypeUsesObjCFPRet = ((1 << TargetInfo::Float) | (1 << TargetInfo::Double) | (1 << TargetInfo::LongDouble)); // x86-32 has atomics up to 8 bytes // FIXME: Check that we actually have cmpxchg8b before setting // MaxAtomicInlineWidth. (cmpxchg8b is an i586 instruction.) MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64; } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::CharPtrBuiltinVaList; } int getEHDataRegisterNumber(unsigned RegNo) const override { if (RegNo == 0) return 0; if (RegNo == 1) return 2; return -1; } bool validateOperandSize(StringRef Constraint, unsigned Size) const override { switch (Constraint[0]) { default: break; case 'R': case 'q': case 'Q': case 'a': case 'b': case 'c': case 'd': case 'S': case 'D': return Size <= 32; case 'A': return Size <= 64; } return X86TargetInfo::validateOperandSize(Constraint, Size); } }; class NetBSDI386TargetInfo : public NetBSDTargetInfo { public: NetBSDI386TargetInfo(const llvm::Triple &Triple) : NetBSDTargetInfo(Triple) {} unsigned getFloatEvalMethod() const override { unsigned Major, Minor, Micro; getTriple().getOSVersion(Major, Minor, Micro); // New NetBSD uses the default rounding mode. if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 26) || Major == 0) return X86_32TargetInfo::getFloatEvalMethod(); // NetBSD before 6.99.26 defaults to "double" rounding. 
return 1; } }; class OpenBSDI386TargetInfo : public OpenBSDTargetInfo { public: OpenBSDI386TargetInfo(const llvm::Triple &Triple) : OpenBSDTargetInfo(Triple) { SizeType = UnsignedLong; IntPtrType = SignedLong; PtrDiffType = SignedLong; } }; class BitrigI386TargetInfo : public BitrigTargetInfo { public: BitrigI386TargetInfo(const llvm::Triple &Triple) : BitrigTargetInfo(Triple) { SizeType = UnsignedLong; IntPtrType = SignedLong; PtrDiffType = SignedLong; } }; class DarwinI386TargetInfo : public DarwinTargetInfo { public: DarwinI386TargetInfo(const llvm::Triple &Triple) : DarwinTargetInfo(Triple) { LongDoubleWidth = 128; LongDoubleAlign = 128; SuitableAlign = 128; MaxVectorAlign = 256; SizeType = UnsignedLong; IntPtrType = SignedLong; DescriptionString = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"; HasAlignMac68kSupport = true; } }; // x86-32 Windows target class WindowsX86_32TargetInfo : public WindowsTargetInfo { public: WindowsX86_32TargetInfo(const llvm::Triple &Triple) : WindowsTargetInfo(Triple) { WCharType = UnsignedShort; DoubleAlign = LongLongAlign = 64; bool IsWinCOFF = getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF(); DescriptionString = IsWinCOFF ? "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32" : "e-m:e-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { WindowsTargetInfo::getTargetDefines(Opts, Builder); } }; // x86-32 Windows Visual Studio target class MicrosoftX86_32TargetInfo : public WindowsX86_32TargetInfo { public: MicrosoftX86_32TargetInfo(const llvm::Triple &Triple) : WindowsX86_32TargetInfo(Triple) { LongDoubleWidth = LongDoubleAlign = 64; LongDoubleFormat = &llvm::APFloat::IEEEdouble; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { WindowsX86_32TargetInfo::getTargetDefines(Opts, Builder); WindowsX86_32TargetInfo::getVisualStudioDefines(Opts, Builder); // The value of the following reflects processor type. // 300=386, 400=486, 500=Pentium, 600=Blend (default) // We lost the original triple, so we use the default. Builder.defineMacro("_M_IX86", "600"); } }; } // end anonymous namespace static void addCygMingDefines(const LangOptions &Opts, MacroBuilder &Builder) { // Mingw and cygwin define __declspec(a) to __attribute__((a)). Clang supports // __declspec natively under -fms-extensions, but we define a no-op __declspec // macro anyway for pre-processor compatibility. if (Opts.MicrosoftExt) Builder.defineMacro("__declspec", "__declspec"); else Builder.defineMacro("__declspec(a)", "__attribute__((a))"); if (!Opts.MicrosoftExt) { // Provide macros for all the calling convention keywords. Provide both // single and double underscore prefixed variants. These are available on // x64 as well as x86, even though they have no effect. 
const char *CCs[] = {"cdecl", "stdcall", "fastcall", "thiscall", "pascal"}; for (const char *CC : CCs) { std::string GCCSpelling = "__attribute__((__"; GCCSpelling += CC; GCCSpelling += "__))"; Builder.defineMacro(Twine("_") + CC, GCCSpelling); Builder.defineMacro(Twine("__") + CC, GCCSpelling); } } } static void addMinGWDefines(const LangOptions &Opts, MacroBuilder &Builder) { Builder.defineMacro("__MSVCRT__"); Builder.defineMacro("__MINGW32__"); addCygMingDefines(Opts, Builder); } namespace { // x86-32 MinGW target class MinGWX86_32TargetInfo : public WindowsX86_32TargetInfo { public: MinGWX86_32TargetInfo(const llvm::Triple &Triple) : WindowsX86_32TargetInfo(Triple) {} void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { WindowsX86_32TargetInfo::getTargetDefines(Opts, Builder); DefineStd(Builder, "WIN32", Opts); DefineStd(Builder, "WINNT", Opts); Builder.defineMacro("_X86_"); addMinGWDefines(Opts, Builder); } }; // x86-32 Cygwin target class CygwinX86_32TargetInfo : public X86_32TargetInfo { public: CygwinX86_32TargetInfo(const llvm::Triple &Triple) : X86_32TargetInfo(Triple) { TLSSupported = false; WCharType = UnsignedShort; DoubleAlign = LongLongAlign = 64; DescriptionString = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { X86_32TargetInfo::getTargetDefines(Opts, Builder); Builder.defineMacro("_X86_"); Builder.defineMacro("__CYGWIN__"); Builder.defineMacro("__CYGWIN32__"); addCygMingDefines(Opts, Builder); DefineStd(Builder, "unix", Opts); if (Opts.CPlusPlus) Builder.defineMacro("_GNU_SOURCE"); } }; // x86-32 Haiku target class HaikuX86_32TargetInfo : public X86_32TargetInfo { public: HaikuX86_32TargetInfo(const llvm::Triple &Triple) : X86_32TargetInfo(Triple) { SizeType = UnsignedLong; IntPtrType = SignedLong; PtrDiffType = SignedLong; ProcessIDType = SignedLong; this->UserLabelPrefix = ""; this->TLSSupported = false; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { X86_32TargetInfo::getTargetDefines(Opts, Builder); Builder.defineMacro("__INTEL__"); Builder.defineMacro("__HAIKU__"); } }; // RTEMS Target template class RTEMSTargetInfo : public OSTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { // RTEMS defines; list based off of gcc output Builder.defineMacro("__rtems__"); Builder.defineMacro("__ELF__"); } public: RTEMSTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) { this->UserLabelPrefix = ""; switch (Triple.getArch()) { default: case llvm::Triple::x86: // this->MCountName = ".mcount"; break; case llvm::Triple::mips: case llvm::Triple::mipsel: case llvm::Triple::ppc: case llvm::Triple::ppc64: case llvm::Triple::ppc64le: // this->MCountName = "_mcount"; break; case llvm::Triple::arm: // this->MCountName = "__mcount"; break; } } }; // x86-32 RTEMS target class RTEMSX86_32TargetInfo : public X86_32TargetInfo { public: RTEMSX86_32TargetInfo(const llvm::Triple &Triple) : X86_32TargetInfo(Triple) { SizeType = UnsignedLong; IntPtrType = SignedLong; PtrDiffType = SignedLong; this->UserLabelPrefix = ""; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { X86_32TargetInfo::getTargetDefines(Opts, Builder); Builder.defineMacro("__INTEL__"); Builder.defineMacro("__rtems__"); } }; // x86-64 generic target class X86_64TargetInfo : public X86TargetInfo { public: X86_64TargetInfo(const llvm::Triple 
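// Editorial sketch of what the CCs loop in addCygMingDefines above produces
// for each spelling (only taken when -fms-extensions is off):
//
//   #define _cdecl    __attribute__((__cdecl__))
//   #define __cdecl   __attribute__((__cdecl__))
//   #define _stdcall  __attribute__((__stdcall__))
//   #define __stdcall __attribute__((__stdcall__))
//
// and likewise for fastcall, thiscall and pascal.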
&Triple) : X86TargetInfo(Triple) { const bool IsX32 = getTriple().getEnvironment() == llvm::Triple::GNUX32; bool IsWinCOFF = getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF(); LongWidth = LongAlign = PointerWidth = PointerAlign = IsX32 ? 32 : 64; LongDoubleWidth = 128; LongDoubleAlign = 128; LargeArrayMinWidth = 128; LargeArrayAlign = 128; SuitableAlign = 128; SizeType = IsX32 ? UnsignedInt : UnsignedLong; PtrDiffType = IsX32 ? SignedInt : SignedLong; IntPtrType = IsX32 ? SignedInt : SignedLong; IntMaxType = IsX32 ? SignedLongLong : SignedLong; Int64Type = IsX32 ? SignedLongLong : SignedLong; RegParmMax = 6; // Pointers are 32-bit in x32. DescriptionString = IsX32 ? "e-m:e-p:32:32-i64:64-f80:128-n8:16:32:64-S128" : IsWinCOFF ? "e-m:w-i64:64-f80:128-n8:16:32:64-S128" : "e-m:e-i64:64-f80:128-n8:16:32:64-S128"; // Use fpret only for long double. RealTypeUsesObjCFPRet = (1 << TargetInfo::LongDouble); // Use fp2ret for _Complex long double. ComplexLongDoubleUsesFP2Ret = true; // x86-64 has atomics up to 16 bytes. MaxAtomicPromoteWidth = 128; MaxAtomicInlineWidth = 128; } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::X86_64ABIBuiltinVaList; } int getEHDataRegisterNumber(unsigned RegNo) const override { if (RegNo == 0) return 0; if (RegNo == 1) return 1; return -1; } CallingConvCheckResult checkCallingConvention(CallingConv CC) const override { return (CC == CC_C || CC == CC_X86VectorCall || CC == CC_IntelOclBicc || CC == CC_X86_64Win64) ? CCCR_OK : CCCR_Warning; } CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override { return CC_C; } // for x32 we need it here explicitly bool hasInt128Type() const override { return true; } }; // x86-64 Windows target class WindowsX86_64TargetInfo : public WindowsTargetInfo { public: WindowsX86_64TargetInfo(const llvm::Triple &Triple) : WindowsTargetInfo(Triple) { WCharType = UnsignedShort; LongWidth = LongAlign = 32; DoubleAlign = LongLongAlign = 64; IntMaxType = SignedLongLong; Int64Type = SignedLongLong; SizeType = UnsignedLongLong; PtrDiffType = SignedLongLong; IntPtrType = SignedLongLong; this->UserLabelPrefix = ""; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { WindowsTargetInfo::getTargetDefines(Opts, Builder); Builder.defineMacro("_WIN64"); } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::CharPtrBuiltinVaList; } CallingConvCheckResult checkCallingConvention(CallingConv CC) const override { switch (CC) { case CC_X86StdCall: case CC_X86ThisCall: case CC_X86FastCall: return CCCR_Ignore; case CC_C: case CC_X86VectorCall: case CC_IntelOclBicc: case CC_X86_64SysV: return CCCR_OK; default: return CCCR_Warning; } } }; // x86-64 Windows Visual Studio target class MicrosoftX86_64TargetInfo : public WindowsX86_64TargetInfo { public: MicrosoftX86_64TargetInfo(const llvm::Triple &Triple) : WindowsX86_64TargetInfo(Triple) { LongDoubleWidth = LongDoubleAlign = 64; LongDoubleFormat = &llvm::APFloat::IEEEdouble; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { WindowsX86_64TargetInfo::getTargetDefines(Opts, Builder); WindowsX86_64TargetInfo::getVisualStudioDefines(Opts, Builder); Builder.defineMacro("_M_X64"); Builder.defineMacro("_M_AMD64"); } }; // x86-64 MinGW target class MinGWX86_64TargetInfo : public WindowsX86_64TargetInfo { public: MinGWX86_64TargetInfo(const llvm::Triple &Triple) - : WindowsX86_64TargetInfo(Triple) {} + : WindowsX86_64TargetInfo(Triple) { + // Mingw64 rounds long double size 
and alignment up to 16 bytes, but sticks
+    // with x86 FP ops. Weird.
+    LongDoubleWidth = LongDoubleAlign = 128;
+    LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
+  }
+
 void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { WindowsX86_64TargetInfo::getTargetDefines(Opts, Builder); DefineStd(Builder, "WIN64", Opts); Builder.defineMacro("__MINGW64__"); addMinGWDefines(Opts, Builder); // GCC defines this macro when it is using __gxx_personality_seh0. if (!Opts.SjLjExceptions) Builder.defineMacro("__SEH__"); } }; class DarwinX86_64TargetInfo : public DarwinTargetInfo<X86_64TargetInfo> { public: DarwinX86_64TargetInfo(const llvm::Triple &Triple) : DarwinTargetInfo<X86_64TargetInfo>(Triple) { Int64Type = SignedLongLong; MaxVectorAlign = 256; // The 64-bit iOS simulator uses the builtin bool type for Objective-C. llvm::Triple T = llvm::Triple(Triple); if (T.isiOS()) UseSignedCharForObjCBool = false; DescriptionString = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"; } }; class OpenBSDX86_64TargetInfo : public OpenBSDTargetInfo<X86_64TargetInfo> { public: OpenBSDX86_64TargetInfo(const llvm::Triple &Triple) : OpenBSDTargetInfo<X86_64TargetInfo>(Triple) { IntMaxType = SignedLongLong; Int64Type = SignedLongLong; } }; class BitrigX86_64TargetInfo : public BitrigTargetInfo<X86_64TargetInfo> { public: BitrigX86_64TargetInfo(const llvm::Triple &Triple) : BitrigTargetInfo<X86_64TargetInfo>(Triple) { IntMaxType = SignedLongLong; Int64Type = SignedLongLong; } }; class ARMTargetInfo : public TargetInfo { // Possible FPU choices. enum FPUMode { VFP2FPU = (1 << 0), VFP3FPU = (1 << 1), VFP4FPU = (1 << 2), NeonFPU = (1 << 3), FPARMV8 = (1 << 4) }; // Possible HWDiv features. enum HWDivMode { HWDivThumb = (1 << 0), HWDivARM = (1 << 1) }; static bool FPUModeIsVFP(FPUMode Mode) { return Mode & (VFP2FPU | VFP3FPU | VFP4FPU | NeonFPU | FPARMV8); } static const TargetInfo::GCCRegAlias GCCRegAliases[]; static const char * const GCCRegNames[]; std::string ABI, CPU; enum { FP_Default, FP_VFP, FP_Neon } FPMath; unsigned FPU : 5; unsigned IsAAPCS : 1; unsigned IsThumb : 1; unsigned HWDiv : 2; // Initialized via features. unsigned SoftFloat : 1; unsigned SoftFloatABI : 1; unsigned CRC : 1; unsigned Crypto : 1; // ACLE 6.5.1 Hardware floating point enum { HW_FP_HP = (1 << 1), /// half (16-bit) HW_FP_SP = (1 << 2), /// single (32-bit) HW_FP_DP = (1 << 3), /// double (64-bit) }; uint32_t HW_FP; static const Builtin::Info BuiltinInfo[]; static bool shouldUseInlineAtomic(const llvm::Triple &T) { StringRef ArchName = T.getArchName(); if (T.getArch() == llvm::Triple::arm || T.getArch() == llvm::Triple::armeb) { StringRef VersionStr; if (ArchName.startswith("armv")) VersionStr = ArchName.substr(4, 1); else if (ArchName.startswith("armebv")) VersionStr = ArchName.substr(6, 1); else return false; unsigned Version; if (VersionStr.getAsInteger(10, Version)) return false; return Version >= 6; } assert(T.getArch() == llvm::Triple::thumb || T.getArch() == llvm::Triple::thumbeb); StringRef VersionStr; if (ArchName.startswith("thumbv")) VersionStr = ArchName.substr(6, 1); else if (ArchName.startswith("thumbebv")) VersionStr = ArchName.substr(8, 1); else return false; unsigned Version; if (VersionStr.getAsInteger(10, Version)) return false; return Version >= 7; } void setABIAAPCS() { IsAAPCS = true; DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64; const llvm::Triple &T = getTriple(); // size_t is unsigned long on MachO-derived environments, NetBSD and Bitrig.
if (T.isOSBinFormatMachO() || T.getOS() == llvm::Triple::NetBSD || T.getOS() == llvm::Triple::Bitrig) SizeType = UnsignedLong; else SizeType = UnsignedInt; switch (T.getOS()) { case llvm::Triple::NetBSD: WCharType = SignedInt; break; case llvm::Triple::Win32: WCharType = UnsignedShort; break; case llvm::Triple::Linux: default: // AAPCS 7.1.1, ARM-Linux ABI 2.4: type of wchar_t is unsigned int. WCharType = UnsignedInt; break; } UseBitFieldTypeAlignment = true; ZeroLengthBitfieldBoundary = 0; // Thumb1 add sp, #imm requires the immediate value be multiple of 4, // so set preferred for small types to 32. if (T.isOSBinFormatMachO()) { DescriptionString = BigEndian ? "E-m:o-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" : "e-m:o-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"; } else if (T.isOSWindows()) { assert(!BigEndian && "Windows on ARM does not support big endian"); DescriptionString = "e" "-m:w" "-p:32:32" "-i64:64" "-v128:64:128" "-a:0:32" "-n32" "-S64"; } else if (T.isOSNaCl()) { assert(!BigEndian && "NaCl on ARM does not support big endian"); DescriptionString = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S128"; } else { DescriptionString = BigEndian ? "E-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" : "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"; } // FIXME: Enumerated types are variable width in straight AAPCS. } void setABIAPCS() { const llvm::Triple &T = getTriple(); IsAAPCS = false; DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 32; // size_t is unsigned int on FreeBSD. if (T.getOS() == llvm::Triple::FreeBSD) SizeType = UnsignedInt; else SizeType = UnsignedLong; // Revert to using SignedInt on apcs-gnu to comply with existing behaviour. WCharType = SignedInt; // Do not respect the alignment of bit-field types when laying out // structures. This corresponds to PCC_BITFIELD_TYPE_MATTERS in gcc. UseBitFieldTypeAlignment = false; /// gcc forces the alignment to 4 bytes, regardless of the type of the /// zero length bitfield. This corresponds to EMPTY_FIELD_BOUNDARY in /// gcc. ZeroLengthBitfieldBoundary = 32; if (T.isOSBinFormatMachO()) DescriptionString = BigEndian ? "E-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32" : "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"; else DescriptionString = BigEndian ? "E-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32" : "e-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"; // FIXME: Override "preferred align" for double and long long. } public: ARMTargetInfo(const llvm::Triple &Triple, bool IsBigEndian) : TargetInfo(Triple), CPU("arm1136j-s"), FPMath(FP_Default), IsAAPCS(true), HW_FP(0) { BigEndian = IsBigEndian; switch (getTriple().getOS()) { case llvm::Triple::NetBSD: PtrDiffType = SignedLong; break; default: PtrDiffType = SignedInt; break; } // {} in inline assembly are neon specifiers, not assembly variant // specifiers. NoAsmVariants = true; // FIXME: Should we just treat this as a feature? IsThumb = getTriple().getArchName().startswith("thumb"); // FIXME: This duplicates code from the driver that sets the -target-abi // option - this code is used if -target-abi isn't passed and should // be unified in some way. if (Triple.isOSBinFormatMachO()) { // The backend is hardwired to assume AAPCS for M-class processors, ensure // the frontend matches that. 
if (Triple.getEnvironment() == llvm::Triple::EABI || Triple.getOS() == llvm::Triple::UnknownOS || StringRef(CPU).startswith("cortex-m")) { setABI("aapcs"); } else { setABI("apcs-gnu"); } } else if (Triple.isOSWindows()) { // FIXME: this is invalid for WindowsCE setABI("aapcs"); } else { // Select the default based on the platform. switch (Triple.getEnvironment()) { case llvm::Triple::Android: case llvm::Triple::GNUEABI: case llvm::Triple::GNUEABIHF: setABI("aapcs-linux"); break; case llvm::Triple::EABIHF: case llvm::Triple::EABI: setABI("aapcs"); break; case llvm::Triple::GNU: setABI("apcs-gnu"); break; default: if (Triple.getOS() == llvm::Triple::NetBSD) setABI("apcs-gnu"); else setABI("aapcs"); break; } } // ARM targets default to using the ARM C++ ABI. TheCXXABI.set(TargetCXXABI::GenericARM); // ARM has atomics up to 8 bytes MaxAtomicPromoteWidth = 64; if (shouldUseInlineAtomic(getTriple())) MaxAtomicInlineWidth = 64; // Do force alignment of members that follow zero-length bitfields. If // the alignment of a zero-length bitfield is greater than that of the // member that follows it (`bar'), `bar' will be aligned as the type of // the zero-length bitfield. UseZeroLengthBitfieldAlignment = true; } StringRef getABI() const override { return ABI; } bool setABI(const std::string &Name) override { ABI = Name; // The defaults (above) are for AAPCS; check whether we need to change them. // // FIXME: We need support for -meabi... we could just mangle it into the // name. if (Name == "apcs-gnu") { setABIAPCS(); return true; } if (Name == "aapcs" || Name == "aapcs-vfp" || Name == "aapcs-linux") { setABIAAPCS(); return true; } return false; } // FIXME: This should be based on Arch attributes, not CPU names. void getDefaultFeatures(llvm::StringMap<bool> &Features) const override { StringRef ArchName = getTriple().getArchName(); unsigned ArchKind = llvm::ARMTargetParser::parseArch(ArchName); bool IsV8 = (ArchKind == llvm::ARM::AK_ARMV8A || ArchKind == llvm::ARM::AK_ARMV8_1A); if (CPU == "arm1136jf-s" || CPU == "arm1176jzf-s" || CPU == "mpcore") Features["vfp2"] = true; else if (CPU == "cortex-a8" || CPU == "cortex-a9") { Features["vfp3"] = true; Features["neon"] = true; } else if (CPU == "cortex-a5") { Features["vfp4"] = true; Features["neon"] = true; } else if (CPU == "swift" || CPU == "cortex-a7" || CPU == "cortex-a12" || CPU == "cortex-a15" || CPU == "cortex-a17" || CPU == "krait") { Features["vfp4"] = true; Features["neon"] = true; Features["hwdiv"] = true; Features["hwdiv-arm"] = true; } else if (CPU == "cyclone" || CPU == "cortex-a53" || CPU == "cortex-a57" || CPU == "cortex-a72") { Features["fp-armv8"] = true; Features["neon"] = true; Features["hwdiv"] = true; Features["hwdiv-arm"] = true; Features["crc"] = true; Features["crypto"] = true; } else if (CPU == "cortex-r5" || CPU == "cortex-r7" || IsV8) { Features["hwdiv"] = true; Features["hwdiv-arm"] = true; } else if (CPU == "cortex-m3" || CPU == "cortex-m4" || CPU == "cortex-m7" || CPU == "sc300" || CPU == "cortex-r4" || CPU == "cortex-r4f") { Features["hwdiv"] = true; } } bool handleTargetFeatures(std::vector<std::string> &Features, DiagnosticsEngine &Diags) override { FPU = 0; CRC = 0; Crypto = 0; SoftFloat = SoftFloatABI = false; HWDiv = 0; // This does not diagnose illegal cases like having both // "+vfpv2" and "+vfpv3" or having "+neon" and "+fp-only-sp".
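// Editor's note (illustrative, not vendor code): a worked example of the
// HW_FP bookkeeping below, assuming a hypothetical feature list
// {"+vfp4", "+fp-only-sp"}:
//   "+vfp4"        ->  HW_FP        |= HP|SP|DP = 0x2|0x4|0x8 = 0xe
//   "+fp-only-sp"  ->  HW_FP_remove |= DP|HP    = 0x8|0x2     = 0xa
//   HW_FP &= ~HW_FP_remove          ->  0xe & ~0xa = 0x4 (single only)
// getTargetDefines() would then emit __ARM_FP 0x4.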
uint32_t HW_FP_remove = 0; for (const auto &Feature : Features) { if (Feature == "+soft-float") { SoftFloat = true; } else if (Feature == "+soft-float-abi") { SoftFloatABI = true; } else if (Feature == "+vfp2") { FPU |= VFP2FPU; HW_FP |= HW_FP_SP | HW_FP_DP; } else if (Feature == "+vfp3") { FPU |= VFP3FPU; HW_FP |= HW_FP_SP | HW_FP_DP; } else if (Feature == "+vfp4") { FPU |= VFP4FPU; HW_FP |= HW_FP_SP | HW_FP_DP | HW_FP_HP; } else if (Feature == "+fp-armv8") { FPU |= FPARMV8; HW_FP |= HW_FP_SP | HW_FP_DP | HW_FP_HP; } else if (Feature == "+neon") { FPU |= NeonFPU; HW_FP |= HW_FP_SP | HW_FP_DP; } else if (Feature == "+hwdiv") { HWDiv |= HWDivThumb; } else if (Feature == "+hwdiv-arm") { HWDiv |= HWDivARM; } else if (Feature == "+crc") { CRC = 1; } else if (Feature == "+crypto") { Crypto = 1; } else if (Feature == "+fp-only-sp") { HW_FP_remove |= HW_FP_DP | HW_FP_HP; } } HW_FP &= ~HW_FP_remove; if (!(FPU & NeonFPU) && FPMath == FP_Neon) { Diags.Report(diag::err_target_unsupported_fpmath) << "neon"; return false; } if (FPMath == FP_Neon) Features.push_back("+neonfp"); else if (FPMath == FP_VFP) Features.push_back("-neonfp"); // Remove front-end specific options which the backend handles differently. auto Feature = std::find(Features.begin(), Features.end(), "+soft-float-abi"); if (Feature != Features.end()) Features.erase(Feature); return true; } bool hasFeature(StringRef Feature) const override { return llvm::StringSwitch(Feature) .Case("arm", true) .Case("softfloat", SoftFloat) .Case("thumb", IsThumb) .Case("neon", (FPU & NeonFPU) && !SoftFloat) .Case("hwdiv", HWDiv & HWDivThumb) .Case("hwdiv-arm", HWDiv & HWDivARM) .Default(false); } const char *getCPUDefineSuffix(StringRef Name) const { if(Name == "generic") { auto subarch = getTriple().getSubArch(); switch (subarch) { case llvm::Triple::SubArchType::ARMSubArch_v8_1a: return "8_1A"; default: break; } } unsigned ArchKind = llvm::ARMTargetParser::parseCPUArch(Name); if (ArchKind == llvm::ARM::AK_INVALID) return ""; // For most sub-arches, the build attribute CPU name is enough. // For Cortex variants, it's slightly different. switch(ArchKind) { default: return llvm::ARMTargetParser::getCPUAttr(ArchKind); case llvm::ARM::AK_ARMV6M: case llvm::ARM::AK_ARMV6SM: return "6M"; case llvm::ARM::AK_ARMV7: case llvm::ARM::AK_ARMV7A: case llvm::ARM::AK_ARMV7S: return "7A"; case llvm::ARM::AK_ARMV7R: return "7R"; case llvm::ARM::AK_ARMV7M: return "7M"; case llvm::ARM::AK_ARMV7EM: return "7EM"; case llvm::ARM::AK_ARMV8A: return "8A"; case llvm::ARM::AK_ARMV8_1A: return "8_1A"; } } const char *getCPUProfile(StringRef Name) const { if(Name == "generic") { auto subarch = getTriple().getSubArch(); switch (subarch) { case llvm::Triple::SubArchType::ARMSubArch_v8_1a: return "A"; default: break; } } unsigned CPUArch = llvm::ARMTargetParser::parseCPUArch(Name); if (CPUArch == llvm::ARM::AK_INVALID) return ""; StringRef ArchName = llvm::ARMTargetParser::getArchName(CPUArch); switch(llvm::ARMTargetParser::parseArchProfile(ArchName)) { case llvm::ARM::PK_A: return "A"; case llvm::ARM::PK_R: return "R"; case llvm::ARM::PK_M: return "M"; default: return ""; } } bool setCPU(const std::string &Name) override { if (!getCPUDefineSuffix(Name)) return false; // Cortex M does not support 8 byte atomics, while general Thumb2 does. 
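// Editor's note (illustrative, not vendor code): e.g. setCPU("cortex-m4")
// yields profile "M", so both atomic widths are capped at 32 below, while
// setCPU("cortex-a15") (profile "A") keeps the 64-bit widths set in the
// constructor. A hypothetical sketch, disabled so the translation unit is
// unaffected (ARMleTargetInfo is declared later in this file):
#if 0
ARMleTargetInfo TI(llvm::Triple("thumbv7em-none-eabi"));
TI.setCPU("cortex-m4"); // MaxAtomicInlineWidth == 32 afterwards
#endif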
StringRef Profile = getCPUProfile(Name); if (Profile == "M" && MaxAtomicInlineWidth) { MaxAtomicPromoteWidth = 32; MaxAtomicInlineWidth = 32; } CPU = Name; return true; } bool setFPMath(StringRef Name) override; bool supportsThumb(StringRef ArchName, StringRef CPUArch, unsigned CPUArchVer) const { return CPUArchVer >= 7 || (CPUArch.find('T') != StringRef::npos) || (CPUArch.find('M') != StringRef::npos); } bool supportsThumb2(StringRef ArchName, StringRef CPUArch, unsigned CPUArchVer) const { // We check both CPUArchVer and ArchName because when only triple is // specified, the default CPU is arm1136j-s. return ArchName.endswith("v6t2") || ArchName.endswith("v7") || ArchName.endswith("v8.1a") || ArchName.endswith("v8") || CPUArch == "6T2" || CPUArchVer >= 7; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { // Target identification. Builder.defineMacro("__arm"); Builder.defineMacro("__arm__"); // Target properties. Builder.defineMacro("__REGISTER_PREFIX__", ""); StringRef CPUArch = getCPUDefineSuffix(CPU); unsigned int CPUArchVer; if (CPUArch.substr(0, 1).getAsInteger(10, CPUArchVer)) llvm_unreachable("Invalid char for architecture version number"); Builder.defineMacro("__ARM_ARCH_" + CPUArch + "__"); // ACLE 6.4.1 ARM/Thumb instruction set architecture StringRef CPUProfile = getCPUProfile(CPU); StringRef ArchName = getTriple().getArchName(); // __ARM_ARCH is defined as an integer value indicating the current ARM ISA Builder.defineMacro("__ARM_ARCH", CPUArch.substr(0, 1)); if (CPUArch[0] >= '8') { Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN"); Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING"); } // __ARM_ARCH_ISA_ARM is defined to 1 if the core supports the ARM ISA. It // is not defined for the M-profile. // Note that the default profile is assumed to be 'A'. if (CPUProfile.empty() || CPUProfile != "M") Builder.defineMacro("__ARM_ARCH_ISA_ARM", "1"); // __ARM_ARCH_ISA_THUMB is defined to 1 if the core supports the original // Thumb ISA (including v6-M). It is set to 2 if the core supports the // Thumb-2 ISA as found in the v6T2 architecture and all v7 architectures. if (supportsThumb2(ArchName, CPUArch, CPUArchVer)) Builder.defineMacro("__ARM_ARCH_ISA_THUMB", "2"); else if (supportsThumb(ArchName, CPUArch, CPUArchVer)) Builder.defineMacro("__ARM_ARCH_ISA_THUMB", "1"); // __ARM_32BIT_STATE is defined to 1 if code is being generated for a 32-bit // instruction set such as ARM or Thumb. Builder.defineMacro("__ARM_32BIT_STATE", "1"); // ACLE 6.4.2 Architectural Profile (A, R, M or pre-Cortex) // __ARM_ARCH_PROFILE is defined as 'A', 'R', 'M' or 'S', or unset. if (!CPUProfile.empty()) Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + CPUProfile + "'"); // ACLE 6.5.1 Hardware Floating Point if (HW_FP) Builder.defineMacro("__ARM_FP", "0x" + llvm::utohexstr(HW_FP)); // ACLE predefines. Builder.defineMacro("__ARM_ACLE", "200"); // Subtarget options. // FIXME: It's more complicated than this and we don't really support // interworking. // Windows on ARM does not "support" interworking if (5 <= CPUArchVer && CPUArchVer <= 8 && !getTriple().isOSWindows()) Builder.defineMacro("__THUMB_INTERWORK__"); if (ABI == "aapcs" || ABI == "aapcs-linux" || ABI == "aapcs-vfp") { // Embedded targets on Darwin follow AAPCS, but not EABI. // Windows on ARM follows AAPCS VFP, but does not conform to EABI.
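// Editor's note (illustrative, not vendor code): under an "aapcs-linux"
// target such as armv7-linux-gnueabihf, the block below defines
// __ARM_EABI__ and __ARM_PCS, plus __ARM_PCS_VFP for the hard-float ABI;
// on Darwin and Windows, __ARM_EABI__ is deliberately left undefined.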
if (!getTriple().isOSDarwin() && !getTriple().isOSWindows()) Builder.defineMacro("__ARM_EABI__"); Builder.defineMacro("__ARM_PCS", "1"); if ((!SoftFloat && !SoftFloatABI) || ABI == "aapcs-vfp") Builder.defineMacro("__ARM_PCS_VFP", "1"); } if (SoftFloat) Builder.defineMacro("__SOFTFP__"); if (CPU == "xscale") Builder.defineMacro("__XSCALE__"); if (IsThumb) { Builder.defineMacro("__THUMBEL__"); Builder.defineMacro("__thumb__"); if (supportsThumb2(ArchName, CPUArch, CPUArchVer)) Builder.defineMacro("__thumb2__"); } if (((HWDiv & HWDivThumb) && IsThumb) || ((HWDiv & HWDivARM) && !IsThumb)) Builder.defineMacro("__ARM_ARCH_EXT_IDIV__", "1"); // Note, this is always on in gcc, even though it doesn't make sense. Builder.defineMacro("__APCS_32__"); if (FPUModeIsVFP((FPUMode) FPU)) { Builder.defineMacro("__VFP_FP__"); if (FPU & VFP2FPU) Builder.defineMacro("__ARM_VFPV2__"); if (FPU & VFP3FPU) Builder.defineMacro("__ARM_VFPV3__"); if (FPU & VFP4FPU) Builder.defineMacro("__ARM_VFPV4__"); } // This only gets set when Neon instructions are actually available, unlike // the VFP define, hence the soft float and arch check. This is subtly // different from gcc, we follow the intent which was that it should be set // when Neon instructions are actually available. if ((FPU & NeonFPU) && !SoftFloat && CPUArchVer >= 7) { Builder.defineMacro("__ARM_NEON"); Builder.defineMacro("__ARM_NEON__"); } Builder.defineMacro("__ARM_SIZEOF_WCHAR_T", Opts.ShortWChar ? "2" : "4"); Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4"); if (CRC) Builder.defineMacro("__ARM_FEATURE_CRC32"); if (Crypto) Builder.defineMacro("__ARM_FEATURE_CRYPTO"); if (CPUArchVer >= 6 && CPUArch != "6M") { Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1"); Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2"); Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4"); Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8"); } bool is5EOrAbove = (CPUArchVer >= 6 || (CPUArchVer == 5 && CPUArch.find('E') != StringRef::npos)); bool is32Bit = (!IsThumb || supportsThumb2(ArchName, CPUArch, CPUArchVer)); if (is5EOrAbove && is32Bit && (CPUProfile != "M" || CPUArch == "7EM")) Builder.defineMacro("__ARM_FEATURE_DSP"); } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { Records = BuiltinInfo; NumRecords = clang::ARM::LastTSBuiltin-Builtin::FirstTSBuiltin; } bool isCLZForZeroUndef() const override { return false; } BuiltinVaListKind getBuiltinVaListKind() const override { return IsAAPCS ? AAPCSABIBuiltinVaList : TargetInfo::VoidPtrBuiltinVaList; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override; void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override; bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const override { switch (*Name) { default: break; case 'l': // r0-r7 case 'h': // r8-r15 case 'w': // VFP Floating point register single precision case 'P': // VFP Floating point register double precision Info.setAllowsRegister(); return true; case 'I': case 'J': case 'K': case 'L': case 'M': // FIXME return true; case 'Q': // A memory address that is a single base register. Info.setAllowsMemory(); return true; case 'U': // a memory reference... 
switch (Name[1]) { case 'q': // ...ARMV4 ldrsb case 'v': // ...VFP load/store (reg+constant offset) case 'y': // ...iWMMXt load/store case 't': // address valid for load/store opaque types wider // than 128-bits case 'n': // valid address for Neon doubleword vector load/store case 'm': // valid address for Neon element and structure load/store case 's': // valid address for non-offset loads/stores of quad-word // values in four ARM registers Info.setAllowsMemory(); Name++; return true; } } return false; } std::string convertConstraint(const char *&Constraint) const override { std::string R; switch (*Constraint) { case 'U': // Two-character constraint; add "^" hint for later parsing. R = std::string("^") + std::string(Constraint, 2); Constraint++; break; case 'p': // 'p' should be translated to 'r' by default. R = std::string("r"); break; default: return std::string(1, *Constraint); } return R; } bool validateConstraintModifier(StringRef Constraint, char Modifier, unsigned Size, std::string &SuggestedModifier) const override { bool isOutput = (Constraint[0] == '='); bool isInOut = (Constraint[0] == '+'); // Strip off constraint modifiers. while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&') Constraint = Constraint.substr(1); switch (Constraint[0]) { default: break; case 'r': { switch (Modifier) { default: return (isInOut || isOutput || Size <= 64); case 'q': // A register of size 32 cannot fit a vector type. return false; } } } return true; } const char *getClobbers() const override { // FIXME: Is this really right? return ""; } CallingConvCheckResult checkCallingConvention(CallingConv CC) const override { return (CC == CC_AAPCS || CC == CC_AAPCS_VFP) ? CCCR_OK : CCCR_Warning; } int getEHDataRegisterNumber(unsigned RegNo) const override { if (RegNo == 0) return 0; if (RegNo == 1) return 1; return -1; } }; bool ARMTargetInfo::setFPMath(StringRef Name) { if (Name == "neon") { FPMath = FP_Neon; return true; } else if (Name == "vfp" || Name == "vfp2" || Name == "vfp3" || Name == "vfp4") { FPMath = FP_VFP; return true; } return false; } const char * const ARMTargetInfo::GCCRegNames[] = { // Integer registers "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc", // Float registers "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", // Double registers "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31", // Quad registers "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" }; void ARMTargetInfo::getGCCRegNames(const char * const *&Names, unsigned &NumNames) const { Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } const TargetInfo::GCCRegAlias ARMTargetInfo::GCCRegAliases[] = { { { "a1" }, "r0" }, { { "a2" }, "r1" }, { { "a3" }, "r2" }, { { "a4" }, "r3" }, { { "v1" }, "r4" }, { { "v2" }, "r5" }, { { "v3" }, "r6" }, { { "v4" }, "r7" }, { { "v5" }, "r8" }, { { "v6", "rfp" }, "r9" }, { { "sl" }, "r10" }, { { "fp" }, "r11" }, { { "ip" }, "r12" }, { { "r13" }, "sp" }, { { "r14" }, "lr" }, { { "r15" }, "pc" }, // The S, D and Q registers overlap, but aren't really aliases; we // don't want to substitute one of these for a different-sized one. 
}; void ARMTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const { Aliases = GCCRegAliases; NumAliases = llvm::array_lengthof(GCCRegAliases); } const Builtin::Info ARMTargetInfo::BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\ ALL_LANGUAGES }, #include "clang/Basic/BuiltinsNEON.def" #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #define LANGBUILTIN(ID, TYPE, ATTRS, LANG) { #ID, TYPE, ATTRS, 0, LANG }, #define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\ ALL_LANGUAGES }, #include "clang/Basic/BuiltinsARM.def" }; class ARMleTargetInfo : public ARMTargetInfo { public: ARMleTargetInfo(const llvm::Triple &Triple) : ARMTargetInfo(Triple, false) { } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("__ARMEL__"); ARMTargetInfo::getTargetDefines(Opts, Builder); } }; class ARMbeTargetInfo : public ARMTargetInfo { public: ARMbeTargetInfo(const llvm::Triple &Triple) : ARMTargetInfo(Triple, true) { } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("__ARMEB__"); Builder.defineMacro("__ARM_BIG_ENDIAN"); ARMTargetInfo::getTargetDefines(Opts, Builder); } }; class WindowsARMTargetInfo : public WindowsTargetInfo { const llvm::Triple Triple; public: WindowsARMTargetInfo(const llvm::Triple &Triple) : WindowsTargetInfo(Triple), Triple(Triple) { TLSSupported = false; WCharType = UnsignedShort; SizeType = UnsignedInt; UserLabelPrefix = ""; } void getVisualStudioDefines(const LangOptions &Opts, MacroBuilder &Builder) const { WindowsTargetInfo::getVisualStudioDefines(Opts, Builder); // FIXME: this is invalid for WindowsCE Builder.defineMacro("_M_ARM_NT", "1"); Builder.defineMacro("_M_ARMT", "_M_ARM"); Builder.defineMacro("_M_THUMB", "_M_ARM"); assert((Triple.getArch() == llvm::Triple::arm || Triple.getArch() == llvm::Triple::thumb) && "invalid architecture for Windows ARM target info"); unsigned Offset = Triple.getArch() == llvm::Triple::arm ? 
4 : 6; Builder.defineMacro("_M_ARM", Triple.getArchName().substr(Offset)); // TODO map the complete set of values // 31: VFPv3 40: VFPv4 Builder.defineMacro("_M_ARM_FP", "31"); } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::CharPtrBuiltinVaList; } }; // Windows ARM + Itanium C++ ABI Target class ItaniumWindowsARMleTargetInfo : public WindowsARMTargetInfo { public: ItaniumWindowsARMleTargetInfo(const llvm::Triple &Triple) : WindowsARMTargetInfo(Triple) { TheCXXABI.set(TargetCXXABI::GenericARM); } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { WindowsARMTargetInfo::getTargetDefines(Opts, Builder); if (Opts.MSVCCompat) WindowsARMTargetInfo::getVisualStudioDefines(Opts, Builder); } }; // Windows ARM, MS (C++) ABI class MicrosoftARMleTargetInfo : public WindowsARMTargetInfo { public: MicrosoftARMleTargetInfo(const llvm::Triple &Triple) : WindowsARMTargetInfo(Triple) { TheCXXABI.set(TargetCXXABI::Microsoft); } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { WindowsARMTargetInfo::getTargetDefines(Opts, Builder); WindowsARMTargetInfo::getVisualStudioDefines(Opts, Builder); } }; class DarwinARMTargetInfo : public DarwinTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion); } public: DarwinARMTargetInfo(const llvm::Triple &Triple) : DarwinTargetInfo(Triple) { HasAlignMac68kSupport = true; // iOS always has 64-bit atomic instructions. // FIXME: This should be based off of the target features in // ARMleTargetInfo. MaxAtomicInlineWidth = 64; // Darwin on iOS uses a variant of the ARM C++ ABI. TheCXXABI.set(TargetCXXABI::iOS); } }; class AArch64TargetInfo : public TargetInfo { virtual void setDescriptionString() = 0; static const TargetInfo::GCCRegAlias GCCRegAliases[]; static const char *const GCCRegNames[]; enum FPUModeEnum { FPUMode, NeonMode }; unsigned FPU; unsigned CRC; unsigned Crypto; static const Builtin::Info BuiltinInfo[]; std::string ABI; public: AArch64TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple), ABI("aapcs") { if (getTriple().getOS() == llvm::Triple::NetBSD) { WCharType = SignedInt; // NetBSD apparently prefers consistency across ARM targets to consistency // across 64-bit targets. Int64Type = SignedLongLong; IntMaxType = SignedLongLong; } else { WCharType = UnsignedInt; Int64Type = SignedLong; IntMaxType = SignedLong; } LongWidth = LongAlign = PointerWidth = PointerAlign = 64; MaxVectorAlign = 128; MaxAtomicInlineWidth = 128; MaxAtomicPromoteWidth = 128; LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128; LongDoubleFormat = &llvm::APFloat::IEEEquad; // {} in inline assembly are neon specifiers, not assembly variant // specifiers. NoAsmVariants = true; // AAPCS gives rules for bitfields. 7.1.7 says: "The container type // contributes to the alignment of the containing aggregate in the same way // a plain (non bit-field) member of that type would, without exception for // zero-sized or anonymous bit-fields." UseBitFieldTypeAlignment = true; UseZeroLengthBitfieldAlignment = true; // AArch64 targets default to using the ARM C++ ABI. 
TheCXXABI.set(TargetCXXABI::GenericAArch64); } StringRef getABI() const override { return ABI; } bool setABI(const std::string &Name) override { if (Name != "aapcs" && Name != "darwinpcs") return false; ABI = Name; return true; } bool setCPU(const std::string &Name) override { bool CPUKnown = llvm::StringSwitch(Name) .Case("generic", true) .Cases("cortex-a53", "cortex-a57", "cortex-a72", true) .Case("cyclone", true) .Default(false); return CPUKnown; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { // Target identification. Builder.defineMacro("__aarch64__"); // Target properties. Builder.defineMacro("_LP64"); Builder.defineMacro("__LP64__"); // ACLE predefines. Many can only have one possible value on v8 AArch64. Builder.defineMacro("__ARM_ACLE", "200"); Builder.defineMacro("__ARM_ARCH", "8"); Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'"); Builder.defineMacro("__ARM_64BIT_STATE"); Builder.defineMacro("__ARM_PCS_AAPCS64"); Builder.defineMacro("__ARM_ARCH_ISA_A64"); Builder.defineMacro("__ARM_FEATURE_UNALIGNED"); Builder.defineMacro("__ARM_FEATURE_CLZ"); Builder.defineMacro("__ARM_FEATURE_FMA"); Builder.defineMacro("__ARM_FEATURE_DIV"); Builder.defineMacro("__ARM_FEATURE_IDIV"); // As specified in ACLE Builder.defineMacro("__ARM_FEATURE_DIV"); // For backwards compatibility Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN"); Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING"); Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4"); // 0xe implies support for half, single and double precision operations. Builder.defineMacro("__ARM_FP", "0xe"); // PCS specifies this for SysV variants, which is all we support. Other ABIs // may choose __ARM_FP16_FORMAT_ALTERNATIVE. Builder.defineMacro("__ARM_FP16_FORMAT_IEEE"); if (Opts.FastMath || Opts.FiniteMathOnly) Builder.defineMacro("__ARM_FP_FAST"); if (Opts.C99 && !Opts.Freestanding) Builder.defineMacro("__ARM_FP_FENV_ROUNDING"); Builder.defineMacro("__ARM_SIZEOF_WCHAR_T", Opts.ShortWChar ? "2" : "4"); Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4"); if (FPU == NeonMode) { Builder.defineMacro("__ARM_NEON"); // 64-bit NEON supports half, single and double precision operations. Builder.defineMacro("__ARM_NEON_FP", "0xe"); } if (CRC) Builder.defineMacro("__ARM_FEATURE_CRC32"); if (Crypto) Builder.defineMacro("__ARM_FEATURE_CRYPTO"); // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work. 
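// Editor's note (illustrative, not vendor code): user code and libraries
// commonly key on the macros defined below; a hypothetical sketch,
// disabled so the translation unit is unaffected:
#if 0
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
// 8-byte __sync_* builtins are lowered inline on this target.
#endif
#endif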
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1"); Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2"); Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4"); Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8"); } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { Records = BuiltinInfo; NumRecords = clang::AArch64::LastTSBuiltin - Builtin::FirstTSBuiltin; } bool hasFeature(StringRef Feature) const override { return Feature == "aarch64" || Feature == "arm64" || (Feature == "neon" && FPU == NeonMode); } bool handleTargetFeatures(std::vector &Features, DiagnosticsEngine &Diags) override { FPU = FPUMode; CRC = 0; Crypto = 0; for (unsigned i = 0, e = Features.size(); i != e; ++i) { if (Features[i] == "+neon") FPU = NeonMode; if (Features[i] == "+crc") CRC = 1; if (Features[i] == "+crypto") Crypto = 1; } setDescriptionString(); return true; } bool isCLZForZeroUndef() const override { return false; } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::AArch64ABIBuiltinVaList; } void getGCCRegNames(const char *const *&Names, unsigned &NumNames) const override; void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override; bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const override { switch (*Name) { default: return false; case 'w': // Floating point and SIMD registers (V0-V31) Info.setAllowsRegister(); return true; case 'I': // Constant that can be used with an ADD instruction case 'J': // Constant that can be used with a SUB instruction case 'K': // Constant that can be used with a 32-bit logical instruction case 'L': // Constant that can be used with a 64-bit logical instruction case 'M': // Constant that can be used as a 32-bit MOV immediate case 'N': // Constant that can be used as a 64-bit MOV immediate case 'Y': // Floating point constant zero case 'Z': // Integer constant zero return true; case 'Q': // A memory reference with base register and no offset Info.setAllowsMemory(); return true; case 'S': // A symbolic address Info.setAllowsRegister(); return true; case 'U': // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes. // Utf: A memory address suitable for ldp/stp in TF mode. // Usa: An absolute symbolic address. // Ush: The high part (bits 32:12) of a pc-relative symbolic address. llvm_unreachable("FIXME: Unimplemented support for U* constraints."); case 'z': // Zero register, wzr or xzr Info.setAllowsRegister(); return true; case 'x': // Floating point and SIMD registers (V0-V15) Info.setAllowsRegister(); return true; } return false; } bool validateConstraintModifier(StringRef Constraint, char Modifier, unsigned Size, std::string &SuggestedModifier) const override { // Strip off constraint modifiers. while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&') Constraint = Constraint.substr(1); switch (Constraint[0]) { default: return true; case 'z': case 'r': { switch (Modifier) { case 'x': case 'w': // For now assume that the person knows what they're // doing with the modifier. return true; default: // By default an 'r' constraint will be in the 'x' // registers. 
if (Size == 64) return true; SuggestedModifier = "w"; return false; } } } const char *getClobbers() const override { return ""; } int getEHDataRegisterNumber(unsigned RegNo) const override { if (RegNo == 0) return 0; if (RegNo == 1) return 1; return -1; } }; const char *const AArch64TargetInfo::GCCRegNames[] = { // 32-bit Integer registers "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp", // 64-bit Integer registers "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp", // 32-bit floating point registers "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", // 64-bit floating point registers "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31", // Vector registers "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" }; void AArch64TargetInfo::getGCCRegNames(const char *const *&Names, unsigned &NumNames) const { Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = { { { "w31" }, "wsp" }, { { "x29" }, "fp" }, { { "x30" }, "lr" }, { { "x31" }, "sp" }, // The S/D/Q and W/X registers overlap, but aren't really aliases; we // don't want to substitute one of these for a different-sized one.
}; void AArch64TargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const { Aliases = GCCRegAliases; NumAliases = llvm::array_lengthof(GCCRegAliases); } const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) \ { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #include "clang/Basic/BuiltinsNEON.def" #define BUILTIN(ID, TYPE, ATTRS) \ { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #include "clang/Basic/BuiltinsAArch64.def" }; class AArch64leTargetInfo : public AArch64TargetInfo { void setDescriptionString() override { if (getTriple().isOSBinFormatMachO()) DescriptionString = "e-m:o-i64:64-i128:128-n32:64-S128"; else DescriptionString = "e-m:e-i64:64-i128:128-n32:64-S128"; } public: AArch64leTargetInfo(const llvm::Triple &Triple) : AArch64TargetInfo(Triple) { BigEndian = false; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("__AARCH64EL__"); AArch64TargetInfo::getTargetDefines(Opts, Builder); } }; class AArch64beTargetInfo : public AArch64TargetInfo { void setDescriptionString() override { assert(!getTriple().isOSBinFormatMachO()); DescriptionString = "E-m:e-i64:64-i128:128-n32:64-S128"; } public: AArch64beTargetInfo(const llvm::Triple &Triple) : AArch64TargetInfo(Triple) { } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("__AARCH64EB__"); Builder.defineMacro("__AARCH_BIG_ENDIAN"); Builder.defineMacro("__ARM_BIG_ENDIAN"); AArch64TargetInfo::getTargetDefines(Opts, Builder); } }; class DarwinAArch64TargetInfo : public DarwinTargetInfo { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override { Builder.defineMacro("__AARCH64_SIMD__"); Builder.defineMacro("__ARM64_ARCH_8__"); Builder.defineMacro("__ARM_NEON__"); Builder.defineMacro("__LITTLE_ENDIAN__"); Builder.defineMacro("__REGISTER_PREFIX__", ""); Builder.defineMacro("__arm64", "1"); Builder.defineMacro("__arm64__", "1"); getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion); } public: DarwinAArch64TargetInfo(const llvm::Triple &Triple) : DarwinTargetInfo(Triple) { Int64Type = SignedLongLong; WCharType = SignedInt; UseSignedCharForObjCBool = false; LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64; LongDoubleFormat = &llvm::APFloat::IEEEdouble; TheCXXABI.set(TargetCXXABI::iOS64); } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::CharPtrBuiltinVaList; } }; // Hexagon abstract base class class HexagonTargetInfo : public TargetInfo { static const Builtin::Info BuiltinInfo[]; static const char * const GCCRegNames[]; static const TargetInfo::GCCRegAlias GCCRegAliases[]; std::string CPU; public: HexagonTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) { BigEndian = false; DescriptionString = "e-m:e-p:32:32-i1:32-i64:64-a:0-n32"; // {} in inline assembly are packet specifiers, not assembly variant // specifiers. 
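// Editor's note (illustrative, not vendor code): a Hexagon packet groups
// instructions that issue together, which is why the braces must reach
// the assembler untouched rather than being parsed as variant markers.
// A hypothetical sketch, disabled so the translation unit is unaffected:
#if 0
asm("{ r2 = r3 ; r3 = r2 }"); // parallel swap within a single packet
#endif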
NoAsmVariants = true; } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { Records = BuiltinInfo; NumRecords = clang::Hexagon::LastTSBuiltin-Builtin::FirstTSBuiltin; } bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const override { return true; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override; bool hasFeature(StringRef Feature) const override { return Feature == "hexagon"; } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::CharPtrBuiltinVaList; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override; void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override; const char *getClobbers() const override { return ""; } static const char *getHexagonCPUSuffix(StringRef Name) { return llvm::StringSwitch(Name) .Case("hexagonv4", "4") .Case("hexagonv5", "5") .Default(nullptr); } bool setCPU(const std::string &Name) override { if (!getHexagonCPUSuffix(Name)) return false; CPU = Name; return true; } }; void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const { Builder.defineMacro("qdsp6"); Builder.defineMacro("__qdsp6", "1"); Builder.defineMacro("__qdsp6__", "1"); Builder.defineMacro("hexagon"); Builder.defineMacro("__hexagon", "1"); Builder.defineMacro("__hexagon__", "1"); if(CPU == "hexagonv1") { Builder.defineMacro("__HEXAGON_V1__"); Builder.defineMacro("__HEXAGON_ARCH__", "1"); if(Opts.HexagonQdsp6Compat) { Builder.defineMacro("__QDSP6_V1__"); Builder.defineMacro("__QDSP6_ARCH__", "1"); } } else if(CPU == "hexagonv2") { Builder.defineMacro("__HEXAGON_V2__"); Builder.defineMacro("__HEXAGON_ARCH__", "2"); if(Opts.HexagonQdsp6Compat) { Builder.defineMacro("__QDSP6_V2__"); Builder.defineMacro("__QDSP6_ARCH__", "2"); } } else if(CPU == "hexagonv3") { Builder.defineMacro("__HEXAGON_V3__"); Builder.defineMacro("__HEXAGON_ARCH__", "3"); if(Opts.HexagonQdsp6Compat) { Builder.defineMacro("__QDSP6_V3__"); Builder.defineMacro("__QDSP6_ARCH__", "3"); } } else if(CPU == "hexagonv4") { Builder.defineMacro("__HEXAGON_V4__"); Builder.defineMacro("__HEXAGON_ARCH__", "4"); if(Opts.HexagonQdsp6Compat) { Builder.defineMacro("__QDSP6_V4__"); Builder.defineMacro("__QDSP6_ARCH__", "4"); } } else if(CPU == "hexagonv5") { Builder.defineMacro("__HEXAGON_V5__"); Builder.defineMacro("__HEXAGON_ARCH__", "5"); if(Opts.HexagonQdsp6Compat) { Builder.defineMacro("__QDSP6_V5__"); Builder.defineMacro("__QDSP6_ARCH__", "5"); } } } const char * const HexagonTargetInfo::GCCRegNames[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", "p0", "p1", "p2", "p3", "sa0", "lc0", "sa1", "lc1", "m0", "m1", "usr", "ugp" }; void HexagonTargetInfo::getGCCRegNames(const char * const *&Names, unsigned &NumNames) const { Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } const TargetInfo::GCCRegAlias HexagonTargetInfo::GCCRegAliases[] = { { { "sp" }, "r29" }, { { "fp" }, "r30" }, { { "lr" }, "r31" }, }; void HexagonTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const { Aliases = GCCRegAliases; NumAliases = llvm::array_lengthof(GCCRegAliases); } const Builtin::Info HexagonTargetInfo::BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #define LIBBUILTIN(ID, TYPE, 
ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\ ALL_LANGUAGES }, #include "clang/Basic/BuiltinsHexagon.def" }; // Shared base class for SPARC v8 (32-bit) and SPARC v9 (64-bit). class SparcTargetInfo : public TargetInfo { static const TargetInfo::GCCRegAlias GCCRegAliases[]; static const char * const GCCRegNames[]; bool SoftFloat; public: SparcTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple), SoftFloat(false) {} bool handleTargetFeatures(std::vector &Features, DiagnosticsEngine &Diags) override { // The backend doesn't actually handle soft float yet, but in case someone // is using the support for the front end continue to support it. auto Feature = std::find(Features.begin(), Features.end(), "+soft-float"); if (Feature != Features.end()) { SoftFloat = true; Features.erase(Feature); } return true; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { DefineStd(Builder, "sparc", Opts); Builder.defineMacro("__REGISTER_PREFIX__", ""); if (SoftFloat) Builder.defineMacro("SOFT_FLOAT", "1"); } bool hasFeature(StringRef Feature) const override { return llvm::StringSwitch(Feature) .Case("softfloat", SoftFloat) .Case("sparc", true) .Default(false); } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { // FIXME: Implement! } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::VoidPtrBuiltinVaList; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override; void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override; bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &info) const override { // FIXME: Implement! switch (*Name) { case 'I': // Signed 13-bit constant case 'J': // Zero case 'K': // 32-bit constant with the low 12 bits clear case 'L': // A constant in the range supported by movcc (11-bit signed imm) case 'M': // A constant in the range supported by movrcc (19-bit signed imm) case 'N': // Same as 'K' but zext (required for SIMode) case 'O': // The constant 4096 return true; } return false; } const char *getClobbers() const override { // FIXME: Implement! 
return ""; } }; const char * const SparcTargetInfo::GCCRegNames[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31" }; void SparcTargetInfo::getGCCRegNames(const char * const *&Names, unsigned &NumNames) const { Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } const TargetInfo::GCCRegAlias SparcTargetInfo::GCCRegAliases[] = { { { "g0" }, "r0" }, { { "g1" }, "r1" }, { { "g2" }, "r2" }, { { "g3" }, "r3" }, { { "g4" }, "r4" }, { { "g5" }, "r5" }, { { "g6" }, "r6" }, { { "g7" }, "r7" }, { { "o0" }, "r8" }, { { "o1" }, "r9" }, { { "o2" }, "r10" }, { { "o3" }, "r11" }, { { "o4" }, "r12" }, { { "o5" }, "r13" }, { { "o6", "sp" }, "r14" }, { { "o7" }, "r15" }, { { "l0" }, "r16" }, { { "l1" }, "r17" }, { { "l2" }, "r18" }, { { "l3" }, "r19" }, { { "l4" }, "r20" }, { { "l5" }, "r21" }, { { "l6" }, "r22" }, { { "l7" }, "r23" }, { { "i0" }, "r24" }, { { "i1" }, "r25" }, { { "i2" }, "r26" }, { { "i3" }, "r27" }, { { "i4" }, "r28" }, { { "i5" }, "r29" }, { { "i6", "fp" }, "r30" }, { { "i7" }, "r31" }, }; void SparcTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const { Aliases = GCCRegAliases; NumAliases = llvm::array_lengthof(GCCRegAliases); } // SPARC v8 is the 32-bit mode selected by Triple::sparc. class SparcV8TargetInfo : public SparcTargetInfo { public: SparcV8TargetInfo(const llvm::Triple &Triple) : SparcTargetInfo(Triple) { DescriptionString = "E-m:e-p:32:32-i64:64-f128:64-n32-S64"; // NetBSD uses long (same as llvm default); everyone else uses int. if (getTriple().getOS() == llvm::Triple::NetBSD) { SizeType = UnsignedLong; IntPtrType = SignedLong; PtrDiffType = SignedLong; } else { SizeType = UnsignedInt; IntPtrType = SignedInt; PtrDiffType = SignedInt; } } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { SparcTargetInfo::getTargetDefines(Opts, Builder); Builder.defineMacro("__sparcv8"); } }; // SPARCV8el is the 32-bit little-endian mode selected by Triple::sparcel. class SparcV8elTargetInfo : public SparcV8TargetInfo { public: SparcV8elTargetInfo(const llvm::Triple &Triple) : SparcV8TargetInfo(Triple) { DescriptionString = "e-m:e-p:32:32-i64:64-f128:64-n32-S64"; BigEndian = false; } }; // SPARC v9 is the 64-bit mode selected by Triple::sparcv9. class SparcV9TargetInfo : public SparcTargetInfo { public: SparcV9TargetInfo(const llvm::Triple &Triple) : SparcTargetInfo(Triple) { // FIXME: Support Sparc quad-precision long double? DescriptionString = "E-m:e-i64:64-n32:64-S128"; // This is an LP64 platform. LongWidth = LongAlign = PointerWidth = PointerAlign = 64; // OpenBSD uses long long for int64_t and intmax_t. if (getTriple().getOS() == llvm::Triple::OpenBSD) IntMaxType = SignedLongLong; else IntMaxType = SignedLong; Int64Type = IntMaxType; // The SPARCv8 System V ABI has long double 128-bits in size, but 64-bit // aligned. The SPARCv9 SCD 2.4.1 says 16-byte aligned. LongDoubleWidth = 128; LongDoubleAlign = 128; LongDoubleFormat = &llvm::APFloat::IEEEquad; MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { SparcTargetInfo::getTargetDefines(Opts, Builder); Builder.defineMacro("__sparcv9"); Builder.defineMacro("__arch64__"); // Solaris doesn't need these variants, but the BSDs do. 
if (getTriple().getOS() != llvm::Triple::Solaris) { Builder.defineMacro("__sparc64__"); Builder.defineMacro("__sparc_v9__"); Builder.defineMacro("__sparcv9__"); } } bool setCPU(const std::string &Name) override { bool CPUKnown = llvm::StringSwitch(Name) .Case("v9", true) .Case("ultrasparc", true) .Case("ultrasparc3", true) .Case("niagara", true) .Case("niagara2", true) .Case("niagara3", true) .Case("niagara4", true) .Default(false); // No need to store the CPU yet. There aren't any CPU-specific // macros to define. return CPUKnown; } }; class SystemZTargetInfo : public TargetInfo { static const Builtin::Info BuiltinInfo[]; static const char *const GCCRegNames[]; std::string CPU; bool HasTransactionalExecution; bool HasVector; public: SystemZTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple), CPU("z10"), HasTransactionalExecution(false), HasVector(false) { IntMaxType = SignedLong; Int64Type = SignedLong; TLSSupported = true; IntWidth = IntAlign = 32; LongWidth = LongLongWidth = LongAlign = LongLongAlign = 64; PointerWidth = PointerAlign = 64; LongDoubleWidth = 128; LongDoubleAlign = 64; LongDoubleFormat = &llvm::APFloat::IEEEquad; DefaultAlignForAttributeAligned = 64; MinGlobalAlign = 16; DescriptionString = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"; MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("__s390__"); Builder.defineMacro("__s390x__"); Builder.defineMacro("__zarch__"); Builder.defineMacro("__LONG_DOUBLE_128__"); if (HasTransactionalExecution) Builder.defineMacro("__HTM__"); if (Opts.ZVector) Builder.defineMacro("__VEC__", "10301"); } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { Records = BuiltinInfo; NumRecords = clang::SystemZ::LastTSBuiltin-Builtin::FirstTSBuiltin; } void getGCCRegNames(const char *const *&Names, unsigned &NumNames) const override; void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override { // No aliases. Aliases = nullptr; NumAliases = 0; } bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &info) const override; const char *getClobbers() const override { // FIXME: Is this really right? return ""; } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::SystemZBuiltinVaList; } bool setCPU(const std::string &Name) override { CPU = Name; bool CPUKnown = llvm::StringSwitch(Name) .Case("z10", true) .Case("z196", true) .Case("zEC12", true) .Case("z13", true) .Default(false); return CPUKnown; } void getDefaultFeatures(llvm::StringMap &Features) const override { if (CPU == "zEC12") Features["transactional-execution"] = true; if (CPU == "z13") { Features["transactional-execution"] = true; Features["vector"] = true; } } bool handleTargetFeatures(std::vector &Features, DiagnosticsEngine &Diags) override { HasTransactionalExecution = false; for (unsigned i = 0, e = Features.size(); i != e; ++i) { if (Features[i] == "+transactional-execution") HasTransactionalExecution = true; if (Features[i] == "+vector") HasVector = true; } // If we use the vector ABI, vector types are 64-bit aligned. 
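// Editor's note (illustrative, not vendor code): e.g. for CPU "z13",
// getDefaultFeatures() enables "vector", so the block below switches to
// the vector data layout and caps MaxVectorAlign at 64 bits, getABI()
// reports "vector", and -fzvector additionally defines __VEC__.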
if (HasVector) { MaxVectorAlign = 64; DescriptionString = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64" "-v128:64-a:8:16-n32:64"; } return true; } bool hasFeature(StringRef Feature) const override { return llvm::StringSwitch(Feature) .Case("systemz", true) .Case("htm", HasTransactionalExecution) .Case("vx", HasVector) .Default(false); } StringRef getABI() const override { if (HasVector) return "vector"; return ""; } bool useFloat128ManglingForLongDouble() const override { return true; } }; const Builtin::Info SystemZTargetInfo::BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) \ { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #include "clang/Basic/BuiltinsSystemZ.def" }; const char *const SystemZTargetInfo::GCCRegNames[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "f0", "f2", "f4", "f6", "f1", "f3", "f5", "f7", "f8", "f10", "f12", "f14", "f9", "f11", "f13", "f15" }; void SystemZTargetInfo::getGCCRegNames(const char *const *&Names, unsigned &NumNames) const { Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } bool SystemZTargetInfo:: validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const { switch (*Name) { default: return false; case 'a': // Address register case 'd': // Data register (equivalent to 'r') case 'f': // Floating-point register Info.setAllowsRegister(); return true; case 'I': // Unsigned 8-bit constant case 'J': // Unsigned 12-bit constant case 'K': // Signed 16-bit constant case 'L': // Signed 20-bit displacement (on all targets we support) case 'M': // 0x7fffffff return true; case 'Q': // Memory with base and unsigned 12-bit displacement case 'R': // Likewise, plus an index case 'S': // Memory with base and signed 20-bit displacement case 'T': // Likewise, plus an index Info.setAllowsMemory(); return true; } } class MSP430TargetInfo : public TargetInfo { static const char * const GCCRegNames[]; public: MSP430TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) { BigEndian = false; TLSSupported = false; IntWidth = 16; IntAlign = 16; LongWidth = 32; LongLongWidth = 64; LongAlign = LongLongAlign = 16; PointerWidth = 16; PointerAlign = 16; SuitableAlign = 16; SizeType = UnsignedInt; IntMaxType = SignedLongLong; IntPtrType = SignedInt; PtrDiffType = SignedInt; SigAtomicType = SignedLong; DescriptionString = "e-m:e-p:16:16-i32:16:32-a:16-n8:16"; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("MSP430"); Builder.defineMacro("__MSP430__"); // FIXME: defines for different 'flavours' of MCU } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { // FIXME: Implement. Records = nullptr; NumRecords = 0; } bool hasFeature(StringRef Feature) const override { return Feature == "msp430"; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override; void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override { // No aliases. Aliases = nullptr; NumAliases = 0; } bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &info) const override { // FIXME: implement switch (*Name) { case 'K': // the constant 1 case 'L': // constant -1^20 .. 1^19 case 'M': // constant 1-4: return true; } // No target constraints for now. return false; } const char *getClobbers() const override { // FIXME: Is this really right? 
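// Editor's note (illustrative, not vendor code): given the 16-bit model
// configured in the MSP430 constructor above, e.g. sizeof(int) == 2,
// sizeof(long) == 4, sizeof(void *) == 2, and size_t is unsigned int.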
return ""; } BuiltinVaListKind getBuiltinVaListKind() const override { // FIXME: implement return TargetInfo::CharPtrBuiltinVaList; } }; const char * const MSP430TargetInfo::GCCRegNames[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" }; void MSP430TargetInfo::getGCCRegNames(const char * const *&Names, unsigned &NumNames) const { Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } // LLVM and Clang cannot be used directly to output native binaries for // target, but is used to compile C code to llvm bitcode with correct // type and alignment information. // // TCE uses the llvm bitcode as input and uses it for generating customized // target processor and program binary. TCE co-design environment is // publicly available in http://tce.cs.tut.fi static const unsigned TCEOpenCLAddrSpaceMap[] = { 3, // opencl_global 4, // opencl_local 5, // opencl_constant // FIXME: generic has to be added to the target 0, // opencl_generic 0, // cuda_device 0, // cuda_constant 0 // cuda_shared }; class TCETargetInfo : public TargetInfo{ public: TCETargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) { TLSSupported = false; IntWidth = 32; LongWidth = LongLongWidth = 32; PointerWidth = 32; IntAlign = 32; LongAlign = LongLongAlign = 32; PointerAlign = 32; SuitableAlign = 32; SizeType = UnsignedInt; IntMaxType = SignedLong; IntPtrType = SignedInt; PtrDiffType = SignedInt; FloatWidth = 32; FloatAlign = 32; DoubleWidth = 32; DoubleAlign = 32; LongDoubleWidth = 32; LongDoubleAlign = 32; FloatFormat = &llvm::APFloat::IEEEsingle; DoubleFormat = &llvm::APFloat::IEEEsingle; LongDoubleFormat = &llvm::APFloat::IEEEsingle; DescriptionString = "E-p:32:32-i8:8:32-i16:16:32-i64:32" "-f64:32-v64:32-v128:32-a:0:32-n32"; AddrSpaceMap = &TCEOpenCLAddrSpaceMap; UseAddrSpaceMapMangling = true; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { DefineStd(Builder, "tce", Opts); Builder.defineMacro("__TCE__"); Builder.defineMacro("__TCE_V1__"); } bool hasFeature(StringRef Feature) const override { return Feature == "tce"; } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override {} const char *getClobbers() const override { return ""; } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::VoidPtrBuiltinVaList; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override {} bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &info) const override{ return true; } void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override {} }; class BPFTargetInfo : public TargetInfo { public: BPFTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) { LongWidth = LongAlign = PointerWidth = PointerAlign = 64; SizeType = UnsignedLong; PtrDiffType = SignedLong; IntPtrType = SignedLong; IntMaxType = SignedLong; Int64Type = SignedLong; RegParmMax = 5; if (Triple.getArch() == llvm::Triple::bpfeb) { BigEndian = true; DescriptionString = "E-m:e-p:64:64-i64:64-n32:64-S128"; } else { BigEndian = false; DescriptionString = "e-m:e-p:64:64-i64:64-n32:64-S128"; } MaxAtomicPromoteWidth = 64; MaxAtomicInlineWidth = 64; TLSSupported = false; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { DefineStd(Builder, "bpf", Opts); Builder.defineMacro("__BPF__"); } bool hasFeature(StringRef Feature) const override { return Feature == "bpf"; } void getTargetBuiltins(const Builtin::Info 
*&Records, unsigned &NumRecords) const override {} const char *getClobbers() const override { return ""; } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::VoidPtrBuiltinVaList; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override { Names = nullptr; NumNames = 0; } bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &info) const override { return true; } void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override { Aliases = nullptr; NumAliases = 0; } }; class MipsTargetInfoBase : public TargetInfo { virtual void setDescriptionString() = 0; static const Builtin::Info BuiltinInfo[]; std::string CPU; bool IsMips16; bool IsMicromips; bool IsNan2008; bool IsSingleFloat; enum MipsFloatABI { HardFloat, SoftFloat } FloatABI; enum DspRevEnum { NoDSP, DSP1, DSP2 } DspRev; bool HasMSA; protected: bool HasFP64; std::string ABI; public: MipsTargetInfoBase(const llvm::Triple &Triple, const std::string &ABIStr, const std::string &CPUStr) : TargetInfo(Triple), CPU(CPUStr), IsMips16(false), IsMicromips(false), IsNan2008(false), IsSingleFloat(false), FloatABI(HardFloat), DspRev(NoDSP), HasMSA(false), HasFP64(false), ABI(ABIStr) { TheCXXABI.set(TargetCXXABI::GenericMIPS); } bool isNaN2008Default() const { return CPU == "mips32r6" || CPU == "mips64r6"; } bool isFP64Default() const { return CPU == "mips32r6" || ABI == "n32" || ABI == "n64" || ABI == "64"; } bool isNan2008() const override { return IsNan2008; } StringRef getABI() const override { return ABI; } bool setCPU(const std::string &Name) override { bool IsMips32 = getTriple().getArch() == llvm::Triple::mips || getTriple().getArch() == llvm::Triple::mipsel; CPU = Name; return llvm::StringSwitch(Name) .Case("mips1", IsMips32) .Case("mips2", IsMips32) .Case("mips3", true) .Case("mips4", true) .Case("mips5", true) .Case("mips32", IsMips32) .Case("mips32r2", IsMips32) .Case("mips32r3", IsMips32) .Case("mips32r5", IsMips32) .Case("mips32r6", IsMips32) .Case("mips64", true) .Case("mips64r2", true) .Case("mips64r3", true) .Case("mips64r5", true) .Case("mips64r6", true) .Case("octeon", true) .Default(false); } const std::string& getCPU() const { return CPU; } void getDefaultFeatures(llvm::StringMap &Features) const override { if (CPU == "octeon") Features["mips64r2"] = Features["cnmips"] = true; else Features[CPU] = true; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("__mips__"); Builder.defineMacro("_mips"); if (Opts.GNUMode) Builder.defineMacro("mips"); Builder.defineMacro("__REGISTER_PREFIX__", ""); switch (FloatABI) { case HardFloat: Builder.defineMacro("__mips_hard_float", Twine(1)); break; case SoftFloat: Builder.defineMacro("__mips_soft_float", Twine(1)); break; } if (IsSingleFloat) Builder.defineMacro("__mips_single_float", Twine(1)); Builder.defineMacro("__mips_fpr", HasFP64 ? Twine(64) : Twine(32)); Builder.defineMacro("_MIPS_FPSET", Twine(32 / (HasFP64 || IsSingleFloat ? 
1 : 2))); if (IsMips16) Builder.defineMacro("__mips16", Twine(1)); if (IsMicromips) Builder.defineMacro("__mips_micromips", Twine(1)); if (IsNan2008) Builder.defineMacro("__mips_nan2008", Twine(1)); switch (DspRev) { default: break; case DSP1: Builder.defineMacro("__mips_dsp_rev", Twine(1)); Builder.defineMacro("__mips_dsp", Twine(1)); break; case DSP2: Builder.defineMacro("__mips_dsp_rev", Twine(2)); Builder.defineMacro("__mips_dspr2", Twine(1)); Builder.defineMacro("__mips_dsp", Twine(1)); break; } if (HasMSA) Builder.defineMacro("__mips_msa", Twine(1)); Builder.defineMacro("_MIPS_SZPTR", Twine(getPointerWidth(0))); Builder.defineMacro("_MIPS_SZINT", Twine(getIntWidth())); Builder.defineMacro("_MIPS_SZLONG", Twine(getLongWidth())); Builder.defineMacro("_MIPS_ARCH", "\"" + CPU + "\""); Builder.defineMacro("_MIPS_ARCH_" + StringRef(CPU).upper()); } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { Records = BuiltinInfo; NumRecords = clang::Mips::LastTSBuiltin - Builtin::FirstTSBuiltin; } bool hasFeature(StringRef Feature) const override { return llvm::StringSwitch(Feature) .Case("mips", true) .Case("fp64", HasFP64) .Default(false); } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::VoidPtrBuiltinVaList; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override { static const char *const GCCRegNames[] = { // CPU register names // Must match second column of GCCRegAliases "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23", "$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31", // Floating point register names "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31", // Hi/lo and condition register names "hi", "lo", "", "$fcc0","$fcc1","$fcc2","$fcc3","$fcc4", "$fcc5","$fcc6","$fcc7", // MSA register names "$w0", "$w1", "$w2", "$w3", "$w4", "$w5", "$w6", "$w7", "$w8", "$w9", "$w10", "$w11", "$w12", "$w13", "$w14", "$w15", "$w16", "$w17", "$w18", "$w19", "$w20", "$w21", "$w22", "$w23", "$w24", "$w25", "$w26", "$w27", "$w28", "$w29", "$w30", "$w31", // MSA control register names "$msair", "$msacsr", "$msaaccess", "$msasave", "$msamodify", "$msarequest", "$msamap", "$msaunmap" }; Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override = 0; bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const override { switch (*Name) { default: return false; case 'r': // CPU registers. case 'd': // Equivalent to "r" unless generating MIPS16 code. case 'y': // Equivalent to "r", backward compatibility only. case 'f': // floating-point registers. 
    case 'c': // $25 for indirect jumps
    case 'l': // lo register
    case 'x': // hilo register pair
      Info.setAllowsRegister();
      return true;
    case 'I': // Signed 16-bit constant
    case 'J': // Integer 0
    case 'K': // Unsigned 16-bit constant
    case 'L': // Signed 32-bit constant, lower 16-bit zeros (for lui)
    case 'M': // Constants not loadable via lui, addiu, or ori
    case 'N': // Constant -1 to -65535
    case 'O': // A signed 15-bit constant
    case 'P': // A constant between 1 and 65535
      return true;
    case 'R': // An address that can be used in a non-macro load or store
      Info.setAllowsMemory();
      return true;
    case 'Z':
      if (Name[1] == 'C') { // An address usable by ll and sc.
        Info.setAllowsMemory();
        Name++; // Skip over 'Z'.
        return true;
      }
      return false;
    }
  }

  std::string convertConstraint(const char *&Constraint) const override {
    std::string R;
    switch (*Constraint) {
    case 'Z': // Two-character constraint; add "^" hint for later parsing.
      if (Constraint[1] == 'C') {
        R = std::string("^") + std::string(Constraint, 2);
        Constraint++;
        return R;
      }
      break;
    }
    return TargetInfo::convertConstraint(Constraint);
  }

  const char *getClobbers() const override {
    // In GCC, $1 is not widely used in generated code (it's used only in a few
    // specific situations), so there is no real need for users to add it to
    // the clobbers list if they want to use it in their inline assembly code.
    //
    // In LLVM, $1 is treated as a normal GPR and is always allocatable during
    // code generation, so using it in inline assembly without adding it to the
    // clobbers list can cause conflicts between the inline assembly code and
    // the surrounding generated code.
    //
    // Another problem is that LLVM is allowed to choose $1 for inline assembly
    // operands, which will conflict with the ".set at" assembler option (which
    // we use only for inline assembly, in order to maintain compatibility with
    // GCC) and will also conflict with the user's usage of $1.
    //
    // The easiest way to avoid these conflicts and keep $1 as an allocatable
    // register for generated code is to automatically clobber $1 for all inline
    // assembly code.
    //
    // FIXME: We should automatically clobber $1 only for inline assembly code
    // which actually uses it. This would allow LLVM to use $1 for inline
    // assembly operands if the user's assembly code doesn't use it.
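    //
    // Illustration (editor's addition, assumed rather than taken from the
    // original file): given
    //
    //   __asm__ volatile("addiu $1, %0, 1" : : "r"(x));
    //
    // the user writes $1 directly, and without the unconditional "~{$1}"
    // clobber returned below LLVM would remain free to assign $1 to the
    // input operand %0 or to values live around the asm.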
return "~{$1}"; } bool handleTargetFeatures(std::vector &Features, DiagnosticsEngine &Diags) override { IsMips16 = false; IsMicromips = false; IsNan2008 = isNaN2008Default(); IsSingleFloat = false; FloatABI = HardFloat; DspRev = NoDSP; HasFP64 = isFP64Default(); for (std::vector::iterator it = Features.begin(), ie = Features.end(); it != ie; ++it) { if (*it == "+single-float") IsSingleFloat = true; else if (*it == "+soft-float") FloatABI = SoftFloat; else if (*it == "+mips16") IsMips16 = true; else if (*it == "+micromips") IsMicromips = true; else if (*it == "+dsp") DspRev = std::max(DspRev, DSP1); else if (*it == "+dspr2") DspRev = std::max(DspRev, DSP2); else if (*it == "+msa") HasMSA = true; else if (*it == "+fp64") HasFP64 = true; else if (*it == "-fp64") HasFP64 = false; else if (*it == "+nan2008") IsNan2008 = true; else if (*it == "-nan2008") IsNan2008 = false; } setDescriptionString(); return true; } int getEHDataRegisterNumber(unsigned RegNo) const override { if (RegNo == 0) return 4; if (RegNo == 1) return 5; return -1; } bool isCLZForZeroUndef() const override { return false; } }; const Builtin::Info MipsTargetInfoBase::BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\ ALL_LANGUAGES }, #include "clang/Basic/BuiltinsMips.def" }; class Mips32TargetInfoBase : public MipsTargetInfoBase { public: Mips32TargetInfoBase(const llvm::Triple &Triple) : MipsTargetInfoBase(Triple, "o32", "mips32r2") { SizeType = UnsignedInt; PtrDiffType = SignedInt; Int64Type = SignedLongLong; IntMaxType = Int64Type; MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32; } bool setABI(const std::string &Name) override { if (Name == "o32" || Name == "eabi") { ABI = Name; return true; } return false; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { MipsTargetInfoBase::getTargetDefines(Opts, Builder); Builder.defineMacro("__mips", "32"); Builder.defineMacro("_MIPS_ISA", "_MIPS_ISA_MIPS32"); const std::string& CPUStr = getCPU(); if (CPUStr == "mips32") Builder.defineMacro("__mips_isa_rev", "1"); else if (CPUStr == "mips32r2") Builder.defineMacro("__mips_isa_rev", "2"); else if (CPUStr == "mips32r3") Builder.defineMacro("__mips_isa_rev", "3"); else if (CPUStr == "mips32r5") Builder.defineMacro("__mips_isa_rev", "5"); else if (CPUStr == "mips32r6") Builder.defineMacro("__mips_isa_rev", "6"); if (ABI == "o32") { Builder.defineMacro("__mips_o32"); Builder.defineMacro("_ABIO32", "1"); Builder.defineMacro("_MIPS_SIM", "_ABIO32"); } else if (ABI == "eabi") Builder.defineMacro("__mips_eabi"); else llvm_unreachable("Invalid ABI for Mips32."); } void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override { static const TargetInfo::GCCRegAlias GCCRegAliases[] = { { { "at" }, "$1" }, { { "v0" }, "$2" }, { { "v1" }, "$3" }, { { "a0" }, "$4" }, { { "a1" }, "$5" }, { { "a2" }, "$6" }, { { "a3" }, "$7" }, { { "t0" }, "$8" }, { { "t1" }, "$9" }, { { "t2" }, "$10" }, { { "t3" }, "$11" }, { { "t4" }, "$12" }, { { "t5" }, "$13" }, { { "t6" }, "$14" }, { { "t7" }, "$15" }, { { "s0" }, "$16" }, { { "s1" }, "$17" }, { { "s2" }, "$18" }, { { "s3" }, "$19" }, { { "s4" }, "$20" }, { { "s5" }, "$21" }, { { "s6" }, "$22" }, { { "s7" }, "$23" }, { { "t8" }, "$24" }, { { "t9" }, "$25" }, { { "k0" }, "$26" }, { { "k1" }, "$27" }, { { "gp" }, "$28" }, { { "sp","$sp" }, "$29" }, { { "fp","$fp" }, "$30" }, { { "ra" }, "$31" } }; Aliases = GCCRegAliases; NumAliases = 
llvm::array_lengthof(GCCRegAliases); } }; class Mips32EBTargetInfo : public Mips32TargetInfoBase { void setDescriptionString() override { DescriptionString = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"; } public: Mips32EBTargetInfo(const llvm::Triple &Triple) : Mips32TargetInfoBase(Triple) { } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { DefineStd(Builder, "MIPSEB", Opts); Builder.defineMacro("_MIPSEB"); Mips32TargetInfoBase::getTargetDefines(Opts, Builder); } }; class Mips32ELTargetInfo : public Mips32TargetInfoBase { void setDescriptionString() override { DescriptionString = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"; } public: Mips32ELTargetInfo(const llvm::Triple &Triple) : Mips32TargetInfoBase(Triple) { BigEndian = false; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { DefineStd(Builder, "MIPSEL", Opts); Builder.defineMacro("_MIPSEL"); Mips32TargetInfoBase::getTargetDefines(Opts, Builder); } }; class Mips64TargetInfoBase : public MipsTargetInfoBase { public: Mips64TargetInfoBase(const llvm::Triple &Triple) : MipsTargetInfoBase(Triple, "n64", "mips64r2") { LongDoubleWidth = LongDoubleAlign = 128; LongDoubleFormat = &llvm::APFloat::IEEEquad; if (getTriple().getOS() == llvm::Triple::FreeBSD) { LongDoubleWidth = LongDoubleAlign = 64; LongDoubleFormat = &llvm::APFloat::IEEEdouble; } setN64ABITypes(); SuitableAlign = 128; MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64; } void setN64ABITypes() { LongWidth = LongAlign = 64; PointerWidth = PointerAlign = 64; SizeType = UnsignedLong; PtrDiffType = SignedLong; Int64Type = SignedLong; IntMaxType = Int64Type; } void setN32ABITypes() { LongWidth = LongAlign = 32; PointerWidth = PointerAlign = 32; SizeType = UnsignedInt; PtrDiffType = SignedInt; Int64Type = SignedLongLong; IntMaxType = Int64Type; } bool setABI(const std::string &Name) override { if (Name == "n32") { setN32ABITypes(); ABI = Name; return true; } if (Name == "n64") { setN64ABITypes(); ABI = Name; return true; } return false; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { MipsTargetInfoBase::getTargetDefines(Opts, Builder); Builder.defineMacro("__mips", "64"); Builder.defineMacro("__mips64"); Builder.defineMacro("__mips64__"); Builder.defineMacro("_MIPS_ISA", "_MIPS_ISA_MIPS64"); const std::string& CPUStr = getCPU(); if (CPUStr == "mips64") Builder.defineMacro("__mips_isa_rev", "1"); else if (CPUStr == "mips64r2") Builder.defineMacro("__mips_isa_rev", "2"); else if (CPUStr == "mips64r3") Builder.defineMacro("__mips_isa_rev", "3"); else if (CPUStr == "mips64r5") Builder.defineMacro("__mips_isa_rev", "5"); else if (CPUStr == "mips64r6") Builder.defineMacro("__mips_isa_rev", "6"); if (ABI == "n32") { Builder.defineMacro("__mips_n32"); Builder.defineMacro("_ABIN32", "2"); Builder.defineMacro("_MIPS_SIM", "_ABIN32"); } else if (ABI == "n64") { Builder.defineMacro("__mips_n64"); Builder.defineMacro("_ABI64", "3"); Builder.defineMacro("_MIPS_SIM", "_ABI64"); } else llvm_unreachable("Invalid ABI for Mips64."); } void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override { static const TargetInfo::GCCRegAlias GCCRegAliases[] = { { { "at" }, "$1" }, { { "v0" }, "$2" }, { { "v1" }, "$3" }, { { "a0" }, "$4" }, { { "a1" }, "$5" }, { { "a2" }, "$6" }, { { "a3" }, "$7" }, { { "a4" }, "$8" }, { { "a5" }, "$9" }, { { "a6" }, "$10" }, { { "a7" }, "$11" }, { { "t0" }, "$12" }, { { "t1" }, "$13" }, { { "t2" }, "$14" }, { { "t3" }, "$15" }, { 
{ "s0" }, "$16" }, { { "s1" }, "$17" }, { { "s2" }, "$18" }, { { "s3" }, "$19" }, { { "s4" }, "$20" }, { { "s5" }, "$21" }, { { "s6" }, "$22" }, { { "s7" }, "$23" }, { { "t8" }, "$24" }, { { "t9" }, "$25" }, { { "k0" }, "$26" }, { { "k1" }, "$27" }, { { "gp" }, "$28" }, { { "sp","$sp" }, "$29" }, { { "fp","$fp" }, "$30" }, { { "ra" }, "$31" } }; Aliases = GCCRegAliases; NumAliases = llvm::array_lengthof(GCCRegAliases); } bool hasInt128Type() const override { return true; } }; class Mips64EBTargetInfo : public Mips64TargetInfoBase { void setDescriptionString() override { if (ABI == "n32") DescriptionString = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128"; else DescriptionString = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"; } public: Mips64EBTargetInfo(const llvm::Triple &Triple) : Mips64TargetInfoBase(Triple) {} void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { DefineStd(Builder, "MIPSEB", Opts); Builder.defineMacro("_MIPSEB"); Mips64TargetInfoBase::getTargetDefines(Opts, Builder); } }; class Mips64ELTargetInfo : public Mips64TargetInfoBase { void setDescriptionString() override { if (ABI == "n32") DescriptionString = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128"; else DescriptionString = "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"; } public: Mips64ELTargetInfo(const llvm::Triple &Triple) : Mips64TargetInfoBase(Triple) { // Default ABI is n64. BigEndian = false; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { DefineStd(Builder, "MIPSEL", Opts); Builder.defineMacro("_MIPSEL"); Mips64TargetInfoBase::getTargetDefines(Opts, Builder); } }; class PNaClTargetInfo : public TargetInfo { public: PNaClTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) { BigEndian = false; this->UserLabelPrefix = ""; this->LongAlign = 32; this->LongWidth = 32; this->PointerAlign = 32; this->PointerWidth = 32; this->IntMaxType = TargetInfo::SignedLongLong; this->Int64Type = TargetInfo::SignedLongLong; this->DoubleAlign = 64; this->LongDoubleWidth = 64; this->LongDoubleAlign = 64; this->SizeType = TargetInfo::UnsignedInt; this->PtrDiffType = TargetInfo::SignedInt; this->IntPtrType = TargetInfo::SignedInt; this->RegParmMax = 0; // Disallow regparm } void getDefaultFeatures(llvm::StringMap &Features) const override { } void getArchDefines(const LangOptions &Opts, MacroBuilder &Builder) const { Builder.defineMacro("__le32__"); Builder.defineMacro("__pnacl__"); } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { getArchDefines(Opts, Builder); } bool hasFeature(StringRef Feature) const override { return Feature == "pnacl"; } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::PNaClABIBuiltinVaList; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override; void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override; bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const override { return false; } const char *getClobbers() const override { return ""; } }; void PNaClTargetInfo::getGCCRegNames(const char * const *&Names, unsigned &NumNames) const { Names = nullptr; NumNames = 0; } void PNaClTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const { Aliases = nullptr; NumAliases = 0; } // We attempt to use PNaCl (le32) frontend and Mips32EL backend. 
class NaClMips32ELTargetInfo : public Mips32ELTargetInfo { public: NaClMips32ELTargetInfo(const llvm::Triple &Triple) : Mips32ELTargetInfo(Triple) { MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 0; } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::PNaClABIBuiltinVaList; } }; class Le64TargetInfo : public TargetInfo { static const Builtin::Info BuiltinInfo[]; public: Le64TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) { BigEndian = false; NoAsmVariants = true; LongWidth = LongAlign = PointerWidth = PointerAlign = 64; MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64; DescriptionString = "e-m:e-v128:32-v16:16-v32:32-v96:32-n8:16:32:64-S128"; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { DefineStd(Builder, "unix", Opts); defineCPUMacros(Builder, "le64", /*Tuning=*/false); Builder.defineMacro("__ELF__"); } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { Records = BuiltinInfo; NumRecords = clang::Le64::LastTSBuiltin - Builtin::FirstTSBuiltin; } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::PNaClABIBuiltinVaList; } const char *getClobbers() const override { return ""; } void getGCCRegNames(const char *const *&Names, unsigned &NumNames) const override { Names = nullptr; NumNames = 0; } void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override { Aliases = nullptr; NumAliases = 0; } bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const override { return false; } bool hasProtectedVisibility() const override { return false; } }; } // end anonymous namespace. const Builtin::Info Le64TargetInfo::BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) \ { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #include "clang/Basic/BuiltinsLe64.def" }; namespace { static const unsigned SPIRAddrSpaceMap[] = { 1, // opencl_global 3, // opencl_local 2, // opencl_constant 4, // opencl_generic 0, // cuda_device 0, // cuda_constant 0 // cuda_shared }; class SPIRTargetInfo : public TargetInfo { public: SPIRTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) { assert(getTriple().getOS() == llvm::Triple::UnknownOS && "SPIR target must use unknown OS"); assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment && "SPIR target must use unknown environment type"); BigEndian = false; TLSSupported = false; LongWidth = LongAlign = 64; AddrSpaceMap = &SPIRAddrSpaceMap; UseAddrSpaceMapMangling = true; // Define available target features // These must be defined in sorted order! NoAsmVariants = true; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { DefineStd(Builder, "SPIR", Opts); } bool hasFeature(StringRef Feature) const override { return Feature == "spir"; } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override {} const char *getClobbers() const override { return ""; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override {} bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &info) const override { return true; } void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override {} BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::VoidPtrBuiltinVaList; } CallingConvCheckResult checkCallingConvention(CallingConv CC) const override { return (CC == CC_SpirFunction || CC == CC_SpirKernel) ? 
CCCR_OK : CCCR_Warning; } CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override { return CC_SpirFunction; } }; class SPIR32TargetInfo : public SPIRTargetInfo { public: SPIR32TargetInfo(const llvm::Triple &Triple) : SPIRTargetInfo(Triple) { PointerWidth = PointerAlign = 32; SizeType = TargetInfo::UnsignedInt; PtrDiffType = IntPtrType = TargetInfo::SignedInt; DescriptionString = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-" "v96:128-v192:256-v256:256-v512:512-v1024:1024"; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { DefineStd(Builder, "SPIR32", Opts); } }; class SPIR64TargetInfo : public SPIRTargetInfo { public: SPIR64TargetInfo(const llvm::Triple &Triple) : SPIRTargetInfo(Triple) { PointerWidth = PointerAlign = 64; SizeType = TargetInfo::UnsignedLong; PtrDiffType = IntPtrType = TargetInfo::SignedLong; DescriptionString = "e-i64:64-v16:16-v24:32-v32:32-v48:64-" "v96:128-v192:256-v256:256-v512:512-v1024:1024"; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { DefineStd(Builder, "SPIR64", Opts); } }; class XCoreTargetInfo : public TargetInfo { static const Builtin::Info BuiltinInfo[]; public: XCoreTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) { BigEndian = false; NoAsmVariants = true; LongLongAlign = 32; SuitableAlign = 32; DoubleAlign = LongDoubleAlign = 32; SizeType = UnsignedInt; PtrDiffType = SignedInt; IntPtrType = SignedInt; WCharType = UnsignedChar; WIntType = UnsignedInt; UseZeroLengthBitfieldAlignment = true; DescriptionString = "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:32" "-f64:32-a:0:32-n32"; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("__XS1B__"); } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { Records = BuiltinInfo; NumRecords = clang::XCore::LastTSBuiltin-Builtin::FirstTSBuiltin; } BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::VoidPtrBuiltinVaList; } const char *getClobbers() const override { return ""; } void getGCCRegNames(const char * const *&Names, unsigned &NumNames) const override { static const char * const GCCRegNames[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "cp", "dp", "sp", "lr" }; Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override { Aliases = nullptr; NumAliases = 0; } bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const override { return false; } int getEHDataRegisterNumber(unsigned RegNo) const override { // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister return (RegNo < 2)? RegNo : -1; } }; const Builtin::Info XCoreTargetInfo::BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\ ALL_LANGUAGES }, #include "clang/Basic/BuiltinsXCore.def" }; } // end anonymous namespace. 
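// A minimal sketch (editor's addition; TI names a hypothetical reference to
// one of the SPIR targets above) of how checkCallingConvention() and
// getDefaultCallingConv() are meant to cooperate: probe first, then fall back
// to the target default when the requested convention only rates CCCR_Warning.
//
//   CallingConv CC = CC_X86StdCall;                            // requested
//   if (TI.checkCallingConvention(CC) != TargetInfo::CCCR_OK)
//     CC = TI.getDefaultCallingConv(TargetInfo::CCMT_Unknown); // CC_SpirFunction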
namespace { // x86_32 Android target class AndroidX86_32TargetInfo : public LinuxTargetInfo { public: AndroidX86_32TargetInfo(const llvm::Triple &Triple) : LinuxTargetInfo(Triple) { SuitableAlign = 32; LongDoubleWidth = 64; LongDoubleFormat = &llvm::APFloat::IEEEdouble; } }; } // end anonymous namespace namespace { // x86_64 Android target class AndroidX86_64TargetInfo : public LinuxTargetInfo { public: AndroidX86_64TargetInfo(const llvm::Triple &Triple) : LinuxTargetInfo(Triple) { LongDoubleFormat = &llvm::APFloat::IEEEquad; } bool useFloat128ManglingForLongDouble() const override { return true; } }; } // end anonymous namespace //===----------------------------------------------------------------------===// // Driver code //===----------------------------------------------------------------------===// static TargetInfo *AllocateTarget(const llvm::Triple &Triple) { llvm::Triple::OSType os = Triple.getOS(); switch (Triple.getArch()) { default: return nullptr; case llvm::Triple::xcore: return new XCoreTargetInfo(Triple); case llvm::Triple::hexagon: return new HexagonTargetInfo(Triple); case llvm::Triple::aarch64: if (Triple.isOSDarwin()) return new DarwinAArch64TargetInfo(Triple); switch (os) { case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); default: return new AArch64leTargetInfo(Triple); } case llvm::Triple::aarch64_be: switch (os) { case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); default: return new AArch64beTargetInfo(Triple); } case llvm::Triple::arm: case llvm::Triple::thumb: if (Triple.isOSBinFormatMachO()) return new DarwinARMTargetInfo(Triple); switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); case llvm::Triple::OpenBSD: return new OpenBSDTargetInfo(Triple); case llvm::Triple::Bitrig: return new BitrigTargetInfo(Triple); case llvm::Triple::RTEMS: return new RTEMSTargetInfo(Triple); case llvm::Triple::NaCl: return new NaClTargetInfo(Triple); case llvm::Triple::Win32: switch (Triple.getEnvironment()) { case llvm::Triple::Itanium: return new ItaniumWindowsARMleTargetInfo(Triple); case llvm::Triple::MSVC: default: // Assume MSVC for unknown environments return new MicrosoftARMleTargetInfo(Triple); } default: return new ARMleTargetInfo(Triple); } case llvm::Triple::armeb: case llvm::Triple::thumbeb: if (Triple.isOSDarwin()) return new DarwinARMTargetInfo(Triple); switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); case llvm::Triple::OpenBSD: return new OpenBSDTargetInfo(Triple); case llvm::Triple::Bitrig: return new BitrigTargetInfo(Triple); case llvm::Triple::RTEMS: return new RTEMSTargetInfo(Triple); case llvm::Triple::NaCl: return new NaClTargetInfo(Triple); default: return new ARMbeTargetInfo(Triple); } case llvm::Triple::bpfeb: case llvm::Triple::bpfel: return new BPFTargetInfo(Triple); case llvm::Triple::msp430: return new MSP430TargetInfo(Triple); case llvm::Triple::mips: switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::RTEMS: return new 
RTEMSTargetInfo(Triple); case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); default: return new Mips32EBTargetInfo(Triple); } case llvm::Triple::mipsel: switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::RTEMS: return new RTEMSTargetInfo(Triple); case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); case llvm::Triple::NaCl: return new NaClTargetInfo(Triple); default: return new Mips32ELTargetInfo(Triple); } case llvm::Triple::mips64: switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::RTEMS: return new RTEMSTargetInfo(Triple); case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); case llvm::Triple::OpenBSD: return new OpenBSDTargetInfo(Triple); default: return new Mips64EBTargetInfo(Triple); } case llvm::Triple::mips64el: switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::RTEMS: return new RTEMSTargetInfo(Triple); case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); case llvm::Triple::OpenBSD: return new OpenBSDTargetInfo(Triple); default: return new Mips64ELTargetInfo(Triple); } case llvm::Triple::le32: switch (os) { case llvm::Triple::NaCl: return new NaClTargetInfo(Triple); default: return nullptr; } case llvm::Triple::le64: return new Le64TargetInfo(Triple); case llvm::Triple::ppc: if (Triple.isOSDarwin()) return new DarwinPPC32TargetInfo(Triple); switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); case llvm::Triple::OpenBSD: return new OpenBSDTargetInfo(Triple); case llvm::Triple::RTEMS: return new RTEMSTargetInfo(Triple); default: return new PPC32TargetInfo(Triple); } case llvm::Triple::ppc64: if (Triple.isOSDarwin()) return new DarwinPPC64TargetInfo(Triple); switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::Lv2: return new PS3PPUTargetInfo(Triple); case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); default: return new PPC64TargetInfo(Triple); } case llvm::Triple::ppc64le: switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); default: return new PPC64TargetInfo(Triple); } case llvm::Triple::nvptx: return new NVPTX32TargetInfo(Triple); case llvm::Triple::nvptx64: return new NVPTX64TargetInfo(Triple); case llvm::Triple::amdgcn: case llvm::Triple::r600: return new AMDGPUTargetInfo(Triple); case llvm::Triple::sparc: switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::Solaris: return new SolarisTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); case llvm::Triple::OpenBSD: return new OpenBSDTargetInfo(Triple); case llvm::Triple::RTEMS: return new RTEMSTargetInfo(Triple); default: return new SparcV8TargetInfo(Triple); } // The 'sparcel' architecture copies all the above cases except for Solaris. 
case llvm::Triple::sparcel: switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); case llvm::Triple::OpenBSD: return new OpenBSDTargetInfo(Triple); case llvm::Triple::RTEMS: return new RTEMSTargetInfo(Triple); default: return new SparcV8elTargetInfo(Triple); } case llvm::Triple::sparcv9: switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); case llvm::Triple::Solaris: return new SolarisTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); case llvm::Triple::OpenBSD: return new OpenBSDTargetInfo(Triple); case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); default: return new SparcV9TargetInfo(Triple); } case llvm::Triple::systemz: switch (os) { case llvm::Triple::Linux: return new LinuxTargetInfo(Triple); default: return new SystemZTargetInfo(Triple); } case llvm::Triple::tce: return new TCETargetInfo(Triple); case llvm::Triple::x86: if (Triple.isOSDarwin()) return new DarwinI386TargetInfo(Triple); switch (os) { case llvm::Triple::CloudABI: return new CloudABITargetInfo(Triple); case llvm::Triple::Linux: { switch (Triple.getEnvironment()) { default: return new LinuxTargetInfo(Triple); case llvm::Triple::Android: return new AndroidX86_32TargetInfo(Triple); } } case llvm::Triple::DragonFly: return new DragonFlyBSDTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDI386TargetInfo(Triple); case llvm::Triple::OpenBSD: return new OpenBSDI386TargetInfo(Triple); case llvm::Triple::Bitrig: return new BitrigI386TargetInfo(Triple); case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); case llvm::Triple::KFreeBSD: return new KFreeBSDTargetInfo(Triple); case llvm::Triple::Minix: return new MinixTargetInfo(Triple); case llvm::Triple::Solaris: return new SolarisTargetInfo(Triple); case llvm::Triple::Win32: { switch (Triple.getEnvironment()) { case llvm::Triple::Cygnus: return new CygwinX86_32TargetInfo(Triple); case llvm::Triple::GNU: return new MinGWX86_32TargetInfo(Triple); case llvm::Triple::Itanium: case llvm::Triple::MSVC: default: // Assume MSVC for unknown environments return new MicrosoftX86_32TargetInfo(Triple); } } case llvm::Triple::Haiku: return new HaikuX86_32TargetInfo(Triple); case llvm::Triple::RTEMS: return new RTEMSX86_32TargetInfo(Triple); case llvm::Triple::NaCl: return new NaClTargetInfo(Triple); default: return new X86_32TargetInfo(Triple); } case llvm::Triple::x86_64: if (Triple.isOSDarwin() || Triple.isOSBinFormatMachO()) return new DarwinX86_64TargetInfo(Triple); switch (os) { case llvm::Triple::CloudABI: return new CloudABITargetInfo(Triple); case llvm::Triple::Linux: { switch (Triple.getEnvironment()) { default: return new LinuxTargetInfo(Triple); case llvm::Triple::Android: return new AndroidX86_64TargetInfo(Triple); } } case llvm::Triple::DragonFly: return new DragonFlyBSDTargetInfo(Triple); case llvm::Triple::NetBSD: return new NetBSDTargetInfo(Triple); case llvm::Triple::OpenBSD: return new OpenBSDX86_64TargetInfo(Triple); case llvm::Triple::Bitrig: return new BitrigX86_64TargetInfo(Triple); case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo(Triple); case llvm::Triple::KFreeBSD: return new KFreeBSDTargetInfo(Triple); case llvm::Triple::Solaris: return new SolarisTargetInfo(Triple); case llvm::Triple::Win32: { switch (Triple.getEnvironment()) { case llvm::Triple::GNU: return new MinGWX86_64TargetInfo(Triple); case llvm::Triple::MSVC: default: // Assume MSVC for unknown environments return new 
MicrosoftX86_64TargetInfo(Triple);
      }
    }
    case llvm::Triple::NaCl:
      return new NaClTargetInfo<X86_64TargetInfo>(Triple);
    case llvm::Triple::PS4:
      return new PS4OSTargetInfo<X86_64TargetInfo>(Triple);
    default:
      return new X86_64TargetInfo(Triple);
    }

  case llvm::Triple::spir: {
    if (Triple.getOS() != llvm::Triple::UnknownOS ||
        Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
      return nullptr;
    return new SPIR32TargetInfo(Triple);
  }
  case llvm::Triple::spir64: {
    if (Triple.getOS() != llvm::Triple::UnknownOS ||
        Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
      return nullptr;
    return new SPIR64TargetInfo(Triple);
  }
  }
}

/// CreateTargetInfo - Return the target info object for the specified target
/// triple.
TargetInfo *
TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
                             const std::shared_ptr<TargetOptions> &Opts) {
  llvm::Triple Triple(Opts->Triple);

  // Construct the target
  std::unique_ptr<TargetInfo> Target(AllocateTarget(Triple));
  if (!Target) {
    Diags.Report(diag::err_target_unknown_triple) << Triple.str();
    return nullptr;
  }
  Target->TargetOpts = Opts;

  // Set the target CPU if specified.
  if (!Opts->CPU.empty() && !Target->setCPU(Opts->CPU)) {
    Diags.Report(diag::err_target_unknown_cpu) << Opts->CPU;
    return nullptr;
  }

  // Set the target ABI if specified.
  if (!Opts->ABI.empty() && !Target->setABI(Opts->ABI)) {
    Diags.Report(diag::err_target_unknown_abi) << Opts->ABI;
    return nullptr;
  }

  // Set the fp math unit.
  if (!Opts->FPMath.empty() && !Target->setFPMath(Opts->FPMath)) {
    Diags.Report(diag::err_target_unknown_fpmath) << Opts->FPMath;
    return nullptr;
  }

  // Compute the default target features, we need the target to handle this
  // because features may have dependencies on one another.
  llvm::StringMap<bool> Features;
  Target->getDefaultFeatures(Features);

  // Apply the user specified deltas.
  for (unsigned I = 0, N = Opts->FeaturesAsWritten.size(); I < N; ++I) {
    const char *Name = Opts->FeaturesAsWritten[I].c_str();
    // Apply the feature via the target.
    bool Enabled = Name[0] == '+';
    Target->setFeatureEnabled(Features, Name + 1, Enabled);
  }

  // Add the features to the compile options.
  //
  // FIXME: If we are completely confident that we have the right set, we only
  // need to pass the minuses.
  Opts->Features.clear();
  for (llvm::StringMap<bool>::const_iterator it = Features.begin(),
       ie = Features.end(); it != ie; ++it)
    Opts->Features.push_back((it->second ? "+" : "-") + it->first().str());
  if (!Target->handleTargetFeatures(Opts->Features, Diags))
    return nullptr;

  return Target.release();
}

Index: vendor/clang/dist/lib/Basic/Version.cpp
===================================================================
--- vendor/clang/dist/lib/Basic/Version.cpp (revision 292727)
+++ vendor/clang/dist/lib/Basic/Version.cpp (revision 292728)
@@ -1,151 +1,151 @@
//===- Version.cpp - Clang Version Number -----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several version-related utility functions for Clang.
//
//===----------------------------------------------------------------------===//

#include "clang/Basic/Version.h"
#include "clang/Basic/LLVM.h"
#include "clang/Config/config.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdlib>
#include <cstring>

#ifdef HAVE_SVN_VERSION_INC
#  include "SVNVersion.inc"
#endif

namespace clang {

std::string getClangRepositoryPath() {
#if defined(CLANG_REPOSITORY_STRING)
  return CLANG_REPOSITORY_STRING;
#else
#ifdef SVN_REPOSITORY
  StringRef URL(SVN_REPOSITORY);
#else
  StringRef URL("");
#endif

  // If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
  // pick up a tag in an SVN export, for example.
-  StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_370/final/lib/Basic/Version.cpp $");
+  StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_371/final/lib/Basic/Version.cpp $");
  if (URL.empty()) {
    URL = SVNRepository.slice(SVNRepository.find(':'),
                              SVNRepository.find("/lib/Basic"));
  }

  // Strip off version from a build from an integration branch.
  URL = URL.slice(0, URL.find("/src/tools/clang"));

  // Trim path prefix off, assuming path came from standard cfe path.
  size_t Start = URL.find("cfe/");
  if (Start != StringRef::npos)
    URL = URL.substr(Start + 4);

  return URL;
#endif
}

std::string getLLVMRepositoryPath() {
#ifdef LLVM_REPOSITORY
  StringRef URL(LLVM_REPOSITORY);
#else
  StringRef URL("");
#endif

  // Trim path prefix off, assuming path came from standard llvm path.
  // Leave "llvm/" prefix to distinguish the following llvm revision from the
  // clang revision.
  size_t Start = URL.find("llvm/");
  if (Start != StringRef::npos)
    URL = URL.substr(Start);

  return URL;
}

std::string getClangRevision() {
#ifdef SVN_REVISION
  return SVN_REVISION;
#else
  return "";
#endif
}

std::string getLLVMRevision() {
#ifdef LLVM_REVISION
  return LLVM_REVISION;
#else
  return "";
#endif
}

std::string getClangFullRepositoryVersion() {
  std::string buf;
  llvm::raw_string_ostream OS(buf);
  std::string Path = getClangRepositoryPath();
  std::string Revision = getClangRevision();
  if (!Path.empty() || !Revision.empty()) {
    OS << '(';
    if (!Path.empty())
      OS << Path;
    if (!Revision.empty()) {
      if (!Path.empty())
        OS << ' ';
      OS << Revision;
    }
    OS << ')';
  }
  // Support LLVM in a separate repository.
  std::string LLVMRev = getLLVMRevision();
  if (!LLVMRev.empty() && LLVMRev != Revision) {
    OS << " (";
    std::string LLVMRepo = getLLVMRepositoryPath();
    if (!LLVMRepo.empty())
      OS << LLVMRepo << ' ';
    OS << LLVMRev << ')';
  }
  return OS.str();
}

std::string getClangFullVersion() {
  return getClangToolFullVersion("clang");
}

std::string getClangToolFullVersion(StringRef ToolName) {
  std::string buf;
  llvm::raw_string_ostream OS(buf);
#ifdef CLANG_VENDOR
  OS << CLANG_VENDOR;
#endif
  OS << ToolName << " version " CLANG_VERSION_STRING " "
     << getClangFullRepositoryVersion();

  // If vendor supplied, include the base LLVM version as well.
#ifdef CLANG_VENDOR
  OS << " (based on " << BACKEND_PACKAGE_STRING << ")";
#endif

  return OS.str();
}

std::string getClangFullCPPVersion() {
  // The version string we report in __VERSION__ is just a compacted version of
  // the one we report on the command line.
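  // For example (editor's illustration; the exact spelling depends on how the
  // tree was built): with this tag the command line reports
  //   "clang version 3.7.1 (tags/RELEASE_371/final)"
  // while __VERSION__ compacts it to
  //   "Clang 3.7.1 (tags/RELEASE_371/final)"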
std::string buf; llvm::raw_string_ostream OS(buf); #ifdef CLANG_VENDOR OS << CLANG_VENDOR; #endif OS << "Clang " CLANG_VERSION_STRING " " << getClangFullRepositoryVersion(); return OS.str(); } } // end namespace clang Index: vendor/clang/dist/lib/CodeGen/CGCall.cpp =================================================================== --- vendor/clang/dist/lib/CodeGen/CGCall.cpp (revision 292727) +++ vendor/clang/dist/lib/CodeGen/CGCall.cpp (revision 292728) @@ -1,3627 +1,3622 @@ //===--- CGCall.cpp - Encapsulate calling convention details --------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // These classes wrap the information about a call or function // definition used to handle ABI compliancy. // //===----------------------------------------------------------------------===// #include "CGCall.h" #include "ABIInfo.h" #include "CGCXXABI.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "TargetInfo.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/Basic/TargetInfo.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "clang/Frontend/CodeGenOptions.h" #include "llvm/ADT/StringExtras.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/Transforms/Utils/Local.h" using namespace clang; using namespace CodeGen; /***/ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) { switch (CC) { default: return llvm::CallingConv::C; case CC_X86StdCall: return llvm::CallingConv::X86_StdCall; case CC_X86FastCall: return llvm::CallingConv::X86_FastCall; case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall; case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64; case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV; case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS; case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI; // TODO: Add support for __pascal to LLVM. case CC_X86Pascal: return llvm::CallingConv::C; // TODO: Add support for __vectorcall to LLVM. case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall; case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC; case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL; } } /// Derives the 'this' type for codegen purposes, i.e. ignoring method /// qualification. /// FIXME: address space qualification? static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) { QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal(); return Context.getPointerType(CanQualType::CreateUnsafe(RecTy)); } /// Returns the canonical formal type of the given C++ method. static CanQual GetFormalType(const CXXMethodDecl *MD) { return MD->getType()->getCanonicalTypeUnqualified() .getAs(); } /// Returns the "extra-canonicalized" return type, which discards /// qualifiers on the return type. Codegen doesn't care about them, /// and it makes ABI code a little easier to be able to assume that /// all parameter and return types are top-level unqualified. 
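/// For example (editor's note): 'const int f()' and 'int f()' arrange to the
/// same CGFunctionInfo; the top-level 'const' on the return type is dropped
/// here before any ABI decisions are made.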
static CanQualType GetReturnType(QualType RetTy) { return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); } /// Arrange the argument and result information for a value of the given /// unprototyped freestanding function type. const CGFunctionInfo & CodeGenTypes::arrangeFreeFunctionType(CanQual FTNP) { // When translating an unprototyped function type, always use a // variadic type. return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), /*instanceMethod=*/false, /*chainCall=*/false, None, FTNP->getExtInfo(), RequiredArgs(0)); } /// Arrange the LLVM function layout for a value of the given function /// type, on top of any implicit parameters already stored. static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl &prefix, CanQual FTP) { RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); // FIXME: Kill copy. prefix.append(FTP->param_type_begin(), FTP->param_type_end()); CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod, /*chainCall=*/false, prefix, FTP->getExtInfo(), required); } /// Arrange the argument and result information for a value of the /// given freestanding function type. const CGFunctionInfo & CodeGenTypes::arrangeFreeFunctionType(CanQual FTP) { SmallVector argTypes; return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes, FTP); } static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) { // Set the appropriate calling convention for the Function. if (D->hasAttr()) return CC_X86StdCall; if (D->hasAttr()) return CC_X86FastCall; if (D->hasAttr()) return CC_X86ThisCall; if (D->hasAttr()) return CC_X86VectorCall; if (D->hasAttr()) return CC_X86Pascal; if (PcsAttr *PCS = D->getAttr()) return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP); if (D->hasAttr()) return CC_IntelOclBicc; if (D->hasAttr()) return IsWindows ? CC_C : CC_X86_64Win64; if (D->hasAttr()) return IsWindows ? CC_X86_64SysV : CC_C; return CC_C; } /// Arrange the argument and result information for a call to an /// unknown C++ non-static member function of the given abstract type. /// (Zero value of RD means we don't have any meaningful "this" argument type, /// so fall back to a generic pointer type). /// The member function must be an ordinary function, i.e. not a /// constructor or destructor. const CGFunctionInfo & CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP) { SmallVector argTypes; // Add the 'this' pointer. if (RD) argTypes.push_back(GetThisType(Context, RD)); else argTypes.push_back(Context.VoidPtrTy); return ::arrangeLLVMFunctionInfo( *this, true, argTypes, FTP->getCanonicalTypeUnqualified().getAs()); } /// Arrange the argument and result information for a declaration or /// definition of the given C++ non-static member function. The /// member function must be an ordinary function, i.e. not a /// constructor or destructor. const CGFunctionInfo & CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { assert(!isa(MD) && "wrong method for constructors!"); assert(!isa(MD) && "wrong method for destructors!"); CanQual prototype = GetFormalType(MD); if (MD->isInstance()) { // The abstract case is perfectly fine. 
const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD); return arrangeCXXMethodType(ThisType, prototype.getTypePtr()); } return arrangeFreeFunctionType(prototype); } const CGFunctionInfo & CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type) { SmallVector argTypes; argTypes.push_back(GetThisType(Context, MD->getParent())); GlobalDecl GD; if (auto *CD = dyn_cast(MD)) { GD = GlobalDecl(CD, toCXXCtorType(Type)); } else { auto *DD = dyn_cast(MD); GD = GlobalDecl(DD, toCXXDtorType(Type)); } CanQual FTP = GetFormalType(MD); // Add the formal parameters. argTypes.append(FTP->param_type_begin(), FTP->param_type_end()); TheCXXABI.buildStructorSignature(MD, Type, argTypes); RequiredArgs required = (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All); FunctionType::ExtInfo extInfo = FTP->getExtInfo(); CanQualType resultType = TheCXXABI.HasThisReturn(GD) ? argTypes.front() : TheCXXABI.hasMostDerivedReturn(GD) ? CGM.getContext().VoidPtrTy : Context.VoidTy; return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true, /*chainCall=*/false, argTypes, extInfo, required); } /// Arrange a call to a C++ method, passing the given arguments. const CGFunctionInfo & CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraArgs) { // FIXME: Kill copy. SmallVector ArgTypes; for (const auto &Arg : args) ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); CanQual FPT = GetFormalType(D); RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs); GlobalDecl GD(D, CtorKind); CanQualType ResultType = TheCXXABI.HasThisReturn(GD) ? ArgTypes.front() : TheCXXABI.hasMostDerivedReturn(GD) ? CGM.getContext().VoidPtrTy : Context.VoidTy; FunctionType::ExtInfo Info = FPT->getExtInfo(); return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true, /*chainCall=*/false, ArgTypes, Info, Required); } /// Arrange the argument and result information for the declaration or /// definition of the given function. const CGFunctionInfo & CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { if (const CXXMethodDecl *MD = dyn_cast(FD)) if (MD->isInstance()) return arrangeCXXMethodDeclaration(MD); CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); assert(isa(FTy)); // When declaring a function without a prototype, always use a // non-variadic type. if (isa(FTy)) { CanQual noProto = FTy.getAs(); return arrangeLLVMFunctionInfo( noProto->getReturnType(), /*instanceMethod=*/false, /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All); } assert(isa(FTy)); return arrangeFreeFunctionType(FTy.getAs()); } /// Arrange the argument and result information for the declaration or /// definition of an Objective-C method. const CGFunctionInfo & CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) { // It happens that this is the same as a call with no optional // arguments, except also using the formal 'self' type. return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType()); } /// Arrange the argument and result information for the function type /// through which to perform a send to the given Objective-C method, /// using the given receiver type. The receiver type is not always /// the 'self' type of the method or even an Objective-C pointer type. /// This is *not* the right method for actually performing such a /// message send, due to the possibility of optional arguments. 
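/// For example (editor's note): for '- (int)countOf:(float)x' the arranged
/// type is roughly 'int(id self, SEL _cmd, float x)'; the receiver and
/// selector slots are prepended below before the formal parameters.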
const CGFunctionInfo & CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType) { SmallVector argTys; argTys.push_back(Context.getCanonicalParamType(receiverType)); argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType())); // FIXME: Kill copy? for (const auto *I : MD->params()) { argTys.push_back(Context.getCanonicalParamType(I->getType())); } FunctionType::ExtInfo einfo; bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows(); einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows)); if (getContext().getLangOpts().ObjCAutoRefCount && MD->hasAttr()) einfo = einfo.withProducesResult(true); RequiredArgs required = (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All); return arrangeLLVMFunctionInfo( GetReturnType(MD->getReturnType()), /*instanceMethod=*/false, /*chainCall=*/false, argTys, einfo, required); } const CGFunctionInfo & CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { // FIXME: Do we need to handle ObjCMethodDecl? const FunctionDecl *FD = cast(GD.getDecl()); if (const CXXConstructorDecl *CD = dyn_cast(FD)) return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType())); if (const CXXDestructorDecl *DD = dyn_cast(FD)) return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType())); return arrangeFunctionDeclaration(FD); } /// Arrange a thunk that takes 'this' as the first parameter followed by /// varargs. Return a void pointer, regardless of the actual return type. /// The body of the thunk will end in a musttail call to a function of the /// correct type, and the caller will bitcast the function to the correct /// prototype. const CGFunctionInfo & CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) { assert(MD->isVirtual() && "only virtual memptrs have thunks"); CanQual FTP = GetFormalType(MD); CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) }; return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false, /*chainCall=*/false, ArgTys, FTP->getExtInfo(), RequiredArgs(1)); } const CGFunctionInfo & CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT) { assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); CanQual FTP = GetFormalType(CD); SmallVector ArgTys; const CXXRecordDecl *RD = CD->getParent(); ArgTys.push_back(GetThisType(Context, RD)); if (CT == Ctor_CopyingClosure) ArgTys.push_back(*FTP->param_type_begin()); if (RD->getNumVBases() > 0) ArgTys.push_back(Context.IntTy); CallingConv CC = Context.getDefaultCallingConvention( /*IsVariadic=*/false, /*IsCXXMethod=*/true); return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true, /*chainCall=*/false, ArgTys, FunctionType::ExtInfo(CC), RequiredArgs::All); } /// Arrange a call as unto a free function, except possibly with an /// additional number of formal parameters considered required. static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall) { assert(args.size() >= numExtraRequiredArgs); // In most cases, there are no optional arguments. RequiredArgs required = RequiredArgs::All; // If we have a variadic prototype, the required arguments are the // extra prefix plus the arguments in the prototype. 
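  // For example (editor's note): a block call whose underlying function type
  // is 'void (int, ...)' reaches here with numExtraRequiredArgs == 1 (the
  // implicit block pointer), so required becomes RequiredArgs(1 + 1) no
  // matter how many variadic arguments the call site actually passes.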
if (const FunctionProtoType *proto = dyn_cast(fnType)) { if (proto->isVariadic()) required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs); // If we don't have a prototype at all, but we're supposed to // explicitly use the variadic convention for unprototyped calls, // treat all of the arguments as required but preserve the nominal // possibility of variadics. } else if (CGM.getTargetCodeGenInfo() .isNoProtoCallVariadic(args, cast(fnType))) { required = RequiredArgs(args.size()); } // FIXME: Kill copy. SmallVector argTypes; for (const auto &arg : args) argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()), /*instanceMethod=*/false, chainCall, argTypes, fnType->getExtInfo(), required); } /// Figure out the rules for calling a function with the given formal /// type using the given arguments. The arguments are necessary /// because the function might be unprototyped, in which case it's /// target-dependent in crazy ways. const CGFunctionInfo & CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args, const FunctionType *fnType, bool chainCall) { return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, chainCall ? 1 : 0, chainCall); } /// A block function call is essentially a free-function call with an /// extra implicit argument. const CGFunctionInfo & CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *fnType) { return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1, /*chainCall=*/false); } const CGFunctionInfo & CodeGenTypes::arrangeFreeFunctionCall(QualType resultType, const CallArgList &args, FunctionType::ExtInfo info, RequiredArgs required) { // FIXME: Kill copy. SmallVector argTypes; for (const auto &Arg : args) argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); return arrangeLLVMFunctionInfo( GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false, argTypes, info, required); } /// Arrange a call to a C++ method, passing the given arguments. const CGFunctionInfo & CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *FPT, RequiredArgs required) { // FIXME: Kill copy. SmallVector argTypes; for (const auto &Arg : args) argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); FunctionType::ExtInfo info = FPT->getExtInfo(); return arrangeLLVMFunctionInfo( GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true, /*chainCall=*/false, argTypes, info, required); } const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration( QualType resultType, const FunctionArgList &args, const FunctionType::ExtInfo &info, bool isVariadic) { // FIXME: Kill copy. SmallVector argTypes; for (auto Arg : args) argTypes.push_back(Context.getCanonicalParamType(Arg->getType())); RequiredArgs required = (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All); return arrangeLLVMFunctionInfo( GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false, argTypes, info, required); } const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() { return arrangeLLVMFunctionInfo( getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false, None, FunctionType::ExtInfo(), RequiredArgs::All); } /// Arrange the argument and result information for an abstract value /// of a given function type. This is the method which all of the /// above functions ultimately defer to. 
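/// (Editor's summary of the lookup below.) The result is uniqued: the tuple
/// of calling convention, instance-method and chain-call flags, required
/// arguments, and result and argument types is profiled into a FoldingSet,
/// so arranging the same signature twice yields the same CGFunctionInfo node.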
const CGFunctionInfo & CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType, bool instanceMethod, bool chainCall, ArrayRef argTypes, FunctionType::ExtInfo info, RequiredArgs required) { assert(std::all_of(argTypes.begin(), argTypes.end(), std::mem_fun_ref(&CanQualType::isCanonicalAsParam))); unsigned CC = ClangCallConvToLLVMCallConv(info.getCC()); // Lookup or create unique function info. llvm::FoldingSetNodeID ID; CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required, resultType, argTypes); void *insertPos = nullptr; CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos); if (FI) return *FI; // Construct the function info. We co-allocate the ArgInfos. FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info, resultType, argTypes, required); FunctionInfos.InsertNode(FI, insertPos); bool inserted = FunctionsBeingProcessed.insert(FI).second; (void)inserted; assert(inserted && "Recursively being processed?"); // Compute ABI information. getABIInfo().computeInfo(*FI); // Loop over all of the computed argument and return value info. If any of // them are direct or extend without a specified coerce type, specify the // default now. ABIArgInfo &retInfo = FI->getReturnInfo(); if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) retInfo.setCoerceToType(ConvertType(FI->getReturnType())); for (auto &I : FI->arguments()) if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) I.info.setCoerceToType(ConvertType(I.type)); bool erased = FunctionsBeingProcessed.erase(FI); (void)erased; assert(erased && "Not in set?"); return *FI; } CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &info, CanQualType resultType, ArrayRef argTypes, RequiredArgs required) { void *buffer = operator new(sizeof(CGFunctionInfo) + sizeof(ArgInfo) * (argTypes.size() + 1)); CGFunctionInfo *FI = new(buffer) CGFunctionInfo(); FI->CallingConvention = llvmCC; FI->EffectiveCallingConvention = llvmCC; FI->ASTCallingConvention = info.getCC(); FI->InstanceMethod = instanceMethod; FI->ChainCall = chainCall; FI->NoReturn = info.getNoReturn(); FI->ReturnsRetained = info.getProducesResult(); FI->Required = required; FI->HasRegParm = info.getHasRegParm(); FI->RegParm = info.getRegParm(); FI->ArgStruct = nullptr; FI->NumArgs = argTypes.size(); FI->getArgsBuffer()[0].type = resultType; for (unsigned i = 0, e = argTypes.size(); i != e; ++i) FI->getArgsBuffer()[i + 1].type = argTypes[i]; return FI; } /***/ namespace { // ABIArgInfo::Expand implementation. // Specifies the way QualType passed as ABIArgInfo::Expand is expanded. struct TypeExpansion { enum TypeExpansionKind { // Elements of constant arrays are expanded recursively. TEK_ConstantArray, // Record fields are expanded recursively (but if record is a union, only // the field with the largest size is expanded). TEK_Record, // For complex types, real and imaginary parts are expanded recursively. TEK_Complex, // All other types are not expandable. 
TEK_None }; const TypeExpansionKind Kind; TypeExpansion(TypeExpansionKind K) : Kind(K) {} virtual ~TypeExpansion() {} }; struct ConstantArrayExpansion : TypeExpansion { QualType EltTy; uint64_t NumElts; ConstantArrayExpansion(QualType EltTy, uint64_t NumElts) : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {} static bool classof(const TypeExpansion *TE) { return TE->Kind == TEK_ConstantArray; } }; struct RecordExpansion : TypeExpansion { SmallVector Bases; SmallVector Fields; RecordExpansion(SmallVector &&Bases, SmallVector &&Fields) : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {} static bool classof(const TypeExpansion *TE) { return TE->Kind == TEK_Record; } }; struct ComplexExpansion : TypeExpansion { QualType EltTy; ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {} static bool classof(const TypeExpansion *TE) { return TE->Kind == TEK_Complex; } }; struct NoExpansion : TypeExpansion { NoExpansion() : TypeExpansion(TEK_None) {} static bool classof(const TypeExpansion *TE) { return TE->Kind == TEK_None; } }; } // namespace static std::unique_ptr getTypeExpansion(QualType Ty, const ASTContext &Context) { if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { return llvm::make_unique( AT->getElementType(), AT->getSize().getZExtValue()); } if (const RecordType *RT = Ty->getAs()) { SmallVector Bases; SmallVector Fields; const RecordDecl *RD = RT->getDecl(); assert(!RD->hasFlexibleArrayMember() && "Cannot expand structure with flexible array."); if (RD->isUnion()) { // Unions can be here only in degenerative cases - all the fields are same // after flattening. Thus we have to use the "largest" field. const FieldDecl *LargestFD = nullptr; CharUnits UnionSize = CharUnits::Zero(); for (const auto *FD : RD->fields()) { // Skip zero length bitfields. if (FD->isBitField() && FD->getBitWidthValue(Context) == 0) continue; assert(!FD->isBitField() && "Cannot expand structure with bit-field members."); CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType()); if (UnionSize < FieldSize) { UnionSize = FieldSize; LargestFD = FD; } } if (LargestFD) Fields.push_back(LargestFD); } else { if (const auto *CXXRD = dyn_cast(RD)) { assert(!CXXRD->isDynamicClass() && "cannot expand vtable pointers in dynamic classes"); for (const CXXBaseSpecifier &BS : CXXRD->bases()) Bases.push_back(&BS); } for (const auto *FD : RD->fields()) { // Skip zero length bitfields. 
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}
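// [Editor's illustration, not part of the vendor import.] Worked example for
// getExpansionSize, assuming a hypothetical record:
//
//   struct S { int A; float B[2]; _Complex double C; };
//
// A contributes 1, the constant array B contributes 2 * 1, and the complex
// field C contributes 2 (real and imaginary parts), so the expansion
// occupies 1 + 2 + 2 = 5 IR arguments.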
void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      llvm::Value *EltAddr =
          Builder.CreateConstGEP2_32(nullptr, LV.getAddress(), 0, i);
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    llvm::Value *This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      llvm::Value *Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForField(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Value *RealAddr =
        Builder.CreateStructGEP(nullptr, LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(*AI++),
                           MakeAddrLValue(RealAddr, CExp->EltTy));
    llvm::Value *ImagAddr =
        Builder.CreateStructGEP(nullptr, LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(*AI++),
                           MakeAddrLValue(ImagAddr, CExp->EltTy));
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    llvm::Value *Addr = RV.getAggregateAddr();
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(nullptr, Addr, 0, i);
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    llvm::Value *This = RV.getAggregateAddr();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      llvm::Value *Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcSTy, SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
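// [Editor's illustration, not part of the vendor import.] The shifts above
// reproduce what a store-then-load through memory would yield. Coercing a
// hypothetical i64 value down to i32:
//
//   ; big-endian target: memory keeps the high bits, so shift them down
//   %hi  = lshr i64 %val, 32          ; "coerce.highbits"
//   %res = trunc i64 %hi to i32       ; "coerce.val.ii"
//
//   ; little-endian target: memory keeps the low bits, plain trunc suffices
//   %res = trunc i64 %val to i32      ; "coerce.val.ii"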
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr, llvm::Type *Ty,
                                      CharUnits SrcAlign,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load =
      CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateAlignedLoad(Casted, SrcAlign.getQuantity());
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(Ty);
  Tmp->setAlignment(SrcAlign.getQuantity());
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
      SrcAlign.getQuantity(), false);
  return CGF.Builder.CreateAlignedLoad(Tmp, SrcAlign.getQuantity());
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          CharUnits DestAlign) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
      CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(STy, DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      uint64_t EltOffset = Layout->getElementOffset(i);
      CharUnits EltAlign =
        DestAlign.alignmentAtOffset(CharUnits::fromQuantity(EltOffset));
      CGF.Builder.CreateAlignedStore(Elt, EltPtr, EltAlign.getQuantity(),
                                     DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateAlignedStore(Val, DestPtr, DestAlign.getQuantity(),
                                   DestIsVolatile);
  }
}
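// [Editor's illustration, not part of the vendor import.] For a first-class
// aggregate value of type { i32, i64 } stored to an 8-byte-aligned
// destination, BuildAggStore above emits roughly:
//
//   %e0p = getelementptr { i32, i64 }, { i32, i64 }* %dst, i32 0, i32 0
//   %e0  = extractvalue { i32, i64 } %val, 0
//   store i32 %e0, i32* %e0p, align 8
//   %e1p = getelementptr { i32, i64 }, { i32, i64 }* %dst, i32 0, i32 1
//   %e1  = extractvalue { i32, i64 } %val, 1
//   store i64 %e1, i64* %e1p, align 8   ; alignment recomputed at the
//                                       ; element's offset in the layout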
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CharUnits DstAlign,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
                                   DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
                                   DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, DstAlign);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(SrcTy);
    Tmp->setAlignment(DstAlign.getQuantity());
    CGF.Builder.CreateAlignedStore(Src, Tmp, DstAlign.getQuantity());
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
        DstAlign.getQuantity(), false);
  }
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::Expand: {
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
}  // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}
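// [Editor's illustration, not part of the vendor import.] How the mapping
// built by ClangToLLVMArgMapping::construct lays out IR arguments for a
// hypothetical signature whose struct return is passed indirectly and whose
// single Clang argument is expanded:
//
//   Big f(_Complex float C);
//
//   IR arg 0: %agg.result       (sret)   -> getSRetArgNo() == 0
//   IR arg 1: float (real part of C)     -> getIRArgs(0)   == {1, 2}
//   IR arg 2: float (imaginary part)
//   totalIRArgs() == 3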
llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

-  case ABIArgInfo::Indirect: {
-    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
-    resultType = llvm::Type::getVoidTy(getLLVMContext());
-    break;
-  }
-
+  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}
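// [Editor's illustration, not part of the vendor import.] The dummy-type path
// in GetFunctionTypeForVTable is reached for cases like this (hypothetical):
//
//   struct Incomplete;
//   struct Base {
//     virtual Incomplete f();   // return type still incomplete here
//   };
//
// Base's vtable must be laid out even though f's IR type cannot be computed
// yet, so the slot gets an empty struct type and is bitcast to the real
// function type once that becomes convertible.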
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;
  bool HasOptnone = false;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NonNull);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
  }

  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (CodeGenOpts.EnableSegmentedStacks &&
      !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
    FuncAttrs.addAttribute("split-stack");

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls)
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    FuncAttrs.addAttribute("disable-tail-calls",
                           llvm::toStringRef(CodeGenOpts.DisableTailCalls));
    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (!CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("no-realign-stack");

    // Add target-cpu and target-features attributes to functions. If
    // we have a decl for the function and it has a target attribute then
    // parse that and add it to the feature set.
    StringRef TargetCPU = getTarget().getTargetOpts().CPU;

    // TODO: Features gets us the features on the command line including
    // feature dependencies. For canonicalization purposes we might want to
    // avoid putting features in the target-features set if we know it'll be
    // one of the default features in the backend, e.g. corei7-avx and +avx or
    // figure out non-explicit dependencies.
    // Canonicalize the existing features in a new feature map.
    // TODO: Migrate the existing backends to keep the map around rather than
    // the vector.
    llvm::StringMap<bool> FeatureMap;
    for (auto F : getTarget().getTargetOpts().Features) {
      const char *Name = F.c_str();
      bool Enabled = Name[0] == '+';
      getTarget().setFeatureEnabled(FeatureMap, Name + 1, Enabled);
    }

    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
    if (FD) {
      if (const auto *TD = FD->getAttr<TargetAttr>()) {
        StringRef FeaturesStr = TD->getFeatures();
        SmallVector<StringRef, 1> AttrFeatures;
        FeaturesStr.split(AttrFeatures, ",");

        // Grab the various features and prepend a "+" to turn on the feature to
        // the backend and add them to our existing set of features.
        for (auto &Feature : AttrFeatures) {
          // Go ahead and trim whitespace rather than either erroring or
          // accepting it weirdly.
          Feature = Feature.trim();

          // While we're here iterating check for a different target cpu.
if (Feature.startswith("arch=")) TargetCPU = Feature.split("=").second.trim(); else if (Feature.startswith("tune=")) // We don't support cpu tuning this way currently. ; else if (Feature.startswith("fpmath=")) // TODO: Support the fpmath option this way. It will require checking // overall feature validity for the function with the rest of the // attributes on the function. ; else if (Feature.startswith("mno-")) getTarget().setFeatureEnabled(FeatureMap, Feature.split("-").second, false); else getTarget().setFeatureEnabled(FeatureMap, Feature, true); } } } // Produce the canonical string for this set of features. std::vector Features; for (llvm::StringMap::const_iterator it = FeatureMap.begin(), ie = FeatureMap.end(); it != ie; ++it) Features.push_back((it->second ? "+" : "-") + it->first().str()); // Now add the target-cpu and target-features to the function. if (TargetCPU != "") FuncAttrs.addAttribute("target-cpu", TargetCPU); if (!Features.empty()) { std::sort(Features.begin(), Features.end()); FuncAttrs.addAttribute("target-features", llvm::join(Features.begin(), Features.end(), ",")); } } ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); QualType RetTy = FI.getReturnType(); const ABIArgInfo &RetAI = FI.getReturnInfo(); switch (RetAI.getKind()) { case ABIArgInfo::Extend: if (RetTy->hasSignedIntegerRepresentation()) RetAttrs.addAttribute(llvm::Attribute::SExt); else if (RetTy->hasUnsignedIntegerRepresentation()) RetAttrs.addAttribute(llvm::Attribute::ZExt); // FALL THROUGH case ABIArgInfo::Direct: if (RetAI.getInReg()) RetAttrs.addAttribute(llvm::Attribute::InReg); break; case ABIArgInfo::Ignore: break; case ABIArgInfo::InAlloca: case ABIArgInfo::Indirect: { // inalloca and sret disable readnone and readonly FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) .removeAttribute(llvm::Attribute::ReadNone); break; } case ABIArgInfo::Expand: llvm_unreachable("Invalid ABI kind for return argument"); } if (const auto *RefTy = RetTy->getAs()) { QualType PTy = RefTy->getPointeeType(); if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) .getQuantity()); else if (getContext().getTargetAddressSpace(PTy) == 0) RetAttrs.addAttribute(llvm::Attribute::NonNull); } // Attach return attributes. if (RetAttrs.hasAttributes()) { PAL.push_back(llvm::AttributeSet::get( getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs)); } // Attach attributes to sret. if (IRFunctionArgs.hasSRetArg()) { llvm::AttrBuilder SRETAttrs; SRETAttrs.addAttribute(llvm::Attribute::StructRet); if (RetAI.getInReg()) SRETAttrs.addAttribute(llvm::Attribute::InReg); PAL.push_back(llvm::AttributeSet::get( getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs)); } // Attach attributes to inalloca argument. if (IRFunctionArgs.hasInallocaArg()) { llvm::AttrBuilder Attrs; Attrs.addAttribute(llvm::Attribute::InAlloca); PAL.push_back(llvm::AttributeSet::get( getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs)); } unsigned ArgNo = 0; for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), E = FI.arg_end(); I != E; ++I, ++ArgNo) { QualType ParamType = I->type; const ABIArgInfo &AI = I->info; llvm::AttrBuilder Attrs; // Add attribute for padding argument, if necessary. 
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(
            getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
            llvm::Attribute::InReg));
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
        if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
          Attrs.addAttribute(llvm::Attribute::SExt);
        else
          Attrs.addAttribute(llvm::Attribute::ZExt);
      }
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (ArgNo == 0 && FI.isChainCall())
        Attrs.addAttribute(llvm::Attribute::Nest);
      else if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);
      break;

    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
    case ABIArgInfo::Expand:
      continue;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      continue;
    }

    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                       .getQuantity());
      else if (getContext().getTargetAddressSpace(PTy) == 0)
        Attrs.addAttribute(llvm::Attribute::NonNull);
    }

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                              FirstIRArg + i + 1, Attrs));
    }
  }
  assert(ArgNo == FI.arg_size());

  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::FunctionIndex,
                                    FuncAttrs));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
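// [Editor's illustration, not part of the vendor import.] Demotion matters for
// K&R-style definitions, where callers pass the promoted type but the body
// uses the declared one:
//
//   void f(c) char c; { }   /* caller passes int; the body wants char */
//
// Here value->getType() is i32 while varType is i8, so the code above emits a
// trunc; a float parameter (promoted to double) takes the CreateFPCast path.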
/// Returns the attribute (either parameter attribute, or function
/// attribute), which declares argument ArgNo to be non-null.
static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
                                         QualType ArgType, unsigned ArgNo) {
  // FIXME: __attribute__((nonnull)) can also be applied to:
  //   - references to pointers, where the pointee is known to be
  //     nonnull (apparently a Clang extension)
  //   - transparent unions containing pointers
  // In the former case, LLVM IR cannot represent the constraint. In
  // the latter case, we have no guarantee that the transparent union
  // is in fact passed as a pointer.
  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
    return nullptr;
  // First, check attribute on parameter itself.
  if (PVD) {
    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
      return ParmNNAttr;
  }
  // Check function attributes.
  if (!FD)
    return nullptr;
  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
    if (NNAttr->isNonNull(ArgNo))
      return NNAttr;
  }
  return nullptr;
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
    // Naked functions don't have prologues.
    return;

  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
  // Flattened function arguments.
  SmallVector<llvm::Argument *, 16> FnArgs;
  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
  for (auto &Arg : Fn->args()) {
    FnArgs.push_back(&Arg);
  }
  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  llvm::Value *ArgStruct = nullptr;
  if (IRFunctionArgs.hasInallocaArg()) {
    ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
    assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
  }

  // Name the struct return parameter.
  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
    AI->setName("agg.result");
    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
                                        llvm::Attribute::NoAlias));
  }

  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca).  If we already have a pointer, EmitParmDecl doesn't need to
  // copy it into a local alloca for us.
  enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
  typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
  SmallVector<ValueAndIsPtr, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration.  This usually
  // entails copying one or more LLVM IR arguments into an alloca.  Don't push
  // any cleanups or do anything that might unwind.  We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      llvm::Value *V =
          Builder.CreateStructGEP(FI.getArgStruct(), ArgStruct,
                                  ArgI.getInAllocaFieldIndex(), Arg->getName());
      ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      llvm::Value *V = FnArgs[FirstIRArg];

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, ArgI.getIndirectAlign(), Ty,
                             Arg->getLocStart());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        auto AI = FnArgs[FirstIRArg];
        llvm::Value *V = AI;

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
          if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
                             PVD->getFunctionScopeIndex()))
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1,
                                                llvm::Attribute::NonNull));

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
              getContext().getAsConstantArrayType(OTy)) {
            // A C99 array parameter declaration with the static keyword also
            // indicates dereferenceability, and if the size is constant we can
            // use the dereferenceable attribute (which requires the size in
            // bytes).
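            // [Editor's illustration, not part of the vendor import.] For a
            // hypothetical declaration
            //
            //   void g(int a[static 4]);
            //
            // the parameter is known to point at storage for at least four
            // ints, so the IR argument below gets dereferenceable(16), i.e.
            // 4 * sizeof(int) bytes.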
            if (ArrTy->getSizeModifier() == ArrayType::Static) {
              QualType ETy = ArrTy->getElementType();
              uint64_t ArrSize = ArrTy->getSize().getZExtValue();
              if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                  ArrSize) {
                llvm::AttrBuilder Attrs;
                Attrs.addDereferenceableAttr(
                  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1, Attrs));
              } else if (getContext().getTargetAddressSpace(ETy) == 0) {
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1,
                                                    llvm::Attribute::NonNull));
              }
            }
          } else if (const auto *ArrTy =
                     getContext().getAsVariableArrayType(OTy)) {
            // For C99 VLAs with the static keyword, we don't know the size so
            // we can't use the dereferenceable attribute, but in addrspace(0)
            // we know that it must be nonnull.
            if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
                !getContext().getTargetAddressSpace(ArrTy->getElementType()))
              AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                  AI->getArgNo() + 1,
                                                  llvm::Attribute::NonNull));
          }

          const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
          if (!AVAttr)
            if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
              AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
          if (AVAttr) {
            llvm::Value *AlignmentValue =
              EmitScalarExpr(AVAttr->getAlignment());
            llvm::ConstantInt *AlignmentCI =
              cast<llvm::ConstantInt>(AlignmentValue);
            unsigned Alignment =
              std::min((unsigned) AlignmentCI->getZExtValue(),
+                      llvm::Value::MaximumAlignment);
            llvm::AttrBuilder Attrs;
            Attrs.addAlignmentAttr(Alignment);
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1, Attrs));
          }
        }

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                              AI->getArgNo() + 1,
                                              llvm::Attribute::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        if (const CXXMethodDecl *MD =
            dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
          if (MD->isVirtual() && Arg == CXXABIThisDecl)
            V = CGM.getCXXABI().
                adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
        }

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment for
      // the argument plus the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.
      CharUnits PtrAlign = CharUnits::fromQuantity(AlignmentToUse);

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
        PtrAlign = PtrAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
          STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = FnArgs[FirstIRArg + i];
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, Ptr, 0, i);
            Builder.CreateStore(AI, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = FnArgs[FirstIRArg + i];
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr =
                Builder.CreateConstGEP2_32(ArgI.getCoerceToType(), TempV, 0, i);
            Builder.CreateStore(AI, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(NumIRArgs == 1);
        auto AI = FnArgs[FirstIRArg];
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, PtrAlign, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      } else {
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      }
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));

      auto FnArgIter = FnArgs.begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = FnArgs[FirstIRArg + i];
        AI->setName(Arg->getName() + "." + Twine(i));
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
      }
      break;
    }
  }

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
                   I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
                   I + 1);
  }
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // We must be immediately followed by the cast.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //  %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return nullptr;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call.  If we can't find it, we can't do this
    // optimization.  But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
               CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return nullptr;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return nullptr;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return nullptr;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return nullptr;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}
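// [Editor's illustration, not part of the vendor import.] The fusion performed
// by tryEmitFusedAutoreleaseOfResult rewrites a return sequence of the shape
//
//   %ret = call i8* @objc_retain(i8* %obj)
//   ret i8* %ret
//
// into a single combined runtime call:
//
//   %ret = call i8* @objc_retainAutoreleaseReturnValue(i8* %obj)
//   ret i8* %ret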
/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;
    llvm::Instruction *I = &IP->back();

    // Skip lifetime markers
    for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
                                            IE = IP->rend();
         II != IE; ++II) {
      if (llvm::IntrinsicInst *Intrinsic =
              dyn_cast<llvm::IntrinsicInst>(&*II)) {
        if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
          const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
          ++II;
          if (II == IE)
            break;
          if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
            continue;
        }
      }
      I = &*II;
      break;
    }

    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(I);
    if (!store) return nullptr;
    if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
  if (!store) return nullptr;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return nullptr;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
    // Naked functions don't have epilogues.
    Builder.CreateUnreachable();
    return;
  }

  // Functions with no result always return void.
  if (!ReturnValue) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination.  Sometimes we
    // need to return the sret value in a register, though.
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = EI;
      llvm::Value *SRet = Builder.CreateStructGEP(
          nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
      RV = Builder.CreateLoad(SRet, "sret");
    }
    break;

  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
        EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
                          EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar:
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
                        MakeNaturalAlignAddrLValue(AI, RetTy),
                        /*isInit*/ true);
      break;
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless there is
        // cleanup code to be emitted between the store and return
        // instruction.
        if (EmitRetDbgLoc && !AutoreleaseResult)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = nullptr;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      CharUnits Align = getContext().getTypeAlignInChars(RetTy);
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
        Align = Align.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), Align, *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret;
  if (RV) {
    if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
      if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) {
        SanitizerScope SanScope(this);
        llvm::Value *Cond = Builder.CreateICmpNE(
            RV, llvm::Constant::getNullValue(RV->getType()));
        llvm::Constant *StaticData[] = {
            EmitCheckSourceLocation(EndLoc),
            EmitCheckSourceLocation(RetNNAttr->getLocation()),
        };
        EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
                  "nonnull_return", StaticData, None);
      }
    }
    Ret = Builder.CreateRet(RV);
  } else {
    Ret = Builder.CreateRetVoid();
  }

  if (RetDbgLoc)
    Ret->setDebugLoc(std::move(RetDbgLoc));
}

static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
  // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Placeholder =
      llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
  Placeholder = CGF.Builder.CreateLoad(Placeholder);
  return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
                               Ty.getQualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param,
                                          SourceLocation loc) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to non-scalars are pointers directly to the aggregate.
  // I don't know why references to scalars are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (!hasScalarEvaluationKind(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
         "cannot emit delegate call arguments for inalloca arguments!");

  args.add(convertTempToRValue(local, type, loc), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  const LValue &srcLV = writeback.Source;
  llvm::Value *srcAddr = srcLV.getAddress();
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to writeback.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(
      value, cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
      "icr.writeback-cast");

  // Perform the writeback.

  // If we have a "to use" value, it's something we need to emit a use
  // of.  This has to be carefully threaded in: if it's done after the
  // release it's potentially undefined behavior (and the optimizer
  // will ignore it), and if it happens before the retain then the
  // optimizer could move the release there.
  if (writeback.ToUse) {
    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);

    // Retain the new value.  No need to block-copy here:  the block's
    // being passed up the stack.
    value = CGF.EmitARCRetainNonBlock(value);

    // Emit the intrinsic use here.
    CGF.EmitARCIntrinsicUse(writeback.ToUse);

    // Load the old value (primitively).
    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());

    // Put the new value in place (primitively).
    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);

    // Release the old value.
    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());

  // Otherwise, we can just do a normal lvalue store.
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  }

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (const auto &I : args.writebacks())
    emitWriteback(CGF, I);
}

static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                            const CallArgList &CallArgs) {
  assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
      CallArgs.getCleanupsToDeactivate();
  // Iterate in reverse to increase the likelihood of popping the cleanup.
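  // Illustrative note (not part of the vendor source): cleanups live on a
  // stack, so deactivating the most recently pushed ones first means each
  // deactivation tends to hit the innermost scope, which the cleanup stack
  // can simply pop instead of marking a buried entry inactive.  Sketch:
  //
  //   push A; push B; push C;      // EH stack: [A, B, C]
  //   deactivate C; deactivate B;  // each pops the top entry cheaply
  //   deactivate A;                // stack is empty again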
  for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
           I = Cleanups.rbegin(), E = Cleanups.rend();
       I != E; ++I) {
    CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
    I->IsActiveIP->eraseFromParent();
  }
}

static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return nullptr;
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of an __autoreleased temporary; it
/// might be copy-initialized with the current value of the given
/// address, but it will definitely be copied out of after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  LValue srcLV;

  // Make an optimistic effort to emit the address as an l-value.
  // This can fail if the argument expression is more complicated.
  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
    srcLV = CGF.EmitLValue(lvExpr);

  // Otherwise, just emit it as a scalar.
  } else {
    llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

    QualType srcAddrType =
        CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
    srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
  }
  llvm::Value *srcAddr = srcLV.getAddress();

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
      cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");
  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the
  // cleanups system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null = llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(
        isNull, llvm::ConstantPointerNull::get(destType),
        temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      originBB = CGF.Builder.GetInsertBlock();
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  llvm::Value *valueToUse = nullptr;

  // Perform a copy if necessary.
  if (shouldCopy) {
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
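    // Illustrative sketch (not part of the vendor source) of the control
    // flow built above when the source address is not provably non-null:
    //
    //   entry:     %isnull = icmp eq %T** %src, null
    //              %arg = select %isnull, null, %temp    ; icr.argument
    //              br %isnull, label %icr.cont, label %icr.copy
    //   icr.copy:  copy *%src into %temp, fall through to %icr.cont
    //   icr.cont:  call with %arg; the writeback runs after the call,
    //              again guarded by the same null check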
    CGF.Builder.CreateStore(src, temp);

    // If optimization is enabled, and the value was held in a
    // __strong variable, we need to tell the optimizer that this
    // value has to stay alive until we're doing the store back.
    // This is because the temporary is effectively unretained,
    // and so otherwise we can violate the high-level semantics.
    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
      valueToUse = src;
    }
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
    CGF.EmitBlock(contBB);

    // Make a phi for the value to intrinsically use.
    if (valueToUse) {
      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                      "icr.to-use");
      phiToUse->addIncoming(valueToUse, copyBB);
      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
                            originBB);
      valueToUse = phiToUse;
    }

    condEval.end(CGF);
  }

  args.addWriteback(srcLV, temp, valueToUse);
  args.add(RValue::get(finalArgument), CRE->getType());
}

void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
  assert(!StackBase && !StackCleanup.isValid());

  // Save the stack.
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");

  // Control gets really tied up in landing pads, so we have to spill the
  // stacksave to an alloca to avoid violating SSA form.
  // TODO: This is dead if we never emit the cleanup.  We should create the
  // alloca and store lazily on the first cleanup emission.
  StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
  CGF.Builder.CreateStore(StackBase, StackBaseMem);
  CGF.pushStackRestore(EHCleanup, StackBaseMem);
  StackCleanup = CGF.EHStack.getInnermostEHScope();
  assert(StackCleanup.isValid());
}

void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
  if (StackBase) {
    CGF.DeactivateCleanupBlock(StackCleanup, StackBase);
    llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    // We could load StackBase from StackBaseMem, but in the non-exceptional
    // case we can skip it.
    CGF.Builder.CreateCall(F, StackBase);
  }
}

void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
                                          SourceLocation ArgLoc,
                                          const FunctionDecl *FD,
                                          unsigned ParmNum) {
  if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
    return;
  auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
  auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
  if (!NNAttr)
    return;
  SanitizerScope SanScope(this);
  assert(RV.isScalar());
  llvm::Value *V = RV.getScalarVal();
  llvm::Value *Cond =
      Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(ArgLoc),
      EmitCheckSourceLocation(NNAttr->getLocation()),
      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
  };
  EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
            "nonnull_arg", StaticData, None);
}

void CodeGenFunction::EmitCallArgs(CallArgList &Args,
                                   ArrayRef<QualType> ArgTypes,
                                   CallExpr::const_arg_iterator ArgBeg,
                                   CallExpr::const_arg_iterator ArgEnd,
                                   const FunctionDecl *CalleeDecl,
                                   unsigned ParamsToSkip) {
  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
  // because arguments are destroyed left to right in the callee.
  if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // Insert a stack save if we're going to need any inalloca args.
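    // Illustrative sketch (not part of the vendor source): the save/restore
    // pair emitted by allocateArgumentMemory/freeArgumentMemory brackets the
    // argument packet so its stack memory is reclaimed after the call:
    //
    //   %inalloca.save = call i8* @llvm.stacksave()
    //   %argmem = alloca inalloca <{ ... }>    ; argument packet
    //   ... evaluate arguments into %argmem ...
    //   call void @callee(<{ ... }>* inalloca %argmem)
    //   call void @llvm.stackrestore(i8* %inalloca.save)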
    bool HasInAllocaArgs = false;
    for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
         I != E && !HasInAllocaArgs; ++I)
      HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
    if (HasInAllocaArgs) {
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      Args.allocateArgumentMemory(*this);
    }

    // Evaluate each argument.
    size_t CallArgsStart = Args.size();
    for (int I = ArgTypes.size() - 1; I >= 0; --I) {
      CallExpr::const_arg_iterator Arg = ArgBeg + I;
      EmitCallArg(Args, *Arg, ArgTypes[I]);
      EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
                          CalleeDecl, ParamsToSkip + I);
    }

    // Un-reverse the arguments we just evaluated so they match up with the
    // LLVM IR function.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
    return;
  }

  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    CallExpr::const_arg_iterator Arg = ArgBeg + I;
    assert(Arg != ArgEnd);
    EmitCallArg(Args, *Arg, ArgTypes[I]);
    EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
                        CalleeDecl, ParamsToSkip + I);
  }
}

namespace {

struct DestroyUnpassedArg : EHScopeStack::Cleanup {
  DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  llvm::Value *Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
    assert(!Dtor->isTrivial());
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
                              /*Delegating=*/false, Addr);
  }
};

}

struct DisableDebugLocationUpdates {
  CodeGenFunction &CGF;
  bool disabledDebugInfo;
  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
    if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
      CGF.disableDebugInfo();
  }
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)
      CGF.enableDebugInfo();
  }
};

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  DisableDebugLocationUpdates Dis(*this, E);
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E), type);
  }

  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);

  // In the Microsoft C++ ABI, aggregate arguments are destructed by the
  // callee.  However, we still have to push an EH-only cleanup in case we
  // unwind before we make it to the call.
  if (HasAggregateEvalKind &&
      CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // If we're using inalloca, use the argument memory.  Otherwise, use a
    // temporary.
    AggValueSlot Slot;
    if (args.isUsingInAlloca())
      Slot = createPlaceholderSlot(*this, type);
    else
      Slot = CreateAggTemp(type, "agg.tmp");

    const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
    bool DestroyedInCallee =
        RD && RD->hasNonTrivialDestructor() &&
        CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
    if (DestroyedInCallee)
      Slot.setExternallyDestructed();

    EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    if (DestroyedInCallee) {
      // Create a no-op GEP between the placeholder and the cleanup so we can
      // RAUW it successfully.  It also serves as a marker of the first
      // instruction where the cleanup is active.
      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);

      // This unreachable is a temporary marker which will be removed later.
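      // Illustrative note (not part of the vendor source): the cleanup's
      // activation point needs an instruction to anchor it, but the real
      // call does not exist yet.  Sketch of the lifecycle:
      //
      //   %marker = unreachable       ; placeholder IsActiveIP, created below
      //   ...                         ; remaining arguments are evaluated
      //   call @callee(...)           ; the real call is finally emitted
      //   ; deactivateArgCleanupsBeforeCall() then deactivates the cleanup
      //   ; at %marker and erases the marker from its parent block.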
llvm::Instruction *IsActive = Builder.CreateUnreachable(); args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); } return; } if (HasAggregateEvalKind && isa(E) && cast(E)->getCastKind() == CK_LValueToRValue) { LValue L = EmitLValue(cast(E)->getSubExpr()); assert(L.isSimple()); if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) { args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true); } else { // We can't represent a misaligned lvalue in the CallArgList, so copy // to an aligned temporary now. llvm::Value *tmp = CreateMemTemp(type); EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(), L.getAlignment()); args.add(RValue::getAggregate(tmp), type); } return; } args.add(EmitAnyExprToTemp(E), type); } QualType CodeGenFunction::getVarArgType(const Expr *Arg) { // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC // implicitly widens null pointer constants that are arguments to varargs // functions to pointer-sized ints. if (!getTarget().getTriple().isOSWindows()) return Arg->getType(); if (Arg->getType()->isIntegerType() && getContext().getTypeSize(Arg->getType()) < getContext().getTargetInfo().getPointerWidth(0) && Arg->isNullPointerConstant(getContext(), Expr::NPC_ValueDependentIsNotNull)) { return getContext().getIntPtrType(); } return Arg->getType(); } // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC // optimizer it can aggressively ignore unwind edges. void CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { if (CGM.getCodeGenOpts().OptimizationLevel != 0 && !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) Inst->setMetadata("clang.arc.no_objc_arc_exceptions", CGM.getNoObjCARCExceptionsMetadata()); } /// Emits a call to the given no-arguments nounwind runtime function. llvm::CallInst * CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, const llvm::Twine &name) { return EmitNounwindRuntimeCall(callee, None, name); } /// Emits a call to the given nounwind runtime function. llvm::CallInst * CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, ArrayRef args, const llvm::Twine &name) { llvm::CallInst *call = EmitRuntimeCall(callee, args, name); call->setDoesNotThrow(); return call; } /// Emits a simple call (never an invoke) to the given no-arguments /// runtime function. llvm::CallInst * CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, const llvm::Twine &name) { return EmitRuntimeCall(callee, None, name); } /// Emits a simple call (never an invoke) to the given runtime /// function. llvm::CallInst * CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, ArrayRef args, const llvm::Twine &name) { llvm::CallInst *call = Builder.CreateCall(callee, args, name); call->setCallingConv(getRuntimeCC()); return call; } /// Emits a call or invoke to the given noreturn runtime function. void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef args) { if (getInvokeDest()) { llvm::InvokeInst *invoke = Builder.CreateInvoke(callee, getUnreachableBlock(), getInvokeDest(), args); invoke->setDoesNotReturn(); invoke->setCallingConv(getRuntimeCC()); } else { llvm::CallInst *call = Builder.CreateCall(callee, args); call->setDoesNotReturn(); call->setCallingConv(getRuntimeCC()); Builder.CreateUnreachable(); } } /// Emits a call or invoke instruction to the given nullary runtime /// function. 
llvm::CallSite CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, const Twine &name) { return EmitRuntimeCallOrInvoke(callee, None, name); } /// Emits a call or invoke instruction to the given runtime function. llvm::CallSite CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef args, const Twine &name) { llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); callSite.setCallingConv(getRuntimeCC()); return callSite; } llvm::CallSite CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, const Twine &Name) { return EmitCallOrInvoke(Callee, None, Name); } /// Emits a call or invoke instruction to the given function, depending /// on the current state of the EH stack. llvm::CallSite CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, ArrayRef Args, const Twine &Name) { llvm::BasicBlock *InvokeDest = getInvokeDest(); llvm::Instruction *Inst; if (!InvokeDest) Inst = Builder.CreateCall(Callee, Args, Name); else { llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name); EmitBlock(ContBB); } // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC // optimizer it can aggressively ignore unwind edges. if (CGM.getLangOpts().ObjCAutoRefCount) AddObjCARCExceptionMetadata(Inst); return llvm::CallSite(Inst); } /// \brief Store a non-aggregate value to an address to initialize it. For /// initialization, a non-atomic store will be used. static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, LValue Dst) { if (Src.isScalar()) CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true); else CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true); } void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New) { DeferredReplacements.push_back(std::make_pair(Old, New)); } RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, llvm::Value *Callee, ReturnValueSlot ReturnValue, const CallArgList &CallArgs, const Decl *TargetDecl, llvm::Instruction **callOrInvoke) { // FIXME: We no longer need the types from CallArgs; lift up and simplify. // Handle struct-return functions by passing a pointer to the // location that we would like to return into. QualType RetTy = CallInfo.getReturnType(); const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); llvm::FunctionType *IRFuncTy = cast( cast(Callee->getType())->getElementType()); // If we're using inalloca, insert the allocation after the stack save. // FIXME: Do this earlier rather than hacking it in here! llvm::AllocaInst *ArgMemory = nullptr; if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { llvm::Instruction *IP = CallArgs.getStackBase(); llvm::AllocaInst *AI; if (IP) { IP = IP->getNextNode(); AI = new llvm::AllocaInst(ArgStruct, "argmem", IP); } else { AI = CreateTempAlloca(ArgStruct, "argmem"); } AI->setUsedWithInAlloca(true); assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); ArgMemory = AI; } ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); SmallVector IRCallArgs(IRFunctionArgs.totalIRArgs()); // If the call returns a temporary with struct return, create a temporary // alloca to hold the result, unless one is given to us. 
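  // Illustrative sketch (not part of the vendor source) of indirect (sret)
  // return lowering for a by-memory aggregate, using a hypothetical type:
  //
  //   struct Big { int a[8]; };
  //   Big make();                  // C++ signature
  //
  //   ; becomes, in IR terms:
  //   declare void @make(%struct.Big* sret)
  //   %tmp = alloca %struct.Big    ; created here when no slot was provided
  //   call void @make(%struct.Big* sret %tmp)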
llvm::Value *SRetPtr = nullptr; size_t UnusedReturnSize = 0; if (RetAI.isIndirect() || RetAI.isInAlloca()) { SRetPtr = ReturnValue.getValue(); if (!SRetPtr) { SRetPtr = CreateMemTemp(RetTy); if (HaveInsertPoint() && ReturnValue.isUnused()) { uint64_t size = CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy)); if (EmitLifetimeStart(size, SRetPtr)) UnusedReturnSize = size; } } if (IRFunctionArgs.hasSRetArg()) { IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr; } else { llvm::Value *Addr = Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory, RetAI.getInAllocaFieldIndex()); Builder.CreateStore(SRetPtr, Addr); } } assert(CallInfo.arg_size() == CallArgs.size() && "Mismatch between function signature & arguments."); unsigned ArgNo = 0; CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); I != E; ++I, ++info_it, ++ArgNo) { const ABIArgInfo &ArgInfo = info_it->info; RValue RV = I->RV; CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty); // Insert a padding argument to ensure proper alignment. if (IRFunctionArgs.hasPaddingArg(ArgNo)) IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = llvm::UndefValue::get(ArgInfo.getPaddingType()); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); switch (ArgInfo.getKind()) { case ABIArgInfo::InAlloca: { assert(NumIRArgs == 0); assert(getTarget().getTriple().getArch() == llvm::Triple::x86); if (RV.isAggregate()) { // Replace the placeholder with the appropriate argument slot GEP. llvm::Instruction *Placeholder = cast(RV.getAggregateAddr()); CGBuilderTy::InsertPoint IP = Builder.saveIP(); Builder.SetInsertPoint(Placeholder); llvm::Value *Addr = Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory, ArgInfo.getInAllocaFieldIndex()); Builder.restoreIP(IP); deferPlaceholderReplacement(Placeholder, Addr); } else { // Store the RValue into the argument struct. llvm::Value *Addr = Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory, ArgInfo.getInAllocaFieldIndex()); unsigned AS = Addr->getType()->getPointerAddressSpace(); llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS); // There are some cases where a trivial bitcast is not avoidable. The // definition of a type later in a translation unit may change it's type // from {}* to (%struct.foo*)*. if (Addr->getType() != MemType) Addr = Builder.CreateBitCast(Addr, MemType); LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign); EmitInitStoreOfNonAggregate(*this, RV, argLV); } break; } case ABIArgInfo::Indirect: { assert(NumIRArgs == 1); if (RV.isScalar() || RV.isComplex()) { // Make a temporary alloca to pass the argument. llvm::AllocaInst *AI = CreateMemTemp(I->Ty); if (ArgInfo.getIndirectAlign() > AI->getAlignment()) AI->setAlignment(ArgInfo.getIndirectAlign()); IRCallArgs[FirstIRArg] = AI; LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign); EmitInitStoreOfNonAggregate(*this, RV, argLV); } else { // We want to avoid creating an unnecessary temporary+copy here; // however, we need one in three cases: // 1. If the argument is not byval, and we are required to copy the // source. (This case doesn't occur on any common architecture.) // 2. If the argument is byval, RV is not sufficiently aligned, and // we cannot force it to be sufficiently aligned. // 3. If the argument is byval, but RV is located in an address space // different than that of the argument (0). 
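      // Illustrative sketch (not part of the vendor source) of case 2: the
      // ABI demands, say, 16-byte alignment for a byval argument, but the
      // existing aggregate is only known to be 4-byte aligned and cannot be
      // re-aligned in place, so an aligned temporary is created and filled:
      //
      //   %tmp = alloca %struct.S, align 16
      //   memcpy %tmp <- %src
      //   call void @f(%struct.S* byval align 16 %tmp)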
llvm::Value *Addr = RV.getAggregateAddr(); unsigned Align = ArgInfo.getIndirectAlign(); const llvm::DataLayout *TD = &CGM.getDataLayout(); const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace(); const unsigned ArgAddrSpace = (FirstIRArg < IRFuncTy->getNumParams() ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() : 0); if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) || (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align && llvm::getOrEnforceKnownAlignment(Addr, Align, *TD) < Align) || (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) { // Create an aligned temporary, and copy to it. llvm::AllocaInst *AI = CreateMemTemp(I->Ty); if (Align > AI->getAlignment()) AI->setAlignment(Align); IRCallArgs[FirstIRArg] = AI; EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified()); } else { // Skip the extra memcpy call. IRCallArgs[FirstIRArg] = Addr; } } break; } case ABIArgInfo::Ignore: assert(NumIRArgs == 0); break; case ABIArgInfo::Extend: case ABIArgInfo::Direct: { if (!isa(ArgInfo.getCoerceToType()) && ArgInfo.getCoerceToType() == ConvertType(info_it->type) && ArgInfo.getDirectOffset() == 0) { assert(NumIRArgs == 1); llvm::Value *V; if (RV.isScalar()) V = RV.getScalarVal(); else V = Builder.CreateLoad(RV.getAggregateAddr()); // We might have to widen integers, but we should never truncate. if (ArgInfo.getCoerceToType() != V->getType() && V->getType()->isIntegerTy()) V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); // If the argument doesn't match, perform a bitcast to coerce it. This // can happen due to trivial type mismatches. if (FirstIRArg < IRFuncTy->getNumParams() && V->getType() != IRFuncTy->getParamType(FirstIRArg)) V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); IRCallArgs[FirstIRArg] = V; break; } // FIXME: Avoid the conversion through memory if possible. llvm::Value *SrcPtr; CharUnits SrcAlign; if (RV.isScalar() || RV.isComplex()) { SrcPtr = CreateMemTemp(I->Ty, "coerce"); SrcAlign = TypeAlign; LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign); EmitInitStoreOfNonAggregate(*this, RV, SrcLV); } else { SrcPtr = RV.getAggregateAddr(); // This alignment is guaranteed by EmitCallArg. SrcAlign = TypeAlign; } // If the value is offset in memory, apply the offset now. if (unsigned Offs = ArgInfo.getDirectOffset()) { SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy()); SrcPtr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), SrcPtr, Offs); SrcPtr = Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(ArgInfo.getCoerceToType())); SrcAlign = SrcAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs)); } // Fast-isel and the optimizer generally like scalar values better than // FCAs, so we flatten them if this is safe to do for this argument. llvm::StructType *STy = dyn_cast(ArgInfo.getCoerceToType()); if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { llvm::Type *SrcTy = cast(SrcPtr->getType())->getElementType(); uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); // If the source type is smaller than the destination type of the // coerce-to logic, copy the source value into a temp alloca the size // of the destination type to allow loading all of it. The bits past // the source value are left undef. 
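      // Illustrative sketch (not part of the vendor source) of the
      // flattening below: a { i64, i64 } coercion type is split into two
      // scalar IR arguments rather than passed as one first-class aggregate.
      // If the source is only 12 bytes, it is first copied into a 16-byte
      // temporary and the trailing 4 bytes remain undef:
      //
      //   %coerce = alloca { i64, i64 }
      //   memcpy %coerce <- %src (12 bytes)
      //   %lo = load i64 from element 0   ; IRCallArgs[FirstIRArg]
      //   %hi = load i64 from element 1   ; IRCallArgs[FirstIRArg + 1]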
if (SrcSize < DstSize) { llvm::AllocaInst *TempAlloca = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce"); Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0); SrcPtr = TempAlloca; } else { SrcPtr = Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(STy)); } assert(NumIRArgs == STy->getNumElements()); for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, SrcPtr, 0, i); llvm::LoadInst *LI = Builder.CreateLoad(EltPtr); // We don't know what we're loading from. LI->setAlignment(1); IRCallArgs[FirstIRArg + i] = LI; } } else { // In the simple case, just pass the coerced loaded value. assert(NumIRArgs == 1); IRCallArgs[FirstIRArg] = CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), SrcAlign, *this); } break; } case ABIArgInfo::Expand: unsigned IRArgPos = FirstIRArg; ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos); assert(IRArgPos == FirstIRArg + NumIRArgs); break; } } if (ArgMemory) { llvm::Value *Arg = ArgMemory; if (CallInfo.isVariadic()) { // When passing non-POD arguments by value to variadic functions, we will // end up with a variadic prototype and an inalloca call site. In such // cases, we can't do any parameter mismatch checks. Give up and bitcast // the callee. unsigned CalleeAS = cast(Callee->getType())->getAddressSpace(); Callee = Builder.CreateBitCast( Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS)); } else { llvm::Type *LastParamTy = IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); if (Arg->getType() != LastParamTy) { #ifndef NDEBUG // Assert that these structs have equivalent element types. llvm::StructType *FullTy = CallInfo.getArgStruct(); llvm::StructType *DeclaredTy = cast( cast(LastParamTy)->getElementType()); assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(), DE = DeclaredTy->element_end(), FI = FullTy->element_begin(); DI != DE; ++DI, ++FI) assert(*DI == *FI); #endif Arg = Builder.CreateBitCast(Arg, LastParamTy); } } assert(IRFunctionArgs.hasInallocaArg()); IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; } if (!CallArgs.getCleanupsToDeactivate().empty()) deactivateArgCleanupsBeforeCall(*this, CallArgs); // If the callee is a bitcast of a function to a varargs pointer to function // type, check to see if we can remove the bitcast. This handles some cases // with unprototyped functions. if (llvm::ConstantExpr *CE = dyn_cast(Callee)) if (llvm::Function *CalleeF = dyn_cast(CE->getOperand(0))) { llvm::PointerType *CurPT=cast(Callee->getType()); llvm::FunctionType *CurFT = cast(CurPT->getElementType()); llvm::FunctionType *ActualFT = CalleeF->getFunctionType(); if (CE->getOpcode() == llvm::Instruction::BitCast && ActualFT->getReturnType() == CurFT->getReturnType() && ActualFT->getNumParams() == CurFT->getNumParams() && ActualFT->getNumParams() == IRCallArgs.size() && (CurFT->isVarArg() || !ActualFT->isVarArg())) { bool ArgsMatch = true; for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i) if (ActualFT->getParamType(i) != CurFT->getParamType(i)) { ArgsMatch = false; break; } // Strip the cast if we can get away with it. This is a nice cleanup, // but also allows us to inline the function at -O0 if it is marked // always_inline. if (ArgsMatch) Callee = CalleeF; } } assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); for (unsigned i = 0; i < IRCallArgs.size(); ++i) { // Inalloca argument can have different type. 
if (IRFunctionArgs.hasInallocaArg() && i == IRFunctionArgs.getInallocaArgNo()) continue; if (i < IRFuncTy->getNumParams()) assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); } unsigned CallingConv; CodeGen::AttributeListType AttributeList; CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv, true); llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(), AttributeList); llvm::BasicBlock *InvokeDest = nullptr; if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex, llvm::Attribute::NoUnwind) || currentFunctionUsesSEHTry()) InvokeDest = getInvokeDest(); llvm::CallSite CS; if (!InvokeDest) { CS = Builder.CreateCall(Callee, IRCallArgs); } else { llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs); EmitBlock(Cont); } if (callOrInvoke) *callOrInvoke = CS.getInstruction(); if (CurCodeDecl && CurCodeDecl->hasAttr() && !CS.hasFnAttr(llvm::Attribute::NoInline)) Attrs = Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex, llvm::Attribute::AlwaysInline); // Disable inlining inside SEH __try blocks. if (isSEHTryScope()) Attrs = Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex, llvm::Attribute::NoInline); CS.setAttributes(Attrs); CS.setCallingConv(static_cast(CallingConv)); // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC // optimizer it can aggressively ignore unwind edges. if (CGM.getLangOpts().ObjCAutoRefCount) AddObjCARCExceptionMetadata(CS.getInstruction()); // If the call doesn't return, finish the basic block and clear the // insertion point; this allows the rest of IRgen to discard // unreachable code. if (CS.doesNotReturn()) { if (UnusedReturnSize) EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize), SRetPtr); Builder.CreateUnreachable(); Builder.ClearInsertionPoint(); // FIXME: For now, emit a dummy basic block because expr emitters in // generally are not ready to handle emitting expressions at unreachable // points. EnsureInsertPoint(); // Return a reasonable RValue. return GetUndefRValue(RetTy); } llvm::Instruction *CI = CS.getInstruction(); if (Builder.isNamePreserving() && !CI->getType()->isVoidTy()) CI->setName("call"); // Emit any writebacks immediately. Arguably this should happen // after any return-value munging. if (CallArgs.hasWritebacks()) emitWritebacks(*this, CallArgs); // The stack cleanup for inalloca arguments has to run out of the normal // lexical order, so deactivate it and run it manually here. CallArgs.freeArgumentMemory(*this); RValue Ret = [&] { switch (RetAI.getKind()) { case ABIArgInfo::InAlloca: case ABIArgInfo::Indirect: { RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation()); if (UnusedReturnSize) EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize), SRetPtr); return ret; } case ABIArgInfo::Ignore: // If we are ignoring an argument that had a result, make sure to // construct the appropriate return value for our caller. 
return GetUndefRValue(RetTy); case ABIArgInfo::Extend: case ABIArgInfo::Direct: { llvm::Type *RetIRTy = ConvertType(RetTy); if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { switch (getEvaluationKind(RetTy)) { case TEK_Complex: { llvm::Value *Real = Builder.CreateExtractValue(CI, 0); llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); return RValue::getComplex(std::make_pair(Real, Imag)); } case TEK_Aggregate: { llvm::Value *DestPtr = ReturnValue.getValue(); bool DestIsVolatile = ReturnValue.isVolatile(); CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy); if (!DestPtr) { DestPtr = CreateMemTemp(RetTy, "agg.tmp"); DestIsVolatile = false; } BuildAggStore(*this, CI, DestPtr, DestIsVolatile, DestAlign); return RValue::getAggregate(DestPtr); } case TEK_Scalar: { // If the argument doesn't match, perform a bitcast to coerce it. This // can happen due to trivial type mismatches. llvm::Value *V = CI; if (V->getType() != RetIRTy) V = Builder.CreateBitCast(V, RetIRTy); return RValue::get(V); } } llvm_unreachable("bad evaluation kind"); } llvm::Value *DestPtr = ReturnValue.getValue(); bool DestIsVolatile = ReturnValue.isVolatile(); CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy); if (!DestPtr) { DestPtr = CreateMemTemp(RetTy, "coerce"); DestIsVolatile = false; } // If the value is offset in memory, apply the offset now. llvm::Value *StorePtr = DestPtr; CharUnits StoreAlign = DestAlign; if (unsigned Offs = RetAI.getDirectOffset()) { StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy()); StorePtr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), StorePtr, Offs); StorePtr = Builder.CreateBitCast(StorePtr, llvm::PointerType::getUnqual(RetAI.getCoerceToType())); StoreAlign = StoreAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs)); } CreateCoercedStore(CI, StorePtr, DestIsVolatile, StoreAlign, *this); return convertTempToRValue(DestPtr, RetTy, SourceLocation()); } case ABIArgInfo::Expand: llvm_unreachable("Invalid ABI kind for return argument"); } llvm_unreachable("Unhandled ABIArgInfo::Kind"); } (); if (Ret.isScalar() && TargetDecl) { if (const auto *AA = TargetDecl->getAttr()) { llvm::Value *OffsetValue = nullptr; if (const auto *Offset = AA->getOffset()) OffsetValue = EmitScalarExpr(Offset); llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment()); llvm::ConstantInt *AlignmentCI = cast(Alignment); EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(), OffsetValue); } } return Ret; } /* VarArg handling */ llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) { return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this); } Index: vendor/clang/dist/lib/CodeGen/CodeGenModule.cpp =================================================================== --- vendor/clang/dist/lib/CodeGen/CodeGenModule.cpp (revision 292727) +++ vendor/clang/dist/lib/CodeGen/CodeGenModule.cpp (revision 292728) @@ -1,3687 +1,3692 @@ //===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This coordinates the per-module state used while generating code. 
// //===----------------------------------------------------------------------===// #include "CodeGenModule.h" #include "CGCUDARuntime.h" #include "CGCXXABI.h" #include "CGCall.h" #include "CGDebugInfo.h" #include "CGObjCRuntime.h" #include "CGOpenCLRuntime.h" #include "CGOpenMPRuntime.h" #include "CodeGenFunction.h" #include "CodeGenPGO.h" #include "CodeGenTBAA.h" #include "CoverageMappingGen.h" #include "TargetInfo.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CharUnits.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Mangle.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/CharInfo.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/Module.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" #include "clang/Basic/Version.h" #include "clang/Frontend/CodeGenOptions.h" #include "clang/Sema/SemaDiagnostic.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/Triple.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/ProfileData/InstrProfReader.h" #include "llvm/Support/ConvertUTF.h" #include "llvm/Support/ErrorHandling.h" using namespace clang; using namespace CodeGen; static const char AnnotationSection[] = "llvm.metadata"; static CGCXXABI *createCXXABI(CodeGenModule &CGM) { switch (CGM.getTarget().getCXXABI().getKind()) { case TargetCXXABI::GenericAArch64: case TargetCXXABI::GenericARM: case TargetCXXABI::iOS: case TargetCXXABI::iOS64: case TargetCXXABI::GenericMIPS: case TargetCXXABI::GenericItanium: return CreateItaniumCXXABI(CGM); case TargetCXXABI::Microsoft: return CreateMicrosoftCXXABI(CGM); } llvm_unreachable("invalid C++ ABI kind"); } CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO, const PreprocessorOptions &PPO, const CodeGenOptions &CGO, llvm::Module &M, const llvm::DataLayout &TD, DiagnosticsEngine &diags, CoverageSourceInfo *CoverageInfo) : Context(C), LangOpts(C.getLangOpts()), HeaderSearchOpts(HSO), PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags), TheDataLayout(TD), Target(C.getTargetInfo()), ABI(createCXXABI(*this)), VMContext(M.getContext()), TBAA(nullptr), TheTargetCodeGenInfo(nullptr), Types(*this), VTables(*this), ObjCRuntime(nullptr), OpenCLRuntime(nullptr), OpenMPRuntime(nullptr), CUDARuntime(nullptr), DebugInfo(nullptr), ARCData(nullptr), NoObjCARCExceptionsMetadata(nullptr), RRData(nullptr), PGOReader(nullptr), CFConstantStringClassRef(nullptr), ConstantStringClassRef(nullptr), NSConstantStringType(nullptr), NSConcreteGlobalBlock(nullptr), NSConcreteStackBlock(nullptr), BlockObjectAssign(nullptr), BlockObjectDispose(nullptr), BlockDescriptorType(nullptr), GenericBlockLiteralType(nullptr), LifetimeStartFn(nullptr), LifetimeEndFn(nullptr), SanitizerMD(new SanitizerMetadata(*this)) { // Initialize the type cache. 
llvm::LLVMContext &LLVMContext = M.getContext(); VoidTy = llvm::Type::getVoidTy(LLVMContext); Int8Ty = llvm::Type::getInt8Ty(LLVMContext); Int16Ty = llvm::Type::getInt16Ty(LLVMContext); Int32Ty = llvm::Type::getInt32Ty(LLVMContext); Int64Ty = llvm::Type::getInt64Ty(LLVMContext); FloatTy = llvm::Type::getFloatTy(LLVMContext); DoubleTy = llvm::Type::getDoubleTy(LLVMContext); PointerWidthInBits = C.getTargetInfo().getPointerWidth(0); PointerAlignInBytes = C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity(); IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth()); IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits); Int8PtrTy = Int8Ty->getPointerTo(0); Int8PtrPtrTy = Int8PtrTy->getPointerTo(0); RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC(); BuiltinCC = getTargetCodeGenInfo().getABIInfo().getBuiltinCC(); if (LangOpts.ObjC1) createObjCRuntime(); if (LangOpts.OpenCL) createOpenCLRuntime(); if (LangOpts.OpenMP) createOpenMPRuntime(); if (LangOpts.CUDA) createCUDARuntime(); // Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0. if (LangOpts.Sanitize.has(SanitizerKind::Thread) || (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)) TBAA = new CodeGenTBAA(Context, VMContext, CodeGenOpts, getLangOpts(), getCXXABI().getMangleContext()); // If debug info or coverage generation is enabled, create the CGDebugInfo // object. if (CodeGenOpts.getDebugInfo() != CodeGenOptions::NoDebugInfo || CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes) DebugInfo = new CGDebugInfo(*this); Block.GlobalUniqueCount = 0; if (C.getLangOpts().ObjCAutoRefCount) ARCData = new ARCEntrypoints(); RRData = new RREntrypoints(); if (!CodeGenOpts.InstrProfileInput.empty()) { auto ReaderOrErr = llvm::IndexedInstrProfReader::create(CodeGenOpts.InstrProfileInput); if (std::error_code EC = ReaderOrErr.getError()) { unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, "Could not read profile %0: %1"); getDiags().Report(DiagID) << CodeGenOpts.InstrProfileInput << EC.message(); } else PGOReader = std::move(ReaderOrErr.get()); } // If coverage mapping generation is enabled, create the // CoverageMappingModuleGen object. if (CodeGenOpts.CoverageMapping) CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo)); } CodeGenModule::~CodeGenModule() { delete ObjCRuntime; delete OpenCLRuntime; delete OpenMPRuntime; delete CUDARuntime; delete TheTargetCodeGenInfo; delete TBAA; delete DebugInfo; delete ARCData; delete RRData; } void CodeGenModule::createObjCRuntime() { // This is just isGNUFamily(), but we want to force implementors of // new ABIs to decide how best to do this. 
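// Illustrative note (not part of the vendor source): the switch below
// deliberately has no default case, so adding a new ObjCRuntime::Kind
// enumerator makes -Wswitch flag this spot instead of silently picking a
// family.  Minimal sketch of the same pattern:
//
//   switch (kind) {            // no default on purpose
//   case Kind::A: return handleA();
//   case Kind::B: return handleB();
//   }                          // a new enumerator warns here
//   llvm_unreachable("bad kind");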
switch (LangOpts.ObjCRuntime.getKind()) { case ObjCRuntime::GNUstep: case ObjCRuntime::GCC: case ObjCRuntime::ObjFW: ObjCRuntime = CreateGNUObjCRuntime(*this); return; case ObjCRuntime::FragileMacOSX: case ObjCRuntime::MacOSX: case ObjCRuntime::iOS: ObjCRuntime = CreateMacObjCRuntime(*this); return; } llvm_unreachable("bad runtime kind"); } void CodeGenModule::createOpenCLRuntime() { OpenCLRuntime = new CGOpenCLRuntime(*this); } void CodeGenModule::createOpenMPRuntime() { OpenMPRuntime = new CGOpenMPRuntime(*this); } void CodeGenModule::createCUDARuntime() { CUDARuntime = CreateNVCUDARuntime(*this); } void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) { Replacements[Name] = C; } void CodeGenModule::applyReplacements() { for (auto &I : Replacements) { StringRef MangledName = I.first(); llvm::Constant *Replacement = I.second; llvm::GlobalValue *Entry = GetGlobalValue(MangledName); if (!Entry) continue; auto *OldF = cast(Entry); auto *NewF = dyn_cast(Replacement); if (!NewF) { if (auto *Alias = dyn_cast(Replacement)) { NewF = dyn_cast(Alias->getAliasee()); } else { auto *CE = cast(Replacement); assert(CE->getOpcode() == llvm::Instruction::BitCast || CE->getOpcode() == llvm::Instruction::GetElementPtr); NewF = dyn_cast(CE->getOperand(0)); } } // Replace old with new, but keep the old order. OldF->replaceAllUsesWith(Replacement); if (NewF) { NewF->removeFromParent(); OldF->getParent()->getFunctionList().insertAfter(OldF, NewF); } OldF->eraseFromParent(); } } // This is only used in aliases that we created and we know they have a // linear structure. static const llvm::GlobalObject *getAliasedGlobal(const llvm::GlobalAlias &GA) { llvm::SmallPtrSet Visited; const llvm::Constant *C = &GA; for (;;) { C = C->stripPointerCasts(); if (auto *GO = dyn_cast(C)) return GO; // stripPointerCasts will not walk over weak aliases. auto *GA2 = dyn_cast(C); if (!GA2) return nullptr; if (!Visited.insert(GA2).second) return nullptr; C = GA2->getAliasee(); } } void CodeGenModule::checkAliases() { // Check if the constructed aliases are well formed. It is really unfortunate // that we have to do this in CodeGen, but we only construct mangled names // and aliases during codegen. bool Error = false; DiagnosticsEngine &Diags = getDiags(); for (const GlobalDecl &GD : Aliases) { const auto *D = cast(GD.getDecl()); const AliasAttr *AA = D->getAttr(); StringRef MangledName = getMangledName(GD); llvm::GlobalValue *Entry = GetGlobalValue(MangledName); auto *Alias = cast(Entry); const llvm::GlobalValue *GV = getAliasedGlobal(*Alias); if (!GV) { Error = true; Diags.Report(AA->getLocation(), diag::err_cyclic_alias); } else if (GV->isDeclaration()) { Error = true; Diags.Report(AA->getLocation(), diag::err_alias_to_undefined); } llvm::Constant *Aliasee = Alias->getAliasee(); llvm::GlobalValue *AliaseeGV; if (auto CE = dyn_cast(Aliasee)) AliaseeGV = cast(CE->getOperand(0)); else AliaseeGV = cast(Aliasee); if (const SectionAttr *SA = D->getAttr()) { StringRef AliasSection = SA->getName(); if (AliasSection != AliaseeGV->getSection()) Diags.Report(SA->getLocation(), diag::warn_alias_with_section) << AliasSection; } // We have to handle alias to weak aliases in here. LLVM itself disallows // this since the object semantics would not match the IL one. For // compatibility with gcc we implement it by just pointing the alias // to its aliasee's aliasee. We also warn, since the user is probably // expecting the link to be weak. 
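  // Illustrative sketch (not part of the vendor source), with hypothetical
  // names: the situation handled below arises from source like
  //
  //   void impl(void) {}
  //   void middle(void) __attribute__((weak, alias("impl")));
  //   void outer(void)  __attribute__((alias("middle")));
  //
  // LLVM disallows 'outer' aliasing the weak 'middle', so for GCC
  // compatibility 'outer' is re-pointed at 'impl', and a warning is issued.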
if (auto GA = dyn_cast(AliaseeGV)) { if (GA->mayBeOverridden()) { Diags.Report(AA->getLocation(), diag::warn_alias_to_weak_alias) << GV->getName() << GA->getName(); Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast( GA->getAliasee(), Alias->getType()); Alias->setAliasee(Aliasee); } } } if (!Error) return; for (const GlobalDecl &GD : Aliases) { StringRef MangledName = getMangledName(GD); llvm::GlobalValue *Entry = GetGlobalValue(MangledName); auto *Alias = cast(Entry); Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType())); Alias->eraseFromParent(); } } void CodeGenModule::clear() { DeferredDeclsToEmit.clear(); if (OpenMPRuntime) OpenMPRuntime->clear(); } void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags, StringRef MainFile) { if (!hasDiagnostics()) return; if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) { if (MainFile.empty()) MainFile = ""; Diags.Report(diag::warn_profile_data_unprofiled) << MainFile; } else Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Missing << Mismatched; } void CodeGenModule::Release() { EmitDeferred(); applyReplacements(); checkAliases(); EmitCXXGlobalInitFunc(); EmitCXXGlobalDtorFunc(); EmitCXXThreadLocalInitFunc(); if (ObjCRuntime) if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction()) AddGlobalCtor(ObjCInitFunction); if (Context.getLangOpts().CUDA && !Context.getLangOpts().CUDAIsDevice && CUDARuntime) { if (llvm::Function *CudaCtorFunction = CUDARuntime->makeModuleCtorFunction()) AddGlobalCtor(CudaCtorFunction); if (llvm::Function *CudaDtorFunction = CUDARuntime->makeModuleDtorFunction()) AddGlobalDtor(CudaDtorFunction); } if (PGOReader && PGOStats.hasDiagnostics()) PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName); EmitCtorList(GlobalCtors, "llvm.global_ctors"); EmitCtorList(GlobalDtors, "llvm.global_dtors"); EmitGlobalAnnotations(); EmitStaticExternCAliases(); EmitDeferredUnusedCoverageMappings(); if (CoverageMapping) CoverageMapping->emit(); emitLLVMUsed(); if (CodeGenOpts.Autolink && (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) { EmitModuleLinkOptions(); } if (CodeGenOpts.DwarfVersion) // We actually want the latest version when there are conflicts. // We can change from Warning to Latest if such mode is supported. getModule().addModuleFlag(llvm::Module::Warning, "Dwarf Version", CodeGenOpts.DwarfVersion); if (DebugInfo) // We support a single version in the linked module. The LLVM // parser will drop debug info with a different version number // (and warn about it, too). getModule().addModuleFlag(llvm::Module::Warning, "Debug Info Version", llvm::DEBUG_METADATA_VERSION); // We need to record the widths of enums and wchar_t, so that we can generate // the correct build attributes in the ARM backend. llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch(); if ( Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb) { // Width of wchar_t in bytes uint64_t WCharWidth = Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity(); getModule().addModuleFlag(llvm::Module::Error, "wchar_size", WCharWidth); // The minimum width of an enum in bytes uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 
1 : 4; getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth); } if (uint32_t PLevel = Context.getLangOpts().PICLevel) { llvm::PICLevel::Level PL = llvm::PICLevel::Default; switch (PLevel) { case 0: break; case 1: PL = llvm::PICLevel::Small; break; case 2: PL = llvm::PICLevel::Large; break; default: llvm_unreachable("Invalid PIC Level"); } getModule().setPICLevel(PL); } SimplifyPersonality(); if (getCodeGenOpts().EmitDeclMetadata) EmitDeclMetadata(); if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes) EmitCoverageFile(); if (DebugInfo) DebugInfo->finalize(); EmitVersionIdentMetadata(); EmitTargetMetadata(); } void CodeGenModule::UpdateCompletedType(const TagDecl *TD) { // Make sure that this type is translated. Types.UpdateCompletedType(TD); } llvm::MDNode *CodeGenModule::getTBAAInfo(QualType QTy) { if (!TBAA) return nullptr; return TBAA->getTBAAInfo(QTy); } llvm::MDNode *CodeGenModule::getTBAAInfoForVTablePtr() { if (!TBAA) return nullptr; return TBAA->getTBAAInfoForVTablePtr(); } llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) { if (!TBAA) return nullptr; return TBAA->getTBAAStructInfo(QTy); } llvm::MDNode *CodeGenModule::getTBAAStructTypeInfo(QualType QTy) { if (!TBAA) return nullptr; return TBAA->getTBAAStructTypeInfo(QTy); } llvm::MDNode *CodeGenModule::getTBAAStructTagInfo(QualType BaseTy, llvm::MDNode *AccessN, uint64_t O) { if (!TBAA) return nullptr; return TBAA->getTBAAStructTagInfo(BaseTy, AccessN, O); } /// Decorate the instruction with a TBAA tag. For both scalar TBAA /// and struct-path aware TBAA, the tag has the same format: /// base type, access type and offset. /// When ConvertTypeToTag is true, we create a tag based on the scalar type. void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst, llvm::MDNode *TBAAInfo, bool ConvertTypeToTag) { if (ConvertTypeToTag && TBAA) Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAA->getTBAAScalarTagInfo(TBAAInfo)); else Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo); } void CodeGenModule::Error(SourceLocation loc, StringRef message) { unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0"); getDiags().Report(Context.getFullLoc(loc), diagID) << message; } /// ErrorUnsupported - Print out an error that codegen doesn't support the /// specified stmt yet. void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) { unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "cannot compile this %0 yet"); std::string Msg = Type; getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID) << Msg << S->getSourceRange(); } /// ErrorUnsupported - Print out an error that codegen doesn't support the /// specified decl yet. void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) { unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "cannot compile this %0 yet"); std::string Msg = Type; getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg; } llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) { return llvm::ConstantInt::get(SizeTy, size.getQuantity()); } void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const { // Internal definitions always have default visibility. if (GV->hasLocalLinkage()) { GV->setVisibility(llvm::GlobalValue::DefaultVisibility); return; } // Set visibility for definitions. 
LinkageInfo LV = D->getLinkageAndVisibility(); if (LV.isVisibilityExplicit() || !GV->hasAvailableExternallyLinkage()) GV->setVisibility(GetLLVMVisibility(LV.getVisibility())); } static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) { return llvm::StringSwitch(S) .Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel) .Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel) .Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel) .Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel); } static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel( CodeGenOptions::TLSModel M) { switch (M) { case CodeGenOptions::GeneralDynamicTLSModel: return llvm::GlobalVariable::GeneralDynamicTLSModel; case CodeGenOptions::LocalDynamicTLSModel: return llvm::GlobalVariable::LocalDynamicTLSModel; case CodeGenOptions::InitialExecTLSModel: return llvm::GlobalVariable::InitialExecTLSModel; case CodeGenOptions::LocalExecTLSModel: return llvm::GlobalVariable::LocalExecTLSModel; } llvm_unreachable("Invalid TLS model!"); } void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const { assert(D.getTLSKind() && "setting TLS mode on non-TLS var!"); llvm::GlobalValue::ThreadLocalMode TLM; TLM = GetLLVMTLSModel(CodeGenOpts.getDefaultTLSModel()); // Override the TLS model if it is explicitly specified. if (const TLSModelAttr *Attr = D.getAttr()) { TLM = GetLLVMTLSModel(Attr->getModel()); } GV->setThreadLocalMode(TLM); } StringRef CodeGenModule::getMangledName(GlobalDecl GD) { StringRef &FoundStr = MangledDeclNames[GD.getCanonicalDecl()]; if (!FoundStr.empty()) return FoundStr; const auto *ND = cast(GD.getDecl()); SmallString<256> Buffer; StringRef Str; if (getCXXABI().getMangleContext().shouldMangleDeclName(ND)) { llvm::raw_svector_ostream Out(Buffer); if (const auto *D = dyn_cast(ND)) getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Out); else if (const auto *D = dyn_cast(ND)) getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Out); else getCXXABI().getMangleContext().mangleName(ND, Out); Str = Out.str(); } else { IdentifierInfo *II = ND->getIdentifier(); assert(II && "Attempt to mangle unnamed decl."); Str = II->getName(); } // Keep the first result in the case of a mangling collision. auto Result = Manglings.insert(std::make_pair(Str, GD)); return FoundStr = Result.first->first(); } StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD, const BlockDecl *BD) { MangleContext &MangleCtx = getCXXABI().getMangleContext(); const Decl *D = GD.getDecl(); SmallString<256> Buffer; llvm::raw_svector_ostream Out(Buffer); if (!D) MangleCtx.mangleGlobalBlock(BD, dyn_cast_or_null(initializedGlobalDecl.getDecl()), Out); else if (const auto *CD = dyn_cast(D)) MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out); else if (const auto *DD = dyn_cast(D)) MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out); else MangleCtx.mangleBlock(cast(D), BD, Out); auto Result = Manglings.insert(std::make_pair(Out.str(), BD)); return Result.first->first(); } llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) { return getModule().getNamedValue(Name); } /// AddGlobalCtor - Add a function to the list that will be called before /// main() runs. void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority, llvm::Constant *AssociatedData) { // FIXME: Type coercion of void()* types. 
GlobalCtors.push_back(Structor(Priority, Ctor, AssociatedData)); } /// AddGlobalDtor - Add a function to the list that will be called /// when the module is unloaded. void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority) { // FIXME: Type coercion of void()* types. GlobalDtors.push_back(Structor(Priority, Dtor, nullptr)); } void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) { // Ctor function type is void()*. llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false); llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy); // Get the type of a ctor entry, { i32, void ()*, i8* }. llvm::StructType *CtorStructTy = llvm::StructType::get( Int32Ty, llvm::PointerType::getUnqual(CtorFTy), VoidPtrTy, nullptr); // Construct the constructor and destructor arrays. SmallVector Ctors; for (const auto &I : Fns) { llvm::Constant *S[] = { llvm::ConstantInt::get(Int32Ty, I.Priority, false), llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy), (I.AssociatedData ? llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy) : llvm::Constant::getNullValue(VoidPtrTy))}; Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S)); } if (!Ctors.empty()) { llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size()); new llvm::GlobalVariable(TheModule, AT, false, llvm::GlobalValue::AppendingLinkage, llvm::ConstantArray::get(AT, Ctors), GlobalName); } } llvm::GlobalValue::LinkageTypes CodeGenModule::getFunctionLinkage(GlobalDecl GD) { const auto *D = cast(GD.getDecl()); GVALinkage Linkage = getContext().GetGVALinkageForFunction(D); if (isa(D) && getCXXABI().useThunkForDtorVariant(cast(D), GD.getDtorType())) { // Destructor variants in the Microsoft C++ ABI are always internal or // linkonce_odr thunks emitted on an as-needed basis. return Linkage == GVA_Internal ? llvm::GlobalValue::InternalLinkage : llvm::GlobalValue::LinkOnceODRLinkage; } return getLLVMLinkageForDeclarator(D, Linkage, /*isConstantVariable=*/false); } void CodeGenModule::setFunctionDLLStorageClass(GlobalDecl GD, llvm::Function *F) { const auto *FD = cast(GD.getDecl()); if (const auto *Dtor = dyn_cast_or_null(FD)) { if (getCXXABI().useThunkForDtorVariant(Dtor, GD.getDtorType())) { // Don't dllexport/import destructor thunks. F->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); return; } } if (FD->hasAttr()) F->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass); else if (FD->hasAttr()) F->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass); else F->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass); } void CodeGenModule::setFunctionDefinitionAttributes(const FunctionDecl *D, llvm::Function *F) { setNonAliasAttributes(D, F); } void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D, const CGFunctionInfo &Info, llvm::Function *F) { unsigned CallingConv; AttributeListType AttributeList; ConstructAttributeList(Info, D, AttributeList, CallingConv, false); F->setAttributes(llvm::AttributeSet::get(getLLVMContext(), AttributeList)); F->setCallingConv(static_cast(CallingConv)); } /// Determines whether the language options require us to model /// unwind exceptions. We treat -fexceptions as mandating this /// except under the fragile ObjC ABI with only ObjC exceptions /// enabled. This means, for example, that C with -fexceptions /// enables this. static bool hasUnwindExceptions(const LangOptions &LangOpts) { // If exceptions are completely disabled, obviously this is false. 
if (!LangOpts.Exceptions) return false; // If C++ exceptions are enabled, this is true. if (LangOpts.CXXExceptions) return true; // If ObjC exceptions are enabled, this depends on the ABI. if (LangOpts.ObjCExceptions) { return LangOpts.ObjCRuntime.hasUnwindExceptions(); } return true; } void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F) { llvm::AttrBuilder B; if (CodeGenOpts.UnwindTables) B.addAttribute(llvm::Attribute::UWTable); if (!hasUnwindExceptions(LangOpts)) B.addAttribute(llvm::Attribute::NoUnwind); if (D->hasAttr()) { // Naked implies noinline: we should not be inlining such functions. B.addAttribute(llvm::Attribute::Naked); B.addAttribute(llvm::Attribute::NoInline); } else if (D->hasAttr()) { B.addAttribute(llvm::Attribute::NoDuplicate); } else if (D->hasAttr()) { B.addAttribute(llvm::Attribute::NoInline); } else if (D->hasAttr() && !F->getAttributes().hasAttribute(llvm::AttributeSet::FunctionIndex, llvm::Attribute::NoInline)) { // (noinline wins over always_inline, and we can't specify both in IR) B.addAttribute(llvm::Attribute::AlwaysInline); } if (D->hasAttr()) { if (!D->hasAttr()) B.addAttribute(llvm::Attribute::OptimizeForSize); B.addAttribute(llvm::Attribute::Cold); } if (D->hasAttr()) B.addAttribute(llvm::Attribute::MinSize); if (LangOpts.getStackProtector() == LangOptions::SSPOn) B.addAttribute(llvm::Attribute::StackProtect); else if (LangOpts.getStackProtector() == LangOptions::SSPStrong) B.addAttribute(llvm::Attribute::StackProtectStrong); else if (LangOpts.getStackProtector() == LangOptions::SSPReq) B.addAttribute(llvm::Attribute::StackProtectReq); F->addAttributes(llvm::AttributeSet::FunctionIndex, llvm::AttributeSet::get( F->getContext(), llvm::AttributeSet::FunctionIndex, B)); if (D->hasAttr()) { // OptimizeNone implies noinline; we should not be inlining such functions. F->addFnAttr(llvm::Attribute::OptimizeNone); F->addFnAttr(llvm::Attribute::NoInline); // OptimizeNone wins over OptimizeForSize, MinSize, AlwaysInline. assert(!F->hasFnAttribute(llvm::Attribute::OptimizeForSize) && "OptimizeNone and OptimizeForSize on same function!"); assert(!F->hasFnAttribute(llvm::Attribute::MinSize) && "OptimizeNone and MinSize on same function!"); assert(!F->hasFnAttribute(llvm::Attribute::AlwaysInline) && "OptimizeNone and AlwaysInline on same function!"); // Attribute 'inlinehint' has no effect on 'optnone' functions. // Explicitly remove it from the set of function attributes. F->removeFnAttr(llvm::Attribute::InlineHint); } if (isa(D) || isa(D)) F->setUnnamedAddr(true); else if (const auto *MD = dyn_cast(D)) if (MD->isVirtual()) F->setUnnamedAddr(true); unsigned alignment = D->getMaxAlignment() / Context.getCharWidth(); if (alignment) F->setAlignment(alignment); // C++ ABI requires 2-byte alignment for member functions. if (F->getAlignment() < 2 && isa(D)) F->setAlignment(2); } void CodeGenModule::SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV) { if (const auto *ND = dyn_cast(D)) setGlobalVisibility(GV, ND); else GV->setVisibility(llvm::GlobalValue::DefaultVisibility); if (D->hasAttr()) addUsedGlobal(GV); } void CodeGenModule::setAliasAttributes(const Decl *D, llvm::GlobalValue *GV) { SetCommonAttributes(D, GV); // Process the dllexport attribute based on whether the original definition // (not necessarily the aliasee) was exported. 
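  // For example (hypothetical source, for illustration only):
  //
  //   void impl(void) {}
  //   __declspec(dllexport) void api(void) __attribute__((alias("impl")));
  //
  // The storage class is taken from the alias declaration 'api' itself, not
  // from the aliasee 'impl'.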
if (D->hasAttr()) GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); } void CodeGenModule::setNonAliasAttributes(const Decl *D, llvm::GlobalObject *GO) { SetCommonAttributes(D, GO); if (const SectionAttr *SA = D->getAttr()) GO->setSection(SA->getName()); getTargetCodeGenInfo().setTargetAttributes(D, GO, *this); } void CodeGenModule::SetInternalFunctionAttributes(const Decl *D, llvm::Function *F, const CGFunctionInfo &FI) { SetLLVMFunctionAttributes(D, FI, F); SetLLVMFunctionAttributesForDefinition(D, F); F->setLinkage(llvm::Function::InternalLinkage); setNonAliasAttributes(D, F); } static void setLinkageAndVisibilityForGV(llvm::GlobalValue *GV, const NamedDecl *ND) { // Set linkage and visibility in case we never see a definition. LinkageInfo LV = ND->getLinkageAndVisibility(); if (LV.getLinkage() != ExternalLinkage) { // Don't set internal linkage on declarations. } else { if (ND->hasAttr()) { GV->setLinkage(llvm::GlobalValue::ExternalLinkage); GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass); } else if (ND->hasAttr()) { GV->setLinkage(llvm::GlobalValue::ExternalLinkage); GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); } else if (ND->hasAttr() || ND->isWeakImported()) { // "extern_weak" is overloaded in LLVM; we probably should have // separate linkage types for this. GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage); } // Set visibility on a declaration only if it's explicit. if (LV.isVisibilityExplicit()) GV->setVisibility(CodeGenModule::GetLLVMVisibility(LV.getVisibility())); } } void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F, bool IsIncompleteFunction, bool IsThunk) { if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) { // If this is an intrinsic function, set the function's attributes // to the intrinsic's attributes. F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(), IID)); return; } const auto *FD = cast(GD.getDecl()); if (!IsIncompleteFunction) SetLLVMFunctionAttributes(FD, getTypes().arrangeGlobalDeclaration(GD), F); // Add the Returned attribute for "this", except for iOS 5 and earlier // where substantial code, including the libstdc++ dylib, was compiled with // GCC and does not actually return "this". if (!IsThunk && getCXXABI().HasThisReturn(GD) && !(getTarget().getTriple().isiOS() && getTarget().getTriple().isOSVersionLT(6))) { assert(!F->arg_empty() && F->arg_begin()->getType() ->canLosslesslyBitCastTo(F->getReturnType()) && "unexpected this return"); F->addAttribute(1, llvm::Attribute::Returned); } // Only a few attributes are set on declarations; these may later be // overridden by a definition. setLinkageAndVisibilityForGV(F, FD); if (const SectionAttr *SA = FD->getAttr()) F->setSection(SA->getName()); // A replaceable global allocation function does not act like a builtin by // default, only if it is invoked by a new-expression or delete-expression. if (FD->isReplaceableGlobalAllocationFunction()) F->addAttribute(llvm::AttributeSet::FunctionIndex, llvm::Attribute::NoBuiltin); } void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) { assert(!GV->isDeclaration() && "Only globals with definition can force usage."); LLVMUsed.emplace_back(GV); } void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) { assert(!GV->isDeclaration() && "Only globals with definition can force usage."); LLVMCompilerUsed.emplace_back(GV); } static void emitUsed(CodeGenModule &CGM, StringRef Name, std::vector &List) { // Don't create llvm.used if there is no need. 
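  // Illustration (not from this file): a definition such as
  //
  //   __attribute__((used)) static void keep_me(void) {}
  //
  // is recorded in LLVMUsed and emitted below roughly as
  //
  //   @llvm.used = appending global [1 x i8*]
  //       [i8* bitcast (void ()* @keep_me to i8*)], section "llvm.metadata"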
  if (List.empty())
    return;

  // Convert List to what ConstantArray needs.
  SmallVector<llvm::Constant *, 8> UsedArray;
  UsedArray.resize(List.size());
  for (unsigned i = 0, e = List.size(); i != e; ++i) {
    UsedArray[i] = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
        cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);
  }

  if (UsedArray.empty())
    return;
  llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());

  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
      llvm::ConstantArray::get(ATy, UsedArray), Name);

  GV->setSection("llvm.metadata");
}

void CodeGenModule::emitLLVMUsed() {
  emitUsed(*this, "llvm.used", LLVMUsed);
  emitUsed(*this, "llvm.compiler.used", LLVMCompilerUsed);
}

void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}

void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
  llvm::SmallString<32> Opt;
  getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}

void CodeGenModule::AddDependentLib(StringRef Lib) {
  llvm::SmallString<24> Opt;
  getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}

/// \brief Add link options implied by the given module, including modules
/// it depends on, using a postorder walk.
static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
                                    SmallVectorImpl<llvm::Metadata *> &Metadata,
                                    llvm::SmallPtrSet<Module *, 16> &Visited) {
  // Import this module's parent.
  if (Mod->Parent && Visited.insert(Mod->Parent).second) {
    addLinkOptionsPostorder(CGM, Mod->Parent, Metadata, Visited);
  }

  // Import this module's dependencies.
  for (unsigned I = Mod->Imports.size(); I > 0; --I) {
    if (Visited.insert(Mod->Imports[I - 1]).second)
      addLinkOptionsPostorder(CGM, Mod->Imports[I - 1], Metadata, Visited);
  }

  // Add linker options to link against the libraries/frameworks
  // described by this module.
  llvm::LLVMContext &Context = CGM.getLLVMContext();
  for (unsigned I = Mod->LinkLibraries.size(); I > 0; --I) {
    // Link against a framework. Frameworks are currently Darwin only, so we
    // don't need to ask TargetCodeGenInfo for the spelling of the linker
    // option.
    if (Mod->LinkLibraries[I - 1].IsFramework) {
      llvm::Metadata *Args[2] = {
          llvm::MDString::get(Context, "-framework"),
          llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library)};

      Metadata.push_back(llvm::MDNode::get(Context, Args));
      continue;
    }

    // Link against a library.
    llvm::SmallString<24> Opt;
    CGM.getTargetCodeGenInfo().getDependentLibraryOption(
        Mod->LinkLibraries[I - 1].Library, Opt);
    auto *OptString = llvm::MDString::get(Context, Opt);
    Metadata.push_back(llvm::MDNode::get(Context, OptString));
  }
}

void CodeGenModule::EmitModuleLinkOptions() {
  // Collect the set of all of the modules we want to visit to emit link
  // options, which is essentially the imported modules and all of their
  // non-explicit child modules.
  llvm::SetVector<clang::Module *> LinkModules;
  llvm::SmallPtrSet<clang::Module *, 16> Visited;
  SmallVector<clang::Module *, 16> Stack;

  // Seed the stack with imported modules.
  for (Module *M : ImportedModules)
    if (Visited.insert(M).second)
      Stack.push_back(M);

  // Find all of the modules to import, making a little effort to prune
  // non-leaf modules.
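  // (Illustration, assuming a Darwin target: '@import Cocoa;' ultimately
  //  contributes metadata of the form !{!"-framework", !"Cocoa"} to the
  //  "Linker Options" module flag assembled at the end of this function.)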
while (!Stack.empty()) { clang::Module *Mod = Stack.pop_back_val(); bool AnyChildren = false; // Visit the submodules of this module. for (clang::Module::submodule_iterator Sub = Mod->submodule_begin(), SubEnd = Mod->submodule_end(); Sub != SubEnd; ++Sub) { // Skip explicit children; they need to be explicitly imported to be // linked against. if ((*Sub)->IsExplicit) continue; if (Visited.insert(*Sub).second) { Stack.push_back(*Sub); AnyChildren = true; } } // We didn't find any children, so add this module to the list of // modules to link against. if (!AnyChildren) { LinkModules.insert(Mod); } } // Add link options for all of the imported modules in reverse topological // order. We don't do anything to try to order import link flags with respect // to linker options inserted by things like #pragma comment(). SmallVector MetadataArgs; Visited.clear(); for (Module *M : LinkModules) if (Visited.insert(M).second) addLinkOptionsPostorder(*this, M, MetadataArgs, Visited); std::reverse(MetadataArgs.begin(), MetadataArgs.end()); LinkerOptionsMetadata.append(MetadataArgs.begin(), MetadataArgs.end()); // Add the linker options metadata flag. getModule().addModuleFlag(llvm::Module::AppendUnique, "Linker Options", llvm::MDNode::get(getLLVMContext(), LinkerOptionsMetadata)); } void CodeGenModule::EmitDeferred() { // Emit code for any potentially referenced deferred decls. Since a // previously unused static decl may become used during the generation of code // for a static function, iterate until no changes are made. if (!DeferredVTables.empty()) { EmitDeferredVTables(); // Emitting a v-table doesn't directly cause more v-tables to // become deferred, although it can cause functions to be // emitted that then need those v-tables. assert(DeferredVTables.empty()); } // Stop if we're out of both deferred v-tables and deferred declarations. if (DeferredDeclsToEmit.empty()) return; // Grab the list of decls to emit. If EmitGlobalDefinition schedules more // work, it will not interfere with this. std::vector CurDeclsToEmit; CurDeclsToEmit.swap(DeferredDeclsToEmit); for (DeferredGlobal &G : CurDeclsToEmit) { GlobalDecl D = G.GD; llvm::GlobalValue *GV = G.GV; G.GV = nullptr; assert(!GV || GV == GetGlobalValue(getMangledName(D))); if (!GV) GV = GetGlobalValue(getMangledName(D)); // Check to see if we've already emitted this. This is necessary // for a couple of reasons: first, decls can end up in the // deferred-decls queue multiple times, and second, decls can end // up with definitions in unusual ways (e.g. by an extern inline // function acquiring a strong function redefinition). Just // ignore these cases. if (GV && !GV->isDeclaration()) continue; // Otherwise, emit the definition and move on to the next one. EmitGlobalDefinition(D, GV); // If we found out that we need to emit more decls, do that recursively. // This has the advantage that the decls are emitted in a DFS and related // ones are close together, which is convenient for testing. if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) { EmitDeferred(); assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty()); } } } void CodeGenModule::EmitGlobalAnnotations() { if (Annotations.empty()) return; // Create a new global variable for the ConstantStruct in the Module. 
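  // Illustration (hypothetical source): 'int g __attribute__((annotate("x")));'
  // contributes one element, built by EmitAnnotateAttr below, an anonymous
  // struct of roughly { i8* @g, i8* <"x">, i8* <file name>, i32 <line> }.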
llvm::Constant *Array = llvm::ConstantArray::get(llvm::ArrayType::get( Annotations[0]->getType(), Annotations.size()), Annotations); auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false, llvm::GlobalValue::AppendingLinkage, Array, "llvm.global.annotations"); gv->setSection(AnnotationSection); } llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) { llvm::Constant *&AStr = AnnotationStrings[Str]; if (AStr) return AStr; // Not found yet, create a new global. llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str); auto *gv = new llvm::GlobalVariable(getModule(), s->getType(), true, llvm::GlobalValue::PrivateLinkage, s, ".str"); gv->setSection(AnnotationSection); gv->setUnnamedAddr(true); AStr = gv; return gv; } llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) { SourceManager &SM = getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(Loc); if (PLoc.isValid()) return EmitAnnotationString(PLoc.getFilename()); return EmitAnnotationString(SM.getBufferName(Loc)); } llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) { SourceManager &SM = getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(L); unsigned LineNo = PLoc.isValid() ? PLoc.getLine() : SM.getExpansionLineNumber(L); return llvm::ConstantInt::get(Int32Ty, LineNo); } llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV, const AnnotateAttr *AA, SourceLocation L) { // Get the globals for file name, annotation, and the line number. llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()), *UnitGV = EmitAnnotationUnit(L), *LineNoCst = EmitAnnotationLineNo(L); // Create the ConstantStruct for the global annotation. llvm::Constant *Fields[4] = { llvm::ConstantExpr::getBitCast(GV, Int8PtrTy), llvm::ConstantExpr::getBitCast(AnnoGV, Int8PtrTy), llvm::ConstantExpr::getBitCast(UnitGV, Int8PtrTy), LineNoCst }; return llvm::ConstantStruct::getAnon(Fields); } void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D, llvm::GlobalValue *GV) { assert(D->hasAttr() && "no annotate attribute"); // Get the struct elements for these annotations. for (const auto *I : D->specific_attrs()) Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation())); } bool CodeGenModule::isInSanitizerBlacklist(llvm::Function *Fn, SourceLocation Loc) const { const auto &SanitizerBL = getContext().getSanitizerBlacklist(); // Blacklist by function name. if (SanitizerBL.isBlacklistedFunction(Fn->getName())) return true; // Blacklist by location. if (!Loc.isInvalid()) return SanitizerBL.isBlacklistedLocation(Loc); // If location is unknown, this may be a compiler-generated function. Assume // it's located in the main file. auto &SM = Context.getSourceManager(); if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) { return SanitizerBL.isBlacklistedFile(MainFile->getName()); } return false; } bool CodeGenModule::isInSanitizerBlacklist(llvm::GlobalVariable *GV, SourceLocation Loc, QualType Ty, StringRef Category) const { // For now globals can be blacklisted only in ASan and KASan. if (!LangOpts.Sanitize.hasOneOf( SanitizerKind::Address | SanitizerKind::KernelAddress)) return false; const auto &SanitizerBL = getContext().getSanitizerBlacklist(); if (SanitizerBL.isBlacklistedGlobal(GV->getName(), Category)) return true; if (SanitizerBL.isBlacklistedLocation(Loc, Category)) return true; // Check global type. 
  if (!Ty.isNull()) {
    // Drill down the array types: if global variable of a fixed type is
    // blacklisted, we also don't instrument arrays of them.
    while (auto AT = dyn_cast<ArrayType>(Ty.getTypePtr()))
      Ty = AT->getElementType();
    Ty = Ty.getCanonicalType().getUnqualifiedType();
    // We only allow blacklisting of record types (classes, structs etc.)
    if (Ty->isRecordType()) {
      std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
      if (SanitizerBL.isBlacklistedType(TypeStr, Category))
        return true;
    }
  }
  return false;
}

bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
  // Never defer when EmitAllDecls is specified.
  if (LangOpts.EmitAllDecls)
    return true;

  return getContext().DeclMustBeEmitted(Global);
}

bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
  if (const auto *FD = dyn_cast<FunctionDecl>(Global))
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      // Implicit template instantiations may change linkage if they are later
      // explicitly instantiated, so they should not be emitted eagerly.
      return false;
  // If OpenMP is enabled and threadprivates must be generated like TLS, delay
  // codegen for global variables, because they may be marked as threadprivate.
  if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
      getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global))
    return false;

  return true;
}

llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
    const CXXUuidofExpr* E) {
  // Sema has verified that IIDSource has a __declspec(uuid()), and that it's
  // well-formed.
  StringRef Uuid = E->getUuidAsStringRef(Context);
  std::string Name = "_GUID_" + Uuid.lower();
  std::replace(Name.begin(), Name.end(), '-', '_');

  // Look for an existing global.
  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
    return GV;

  llvm::Constant *Init = EmitUuidofInitializer(Uuid);
  assert(Init && "failed to initialize as constant");

  auto *GV = new llvm::GlobalVariable(
      getModule(), Init->getType(),
      /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
  if (supportsCOMDAT())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
  return GV;
}

llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
  const AliasAttr *AA = VD->getAttr<AliasAttr>();
  assert(AA && "No alias?");

  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());

  // See if there is already something with the target's name in the module.
  llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
  if (Entry) {
    unsigned AS = getContext().getTargetAddressSpace(VD->getType());
    return llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
  }

  llvm::Constant *Aliasee;
  if (isa<llvm::FunctionType>(DeclTy))
    Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy,
                                      GlobalDecl(cast<FunctionDecl>(VD)),
                                      /*ForVTable=*/false);
  else
    Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
                                    llvm::PointerType::getUnqual(DeclTy),
                                    nullptr);

  auto *F = cast<llvm::GlobalValue>(Aliasee);
  F->setLinkage(llvm::Function::ExternalWeakLinkage);
  WeakRefReferences.insert(F);

  return Aliasee;
}

void CodeGenModule::EmitGlobal(GlobalDecl GD) {
  const auto *Global = cast<ValueDecl>(GD.getDecl());

  // Weak references don't produce any output by themselves.
  if (Global->hasAttr<WeakRefAttr>())
    return;

  // If this is an alias definition (which otherwise looks like a declaration)
  // emit it now.
  if (Global->hasAttr<AliasAttr>())
    return EmitAliasDefinition(GD);

  // If this is CUDA, be selective about which declarations we emit.
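  // (Illustrative effect of the filter below, using the usual CUDA attribute
  //  spellings __device__, __global__, __constant__, __shared__ and __host__:
  //  device compilations emit only device-side declarations, while host
  //  compilations skip anything that is device-only.)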
if (LangOpts.CUDA) { if (LangOpts.CUDAIsDevice) { if (!Global->hasAttr() && !Global->hasAttr() && !Global->hasAttr() && !Global->hasAttr()) return; } else { if (!Global->hasAttr() && ( Global->hasAttr() || Global->hasAttr() || Global->hasAttr())) return; } } // Ignore declarations, they will be emitted on their first use. if (const auto *FD = dyn_cast(Global)) { // Forward declarations are emitted lazily on first use. if (!FD->doesThisDeclarationHaveABody()) { if (!FD->doesDeclarationForceExternallyVisibleDefinition()) return; StringRef MangledName = getMangledName(GD); // Compute the function info and LLVM type. const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); llvm::Type *Ty = getTypes().GetFunctionType(FI); GetOrCreateLLVMFunction(MangledName, Ty, GD, /*ForVTable=*/false, /*DontDefer=*/false); return; } } else { const auto *VD = cast(Global); assert(VD->isFileVarDecl() && "Cannot emit local var decl as global."); if (VD->isThisDeclarationADefinition() != VarDecl::Definition && !Context.isMSStaticDataMemberInlineDefinition(VD)) return; } // Defer code generation to first use when possible, e.g. if this is an inline // function. If the global must always be emitted, do it eagerly if possible // to benefit from cache locality. if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) { // Emit the definition if it can't be deferred. EmitGlobalDefinition(GD); return; } // If we're deferring emission of a C++ variable with an // initializer, remember the order in which it appeared in the file. if (getLangOpts().CPlusPlus && isa(Global) && cast(Global)->hasInit()) { DelayedCXXInitPosition[Global] = CXXGlobalInits.size(); CXXGlobalInits.push_back(nullptr); } StringRef MangledName = getMangledName(GD); if (llvm::GlobalValue *GV = GetGlobalValue(MangledName)) { // The value has already been used and should therefore be emitted. addDeferredDeclToEmit(GV, GD); } else if (MustBeEmitted(Global)) { // The value must be emitted, but cannot be emitted eagerly. assert(!MayBeEmittedEagerly(Global)); addDeferredDeclToEmit(/*GV=*/nullptr, GD); } else { // Otherwise, remember that we saw a deferred decl with this name. The // first use of the mangled name will cause it to move into // DeferredDeclsToEmit. DeferredDecls[MangledName] = GD; } } namespace { struct FunctionIsDirectlyRecursive : public RecursiveASTVisitor { const StringRef Name; const Builtin::Context &BI; bool Result; FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C) : Name(N), BI(C), Result(false) { } typedef RecursiveASTVisitor Base; bool TraverseCallExpr(CallExpr *E) { const FunctionDecl *FD = E->getDirectCallee(); if (!FD) return true; AsmLabelAttr *Attr = FD->getAttr(); if (Attr && Name == Attr->getLabel()) { Result = true; return false; } unsigned BuiltinID = FD->getBuiltinID(); if (!BuiltinID || !BI.isLibFunction(BuiltinID)) return true; StringRef BuiltinName = BI.GetName(BuiltinID); if (BuiltinName.startswith("__builtin_") && Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) { Result = true; return false; } return true; } }; } // isTriviallyRecursive - Check if this function calls another // decl that, because of the asm attribute or the other decl being a builtin, // ends up pointing to itself. bool CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) { StringRef Name; if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) { // asm labels are a special kind of mangling we have to support. 
AsmLabelAttr *Attr = FD->getAttr(); if (!Attr) return false; Name = Attr->getLabel(); } else { Name = FD->getName(); } FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo); Walker.TraverseFunctionDecl(const_cast(FD)); return Walker.Result; } bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) { if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage) return true; const auto *F = cast(GD.getDecl()); if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr()) return false; // PR9614. Avoid cases where the source code is lying to us. An available // externally function should have an equivalent function somewhere else, // but a function that calls itself is clearly not equivalent to the real // implementation. // This happens in glibc's btowc and in some configure checks. return !isTriviallyRecursive(F); } /// If the type for the method's class was generated by /// CGDebugInfo::createContextChain(), the cache contains only a /// limited DIType without any declarations. Since EmitFunctionStart() /// needs to find the canonical declaration for each method, we need /// to construct the complete type prior to emitting the method. void CodeGenModule::CompleteDIClassType(const CXXMethodDecl* D) { if (!D->isInstance()) return; if (CGDebugInfo *DI = getModuleDebugInfo()) if (getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) { const auto *ThisPtr = cast(D->getThisType(getContext())); DI->getOrCreateRecordType(ThisPtr->getPointeeType(), D->getLocation()); } } void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) { const auto *D = cast(GD.getDecl()); PrettyStackTraceDecl CrashInfo(const_cast(D), D->getLocation(), Context.getSourceManager(), "Generating code for declaration"); if (isa(D)) { // At -O0, don't generate IR for functions with available_externally // linkage. if (!shouldEmitFunction(GD)) return; if (const auto *Method = dyn_cast(D)) { CompleteDIClassType(Method); // Make sure to emit the definition(s) before we emit the thunks. // This is necessary for the generation of certain thunks. if (const auto *CD = dyn_cast(Method)) ABI->emitCXXStructor(CD, getFromCtorType(GD.getCtorType())); else if (const auto *DD = dyn_cast(Method)) ABI->emitCXXStructor(DD, getFromDtorType(GD.getDtorType())); else EmitGlobalFunctionDefinition(GD, GV); if (Method->isVirtual()) getVTables().EmitThunks(GD); return; } return EmitGlobalFunctionDefinition(GD, GV); } if (const auto *VD = dyn_cast(D)) return EmitGlobalVarDefinition(VD); llvm_unreachable("Invalid argument to EmitGlobalDefinition()"); } /// GetOrCreateLLVMFunction - If the specified mangled name is not in the /// module, create and return an llvm Function with the specified type. If there /// is something in the module with the specified name, return it potentially /// bitcasted to the right type. /// /// If D is non-null, it specifies a decl that correspond to this. This is used /// to set the attributes on the function when it is first created. llvm::Constant * CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName, llvm::Type *Ty, GlobalDecl GD, bool ForVTable, bool DontDefer, bool IsThunk, llvm::AttributeSet ExtraAttrs) { const Decl *D = GD.getDecl(); // Lookup the entry, lazily creating it if necessary. llvm::GlobalValue *Entry = GetGlobalValue(MangledName); if (Entry) { if (WeakRefReferences.erase(Entry)) { const FunctionDecl *FD = cast_or_null(D); if (FD && !FD->hasAttr()) Entry->setLinkage(llvm::Function::ExternalLinkage); } // Handle dropped DLL attributes. 
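  // e.g. (illustration): a first declaration '__declspec(dllimport) void f();'
  // followed by a plain redeclaration 'void f();' reaches this point with the
  // attribute dropped, so the import storage class is cleared on the existing
  // llvm::Function.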
if (D && !D->hasAttr() && !D->hasAttr()) Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); if (Entry->getType()->getElementType() == Ty) return Entry; // Make sure the result is of the correct type. return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo()); } // This function doesn't have a complete type (for example, the return // type is an incomplete struct). Use a fake type instead, and make // sure not to try to set attributes. bool IsIncompleteFunction = false; llvm::FunctionType *FTy; if (isa(Ty)) { FTy = cast(Ty); } else { FTy = llvm::FunctionType::get(VoidTy, false); IsIncompleteFunction = true; } llvm::Function *F = llvm::Function::Create(FTy, llvm::Function::ExternalLinkage, MangledName, &getModule()); assert(F->getName() == MangledName && "name was uniqued!"); if (D) SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk); if (ExtraAttrs.hasAttributes(llvm::AttributeSet::FunctionIndex)) { llvm::AttrBuilder B(ExtraAttrs, llvm::AttributeSet::FunctionIndex); F->addAttributes(llvm::AttributeSet::FunctionIndex, llvm::AttributeSet::get(VMContext, llvm::AttributeSet::FunctionIndex, B)); } if (!DontDefer) { // All MSVC dtors other than the base dtor are linkonce_odr and delegate to // each other bottoming out with the base dtor. Therefore we emit non-base // dtors on usage, even if there is no dtor definition in the TU. if (D && isa(D) && getCXXABI().useThunkForDtorVariant(cast(D), GD.getDtorType())) addDeferredDeclToEmit(F, GD); // This is the first use or definition of a mangled name. If there is a // deferred decl with this name, remember that we need to emit it at the end // of the file. auto DDI = DeferredDecls.find(MangledName); if (DDI != DeferredDecls.end()) { // Move the potentially referenced deferred decl to the // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we // don't need it anymore). addDeferredDeclToEmit(F, DDI->second); DeferredDecls.erase(DDI); // Otherwise, there are cases we have to worry about where we're // using a declaration for which we must emit a definition but where // we might not find a top-level definition: // - member functions defined inline in their classes // - friend functions defined inline in some class // - special member functions with implicit definitions // If we ever change our AST traversal to walk into class methods, // this will be unnecessary. // // We also don't emit a definition for a function if it's going to be an // entry in a vtable, unless it's already marked as used. } else if (getLangOpts().CPlusPlus && D) { // Look for a declaration that's lexically in a record. for (const auto *FD = cast(D)->getMostRecentDecl(); FD; FD = FD->getPreviousDecl()) { if (isa(FD->getLexicalDeclContext())) { if (FD->doesThisDeclarationHaveABody()) { addDeferredDeclToEmit(F, GD.getWithDecl(FD)); break; } } } } } // Make sure the result is of the requested type. if (!IsIncompleteFunction) { assert(F->getType()->getElementType() == Ty); return F; } llvm::Type *PTy = llvm::PointerType::getUnqual(Ty); return llvm::ConstantExpr::getBitCast(F, PTy); } /// GetAddrOfFunction - Return the address of the given function. If Ty is /// non-null, then this function will use the specified type if it has to /// create it (this occurs when we see a definition of the function). llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty, bool ForVTable, bool DontDefer) { // If there was no specific requested type, just convert it now. 
if (!Ty) Ty = getTypes().ConvertType(cast(GD.getDecl())->getType()); StringRef MangledName = getMangledName(GD); return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer); } /// CreateRuntimeFunction - Create a new runtime function with the specified /// type and name. llvm::Constant * CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name, llvm::AttributeSet ExtraAttrs) { llvm::Constant *C = GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false, /*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs); if (auto *F = dyn_cast(C)) if (F->empty()) F->setCallingConv(getRuntimeCC()); return C; } /// CreateBuiltinFunction - Create a new builtin function with the specified /// type and name. llvm::Constant * CodeGenModule::CreateBuiltinFunction(llvm::FunctionType *FTy, StringRef Name, llvm::AttributeSet ExtraAttrs) { llvm::Constant *C = GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false, /*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs); if (auto *F = dyn_cast(C)) if (F->empty()) F->setCallingConv(getBuiltinCC()); return C; } /// isTypeConstant - Determine whether an object of this type can be emitted /// as a constant. /// /// If ExcludeCtor is true, the duration when the object's constructor runs /// will not be considered. The caller will need to verify that the object is /// not written to during its construction. bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) { if (!Ty.isConstant(Context) && !Ty->isReferenceType()) return false; if (Context.getLangOpts().CPlusPlus) { if (const CXXRecordDecl *Record = Context.getBaseElementType(Ty)->getAsCXXRecordDecl()) return ExcludeCtor && !Record->hasMutableFields() && Record->hasTrivialDestructor(); } return true; } /// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module, /// create and return an llvm GlobalVariable with the specified type. If there /// is something in the module with the specified name, return it potentially /// bitcasted to the right type. /// /// If D is non-null, it specifies a decl that correspond to this. This is used /// to set the attributes on the global when it is first created. llvm::Constant * CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::PointerType *Ty, const VarDecl *D) { // Lookup the entry, lazily creating it if necessary. llvm::GlobalValue *Entry = GetGlobalValue(MangledName); if (Entry) { if (WeakRefReferences.erase(Entry)) { if (D && !D->hasAttr()) Entry->setLinkage(llvm::Function::ExternalLinkage); } // Handle dropped DLL attributes. if (D && !D->hasAttr() && !D->hasAttr()) Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); if (Entry->getType() == Ty) return Entry; // Make sure the result is of the correct type. if (Entry->getType()->getAddressSpace() != Ty->getAddressSpace()) return llvm::ConstantExpr::getAddrSpaceCast(Entry, Ty); return llvm::ConstantExpr::getBitCast(Entry, Ty); } unsigned AddrSpace = GetGlobalVarAddressSpace(D, Ty->getAddressSpace()); auto *GV = new llvm::GlobalVariable( getModule(), Ty->getElementType(), false, llvm::GlobalValue::ExternalLinkage, nullptr, MangledName, nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace); // This is the first use or definition of a mangled name. If there is a // deferred decl with this name, remember that we need to emit it at the end // of the file. 
auto DDI = DeferredDecls.find(MangledName); if (DDI != DeferredDecls.end()) { // Move the potentially referenced deferred decl to the DeferredDeclsToEmit // list, and remove it from DeferredDecls (since we don't need it anymore). addDeferredDeclToEmit(GV, DDI->second); DeferredDecls.erase(DDI); } // Handle things which are present even on external declarations. if (D) { // FIXME: This code is overly simple and should be merged with other global // handling. GV->setConstant(isTypeConstant(D->getType(), false)); GV->setAlignment(getContext().getDeclAlign(D).getQuantity()); setLinkageAndVisibilityForGV(GV, D); if (D->getTLSKind()) { if (D->getTLSKind() == VarDecl::TLS_Dynamic) CXXThreadLocals.push_back(std::make_pair(D, GV)); setTLSMode(GV, *D); } // If required by the ABI, treat declarations of static data members with // inline initializers as definitions. if (getContext().isMSStaticDataMemberInlineDefinition(D)) { EmitGlobalVarDefinition(D); } // Handle XCore specific ABI requirements. if (getTarget().getTriple().getArch() == llvm::Triple::xcore && D->getLanguageLinkage() == CLanguageLinkage && D->getType().isConstant(Context) && isExternallyVisible(D->getLinkageAndVisibility().getLinkage())) GV->setSection(".cp.rodata"); } if (AddrSpace != Ty->getAddressSpace()) return llvm::ConstantExpr::getAddrSpaceCast(GV, Ty); return GV; } llvm::GlobalVariable * CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage) { llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name); llvm::GlobalVariable *OldGV = nullptr; if (GV) { // Check if the variable has the right type. if (GV->getType()->getElementType() == Ty) return GV; // Because C++ name mangling, the only way we can end up with an already // existing global with the same name is if it has been declared extern "C". assert(GV->isDeclaration() && "Declaration has wrong type!"); OldGV = GV; } // Create a new variable. GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true, Linkage, nullptr, Name); if (OldGV) { // Replace occurrences of the old variable if needed. GV->takeName(OldGV); if (!OldGV->use_empty()) { llvm::Constant *NewPtrForOldDecl = llvm::ConstantExpr::getBitCast(GV, OldGV->getType()); OldGV->replaceAllUsesWith(NewPtrForOldDecl); } OldGV->eraseFromParent(); } if (supportsCOMDAT() && GV->isWeakForLinker() && !GV->hasAvailableExternallyLinkage()) GV->setComdat(TheModule.getOrInsertComdat(GV->getName())); return GV; } /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the /// given global variable. If Ty is non-null and if the global doesn't exist, /// then it will be created with the specified type instead of whatever the /// normal requested type would be. llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty) { assert(D->hasGlobalStorage() && "Not a global variable"); QualType ASTTy = D->getType(); if (!Ty) Ty = getTypes().ConvertTypeForMem(ASTTy); llvm::PointerType *PTy = llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy)); StringRef MangledName = getMangledName(D); return GetOrCreateLLVMGlobal(MangledName, PTy, D); } /// CreateRuntimeVariable - Create a new runtime global variable with the /// specified type and name. 
llvm::Constant * CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty, StringRef Name) { return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), nullptr); } void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) { assert(!D->getInit() && "Cannot emit definite definitions here!"); if (!MustBeEmitted(D)) { // If we have not seen a reference to this variable yet, place it // into the deferred declarations table to be emitted if needed // later. StringRef MangledName = getMangledName(D); if (!GetGlobalValue(MangledName)) { DeferredDecls[MangledName] = D; return; } } // The tentative definition is the only definition. EmitGlobalVarDefinition(D); } CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const { return Context.toCharUnitsFromBits( TheDataLayout.getTypeStoreSizeInBits(Ty)); } unsigned CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D, unsigned AddrSpace) { if (LangOpts.CUDA && LangOpts.CUDAIsDevice) { if (D->hasAttr()) AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_constant); else if (D->hasAttr()) AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_shared); else AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_device); } return AddrSpace; } template void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D, llvm::GlobalValue *GV) { if (!getLangOpts().CPlusPlus) return; // Must have 'used' attribute, or else inline assembly can't rely on // the name existing. if (!D->template hasAttr()) return; // Must have internal linkage and an ordinary name. if (!D->getIdentifier() || D->getFormalLinkage() != InternalLinkage) return; // Must be in an extern "C" context. Entities declared directly within // a record are not extern "C" even if the record is in such a context. const SomeDecl *First = D->getFirstDecl(); if (First->getDeclContext()->isRecord() || !First->isInExternCContext()) return; // OK, this is an internal linkage entity inside an extern "C" linkage // specification. Make a note of that so we can give it the "expected" // mangled name if nothing else is using that name. std::pair R = StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV)); // If we have multiple internal linkage entities with the same name // in extern "C" regions, none of them gets that name. if (!R.second) R.first->second = nullptr; } static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) { if (!CGM.supportsCOMDAT()) return false; if (D.hasAttr()) return true; GVALinkage Linkage; if (auto *VD = dyn_cast(&D)) Linkage = CGM.getContext().GetGVALinkageForVariable(VD); else Linkage = CGM.getContext().GetGVALinkageForFunction(cast(&D)); switch (Linkage) { case GVA_Internal: case GVA_AvailableExternally: case GVA_StrongExternal: return false; case GVA_DiscardableODR: case GVA_StrongODR: return true; } llvm_unreachable("No such linkage"); } void CodeGenModule::maybeSetTrivialComdat(const Decl &D, llvm::GlobalObject &GO) { if (!shouldBeInCOMDAT(*this, D)) return; GO.setComdat(TheModule.getOrInsertComdat(GO.getName())); } void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) { llvm::Constant *Init = nullptr; QualType ASTTy = D->getType(); CXXRecordDecl *RD = ASTTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl(); bool NeedsGlobalCtor = false; bool NeedsGlobalDtor = RD && !RD->hasTrivialDestructor(); const VarDecl *InitDecl; const Expr *InitExpr = D->getAnyInitializer(InitDecl); if (!InitExpr) { // This is a tentative definition; tentative definitions are // implicitly initialized with { 0 }. 
    //
    // Note that tentative definitions are only emitted at the end of
    // a translation unit, so they should never have incomplete
    // type. In addition, EmitTentativeDefinition makes sure that we
    // never attempt to emit a tentative definition if a real one
    // exists. A use may still exist, however, so we still may need
    // to do a RAUW.
    assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
    Init = EmitNullConstant(D->getType());
  } else {
    initializedGlobalDecl = GlobalDecl(D);
    Init = EmitConstantInit(*InitDecl);

    if (!Init) {
      QualType T = InitExpr->getType();
      if (D->getType()->isReferenceType())
        T = D->getType();

      if (getLangOpts().CPlusPlus) {
        Init = EmitNullConstant(T);
        NeedsGlobalCtor = true;
      } else {
        ErrorUnsupported(D, "static initializer");
        Init = llvm::UndefValue::get(getTypes().ConvertType(T));
      }
    } else {
      // We don't need an initializer, so remove the entry for the delayed
      // initializer position (just in case this entry was delayed) if we
      // also don't need to register a destructor.
      if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
        DelayedCXXInitPosition.erase(D);
    }
  }

  llvm::Type* InitType = Init->getType();
  llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);

  // Strip off a bitcast if we got one back.
  if (auto *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
    assert(CE->getOpcode() == llvm::Instruction::BitCast ||
           CE->getOpcode() == llvm::Instruction::AddrSpaceCast ||
           // All zero index gep.
           CE->getOpcode() == llvm::Instruction::GetElementPtr);
    Entry = CE->getOperand(0);
  }

  // Entry is now either a Function or GlobalVariable.
  auto *GV = dyn_cast<llvm::GlobalVariable>(Entry);

  // We have a definition after a declaration with the wrong type.
  // We must make a new GlobalVariable* and update everything that used OldGV
  // (a declaration or tentative definition) with the new GlobalVariable*
  // (which will be a definition).
  //
  // This happens if there is a prototype for a global (e.g.
  // "extern int x[];") and then a definition of a different type (e.g.
  // "int x[10];"). This also happens when an initializer has a different type
  // from the type of the global (this happens with unions).
  if (!GV || GV->getType()->getElementType() != InitType ||
      GV->getType()->getAddressSpace() !=
          GetGlobalVarAddressSpace(D,
                                   getContext().getTargetAddressSpace(ASTTy))) {

    // Move the old entry aside so that we'll create a new one.
    Entry->setName(StringRef());

    // Make a new global with the correct type, this is now guaranteed to work.
    GV = cast<llvm::GlobalVariable>(GetAddrOfGlobalVar(D, InitType));

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getBitCast(GV, Entry->getType());
    Entry->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    cast<llvm::GlobalValue>(Entry)->eraseFromParent();
  }

  MaybeHandleStaticInExternC(D, GV);

  if (D->hasAttr<AnnotateAttr>())
    AddGlobalAnnotations(D, GV);

  GV->setInitializer(Init);

  // If it is safe to mark the global 'constant', do so now.
  GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
                  isTypeConstant(D->getType(), true));

  // If it is in a read-only section, mark it 'constant'.
  if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
    const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()];
    if ((SI.SectionFlags & ASTContext::PSF_Write) == 0)
      GV->setConstant(true);
  }

  GV->setAlignment(getContext().getDeclAlign(D).getQuantity());

  // Set the llvm linkage type as appropriate.
  llvm::GlobalValue::LinkageTypes Linkage =
      getLLVMLinkageVarDefinition(D, GV->isConstant());

  // On Darwin, the backing variable for a C++11 thread_local variable always
  // has internal linkage; all accesses should just be calls to the
  // Itanium-specified entry point, which has the normal linkage of the
  // variable.
  if (!D->isStaticLocal() && D->getTLSKind() == VarDecl::TLS_Dynamic &&
      Context.getTargetInfo().getTriple().isMacOSX())
    Linkage = llvm::GlobalValue::InternalLinkage;

  GV->setLinkage(Linkage);
  if (D->hasAttr<DLLImportAttr>())
    GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
  else if (D->hasAttr<DLLExportAttr>())
    GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
  else
    GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);

  if (Linkage == llvm::GlobalVariable::CommonLinkage)
    // common vars aren't constant even if declared const.
    GV->setConstant(false);

  setNonAliasAttributes(D, GV);

  if (D->getTLSKind() && !GV->isThreadLocal()) {
    if (D->getTLSKind() == VarDecl::TLS_Dynamic)
      CXXThreadLocals.push_back(std::make_pair(D, GV));
    setTLSMode(GV, *D);
  }

  maybeSetTrivialComdat(*D, *GV);

  // Emit the initializer function if necessary.
  if (NeedsGlobalCtor || NeedsGlobalDtor)
    EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);

  SanitizerMD->reportGlobalToASan(GV, *D, NeedsGlobalCtor);

  // Emit global variable debug information.
  if (CGDebugInfo *DI = getModuleDebugInfo())
    if (getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
      DI->EmitGlobalVariable(GV, D);
}

static bool isVarDeclStrongDefinition(const ASTContext &Context,
                                      CodeGenModule &CGM, const VarDecl *D,
                                      bool NoCommon) {
  // Don't give variables common linkage if -fno-common was specified unless it
  // was overridden by a NoCommon attribute.
  if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
    return true;

  // C11 6.9.2/2:
  //   A declaration of an identifier for an object that has file scope without
  //   an initializer, and without a storage-class specifier or with the
  //   storage-class specifier static, constitutes a tentative definition.
  if (D->getInit() || D->hasExternalStorage())
    return true;

  // A variable cannot be both common and exist in a section.
  if (D->hasAttr<SectionAttr>())
    return true;

  // Thread local vars aren't considered common linkage.
  if (D->getTLSKind())
    return true;

  // Tentative definitions marked with WeakImportAttr are true definitions.
  if (D->hasAttr<WeakImportAttr>())
    return true;

  // A variable cannot be both common and exist in a comdat.
  if (shouldBeInCOMDAT(CGM, *D))
    return true;

  // Declarations with a required alignment do not have common linkage in MSVC
  // mode.
  if (Context.getLangOpts().MSVCCompat) {
    if (D->hasAttr<AlignedAttr>())
      return true;
    QualType VarType = D->getType();
    if (Context.isAlignmentRequired(VarType))
      return true;

    if (const auto *RT = VarType->getAs<RecordType>()) {
      const RecordDecl *RD = RT->getDecl();
      for (const FieldDecl *FD : RD->fields()) {
        if (FD->isBitField())
          continue;
        if (FD->hasAttr<AlignedAttr>())
          return true;
        if (Context.isAlignmentRequired(FD->getType()))
          return true;
      }
    }
  }

  return false;
}

llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
    const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) {
  if (Linkage == GVA_Internal)
    return llvm::Function::InternalLinkage;

  if (D->hasAttr<WeakAttr>()) {
    if (IsConstantVariable)
      return llvm::GlobalVariable::WeakODRLinkage;
    else
      return llvm::GlobalVariable::WeakAnyLinkage;
  }

  // We are guaranteed to have a strong definition somewhere else,
  // so we can use available_externally linkage.
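  // (For example: with 'extern template int id<int>(int);' in this TU, a use
  //  of id<int> may be emitted available_externally: the body stays visible
  //  for inlining, while the strong definition is owned by another TU.)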
if (Linkage == GVA_AvailableExternally) return llvm::Function::AvailableExternallyLinkage; // Note that Apple's kernel linker doesn't support symbol // coalescing, so we need to avoid linkonce and weak linkages there. // Normally, this means we just map to internal, but for explicit // instantiations we'll map to external. // In C++, the compiler has to emit a definition in every translation unit // that references the function. We should use linkonce_odr because // a) if all references in this translation unit are optimized away, we // don't need to codegen it. b) if the function persists, it needs to be // merged with other definitions. c) C++ has the ODR, so we know the // definition is dependable. if (Linkage == GVA_DiscardableODR) return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage : llvm::Function::InternalLinkage; // An explicit instantiation of a template has weak linkage, since // explicit instantiations can occur in multiple translation units // and must all be equivalent. However, we are not allowed to // throw away these explicit instantiations. if (Linkage == GVA_StrongODR) return !Context.getLangOpts().AppleKext ? llvm::Function::WeakODRLinkage : llvm::Function::ExternalLinkage; // C++ doesn't have tentative definitions and thus cannot have common // linkage. if (!getLangOpts().CPlusPlus && isa(D) && !isVarDeclStrongDefinition(Context, *this, cast(D), CodeGenOpts.NoCommon)) return llvm::GlobalVariable::CommonLinkage; // selectany symbols are externally visible, so use weak instead of // linkonce. MSVC optimizes away references to const selectany globals, so // all definitions should be the same and ODR linkage should be used. // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx if (D->hasAttr()) return llvm::GlobalVariable::WeakODRLinkage; // Otherwise, we have strong external linkage. assert(Linkage == GVA_StrongExternal); return llvm::GlobalVariable::ExternalLinkage; } llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageVarDefinition( const VarDecl *VD, bool IsConstant) { GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD); return getLLVMLinkageForDeclarator(VD, Linkage, IsConstant); } /// Replace the uses of a function that was declared with a non-proto type. /// We want to silently drop extra arguments from call sites static void replaceUsesOfNonProtoConstant(llvm::Constant *old, llvm::Function *newFn) { // Fast path. if (old->use_empty()) return; llvm::Type *newRetTy = newFn->getReturnType(); SmallVector newArgs; for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end(); ui != ue; ) { llvm::Value::use_iterator use = ui++; // Increment before the use is erased. llvm::User *user = use->getUser(); // Recognize and replace uses of bitcasts. Most calls to // unprototyped functions will use bitcasts. if (auto *bitcast = dyn_cast(user)) { if (bitcast->getOpcode() == llvm::Instruction::BitCast) replaceUsesOfNonProtoConstant(bitcast, newFn); continue; } // Recognize calls to the function. llvm::CallSite callSite(user); if (!callSite) continue; if (!callSite.isCallee(&*use)) continue; // If the return types don't match exactly, then we can't // transform this call unless it's dead. if (callSite->getType() != newRetTy && !callSite->use_empty()) continue; // Get the call site's attribute list. SmallVector newAttrs; llvm::AttributeSet oldAttrs = callSite.getAttributes(); // Collect any return attributes from the call. 
if (oldAttrs.hasAttributes(llvm::AttributeSet::ReturnIndex)) newAttrs.push_back( llvm::AttributeSet::get(newFn->getContext(), oldAttrs.getRetAttributes())); // If the function was passed too few arguments, don't transform. unsigned newNumArgs = newFn->arg_size(); if (callSite.arg_size() < newNumArgs) continue; // If extra arguments were passed, we silently drop them. // If any of the types mismatch, we don't transform. unsigned argNo = 0; bool dontTransform = false; for (llvm::Function::arg_iterator ai = newFn->arg_begin(), ae = newFn->arg_end(); ai != ae; ++ai, ++argNo) { if (callSite.getArgument(argNo)->getType() != ai->getType()) { dontTransform = true; break; } // Add any parameter attributes. if (oldAttrs.hasAttributes(argNo + 1)) newAttrs. push_back(llvm:: AttributeSet::get(newFn->getContext(), oldAttrs.getParamAttributes(argNo + 1))); } if (dontTransform) continue; if (oldAttrs.hasAttributes(llvm::AttributeSet::FunctionIndex)) newAttrs.push_back(llvm::AttributeSet::get(newFn->getContext(), oldAttrs.getFnAttributes())); // Okay, we can transform this. Create the new call instruction and copy // over the required information. newArgs.append(callSite.arg_begin(), callSite.arg_begin() + argNo); llvm::CallSite newCall; if (callSite.isCall()) { newCall = llvm::CallInst::Create(newFn, newArgs, "", callSite.getInstruction()); } else { auto *oldInvoke = cast(callSite.getInstruction()); newCall = llvm::InvokeInst::Create(newFn, oldInvoke->getNormalDest(), oldInvoke->getUnwindDest(), newArgs, "", callSite.getInstruction()); } newArgs.clear(); // for the next iteration if (!newCall->getType()->isVoidTy()) newCall->takeName(callSite.getInstruction()); newCall.setAttributes( llvm::AttributeSet::get(newFn->getContext(), newAttrs)); newCall.setCallingConv(callSite.getCallingConv()); // Finally, remove the old call, replacing any uses with the new one. if (!callSite->use_empty()) callSite->replaceAllUsesWith(newCall.getInstruction()); // Copy debug location attached to CI. if (callSite->getDebugLoc()) newCall->setDebugLoc(callSite->getDebugLoc()); callSite->eraseFromParent(); } } /// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we /// implement a function with no prototype, e.g. "int foo() {}". If there are /// existing call uses of the old function in the module, this adjusts them to /// call the new function directly. /// /// This is not just a cleanup: the always_inline pass requires direct calls to /// functions to be able to inline them. If there is a bitcast in the way, it /// won't inline them. Instcombine normally deletes these calls, but it isn't /// run at -O0. static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old, llvm::Function *NewFn) { // If we're redefining a global as a function, don't transform it. if (!isa(Old)) return; replaceUsesOfNonProtoConstant(Old, NewFn); } void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) { TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind(); // If we have a definition, this might be a deferred decl. If the // instantiation is explicit, make sure we emit it at the end. if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition) GetAddrOfGlobalVar(VD); EmitTopLevelDecl(VD); } void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD, llvm::GlobalValue *GV) { const auto *D = cast(GD.getDecl()); // Compute the function info and LLVM type. 
  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);

  // Get or create the prototype for the function.
  if (!GV) {
    llvm::Constant *C =
        GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer*/ true);

    // Strip off a bitcast if we got one back.
    if (auto *CE = dyn_cast<llvm::ConstantExpr>(C)) {
      assert(CE->getOpcode() == llvm::Instruction::BitCast);
      GV = cast<llvm::GlobalValue>(CE->getOperand(0));
    } else {
      GV = cast<llvm::GlobalValue>(C);
    }
  }

  if (!GV->isDeclaration()) {
    getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name);
    GlobalDecl OldGD = Manglings.lookup(GV->getName());
    if (auto *Prev = OldGD.getDecl())
      getDiags().Report(Prev->getLocation(), diag::note_previous_definition);
    return;
  }

  if (GV->getType()->getElementType() != Ty) {
    // If the types mismatch then we have to rewrite the definition.
    assert(GV->isDeclaration() && "Shouldn't replace non-declaration");

    // F is the Function* for the one with the wrong type, we must make a new
    // Function* and update everything that used F (a declaration) with the new
    // Function* (which will be a definition).
    //
    // This happens if there is a prototype for a function
    // (e.g. "int f()") and then a definition of a different type
    // (e.g. "int f(int x)").  Move the old function aside so that it
    // doesn't interfere with GetAddrOfFunction.
    GV->setName(StringRef());
    auto *NewFn = cast<llvm::Function>(GetAddrOfFunction(GD, Ty));

    // This might be an implementation of a function without a
    // prototype, in which case, try to do special replacement of
    // calls which match the new prototype.  The really key thing here
    // is that we also potentially drop arguments from the call site
    // so as to make a direct call, which makes the inliner happier
    // and suppresses a number of optimizer warnings (!) about
    // dropping arguments.
    if (!GV->use_empty()) {
      ReplaceUsesOfNonProtoTypeWithRealFunction(GV, NewFn);
      GV->removeDeadConstantUsers();
    }

    // Replace uses of F with the Function we will endow with a body.
    if (!GV->use_empty()) {
      llvm::Constant *NewPtrForOldDecl =
          llvm::ConstantExpr::getBitCast(NewFn, GV->getType());
      GV->replaceAllUsesWith(NewPtrForOldDecl);
    }

    // Ok, delete the old function now, which is dead.
    GV->eraseFromParent();

    GV = NewFn;
  }

  // We need to set linkage and visibility on the function before
  // generating code for it because various parts of IR generation
  // want to propagate this information down (e.g. to local static
  // declarations).
  auto *Fn = cast<llvm::Function>(GV);
  setFunctionLinkage(GD, Fn);
  setFunctionDLLStorageClass(GD, Fn);

  // FIXME: this is redundant with part of setFunctionDefinitionAttributes
  setGlobalVisibility(Fn, D);

  MaybeHandleStaticInExternC(D, Fn);

  maybeSetTrivialComdat(*D, *Fn);

  CodeGenFunction(*this).GenerateCode(D, Fn, FI);

  setFunctionDefinitionAttributes(D, Fn);
  SetLLVMFunctionAttributesForDefinition(D, Fn);

  if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
    AddGlobalCtor(Fn, CA->getPriority());
  if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
    AddGlobalDtor(Fn, DA->getPriority());
  if (D->hasAttr<AnnotateAttr>())
    AddGlobalAnnotations(D, Fn);
}

void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
  const auto *D = cast<ValueDecl>(GD.getDecl());
  const AliasAttr *AA = D->getAttr<AliasAttr>();
  assert(AA && "Not an alias?");

  StringRef MangledName = getMangledName(GD);

+  if (AA->getAliasee() == MangledName) {
+    Diags.Report(AA->getLocation(), diag::err_cyclic_alias);
+    return;
+  }
+
  // If there is a definition in the module, then it wins over the alias.
  // This is dubious, but allow it to be safe. Just ignore the alias.
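  // (Note on the check added above, illustrative: it rejects a degenerate
  //  self-alias such as 'void f(void) __attribute__((alias("f")));', whose
  //  aliasee resolves to its own mangled name, rather than building a cyclic
  //  llvm::GlobalAlias.)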
  // If there is a definition in the module, then it wins over the alias.
  // This is dubious, but allow it to be safe.  Just ignore the alias.
  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  Aliases.push_back(GD);

  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());

  // Create a reference to the named value.  This ensures that it is emitted
  // if a deferred decl.
  llvm::Constant *Aliasee;
  if (isa<llvm::FunctionType>(DeclTy))
    Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GD,
                                      /*ForVTable=*/false);
  else
    Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
                                    llvm::PointerType::getUnqual(DeclTy),
                                    /*D=*/nullptr);

  // Create the new alias itself, but don't set a name yet.
  auto *GA = llvm::GlobalAlias::create(
      cast<llvm::PointerType>(Aliasee->getType()),
      llvm::Function::ExternalLinkage, "", Aliasee, &getModule());

  if (Entry) {
    if (GA->getAliasee() == Entry) {
      Diags.Report(AA->getLocation(), diag::err_cyclic_alias);
      return;
    }

    assert(Entry->isDeclaration());

    // If there is a declaration in the module, then we had an extern followed
    // by the alias, as in:
    //   extern int test6();
    //   ...
    //   int test6() __attribute__((alias("test7")));
    //
    // Remove it and replace uses of it with the alias.
    GA->takeName(Entry);

    Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
                                                             Entry->getType()));
    Entry->eraseFromParent();
  } else {
    GA->setName(MangledName);
  }

  // Set attributes which are particular to an alias; this is a
  // specialization of the attributes which may be set on a global
  // variable/function.
  if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
      D->isWeakImported()) {
    GA->setLinkage(llvm::Function::WeakAnyLinkage);
  }

  if (const auto *VD = dyn_cast<VarDecl>(D))
    if (VD->getTLSKind())
      setTLSMode(GA, *VD);

  setAliasAttributes(D, GA);
}

llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
                                            ArrayRef<llvm::Type*> Tys) {
  return llvm::Intrinsic::getDeclaration(&getModule(),
                                         (llvm::Intrinsic::ID)IID, Tys);
}

static llvm::StringMapEntry<llvm::Constant *> &
GetConstantCFStringEntry(llvm::StringMap<llvm::Constant *> &Map,
                         const StringLiteral *Literal, bool TargetIsLSB,
                         bool &IsUTF16, unsigned &StringLength) {
  StringRef String = Literal->getString();
  unsigned NumBytes = String.size();

  // Check for simple case.
  if (!Literal->containsNonAsciiOrNull()) {
    StringLength = NumBytes;
    return *Map.insert(std::make_pair(String, nullptr)).first;
  }

  // Otherwise, convert the UTF8 literals into a string of shorts.
  IsUTF16 = true;

  SmallVector<UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
  const UTF8 *FromPtr = (const UTF8 *)String.data();
  UTF16 *ToPtr = &ToBuf[0];

  (void)ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
                           &ToPtr, ToPtr + NumBytes,
                           strictConversion);

  // ConvertUTF8toUTF16 returns the length in ToPtr.
  StringLength = ToPtr - &ToBuf[0];

  // Add an explicit null.
  *ToPtr = 0;
  return *Map.insert(std::make_pair(
                         StringRef(reinterpret_cast<const char *>(ToBuf.data()),
                                   (StringLength + 1) * 2),
                         nullptr)).first;
}

static llvm::StringMapEntry<llvm::Constant *> &
GetConstantStringEntry(llvm::StringMap<llvm::Constant *> &Map,
                       const StringLiteral *Literal, unsigned &StringLength) {
  StringRef String = Literal->getString();
  StringLength = String.size();
  return *Map.insert(std::make_pair(String, nullptr)).first;
}

llvm::Constant *
CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
  unsigned StringLength = 0;
  bool isUTF16 = false;
  llvm::StringMapEntry<llvm::Constant *> &Entry =
      GetConstantCFStringEntry(CFConstantStringMap, Literal,
                               getDataLayout().isLittleEndian(), isUTF16,
                               StringLength);

  if (auto *C = Entry.second)
    return C;

  llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
  llvm::Constant *Zeros[] = { Zero, Zero };
  llvm::Value *V;

  // If we don't already have it, get __CFConstantStringClassReference.
if (!CFConstantStringClassRef) { llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy); Ty = llvm::ArrayType::get(Ty, 0); llvm::Constant *GV = CreateRuntimeVariable(Ty, "__CFConstantStringClassReference"); // Decay array -> ptr V = llvm::ConstantExpr::getGetElementPtr(Ty, GV, Zeros); CFConstantStringClassRef = V; } else V = CFConstantStringClassRef; QualType CFTy = getContext().getCFConstantStringType(); auto *STy = cast(getTypes().ConvertType(CFTy)); llvm::Constant *Fields[4]; // Class pointer. Fields[0] = cast(V); // Flags. llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy); Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0) : llvm::ConstantInt::get(Ty, 0x07C8); // String pointer. llvm::Constant *C = nullptr; if (isUTF16) { ArrayRef Arr = llvm::makeArrayRef( reinterpret_cast(const_cast(Entry.first().data())), Entry.first().size() / 2); C = llvm::ConstantDataArray::get(VMContext, Arr); } else { C = llvm::ConstantDataArray::getString(VMContext, Entry.first()); } // Note: -fwritable-strings doesn't make the backing store strings of // CFStrings writable. (See ) auto *GV = new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, C, ".str"); GV->setUnnamedAddr(true); // Don't enforce the target's minimum global alignment, since the only use // of the string is via this class initializer. // FIXME: We set the section explicitly to avoid a bug in ld64 224.1. Without // it LLVM can merge the string with a non unnamed_addr one during LTO. Doing // that changes the section it ends in, which surprises ld64. if (isUTF16) { CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy); GV->setAlignment(Align.getQuantity()); GV->setSection("__TEXT,__ustring"); } else { CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy); GV->setAlignment(Align.getQuantity()); GV->setSection("__TEXT,__cstring,cstring_literals"); } // String. Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros); if (isUTF16) // Cast the UTF16 string to the correct type. Fields[2] = llvm::ConstantExpr::getBitCast(Fields[2], Int8PtrTy); // String length. Ty = getTypes().ConvertType(getContext().LongTy); Fields[3] = llvm::ConstantInt::get(Ty, StringLength); // The struct. C = llvm::ConstantStruct::get(STy, Fields); GV = new llvm::GlobalVariable(getModule(), C->getType(), true, llvm::GlobalVariable::PrivateLinkage, C, "_unnamed_cfstring_"); GV->setSection("__DATA,__cfstring"); Entry.second = GV; return GV; } llvm::GlobalVariable * CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) { unsigned StringLength = 0; llvm::StringMapEntry &Entry = GetConstantStringEntry(CFConstantStringMap, Literal, StringLength); if (auto *C = Entry.second) return C; llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty); llvm::Constant *Zeros[] = { Zero, Zero }; llvm::Value *V; // If we don't already have it, get _NSConstantStringClassReference. if (!ConstantStringClassRef) { std::string StringClass(getLangOpts().ObjCConstantStringClass); llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy); llvm::Constant *GV; if (LangOpts.ObjCRuntime.isNonFragile()) { std::string str = StringClass.empty() ? "OBJC_CLASS_$_NSConstantString" : "OBJC_CLASS_$_" + StringClass; GV = getObjCRuntime().GetClassGlobal(str); // Make sure the result is of the correct type. 
llvm::Type *PTy = llvm::PointerType::getUnqual(Ty); V = llvm::ConstantExpr::getBitCast(GV, PTy); ConstantStringClassRef = V; } else { std::string str = StringClass.empty() ? "_NSConstantStringClassReference" : "_" + StringClass + "ClassReference"; llvm::Type *PTy = llvm::ArrayType::get(Ty, 0); GV = CreateRuntimeVariable(PTy, str); // Decay array -> ptr V = llvm::ConstantExpr::getGetElementPtr(PTy, GV, Zeros); ConstantStringClassRef = V; } } else V = ConstantStringClassRef; if (!NSConstantStringType) { // Construct the type for a constant NSString. RecordDecl *D = Context.buildImplicitRecord("__builtin_NSString"); D->startDefinition(); QualType FieldTypes[3]; // const int *isa; FieldTypes[0] = Context.getPointerType(Context.IntTy.withConst()); // const char *str; FieldTypes[1] = Context.getPointerType(Context.CharTy.withConst()); // unsigned int length; FieldTypes[2] = Context.UnsignedIntTy; // Create fields for (unsigned i = 0; i < 3; ++i) { FieldDecl *Field = FieldDecl::Create(Context, D, SourceLocation(), SourceLocation(), nullptr, FieldTypes[i], /*TInfo=*/nullptr, /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); Field->setAccess(AS_public); D->addDecl(Field); } D->completeDefinition(); QualType NSTy = Context.getTagDeclType(D); NSConstantStringType = cast(getTypes().ConvertType(NSTy)); } llvm::Constant *Fields[3]; // Class pointer. Fields[0] = cast(V); // String pointer. llvm::Constant *C = llvm::ConstantDataArray::getString(VMContext, Entry.first()); llvm::GlobalValue::LinkageTypes Linkage; bool isConstant; Linkage = llvm::GlobalValue::PrivateLinkage; isConstant = !LangOpts.WritableStrings; auto *GV = new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C, ".str"); GV->setUnnamedAddr(true); // Don't enforce the target's minimum global alignment, since the only use // of the string is via this class initializer. CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy); GV->setAlignment(Align.getQuantity()); Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros); // String length. llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy); Fields[2] = llvm::ConstantInt::get(Ty, StringLength); // The struct. C = llvm::ConstantStruct::get(NSConstantStringType, Fields); GV = new llvm::GlobalVariable(getModule(), C->getType(), true, llvm::GlobalVariable::PrivateLinkage, C, "_unnamed_nsstring_"); const char *NSStringSection = "__OBJC,__cstring_object,regular,no_dead_strip"; const char *NSStringNonFragileABISection = "__DATA,__objc_stringobj,regular,no_dead_strip"; // FIXME. Fix section. GV->setSection(LangOpts.ObjCRuntime.isNonFragile() ? 
NSStringNonFragileABISection : NSStringSection); Entry.second = GV; return GV; } QualType CodeGenModule::getObjCFastEnumerationStateType() { if (ObjCFastEnumerationStateType.isNull()) { RecordDecl *D = Context.buildImplicitRecord("__objcFastEnumerationState"); D->startDefinition(); QualType FieldTypes[] = { Context.UnsignedLongTy, Context.getPointerType(Context.getObjCIdType()), Context.getPointerType(Context.UnsignedLongTy), Context.getConstantArrayType(Context.UnsignedLongTy, llvm::APInt(32, 5), ArrayType::Normal, 0) }; for (size_t i = 0; i < 4; ++i) { FieldDecl *Field = FieldDecl::Create(Context, D, SourceLocation(), SourceLocation(), nullptr, FieldTypes[i], /*TInfo=*/nullptr, /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); Field->setAccess(AS_public); D->addDecl(Field); } D->completeDefinition(); ObjCFastEnumerationStateType = Context.getTagDeclType(D); } return ObjCFastEnumerationStateType; } llvm::Constant * CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) { assert(!E->getType()->isPointerType() && "Strings are always arrays"); // Don't emit it as the address of the string, emit the string data itself // as an inline array. if (E->getCharByteWidth() == 1) { SmallString<64> Str(E->getString()); // Resize the string to the right size, which is indicated by its type. const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType()); Str.resize(CAT->getSize().getZExtValue()); return llvm::ConstantDataArray::getString(VMContext, Str, false); } auto *AType = cast(getTypes().ConvertType(E->getType())); llvm::Type *ElemTy = AType->getElementType(); unsigned NumElements = AType->getNumElements(); // Wide strings have either 2-byte or 4-byte elements. if (ElemTy->getPrimitiveSizeInBits() == 16) { SmallVector Elements; Elements.reserve(NumElements); for(unsigned i = 0, e = E->getLength(); i != e; ++i) Elements.push_back(E->getCodeUnit(i)); Elements.resize(NumElements); return llvm::ConstantDataArray::get(VMContext, Elements); } assert(ElemTy->getPrimitiveSizeInBits() == 32); SmallVector Elements; Elements.reserve(NumElements); for(unsigned i = 0, e = E->getLength(); i != e; ++i) Elements.push_back(E->getCodeUnit(i)); Elements.resize(NumElements); return llvm::ConstantDataArray::get(VMContext, Elements); } static llvm::GlobalVariable * GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT, CodeGenModule &CGM, StringRef GlobalName, unsigned Alignment) { // OpenCL v1.2 s6.5.3: a string literal is in the constant address space. unsigned AddrSpace = 0; if (CGM.getLangOpts().OpenCL) AddrSpace = CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant); llvm::Module &M = CGM.getModule(); // Create a global variable for this string auto *GV = new llvm::GlobalVariable( M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName, nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace); GV->setAlignment(Alignment); GV->setUnnamedAddr(true); if (GV->isWeakForLinker()) { assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals"); GV->setComdat(M.getOrInsertComdat(GV->getName())); } return GV; } /// GetAddrOfConstantStringFromLiteral - Return a pointer to a /// constant array for the given string literal. 
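// Note on GetConstantArrayFromStringLiteral above: the literal is resized to
// the array type's extent before emission. A sketch of the effect, assuming a
// C declaration (illustrative, not from this file):
//
//   char buf[8] = "abc";   // emitted as the 8-byte array "abc\0\0\0\0\0"
//
// Str.resize() supplies the trailing zeros, which is why getString() is
// called with AddNull=false.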
llvm::GlobalVariable * CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name) { auto Alignment = getContext().getAlignOfGlobalVarInChars(S->getType()).getQuantity(); llvm::Constant *C = GetConstantArrayFromStringLiteral(S); llvm::GlobalVariable **Entry = nullptr; if (!LangOpts.WritableStrings) { Entry = &ConstantStringMap[C]; if (auto GV = *Entry) { if (Alignment > GV->getAlignment()) GV->setAlignment(Alignment); return GV; } } SmallString<256> MangledNameBuffer; StringRef GlobalVariableName; llvm::GlobalValue::LinkageTypes LT; // Mangle the string literal if the ABI allows for it. However, we cannot // do this if we are compiling with ASan or -fwritable-strings because they // rely on strings having normal linkage. if (!LangOpts.WritableStrings && !LangOpts.Sanitize.has(SanitizerKind::Address) && getCXXABI().getMangleContext().shouldMangleStringLiteral(S)) { llvm::raw_svector_ostream Out(MangledNameBuffer); getCXXABI().getMangleContext().mangleStringLiteral(S, Out); Out.flush(); LT = llvm::GlobalValue::LinkOnceODRLinkage; GlobalVariableName = MangledNameBuffer; } else { LT = llvm::GlobalValue::PrivateLinkage; GlobalVariableName = Name; } auto GV = GenerateStringLiteral(C, LT, *this, GlobalVariableName, Alignment); if (Entry) *Entry = GV; SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "", QualType()); return GV; } /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant /// array for the given ObjCEncodeExpr node. llvm::GlobalVariable * CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) { std::string Str; getContext().getObjCEncodingForType(E->getEncodedType(), Str); return GetAddrOfConstantCString(Str); } /// GetAddrOfConstantCString - Returns a pointer to a character array containing /// the literal and a terminating '\0' character. /// The result has pointer to array type. llvm::GlobalVariable *CodeGenModule::GetAddrOfConstantCString( const std::string &Str, const char *GlobalName, unsigned Alignment) { StringRef StrWithNull(Str.c_str(), Str.size() + 1); if (Alignment == 0) { Alignment = getContext() .getAlignOfGlobalVarInChars(getContext().CharTy) .getQuantity(); } llvm::Constant *C = llvm::ConstantDataArray::getString(getLLVMContext(), StrWithNull, false); // Don't share any string literals if strings aren't constant. llvm::GlobalVariable **Entry = nullptr; if (!LangOpts.WritableStrings) { Entry = &ConstantStringMap[C]; if (auto GV = *Entry) { if (Alignment > GV->getAlignment()) GV->setAlignment(Alignment); return GV; } } // Get the default prefix if a name wasn't specified. if (!GlobalName) GlobalName = ".str"; // Create a global variable for this. auto GV = GenerateStringLiteral(C, llvm::GlobalValue::PrivateLinkage, *this, GlobalName, Alignment); if (Entry) *Entry = GV; return GV; } llvm::Constant *CodeGenModule::GetAddrOfGlobalTemporary( const MaterializeTemporaryExpr *E, const Expr *Init) { assert((E->getStorageDuration() == SD_Static || E->getStorageDuration() == SD_Thread) && "not a global temporary"); const auto *VD = cast(E->getExtendingDecl()); // If we're not materializing a subobject of the temporary, keep the // cv-qualifiers from the type of the MaterializeTemporaryExpr. 
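// GetAddrOfGlobalTemporary (begun above, continued below) materializes
// lifetime-extended temporaries with static or thread storage duration. A
// minimal C++ sketch (illustrative, not from this file):
//
//   const int &r = 42;   // at namespace scope: the temporary holding 42
//                        // becomes its own global variable, mangled as a
//                        // reference temporary (roughly _ZGR1r_ on Itanium)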
QualType MaterializedType = Init->getType(); if (Init == E->GetTemporaryExpr()) MaterializedType = E->getType(); llvm::Constant *&Slot = MaterializedGlobalTemporaryMap[E]; if (Slot) return Slot; // FIXME: If an externally-visible declaration extends multiple temporaries, // we need to give each temporary the same name in every translation unit (and // we also need to make the temporaries externally-visible). SmallString<256> Name; llvm::raw_svector_ostream Out(Name); getCXXABI().getMangleContext().mangleReferenceTemporary( VD, E->getManglingNumber(), Out); Out.flush(); APValue *Value = nullptr; if (E->getStorageDuration() == SD_Static) { // We might have a cached constant initializer for this temporary. Note // that this might have a different value from the value computed by // evaluating the initializer if the surrounding constant expression // modifies the temporary. Value = getContext().getMaterializedTemporaryValue(E, false); if (Value && Value->isUninit()) Value = nullptr; } // Try evaluating it now, it might have a constant initializer. Expr::EvalResult EvalResult; if (!Value && Init->EvaluateAsRValue(EvalResult, getContext()) && !EvalResult.hasSideEffects()) Value = &EvalResult.Val; llvm::Constant *InitialValue = nullptr; bool Constant = false; llvm::Type *Type; if (Value) { // The temporary has a constant initializer, use it. InitialValue = EmitConstantValue(*Value, MaterializedType, nullptr); Constant = isTypeConstant(MaterializedType, /*ExcludeCtor*/Value); Type = InitialValue->getType(); } else { // No initializer, the initialization will be provided when we // initialize the declaration which performed lifetime extension. Type = getTypes().ConvertTypeForMem(MaterializedType); } // Create a global variable for this lifetime-extended temporary. llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD, Constant); if (Linkage == llvm::GlobalVariable::ExternalLinkage) { const VarDecl *InitVD; if (VD->isStaticDataMember() && VD->getAnyInitializer(InitVD) && isa(InitVD->getLexicalDeclContext())) { // Temporaries defined inside a class get linkonce_odr linkage because the // class can be defined in multipe translation units. Linkage = llvm::GlobalVariable::LinkOnceODRLinkage; } else { // There is no need for this temporary to have external linkage if the // VarDecl has external linkage. Linkage = llvm::GlobalVariable::InternalLinkage; } } unsigned AddrSpace = GetGlobalVarAddressSpace( VD, getContext().getTargetAddressSpace(MaterializedType)); auto *GV = new llvm::GlobalVariable( getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(), /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace); setGlobalVisibility(GV, VD); GV->setAlignment( getContext().getTypeAlignInChars(MaterializedType).getQuantity()); if (supportsCOMDAT() && GV->isWeakForLinker()) GV->setComdat(TheModule.getOrInsertComdat(GV->getName())); if (VD->getTLSKind()) setTLSMode(GV, *VD); Slot = GV; return GV; } /// EmitObjCPropertyImplementations - Emit information for synthesized /// properties for an implementation. void CodeGenModule::EmitObjCPropertyImplementations(const ObjCImplementationDecl *D) { for (const auto *PID : D->property_impls()) { // Dynamic is just for type-checking. if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) { ObjCPropertyDecl *PD = PID->getPropertyDecl(); // Determine which methods need to be implemented, some may have // been overridden. 
Note that ::isPropertyAccessor is not the method // we want, that just indicates if the decl came from a // property. What we want to know is if the method is defined in // this implementation. if (!D->getInstanceMethod(PD->getGetterName())) CodeGenFunction(*this).GenerateObjCGetter( const_cast(D), PID); if (!PD->isReadOnly() && !D->getInstanceMethod(PD->getSetterName())) CodeGenFunction(*this).GenerateObjCSetter( const_cast(D), PID); } } } static bool needsDestructMethod(ObjCImplementationDecl *impl) { const ObjCInterfaceDecl *iface = impl->getClassInterface(); for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin(); ivar; ivar = ivar->getNextIvar()) if (ivar->getType().isDestructedType()) return true; return false; } static bool AllTrivialInitializers(CodeGenModule &CGM, ObjCImplementationDecl *D) { CodeGenFunction CGF(CGM); for (ObjCImplementationDecl::init_iterator B = D->init_begin(), E = D->init_end(); B != E; ++B) { CXXCtorInitializer *CtorInitExp = *B; Expr *Init = CtorInitExp->getInit(); if (!CGF.isTrivialInitializer(Init)) return false; } return true; } /// EmitObjCIvarInitializations - Emit information for ivar initialization /// for an implementation. void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) { // We might need a .cxx_destruct even if we don't have any ivar initializers. if (needsDestructMethod(D)) { IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct"); Selector cxxSelector = getContext().Selectors.getSelector(0, &II); ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create(getContext(), D->getLocation(), D->getLocation(), cxxSelector, getContext().VoidTy, nullptr, D, /*isInstance=*/true, /*isVariadic=*/false, /*isPropertyAccessor=*/true, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required); D->addInstanceMethod(DTORMethod); CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false); D->setHasDestructors(true); } // If the implementation doesn't have any ivar initializers, we don't need // a .cxx_construct. if (D->getNumIvarInitializers() == 0 || AllTrivialInitializers(*this, D)) return; IdentifierInfo *II = &getContext().Idents.get(".cxx_construct"); Selector cxxSelector = getContext().Selectors.getSelector(0, &II); // The constructor returns 'self'. ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(getContext(), D->getLocation(), D->getLocation(), cxxSelector, getContext().getObjCIdType(), nullptr, D, /*isInstance=*/true, /*isVariadic=*/false, /*isPropertyAccessor=*/true, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required); D->addInstanceMethod(CTORMethod); CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true); D->setHasNonZeroConstructors(true); } /// EmitNamespace - Emit all declarations in a namespace. void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) { for (auto *I : ND->decls()) { if (const auto *VD = dyn_cast(I)) if (VD->getTemplateSpecializationKind() != TSK_ExplicitSpecialization && VD->getTemplateSpecializationKind() != TSK_Undeclared) continue; EmitTopLevelDecl(I); } } // EmitLinkageSpec - Emit all declarations in a linkage spec. void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) { if (LSD->getLanguage() != LinkageSpecDecl::lang_c && LSD->getLanguage() != LinkageSpecDecl::lang_cxx) { ErrorUnsupported(LSD, "linkage spec"); return; } for (auto *I : LSD->decls()) { // Meta-data for ObjC class includes references to implemented methods. // Generate class's method definitions first. 
if (auto *OID = dyn_cast(I)) { for (auto *M : OID->methods()) EmitTopLevelDecl(M); } EmitTopLevelDecl(I); } } /// EmitTopLevelDecl - Emit code for a single top level declaration. void CodeGenModule::EmitTopLevelDecl(Decl *D) { // Ignore dependent declarations. if (D->getDeclContext() && D->getDeclContext()->isDependentContext()) return; switch (D->getKind()) { case Decl::CXXConversion: case Decl::CXXMethod: case Decl::Function: // Skip function templates if (cast(D)->getDescribedFunctionTemplate() || cast(D)->isLateTemplateParsed()) return; EmitGlobal(cast(D)); // Always provide some coverage mapping // even for the functions that aren't emitted. AddDeferredUnusedCoverageMapping(D); break; case Decl::Var: // Skip variable templates if (cast(D)->getDescribedVarTemplate()) return; case Decl::VarTemplateSpecialization: EmitGlobal(cast(D)); break; // Indirect fields from global anonymous structs and unions can be // ignored; only the actual variable requires IR gen support. case Decl::IndirectField: break; // C++ Decls case Decl::Namespace: EmitNamespace(cast(D)); break; // No code generation needed. case Decl::UsingShadow: case Decl::ClassTemplate: case Decl::VarTemplate: case Decl::VarTemplatePartialSpecialization: case Decl::FunctionTemplate: case Decl::TypeAliasTemplate: case Decl::Block: case Decl::Empty: break; case Decl::Using: // using X; [C++] if (CGDebugInfo *DI = getModuleDebugInfo()) DI->EmitUsingDecl(cast(*D)); return; case Decl::NamespaceAlias: if (CGDebugInfo *DI = getModuleDebugInfo()) DI->EmitNamespaceAlias(cast(*D)); return; case Decl::UsingDirective: // using namespace X; [C++] if (CGDebugInfo *DI = getModuleDebugInfo()) DI->EmitUsingDirective(cast(*D)); return; case Decl::CXXConstructor: // Skip function templates if (cast(D)->getDescribedFunctionTemplate() || cast(D)->isLateTemplateParsed()) return; getCXXABI().EmitCXXConstructors(cast(D)); break; case Decl::CXXDestructor: if (cast(D)->isLateTemplateParsed()) return; getCXXABI().EmitCXXDestructors(cast(D)); break; case Decl::StaticAssert: // Nothing to do. break; // Objective-C Decls // Forward declarations, no (immediate) code generation. case Decl::ObjCInterface: case Decl::ObjCCategory: break; case Decl::ObjCProtocol: { auto *Proto = cast(D); if (Proto->isThisDeclarationADefinition()) ObjCRuntime->GenerateProtocol(Proto); break; } case Decl::ObjCCategoryImpl: // Categories have properties but don't support synthesize so we // can ignore them here. ObjCRuntime->GenerateCategory(cast(D)); break; case Decl::ObjCImplementation: { auto *OMD = cast(D); EmitObjCPropertyImplementations(OMD); EmitObjCIvarInitializations(OMD); ObjCRuntime->GenerateClass(OMD); // Emit global variable debug information. if (CGDebugInfo *DI = getModuleDebugInfo()) if (getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) DI->getOrCreateInterfaceType(getContext().getObjCInterfaceType( OMD->getClassInterface()), OMD->getLocation()); break; } case Decl::ObjCMethod: { auto *OMD = cast(D); // If this is not a prototype, emit the body. if (OMD->getBody()) CodeGenFunction(*this).GenerateObjCMethod(OMD); break; } case Decl::ObjCCompatibleAlias: ObjCRuntime->RegisterAlias(cast(D)); break; case Decl::LinkageSpec: EmitLinkageSpec(cast(D)); break; case Decl::FileScopeAsm: { // File-scope asm is ignored during device-side CUDA compilation. 
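// For example (illustrative, not from this file):
//
//   __asm__(".globl _marker");   // at file scope
//
// becomes module-level inline asm via appendModuleInlineAsm() below, but is
// dropped entirely when compiling the device side of CUDA.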
if (LangOpts.CUDA && LangOpts.CUDAIsDevice) break; auto *AD = cast(D); getModule().appendModuleInlineAsm(AD->getAsmString()->getString()); break; } case Decl::Import: { auto *Import = cast(D); // Ignore import declarations that come from imported modules. if (clang::Module *Owner = Import->getImportedOwningModule()) { if (getLangOpts().CurrentModule.empty() || Owner->getTopLevelModule()->Name == getLangOpts().CurrentModule) break; } if (CGDebugInfo *DI = getModuleDebugInfo()) DI->EmitImportDecl(*Import); ImportedModules.insert(Import->getImportedModule()); break; } case Decl::OMPThreadPrivate: EmitOMPThreadPrivateDecl(cast(D)); break; case Decl::ClassTemplateSpecialization: { const auto *Spec = cast(D); if (DebugInfo && Spec->getSpecializationKind() == TSK_ExplicitInstantiationDefinition && Spec->hasDefinition()) DebugInfo->completeTemplateDefinition(*Spec); break; } default: // Make sure we handled everything we should, every other kind is a // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind // function. Need to recode Decl::Kind to do that easily. assert(isa(D) && "Unsupported decl kind"); break; } } void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) { // Do we need to generate coverage mapping? if (!CodeGenOpts.CoverageMapping) return; switch (D->getKind()) { case Decl::CXXConversion: case Decl::CXXMethod: case Decl::Function: case Decl::ObjCMethod: case Decl::CXXConstructor: case Decl::CXXDestructor: { if (!cast(D)->hasBody()) return; auto I = DeferredEmptyCoverageMappingDecls.find(D); if (I == DeferredEmptyCoverageMappingDecls.end()) DeferredEmptyCoverageMappingDecls[D] = true; break; } default: break; }; } void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) { // Do we need to generate coverage mapping? if (!CodeGenOpts.CoverageMapping) return; if (const auto *Fn = dyn_cast(D)) { if (Fn->isTemplateInstantiation()) ClearUnusedCoverageMapping(Fn->getTemplateInstantiationPattern()); } auto I = DeferredEmptyCoverageMappingDecls.find(D); if (I == DeferredEmptyCoverageMappingDecls.end()) DeferredEmptyCoverageMappingDecls[D] = false; else I->second = false; } void CodeGenModule::EmitDeferredUnusedCoverageMappings() { std::vector DeferredDecls; for (const auto &I : DeferredEmptyCoverageMappingDecls) { if (!I.second) continue; DeferredDecls.push_back(I.first); } // Sort the declarations by their location to make sure that the tests get a // predictable order for the coverage mapping for the unused declarations. if (CodeGenOpts.DumpCoverageMapping) std::sort(DeferredDecls.begin(), DeferredDecls.end(), [] (const Decl *LHS, const Decl *RHS) { return LHS->getLocStart() < RHS->getLocStart(); }); for (const auto *D : DeferredDecls) { switch (D->getKind()) { case Decl::CXXConversion: case Decl::CXXMethod: case Decl::Function: case Decl::ObjCMethod: { CodeGenPGO PGO(*this); GlobalDecl GD(cast(D)); PGO.emitEmptyCounterMapping(D, getMangledName(GD), getFunctionLinkage(GD)); break; } case Decl::CXXConstructor: { CodeGenPGO PGO(*this); GlobalDecl GD(cast(D), Ctor_Base); PGO.emitEmptyCounterMapping(D, getMangledName(GD), getFunctionLinkage(GD)); break; } case Decl::CXXDestructor: { CodeGenPGO PGO(*this); GlobalDecl GD(cast(D), Dtor_Base); PGO.emitEmptyCounterMapping(D, getMangledName(GD), getFunctionLinkage(GD)); break; } default: break; }; } } /// Turns the given pointer into a constant. 
static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context, const void *Ptr) { uintptr_t PtrInt = reinterpret_cast(Ptr); llvm::Type *i64 = llvm::Type::getInt64Ty(Context); return llvm::ConstantInt::get(i64, PtrInt); } static void EmitGlobalDeclMetadata(CodeGenModule &CGM, llvm::NamedMDNode *&GlobalMetadata, GlobalDecl D, llvm::GlobalValue *Addr) { if (!GlobalMetadata) GlobalMetadata = CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs"); // TODO: should we report variant information for ctors/dtors? llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(Addr), llvm::ConstantAsMetadata::get(GetPointerConstant( CGM.getLLVMContext(), D.getDecl()))}; GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops)); } /// For each function which is declared within an extern "C" region and marked /// as 'used', but has internal linkage, create an alias from the unmangled /// name to the mangled name if possible. People expect to be able to refer /// to such functions with an unmangled name from inline assembly within the /// same translation unit. void CodeGenModule::EmitStaticExternCAliases() { for (auto &I : StaticExternCValues) { IdentifierInfo *Name = I.first; llvm::GlobalValue *Val = I.second; if (Val && !getModule().getNamedValue(Name->getName())) addUsedGlobal(llvm::GlobalAlias::create(Name->getName(), Val)); } } bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName, GlobalDecl &Result) const { auto Res = Manglings.find(MangledName); if (Res == Manglings.end()) return false; Result = Res->getValue(); return true; } /// Emits metadata nodes associating all the global values in the /// current module with the Decls they came from. This is useful for /// projects using IR gen as a subroutine. /// /// Since there's currently no way to associate an MDNode directly /// with an llvm::GlobalValue, we create a global named metadata /// with the name 'clang.global.decl.ptrs'. void CodeGenModule::EmitDeclMetadata() { llvm::NamedMDNode *GlobalMetadata = nullptr; // StaticLocalDeclMap for (auto &I : MangledDeclNames) { llvm::GlobalValue *Addr = getModule().getNamedValue(I.second); EmitGlobalDeclMetadata(*this, GlobalMetadata, I.first, Addr); } } /// Emits metadata nodes for all the local variables in the current /// function. void CodeGenFunction::EmitDeclMetadata() { if (LocalDeclMap.empty()) return; llvm::LLVMContext &Context = getLLVMContext(); // Find the unique metadata ID for this name. unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr"); llvm::NamedMDNode *GlobalMetadata = nullptr; for (auto &I : LocalDeclMap) { const Decl *D = I.first; llvm::Value *Addr = I.second; if (auto *Alloca = dyn_cast(Addr)) { llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D); Alloca->setMetadata( DeclPtrKind, llvm::MDNode::get( Context, llvm::ValueAsMetadata::getConstant(DAddr))); } else if (auto *GV = dyn_cast(Addr)) { GlobalDecl GD = GlobalDecl(cast(D)); EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV); } } } void CodeGenModule::EmitVersionIdentMetadata() { llvm::NamedMDNode *IdentMetadata = TheModule.getOrInsertNamedMetadata("llvm.ident"); std::string Version = getClangFullVersion(); llvm::LLVMContext &Ctx = TheModule.getContext(); llvm::Metadata *IdentNode[] = {llvm::MDString::get(Ctx, Version)}; IdentMetadata->addOperand(llvm::MDNode::get(Ctx, IdentNode)); } void CodeGenModule::EmitTargetMetadata() { // Warning, new MangledDeclNames may be appended within this loop. 
// We rely on MapVector insertions adding new elements to the end // of the container. // FIXME: Move this loop into the one target that needs it, and only // loop over those declarations for which we couldn't emit the target // metadata when we emitted the declaration. for (unsigned I = 0; I != MangledDeclNames.size(); ++I) { auto Val = *(MangledDeclNames.begin() + I); const Decl *D = Val.first.getDecl()->getMostRecentDecl(); llvm::GlobalValue *GV = GetGlobalValue(Val.second); getTargetCodeGenInfo().emitTargetMD(D, GV, *this); } } void CodeGenModule::EmitCoverageFile() { if (!getCodeGenOpts().CoverageFile.empty()) { if (llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu")) { llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov"); llvm::LLVMContext &Ctx = TheModule.getContext(); llvm::MDString *CoverageFile = llvm::MDString::get(Ctx, getCodeGenOpts().CoverageFile); for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) { llvm::MDNode *CU = CUNode->getOperand(i); llvm::Metadata *Elts[] = {CoverageFile, CU}; GCov->addOperand(llvm::MDNode::get(Ctx, Elts)); } } } } llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid) { // Sema has checked that all uuid strings are of the form // "12345678-1234-1234-1234-1234567890ab". assert(Uuid.size() == 36); for (unsigned i = 0; i < 36; ++i) { if (i == 8 || i == 13 || i == 18 || i == 23) assert(Uuid[i] == '-'); else assert(isHexDigit(Uuid[i])); } // The starts of all bytes of Field3 in Uuid. Field 3 is "1234-1234567890ab". const unsigned Field3ValueOffsets[8] = { 19, 21, 24, 26, 28, 30, 32, 34 }; llvm::Constant *Field3[8]; for (unsigned Idx = 0; Idx < 8; ++Idx) Field3[Idx] = llvm::ConstantInt::get( Int8Ty, Uuid.substr(Field3ValueOffsets[Idx], 2), 16); llvm::Constant *Fields[4] = { llvm::ConstantInt::get(Int32Ty, Uuid.substr(0, 8), 16), llvm::ConstantInt::get(Int16Ty, Uuid.substr(9, 4), 16), llvm::ConstantInt::get(Int16Ty, Uuid.substr(14, 4), 16), llvm::ConstantArray::get(llvm::ArrayType::get(Int8Ty, 8), Field3) }; return llvm::ConstantStruct::getAnon(Fields); } llvm::Constant * CodeGenModule::getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) { return getCXXABI().getAddrOfCXXCatchHandlerType(Ty, CatchHandlerType); } llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH) { // Return a bogus pointer if RTTI is disabled, unless it's for EH. // FIXME: should we even be calling this method if RTTI is disabled // and it's not for EH? 
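// Why ForEH bypasses -fno-rtti: throwing and catching still require type
// descriptors even when RTTI is off. A minimal C++ sketch (illustrative, not
// from this file), assuming compilation with -fno-rtti:
//
//   struct E {};
//   void f() { try { throw E(); } catch (const E &) {} }  // still emits _ZTI1E
//
// typeid and dynamic_cast are rejected under -fno-rtti, but EH type matching
// is not.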
if (!ForEH && !getLangOpts().RTTI) return llvm::Constant::getNullValue(Int8PtrTy); if (ForEH && Ty->isObjCObjectPointerType() && LangOpts.ObjCRuntime.isGNUFamily()) return ObjCRuntime->GetEHType(Ty); return getCXXABI().getAddrOfRTTIDescriptor(Ty); } void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) { for (auto RefExpr : D->varlists()) { auto *VD = cast(cast(RefExpr)->getDecl()); bool PerformInit = VD->getAnyInitializer() && !VD->getAnyInitializer()->isConstantInitializer(getContext(), /*ForRef=*/false); if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition( VD, GetAddrOfGlobalVar(VD), RefExpr->getLocStart(), PerformInit)) CXXGlobalInits.push_back(InitFunction); } } llvm::MDTuple *CodeGenModule::CreateVTableBitSetEntry( llvm::GlobalVariable *VTable, CharUnits Offset, const CXXRecordDecl *RD) { std::string OutName; llvm::raw_string_ostream Out(OutName); getCXXABI().getMangleContext().mangleCXXVTableBitSet(RD, Out); llvm::Metadata *BitsetOps[] = { llvm::MDString::get(getLLVMContext(), Out.str()), llvm::ConstantAsMetadata::get(VTable), llvm::ConstantAsMetadata::get( llvm::ConstantInt::get(Int64Ty, Offset.getQuantity()))}; return llvm::MDTuple::get(getLLVMContext(), BitsetOps); } Index: vendor/clang/dist/lib/CodeGen/TargetInfo.cpp =================================================================== --- vendor/clang/dist/lib/CodeGen/TargetInfo.cpp (revision 292727) +++ vendor/clang/dist/lib/CodeGen/TargetInfo.cpp (revision 292728) @@ -1,7242 +1,7249 @@ //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // These classes wrap the information about a call or function // definition used to handle ABI compliancy. // //===----------------------------------------------------------------------===// #include "TargetInfo.h" #include "ABIInfo.h" #include "CGCXXABI.h" #include "CGValue.h" #include "CodeGenFunction.h" #include "clang/AST/RecordLayout.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "clang/Frontend/CodeGenOptions.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Triple.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Type.h" #include "llvm/Support/raw_ostream.h" #include // std::sort using namespace clang; using namespace CodeGen; static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex) { // Alternatively, we could emit this as a loop in the source. for (unsigned I = FirstIndex; I <= LastIndex; ++I) { llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I); Builder.CreateStore(Value, Cell); } } static bool isAggregateTypeForABI(QualType T) { return !CodeGenFunction::hasScalarEvaluationKind(T) || T->isMemberFunctionPointerType(); } ABIInfo::~ABIInfo() {} static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI) { const CXXRecordDecl *RD = dyn_cast(RT->getDecl()); if (!RD) return CGCXXABI::RAA_Default; return CXXABI.getRecordArgABI(RD); } static CGCXXABI::RecordArgABI getRecordArgABI(QualType T, CGCXXABI &CXXABI) { const RecordType *RT = T->getAs(); if (!RT) return CGCXXABI::RAA_Default; return getRecordArgABI(RT, CXXABI); } /// Pass transparent unions as if they were the type of the first element. 
Sema /// should ensure that all elements of the union have the same "machine type". static QualType useFirstFieldIfTransparentUnion(QualType Ty) { if (const RecordType *UT = Ty->getAsUnionType()) { const RecordDecl *UD = UT->getDecl(); if (UD->hasAttr()) { assert(!UD->field_empty() && "sema created an empty transparent union"); return UD->field_begin()->getType(); } } return Ty; } CGCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); } ASTContext &ABIInfo::getContext() const { return CGT.getContext(); } llvm::LLVMContext &ABIInfo::getVMContext() const { return CGT.getLLVMContext(); } const llvm::DataLayout &ABIInfo::getDataLayout() const { return CGT.getDataLayout(); } const TargetInfo &ABIInfo::getTarget() const { return CGT.getTarget(); } bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { return false; } bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const { return false; } bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const { return false; } void ABIArgInfo::dump() const { raw_ostream &OS = llvm::errs(); OS << "(ABIArgInfo Kind="; switch (TheKind) { case Direct: OS << "Direct Type="; if (llvm::Type *Ty = getCoerceToType()) Ty->print(OS); else OS << "null"; break; case Extend: OS << "Extend"; break; case Ignore: OS << "Ignore"; break; case InAlloca: OS << "InAlloca Offset=" << getInAllocaFieldIndex(); break; case Indirect: OS << "Indirect Align=" << getIndirectAlign() << " ByVal=" << getIndirectByVal() << " Realign=" << getIndirectRealign(); break; case Expand: OS << "Expand"; break; } OS << ")\n"; } TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; } // If someone can figure out a general rule for this, that would be great. // It's probably just doomed to be platform-dependent, though. unsigned TargetCodeGenInfo::getSizeOfUnwindException() const { // Verified for: // x86-64 FreeBSD, Linux, Darwin // x86-32 FreeBSD, Linux, Darwin // PowerPC Linux, Darwin // ARM Darwin (*not* EABI) // AArch64 Linux return 32; } bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args, const FunctionNoProtoType *fnType) const { // The following conventions are known to require this to be false: // x86_stdcall // MIPS // For everything else, we just prefer false unless we opt out. return false; } void TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString<24> &Opt) const { // This assumes the user is passing a library name like "rt" instead of a // filename like "librt.a/so", and that they don't care whether it's static or // dynamic. Opt = "-l"; Opt += Lib; } static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays); /// isEmptyField - Return true iff a the field is "empty", that is it /// is an unnamed bit-field or an (array of) empty record(s). static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays) { if (FD->isUnnamedBitfield()) return true; QualType FT = FD->getType(); // Constant arrays of empty records count as empty, strip them off. // Constant arrays of zero length always count as empty. if (AllowArrays) while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { if (AT->getSize() == 0) return true; FT = AT->getElementType(); } const RecordType *RT = FT->getAs(); if (!RT) return false; // C++ record fields are never empty, at least in the Itanium ABI. // // FIXME: We should use a predicate for whether this behavior is true in the // current ABI. 
if (isa(RT->getDecl())) return false; return isEmptyRecord(Context, FT, AllowArrays); } /// isEmptyRecord - Return true iff a structure contains only empty /// fields. Note that a structure with a flexible array member is not /// considered empty. static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { const RecordType *RT = T->getAs(); if (!RT) return 0; const RecordDecl *RD = RT->getDecl(); if (RD->hasFlexibleArrayMember()) return false; // If this is a C++ record, check the bases first. if (const CXXRecordDecl *CXXRD = dyn_cast(RD)) for (const auto &I : CXXRD->bases()) if (!isEmptyRecord(Context, I.getType(), true)) return false; for (const auto *I : RD->fields()) if (!isEmptyField(Context, I, AllowArrays)) return false; return true; } /// isSingleElementStruct - Determine if a structure is a "single /// element struct", i.e. it has exactly one non-empty field or /// exactly one field which is itself a single element /// struct. Structures with flexible array members are never /// considered single element structs. /// /// \return The field declaration for the single non-empty field, if /// it exists. static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { const RecordType *RT = T->getAs(); if (!RT) return nullptr; const RecordDecl *RD = RT->getDecl(); if (RD->hasFlexibleArrayMember()) return nullptr; const Type *Found = nullptr; // If this is a C++ record, check the bases first. if (const CXXRecordDecl *CXXRD = dyn_cast(RD)) { for (const auto &I : CXXRD->bases()) { // Ignore empty records. if (isEmptyRecord(Context, I.getType(), true)) continue; // If we already found an element then this isn't a single-element struct. if (Found) return nullptr; // If this is non-empty and not a single element struct, the composite // cannot be a single element struct. Found = isSingleElementStruct(I.getType(), Context); if (!Found) return nullptr; } } // Check for single element. for (const auto *FD : RD->fields()) { QualType FT = FD->getType(); // Ignore empty fields. if (isEmptyField(Context, FD, true)) continue; // If we already found an element then this isn't a single-element // struct. if (Found) return nullptr; // Treat single element arrays as the element. while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { if (AT->getSize().getZExtValue() != 1) break; FT = AT->getElementType(); } if (!isAggregateTypeForABI(FT)) { Found = FT.getTypePtr(); } else { Found = isSingleElementStruct(FT, Context); if (!Found) return nullptr; } } // We don't consider a struct a single-element struct if it has // padding beyond the element type. if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T)) return nullptr; return Found; } static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { // Treat complex types as the element type. if (const ComplexType *CTy = Ty->getAs()) Ty = CTy->getElementType(); // Check for a type which we know has a simple scalar argument-passing // convention without any padding. (We're specifically looking for 32 // and 64-bit integer and integer-equivalents, float, and double.) if (!Ty->getAs() && !Ty->hasPointerRepresentation() && !Ty->isEnumeralType() && !Ty->isBlockPointerType()) return false; uint64_t Size = Context.getTypeSize(Ty); return Size == 32 || Size == 64; } /// canExpandIndirectArgument - Test whether an argument type which is to be /// passed indirectly (on the stack) would have the equivalent layout if it was /// expanded into separate arguments. 
If so, we prefer to do the latter to avoid /// inhibiting optimizations. /// // FIXME: This predicate is missing many cases, currently it just follows // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We // should probably make this smarter, or better yet make the LLVM backend // capable of handling it. static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) { // We can only expand structure types. const RecordType *RT = Ty->getAs(); if (!RT) return false; // We can only expand (C) structures. // // FIXME: This needs to be generalized to handle classes as well. const RecordDecl *RD = RT->getDecl(); if (!RD->isStruct()) return false; // We try to expand CLike CXXRecordDecl. if (const CXXRecordDecl *CXXRD = dyn_cast(RD)) { if (!CXXRD->isCLike()) return false; } uint64_t Size = 0; for (const auto *FD : RD->fields()) { if (!is32Or64BitBasicType(FD->getType(), Context)) return false; // FIXME: Reject bit-fields wholesale; there are two problems, we don't know // how to expand them yet, and the predicate for telling if a bitfield still // counts as "basic" is more complicated than what we were doing previously. if (FD->isBitField()) return false; Size += Context.getTypeSize(FD->getType()); } // Make sure there are not any holes in the struct. if (Size != Context.getTypeSize(Ty)) return false; return true; } namespace { /// DefaultABIInfo - The default implementation for ABI specific /// details. This implementation provides information which results in /// self-consistent and sensible LLVM IR generation, but does not /// conform to any particular ABI. class DefaultABIInfo : public ABIInfo { public: DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} ABIArgInfo classifyReturnType(QualType RetTy) const; ABIArgInfo classifyArgumentType(QualType RetTy) const; void computeInfo(CGFunctionInfo &FI) const override { if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &I : FI.arguments()) I.info = classifyArgumentType(I.type); } llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const override; }; class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { public: DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} }; llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { return nullptr; } ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { Ty = useFirstFieldIfTransparentUnion(Ty); if (isAggregateTypeForABI(Ty)) { // Records with non-trivial destructors/copy-constructors should not be // passed by value. if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); return ABIArgInfo::getIndirect(0); } // Treat an enum type as its underlying type. if (const EnumType *EnumTy = Ty->getAs()) Ty = EnumTy->getDecl()->getIntegerType(); return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { if (RetTy->isVoidType()) return ABIArgInfo::getIgnore(); if (isAggregateTypeForABI(RetTy)) return ABIArgInfo::getIndirect(0); // Treat an enum type as its underlying type. if (const EnumType *EnumTy = RetTy->getAs()) RetTy = EnumTy->getDecl()->getIntegerType(); return (RetTy->isPromotableIntegerType() ? 
ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } //===----------------------------------------------------------------------===// // le32/PNaCl bitcode ABI Implementation // // This is a simplified version of the x86_32 ABI. Arguments and return values // are always passed on the stack. //===----------------------------------------------------------------------===// class PNaClABIInfo : public ABIInfo { public: PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} ABIArgInfo classifyReturnType(QualType RetTy) const; ABIArgInfo classifyArgumentType(QualType RetTy) const; void computeInfo(CGFunctionInfo &FI) const override; llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const override; }; class PNaClTargetCodeGenInfo : public TargetCodeGenInfo { public: PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {} }; void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const { if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &I : FI.arguments()) I.info = classifyArgumentType(I.type); } llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { return nullptr; } /// \brief Classify argument of given type \p Ty. ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const { if (isAggregateTypeForABI(Ty)) { if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); return ABIArgInfo::getIndirect(0); } else if (const EnumType *EnumTy = Ty->getAs()) { // Treat an enum type as its underlying type. Ty = EnumTy->getDecl()->getIntegerType(); } else if (Ty->isFloatingType()) { // Floating-point types don't go inreg. return ABIArgInfo::getDirect(); } return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const { if (RetTy->isVoidType()) return ABIArgInfo::getIgnore(); // In the PNaCl ABI we always return records/structures on the stack. if (isAggregateTypeForABI(RetTy)) return ABIArgInfo::getIndirect(0); // Treat an enum type as its underlying type. if (const EnumType *EnumTy = RetTy->getAs()) RetTy = EnumTy->getDecl()->getIntegerType(); return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } /// IsX86_MMXType - Return true if this is an MMX type. bool IsX86_MMXType(llvm::Type *IRType) { // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>. return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && cast(IRType)->getElementType()->isIntegerTy() && IRType->getScalarSizeInBits() != 64; } static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, StringRef Constraint, llvm::Type* Ty) { if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) { if (cast(Ty)->getBitWidth() != 64) { // Invalid MMX constraint return nullptr; } return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); } // No operation needed return Ty; } /// Returns true if this type can be passed in SSE registers with the /// X86_VectorCall calling convention. Shared between x86_32 and x86_64. 
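// A sketch of the rule the two helpers below implement (illustrative, not
// from this file), assuming an x86 target with -fms-extensions:
//
//   typedef float v4f __attribute__((vector_size(16)));
//   struct HVA2 { v4f x, y; };            // 2 vector members (<= 4): may be
//   void __vectorcall g(struct HVA2 a);   // passed in XMM registers
//   struct HVA5 { v4f a, b, c, d, e; };   // 5 members: passed in memory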
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) { if (const BuiltinType *BT = Ty->getAs()) { if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) return true; } else if (const VectorType *VT = Ty->getAs()) { // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX // registers specially. unsigned VecSize = Context.getTypeSize(VT); if (VecSize == 128 || VecSize == 256 || VecSize == 512) return true; } return false; } /// Returns true if this aggregate is small enough to be passed in SSE registers /// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64. static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) { return NumMembers <= 4; } //===----------------------------------------------------------------------===// // X86-32 ABI Implementation //===----------------------------------------------------------------------===// /// \brief Similar to llvm::CCState, but for Clang. struct CCState { CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {} unsigned CC; unsigned FreeRegs; unsigned FreeSSERegs; }; /// X86_32ABIInfo - The X86-32 ABI information. class X86_32ABIInfo : public ABIInfo { enum Class { Integer, Float }; static const unsigned MinABIStackAlignInBytes = 4; bool IsDarwinVectorABI; bool IsSmallStructInRegABI; bool IsWin32StructABI; unsigned DefaultNumRegisterParameters; static bool isRegisterSize(unsigned Size) { return (Size == 8 || Size == 16 || Size == 32 || Size == 64); } bool isHomogeneousAggregateBaseType(QualType Ty) const override { // FIXME: Assumes vectorcall is in use. return isX86VectorTypeForVectorCall(getContext(), Ty); } bool isHomogeneousAggregateSmallEnough(const Type *Ty, uint64_t NumMembers) const override { // FIXME: Assumes vectorcall is in use. return isX86VectorCallAggregateSmallEnough(NumMembers); } bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const; /// getIndirectResult - Give a source type \arg Ty, return a suitable result /// such that the argument will be passed in memory. ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; ABIArgInfo getIndirectReturnResult(CCState &State) const; /// \brief Return the alignment to use for the given type on the stack. unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; Class classify(QualType Ty) const; ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const; ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const; /// \brief Rewrite the function info so that all memory arguments use /// inalloca. 
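  // A sketch of when rewriteWithInAlloca (declared below) applies, assuming
  // the 32-bit Microsoft C++ ABI (illustrative, not from this file):
  //
  //   struct A { A(const A &); int x; };
  //   void f(A a);   // 'a' cannot be passed as a plain by-value copy because
  //                  // its copy constructor must run; the outgoing argument
  //                  // area is modeled as a single inalloca struct instead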
void rewriteWithInAlloca(CGFunctionInfo &FI) const; void addFieldToArgStruct(SmallVector &FrameFields, unsigned &StackOffset, ABIArgInfo &Info, QualType Type) const; public: void computeInfo(CGFunctionInfo &FI) const override; llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const override; X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w, unsigned r) : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p), IsWin32StructABI(w), DefaultNumRegisterParameters(r) {} }; class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { public: X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w, unsigned r) :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {} static bool isStructReturnInRegABI( const llvm::Triple &Triple, const CodeGenOptions &Opts); void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const override; int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { // Darwin uses different dwarf register numbers for EH. if (CGM.getTarget().getTriple().isOSDarwin()) return 5; return 4; } bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const override; llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, StringRef Constraint, llvm::Type* Ty) const override { return X86AdjustInlineAsmType(CGF, Constraint, Ty); } void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue, std::string &Constraints, std::vector &ResultRegTypes, std::vector &ResultTruncRegTypes, std::vector &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const override; llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { unsigned Sig = (0xeb << 0) | // jmp rel8 (0x06 << 8) | // .+0x08 ('F' << 16) | ('T' << 24); return llvm::ConstantInt::get(CGM.Int32Ty, Sig); } }; } /// Rewrite input constraint references after adding some output constraints. /// In the case where there is one output and one input and we add one output, /// we need to replace all operand references greater than or equal to 1: /// mov $0, $1 /// mov eax, $1 /// The result will be: /// mov $0, $2 /// mov eax, $2 static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString) { std::string Buf; llvm::raw_string_ostream OS(Buf); size_t Pos = 0; while (Pos < AsmString.size()) { size_t DollarStart = AsmString.find('$', Pos); if (DollarStart == std::string::npos) DollarStart = AsmString.size(); size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart); if (DollarEnd == std::string::npos) DollarEnd = AsmString.size(); OS << StringRef(&AsmString[Pos], DollarEnd - Pos); Pos = DollarEnd; size_t NumDollars = DollarEnd - DollarStart; if (NumDollars % 2 != 0 && Pos < AsmString.size()) { // We have an operand reference. size_t DigitStart = Pos; size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart); if (DigitEnd == std::string::npos) DigitEnd = AsmString.size(); StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart); unsigned OperandIndex; if (!OperandStr.getAsInteger(10, OperandIndex)) { if (OperandIndex >= FirstIn) OperandIndex += NumNewOuts; OS << OperandIndex; } else { OS << OperandStr; } Pos = DigitEnd; } } AsmString = std::move(OS.str()); } /// Add output constraints for EAX:EDX because they are return registers. 
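// This supports MS-style asm blocks whose final register state supplies the
// return value, e.g. (illustrative, not from this file; assumes -fasm-blocks
// on x86-32):
//
//   int f() { __asm mov eax, 42 }   // returns 42
//
// The block's constraint string gains "={eax}" (or "=A" for EAX:EDX) and the
// return slot is recorded as the corresponding output.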
/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
  }
  return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(State);

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
    }

    return getIndirectReturnResult(State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
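// NOTE: Editor-added, disabled illustration (not part of this file). Under
// the i386 return rules implemented above, a struct comes back in registers
// only if it is register sized (8/16/32/64 bits) with every field itself
// register-returnable; a single-float/double struct instead comes back in an
// FP register on non-Win32 targets. The structs below are hypothetical
// examples; the static_asserts only pin down their sizes on a typical
// 32-bit target.
#if 0
struct RegSized   { short a, b; };  // 32 bits -> returned in %eax
struct FPSpecial  { double d;   };  // single FP field -> %st(0) (not MSVC)
struct TooOddSize { char c[3];  };  // 24 bits -> not register sized, memory
static_assert(sizeof(RegSized) == 4, "32-bit struct");
static_assert(sizeof(TooOddSize) == 3, "not a register size");
#endif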
static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4, /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
                                   bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogenous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    if (RT) {
      // Structs are always byval on win32, regardless of what they contain.
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, State);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, State, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
          State.CC == llvm::CallingConv::X86_VectorCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }


  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs;  // The sret parameter consumes a register.
      FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and
  // rewrite all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   unsigned &StackOffset,
                                   ABIArgInfo &Info, QualType Type) const {
  assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();

  // Insert padding bytes to respect alignment. For x86_32, each argument is 4
  // byte aligned.
  if (StackOffset % 4U) {
    unsigned OldOffset = StackOffset;
    StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
    unsigned NumBytes = StackOffset - OldOffset;
    assert(NumBytes);
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes);
    FrameFields.push_back(Ty);
  }
}

static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Indirect:
    assert(Info.getIndirectByVal());
    return true;
  case ABIArgInfo::Ignore:
    return false;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
  case ABIArgInfo::Expand:
    if (Info.getInReg())
      return false;
    return true;
  }
  llvm_unreachable("invalid enum");
}

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  unsigned StackOffset = 0;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true));
}
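// NOTE: Editor-added, disabled illustration (not part of this file). A
// minimal, standalone sketch of the 4-byte padding rule addFieldToArgStruct
// applies to the packed inalloca frame; roundUpTo4 and layoutExample are
// hypothetical names, and the offsets in the comments are what the
// arithmetic above would produce.
#if 0
#include <cassert>

static unsigned roundUpTo4(unsigned Offset) {
  // Same computation as llvm::RoundUpToAlignment(Offset, 4U) for Align == 4.
  return (Offset + 3U) & ~3U;
}

static void layoutExample() {
  unsigned StackOffset = 0;
  StackOffset += 4;                     // i32 field      -> [0, 4)
  StackOffset += 6;                     // 6-byte struct  -> [4, 10)
  unsigned Padded = roundUpTo4(StackOffset);
  assert(Padded == 12);                 // 2 bytes of [2 x i8] padding
  StackOffset = Padded;                 // next field starts at offset 12
  (void)StackOffset;
}
#endif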
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(), "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Bitrig:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}

void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                      llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                              B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  //   on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//


namespace {

/// The AVX ABI level for X86 targets.
enum class X86AVXABILevel {
  None,
  AVX,
  AVX512
};

/// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}

/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified different
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                                 QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                                     QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
  /// such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  X86AVXABILevel AVXLevel;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
      ABIInfo(CGT), AVXLevel(AVXLevel),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  bool has64BitPointers() const {
    return Has64BitPointers;
  }
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {
-
-  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
-                      bool IsReturnType) const;
-
public:
-  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
+      : ABIInfo(CGT),
+        IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }
+
+private:
+  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
+                      bool IsReturnType) const;
+
+  bool IsMingw64;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig;
    if (getABIInfo().has64BitPointers())
      Sig = (0xeb << 0) |  // jmp rel8
            (0x0a << 8) |  //           .+0x0c
            ('F' << 16) |
            ('T' << 24);
    else
      Sig = (0xeb << 0) |  // jmp rel8
            (0x06 << 8) |  //           .+0x08
            ('F' << 16) |
            ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }
};

class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
public:
  PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
    : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "\01";
    Opt += Lib;
  }
};
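// NOTE: Editor-added, disabled illustration (not part of this file). A
// standalone sketch of what qualifyWindowsLibrary (defined just below)
// produces, reimplemented with std::string; qualifyExample and
// endsWithLibIgnoreCase are hypothetical names. The rule: append ".lib"
// unless the name already ends in .lib (case-insensitively), and quote names
// containing spaces, matching MSVC.
#if 0
#include <cassert>
#include <cctype>
#include <string>

static bool endsWithLibIgnoreCase(const std::string &S) {
  if (S.size() < 4)
    return false;
  std::string Tail = S.substr(S.size() - 4);
  for (char &C : Tail)
    C = (char)std::tolower((unsigned char)C);
  return Tail == ".lib";
}

static std::string qualifyExample(const std::string &Lib) {
  bool Quote = Lib.find(' ') != std::string::npos;
  std::string Arg = Quote ? "\"" : "";
  Arg += Lib;
  if (!endsWithLibIgnoreCase(Lib))
    Arg += ".lib";
  Arg += Quote ? "\"" : "";
  return Arg;
}

static void checkQualifyExample() {
  assert(qualifyExample("msvcrt") == "msvcrt.lib");
  assert(qualifyExample("my lib") == "\"my lib.lib\"");
  assert(qualifyExample("legacy.LIB") == "legacy.LIB");
}
#endif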
"\"" : ""; return ArgStr; } class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { public: WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w, unsigned RegParms) : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {} void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const override; void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString<24> &Opt) const override { Opt = "/DEFAULTLIB:"; Opt += qualifyWindowsLibrary(Lib); } void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, llvm::SmallString<32> &Opt) const override { Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; } }; static void addStackProbeSizeTargetAttribute(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) { if (isa(D)) { if (CGM.getCodeGenOpts().StackProbeSize != 4096) { llvm::Function *Fn = cast(GV); Fn->addFnAttr("stack-probe-size", llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); } } } void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); addStackProbeSizeTargetAttribute(D, GV, CGM); } class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { public: WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {} void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const override; int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { return 7; } bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const override { llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); // 0-15 are the 16 integer registers. // 16 is %rip. AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); return false; } void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString<24> &Opt) const override { Opt = "/DEFAULTLIB:"; Opt += qualifyWindowsLibrary(Lib); } void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, llvm::SmallString<32> &Opt) const override { Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; } }; void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); addStackProbeSizeTargetAttribute(D, GV, CGM); } } void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const { // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: // // (a) If one of the classes is Memory, the whole argument is passed in // memory. // // (b) If X87UP is not preceded by X87, the whole argument is passed in // memory. // // (c) If the size of the aggregate exceeds two eightbytes and the first // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole // argument is passed in memory. NOTE: This is necessary to keep the // ABI working for processors that don't support the __m256 type. // // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. // // Some of these are enforced by the merging logic. Others can arise // only with unions; for example: // union { _Complex double; unsigned; } // // Note that clauses (b) and (c) were added in 0.98. 
// if (Hi == Memory) Lo = Memory; if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) Lo = Memory; if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) Lo = Memory; if (Hi == SSEUp && Lo != SSE) Hi = SSE; } X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is // classified recursively so that always two fields are // considered. The resulting class is calculated according to // the classes of the fields in the eightbyte: // // (a) If both classes are equal, this is the resulting class. // // (b) If one of the classes is NO_CLASS, the resulting class is // the other class. // // (c) If one of the classes is MEMORY, the result is the MEMORY // class. // // (d) If one of the classes is INTEGER, the result is the // INTEGER. // // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, // MEMORY is used as class. // // (f) Otherwise class SSE is used. // Accum should never be memory (we should have returned) or // ComplexX87 (because this cannot be passed in a structure). assert((Accum != Memory && Accum != ComplexX87) && "Invalid accumulated classification during merge."); if (Accum == Field || Field == NoClass) return Accum; if (Field == Memory) return Memory; if (Accum == NoClass) return Field; if (Accum == Integer || Field == Integer) return Integer; if (Field == X87 || Field == X87Up || Field == ComplexX87 || Accum == X87 || Accum == X87Up) return Memory; return SSE; } void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, bool isNamedArg) const { // FIXME: This code can be simplified by introducing a simple value class for // Class pairs with appropriate constructor methods for the various // situations. // FIXME: Some of the split computations are wrong; unaligned vectors // shouldn't be passed in registers for example, so there is no chance they // can straddle an eightbyte. Verify & simplify. Lo = Hi = NoClass; Class &Current = OffsetBase < 64 ? Lo : Hi; Current = Memory; if (const BuiltinType *BT = Ty->getAs()) { BuiltinType::Kind k = BT->getKind(); if (k == BuiltinType::Void) { Current = NoClass; } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { Lo = Integer; Hi = Integer; } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { Current = Integer; } else if (k == BuiltinType::Float || k == BuiltinType::Double) { Current = SSE; } else if (k == BuiltinType::LongDouble) { const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); if (LDF == &llvm::APFloat::IEEEquad) { Lo = SSE; Hi = SSEUp; } else if (LDF == &llvm::APFloat::x87DoubleExtended) { Lo = X87; Hi = X87Up; } else if (LDF == &llvm::APFloat::IEEEdouble) { Current = SSE; } else llvm_unreachable("unexpected long double representation!"); } // FIXME: _Decimal32 and _Decimal64 are SSE. // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). return; } if (const EnumType *ET = Ty->getAs()) { // Classify the underlying integer type. classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); return; } if (Ty->hasPointerRepresentation()) { Current = Integer; return; } if (Ty->isMemberPointerType()) { if (Ty->isMemberFunctionPointerType()) { if (Has64BitPointers) { // If Has64BitPointers, this is an {i64, i64}, so classify both // Lo and Hi now. Lo = Hi = Integer; } else { // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that // straddles an eightbyte boundary, Hi should be classified as well. 
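// NOTE: Editor-added, disabled illustration (not part of this file). A
// standalone restatement of the merge() rules above as a free function over
// a local enum, plus spot checks; mergeExample and Cls are hypothetical
// names. The rule order mirrors (a)-(f) from AMD64-ABI 3.2.3p2 Rule 4.
#if 0
#include <cassert>

enum class Cls { Integer, SSE, SSEUp, X87, X87Up, ComplexX87, NoClass, Memory };

static Cls mergeExample(Cls Accum, Cls Field) {
  if (Accum == Field || Field == Cls::NoClass) return Accum;  // (a), (b)
  if (Field == Cls::Memory) return Cls::Memory;               // (c)
  if (Accum == Cls::NoClass) return Field;                    // (b)
  if (Accum == Cls::Integer || Field == Cls::Integer)
    return Cls::Integer;                                      // (d)
  if (Field == Cls::X87 || Field == Cls::X87Up ||
      Field == Cls::ComplexX87 || Accum == Cls::X87 ||
      Accum == Cls::X87Up)
    return Cls::Memory;                                       // (e)
  return Cls::SSE;                                            // (f)
}

static void checkMergeExample() {
  // struct { int a; float b; }: both fields share eightbyte 0; INTEGER wins,
  // so the whole eightbyte travels in one GPR.
  assert(mergeExample(Cls::NoClass, Cls::Integer) == Cls::Integer);
  assert(mergeExample(Cls::Integer, Cls::SSE) == Cls::Integer);
  // An x87 long double field forces the eightbyte to Memory when merged.
  assert(mergeExample(Cls::Integer, Cls::X87) == Cls::Memory);
}
#endif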
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi, bool isNamedArg) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad) {
        Lo = SSE;
        Hi = SSEUp;
      } else if (LDF == &llvm::APFloat::x87DoubleExtended) {
        Lo = X87;
        Hi = X87Up;
      } else if (LDF == &llvm::APFloat::IEEEdouble) {
        Current = SSE;
      } else
        llvm_unreachable("unexpected long double representation!");
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType()) {
      if (Has64BitPointers) {
        // If Has64BitPointers, this is an {i64, i64}, so classify both
        // Lo and Hi now.
        Lo = Hi = Integer;
      } else {
        // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
        // straddles an eightbyte boundary, Hi should be classified as well.
        uint64_t EB_FuncPtr = (OffsetBase) / 64;
        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
        if (EB_FuncPtr != EB_ThisAdj) {
          Lo = Hi = Integer;
        } else {
          Current = Integer;
        }
      }
    } else {
      Current = Integer;
    }
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
      // Arguments of 256-bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design considers that types can't be
      // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256-bits, but since there're no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
      //
      // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
      // registers if they are "named", i.e. not part of the "..." of a
      // variadic function.
      //
      // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
      // split into eight eightbyte chunks, one SSE and seven SSEUP.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy) {
      Current = SSE;
    } else if (ET == getContext().DoubleTy) {
      Lo = Hi = SSE;
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad)
        Current = Memory;
      else if (LDF == &llvm::APFloat::x87DoubleExtended)
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble)
        Lo = Hi = SSE;
      else
        llvm_unreachable("unexpected long double representation!");
    }

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }
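  // Editor-added worked example (comment only): for _Complex float at
  // OffsetBase 0, both 32-bit halves land in eightbyte 0, so Current (== Lo)
  // stays SSE and the pair travels in one XMM register. For _Complex double,
  // Lo = Hi = SSE and each half gets its own eightbyte. And if a complex
  // value's real/imaginary parts straddle a 64-bit boundary (EB_Real !=
  // EB_Imag above), the Hi half inherits Lo's class.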
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
    // to work for sizes wider than 128, early check and fallback to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i = 0, Offset = OffsetBase; i < ArraySize;
         ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (getRecordArgABI(RT, getCXXABI()))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
          return;
        }
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't
      // extended to work for sizes wider than 128, early check and fallback
      // to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}
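// NOTE: Editor-added, disabled illustration (not part of this file). A
// standalone restatement of IsIllegalVectorType's size test above;
// isLegalVectorWidth is a hypothetical name. Vectors wider than 64 bits and
// no wider than the native vector size for the AVX level (128/256/512) are
// "legal" and travel in XMM/YMM/ZMM registers; everything else takes the
// indirect path.
#if 0
#include <cassert>
#include <cstdint>

static bool isLegalVectorWidth(std::uint64_t SizeInBits,
                               unsigned NativeVectorBits /* 128, 256, 512 */) {
  return SizeInBits > 64 && SizeInBits <= NativeVectorBits;
}

static void checkVectorWidths() {
  assert(isLegalVectorWidth(128, 128));   // <4 x float> always fits
  assert(!isLegalVectorWidth(256, 128));  // __m256 is illegal without AVX
  assert(isLegalVectorWidth(256, 256));   // ...and legal with it
  assert(!isLegalVectorWidth(64, 512));   // MMX-sized vectors stay illegal
}
#endif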
ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register. This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of
  // the free integer registers. Doing this when there are free integer
  // registers would require more care, as we would have to ensure that the
  // coerced value did not claim the unused register. That would require either
  // reordering the arguments to the function (so that any subsequent inreg
  // values came first), or only doing this optimization when there were no
  // following arguments that might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being passed
  // by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(Align);
}

/// The ABI specifies that a value should be passed in a full vector XMM/YMM
/// register. Pick an LLVM IR type that will be passed as a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  // Wrapper structs/arrays that only contain vectors are passed just like
  // vectors; strip them off if present.
  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(InnerTy, 0);

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType) ||
      IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;

  // We couldn't find the preferred IR vector type for 'Ty'.
  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256) && "Invalid type found!");

  // Return a LLVM IR vector type based on the size of 'Ty'.
  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                               Size / 64);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or being in
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size, and have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried
    // range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done..
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest. Yes
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}

/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
                                  const llvm::DataLayout &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the field at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}


/// GetSSETypeAtOffset - Return a type that will be passed by the backend in
/// the low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are either double, <2 x float>, or float.
  // We pass as float if the last 4 bytes is just padding. This happens for
  // structs that contain 3 floats.
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
  if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
      ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  return llvm::Type::getDoubleTy(getVMContext());
}


/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR. This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct. This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be
/// anything else that the backend will pass in a GPR that works better (e.g.
/// i8, %foo*, etc).
///
/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references. PrefType may be null.
///
/// SourceTy is the source-level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int. We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
    (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}
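// NOTE: Editor-added, disabled illustration (not part of this file). The
// tail-padding rule documented in GetINTEGERTypeAtOffset above, shown on
// concrete structs; only the sizes below are asserted, and the IR types in
// the comments are what the routine would pick on a typical x86-64 target.
#if 0
struct DblInt    { double d; int i;    }; // high eightbyte: i32 + padding
struct DblIntInt { double d; int i, j; }; // high eightbyte: fully used
// For DblInt, bits [96, 128) are tail padding, so the high 8-byte unit can
// be passed as i32 and the pair lowers to { double, i32 }. For DblIntInt the
// second int occupies those bits, so the high unit must widen to i64:
// { double, i64 }.
static_assert(sizeof(DblInt) == 16, "high eightbyte is half padding");
static_assert(sizeof(DblIntInt) == 16, "high eightbyte fully occupied");
#endif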
return llvm::IntegerType::get(getVMContext(), std::min(TySizeInBytes-SourceOffset, 8U)*8); } /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally /// be used as elements of a two register pair to pass or return, return a /// first class aggregate to represent them. For example, if the low part of /// a by-value argument should be passed as i32* and the high part as float, /// return {i32*, float}. static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD) { // In order to correctly satisfy the ABI, we need to the high part to start // at offset 8. If the high and low parts we inferred are both 4-byte types // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have // the second element at offset 8. Check for this: unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); unsigned HiAlign = TD.getABITypeAlignment(Hi); unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign); assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); // To handle this, we have to increase the size of the low part so that the // second element will start at an 8 byte offset. We can't increase the size // of the second element because it might make us access off the end of the // struct. if (HiStart != 8) { // There are usually two sorts of types the ABI generation code can produce // for the low part of a pair that aren't 8 bytes in size: float or // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and // NaCl). // Promote these to a larger type. if (Lo->isFloatTy()) Lo = llvm::Type::getDoubleTy(Lo->getContext()); else { assert((Lo->isIntegerTy() || Lo->isPointerTy()) && "Invalid/unknown lo type"); Lo = llvm::Type::getInt64Ty(Lo->getContext()); } } llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr); // Verify that the second element is at an 8-byte offset. assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && "Invalid x86-64 argument pair!"); return Result; } ABIArgInfo X86_64ABIInfo:: classifyReturnType(QualType RetTy) const { // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the // classification algorithm. X86_64ABIInfo::Class Lo, Hi; classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true); // Check some invariants. assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); llvm::Type *ResType = nullptr; switch (Lo) { case NoClass: if (Hi == NoClass) return ABIArgInfo::getIgnore(); // If the low part is just padding, it takes no register, leave ResType // null. assert((Hi == SSE || Hi == Integer || Hi == X87Up) && "Unknown missing lo part"); break; case SSEUp: case X87Up: llvm_unreachable("Invalid classification for lo word."); // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via // hidden argument. case Memory: return getIndirectReturnResult(RetTy); // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next // available register of the sequence %rax, %rdx is used. case Integer: ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); // If we have a sign or zero extended integer, make sure to return Extend // so that the parameter gets the right LLVM IR attributes. if (Hi == NoClass && isa(ResType)) { // Treat an enum type as its underlying type. 
if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) RetTy = EnumTy->getDecl()->getIntegerType(); if (RetTy->isIntegralOrEnumerationType() && RetTy->isPromotableIntegerType()) return ABIArgInfo::getExtend(); } break; // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next // available SSE register of the sequence %xmm0, %xmm1 is used. case SSE: ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); break; // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is // returned on the X87 stack in %st0 as an 80-bit x87 number. case X87: ResType = llvm::Type::getX86_FP80Ty(getVMContext()); break; // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real // part of the value is returned in %st0 and the imaginary part in // %st1. case ComplexX87: assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), llvm::Type::getX86_FP80Ty(getVMContext()), nullptr); break; } llvm::Type *HighPart = nullptr; switch (Hi) { // Memory was handled previously and X87 should // never occur as a hi class. case Memory: case X87: llvm_unreachable("Invalid classification for hi word."); case ComplexX87: // Previously handled. case NoClass: break; case Integer: HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); if (Lo == NoClass) // Return HighPart at offset 8 in memory. return ABIArgInfo::getDirect(HighPart, 8); break; case SSE: HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); if (Lo == NoClass) // Return HighPart at offset 8 in memory. return ABIArgInfo::getDirect(HighPart, 8); break; // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte // is passed in the next available eightbyte chunk of the last used // vector register. // // SSEUP should always be preceded by SSE, just widen. case SSEUp: assert(Lo == SSE && "Unexpected SSEUp classification."); ResType = GetByteVectorType(RetTy); break; // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is // returned together with the previous X87 value in %st0. case X87Up: // If X87Up is preceded by X87, we don't need to do // anything. However, in some cases with unions it may not be // preceded by X87. In such situations we follow gcc and pass the // extra bits in an SSE reg. if (Lo != X87) { HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); if (Lo == NoClass) // Return HighPart at offset 8 in memory. return ABIArgInfo::getDirect(HighPart, 8); } break; } // If a high part was specified, merge it together with the low part. It is // known to pass in the high eightbyte of the result. We do this by forming a // first class struct aggregate with the high and low part: {low, high} if (HighPart) ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); return ABIArgInfo::getDirect(ResType); } ABIArgInfo X86_64ABIInfo::classifyArgumentType( QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, bool isNamedArg) const { Ty = useFirstFieldIfTransparentUnion(Ty); X86_64ABIInfo::Class Lo, Hi; classify(Ty, 0, Lo, Hi, isNamedArg); // Check some invariants. // FIXME: Enforce these by construction. assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); neededInt = 0; neededSSE = 0; llvm::Type *ResType = nullptr; switch (Lo) { case NoClass: if (Hi == NoClass) return ABIArgInfo::getIgnore(); // If the low part is just padding, it takes no register, leave ResType // null.
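// (Illustrative: an empty struct classifies as NoClass in both eightbytes and
// is ignored entirely; the cases that reach the assert below are ones where
// only the high eightbyte carries data, e.g. certain unions.)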
assert((Hi == SSE || Hi == Integer || Hi == X87Up) && "Unknown missing lo part"); break; // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument // on the stack. case Memory: // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or // COMPLEX_X87, it is passed in memory. case X87: case ComplexX87: if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) ++neededInt; return getIndirectResult(Ty, freeIntRegs); case SSEUp: case X87Up: llvm_unreachable("Invalid classification for lo word."); // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 // and %r9 is used. case Integer: ++neededInt; // Pick an 8-byte type based on the preferred type. ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); // If we have a sign or zero extended integer, make sure to return Extend // so that the parameter gets the right LLVM IR attributes. if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { // Treat an enum type as its underlying type. if (const EnumType *EnumTy = Ty->getAs<EnumType>()) Ty = EnumTy->getDecl()->getIntegerType(); if (Ty->isIntegralOrEnumerationType() && Ty->isPromotableIntegerType()) return ABIArgInfo::getExtend(); } break; // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next // available SSE register is used; the registers are taken in the // order from %xmm0 to %xmm7. case SSE: { llvm::Type *IRType = CGT.ConvertType(Ty); ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); ++neededSSE; break; } } llvm::Type *HighPart = nullptr; switch (Hi) { // Memory was handled previously, ComplexX87 and X87 should // never occur as hi classes, and X87Up must be preceded by X87, // which is passed in memory. case Memory: case X87: case ComplexX87: llvm_unreachable("Invalid classification for hi word."); case NoClass: break; case Integer: ++neededInt; // Pick an 8-byte type based on the preferred type. HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); if (Lo == NoClass) // Pass HighPart at offset 8 in memory. return ABIArgInfo::getDirect(HighPart, 8); break; // X87Up generally doesn't occur here (long double is passed in // memory), except in situations involving unions. case X87Up: case SSE: HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); if (Lo == NoClass) // Pass HighPart at offset 8 in memory. return ABIArgInfo::getDirect(HighPart, 8); ++neededSSE; break; // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the // eightbyte is passed in the upper half of the last used SSE // register. This only happens when 128-bit vectors are passed. case SSEUp: assert(Lo == SSE && "Unexpected SSEUp classification"); ResType = GetByteVectorType(Ty); break; } // If a high part was specified, merge it together with the low part. It is // known to pass in the high eightbyte of the result. We do this by forming a // first class struct aggregate with the high and low part: {low, high} if (HighPart) ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); return ABIArgInfo::getDirect(ResType); } void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); // Keep track of the number of assigned registers. unsigned freeIntRegs = 6, freeSSERegs = 8; // If the return value is indirect, then the hidden argument is consuming one // integer register. if (FI.getReturnInfo().isIndirect()) --freeIntRegs; // The chain argument effectively gives us another free register.
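// An illustrative tally under these rules: for 'void f(int, double,
// struct { long a, b; })' the int needs one GPR, the struct two GPRs and the
// double one SSE register, leaving 3 of 6 GPRs and 7 of 8 SSE registers for
// any remaining arguments.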
if (FI.isChainCall()) ++freeIntRegs; unsigned NumRequiredArgs = FI.getNumRequiredArgs(); // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers // get assigned (in left-to-right order) for passing as follows... unsigned ArgNo = 0; for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); it != ie; ++it, ++ArgNo) { bool IsNamedArg = ArgNo < NumRequiredArgs; unsigned neededInt, neededSSE; it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, neededSSE, IsNamedArg); // AMD64-ABI 3.2.3p3: If there are no registers available for any // eightbyte of an argument, the whole argument is passed on the // stack. If registers have already been assigned for some // eightbytes of such an argument, the assignments get reverted. if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { freeIntRegs -= neededInt; freeSSERegs -= neededSSE; } else { it->info = getIndirectResult(it->type, freeIntRegs); } } } static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) { llvm::Value *overflow_arg_area_p = CGF.Builder.CreateStructGEP( nullptr, VAListAddr, 2, "overflow_arg_area_p"); llvm::Value *overflow_arg_area = CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 // byte boundary if alignment needed by type exceeds 8 byte boundary. // It isn't stated explicitly in the standard, but in practice we use // alignment greater than 16 where necessary. uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; if (Align > 8) { // overflow_arg_area = (overflow_arg_area + align - 1) & -align; llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, CGF.Int64Ty); llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); overflow_arg_area = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), overflow_arg_area->getType(), "overflow_arg_area.align"); } // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); llvm::Value *Res = CGF.Builder.CreateBitCast(overflow_arg_area, llvm::PointerType::getUnqual(LTy)); // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: // l->overflow_arg_area + sizeof(type). // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to // an 8 byte boundary. uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, "overflow_arg_area.next"); CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. return Res; } llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { // Assume that va_list type is correct; should be pointer to LLVM type: // struct { // i32 gp_offset; // i32 fp_offset; // i8* overflow_arg_area; // i8* reg_save_area; // }; unsigned neededInt, neededSSE; Ty = CGF.getContext().getCanonicalType(Ty); ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, /*isNamedArg*/false); // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed // in the registers. If not go to step 7. if (!neededInt && !neededSSE) return EmitVAArgFromMemory(VAListAddr, Ty, CGF); // AMD64-ABI 3.5.7p5: Step 2. 
Compute num_gp to hold the number of // general purpose registers needed to pass the type and num_fp to hold // the number of floating point registers needed. // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into // registers. In the case: l->gp_offset > 48 - num_gp * 8 or // l->fp_offset > 304 - num_fp * 16 go to step 7. // // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of // register save space. llvm::Value *InRegs = nullptr; llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr; llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr; if (neededInt) { gp_offset_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "gp_offset_p"); gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); } if (neededSSE) { fp_offset_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 1, "fp_offset_p"); fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); llvm::Value *FitsInFP = llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; } llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); // Emit code to load the value if it was passed in registers. CGF.EmitBlock(InRegBlock); // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with // an offset of l->gp_offset and/or l->fp_offset. This may require // copying to a temporary location in case the parameter is passed // in different register classes or requires an alignment greater // than 8 for general purpose registers and 16 for XMM registers. // // FIXME: This really results in shameful code when we end up needing to // collect arguments from different places; often what should result in a // simple assembling of a structure from scattered addresses has many more // loads than necessary. Can we clean this up? llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); llvm::Value *RegAddr = CGF.Builder.CreateLoad( CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3), "reg_save_area"); if (neededInt && neededSSE) { // FIXME: Cleanup. assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); llvm::Value *Tmp = CGF.CreateMemTemp(Ty); Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo()); assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); llvm::Type *TyLo = ST->getElementType(0); llvm::Type *TyHi = ST->getElementType(1); assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && "Unexpected ABI info for mixed regs"); llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr; llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ?
GPAddr : FPAddr; llvm::Value *V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0)); V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1)); RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy)); } else if (neededInt) { RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); RegAddr = CGF.Builder.CreateBitCast(RegAddr, llvm::PointerType::getUnqual(LTy)); // Copy to a temporary if necessary to ensure the appropriate alignment. std::pair<CharUnits, CharUnits> SizeAlign = CGF.getContext().getTypeInfoInChars(Ty); uint64_t TySize = SizeAlign.first.getQuantity(); unsigned TyAlign = SizeAlign.second.getQuantity(); if (TyAlign > 8) { llvm::Value *Tmp = CGF.CreateMemTemp(Ty); CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false); RegAddr = Tmp; } } else if (neededSSE == 1) { RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); RegAddr = CGF.Builder.CreateBitCast(RegAddr, llvm::PointerType::getUnqual(LTy)); } else { assert(neededSSE == 2 && "Invalid number of needed registers!"); // SSE registers are spaced 16 bytes apart in the register save // area; we need to collect the two eightbytes together. llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); llvm::Type *DoubleTy = CGF.DoubleTy; llvm::Type *DblPtrTy = llvm::PointerType::getUnqual(DoubleTy); llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr); llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty); Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo()); V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, DblPtrTy)); CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0)); V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, DblPtrTy)); CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1)); RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy)); } // AMD64-ABI 3.5.7p5: Step 5. Set: // l->gp_offset = l->gp_offset + num_gp * 8 // l->fp_offset = l->fp_offset + num_fp * 16. if (neededInt) { llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), gp_offset_p); } if (neededSSE) { llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), fp_offset_p); } CGF.EmitBranch(ContBlock); // Emit code to load the value if it was passed in memory. CGF.EmitBlock(InMemBlock); llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); // Return the appropriate result.
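// Sketch of the control flow emitted above and joined below (the names are
// the basic-block labels used in this function):
//   vaarg.in_reg: compute the address inside reg_save_area and bump
//                 gp_offset/fp_offset;
//   vaarg.in_mem: take the address from overflow_arg_area instead;
//   vaarg.end:    merge the two addresses with a phi.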
CGF.EmitBlock(ContBlock); llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, "vaarg.addr"); ResAddr->addIncoming(RegAddr, InRegBlock); ResAddr->addIncoming(MemAddr, InMemBlock); return ResAddr; } ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType) const { if (Ty->isVoidType()) return ABIArgInfo::getIgnore(); if (const EnumType *EnumTy = Ty->getAs<EnumType>()) Ty = EnumTy->getDecl()->getIntegerType(); TypeInfo Info = getContext().getTypeInfo(Ty); uint64_t Width = Info.Width; unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity(); const RecordType *RT = Ty->getAs<RecordType>(); if (RT) { if (!IsReturnType) { if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI())) return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); } if (RT->getDecl()->hasFlexibleArrayMember()) return ABIArgInfo::getIndirect(0, /*ByVal=*/false); - - // FIXME: mingw-w64-gcc emits 128-bit struct as i128 - if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment()) - return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), - Width)); } // vectorcall adds the concept of a homogeneous vector aggregate, similar to // other targets. const Type *Base = nullptr; uint64_t NumElts = 0; if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) { if (FreeSSERegs >= NumElts) { FreeSSERegs -= NumElts; if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType()) return ABIArgInfo::getDirect(); return ABIArgInfo::getExpand(); } return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); } if (Ty->isMemberPointerType()) { // If the member pointer is represented by an LLVM int or ptr, pass it // directly. llvm::Type *LLTy = CGT.ConvertType(Ty); if (LLTy->isPointerTy() || LLTy->isIntegerTy()) return ABIArgInfo::getDirect(); } if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) { // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is // not 1, 2, 4, or 8 bytes, must be passed by reference." if (Width > 64 || !llvm::isPowerOf2_64(Width)) return ABIArgInfo::getIndirect(0, /*ByVal=*/false); // Otherwise, coerce it to a small integer. return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width)); } // Bool type is always extended by the ABI; other builtin types are not // extended. const BuiltinType *BT = Ty->getAs<BuiltinType>(); if (BT && BT->getKind() == BuiltinType::Bool) return ABIArgInfo::getExtend(); + + // Mingw64 GCC uses the old 80-bit extended precision floating point unit. It + // passes long doubles indirectly through memory. + if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) { + const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); + if (LDF == &llvm::APFloat::x87DoubleExtended) + return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); + } return ABIArgInfo::getDirect(); } void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { bool IsVectorCall = FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall; // We can use up to 4 SSE return registers with vectorcall. unsigned FreeSSERegs = IsVectorCall ? 4 : 0; if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true); // We can use up to 6 SSE register parameters with vectorcall. FreeSSERegs = IsVectorCall ?
6 : 0; for (auto &I : FI.arguments()) I.info = classify(I.type, FreeSSERegs, false); } llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { llvm::Type *BPP = CGF.Int8PtrPtrTy; CGBuilderTy &Builder = CGF.Builder; llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); uint64_t Offset = llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); llvm::Value *NextAddr = Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next"); Builder.CreateStore(NextAddr, VAListAddrAsBPP); return AddrTyped; } // PowerPC-32 namespace { /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. class PPC32_SVR4_ABIInfo : public DefaultABIInfo { public: PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const override; }; class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { public: PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT)) {} int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { // This is recovered from gcc output. return 1; // r1 is the dedicated stack pointer } bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const override; }; } llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { if (const ComplexType *CTy = Ty->getAs()) { // TODO: Implement this. For now ignore. (void)CTy; return nullptr; } bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; bool isInt = Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType(); llvm::Type *CharPtr = CGF.Int8PtrTy; llvm::Type *CharPtrPtr = CGF.Int8PtrPtrTy; CGBuilderTy &Builder = CGF.Builder; llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr"); llvm::Value *GPRPtrAsInt = Builder.CreatePtrToInt(GPRPtr, CGF.Int32Ty); llvm::Value *FPRPtrAsInt = Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1)); llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr); llvm::Value *OverflowAreaPtrAsInt = Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3)); llvm::Value *OverflowAreaPtr = Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr); llvm::Value *RegsaveAreaPtrAsInt = Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4)); llvm::Value *RegsaveAreaPtr = Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr); llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr"); // Align GPR when TY is i64. if (isI64) { llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1)); llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1)); llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1)); GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR); } llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr"); llvm::Value *OverflowArea = Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area"); llvm::Value *OverflowAreaAsInt = Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty); llvm::Value *RegsaveArea = Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area"); llvm::Value *RegsaveAreaAsInt = Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty); llvm::Value *CC = Builder.CreateICmpULT(isInt ? 
GPR : FPR, Builder.getInt8(8), "cond"); llvm::Value *RegConstant = Builder.CreateMul(isInt ? GPR : FPR, Builder.getInt8(isInt ? 4 : 8)); llvm::Value *OurReg = Builder.CreateAdd( RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty)); if (Ty->isFloatingType()) OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32)); llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs"); llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); Builder.CreateCondBr(CC, UsingRegs, UsingOverflow); CGF.EmitBlock(UsingRegs); llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy); // Increase the GPR/FPR indexes. if (isInt) { GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1)); Builder.CreateStore(GPR, GPRPtr); } else { FPR = Builder.CreateAdd(FPR, Builder.getInt8(1)); Builder.CreateStore(FPR, FPRPtr); } CGF.EmitBranch(Cont); CGF.EmitBlock(UsingOverflow); // Increase the overflow area. llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy); OverflowAreaAsInt = Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8)); Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr), OverflowAreaPtr); CGF.EmitBranch(Cont); CGF.EmitBlock(Cont); llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr"); Result->addIncoming(Result1, UsingRegs); Result->addIncoming(Result2, UsingOverflow); if (Ty->isAggregateType()) { llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr"); return Builder.CreateLoad(AGGPtr, false, "aggr"); } return Result; } bool PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const { // This is calculated from the LLVM and GCC tables and verified // against gcc output. AFAIK all ABIs use the same encoding. CodeGen::CGBuilderTy &Builder = CGF.Builder; llvm::IntegerType *i8 = CGF.Int8Ty; llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); // 0-31: r0-31, the 4-byte general-purpose registers AssignToArrayRange(Builder, Address, Four8, 0, 31); // 32-63: fp0-31, the 8-byte floating-point registers AssignToArrayRange(Builder, Address, Eight8, 32, 63); // 64-76 are various 4-byte special-purpose registers: // 64: mq // 65: lr // 66: ctr // 67: ap // 68-75 cr0-7 // 76: xer AssignToArrayRange(Builder, Address, Four8, 64, 76); // 77-108: v0-31, the 16-byte vector registers AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); // 109: vrsave // 110: vscr // 111: spe_acc // 112: spefscr // 113: sfp AssignToArrayRange(Builder, Address, Four8, 109, 113); return false; } // PowerPC-64 namespace { /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. class PPC64_SVR4_ABIInfo : public DefaultABIInfo { public: enum ABIKind { ELFv1 = 0, ELFv2 }; private: static const unsigned GPRBits = 64; ABIKind Kind; bool HasQPX; // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and // will be passed in a QPX register. 
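// For example (illustrative): a four-element 'vector double' (256 bits) or a
// four-element 'vector float' (128 bits) satisfies the checks below, while a
// one-element vector never does.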
bool IsQPXVectorTy(const Type *Ty) const { if (!HasQPX) return false; if (const VectorType *VT = Ty->getAs<VectorType>()) { unsigned NumElements = VT->getNumElements(); if (NumElements == 1) return false; if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) { if (getContext().getTypeSize(Ty) <= 256) return true; } else if (VT->getElementType()-> isSpecificBuiltinType(BuiltinType::Float)) { if (getContext().getTypeSize(Ty) <= 128) return true; } } return false; } bool IsQPXVectorTy(QualType Ty) const { return IsQPXVectorTy(Ty.getTypePtr()); } public: PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX) : DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {} bool isPromotableTypeForABI(QualType Ty) const; bool isAlignedParamType(QualType Ty, bool &Align32) const; ABIArgInfo classifyReturnType(QualType RetTy) const; ABIArgInfo classifyArgumentType(QualType Ty) const; bool isHomogeneousAggregateBaseType(QualType Ty) const override; bool isHomogeneousAggregateSmallEnough(const Type *Ty, uint64_t Members) const override; // TODO: We can add more logic to computeInfo to improve performance. // Example: For aggregate arguments that fit in a register, we could // use getDirectInReg (as is done below for structs containing a single // floating-point value) to avoid pushing them to memory on function // entry. This would require changing the logic in PPCISelLowering // when lowering the parameters in the caller and args in the callee. void computeInfo(CGFunctionInfo &FI) const override { if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &I : FI.arguments()) { // We rely on the default argument classification for the most part. // One exception: An aggregate containing a single floating-point // or vector item must be passed in a register if one is available. const Type *T = isSingleElementStruct(I.type, getContext()); if (T) { const BuiltinType *BT = T->getAs<BuiltinType>(); if (IsQPXVectorTy(T) || (T->isVectorType() && getContext().getTypeSize(T) == 128) || (BT && BT->isFloatingPoint())) { QualType QT(T, 0); I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); continue; } } I.info = classifyArgumentType(I.type); } } llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const override; }; class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { public: PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX) : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX)) {} int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { // This is recovered from gcc output. return 1; // r1 is the dedicated stack pointer } bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const override; }; class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { public: PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { // This is recovered from gcc output. return 1; // r1 is the dedicated stack pointer } bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const override; }; } // Return true if the ABI requires Ty to be passed sign- or zero- // extended to 64 bits. bool PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { // Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>()) Ty = EnumTy->getDecl()->getIntegerType(); // Promotable integer types are required to be promoted by the ABI. if (Ty->isPromotableIntegerType()) return true; // In addition to the usual promotable integer types, we also need to // extend all 32-bit types, since the ABI requires promotion to 64 bits. if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) switch (BT->getKind()) { case BuiltinType::Int: case BuiltinType::UInt: return true; default: break; } return false; } /// isAlignedParamType - Determine whether a type requires 16-byte /// alignment in the parameter area. bool PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty, bool &Align32) const { Align32 = false; // Complex types are passed just like their elements. if (const ComplexType *CTy = Ty->getAs<ComplexType>()) Ty = CTy->getElementType(); // Only vector types of size 16 bytes need alignment (larger types are // passed via reference, smaller types are not aligned). if (IsQPXVectorTy(Ty)) { if (getContext().getTypeSize(Ty) > 128) Align32 = true; return true; } else if (Ty->isVectorType()) { return getContext().getTypeSize(Ty) == 128; } // For single-element float/vector structs, we consider the whole type // to have the same alignment requirements as its single element. const Type *AlignAsType = nullptr; const Type *EltType = isSingleElementStruct(Ty, getContext()); if (EltType) { const BuiltinType *BT = EltType->getAs<BuiltinType>(); if (IsQPXVectorTy(EltType) || (EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) || (BT && BT->isFloatingPoint())) AlignAsType = EltType; } // Likewise for ELFv2 homogeneous aggregates. const Type *Base = nullptr; uint64_t Members = 0; if (!AlignAsType && Kind == ELFv2 && isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) AlignAsType = Base; // With special case aggregates, only vector base types need alignment. if (AlignAsType && IsQPXVectorTy(AlignAsType)) { if (getContext().getTypeSize(AlignAsType) > 128) Align32 = true; return true; } else if (AlignAsType) { return AlignAsType->isVectorType(); } // Otherwise, we only need alignment for any aggregate type that // has an alignment requirement of >= 16 bytes. if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) { if (HasQPX && getContext().getTypeAlign(Ty) >= 256) Align32 = true; return true; } return false; } /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous /// aggregate. Base is set to the base element type, and Members is set /// to the number of base elements. bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const { if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { uint64_t NElements = AT->getSize().getZExtValue(); if (NElements == 0) return false; if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) return false; Members *= NElements; } else if (const RecordType *RT = Ty->getAs<RecordType>()) { const RecordDecl *RD = RT->getDecl(); if (RD->hasFlexibleArrayMember()) return false; Members = 0; // If this is a C++ record, check the bases first. if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { for (const auto &I : CXXRD->bases()) { // Ignore empty records. if (isEmptyRecord(getContext(), I.getType(), true)) continue; uint64_t FldMembers; if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) return false; Members += FldMembers; } } for (const auto *FD : RD->fields()) { // Ignore (non-zero arrays of) empty records.
QualType FT = FD->getType(); while (const ConstantArrayType *AT = getContext().getAsConstantArrayType(FT)) { if (AT->getSize().getZExtValue() == 0) return false; FT = AT->getElementType(); } if (isEmptyRecord(getContext(), FT, true)) continue; // For compatibility with GCC, ignore empty bitfields in C++ mode. if (getContext().getLangOpts().CPlusPlus && FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) continue; uint64_t FldMembers; if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) return false; Members = (RD->isUnion() ? std::max(Members, FldMembers) : Members + FldMembers); } if (!Base) return false; // Ensure there is no padding. if (getContext().getTypeSize(Base) * Members != getContext().getTypeSize(Ty)) return false; } else { Members = 1; if (const ComplexType *CT = Ty->getAs<ComplexType>()) { Members = 2; Ty = CT->getElementType(); } // Most ABIs only support float, double, and some vector type widths. if (!isHomogeneousAggregateBaseType(Ty)) return false; // The base type must be the same for all members. Types that // agree in both total size and mode (float vs. vector) are // treated as being equivalent here. const Type *TyPtr = Ty.getTypePtr(); if (!Base) Base = TyPtr; if (Base->isVectorType() != TyPtr->isVectorType() || getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) return false; } return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); } bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { // Homogeneous aggregates for ELFv2 must have base types of float, // double, long double, or 128-bit vectors. if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { if (BT->getKind() == BuiltinType::Float || BT->getKind() == BuiltinType::Double || BT->getKind() == BuiltinType::LongDouble) return true; } if (const VectorType *VT = Ty->getAs<VectorType>()) { if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty)) return true; } return false; } bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( const Type *Base, uint64_t Members) const { // Vector types require one register, floating point types require one // or two registers depending on their size. uint32_t NumRegs = Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64; // Homogeneous Aggregates may occupy at most 8 registers. return Members * NumRegs <= 8; } ABIArgInfo PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { Ty = useFirstFieldIfTransparentUnion(Ty); if (Ty->isAnyComplexType()) return ABIArgInfo::getDirect(); // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes) // or via reference (larger than 16 bytes). if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) { uint64_t Size = getContext().getTypeSize(Ty); if (Size > 128) return ABIArgInfo::getIndirect(0, /*ByVal=*/false); else if (Size < 128) { llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); return ABIArgInfo::getDirect(CoerceTy); } } if (isAggregateTypeForABI(Ty)) { if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); bool Align32; uint64_t ABIAlign = isAlignedParamType(Ty, Align32) ? (Align32 ? 32 : 16) : 8; uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; // ELFv2 homogeneous aggregates are passed as array types.
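// For example (illustrative): under ELFv2, 'struct { double d[3]; }' is a
// homogeneous aggregate with Base == double and Members == 3, so it is
// coerced to the array type [3 x double] below.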
const Type *Base = nullptr; uint64_t Members = 0; if (Kind == ELFv2 && isHomogeneousAggregate(Ty, Base, Members)) { llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); return ABIArgInfo::getDirect(CoerceTy); } // If an aggregate may end up fully in registers, we do not // use the ByVal method, but pass the aggregate as array. // This is usually beneficial since we avoid forcing the // back-end to store the argument to memory. uint64_t Bits = getContext().getTypeSize(Ty); if (Bits > 0 && Bits <= 8 * GPRBits) { llvm::Type *CoerceTy; // Types up to 8 bytes are passed as integer type (which will be // properly aligned in the argument save area doubleword). if (Bits <= GPRBits) CoerceTy = llvm::IntegerType::get(getVMContext(), llvm::RoundUpToAlignment(Bits, 8)); // Larger types are passed as arrays, with the base type selected // according to the required alignment in the save area. else { uint64_t RegBits = ABIAlign * 8; uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits; llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); } return ABIArgInfo::getDirect(CoerceTy); } // All other aggregates are passed ByVal. return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true, /*Realign=*/TyAlign > ABIAlign); } return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } ABIArgInfo PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { if (RetTy->isVoidType()) return ABIArgInfo::getIgnore(); if (RetTy->isAnyComplexType()) return ABIArgInfo::getDirect(); // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) // or via reference (larger than 16 bytes). if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) { uint64_t Size = getContext().getTypeSize(RetTy); if (Size > 128) return ABIArgInfo::getIndirect(0); else if (Size < 128) { llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); return ABIArgInfo::getDirect(CoerceTy); } } if (isAggregateTypeForABI(RetTy)) { // ELFv2 homogeneous aggregates are returned as array types. const Type *Base = nullptr; uint64_t Members = 0; if (Kind == ELFv2 && isHomogeneousAggregate(RetTy, Base, Members)) { llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); return ABIArgInfo::getDirect(CoerceTy); } // ELFv2 small aggregates are returned in up to two registers. uint64_t Bits = getContext().getTypeSize(RetTy); if (Kind == ELFv2 && Bits <= 2 * GPRBits) { if (Bits == 0) return ABIArgInfo::getIgnore(); llvm::Type *CoerceTy; if (Bits > GPRBits) { CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr); } else CoerceTy = llvm::IntegerType::get(getVMContext(), llvm::RoundUpToAlignment(Bits, 8)); return ABIArgInfo::getDirect(CoerceTy); } // All other aggregates are returned indirectly. return ABIArgInfo::getIndirect(0); } return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 
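// (On 64-bit SVR4 PowerPC the va_list is a plain pointer into the parameter
// save area; each argument occupies one or more doubleword slots, which is
// why the size is rounded up to a multiple of 8 below. Sketch, not
// normative.)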
llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { llvm::Type *BP = CGF.Int8PtrTy; llvm::Type *BPP = CGF.Int8PtrPtrTy; CGBuilderTy &Builder = CGF.Builder; llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); // Handle types that require 16-byte alignment in the parameter save area. bool Align32; if (isAlignedParamType(Ty, Align32)) { llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(Align32 ? 31 : 15)); AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(Align32 ? -32 : -16)); Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); } // Update the va_list pointer. The pointer should be bumped by the // size of the object. We can trust getTypeSize() except for a complex // type whose base type is smaller than a doubleword. For these, the // size of the object is 16 bytes; see below for further explanation. unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8; QualType BaseTy; unsigned CplxBaseSize = 0; if (const ComplexType *CTy = Ty->getAs()) { BaseTy = CTy->getElementType(); CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8; if (CplxBaseSize < 8) SizeInBytes = 16; } unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8); llvm::Value *NextAddr = Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "ap.next"); Builder.CreateStore(NextAddr, VAListAddrAsBPP); // If we have a complex type and the base type is smaller than 8 bytes, // the ABI calls for the real and imaginary parts to be right-adjusted // in separate doublewords. However, Clang expects us to produce a // pointer to a structure with the two parts packed tightly. So generate // loads of the real and imaginary parts relative to the va_list pointer, // and store them to a temporary structure. if (CplxBaseSize && CplxBaseSize < 8) { llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); llvm::Value *ImagAddr = RealAddr; if (CGF.CGM.getDataLayout().isBigEndian()) { RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize)); ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize)); } else { ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8)); } llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy)); RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy); ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy); llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal"); llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag"); llvm::AllocaInst *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), "vacplx"); llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 0, ".real"); llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 1, ".imag"); Builder.CreateStore(Real, RealPtr, false); Builder.CreateStore(Imag, ImagPtr, false); return Ptr; } // If the argument is smaller than 8 bytes, it is right-adjusted in // its doubleword slot. Adjust the pointer to pick it up from the // correct offset. 
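// For example (illustrative): on a big-endian target a 4-byte 'int' occupies
// bytes 4..7 of its doubleword slot, so the pointer is advanced by
// 8 - SizeInBytes == 4 bytes before the load.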
if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) { llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes)); Addr = Builder.CreateIntToPtr(AddrAsInt, BP); } llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); return Builder.CreateBitCast(Addr, PTy); } static bool PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) { // This is calculated from the LLVM and GCC tables and verified // against gcc output. AFAIK all ABIs use the same encoding. CodeGen::CGBuilderTy &Builder = CGF.Builder; llvm::IntegerType *i8 = CGF.Int8Ty; llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); // 0-31: r0-31, the 8-byte general-purpose registers AssignToArrayRange(Builder, Address, Eight8, 0, 31); // 32-63: fp0-31, the 8-byte floating-point registers AssignToArrayRange(Builder, Address, Eight8, 32, 63); // 64-76 are various 4-byte special-purpose registers: // 64: mq // 65: lr // 66: ctr // 67: ap // 68-75 cr0-7 // 76: xer AssignToArrayRange(Builder, Address, Four8, 64, 76); // 77-108: v0-31, the 16-byte vector registers AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); // 109: vrsave // 110: vscr // 111: spe_acc // 112: spefscr // 113: sfp AssignToArrayRange(Builder, Address, Four8, 109, 113); return false; } bool PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const { return PPC64_initDwarfEHRegSizeTable(CGF, Address); } bool PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const { return PPC64_initDwarfEHRegSizeTable(CGF, Address); } //===----------------------------------------------------------------------===// // AArch64 ABI Implementation //===----------------------------------------------------------------------===// namespace { class AArch64ABIInfo : public ABIInfo { public: enum ABIKind { AAPCS = 0, DarwinPCS }; private: ABIKind Kind; public: AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {} private: ABIKind getABIKind() const { return Kind; } bool isDarwinPCS() const { return Kind == DarwinPCS; } ABIArgInfo classifyReturnType(QualType RetTy) const; ABIArgInfo classifyArgumentType(QualType RetTy) const; bool isHomogeneousAggregateBaseType(QualType Ty) const override; bool isHomogeneousAggregateSmallEnough(const Type *Ty, uint64_t Members) const override; bool isIllegalVectorType(QualType Ty) const; void computeInfo(CGFunctionInfo &FI) const override { if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &it : FI.arguments()) it.info = classifyArgumentType(it.type); } llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const; llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const; llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const override { return isDarwinPCS() ? 
EmitDarwinVAArg(VAListAddr, Ty, CGF) : EmitAAPCSVAArg(VAListAddr, Ty, CGF); } }; class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { public: AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {} StringRef getARCRetainAutoreleasedReturnValueMarker() const override { return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue"; } int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { return 31; } bool doesReturnSlotInterfereWithArgs() const override { return false; } }; } ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const { Ty = useFirstFieldIfTransparentUnion(Ty); // Handle illegal vector types here. if (isIllegalVectorType(Ty)) { uint64_t Size = getContext().getTypeSize(Ty); if (Size <= 32) { llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); return ABIArgInfo::getDirect(ResType); } if (Size == 64) { llvm::Type *ResType = llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2); return ABIArgInfo::getDirect(ResType); } if (Size == 128) { llvm::Type *ResType = llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4); return ABIArgInfo::getDirect(ResType); } return ABIArgInfo::getIndirect(0, /*ByVal=*/false); } if (!isAggregateTypeForABI(Ty)) { // Treat an enum type as its underlying type. if (const EnumType *EnumTy = Ty->getAs()) Ty = EnumTy->getDecl()->getIntegerType(); return (Ty->isPromotableIntegerType() && isDarwinPCS() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } // Structures with either a non-trivial destructor or a non-trivial // copy constructor are always indirect. if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA == CGCXXABI::RAA_DirectInMemory); } // Empty records are always ignored on Darwin, but actually passed in C++ mode // elsewhere for GNU compatibility. if (isEmptyRecord(getContext(), Ty, true)) { if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS()) return ABIArgInfo::getIgnore(); return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); } // Homogeneous Floating-point Aggregates (HFAs) need to be expanded. const Type *Base = nullptr; uint64_t Members = 0; if (isHomogeneousAggregate(Ty, Base, Members)) { return ABIArgInfo::getDirect( llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); } // Aggregates <= 16 bytes are passed directly in registers or on the stack. uint64_t Size = getContext().getTypeSize(Ty); if (Size <= 128) { unsigned Alignment = getContext().getTypeAlign(Ty); Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. // For aggregates with 16-byte alignment, we use i128. if (Alignment < 128 && Size == 128) { llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); } return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); } return ABIArgInfo::getIndirect(0, /*ByVal=*/false); } ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const { if (RetTy->isVoidType()) return ABIArgInfo::getIgnore(); // Large vector types should be returned via memory. if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) return ABIArgInfo::getIndirect(0); if (!isAggregateTypeForABI(RetTy)) { // Treat an enum type as its underlying type. 
if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) RetTy = EnumTy->getDecl()->getIntegerType(); return (RetTy->isPromotableIntegerType() && isDarwinPCS() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } if (isEmptyRecord(getContext(), RetTy, true)) return ABIArgInfo::getIgnore(); const Type *Base = nullptr; uint64_t Members = 0; if (isHomogeneousAggregate(RetTy, Base, Members)) // Homogeneous Floating-point Aggregates (HFAs) are returned directly. return ABIArgInfo::getDirect(); // Aggregates <= 16 bytes are returned directly in registers or on the stack. uint64_t Size = getContext().getTypeSize(RetTy); if (Size <= 128) { unsigned Alignment = getContext().getTypeAlign(RetTy); Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. // For aggregates with 16-byte alignment, we use i128. if (Alignment < 128 && Size == 128) { llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); } return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); } return ABIArgInfo::getIndirect(0); } /// isIllegalVectorType - check whether the vector type is legal for AArch64. bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { if (const VectorType *VT = Ty->getAs<VectorType>()) { // Check whether VT is legal. unsigned NumElements = VT->getNumElements(); uint64_t Size = getContext().getTypeSize(VT); // NumElements should be a power of 2 between 1 and 16. if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16) return true; return Size != 64 && (Size != 128 || NumElements == 1); } return false; } bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { // Homogeneous aggregates for AAPCS64 must have base types of a floating // point type or a short-vector type. This is the same as the 32-bit ABI, // but with the difference that any floating-point type is allowed, // including __fp16. if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { if (BT->isFloatingPoint()) return true; } else if (const VectorType *VT = Ty->getAs<VectorType>()) { unsigned VecSize = getContext().getTypeSize(VT); if (VecSize == 64 || VecSize == 128) return true; } return false; } bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const { return Members <= 4; } llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { ABIArgInfo AI = classifyArgumentType(Ty); bool IsIndirect = AI.isIndirect(); llvm::Type *BaseTy = CGF.ConvertType(Ty); if (IsIndirect) BaseTy = llvm::PointerType::getUnqual(BaseTy); else if (AI.getCoerceToType()) BaseTy = AI.getCoerceToType(); unsigned NumRegs = 1; if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) { BaseTy = ArrTy->getElementType(); NumRegs = ArrTy->getNumElements(); } bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy(); // The AArch64 va_list type and handling is specified in the Procedure Call // Standard, section B.4: // // struct { // void *__stack; // void *__gr_top; // void *__vr_top; // int __gr_offs; // int __vr_offs; // }; llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); auto &Ctx = CGF.getContext(); llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr; int reg_top_index; int RegSize = IsIndirect ?
8 : getContext().getTypeSize(Ty) / 8; if (!IsFPR) { // 3 is the field number of __gr_offs reg_offs_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "gr_offs_p"); reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); reg_top_index = 1; // field number for __gr_top RegSize = llvm::RoundUpToAlignment(RegSize, 8); } else { // 4 is the field number of __vr_offs. reg_offs_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 4, "vr_offs_p"); reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); reg_top_index = 2; // field number for __vr_top RegSize = 16 * NumRegs; } //======================================= // Find out where argument was passed //======================================= // If reg_offs >= 0 we're already using the stack for this type of // argument. We don't want to keep updating reg_offs (in case it overflows, // though anyone passing 2GB of arguments, each at most 16 bytes, deserves // whatever they get). llvm::Value *UsingStack = nullptr; UsingStack = CGF.Builder.CreateICmpSGE( reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0)); CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock); // Otherwise, at least some kind of argument could go in these registers, the // question is whether this particular type is too big. CGF.EmitBlock(MaybeRegBlock); // Integer arguments may need to correct register alignment (for example a // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we // align __gr_offs to calculate the potential address. if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) { int Align = Ctx.getTypeAlign(Ty) / 8; reg_offs = CGF.Builder.CreateAdd( reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), "align_regoffs"); reg_offs = CGF.Builder.CreateAnd( reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), "aligned_regoffs"); } // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. llvm::Value *NewOffset = nullptr; NewOffset = CGF.Builder.CreateAdd( reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); CGF.Builder.CreateStore(NewOffset, reg_offs_p); // Now we're in a position to decide whether this argument really was in // registers or not. llvm::Value *InRegs = nullptr; InRegs = CGF.Builder.CreateICmpSLE( NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); //======================================= // Argument was in registers //======================================= // Now we emit the code for if the argument was originally passed in // registers. First start the appropriate block: CGF.EmitBlock(InRegBlock); llvm::Value *reg_top_p = nullptr, *reg_top = nullptr; reg_top_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, reg_top_index, "reg_top_p"); reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs); llvm::Value *RegAddr = nullptr; llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); if (IsIndirect) { // If it's been passed indirectly (actually a struct), whatever we find from // stored registers or on the stack will actually be a struct **. MemTy = llvm::PointerType::getUnqual(MemTy); } const Type *Base = nullptr; uint64_t NumMembers = 0; bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); if (IsHFA && NumMembers > 1) { // Homogeneous aggregates passed in registers will have their elements split // and stored 16-bytes apart regardless of size (they're notionally in qN, // qN+1, ...). 
We reload and store into a temporary local variable contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(HFATy);
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
      Offset = 16 - Ctx.getTypeSize(Base) / 8;
    for (unsigned i = 0; i < NumMembers; ++i) {
      llvm::Value *BaseOffset =
          llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
      llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateBitCast(
          LoadAddr, llvm::PointerType::getUnqual(BaseTy));
      llvm::Value *StoreAddr =
          CGF.Builder.CreateStructGEP(Tmp->getAllocatedType(), Tmp, i);
      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }
    RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory.
    unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
      int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
      BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
      BaseAddr = CGF.Builder.CreateAdd(
          BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
      BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
    }
    RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
  }
  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);
  llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
  stack_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "stack_p");
  OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer
  // and floating-point ones might be affected.
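  // (Illustrative sketch, not from the PCS text: for a 16-byte-aligned C
  // argument such as "__int128", the add/and pair emitted below rounds the
  // saved __stack pointer up, i.e. roughly "__stack = (__stack + 15) & ~15;"
  // in pseudo-C.)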
if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) { int Align = Ctx.getTypeAlign(Ty) / 8; OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty); OnStackAddr = CGF.Builder.CreateAdd( OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), "align_stack"); OnStackAddr = CGF.Builder.CreateAnd( OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align), "align_stack"); OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy); } uint64_t StackSize; if (IsIndirect) StackSize = 8; else StackSize = Ctx.getTypeSize(Ty) / 8; // All stack slots are 8 bytes StackSize = llvm::RoundUpToAlignment(StackSize, 8); llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize); llvm::Value *NewStack = CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack"); // Write the new value of __stack for the next call to va_arg CGF.Builder.CreateStore(NewStack, stack_p); if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) && Ctx.getTypeSize(Ty) < 64) { int Offset = 8 - Ctx.getTypeSize(Ty) / 8; OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty); OnStackAddr = CGF.Builder.CreateAdd( OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be"); OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy); } OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy); CGF.EmitBranch(ContBlock); //======================================= // Tidy up //======================================= CGF.EmitBlock(ContBlock); llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr"); ResAddr->addIncoming(RegAddr, InRegBlock); ResAddr->addIncoming(OnStackAddr, OnStackBlock); if (IsIndirect) return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"); return ResAddr; } llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { // We do not support va_arg for aggregates or illegal vector types. // Lower VAArg here for these cases and use the LLVM va_arg instruction for // other cases. if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) return nullptr; uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8; uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; const Type *Base = nullptr; uint64_t Members = 0; bool isHA = isHomogeneousAggregate(Ty, Base, Members); bool isIndirect = false; // Arguments bigger than 16 bytes which aren't homogeneous aggregates should // be passed indirectly. if (Size > 16 && !isHA) { isIndirect = true; Size = 8; Align = 8; } llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); llvm::Type *BPP = llvm::PointerType::getUnqual(BP); CGBuilderTy &Builder = CGF.Builder; llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); if (isEmptyRecord(getContext(), Ty, true)) { // These are ignored for parameter passing purposes. 
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); return Builder.CreateBitCast(Addr, PTy); } const uint64_t MinABIAlign = 8; if (Align > MinABIAlign) { llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1); Addr = Builder.CreateGEP(Addr, Offset); llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1)); llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask); Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align"); } uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign); llvm::Value *NextAddr = Builder.CreateGEP( Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next"); Builder.CreateStore(NextAddr, VAListAddrAsBPP); if (isIndirect) Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); return AddrTyped; } //===----------------------------------------------------------------------===// // ARM ABI Implementation //===----------------------------------------------------------------------===// namespace { class ARMABIInfo : public ABIInfo { public: enum ABIKind { APCS = 0, AAPCS = 1, AAPCS_VFP }; private: ABIKind Kind; public: ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) { setCCs(); } bool isEABI() const { switch (getTarget().getTriple().getEnvironment()) { case llvm::Triple::Android: case llvm::Triple::EABI: case llvm::Triple::EABIHF: case llvm::Triple::GNUEABI: case llvm::Triple::GNUEABIHF: return true; default: return false; } } bool isEABIHF() const { switch (getTarget().getTriple().getEnvironment()) { case llvm::Triple::EABIHF: case llvm::Triple::GNUEABIHF: return true; default: return false; } } ABIKind getABIKind() const { return Kind; } private: ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const; ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const; bool isIllegalVectorType(QualType Ty) const; bool isHomogeneousAggregateBaseType(QualType Ty) const override; bool isHomogeneousAggregateSmallEnough(const Type *Ty, uint64_t Members) const override; void computeInfo(CGFunctionInfo &FI) const override; llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const override; llvm::CallingConv::ID getLLVMDefaultCC() const; llvm::CallingConv::ID getABIDefaultCC() const; void setCCs(); }; class ARMTargetCodeGenInfo : public TargetCodeGenInfo { public: ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} const ARMABIInfo &getABIInfo() const { return static_cast(TargetCodeGenInfo::getABIInfo()); } int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { return 13; } StringRef getARCRetainAutoreleasedReturnValueMarker() const override { return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; } bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const override { llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); // 0-15 are the 16 integer registers. 
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo().isEABI())
      return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
    if (!FD)
      return;

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI: Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
    }

    llvm::Function *Fn = cast<llvm::Function>(GV);
    Fn->addFnAttr("interrupt", Kind);

    if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
    llvm::AttrBuilder B;
    B.addStackAlignmentAttr(8);
    Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                      llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                              B));
  }
};

class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
  void addStackProbeSizeTargetAttribute(const Decl *D, llvm::GlobalValue *GV,
                                        CodeGen::CodeGenModule &CGM) const;

public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;
};

void WindowsARMTargetCodeGenInfo::addStackProbeSizeTargetAttribute(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  if (!isa<FunctionDecl>(D))
    return;
  if (CGM.getCodeGenOpts().StackProbeSize == 4096)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);
  F->addFnAttr("stack-probe-size",
               llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
}

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  addStackProbeSizeTargetAttribute(D, GV, CGM);
}
}

void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() =
        classifyReturnType(FI.getReturnType(), FI.isVariadic());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, FI.isVariadic());

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}

/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { switch (getABIKind()) { case APCS: return llvm::CallingConv::ARM_APCS; case AAPCS: return llvm::CallingConv::ARM_AAPCS; case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; } llvm_unreachable("bad ABI kind"); } void ARMABIInfo::setCCs() { assert(getRuntimeCC() == llvm::CallingConv::C); // Don't muddy up the IR with a ton of explicit annotations if // they'd just match what LLVM will infer from the triple. llvm::CallingConv::ID abiCC = getABIDefaultCC(); if (abiCC != getLLVMDefaultCC()) RuntimeCC = abiCC; BuiltinCC = (getABIKind() == APCS ? llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS); } ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic) const { // 6.1.2.1 The following argument types are VFP CPRCs: // A single-precision floating-point type (including promoted // half-precision types); A double-precision floating-point type; // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate // with a Base Type of a single- or double-precision floating-point type, // 64-bit containerized vectors or 128-bit containerized vectors with one // to four Elements. bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic; Ty = useFirstFieldIfTransparentUnion(Ty); // Handle illegal vector types here. if (isIllegalVectorType(Ty)) { uint64_t Size = getContext().getTypeSize(Ty); if (Size <= 32) { llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); return ABIArgInfo::getDirect(ResType); } if (Size == 64) { llvm::Type *ResType = llvm::VectorType::get( llvm::Type::getInt32Ty(getVMContext()), 2); return ABIArgInfo::getDirect(ResType); } if (Size == 128) { llvm::Type *ResType = llvm::VectorType::get( llvm::Type::getInt32Ty(getVMContext()), 4); return ABIArgInfo::getDirect(ResType); } return ABIArgInfo::getIndirect(0, /*ByVal=*/false); } if (!isAggregateTypeForABI(Ty)) { // Treat an enum type as its underlying type. if (const EnumType *EnumTy = Ty->getAs()) { Ty = EnumTy->getDecl()->getIntegerType(); } return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); } // Ignore empty records. if (isEmptyRecord(getContext(), Ty, true)) return ABIArgInfo::getIgnore(); if (IsEffectivelyAAPCS_VFP) { // Homogeneous Aggregates need to be expanded when we can fit the aggregate // into VFP registers. const Type *Base = nullptr; uint64_t Members = 0; if (isHomogeneousAggregate(Ty, Base, Members)) { assert(Base && "Base class should be set for homogeneous aggregate"); // Base can be a floating-point or a vector. return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); } } // Support byval for ARM. // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at // most 8-byte. We realign the indirect argument if type alignment is bigger // than ABI alignment. uint64_t ABIAlign = 4; uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; if (getABIKind() == ARMABIInfo::AAPCS_VFP || getABIKind() == ARMABIInfo::AAPCS) ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true, /*Realign=*/TyAlign > ABIAlign); } // Otherwise, pass by coercing to a structure of the appropriate size. 
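  // (Worked example, illustrative only: an 8-byte struct with 4-byte
  // alignment, e.g. "struct S { char c; short s; int i; };", is coerced by
  // the code below to [2 x i32], while an 8-byte struct with 8-byte
  // alignment, e.g. "struct T { long long x; };", becomes [1 x i64].)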
llvm::Type* ElemTy; unsigned SizeRegs; // FIXME: Try to match the types of the arguments more accurately where // we can. if (getContext().getTypeAlign(Ty) <= 32) { ElemTy = llvm::Type::getInt32Ty(getVMContext()); SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; } else { ElemTy = llvm::Type::getInt64Ty(getVMContext()); SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; } return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); } static bool isIntegerLikeType(QualType Ty, ASTContext &Context, llvm::LLVMContext &VMContext) { // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure // is called integer-like if its size is less than or equal to one word, and // the offset of each of its addressable sub-fields is zero. uint64_t Size = Context.getTypeSize(Ty); // Check that the type fits in a word. if (Size > 32) return false; // FIXME: Handle vector types! if (Ty->isVectorType()) return false; // Float types are never treated as "integer like". if (Ty->isRealFloatingType()) return false; // If this is a builtin or pointer type then it is ok. if (Ty->getAs() || Ty->isPointerType()) return true; // Small complex integer types are "integer like". if (const ComplexType *CT = Ty->getAs()) return isIntegerLikeType(CT->getElementType(), Context, VMContext); // Single element and zero sized arrays should be allowed, by the definition // above, but they are not. // Otherwise, it must be a record type. const RecordType *RT = Ty->getAs(); if (!RT) return false; // Ignore records with flexible arrays. const RecordDecl *RD = RT->getDecl(); if (RD->hasFlexibleArrayMember()) return false; // Check that all sub-fields are at offset 0, and are themselves "integer // like". const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); bool HadField = false; unsigned idx = 0; for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); i != e; ++i, ++idx) { const FieldDecl *FD = *i; // Bit-fields are not addressable, we only need to verify they are "integer // like". We still have to disallow a subsequent non-bitfield, for example: // struct { int : 0; int x } // is non-integer like according to gcc. if (FD->isBitField()) { if (!RD->isUnion()) HadField = true; if (!isIntegerLikeType(FD->getType(), Context, VMContext)) return false; continue; } // Check if this field is at offset 0. if (Layout.getFieldOffset(idx) != 0) return false; if (!isIntegerLikeType(FD->getType(), Context, VMContext)) return false; // Only allow at most one field in a structure. This doesn't match the // wording above, but follows gcc in situations with a field following an // empty structure. if (!RD->isUnion()) { if (HadField) return false; HadField = true; } } return true; } ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic) const { bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic; if (RetTy->isVoidType()) return ABIArgInfo::getIgnore(); // Large vector types should be returned via memory. if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) { return ABIArgInfo::getIndirect(0); } if (!isAggregateTypeForABI(RetTy)) { // Treat an enum type as its underlying type. if (const EnumType *EnumTy = RetTy->getAs()) RetTy = EnumTy->getDecl()->getIntegerType(); return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect(); } // Are we following APCS? 
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer-like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsEffectivelyAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members;
    if (isHomogeneousAggregate(RetTy, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0);
}

/// isIllegalVector - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be power of 2.
    if ((NumElements & (NumElements - 1)) != 0)
      return true;
    // Size should be greater than 32 bits.
    return Size <= 32;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
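  // (For illustration: "struct Vec3 { float x, y, z; };" qualifies as a
  // homogeneous aggregate with a float base type and three members, so under
  // AAPCS-VFP it would typically travel in s0-s2, while
  // "struct { double d[5]; };" fails the member-count check (5 > 4) and is
  // not an HFA.)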
if (const BuiltinType *BT = Ty->getAs()) { if (BT->getKind() == BuiltinType::Float || BT->getKind() == BuiltinType::Double || BT->getKind() == BuiltinType::LongDouble) return true; } else if (const VectorType *VT = Ty->getAs()) { unsigned VecSize = getContext().getTypeSize(VT); if (VecSize == 64 || VecSize == 128) return true; } return false; } bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const { return Members <= 4; } llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { llvm::Type *BP = CGF.Int8PtrTy; llvm::Type *BPP = CGF.Int8PtrPtrTy; CGBuilderTy &Builder = CGF.Builder; llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); if (isEmptyRecord(getContext(), Ty, true)) { // These are ignored for parameter passing purposes. llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); return Builder.CreateBitCast(Addr, PTy); } uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8; uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; bool IsIndirect = false; // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. if (getABIKind() == ARMABIInfo::AAPCS_VFP || getABIKind() == ARMABIInfo::AAPCS) TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); else TyAlign = 4; // Use indirect if size of the illegal vector is bigger than 16 bytes. if (isIllegalVectorType(Ty) && Size > 16) { IsIndirect = true; Size = 4; TyAlign = 4; } // Handle address alignment for ABI alignment > 4 bytes. if (TyAlign > 4) { assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!"); llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); } uint64_t Offset = llvm::RoundUpToAlignment(Size, 4); llvm::Value *NextAddr = Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next"); Builder.CreateStore(NextAddr, VAListAddrAsBPP); if (IsIndirect) Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) { // We can't directly cast ap.cur to pointer to a vector type, since ap.cur // may not be correctly aligned for the vector type. We create an aligned // temporary space and copy the content over from ap.cur to the temporary // space. This is necessary if the natural alignment of the type is greater // than the ABI alignment. llvm::Type *I8PtrTy = Builder.getInt8PtrTy(); CharUnits CharSize = getContext().getTypeSizeInChars(Ty); llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty), "var.align"); llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy); llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy); Builder.CreateMemCpy(Dst, Src, llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()), TyAlign, false); Addr = AlignedTemp; //The content is in aligned location. 
  }
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
  return AddrTyped;
}

//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CFG) const override;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;

private:
  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};

ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // note: this is different from default ABI
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                           : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Return aggregate types as indirect by value
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0, /* byval */ true);

  return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                        : ABIArgInfo::getDirect());
}

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  FI.setEffectiveCallingConvention(getRuntimeCC());
}

llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CFG) const {
  llvm_unreachable("NVPTX does not support varargs");
}

void NVPTXTargetCodeGenInfo::
setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  // Perform special handling in OpenCL mode
  if (M.getLangOpts().OpenCL) {
    // Use OpenCL function attributes to check for kernel functions
    // By default, all functions are device functions
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL __kernel functions get kernel metadata
      // Create !{<func-ref>, metadata !"kernel", i32 1} node
      addNVVMMetadata(F, "kernel", 1);
      // And kernel functions are not subject to inlining
      F->addFnAttr(llvm::Attribute::NoInline);
    }
  }

  // Perform special handling in CUDA mode.
  if (M.getLangOpts().CUDA) {
    // CUDA __global__ functions get a kernel metadata entry. Since
    // __global__ functions cannot be called from the device, we do not
    // need to set the noinline attribute.
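    // (Illustrative IR, names invented: a __global__ kernel @kern ends up
    // referenced from the named metadata built by addNVVMMetadata(), roughly
    //   !nvvm.annotations = !{!0}
    //   !0 = !{void ()* @kern, !"kernel", i32 1}
    // which is the same shape used for the OpenCL case above.)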
    if (FD->hasAttr<CUDAGlobalAttr>()) {
      // Create !{<func-ref>, metadata !"kernel", i32 1} node
      addNVVMMetadata(F, "kernel", 1);
    }
    if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
      // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
      llvm::APSInt MaxThreads(32);
      MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
      if (MaxThreads > 0)
        addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());

      // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
      // not specified in __launch_bounds__ or if the user specified a 0 value,
      // we don't have to add a PTX directive.
      if (Attr->getMinBlocks()) {
        llvm::APSInt MinBlocks(32);
        MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
        if (MinBlocks > 0)
          // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
          addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
      }
    }
  }
}

void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
                                             int Operand) {
  llvm::Module *M = F->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get "nvvm.annotations" metadata node
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
  // Append metadata to nvvm.annotations
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
}

//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class SystemZABIInfo : public ABIInfo {
  bool HasVector;

public:
  SystemZABIInfo(CodeGenTypes &CGT, bool HV) : ABIInfo(CGT), HasVector(HV) {}

  bool isPromotableIntegerType(QualType Ty) const;
  bool isCompoundType(QualType Ty) const;
  bool isVectorArgumentType(QualType Ty) const;
  bool isFPArgumentType(QualType Ty) const;
  QualType GetSingleElementType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType ArgTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
      : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
};

}

bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // 32-bit values must also be promoted.
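  // (For instance, a plain C "int" argument is widened to a full 64-bit GPR;
  // in the IR this surfaces as a parameter attribute, e.g.
  // "declare void @f(i32 signext %x)", or zeroext for "unsigned int".)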
if (const BuiltinType *BT = Ty->getAs()) switch (BT->getKind()) { case BuiltinType::Int: case BuiltinType::UInt: return true; default: return false; } return false; } bool SystemZABIInfo::isCompoundType(QualType Ty) const { return (Ty->isAnyComplexType() || Ty->isVectorType() || isAggregateTypeForABI(Ty)); } bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const { return (HasVector && Ty->isVectorType() && getContext().getTypeSize(Ty) <= 128); } bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { if (const BuiltinType *BT = Ty->getAs()) switch (BT->getKind()) { case BuiltinType::Float: case BuiltinType::Double: return true; default: return false; } return false; } QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const { if (const RecordType *RT = Ty->getAsStructureType()) { const RecordDecl *RD = RT->getDecl(); QualType Found; // If this is a C++ record, check the bases first. if (const CXXRecordDecl *CXXRD = dyn_cast(RD)) for (const auto &I : CXXRD->bases()) { QualType Base = I.getType(); // Empty bases don't affect things either way. if (isEmptyRecord(getContext(), Base, true)) continue; if (!Found.isNull()) return Ty; Found = GetSingleElementType(Base); } // Check the fields. for (const auto *FD : RD->fields()) { // For compatibility with GCC, ignore empty bitfields in C++ mode. // Unlike isSingleElementStruct(), empty structure and array fields // do count. So do anonymous bitfields that aren't zero-sized. if (getContext().getLangOpts().CPlusPlus && FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) continue; // Unlike isSingleElementStruct(), arrays do not count. // Nested structures still do though. if (!Found.isNull()) return Ty; Found = GetSingleElementType(FD->getType()); } // Unlike isSingleElementStruct(), trailing padding is allowed. // An 8-byte aligned struct s { float f; } is passed as a double. if (!Found.isNull()) return Found; } return Ty; } llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { // Assume that va_list type is correct; should be pointer to LLVM type: // struct { // i64 __gpr; // i64 __fpr; // i8 *__overflow_arg_area; // i8 *__reg_save_area; // }; // Every non-vector argument occupies 8 bytes and is passed by preference // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are // always passed on the stack. Ty = CGF.getContext().getCanonicalType(Ty); llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty); llvm::Type *APTy = llvm::PointerType::getUnqual(ArgTy); ABIArgInfo AI = classifyArgumentType(Ty); bool IsIndirect = AI.isIndirect(); bool InFPRs = false; bool IsVector = false; unsigned UnpaddedBitSize; if (IsIndirect) { APTy = llvm::PointerType::getUnqual(APTy); UnpaddedBitSize = 64; } else { if (AI.getCoerceToType()) ArgTy = AI.getCoerceToType(); InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy(); IsVector = ArgTy->isVectorTy(); UnpaddedBitSize = getContext().getTypeSize(Ty); } unsigned PaddedBitSize = (IsVector && UnpaddedBitSize > 64) ? 128 : 64; assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size."); unsigned PaddedSize = PaddedBitSize / 8; unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8; llvm::Type *IndexTy = CGF.Int64Ty; llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize); if (IsVector) { // Work out the address of a vector argument on the stack. // Vector arguments are always passed in the high bits of a // single (8 byte) or double (16 byte) stack slot. 
llvm::Value *OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 2, "overflow_arg_area_ptr"); llvm::Value *OverflowArgArea = CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"); llvm::Value *MemAddr = CGF.Builder.CreateBitCast(OverflowArgArea, APTy, "mem_addr"); // Update overflow_arg_area_ptr pointer llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area"); CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); return MemAddr; } unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding; if (InFPRs) { MaxRegs = 4; // Maximum of 4 FPR arguments RegCountField = 1; // __fpr RegSaveIndex = 16; // save offset for f0 RegPadding = 0; // floats are passed in the high bits of an FPR } else { MaxRegs = 5; // Maximum of 5 GPR arguments RegCountField = 0; // __gpr RegSaveIndex = 2; // save offset for r2 RegPadding = Padding; // values are passed in the low bits of a GPR } llvm::Value *RegCountPtr = CGF.Builder.CreateStructGEP( nullptr, VAListAddr, RegCountField, "reg_count_ptr"); llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, "fits_in_regs"); llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); // Emit code to load the value if it was passed in registers. CGF.EmitBlock(InRegBlock); // Work out the address of an argument register. llvm::Value *ScaledRegCount = CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); llvm::Value *RegBase = llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding); llvm::Value *RegOffset = CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); llvm::Value *RegSaveAreaPtr = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "reg_save_area_ptr"); llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); llvm::Value *RawRegAddr = CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr"); llvm::Value *RegAddr = CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr"); // Update the register count llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); llvm::Value *NewRegCount = CGF.Builder.CreateAdd(RegCount, One, "reg_count"); CGF.Builder.CreateStore(NewRegCount, RegCountPtr); CGF.EmitBranch(ContBlock); // Emit code to load the value if it was passed in memory. CGF.EmitBlock(InMemBlock); // Work out the address of a stack argument. llvm::Value *OverflowArgAreaPtr = CGF.Builder.CreateStructGEP( nullptr, VAListAddr, 2, "overflow_arg_area_ptr"); llvm::Value *OverflowArgArea = CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"); llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding); llvm::Value *RawMemAddr = CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr"); llvm::Value *MemAddr = CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr"); // Update overflow_arg_area_ptr pointer llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area"); CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); CGF.EmitBranch(ContBlock); // Return the appropriate result. 
CGF.EmitBlock(ContBlock); llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr"); ResAddr->addIncoming(RegAddr, InRegBlock); ResAddr->addIncoming(MemAddr, InMemBlock); if (IsIndirect) return CGF.Builder.CreateLoad(ResAddr, "indirect_arg"); return ResAddr; } ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { if (RetTy->isVoidType()) return ABIArgInfo::getIgnore(); if (isVectorArgumentType(RetTy)) return ABIArgInfo::getDirect(); if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) return ABIArgInfo::getIndirect(0); return (isPromotableIntegerType(RetTy) ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { // Handle the generic C++ ABI. if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); // Integers and enums are extended to full register width. if (isPromotableIntegerType(Ty)) return ABIArgInfo::getExtend(); // Handle vector types and vector-like structure types. Note that // as opposed to float-like structure types, we do not allow any // padding for vector-like structures, so verify the sizes match. uint64_t Size = getContext().getTypeSize(Ty); QualType SingleElementTy = GetSingleElementType(Ty); if (isVectorArgumentType(SingleElementTy) && getContext().getTypeSize(SingleElementTy) == Size) return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy)); // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. if (Size != 8 && Size != 16 && Size != 32 && Size != 64) return ABIArgInfo::getIndirect(0, /*ByVal=*/false); // Handle small structures. if (const RecordType *RT = Ty->getAs()) { // Structures with flexible arrays have variable length, so really // fail the size test above. const RecordDecl *RD = RT->getDecl(); if (RD->hasFlexibleArrayMember()) return ABIArgInfo::getIndirect(0, /*ByVal=*/false); // The structure is passed as an unextended integer, a float, or a double. llvm::Type *PassTy; if (isFPArgumentType(SingleElementTy)) { assert(Size == 32 || Size == 64); if (Size == 32) PassTy = llvm::Type::getFloatTy(getVMContext()); else PassTy = llvm::Type::getDoubleTy(getVMContext()); } else PassTy = llvm::IntegerType::get(getVMContext(), Size); return ABIArgInfo::getDirect(PassTy); } // Non-structure compounds are passed indirectly. if (isCompoundType(Ty)) return ABIArgInfo::getIndirect(0, /*ByVal=*/false); return ABIArgInfo::getDirect(nullptr); } //===----------------------------------------------------------------------===// // MSP430 ABI Implementation //===----------------------------------------------------------------------===// namespace { class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { public: MSP430TargetCodeGenInfo(CodeGenTypes &CGT) : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const override; }; } void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { if (const FunctionDecl *FD = dyn_cast(D)) { if (const MSP430InterruptAttr *attr = FD->getAttr()) { // Handle 'interrupt' attribute: llvm::Function *F = cast(GV); // Step 1: Set ISR calling convention. F->setCallingConv(llvm::CallingConv::MSP430_INTR); // Step 2: Add attributes goodness. F->addFnAttr(llvm::Attribute::NoInline); // Step 3: Emit ISR vector alias. 
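      // (Illustrative: __attribute__((interrupt(4))) would produce an alias
      // named "__isr_2" from the code below, which halves the attribute's
      // vector number.)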
      unsigned Num = attr->getNumber() / 2;
      llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
                                "__isr_" + Twine(Num), F);
    }
  }
}

//===----------------------------------------------------------------------===//
// MIPS ABI Implementation.  This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//

namespace {
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;

public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
    StackAlignInBytes(IsO32 ? 8 : 16) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
  bool shouldSignExtUnsignedType(QualType Ty) const override;
};

class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;

public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
      : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
        SizeOfUnwindException(IsO32 ? 24 : 32) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 29;
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);
    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }
};
}

void MipsABIInfo::CoerceToIntArgs(
    uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
  llvm::IntegerType *IntTy =
    llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

  // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
    ArgList.push_back(IntTy);

  // If necessary, add one more integer type to ArgList.
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);

  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}

// In N32/64, an aligned double precision floating point field is passed in
// a register.
llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type *, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  if (Ty->isComplexType())
    return CGT.ConvertType(Ty);

  const RecordType *RT = Ty->getAs<RecordType>();

  // Unions/vectors are passed in integer registers.
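  // (E.g., illustrative: a 16-byte union on N32/N64 falls through below and
  // is coerced to the struct type { i64, i64 }.)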
if (!RT || !RT->isStructureOrClassType()) { CoerceToIntArgs(TySize, ArgList); return llvm::StructType::get(getVMContext(), ArgList); } const RecordDecl *RD = RT->getDecl(); const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); assert(!(TySize % 8) && "Size of structure must be multiple of 8."); uint64_t LastOffset = 0; unsigned idx = 0; llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); // Iterate over fields in the struct/class and check if there are any aligned // double fields. for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); i != e; ++i, ++idx) { const QualType Ty = i->getType(); const BuiltinType *BT = Ty->getAs(); if (!BT || BT->getKind() != BuiltinType::Double) continue; uint64_t Offset = Layout.getFieldOffset(idx); if (Offset % 64) // Ignore doubles that are not aligned. continue; // Add ((Offset - LastOffset) / 64) args of type i64. for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) ArgList.push_back(I64); // Add double type. ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); LastOffset = Offset + 64; } CoerceToIntArgs(TySize - LastOffset, IntArgList); ArgList.append(IntArgList.begin(), IntArgList.end()); return llvm::StructType::get(getVMContext(), ArgList); } llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, uint64_t Offset) const { if (OrigOffset + MinABIStackAlignInBytes > Offset) return nullptr; return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); } ABIArgInfo MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { Ty = useFirstFieldIfTransparentUnion(Ty); uint64_t OrigOffset = Offset; uint64_t TySize = getContext().getTypeSize(Ty); uint64_t Align = getContext().getTypeAlign(Ty) / 8; Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), (uint64_t)StackAlignInBytes); unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align); Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8; if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { // Ignore empty aggregates. if (TySize == 0) return ABIArgInfo::getIgnore(); if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { Offset = OrigOffset + MinABIStackAlignInBytes; return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); } // If we have reached here, aggregates are passed directly by coercing to // another structure type. Padding is inserted if the offset of the // aggregate is unaligned. ABIArgInfo ArgInfo = ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, getPaddingType(OrigOffset, CurrOffset)); ArgInfo.setInReg(true); return ArgInfo; } // Treat an enum type as its underlying type. if (const EnumType *EnumTy = Ty->getAs()) Ty = EnumTy->getDecl()->getIntegerType(); // All integral types are promoted to the GPR width. if (Ty->isIntegralOrEnumerationType()) return ABIArgInfo::getExtend(); return ABIArgInfo::getDirect( nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset)); } llvm::Type* MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { const RecordType *RT = RetTy->getAs(); SmallVector RTList; if (RT && RT->isStructureOrClassType()) { const RecordDecl *RD = RT->getDecl(); const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); unsigned FieldCnt = Layout.getFieldCount(); // N32/64 returns struct/classes in floating point registers if the // following conditions are met: // 1. The size of the struct/class is no larger than 128-bit. // 2. 
The struct/class has one or two fields all of which are floating // point types. // 3. The offset of the first field is zero (this follows what gcc does). // // Any other composite results are returned in integer registers. // if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); for (; b != e; ++b) { const BuiltinType *BT = b->getType()->getAs(); if (!BT || !BT->isFloatingPoint()) break; RTList.push_back(CGT.ConvertType(b->getType())); } if (b == e) return llvm::StructType::get(getVMContext(), RTList, RD->hasAttr()); RTList.clear(); } } CoerceToIntArgs(Size, RTList); return llvm::StructType::get(getVMContext(), RTList); } ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { uint64_t Size = getContext().getTypeSize(RetTy); if (RetTy->isVoidType()) return ABIArgInfo::getIgnore(); // O32 doesn't treat zero-sized structs differently from other structs. // However, N32/N64 ignores zero sized return values. if (!IsO32 && Size == 0) return ABIArgInfo::getIgnore(); if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { if (Size <= 128) { if (RetTy->isAnyComplexType()) return ABIArgInfo::getDirect(); // O32 returns integer vectors in registers and N32/N64 returns all small // aggregates in registers. if (!IsO32 || (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { ABIArgInfo ArgInfo = ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); ArgInfo.setInReg(true); return ArgInfo; } } return ABIArgInfo::getIndirect(0); } // Treat an enum type as its underlying type. if (const EnumType *EnumTy = RetTy->getAs()) RetTy = EnumTy->getDecl()->getIntegerType(); return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { ABIArgInfo &RetInfo = FI.getReturnInfo(); if (!getCXXABI().classifyReturnType(FI)) RetInfo = classifyReturnType(FI.getReturnType()); // Check if a pointer to an aggregate is passed as a hidden argument. uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; for (auto &I : FI.arguments()) I.info = classifyArgumentType(I.type, Offset); } llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { llvm::Type *BP = CGF.Int8PtrTy; llvm::Type *BPP = CGF.Int8PtrPtrTy; // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64. // Pointers are also promoted in the same way but this only matters for N32. unsigned SlotSizeInBits = IsO32 ? 32 : 64; unsigned PtrWidth = getTarget().getPointerWidth(0); if ((Ty->isIntegerType() && CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) || (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) { Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits, Ty->isSignedIntegerType()); } CGBuilderTy &Builder = CGF.Builder; llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); int64_t TypeAlign = std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes); llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); llvm::Value *AddrTyped; llvm::IntegerType *IntTy = (PtrWidth == 32) ? 
      CGF.Int32Ty : CGF.Int64Ty;

  if (TypeAlign > MinABIStackAlignInBytes) {
    llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
    llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
    llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
    llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
    llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);

  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
  unsigned ArgSizeInBits = CGF.getContext().getTypeSize(Ty);
  uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
  llvm::Value *NextAddr =
    Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);

  // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
  if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return true;

  return false;
}

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be as
  // canonical as it gets.

  // Everything on MIPS is 4 bytes.  Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
  return false;
}

//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//

namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

void TCETargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOpts().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C Kernel functions are not subject to inlining
      F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attributes to metadata.
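        // (Sketch of the node built below for reqd_work_group_size(64, 1, 1),
        // illustrative printing only:
        //   !{void ()* @kern, i32 64, i32 1, i32 1, i1 true}
        // where the trailing i1 true marks the size as required, not a hint.)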
llvm::LLVMContext &Context = F->getContext(); llvm::NamedMDNode *OpenCLMetadata = M.getModule().getOrInsertNamedMetadata( "opencl.kernel_wg_size_info"); SmallVector Operands; Operands.push_back(llvm::ConstantAsMetadata::get(F)); Operands.push_back( llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); Operands.push_back( llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); Operands.push_back( llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); // Add a boolean constant operand for "required" (true) or "hint" // (false) for implementing the work_group_size_hint attr later. // Currently always true as the hint is not yet implemented. Operands.push_back( llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context))); OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); } } } } } //===----------------------------------------------------------------------===// // Hexagon ABI Implementation //===----------------------------------------------------------------------===// namespace { class HexagonABIInfo : public ABIInfo { public: HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} private: ABIArgInfo classifyReturnType(QualType RetTy) const; ABIArgInfo classifyArgumentType(QualType RetTy) const; void computeInfo(CGFunctionInfo &FI) const override; llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const override; }; class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { public: HexagonTargetCodeGenInfo(CodeGenTypes &CGT) :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { return 29; } }; } void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &I : FI.arguments()) I.info = classifyArgumentType(I.type); } ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { if (!isAggregateTypeForABI(Ty)) { // Treat an enum type as its underlying type. if (const EnumType *EnumTy = Ty->getAs()) Ty = EnumTy->getDecl()->getIntegerType(); return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } // Ignore empty records. if (isEmptyRecord(getContext(), Ty, true)) return ABIArgInfo::getIgnore(); if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); uint64_t Size = getContext().getTypeSize(Ty); if (Size > 64) return ABIArgInfo::getIndirect(0, /*ByVal=*/true); // Pass in the smallest viable integer type. else if (Size > 32) return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); else if (Size > 16) return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); else if (Size > 8) return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); else return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); } ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { if (RetTy->isVoidType()) return ABIArgInfo::getIgnore(); // Large vector types should be returned via memory. if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) return ABIArgInfo::getIndirect(0); if (!isAggregateTypeForABI(RetTy)) { // Treat an enum type as its underlying type. 
if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) RetTy = EnumTy->getDecl()->getIntegerType(); return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } if (isEmptyRecord(getContext(), RetTy, true)) return ABIArgInfo::getIgnore(); // Aggregates <= 8 bytes are returned in r0; other aggregates // are returned indirectly. uint64_t Size = getContext().getTypeSize(RetTy); if (Size <= 64) { // Return in the smallest viable integer type. if (Size <= 8) return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); if (Size <= 16) return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); if (Size <= 32) return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); } return ABIArgInfo::getIndirect(0, /*ByVal=*/true); } llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { // FIXME: Need to handle alignment llvm::Type *BPP = CGF.Int8PtrPtrTy; CGBuilderTy &Builder = CGF.Builder; llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); uint64_t Offset = llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); llvm::Value *NextAddr = Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next"); Builder.CreateStore(NextAddr, VAListAddrAsBPP); return AddrTyped; } //===----------------------------------------------------------------------===// // AMDGPU ABI Implementation //===----------------------------------------------------------------------===// namespace { class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo { public: AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT) : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const override; }; } void AMDGPUTargetCodeGenInfo::setTargetAttributes( const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); if (!FD) return; if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) { llvm::Function *F = cast<llvm::Function>(GV); uint32_t NumVGPR = Attr->getNumVGPR(); if (NumVGPR != 0) F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR)); } if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) { llvm::Function *F = cast<llvm::Function>(GV); unsigned NumSGPR = Attr->getNumSGPR(); if (NumSGPR != 0) F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR)); } } //===----------------------------------------------------------------------===// // SPARC v9 ABI Implementation. // Based on the SPARC Compliance Definition version 2.4.1. // // Function arguments are mapped to a nominal "parameter array" and promoted to // registers depending on their type. Each argument occupies 8 or 16 bytes in // the array; structs larger than 16 bytes are passed indirectly. // // One case requires special care: // // struct mixed { // int i; // float f; // }; // // When a struct mixed is passed by value, it only occupies 8 bytes in the // parameter array, but the int is passed in an integer register, and the float // is passed in a floating point register. This is represented as two arguments // with the LLVM IR inreg attribute: // // declare void f(i32 inreg %i, float inreg %f) // // The code generator will only allocate 4 bytes from the parameter array for // the inreg arguments.
All other arguments are allocated a multiple of 8 // bytes. // namespace { class SparcV9ABIInfo : public ABIInfo { public: SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} private: ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const; void computeInfo(CGFunctionInfo &FI) const override; llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const override; // Coercion type builder for structs passed in registers. The coercion type // serves two purposes: // // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned' // in registers. // 2. Expose aligned floating point elements as first-level elements, so the // code generator knows to pass them in floating point registers. // // We also compute the InReg flag which indicates that the struct contains // aligned 32-bit floats. // struct CoerceBuilder { llvm::LLVMContext &Context; const llvm::DataLayout &DL; SmallVector<llvm::Type *, 8> Elems; uint64_t Size; bool InReg; CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl) : Context(c), DL(dl), Size(0), InReg(false) {} // Pad Elems with integers until Size is ToSize. void pad(uint64_t ToSize) { assert(ToSize >= Size && "Cannot remove elements"); if (ToSize == Size) return; // Finish the current 64-bit word. uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64); if (Aligned > Size && Aligned <= ToSize) { Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); Size = Aligned; } // Add whole 64-bit words. while (Size + 64 <= ToSize) { Elems.push_back(llvm::Type::getInt64Ty(Context)); Size += 64; } // Final in-word padding. if (Size < ToSize) { Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); Size = ToSize; } } // Add a floating point element at Offset. void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { // Unaligned floats are treated as integers. if (Offset % Bits) return; // The InReg flag is only required if there are any floats < 64 bits. if (Bits < 64) InReg = true; pad(Offset); Elems.push_back(Ty); Size = Offset + Bits; } // Add a struct type to the coercion type, starting at Offset (in bits). void addStruct(uint64_t Offset, llvm::StructType *StrTy) { const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { llvm::Type *ElemTy = StrTy->getElementType(i); uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); switch (ElemTy->getTypeID()) { case llvm::Type::StructTyID: addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); break; case llvm::Type::FloatTyID: addFloat(ElemOffset, ElemTy, 32); break; case llvm::Type::DoubleTyID: addFloat(ElemOffset, ElemTy, 64); break; case llvm::Type::FP128TyID: addFloat(ElemOffset, ElemTy, 128); break; case llvm::Type::PointerTyID: if (ElemOffset % 64 == 0) { pad(ElemOffset); Elems.push_back(ElemTy); Size += 64; } break; default: break; } } } // Check if Ty is a usable substitute for the coercion type. bool isUsableType(llvm::StructType *Ty) const { return llvm::makeArrayRef(Elems) == Ty->elements(); } // Get the coercion type as a literal struct type. llvm::Type *getType() const { if (Elems.size() == 1) return Elems.front(); else return llvm::StructType::get(Context, Elems); } }; }; } // end anonymous namespace ABIArgInfo SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { if (Ty->isVoidType()) return ABIArgInfo::getIgnore(); uint64_t Size = getContext().getTypeSize(Ty); // Anything too big to fit in registers is passed with an explicit indirect // pointer / sret pointer.
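// (Illustrative aside: for the 'struct mixed { int i; float f; }' example in
// the header comment above, CoerceBuilder produces the coercion type
// { i32, float } and sets InReg, so classifyType further down returns
// getDirectInReg() and the struct travels in one integer and one FP register.)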
if (Size > SizeLimit) return ABIArgInfo::getIndirect(0, /*ByVal=*/false); // Treat an enum type as its underlying type. if (const EnumType *EnumTy = Ty->getAs<EnumType>()) Ty = EnumTy->getDecl()->getIntegerType(); // Integer types smaller than a register are extended. if (Size < 64 && Ty->isIntegerType()) return ABIArgInfo::getExtend(); // Other non-aggregates go in registers. if (!isAggregateTypeForABI(Ty)) return ABIArgInfo::getDirect(); // If a C++ object has either a non-trivial copy constructor or a non-trivial // destructor, it is passed with an explicit indirect pointer / sret pointer. if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); // This is a small aggregate type that should be passed in registers. // Build a coercion type from the LLVM struct type. llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); if (!StrTy) return ABIArgInfo::getDirect(); CoerceBuilder CB(getVMContext(), getDataLayout()); CB.addStruct(0, StrTy); CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64)); // Try to use the original type for coercion. llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType(); if (CB.InReg) return ABIArgInfo::getDirectInReg(CoerceTy); else return ABIArgInfo::getDirect(CoerceTy); } llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { ABIArgInfo AI = classifyType(Ty, 16 * 8); llvm::Type *ArgTy = CGT.ConvertType(Ty); if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) AI.setCoerceToType(ArgTy); llvm::Type *BPP = CGF.Int8PtrPtrTy; CGBuilderTy &Builder = CGF.Builder; llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); llvm::Value *ArgAddr; unsigned Stride; switch (AI.getKind()) { case ABIArgInfo::Expand: case ABIArgInfo::InAlloca: llvm_unreachable("Unsupported ABI kind for va_arg"); case ABIArgInfo::Extend: Stride = 8; ArgAddr = Builder.CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy), "extend"); break; case ABIArgInfo::Direct: Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); ArgAddr = Addr; break; case ABIArgInfo::Indirect: Stride = 8; ArgAddr = Builder.CreateBitCast(Addr, llvm::PointerType::getUnqual(ArgPtrTy), "indirect"); ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg"); break; case ABIArgInfo::Ignore: return llvm::UndefValue::get(ArgPtrTy); } // Update VAList. Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next"); Builder.CreateStore(Addr, VAListAddrAsBPP); return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr"); } void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); for (auto &I : FI.arguments()) I.info = classifyType(I.type, 16 * 8); } namespace { class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo { public: SparcV9TargetCodeGenInfo(CodeGenTypes &CGT) : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {} int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { return 14; } bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const override; }; } // end anonymous namespace bool SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const { // This is calculated from the LLVM and GCC tables and verified // against gcc output.
AFAIK all ABIs use the same encoding. CodeGen::CGBuilderTy &Builder = CGF.Builder; llvm::IntegerType *i8 = CGF.Int8Ty; llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); // 0-31: the 8-byte general-purpose registers AssignToArrayRange(Builder, Address, Eight8, 0, 31); // 32-63: f0-31, the 4-byte floating-point registers AssignToArrayRange(Builder, Address, Four8, 32, 63); // Y = 64 // PSR = 65 // WIM = 66 // TBR = 67 // PC = 68 // NPC = 69 // FSR = 70 // CSR = 71 AssignToArrayRange(Builder, Address, Eight8, 64, 71); // 72-87: d0-15, the 8-byte floating-point registers AssignToArrayRange(Builder, Address, Eight8, 72, 87); return false; } //===----------------------------------------------------------------------===// // XCore ABI Implementation //===----------------------------------------------------------------------===// namespace { /// A SmallStringEnc instance is used to build up the TypeString by passing /// it by reference between functions that append to it. typedef llvm::SmallString<128> SmallStringEnc; /// TypeStringCache caches the meta encodings of Types. /// /// The reason for caching TypeStrings is twofold: /// 1. To cache a type's encoding for later use; /// 2. As a means to break recursive member type inclusion. /// /// A cache Entry can have a Status of: /// NonRecursive: The type encoding is not recursive; /// Recursive: The type encoding is recursive; /// Incomplete: An incomplete TypeString; /// IncompleteUsed: An incomplete TypeString that has been used in a /// Recursive type encoding. /// /// A NonRecursive entry will have all of its sub-members expanded as fully /// as possible. Whilst it may contain types which are recursive, the type /// itself is not recursive and thus its encoding may be safely used whenever /// the type is encountered. /// /// A Recursive entry will have all of its sub-members expanded as fully as /// possible. The type itself is recursive and it may contain other types which /// are recursive. The Recursive encoding must not be used during the expansion /// of a recursive type's recursive branch. For simplicity the code uses /// IncompleteCount to reject all usage of Recursive encodings for member types. /// /// An Incomplete entry is always a RecordType and only encodes its /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and /// are placed into the cache during type expansion as a means to identify and /// handle recursive inclusion of types as sub-members. If there is recursion /// the entry becomes IncompleteUsed. /// /// During the expansion of a RecordType's members: /// /// If the cache contains a NonRecursive encoding for the member type, the /// cached encoding is used; /// /// If the cache contains a Recursive encoding for the member type, the /// cached encoding is 'Swapped' out, as it may be incorrect, and... /// /// If the member is a RecordType, an Incomplete encoding is placed into the /// cache to break potential recursive inclusion of itself as a sub-member; /// /// Once a member RecordType has been expanded, its temporary incomplete /// entry is removed from the cache. If a Recursive encoding was swapped out /// it is swapped back in; /// /// If an incomplete entry is used to expand a sub-member, the incomplete /// entry is marked as IncompleteUsed.
The cache keeps count of how many /// IncompleteUsed entries it currently contains in IncompleteUsedCount; /// /// If a member's encoding is found to be NonRecursive or Recursive (viz: /// IncompleteUsedCount==0), the member's encoding is added to the cache. /// Else the member is part of a recursive type and thus the recursion has /// been exited too soon for the encoding to be correct for the member. /// class TypeStringCache { enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed}; struct Entry { std::string Str; // The encoded TypeString for the type. enum Status State; // Information about the encoding in 'Str'. std::string Swapped; // A temporary place holder for a Recursive encoding // during the expansion of RecordType's members. }; std::map<const IdentifierInfo *, struct Entry> Map; unsigned IncompleteCount; // Number of Incomplete entries in the Map. unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map. public: TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}; void addIncomplete(const IdentifierInfo *ID, std::string StubEnc); bool removeIncomplete(const IdentifierInfo *ID); void addIfComplete(const IdentifierInfo *ID, StringRef Str, bool IsRecursive); StringRef lookupStr(const IdentifierInfo *ID); }; /// TypeString encodings for enum & union fields must be ordered. /// FieldEncoding is a helper for this ordering process. class FieldEncoding { bool HasName; std::string Enc; public: FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}; StringRef str() {return Enc.c_str();}; bool operator<(const FieldEncoding &rhs) const { if (HasName != rhs.HasName) return HasName; return Enc < rhs.Enc; } }; class XCoreABIInfo : public DefaultABIInfo { public: XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const override; }; class XCoreTargetCodeGenInfo : public TargetCodeGenInfo { mutable TypeStringCache TSC; public: XCoreTargetCodeGenInfo(CodeGenTypes &CGT) :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {} void emitTargetMD(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const override; }; } // End anonymous namespace. llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { CGBuilderTy &Builder = CGF.Builder; // Get the VAList. llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, CGF.Int8PtrPtrTy); llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP); // Handle the argument. ABIArgInfo AI = classifyArgumentType(Ty); llvm::Type *ArgTy = CGT.ConvertType(Ty); if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) AI.setCoerceToType(ArgTy); llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); llvm::Value *Val; uint64_t ArgSize = 0; switch (AI.getKind()) { case ABIArgInfo::Expand: case ABIArgInfo::InAlloca: llvm_unreachable("Unsupported ABI kind for va_arg"); case ABIArgInfo::Ignore: Val = llvm::UndefValue::get(ArgPtrTy); ArgSize = 0; break; case ABIArgInfo::Extend: case ABIArgInfo::Direct: Val = Builder.CreatePointerCast(AP, ArgPtrTy); ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); if (ArgSize < 4) ArgSize = 4; break; case ABIArgInfo::Indirect: llvm::Value *ArgAddr; ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy)); ArgAddr = Builder.CreateLoad(ArgAddr); Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy); ArgSize = 4; break; } // Increment the VAList.
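// (Illustrative aside: for a plain 'int' the Direct case above computes
// ArgSize == 4, so the following code advances AP by one 4-byte slot; the
// Indirect case also advances by 4, the size of the pointer slot on XCore.)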
if (ArgSize) { llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize); Builder.CreateStore(APN, VAListAddrAsBPP); } return Val; } /// During the expansion of a RecordType, an incomplete TypeString is placed /// into the cache as a means to identify and break recursion. /// If there is a Recursive encoding in the cache, it is swapped out and will /// be reinserted by removeIncomplete(). /// All other types of encoding should have been used rather than arriving here. void TypeStringCache::addIncomplete(const IdentifierInfo *ID, std::string StubEnc) { if (!ID) return; Entry &E = Map[ID]; assert((E.Str.empty() || E.State == Recursive) && "Incorrect use of addIncomplete"); assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()"); E.Swapped.swap(E.Str); // swap out the Recursive E.Str.swap(StubEnc); E.State = Incomplete; ++IncompleteCount; } /// Once the RecordType has been expanded, the temporary incomplete TypeString /// must be removed from the cache. /// If a Recursive was swapped out by addIncomplete(), it will be replaced. /// Returns true if the RecordType was defined recursively. bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) { if (!ID) return false; auto I = Map.find(ID); assert(I != Map.end() && "Entry not present"); Entry &E = I->second; assert((E.State == Incomplete || E.State == IncompleteUsed) && "Entry must be an incomplete type"); bool IsRecursive = false; if (E.State == IncompleteUsed) { // We made use of our Incomplete encoding, thus we are recursive. IsRecursive = true; --IncompleteUsedCount; } if (E.Swapped.empty()) Map.erase(I); else { // Swap the Recursive back. E.Swapped.swap(E.Str); E.Swapped.clear(); E.State = Recursive; } --IncompleteCount; return IsRecursive; } /// Add the encoded TypeString to the cache only if it is NonRecursive or /// Recursive (viz: all sub-members were expanded as fully as possible). void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str, bool IsRecursive) { if (!ID || IncompleteUsedCount) return; // No key or it is an incomplete sub-type so don't add. Entry &E = Map[ID]; if (IsRecursive && !E.Str.empty()) { assert(E.State == Recursive && E.Str.size() == Str.size() && "This is not the same Recursive entry"); // The parent container was not recursive after all, so we could have used // this Recursive sub-member entry after all, but we assumed the worst when // we started viz: IncompleteCount!=0. return; } assert(E.Str.empty() && "Entry already present"); E.Str = Str.str(); E.State = IsRecursive ? Recursive : NonRecursive; } /// Return a cached TypeString encoding for the ID. If there isn't one, or we /// are recursively expanding a type (IncompleteCount != 0) and the cached /// encoding is Recursive, return an empty StringRef. StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) { if (!ID) return StringRef(); // We have no key. auto I = Map.find(ID); if (I == Map.end()) return StringRef(); // We have no encoding. Entry &E = I->second; if (E.State == Recursive && IncompleteCount) return StringRef(); // We don't use Recursive encodings for member types. if (E.State == Incomplete) { // The incomplete type is being used to break out of recursion. E.State = IncompleteUsed; ++IncompleteUsedCount; } return E.Str.c_str(); } /// The XCore ABI includes a type information section that communicates symbol /// type information to the linker. The linker uses this information to verify /// safety/correctness of things such as array bounds, pointers, etc.
/// The ABI only requires C (and XC) language modules to emit TypeStrings. /// This type information (TypeString) is emitted into metadata for all global /// symbols: definitions, declarations, functions & variables. /// /// The TypeString carries type, qualifier, name, size & value details. /// Please see 'Tools Development Guide' section 2.16.2 for format details: /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf /// The output is tested by test/CodeGen/xcore-stringtype.c. /// static bool getTypeString(SmallStringEnc &Enc, const Decl *D, CodeGen::CodeGenModule &CGM, TypeStringCache &TSC); /// XCore uses emitTargetMD to emit TypeString metadata for global symbols. void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { SmallStringEnc Enc; if (getTypeString(Enc, D, CGM, TSC)) { llvm::LLVMContext &Ctx = CGM.getModule().getContext(); llvm::SmallVector<llvm::Metadata *, 2> MDVals; MDVals.push_back(llvm::ConstantAsMetadata::get(GV)); MDVals.push_back(llvm::MDString::get(Ctx, Enc.str())); llvm::NamedMDNode *MD = CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings"); MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); } } static bool appendType(SmallStringEnc &Enc, QualType QType, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC); /// Helper function for appendRecordType(). /// Builds a SmallVector containing the encoded field types in declaration /// order. static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE, const RecordDecl *RD, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) { for (const auto *Field : RD->fields()) { SmallStringEnc Enc; Enc += "m("; Enc += Field->getName(); Enc += "){"; if (Field->isBitField()) { Enc += "b("; llvm::raw_svector_ostream OS(Enc); OS.resync(); OS << Field->getBitWidthValue(CGM.getContext()); OS.flush(); Enc += ':'; } if (!appendType(Enc, Field->getType(), CGM, TSC)) return false; if (Field->isBitField()) Enc += ')'; Enc += '}'; FE.emplace_back(!Field->getName().empty(), Enc); } return true; } /// Appends structure and union types to Enc and adds encoding to cache. /// Recursively calls appendType (via extractFieldType) for each field. /// Union types have their fields ordered according to the ABI. static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, const IdentifierInfo *ID) { // Append the cached TypeString if we have one. StringRef TypeString = TSC.lookupStr(ID); if (!TypeString.empty()) { Enc += TypeString; return true; } // Start to emit an incomplete TypeString. size_t Start = Enc.size(); Enc += (RT->isUnionType() ? 'u' : 's'); Enc += '('; if (ID) Enc += ID->getName(); Enc += "){"; // We collect all encoded fields and order as necessary. bool IsRecursive = false; const RecordDecl *RD = RT->getDecl()->getDefinition(); if (RD && !RD->field_empty()) { // An incomplete TypeString stub is placed in the cache for this RecordType // so that recursive calls to this RecordType will use it whilst building a // complete TypeString for this RecordType. SmallVector<FieldEncoding, 16> FE; std::string StubEnc(Enc.substr(Start).str()); StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString. TSC.addIncomplete(ID, std::move(StubEnc)); if (!extractFieldType(FE, RD, CGM, TSC)) { (void) TSC.removeIncomplete(ID); return false; } IsRecursive = TSC.removeIncomplete(ID); // The ABI requires unions to be sorted but not structures. // See FieldEncoding::operator< for sort algorithm.
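// (Illustrative aside, sketching the encoding grammar rather than quoting the
// spec: a definition such as 'struct S { int a; unsigned char b; };' would be
// encoded as "s(S){m(a){si},m(b){uc}}"; for a union the member encodings are
// additionally sorted before being appended.)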
if (RT->isUnionType()) std::sort(FE.begin(), FE.end()); // We can now complete the TypeString. unsigned E = FE.size(); for (unsigned I = 0; I != E; ++I) { if (I) Enc += ','; Enc += FE[I].str(); } } Enc += '}'; TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive); return true; } /// Appends enum types to Enc and adds the encoding to the cache. static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, TypeStringCache &TSC, const IdentifierInfo *ID) { // Append the cached TypeString if we have one. StringRef TypeString = TSC.lookupStr(ID); if (!TypeString.empty()) { Enc += TypeString; return true; } size_t Start = Enc.size(); Enc += "e("; if (ID) Enc += ID->getName(); Enc += "){"; // We collect all encoded enumerations and order them alphanumerically. if (const EnumDecl *ED = ET->getDecl()->getDefinition()) { SmallVector<FieldEncoding, 16> FE; for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; ++I) { SmallStringEnc EnumEnc; EnumEnc += "m("; EnumEnc += I->getName(); EnumEnc += "){"; I->getInitVal().toString(EnumEnc); EnumEnc += '}'; FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc)); } std::sort(FE.begin(), FE.end()); unsigned E = FE.size(); for (unsigned I = 0; I != E; ++I) { if (I) Enc += ','; Enc += FE[I].str(); } } Enc += '}'; TSC.addIfComplete(ID, Enc.substr(Start), false); return true; } /// Appends type's qualifier to Enc. /// This is done prior to appending the type's encoding. static void appendQualifier(SmallStringEnc &Enc, QualType QT) { // Qualifiers are emitted in alphabetical order. static const char *Table[] = {"","c:","r:","cr:","v:","cv:","rv:","crv:"}; int Lookup = 0; if (QT.isConstQualified()) Lookup += 1<<0; if (QT.isRestrictQualified()) Lookup += 1<<1; if (QT.isVolatileQualified()) Lookup += 1<<2; Enc += Table[Lookup]; } /// Appends built-in types to Enc. static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) { const char *EncType; switch (BT->getKind()) { case BuiltinType::Void: EncType = "0"; break; case BuiltinType::Bool: EncType = "b"; break; case BuiltinType::Char_U: EncType = "uc"; break; case BuiltinType::UChar: EncType = "uc"; break; case BuiltinType::SChar: EncType = "sc"; break; case BuiltinType::UShort: EncType = "us"; break; case BuiltinType::Short: EncType = "ss"; break; case BuiltinType::UInt: EncType = "ui"; break; case BuiltinType::Int: EncType = "si"; break; case BuiltinType::ULong: EncType = "ul"; break; case BuiltinType::Long: EncType = "sl"; break; case BuiltinType::ULongLong: EncType = "ull"; break; case BuiltinType::LongLong: EncType = "sll"; break; case BuiltinType::Float: EncType = "ft"; break; case BuiltinType::Double: EncType = "d"; break; case BuiltinType::LongDouble: EncType = "ld"; break; default: return false; } Enc += EncType; return true; } /// Appends a pointer encoding to Enc before calling appendType for the pointee. static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) { Enc += "p("; if (!appendType(Enc, PT->getPointeeType(), CGM, TSC)) return false; Enc += ')'; return true; } /// Appends array encoding to Enc before calling appendType for the element.
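/// (Illustrative aside: a local 'int a[10]' is encoded as "a(10:si)" -- the
/// bound, then the element's qualifiers (none here), then the element type.)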
static bool appendArrayType(SmallStringEnc &Enc, QualType QT, const ArrayType *AT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, StringRef NoSizeEnc) { if (AT->getSizeModifier() != ArrayType::Normal) return false; Enc += "a("; if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) CAT->getSize().toStringUnsigned(Enc); else Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "". Enc += ':'; // The Qualifiers should be attached to the type rather than the array. appendQualifier(Enc, QT); if (!appendType(Enc, AT->getElementType(), CGM, TSC)) return false; Enc += ')'; return true; } /// Appends a function encoding to Enc, calling appendType for the return type /// and the arguments. static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) { Enc += "f{"; if (!appendType(Enc, FT->getReturnType(), CGM, TSC)) return false; Enc += "}("; if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) { // N.B. we are only interested in the adjusted param types. auto I = FPT->param_type_begin(); auto E = FPT->param_type_end(); if (I != E) { do { if (!appendType(Enc, *I, CGM, TSC)) return false; ++I; if (I != E) Enc += ','; } while (I != E); if (FPT->isVariadic()) Enc += ",va"; } else { if (FPT->isVariadic()) Enc += "va"; else Enc += '0'; } } Enc += ')'; return true; } /// Handles the type's qualifier before dispatching a call to handle specific /// type encodings. static bool appendType(SmallStringEnc &Enc, QualType QType, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) { QualType QT = QType.getCanonicalType(); if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) // The Qualifiers should be attached to the type rather than the array. // Thus we don't call appendQualifier() here. return appendArrayType(Enc, QT, AT, CGM, TSC, ""); appendQualifier(Enc, QT); if (const BuiltinType *BT = QT->getAs<BuiltinType>()) return appendBuiltinType(Enc, BT); if (const PointerType *PT = QT->getAs<PointerType>()) return appendPointerType(Enc, PT, CGM, TSC); if (const EnumType *ET = QT->getAs<EnumType>()) return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier()); if (const RecordType *RT = QT->getAsStructureType()) return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); if (const RecordType *RT = QT->getAsUnionType()) return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); if (const FunctionType *FT = QT->getAs<FunctionType>()) return appendFunctionType(Enc, FT, CGM, TSC); return false; } static bool getTypeString(SmallStringEnc &Enc, const Decl *D, CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) { if (!D) return false; if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { if (FD->getLanguageLinkage() != CLanguageLinkage) return false; return appendType(Enc, FD->getType(), CGM, TSC); } if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { if (VD->getLanguageLinkage() != CLanguageLinkage) return false; QualType QT = VD->getType().getCanonicalType(); if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) { // Global ArrayTypes are given a size of '*' if the size is unknown. // The Qualifiers should be attached to the type rather than the array. // Thus we don't call appendQualifier() here.
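// (Illustrative aside: 'extern int g[];' therefore encodes as "a(*:si)",
// while a global with a known bound such as 'int g[4];' encodes as "a(4:si)".)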
return appendArrayType(Enc, QT, AT, CGM, TSC, "*"); } return appendType(Enc, QT, CGM, TSC); } return false; } //===----------------------------------------------------------------------===// // Driver code //===----------------------------------------------------------------------===// const llvm::Triple &CodeGenModule::getTriple() const { return getTarget().getTriple(); } bool CodeGenModule::supportsCOMDAT() const { return !getTriple().isOSBinFormatMachO(); } const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { if (TheTargetCodeGenInfo) return *TheTargetCodeGenInfo; const llvm::Triple &Triple = getTarget().getTriple(); switch (Triple.getArch()) { default: return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); case llvm::Triple::le32: return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types)); case llvm::Triple::mips: case llvm::Triple::mipsel: if (Triple.getOS() == llvm::Triple::NaCl) return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types)); return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true)); case llvm::Triple::mips64: case llvm::Triple::mips64el: return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false)); case llvm::Triple::aarch64: case llvm::Triple::aarch64_be: { AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; if (getTarget().getABI() == "darwinpcs") Kind = AArch64ABIInfo::DarwinPCS; return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind)); } case llvm::Triple::arm: case llvm::Triple::armeb: case llvm::Triple::thumb: case llvm::Triple::thumbeb: { if (Triple.getOS() == llvm::Triple::Win32) { TheTargetCodeGenInfo = new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP); return *TheTargetCodeGenInfo; } ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; if (getTarget().getABI() == "apcs-gnu") Kind = ARMABIInfo::APCS; else if (CodeGenOpts.FloatABI == "hard" || (CodeGenOpts.FloatABI != "soft" && Triple.getEnvironment() == llvm::Triple::GNUEABIHF)) Kind = ARMABIInfo::AAPCS_VFP; return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind)); } case llvm::Triple::ppc: return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); case llvm::Triple::ppc64: if (Triple.isOSBinFormatELF()) { PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1; if (getTarget().getABI() == "elfv2") Kind = PPC64_SVR4_ABIInfo::ELFv2; bool HasQPX = getTarget().getABI() == "elfv1-qpx"; return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX)); } else return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types)); case llvm::Triple::ppc64le: { assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!"); PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2; if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx") Kind = PPC64_SVR4_ABIInfo::ELFv1; bool HasQPX = getTarget().getABI() == "elfv1-qpx"; return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX)); } case llvm::Triple::nvptx: case llvm::Triple::nvptx64: return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types)); case llvm::Triple::msp430: return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); case llvm::Triple::systemz: { bool HasVector = getTarget().getABI() == "vector"; return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types, HasVector)); } case llvm::Triple::tce: return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types)); case llvm::Triple::x86: { bool IsDarwinVectorABI = Triple.isOSDarwin(); bool 
IsSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing(); if (Triple.getOS() == llvm::Triple::Win32) { return *(TheTargetCodeGenInfo = new WinX86_32TargetCodeGenInfo( Types, IsDarwinVectorABI, IsSmallStructInRegABI, IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters)); } else { return *(TheTargetCodeGenInfo = new X86_32TargetCodeGenInfo( Types, IsDarwinVectorABI, IsSmallStructInRegABI, IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters)); } } case llvm::Triple::x86_64: { StringRef ABI = getTarget().getABI(); X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512 : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None); switch (Triple.getOS()) { case llvm::Triple::Win32: return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types, AVXLevel)); case llvm::Triple::PS4: return *(TheTargetCodeGenInfo = new PS4TargetCodeGenInfo(Types, AVXLevel)); default: return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types, AVXLevel)); } } case llvm::Triple::hexagon: return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types)); case llvm::Triple::r600: return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types)); case llvm::Triple::amdgcn: return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types)); case llvm::Triple::sparcv9: return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types)); case llvm::Triple::xcore: return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types)); } } Index: vendor/clang/dist/lib/Driver/Tools.cpp =================================================================== --- vendor/clang/dist/lib/Driver/Tools.cpp (revision 292727) +++ vendor/clang/dist/lib/Driver/Tools.cpp (revision 292728) @@ -1,9509 +1,9519 @@ //===--- Tools.cpp - Tools Implementations --------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "Tools.h" #include "InputInfo.h" #include "ToolChains.h" #include "clang/Basic/CharInfo.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/ObjCRuntime.h" #include "clang/Basic/Version.h" #include "clang/Config/config.h" #include "clang/Driver/Action.h" #include "clang/Driver/Compilation.h" #include "clang/Driver/Driver.h" #include "clang/Driver/DriverDiagnostic.h" #include "clang/Driver/Job.h" #include "clang/Driver/Options.h" #include "clang/Driver/SanitizerArgs.h" #include "clang/Driver/ToolChain.h" #include "clang/Driver/Util.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/ADT/Twine.h" #include "llvm/Option/Arg.h" #include "llvm/Option/ArgList.h" #include "llvm/Option/Option.h" #include "llvm/Support/TargetParser.h" #include "llvm/Support/Compression.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Host.h" #include "llvm/Support/Path.h" #include "llvm/Support/Process.h" #include "llvm/Support/Program.h" #include "llvm/Support/raw_ostream.h" #ifdef LLVM_ON_UNIX #include <unistd.h> // For getuid().
#endif using namespace clang::driver; using namespace clang::driver::tools; using namespace clang; using namespace llvm::opt; static void addAssemblerKPIC(const ArgList &Args, ArgStringList &CmdArgs) { Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC, options::OPT_fpic, options::OPT_fno_pic, options::OPT_fPIE, options::OPT_fno_PIE, options::OPT_fpie, options::OPT_fno_pie); if (!LastPICArg) return; if (LastPICArg->getOption().matches(options::OPT_fPIC) || LastPICArg->getOption().matches(options::OPT_fpic) || LastPICArg->getOption().matches(options::OPT_fPIE) || LastPICArg->getOption().matches(options::OPT_fpie)) { CmdArgs.push_back("-KPIC"); } } /// CheckPreprocessingOptions - Perform some validation of preprocessing /// arguments that is shared with gcc. static void CheckPreprocessingOptions(const Driver &D, const ArgList &Args) { if (Arg *A = Args.getLastArg(options::OPT_C, options::OPT_CC)) { if (!Args.hasArg(options::OPT_E) && !Args.hasArg(options::OPT__SLASH_P) && !Args.hasArg(options::OPT__SLASH_EP) && !D.CCCIsCPP()) { D.Diag(diag::err_drv_argument_only_allowed_with) << A->getBaseArg().getAsString(Args) << (D.IsCLMode() ? "/E, /P or /EP" : "-E"); } } } /// CheckCodeGenerationOptions - Perform some validation of code generation /// arguments that is shared with gcc. static void CheckCodeGenerationOptions(const Driver &D, const ArgList &Args) { // In gcc, only ARM checks this, but it seems reasonable to check universally. if (Args.hasArg(options::OPT_static)) if (const Arg *A = Args.getLastArg(options::OPT_dynamic, options::OPT_mdynamic_no_pic)) D.Diag(diag::err_drv_argument_not_allowed_with) << A->getAsString(Args) << "-static"; } // Add backslashes to escape spaces and other backslashes. // This is used for the space-separated argument list specified with // the -dwarf-debug-flags option. static void EscapeSpacesAndBackslashes(const char *Arg, SmallVectorImpl<char> &Res) { for (; *Arg; ++Arg) { switch (*Arg) { default: break; case ' ': case '\\': Res.push_back('\\'); break; } Res.push_back(*Arg); } } // Quote target names for inclusion in GNU Make dependency files. // Only the characters '$', '#', ' ', '\t' are quoted. static void QuoteTarget(StringRef Target, SmallVectorImpl<char> &Res) { for (unsigned i = 0, e = Target.size(); i != e; ++i) { switch (Target[i]) { case ' ': case '\t': // Escape the preceding backslashes for (int j = i - 1; j >= 0 && Target[j] == '\\'; --j) Res.push_back('\\'); // Escape the space/tab Res.push_back('\\'); break; case '$': Res.push_back('$'); break; case '#': Res.push_back('\\'); break; default: break; } Res.push_back(Target[i]); } } static void addDirectoryList(const ArgList &Args, ArgStringList &CmdArgs, const char *ArgName, const char *EnvVar) { const char *DirList = ::getenv(EnvVar); bool CombinedArg = false; if (!DirList) return; // Nothing to do. StringRef Name(ArgName); if (Name.equals("-I") || Name.equals("-L")) CombinedArg = true; StringRef Dirs(DirList); if (Dirs.empty()) // Empty string should not add '.'. return; StringRef::size_type Delim; while ((Delim = Dirs.find(llvm::sys::EnvPathSeparator)) != StringRef::npos) { if (Delim == 0) { // Leading colon.
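// (Illustrative aside: with ArgName "-I" and CPATH=":/a/b", the leading-colon
// case below emits the combined argument "-I." and the trailing segment then
// emits "-I/a/b"; an empty list entry conventionally means the current
// directory.)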
if (CombinedArg) { CmdArgs.push_back(Args.MakeArgString(std::string(ArgName) + ".")); } else { CmdArgs.push_back(ArgName); CmdArgs.push_back("."); } } else { if (CombinedArg) { CmdArgs.push_back( Args.MakeArgString(std::string(ArgName) + Dirs.substr(0, Delim))); } else { CmdArgs.push_back(ArgName); CmdArgs.push_back(Args.MakeArgString(Dirs.substr(0, Delim))); } } Dirs = Dirs.substr(Delim + 1); } if (Dirs.empty()) { // Trailing colon. if (CombinedArg) { CmdArgs.push_back(Args.MakeArgString(std::string(ArgName) + ".")); } else { CmdArgs.push_back(ArgName); CmdArgs.push_back("."); } } else { // Add the last path. if (CombinedArg) { CmdArgs.push_back(Args.MakeArgString(std::string(ArgName) + Dirs)); } else { CmdArgs.push_back(ArgName); CmdArgs.push_back(Args.MakeArgString(Dirs)); } } } static void AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs, const ArgList &Args, ArgStringList &CmdArgs) { const Driver &D = TC.getDriver(); // Add extra linker input arguments which are not treated as inputs // (constructed via -Xarch_). Args.AddAllArgValues(CmdArgs, options::OPT_Zlinker_input); for (const auto &II : Inputs) { if (!TC.HasNativeLLVMSupport()) { // Don't try to pass LLVM inputs unless we have native support. if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR || II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC) D.Diag(diag::err_drv_no_linker_llvm_support) << TC.getTripleString(); } // Add filenames immediately. if (II.isFilename()) { CmdArgs.push_back(II.getFilename()); continue; } // Otherwise, this is a linker input argument. const Arg &A = II.getInputArg(); // Handle reserved library options. if (A.getOption().matches(options::OPT_Z_reserved_lib_stdcxx)) TC.AddCXXStdlibLibArgs(Args, CmdArgs); else if (A.getOption().matches(options::OPT_Z_reserved_lib_cckext)) TC.AddCCKextLibArgs(Args, CmdArgs); else if (A.getOption().matches(options::OPT_z)) { // Pass -z prefix for gcc linker compatibility. A.claim(); A.render(Args, CmdArgs); } else { A.renderAsInput(Args, CmdArgs); } } // LIBRARY_PATH - included following the user specified library paths. // and only supported on native toolchains. if (!TC.isCrossCompiling()) addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH"); } /// \brief Determine whether Objective-C automated reference counting is /// enabled. static bool isObjCAutoRefCount(const ArgList &Args) { return Args.hasFlag(options::OPT_fobjc_arc, options::OPT_fno_objc_arc, false); } /// \brief Determine whether we are linking the ObjC runtime. static bool isObjCRuntimeLinked(const ArgList &Args) { if (isObjCAutoRefCount(Args)) { Args.ClaimAllArgs(options::OPT_fobjc_link_runtime); return true; } return Args.hasArg(options::OPT_fobjc_link_runtime); } static bool forwardToGCC(const Option &O) { // Don't forward inputs from the original command line. They are added from // InputInfoList. return O.getKind() != Option::InputClass && !O.hasFlag(options::DriverOption) && !O.hasFlag(options::LinkerInput); } void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA, const Driver &D, const ArgList &Args, ArgStringList &CmdArgs, const InputInfo &Output, const InputInfoList &Inputs) const { Arg *A; CheckPreprocessingOptions(D, Args); Args.AddLastArg(CmdArgs, options::OPT_C); Args.AddLastArg(CmdArgs, options::OPT_CC); // Handle dependency file generation. if ((A = Args.getLastArg(options::OPT_M, options::OPT_MM)) || (A = Args.getLastArg(options::OPT_MD)) || (A = Args.getLastArg(options::OPT_MMD))) { // Determine the output location. 
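// (Illustrative aside: 'clang -MMD -MF deps.d foo.c' takes the first branch
// below, so DepFile becomes "deps.d"; without -MF, -MD/-MMD derive a name from
// the inputs, while -M/-MM write the dependency output to "-", i.e. stdout.)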
const char *DepFile; if (Arg *MF = Args.getLastArg(options::OPT_MF)) { DepFile = MF->getValue(); C.addFailureResultFile(DepFile, &JA); } else if (Output.getType() == types::TY_Dependencies) { DepFile = Output.getFilename(); } else if (A->getOption().matches(options::OPT_M) || A->getOption().matches(options::OPT_MM)) { DepFile = "-"; } else { DepFile = getDependencyFileName(Args, Inputs); C.addFailureResultFile(DepFile, &JA); } CmdArgs.push_back("-dependency-file"); CmdArgs.push_back(DepFile); // Add a default target if one wasn't specified. if (!Args.hasArg(options::OPT_MT) && !Args.hasArg(options::OPT_MQ)) { const char *DepTarget; // If user provided -o, that is the dependency target, except // when we are only generating a dependency file. Arg *OutputOpt = Args.getLastArg(options::OPT_o); if (OutputOpt && Output.getType() != types::TY_Dependencies) { DepTarget = OutputOpt->getValue(); } else { // Otherwise derive from the base input. // // FIXME: This should use the computed output file location. SmallString<128> P(Inputs[0].getBaseInput()); llvm::sys::path::replace_extension(P, "o"); DepTarget = Args.MakeArgString(llvm::sys::path::filename(P)); } CmdArgs.push_back("-MT"); SmallString<128> Quoted; QuoteTarget(DepTarget, Quoted); CmdArgs.push_back(Args.MakeArgString(Quoted)); } if (A->getOption().matches(options::OPT_M) || A->getOption().matches(options::OPT_MD)) CmdArgs.push_back("-sys-header-deps"); if ((isa<PrecompileJobAction>(JA) && !Args.hasArg(options::OPT_fno_module_file_deps)) || Args.hasArg(options::OPT_fmodule_file_deps)) CmdArgs.push_back("-module-file-deps"); } if (Args.hasArg(options::OPT_MG)) { if (!A || A->getOption().matches(options::OPT_MD) || A->getOption().matches(options::OPT_MMD)) D.Diag(diag::err_drv_mg_requires_m_or_mm); CmdArgs.push_back("-MG"); } Args.AddLastArg(CmdArgs, options::OPT_MP); Args.AddLastArg(CmdArgs, options::OPT_MV); // Convert all -MQ args to -MT for (const Arg *A : Args.filtered(options::OPT_MT, options::OPT_MQ)) { A->claim(); if (A->getOption().matches(options::OPT_MQ)) { CmdArgs.push_back("-MT"); SmallString<128> Quoted; QuoteTarget(A->getValue(), Quoted); CmdArgs.push_back(Args.MakeArgString(Quoted)); // -MT flag - no change } else { A->render(Args, CmdArgs); } } // Add -i* options, and automatically translate to // -include-pch/-include-pth for transparent PCH support. It's // wonky, but we include looking for .gch so we can support seamless // replacement into a build system already set up to be generating // .gch files. bool RenderedImplicitInclude = false; for (const Arg *A : Args.filtered(options::OPT_clang_i_Group)) { if (A->getOption().matches(options::OPT_include)) { bool IsFirstImplicitInclude = !RenderedImplicitInclude; RenderedImplicitInclude = true; // Use PCH if the user requested it. bool UsePCH = D.CCCUsePCH; bool FoundPTH = false; bool FoundPCH = false; SmallString<128> P(A->getValue()); // We want the files to have a name like foo.h.pch. Add a dummy extension // so that replace_extension does the right thing.
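// (Illustrative aside: for '-include foo.h' the probing below checks
// "foo.h.pch", then "foo.h.pth", then "foo.h.gch", and rewrites the first hit
// into -include-pch or -include-pth on the cc1 command line.)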
P += ".dummy"; if (UsePCH) { llvm::sys::path::replace_extension(P, "pch"); if (llvm::sys::fs::exists(P)) FoundPCH = true; } if (!FoundPCH) { llvm::sys::path::replace_extension(P, "pth"); if (llvm::sys::fs::exists(P)) FoundPTH = true; } if (!FoundPCH && !FoundPTH) { llvm::sys::path::replace_extension(P, "gch"); if (llvm::sys::fs::exists(P)) { FoundPCH = UsePCH; FoundPTH = !UsePCH; } } if (FoundPCH || FoundPTH) { if (IsFirstImplicitInclude) { A->claim(); if (UsePCH) CmdArgs.push_back("-include-pch"); else CmdArgs.push_back("-include-pth"); CmdArgs.push_back(Args.MakeArgString(P)); continue; } else { // Ignore the PCH if not first on command line and emit warning. D.Diag(diag::warn_drv_pch_not_first_include) << P << A->getAsString(Args); } } } // Not translated, render as usual. A->claim(); A->render(Args, CmdArgs); } Args.AddAllArgs(CmdArgs, options::OPT_D, options::OPT_U); Args.AddAllArgs(CmdArgs, options::OPT_I_Group, options::OPT_F, options::OPT_index_header_map); // Add -Wp, and -Xassembler if using the preprocessor. // FIXME: There is a very unfortunate problem here, some troubled // souls abuse -Wp, to pass preprocessor options in gcc syntax. To // really support that we would have to parse and then translate // those options. :( Args.AddAllArgValues(CmdArgs, options::OPT_Wp_COMMA, options::OPT_Xpreprocessor); // -I- is a deprecated GCC feature, reject it. if (Arg *A = Args.getLastArg(options::OPT_I_)) D.Diag(diag::err_drv_I_dash_not_supported) << A->getAsString(Args); // If we have a --sysroot, and don't have an explicit -isysroot flag, add an // -isysroot to the CC1 invocation. StringRef sysroot = C.getSysRoot(); if (sysroot != "") { if (!Args.hasArg(options::OPT_isysroot)) { CmdArgs.push_back("-isysroot"); CmdArgs.push_back(C.getArgs().MakeArgString(sysroot)); } } // Parse additional include paths from environment variables. // FIXME: We should probably sink the logic for handling these from the // frontend into the driver. It will allow deleting 4 otherwise unused flags. // CPATH - included following the user specified includes (but prior to // builtin and standard includes). addDirectoryList(Args, CmdArgs, "-I", "CPATH"); // C_INCLUDE_PATH - system includes enabled when compiling C. addDirectoryList(Args, CmdArgs, "-c-isystem", "C_INCLUDE_PATH"); // CPLUS_INCLUDE_PATH - system includes enabled when compiling C++. addDirectoryList(Args, CmdArgs, "-cxx-isystem", "CPLUS_INCLUDE_PATH"); // OBJC_INCLUDE_PATH - system includes enabled when compiling ObjC. addDirectoryList(Args, CmdArgs, "-objc-isystem", "OBJC_INCLUDE_PATH"); // OBJCPLUS_INCLUDE_PATH - system includes enabled when compiling ObjC++. addDirectoryList(Args, CmdArgs, "-objcxx-isystem", "OBJCPLUS_INCLUDE_PATH"); // Add C++ include arguments, if needed. if (types::isCXX(Inputs[0].getType())) getToolChain().AddClangCXXStdlibIncludeArgs(Args, CmdArgs); // Add system include arguments. getToolChain().AddClangSystemIncludeArgs(Args, CmdArgs); } // FIXME: Move to target hook. 
static bool isSignedCharDefault(const llvm::Triple &Triple) { switch (Triple.getArch()) { default: return true; case llvm::Triple::aarch64: case llvm::Triple::aarch64_be: case llvm::Triple::arm: case llvm::Triple::armeb: case llvm::Triple::thumb: case llvm::Triple::thumbeb: if (Triple.isOSDarwin() || Triple.isOSWindows()) return true; return false; case llvm::Triple::ppc: case llvm::Triple::ppc64: if (Triple.isOSDarwin()) return true; return false; case llvm::Triple::hexagon: case llvm::Triple::ppc64le: case llvm::Triple::systemz: case llvm::Triple::xcore: return false; } } static bool isNoCommonDefault(const llvm::Triple &Triple) { switch (Triple.getArch()) { default: return false; case llvm::Triple::xcore: return true; } } // ARM tools start. // Get SubArch (vN). static int getARMSubArchVersionNumber(const llvm::Triple &Triple) { llvm::StringRef Arch = Triple.getArchName(); return llvm::ARMTargetParser::parseArchVersion(Arch); } // True if M-profile. static bool isARMMProfile(const llvm::Triple &Triple) { llvm::StringRef Arch = Triple.getArchName(); unsigned Profile = llvm::ARMTargetParser::parseArchProfile(Arch); return Profile == llvm::ARM::PK_M; } // Get Arch/CPU from args. static void getARMArchCPUFromArgs(const ArgList &Args, llvm::StringRef &Arch, llvm::StringRef &CPU, bool FromAs = false) { if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) CPU = A->getValue(); if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) Arch = A->getValue(); if (!FromAs) return; for (const Arg *A : Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) { StringRef Value = A->getValue(); if (Value.startswith("-mcpu=")) CPU = Value.substr(6); if (Value.startswith("-march=")) Arch = Value.substr(7); } } // Handle -mhwdiv=. // FIXME: Use ARMTargetParser. static void getARMHWDivFeatures(const Driver &D, const Arg *A, const ArgList &Args, StringRef HWDiv, std::vector<const char *> &Features) { if (HWDiv == "arm") { Features.push_back("+hwdiv-arm"); Features.push_back("-hwdiv"); } else if (HWDiv == "thumb") { Features.push_back("-hwdiv-arm"); Features.push_back("+hwdiv"); } else if (HWDiv == "arm,thumb" || HWDiv == "thumb,arm") { Features.push_back("+hwdiv-arm"); Features.push_back("+hwdiv"); } else if (HWDiv == "none") { Features.push_back("-hwdiv-arm"); Features.push_back("-hwdiv"); } else D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args); } // Handle -mfpu=. static void getARMFPUFeatures(const Driver &D, const Arg *A, const ArgList &Args, StringRef FPU, std::vector<const char *> &Features) { unsigned FPUID = llvm::ARMTargetParser::parseFPU(FPU); if (!llvm::ARMTargetParser::getFPUFeatures(FPUID, Features)) D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args); } // Check if -march is valid by checking if it can be canonicalised and parsed. // getARMArch is used here instead of just checking the -march value in order // to handle -march=native correctly. static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args, llvm::StringRef ArchName, const llvm::Triple &Triple) { std::string MArch = arm::getARMArch(ArchName, Triple); if (llvm::ARMTargetParser::parseArch(MArch) == llvm::ARM::AK_INVALID) D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args); } // Check -mcpu=. Needs ArchName to handle -mcpu=generic.
static void checkARMCPUName(const Driver &D, const Arg *A, const ArgList &Args, llvm::StringRef CPUName, llvm::StringRef ArchName, const llvm::Triple &Triple) { std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple); std::string Arch = arm::getARMArch(ArchName, Triple); if (strcmp(arm::getLLVMArchSuffixForARM(CPU, Arch), "") == 0) D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args); } // Select the float ABI as determined by -msoft-float, -mhard-float, and // -mfloat-abi=. StringRef tools::arm::getARMFloatABI(const Driver &D, const ArgList &Args, const llvm::Triple &Triple) { StringRef FloatABI; if (Arg *A = Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float, options::OPT_mfloat_abi_EQ)) { if (A->getOption().matches(options::OPT_msoft_float)) FloatABI = "soft"; else if (A->getOption().matches(options::OPT_mhard_float)) FloatABI = "hard"; else { FloatABI = A->getValue(); if (FloatABI != "soft" && FloatABI != "softfp" && FloatABI != "hard") { D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args); FloatABI = "soft"; } } } // If unspecified, choose the default based on the platform. if (FloatABI.empty()) { switch (Triple.getOS()) { case llvm::Triple::Darwin: case llvm::Triple::MacOSX: case llvm::Triple::IOS: { // Darwin defaults to "softfp" for v6 and v7. // if (getARMSubArchVersionNumber(Triple) == 6 || getARMSubArchVersionNumber(Triple) == 7) FloatABI = "softfp"; else FloatABI = "soft"; break; } // FIXME: this is invalid for WindowsCE case llvm::Triple::Win32: FloatABI = "hard"; break; case llvm::Triple::FreeBSD: switch (Triple.getEnvironment()) { case llvm::Triple::GNUEABIHF: FloatABI = "hard"; break; default: // FreeBSD defaults to soft float FloatABI = "soft"; break; } break; default: switch (Triple.getEnvironment()) { case llvm::Triple::GNUEABIHF: FloatABI = "hard"; break; case llvm::Triple::GNUEABI: FloatABI = "softfp"; break; case llvm::Triple::EABIHF: FloatABI = "hard"; break; case llvm::Triple::EABI: // EABI is always AAPCS, and if it was not marked 'hard', it's softfp FloatABI = "softfp"; break; case llvm::Triple::Android: { if (getARMSubArchVersionNumber(Triple) == 7) FloatABI = "softfp"; else FloatABI = "soft"; break; } default: // Assume "soft", but warn the user we are guessing. FloatABI = "soft"; if (Triple.getOS() != llvm::Triple::UnknownOS || !Triple.isOSBinFormatMachO()) D.Diag(diag::warn_drv_assuming_mfloat_abi_is) << "soft"; break; } } } return FloatABI; } static void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple, const ArgList &Args, std::vector<const char *> &Features, bool ForAS) { bool KernelOrKext = Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext); StringRef FloatABI = tools::arm::getARMFloatABI(D, Args, Triple); const Arg *WaCPU = nullptr, *WaFPU = nullptr; const Arg *WaHDiv = nullptr, *WaArch = nullptr; if (!ForAS) { // FIXME: Note, this is a hack, the LLVM backend doesn't actually use these // yet (it uses the -mfloat-abi and -msoft-float options), and it is // stripped out by the ARM target. We should probably pass this a new // -target-option, which is handled by the -cc1/-cc1as invocation. // // FIXME2: For consistency, it would be ideal if we set up the target // machine state the same when using the frontend or the assembler. We don't // currently do that for the assembler, we pass the options directly to the // backend and never even instantiate the frontend TargetInfo. If we did, // and used its handleTargetFeatures hook, then we could ensure the // assembler and the frontend behave the same.
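// (Illustrative aside: the mapping applied below is float ABI "soft" ->
// "+soft-float" plus "+soft-float-abi", while "softfp" keeps hardware FP
// instructions but passes arguments in core registers, so it only adds
// "+soft-float-abi".)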
// Use software floating point operations? if (FloatABI == "soft") Features.push_back("+soft-float"); // Use software floating point argument passing? if (FloatABI != "hard") Features.push_back("+soft-float-abi"); } else { // Here, we make sure that -Wa,-mfpu/cpu/arch/hwdiv will be passed down // to the assembler correctly. for (const Arg *A : Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) { StringRef Value = A->getValue(); if (Value.startswith("-mfpu=")) { WaFPU = A; } else if (Value.startswith("-mcpu=")) { WaCPU = A; } else if (Value.startswith("-mhwdiv=")) { WaHDiv = A; } else if (Value.startswith("-march=")) { WaArch = A; } } } // Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=. const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ); if (WaFPU) { if (FPUArg) D.Diag(clang::diag::warn_drv_unused_argument) << FPUArg->getAsString(Args); getARMFPUFeatures(D, WaFPU, Args, StringRef(WaFPU->getValue()).substr(6), Features); } else if (FPUArg) { getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features); } // Honor -mhwdiv=. ClangAs gives preference to -Wa,-mhwdiv=. const Arg *HDivArg = Args.getLastArg(options::OPT_mhwdiv_EQ); if (WaHDiv) { if (HDivArg) D.Diag(clang::diag::warn_drv_unused_argument) << HDivArg->getAsString(Args); getARMHWDivFeatures(D, WaHDiv, Args, StringRef(WaHDiv->getValue()).substr(8), Features); } else if (HDivArg) getARMHWDivFeatures(D, HDivArg, Args, HDivArg->getValue(), Features); // Check -march. ClangAs gives preference to -Wa,-march=. const Arg *ArchArg = Args.getLastArg(options::OPT_march_EQ); StringRef ArchName; if (WaArch) { if (ArchArg) D.Diag(clang::diag::warn_drv_unused_argument) << ArchArg->getAsString(Args); ArchName = StringRef(WaArch->getValue()).substr(7); checkARMArchName(D, WaArch, Args, ArchName, Triple); // FIXME: Set Arch. D.Diag(clang::diag::warn_drv_unused_argument) << WaArch->getAsString(Args); } else if (ArchArg) { ArchName = ArchArg->getValue(); checkARMArchName(D, ArchArg, Args, ArchName, Triple); } // Check -mcpu. ClangAs gives preference to -Wa,-mcpu=. const Arg *CPUArg = Args.getLastArg(options::OPT_mcpu_EQ); StringRef CPUName; if (WaCPU) { if (CPUArg) D.Diag(clang::diag::warn_drv_unused_argument) << CPUArg->getAsString(Args); CPUName = StringRef(WaCPU->getValue()).substr(6); checkARMCPUName(D, WaCPU, Args, CPUName, ArchName, Triple); } else if (CPUArg) { CPUName = CPUArg->getValue(); checkARMCPUName(D, CPUArg, Args, CPUName, ArchName, Triple); } // Setting -msoft-float effectively disables NEON because of the GCC // implementation, although the same isn't true of VFP or VFP3. if (FloatABI == "soft") { Features.push_back("-neon"); // Also need to explicitly disable features which imply NEON. Features.push_back("-crypto"); } // En/disable crc code generation. if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) { if (A->getOption().matches(options::OPT_mcrc)) Features.push_back("+crc"); else Features.push_back("-crc"); } if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v8_1a) { Features.insert(Features.begin(), "+v8.1a"); } // Look for the last occurrence of -mlong-calls or -mno-long-calls. If // neither options are specified, see if we are compiling for kernel/kext and // decide whether to pass "+long-calls" based on the OS and its version. 
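// For example (hypothetical): '-mkernel' while targeting iOS 5 takes the
// KernelOrKext branch below and adds "+long-calls"; on iOS 6 and later the
// default is left alone unless -mlong-calls is passed explicitly.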
if (Arg *A = Args.getLastArg(options::OPT_mlong_calls, options::OPT_mno_long_calls)) { if (A->getOption().matches(options::OPT_mlong_calls)) Features.push_back("+long-calls"); } else if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6))) { Features.push_back("+long-calls"); } } void Clang::AddARMTargetArgs(const ArgList &Args, ArgStringList &CmdArgs, bool KernelOrKext) const { const Driver &D = getToolChain().getDriver(); // Get the effective triple, which takes into account the deployment target. std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args); llvm::Triple Triple(TripleStr); // Select the ABI to use. // // FIXME: Support -meabi. // FIXME: Parts of this are duplicated in the backend, unify this somehow. const char *ABIName = nullptr; if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) { ABIName = A->getValue(); } else if (Triple.isOSBinFormatMachO()) { // The backend is hardwired to assume AAPCS for M-class processors, ensure // the frontend matches that. if (Triple.getEnvironment() == llvm::Triple::EABI || Triple.getOS() == llvm::Triple::UnknownOS || isARMMProfile(Triple)) { ABIName = "aapcs"; } else { ABIName = "apcs-gnu"; } } else if (Triple.isOSWindows()) { // FIXME: this is invalid for WindowsCE ABIName = "aapcs"; } else { // Select the default based on the platform. switch (Triple.getEnvironment()) { case llvm::Triple::Android: case llvm::Triple::GNUEABI: case llvm::Triple::GNUEABIHF: ABIName = "aapcs-linux"; break; case llvm::Triple::EABIHF: case llvm::Triple::EABI: ABIName = "aapcs"; break; default: if (Triple.getOS() == llvm::Triple::NetBSD) ABIName = "apcs-gnu"; else ABIName = "aapcs"; break; } } CmdArgs.push_back("-target-abi"); CmdArgs.push_back(ABIName); // Determine floating point ABI from the options & target defaults. StringRef FloatABI = tools::arm::getARMFloatABI(D, Args, Triple); if (FloatABI == "soft") { // Floating point operations and argument passing are soft. // // FIXME: This changes CPP defines, we need -target-soft-float. CmdArgs.push_back("-msoft-float"); CmdArgs.push_back("-mfloat-abi"); CmdArgs.push_back("soft"); } else if (FloatABI == "softfp") { // Floating point operations are hard, but argument passing is soft. CmdArgs.push_back("-mfloat-abi"); CmdArgs.push_back("soft"); } else { // Floating point operations and argument passing are hard. assert(FloatABI == "hard" && "Invalid float abi!"); CmdArgs.push_back("-mfloat-abi"); CmdArgs.push_back("hard"); } // Kernel code has more strict alignment requirements. if (KernelOrKext) { CmdArgs.push_back("-backend-option"); CmdArgs.push_back("-arm-strict-align"); // The kext linker doesn't know how to deal with movw/movt. CmdArgs.push_back("-backend-option"); CmdArgs.push_back("-arm-use-movt=0"); } // -mkernel implies -mstrict-align; don't add the redundant option. if (!KernelOrKext) { if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access, options::OPT_munaligned_access)) { CmdArgs.push_back("-backend-option"); if (A->getOption().matches(options::OPT_mno_unaligned_access)) CmdArgs.push_back("-arm-strict-align"); else { if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m) D.Diag(diag::err_target_unsupported_unaligned) << "v6m"; CmdArgs.push_back("-arm-no-strict-align"); } } } // Forward the -mglobal-merge option for explicit control over the pass. 
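// e.g. '-mno-global-merge' lowers to "-backend-option -arm-global-merge=false"
// below, while '-mglobal-merge' forces the pass on instead of leaving the
// backend default.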
if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge, options::OPT_mno_global_merge)) { CmdArgs.push_back("-backend-option"); if (A->getOption().matches(options::OPT_mno_global_merge)) CmdArgs.push_back("-arm-global-merge=false"); else CmdArgs.push_back("-arm-global-merge=true"); } if (!Args.hasFlag(options::OPT_mimplicit_float, options::OPT_mno_implicit_float, true)) CmdArgs.push_back("-no-implicit-float"); // llvm does not support reserving registers in general. There is support // for reserving r9 on ARM though (defined as a platform-specific register // in ARM EABI). if (Args.hasArg(options::OPT_ffixed_r9)) { CmdArgs.push_back("-backend-option"); CmdArgs.push_back("-arm-reserve-r9"); } } // ARM tools end. /// getAArch64TargetCPU - Get the (LLVM) name of the AArch64 cpu we are /// targeting. static std::string getAArch64TargetCPU(const ArgList &Args) { Arg *A; std::string CPU; // If we have -mtune or -mcpu, use that. if ((A = Args.getLastArg(options::OPT_mtune_EQ))) { CPU = A->getValue(); } else if ((A = Args.getLastArg(options::OPT_mcpu_EQ))) { StringRef Mcpu = A->getValue(); CPU = Mcpu.split("+").first.lower(); } // Handle CPU name is 'native'. if (CPU == "native") return llvm::sys::getHostCPUName(); else if (CPU.size()) return CPU; // Make sure we pick "cyclone" if -arch is used. // FIXME: Should this be picked by checking the target triple instead? if (Args.getLastArg(options::OPT_arch)) return "cyclone"; return "generic"; } void Clang::AddAArch64TargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const { std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args); llvm::Triple Triple(TripleStr); if (!Args.hasFlag(options::OPT_mred_zone, options::OPT_mno_red_zone, true) || Args.hasArg(options::OPT_mkernel) || Args.hasArg(options::OPT_fapple_kext)) CmdArgs.push_back("-disable-red-zone"); if (!Args.hasFlag(options::OPT_mimplicit_float, options::OPT_mno_implicit_float, true)) CmdArgs.push_back("-no-implicit-float"); const char *ABIName = nullptr; if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) ABIName = A->getValue(); else if (Triple.isOSDarwin()) ABIName = "darwinpcs"; else ABIName = "aapcs"; CmdArgs.push_back("-target-abi"); CmdArgs.push_back(ABIName); if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access, options::OPT_munaligned_access)) { CmdArgs.push_back("-backend-option"); if (A->getOption().matches(options::OPT_mno_unaligned_access)) CmdArgs.push_back("-aarch64-strict-align"); else CmdArgs.push_back("-aarch64-no-strict-align"); } if (Arg *A = Args.getLastArg(options::OPT_mfix_cortex_a53_835769, options::OPT_mno_fix_cortex_a53_835769)) { CmdArgs.push_back("-backend-option"); if (A->getOption().matches(options::OPT_mfix_cortex_a53_835769)) CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1"); else CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=0"); } else if (Triple.getEnvironment() == llvm::Triple::Android) { // Enabled A53 errata (835769) workaround by default on android CmdArgs.push_back("-backend-option"); CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1"); } // Forward the -mglobal-merge option for explicit control over the pass. 
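// As on ARM above, e.g. '-mno-global-merge' lowers to "-backend-option
// -aarch64-global-merge=false" here.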
if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge, options::OPT_mno_global_merge)) { CmdArgs.push_back("-backend-option"); if (A->getOption().matches(options::OPT_mno_global_merge)) CmdArgs.push_back("-aarch64-global-merge=false"); else CmdArgs.push_back("-aarch64-global-merge=true"); } if (Args.hasArg(options::OPT_ffixed_x18)) { CmdArgs.push_back("-backend-option"); CmdArgs.push_back("-aarch64-reserve-x18"); } } // Get CPU and ABI names. They are not independent, // so we have to calculate them together. void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple, StringRef &CPUName, StringRef &ABIName) { const char *DefMips32CPU = "mips32r2"; const char *DefMips64CPU = "mips64r2"; // MIPS32r6 is the default for mips(el)?-img-linux-gnu and MIPS64r6 is the // default for mips64(el)?-img-linux-gnu. if (Triple.getVendor() == llvm::Triple::ImaginationTechnologies && Triple.getEnvironment() == llvm::Triple::GNU) { DefMips32CPU = "mips32r6"; DefMips64CPU = "mips64r6"; } // MIPS3 is the default for mips64*-unknown-openbsd. if (Triple.getOS() == llvm::Triple::OpenBSD) DefMips64CPU = "mips3"; if (Arg *A = Args.getLastArg(options::OPT_march_EQ, options::OPT_mcpu_EQ)) CPUName = A->getValue(); if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) { ABIName = A->getValue(); // Convert a GNU-style Mips ABI name to the name // accepted by the LLVM Mips backend. ABIName = llvm::StringSwitch<llvm::StringRef>(ABIName) .Case("32", "o32") .Case("64", "n64") .Default(ABIName); } // Set up default CPU and ABI names. if (CPUName.empty() && ABIName.empty()) { switch (Triple.getArch()) { default: llvm_unreachable("Unexpected triple arch name"); case llvm::Triple::mips: case llvm::Triple::mipsel: CPUName = DefMips32CPU; break; case llvm::Triple::mips64: case llvm::Triple::mips64el: CPUName = DefMips64CPU; break; } } if (ABIName.empty()) { // Deduce ABI name from the target triple. if (Triple.getArch() == llvm::Triple::mips || Triple.getArch() == llvm::Triple::mipsel) ABIName = "o32"; else ABIName = "n64"; } if (CPUName.empty()) { // Deduce CPU name from ABI name. CPUName = llvm::StringSwitch<const char *>(ABIName) .Cases("o32", "eabi", DefMips32CPU) .Cases("n32", "n64", DefMips64CPU) .Default(""); } // FIXME: Warn on inconsistent use of -march and -mabi. } // Convert an ABI name to the variant accepted by the GNU tools. static StringRef getGnuCompatibleMipsABIName(StringRef ABI) { return llvm::StringSwitch<llvm::StringRef>(ABI) .Case("o32", "32") .Case("n64", "64") .Default(ABI); } // Select the MIPS float ABI as determined by -msoft-float, -mhard-float, // and -mfloat-abi=. static StringRef getMipsFloatABI(const Driver &D, const ArgList &Args) { StringRef FloatABI; if (Arg *A = Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float, options::OPT_mfloat_abi_EQ)) { if (A->getOption().matches(options::OPT_msoft_float)) FloatABI = "soft"; else if (A->getOption().matches(options::OPT_mhard_float)) FloatABI = "hard"; else { FloatABI = A->getValue(); if (FloatABI != "soft" && FloatABI != "hard") { D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args); FloatABI = "hard"; } } } // If unspecified, choose the default based on the platform. if (FloatABI.empty()) { // Assume "hard", because it's the default value used by gcc. // When we start to recognize specific target MIPS processors, // we will be able to select the default more correctly.
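// (For example, a plain mips-linux-gnu invocation with no float options
// reaches this point and gets the gcc-compatible "hard" default.)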
FloatABI = "hard"; } return FloatABI; } static void AddTargetFeature(const ArgList &Args, std::vector &Features, OptSpecifier OnOpt, OptSpecifier OffOpt, StringRef FeatureName) { if (Arg *A = Args.getLastArg(OnOpt, OffOpt)) { if (A->getOption().matches(OnOpt)) Features.push_back(Args.MakeArgString("+" + FeatureName)); else Features.push_back(Args.MakeArgString("-" + FeatureName)); } } static void getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple, const ArgList &Args, std::vector &Features) { StringRef CPUName; StringRef ABIName; mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName); ABIName = getGnuCompatibleMipsABIName(ABIName); AddTargetFeature(Args, Features, options::OPT_mno_abicalls, options::OPT_mabicalls, "noabicalls"); StringRef FloatABI = getMipsFloatABI(D, Args); if (FloatABI == "soft") { // FIXME: Note, this is a hack. We need to pass the selected float // mode to the MipsTargetInfoBase to define appropriate macros there. // Now it is the only method. Features.push_back("+soft-float"); } if (Arg *A = Args.getLastArg(options::OPT_mnan_EQ)) { StringRef Val = StringRef(A->getValue()); if (Val == "2008") { if (mips::getSupportedNanEncoding(CPUName) & mips::Nan2008) Features.push_back("+nan2008"); else { Features.push_back("-nan2008"); D.Diag(diag::warn_target_unsupported_nan2008) << CPUName; } } else if (Val == "legacy") { if (mips::getSupportedNanEncoding(CPUName) & mips::NanLegacy) Features.push_back("-nan2008"); else { Features.push_back("+nan2008"); D.Diag(diag::warn_target_unsupported_nanlegacy) << CPUName; } } else D.Diag(diag::err_drv_unsupported_option_argument) << A->getOption().getName() << Val; } AddTargetFeature(Args, Features, options::OPT_msingle_float, options::OPT_mdouble_float, "single-float"); AddTargetFeature(Args, Features, options::OPT_mips16, options::OPT_mno_mips16, "mips16"); AddTargetFeature(Args, Features, options::OPT_mmicromips, options::OPT_mno_micromips, "micromips"); AddTargetFeature(Args, Features, options::OPT_mdsp, options::OPT_mno_dsp, "dsp"); AddTargetFeature(Args, Features, options::OPT_mdspr2, options::OPT_mno_dspr2, "dspr2"); AddTargetFeature(Args, Features, options::OPT_mmsa, options::OPT_mno_msa, "msa"); // Add the last -mfp32/-mfpxx/-mfp64 or if none are given and the ABI is O32 // pass -mfpxx if (Arg *A = Args.getLastArg(options::OPT_mfp32, options::OPT_mfpxx, options::OPT_mfp64)) { if (A->getOption().matches(options::OPT_mfp32)) Features.push_back(Args.MakeArgString("-fp64")); else if (A->getOption().matches(options::OPT_mfpxx)) { Features.push_back(Args.MakeArgString("+fpxx")); Features.push_back(Args.MakeArgString("+nooddspreg")); } else Features.push_back(Args.MakeArgString("+fp64")); } else if (mips::shouldUseFPXX(Args, Triple, CPUName, ABIName, FloatABI)) { Features.push_back(Args.MakeArgString("+fpxx")); Features.push_back(Args.MakeArgString("+nooddspreg")); } AddTargetFeature(Args, Features, options::OPT_mno_odd_spreg, options::OPT_modd_spreg, "nooddspreg"); } void Clang::AddMIPSTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const { const Driver &D = getToolChain().getDriver(); StringRef CPUName; StringRef ABIName; const llvm::Triple &Triple = getToolChain().getTriple(); mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName); CmdArgs.push_back("-target-abi"); CmdArgs.push_back(ABIName.data()); StringRef FloatABI = getMipsFloatABI(D, Args); if (FloatABI == "soft") { // Floating point operations and argument passing are soft. 
CmdArgs.push_back("-msoft-float"); CmdArgs.push_back("-mfloat-abi"); CmdArgs.push_back("soft"); } else { // Floating point operations and argument passing are hard. assert(FloatABI == "hard" && "Invalid float abi!"); CmdArgs.push_back("-mfloat-abi"); CmdArgs.push_back("hard"); } if (Arg *A = Args.getLastArg(options::OPT_mxgot, options::OPT_mno_xgot)) { if (A->getOption().matches(options::OPT_mxgot)) { CmdArgs.push_back("-mllvm"); CmdArgs.push_back("-mxgot"); } } if (Arg *A = Args.getLastArg(options::OPT_mldc1_sdc1, options::OPT_mno_ldc1_sdc1)) { if (A->getOption().matches(options::OPT_mno_ldc1_sdc1)) { CmdArgs.push_back("-mllvm"); CmdArgs.push_back("-mno-ldc1-sdc1"); } } if (Arg *A = Args.getLastArg(options::OPT_mcheck_zero_division, options::OPT_mno_check_zero_division)) { if (A->getOption().matches(options::OPT_mno_check_zero_division)) { CmdArgs.push_back("-mllvm"); CmdArgs.push_back("-mno-check-zero-division"); } } if (Arg *A = Args.getLastArg(options::OPT_G)) { StringRef v = A->getValue(); CmdArgs.push_back("-mllvm"); CmdArgs.push_back(Args.MakeArgString("-mips-ssection-threshold=" + v)); A->claim(); } } /// getPPCTargetCPU - Get the (LLVM) name of the PowerPC cpu we are targeting. static std::string getPPCTargetCPU(const ArgList &Args) { if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) { StringRef CPUName = A->getValue(); if (CPUName == "native") { std::string CPU = llvm::sys::getHostCPUName(); if (!CPU.empty() && CPU != "generic") return CPU; else return ""; } return llvm::StringSwitch(CPUName) .Case("common", "generic") .Case("440", "440") .Case("440fp", "440") .Case("450", "450") .Case("601", "601") .Case("602", "602") .Case("603", "603") .Case("603e", "603e") .Case("603ev", "603ev") .Case("604", "604") .Case("604e", "604e") .Case("620", "620") .Case("630", "pwr3") .Case("G3", "g3") .Case("7400", "7400") .Case("G4", "g4") .Case("7450", "7450") .Case("G4+", "g4+") .Case("750", "750") .Case("970", "970") .Case("G5", "g5") .Case("a2", "a2") .Case("a2q", "a2q") .Case("e500mc", "e500mc") .Case("e5500", "e5500") .Case("power3", "pwr3") .Case("power4", "pwr4") .Case("power5", "pwr5") .Case("power5x", "pwr5x") .Case("power6", "pwr6") .Case("power6x", "pwr6x") .Case("power7", "pwr7") .Case("power8", "pwr8") .Case("pwr3", "pwr3") .Case("pwr4", "pwr4") .Case("pwr5", "pwr5") .Case("pwr5x", "pwr5x") .Case("pwr6", "pwr6") .Case("pwr6x", "pwr6x") .Case("pwr7", "pwr7") .Case("pwr8", "pwr8") .Case("powerpc", "ppc") .Case("powerpc64", "ppc64") .Case("powerpc64le", "ppc64le") .Default(""); } return ""; } static void getPPCTargetFeatures(const ArgList &Args, std::vector &Features) { for (const Arg *A : Args.filtered(options::OPT_m_ppc_Features_Group)) { StringRef Name = A->getOption().getName(); A->claim(); // Skip over "-m". assert(Name.startswith("m") && "Invalid feature name."); Name = Name.substr(1); bool IsNegative = Name.startswith("no-"); if (IsNegative) Name = Name.substr(3); // Note that gcc calls this mfcrf and LLVM calls this mfocrf so we // pass the correct option to the backend while calling the frontend // option the same. // TODO: Change the LLVM backend option maybe? if (Name == "mfcrf") Name = "mfocrf"; Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name)); } // Altivec is a bit weird, allow overriding of the Altivec feature here. AddTargetFeature(Args, Features, options::OPT_faltivec, options::OPT_fno_altivec, "altivec"); } void Clang::AddPPCTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const { // Select the ABI to use. 
const char *ABIName = nullptr; if (getToolChain().getTriple().isOSLinux()) switch (getToolChain().getArch()) { case llvm::Triple::ppc64: { // When targeting a processor that supports QPX, or if QPX is // specifically enabled, default to using the ABI that supports QPX (so // long as it is not specifically disabled). bool HasQPX = false; if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) HasQPX = A->getValue() == StringRef("a2q"); HasQPX = Args.hasFlag(options::OPT_mqpx, options::OPT_mno_qpx, HasQPX); if (HasQPX) { ABIName = "elfv1-qpx"; break; } ABIName = "elfv1"; break; } case llvm::Triple::ppc64le: ABIName = "elfv2"; break; default: break; } if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) // The ppc64 linux abis are all "altivec" abis by default. Accept and ignore // the option if given as we don't have backend support for any targets // that don't use the altivec abi. if (StringRef(A->getValue()) != "altivec") ABIName = A->getValue(); if (ABIName) { CmdArgs.push_back("-target-abi"); CmdArgs.push_back(ABIName); } } bool ppc::hasPPCAbiArg(const ArgList &Args, const char *Value) { Arg *A = Args.getLastArg(options::OPT_mabi_EQ); return A && (A->getValue() == StringRef(Value)); } /// Get the (LLVM) name of the R600 gpu we are targeting. static std::string getR600TargetGPU(const ArgList &Args) { if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) { const char *GPUName = A->getValue(); return llvm::StringSwitch(GPUName) .Cases("rv630", "rv635", "r600") .Cases("rv610", "rv620", "rs780", "rs880") .Case("rv740", "rv770") .Case("palm", "cedar") .Cases("sumo", "sumo2", "sumo") .Case("hemlock", "cypress") .Case("aruba", "cayman") .Default(GPUName); } return ""; } void Clang::AddSparcTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const { const Driver &D = getToolChain().getDriver(); std::string Triple = getToolChain().ComputeEffectiveClangTriple(Args); bool SoftFloatABI = false; if (Arg *A = Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float)) { if (A->getOption().matches(options::OPT_msoft_float)) SoftFloatABI = true; } // Only the hard-float ABI on Sparc is standardized, and it is the // default. GCC also supports a nonstandard soft-float ABI mode, and // perhaps LLVM should implement that, too. However, since llvm // currently does not support Sparc soft-float, at all, display an // error if it's requested. if (SoftFloatABI) { D.Diag(diag::err_drv_unsupported_opt_for_target) << "-msoft-float" << Triple; } } static const char *getSystemZTargetCPU(const ArgList &Args) { if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) return A->getValue(); return "z10"; } static void getSystemZTargetFeatures(const ArgList &Args, std::vector &Features) { // -m(no-)htm overrides use of the transactional-execution facility. if (Arg *A = Args.getLastArg(options::OPT_mhtm, options::OPT_mno_htm)) { if (A->getOption().matches(options::OPT_mhtm)) Features.push_back("+transactional-execution"); else Features.push_back("-transactional-execution"); } // -m(no-)vx overrides use of the vector facility. 
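// e.g. a hypothetical '-march=z13 -mno-vx' invocation keeps the CPU but
// appends "-vector", so the backend may not use the vector facility.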
if (Arg *A = Args.getLastArg(options::OPT_mvx, options::OPT_mno_vx)) { if (A->getOption().matches(options::OPT_mvx)) Features.push_back("+vector"); else Features.push_back("-vector"); } } static const char *getX86TargetCPU(const ArgList &Args, const llvm::Triple &Triple) { if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) { if (StringRef(A->getValue()) != "native") { if (Triple.isOSDarwin() && Triple.getArchName() == "x86_64h") return "core-avx2"; return A->getValue(); } // FIXME: Reject attempts to use -march=native unless the target matches // the host. // // FIXME: We should also incorporate the detected target features for use // with -native. std::string CPU = llvm::sys::getHostCPUName(); if (!CPU.empty() && CPU != "generic") return Args.MakeArgString(CPU); } if (const Arg *A = Args.getLastArg(options::OPT__SLASH_arch)) { // Mapping built by referring to X86TargetInfo::getDefaultFeatures(). StringRef Arch = A->getValue(); const char *CPU; if (Triple.getArch() == llvm::Triple::x86) { CPU = llvm::StringSwitch(Arch) .Case("IA32", "i386") .Case("SSE", "pentium3") .Case("SSE2", "pentium4") .Case("AVX", "sandybridge") .Case("AVX2", "haswell") .Default(nullptr); } else { CPU = llvm::StringSwitch(Arch) .Case("AVX", "sandybridge") .Case("AVX2", "haswell") .Default(nullptr); } if (CPU) return CPU; } // Select the default CPU if none was given (or detection failed). if (Triple.getArch() != llvm::Triple::x86_64 && Triple.getArch() != llvm::Triple::x86) return nullptr; // This routine is only handling x86 targets. bool Is64Bit = Triple.getArch() == llvm::Triple::x86_64; // FIXME: Need target hooks. if (Triple.isOSDarwin()) { if (Triple.getArchName() == "x86_64h") return "core-avx2"; return Is64Bit ? "core2" : "yonah"; } // Set up default CPU name for PS4 compilers. if (Triple.isPS4CPU()) return "btver2"; // On Android use targets compatible with gcc if (Triple.getEnvironment() == llvm::Triple::Android) return Is64Bit ? "x86-64" : "i686"; // Everything else goes to x86-64 in 64-bit mode. if (Is64Bit) return "x86-64"; switch (Triple.getOS()) { case llvm::Triple::FreeBSD: case llvm::Triple::NetBSD: case llvm::Triple::OpenBSD: return "i486"; case llvm::Triple::Haiku: return "i586"; case llvm::Triple::Bitrig: return "i686"; default: // Fallback to p4. return "pentium4"; } } static std::string getCPUName(const ArgList &Args, const llvm::Triple &T, bool FromAs = false) { switch (T.getArch()) { default: return ""; case llvm::Triple::aarch64: case llvm::Triple::aarch64_be: return getAArch64TargetCPU(Args); case llvm::Triple::arm: case llvm::Triple::armeb: case llvm::Triple::thumb: case llvm::Triple::thumbeb: { StringRef MArch, MCPU; getARMArchCPUFromArgs(Args, MArch, MCPU, FromAs); return arm::getARMTargetCPU(MCPU, MArch, T); } case llvm::Triple::mips: case llvm::Triple::mipsel: case llvm::Triple::mips64: case llvm::Triple::mips64el: { StringRef CPUName; StringRef ABIName; mips::getMipsCPUAndABI(Args, T, CPUName, ABIName); return CPUName; } case llvm::Triple::nvptx: case llvm::Triple::nvptx64: if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) return A->getValue(); return ""; case llvm::Triple::ppc: case llvm::Triple::ppc64: case llvm::Triple::ppc64le: { std::string TargetCPUName = getPPCTargetCPU(Args); // LLVM may default to generating code for the native CPU, // but, like gcc, we default to a more generic option for // each architecture. 
(except on Darwin) if (TargetCPUName.empty() && !T.isOSDarwin()) { if (T.getArch() == llvm::Triple::ppc64) TargetCPUName = "ppc64"; else if (T.getArch() == llvm::Triple::ppc64le) TargetCPUName = "ppc64le"; else TargetCPUName = "ppc"; } return TargetCPUName; } case llvm::Triple::sparc: case llvm::Triple::sparcel: case llvm::Triple::sparcv9: if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) return A->getValue(); return ""; case llvm::Triple::x86: case llvm::Triple::x86_64: return getX86TargetCPU(Args, T); case llvm::Triple::hexagon: return "hexagon" + toolchains::Hexagon_TC::GetTargetCPU(Args).str(); case llvm::Triple::systemz: return getSystemZTargetCPU(Args); case llvm::Triple::r600: case llvm::Triple::amdgcn: return getR600TargetGPU(Args); } } static void AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args, ArgStringList &CmdArgs) { // Tell the linker to load the plugin. This has to come before AddLinkerInputs // as gold requires -plugin to come before any -plugin-opt that -Wl might // forward. CmdArgs.push_back("-plugin"); std::string Plugin = ToolChain.getDriver().Dir + "/../lib" CLANG_LIBDIR_SUFFIX "/LLVMgold.so"; CmdArgs.push_back(Args.MakeArgString(Plugin)); // Try to pass driver level flags relevant to LTO code generation down to // the plugin. // Handle flags for selecting CPU variants. std::string CPU = getCPUName(Args, ToolChain.getTriple()); if (!CPU.empty()) CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=mcpu=") + CPU)); } /// This is a helper function for validating the optional refinement step /// parameter in reciprocal argument strings. Return false if there is an error /// parsing the refinement step. Otherwise, return true and set the Position /// of the refinement step in the input string. static bool getRefinementStep(const StringRef &In, const Driver &D, const Arg &A, size_t &Position) { const char RefinementStepToken = ':'; Position = In.find(RefinementStepToken); if (Position != StringRef::npos) { StringRef Option = A.getOption().getName(); StringRef RefStep = In.substr(Position + 1); // Allow exactly one numeric character for the additional refinement // step parameter. This is reasonable for all currently-supported // operations and architectures because we would expect that a larger value // of refinement steps would cause the estimate "optimization" to // under-perform the native operation. Also, if the estimate does not // converge quickly, it probably will not ever converge, so further // refinement steps will not produce a better answer. if (RefStep.size() != 1) { D.Diag(diag::err_drv_invalid_value) << Option << RefStep; return false; } char RefStepChar = RefStep[0]; if (RefStepChar < '0' || RefStepChar > '9') { D.Diag(diag::err_drv_invalid_value) << Option << RefStep; return false; } } return true; } /// The -mrecip flag requires processing of many optional parameters. static void ParseMRecip(const Driver &D, const ArgList &Args, ArgStringList &OutStrings) { StringRef DisabledPrefixIn = "!"; StringRef DisabledPrefixOut = "!"; StringRef EnabledPrefixOut = ""; StringRef Out = "-mrecip="; Arg *A = Args.getLastArg(options::OPT_mrecip, options::OPT_mrecip_EQ); if (!A) return; unsigned NumOptions = A->getNumValues(); if (NumOptions == 0) { // No option is the same as "all". OutStrings.push_back(Args.MakeArgString(Out + "all")); return; } // Pass through "all", "none", or "default" with an optional refinement step. 
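// e.g. '-mrecip=all:1' is forwarded unchanged once the single-digit
// refinement step after ':' is validated; a two-digit step such as
// '-mrecip=all:12' is rejected by getRefinementStep above.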
if (NumOptions == 1) { StringRef Val = A->getValue(0); size_t RefStepLoc; if (!getRefinementStep(Val, D, *A, RefStepLoc)) return; StringRef ValBase = Val.slice(0, RefStepLoc); if (ValBase == "all" || ValBase == "none" || ValBase == "default") { OutStrings.push_back(Args.MakeArgString(Out + Val)); return; } } // Each reciprocal type may be enabled or disabled individually. // Check each input value for validity, concatenate them all back together, // and pass through. llvm::StringMap<bool> OptionStrings; OptionStrings.insert(std::make_pair("divd", false)); OptionStrings.insert(std::make_pair("divf", false)); OptionStrings.insert(std::make_pair("vec-divd", false)); OptionStrings.insert(std::make_pair("vec-divf", false)); OptionStrings.insert(std::make_pair("sqrtd", false)); OptionStrings.insert(std::make_pair("sqrtf", false)); OptionStrings.insert(std::make_pair("vec-sqrtd", false)); OptionStrings.insert(std::make_pair("vec-sqrtf", false)); for (unsigned i = 0; i != NumOptions; ++i) { StringRef Val = A->getValue(i); bool IsDisabled = Val.startswith(DisabledPrefixIn); // Ignore the disablement token for string matching. if (IsDisabled) Val = Val.substr(1); size_t RefStep; if (!getRefinementStep(Val, D, *A, RefStep)) return; StringRef ValBase = Val.slice(0, RefStep); llvm::StringMap<bool>::iterator OptionIter = OptionStrings.find(ValBase); if (OptionIter == OptionStrings.end()) { // Try again specifying float suffix. OptionIter = OptionStrings.find(ValBase.str() + 'f'); if (OptionIter == OptionStrings.end()) { // The input name did not match any known option string. D.Diag(diag::err_drv_unknown_argument) << Val; return; } // The option was specified without a float or double suffix. // Make sure that the double entry was not already specified. // The float entry will be checked below. if (OptionStrings[ValBase.str() + 'd']) { D.Diag(diag::err_drv_invalid_value) << A->getOption().getName() << Val; return; } } if (OptionIter->second == true) { // Duplicate option specified. D.Diag(diag::err_drv_invalid_value) << A->getOption().getName() << Val; return; } // Mark the matched option as found. Do not allow duplicate specifiers. OptionIter->second = true; // If the precision was not specified, also mark the double entry as found. if (ValBase.back() != 'f' && ValBase.back() != 'd') OptionStrings[ValBase.str() + 'd'] = true; // Build the output string. StringRef Prefix = IsDisabled ? DisabledPrefixOut : EnabledPrefixOut; Out = Args.MakeArgString(Out + Prefix + Val); if (i != NumOptions - 1) Out = Args.MakeArgString(Out + ","); } OutStrings.push_back(Args.MakeArgString(Out)); } static void getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple, const ArgList &Args, std::vector<const char *> &Features) { // If -march=native, autodetect the feature list. if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) { if (StringRef(A->getValue()) == "native") { llvm::StringMap<bool> HostFeatures; if (llvm::sys::getHostCPUFeatures(HostFeatures)) for (auto &F : HostFeatures) Features.push_back( Args.MakeArgString((F.second ? "+" : "-") + F.first())); } } if (Triple.getArchName() == "x86_64h") { // x86_64h implies quite a few of the more modern subtarget features // for Haswell-class CPUs, but not all of them. Opt out of a few. Features.push_back("-rdrnd"); Features.push_back("-aes"); Features.push_back("-pclmul"); Features.push_back("-rtm"); Features.push_back("-hle"); Features.push_back("-fsgsbase"); } const llvm::Triple::ArchType ArchType = Triple.getArch(); // Add features to be compatible with gcc for Android.
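// e.g. x86_64-linux-android gets "+sse4.2" and "+popcnt" below to match
// gcc's baseline for that target; 32-bit Android x86 gets "+ssse3" instead.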
if (Triple.getEnvironment() == llvm::Triple::Android) { if (ArchType == llvm::Triple::x86_64) { Features.push_back("+sse4.2"); Features.push_back("+popcnt"); } else Features.push_back("+ssse3"); } // Set features according to the -arch flag on MSVC. if (Arg *A = Args.getLastArg(options::OPT__SLASH_arch)) { StringRef Arch = A->getValue(); bool ArchUsed = false; // First, look for flags that are shared in x86 and x86-64. if (ArchType == llvm::Triple::x86_64 || ArchType == llvm::Triple::x86) { if (Arch == "AVX" || Arch == "AVX2") { ArchUsed = true; Features.push_back(Args.MakeArgString("+" + Arch.lower())); } } // Then, look for x86-specific flags. if (ArchType == llvm::Triple::x86) { if (Arch == "IA32") { ArchUsed = true; } else if (Arch == "SSE" || Arch == "SSE2") { ArchUsed = true; Features.push_back(Args.MakeArgString("+" + Arch.lower())); } } if (!ArchUsed) D.Diag(clang::diag::warn_drv_unused_argument) << A->getAsString(Args); } // Now add any that the user explicitly requested on the command line, // which may override the defaults. for (const Arg *A : Args.filtered(options::OPT_m_x86_Features_Group)) { StringRef Name = A->getOption().getName(); A->claim(); // Skip over "-m". assert(Name.startswith("m") && "Invalid feature name."); Name = Name.substr(1); bool IsNegative = Name.startswith("no-"); if (IsNegative) Name = Name.substr(3); Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name)); } } void Clang::AddX86TargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const { if (!Args.hasFlag(options::OPT_mred_zone, options::OPT_mno_red_zone, true) || Args.hasArg(options::OPT_mkernel) || Args.hasArg(options::OPT_fapple_kext)) CmdArgs.push_back("-disable-red-zone"); // Default to avoid implicit floating-point for kernel/kext code, but allow // that to be overridden with -mno-soft-float. bool NoImplicitFloat = (Args.hasArg(options::OPT_mkernel) || Args.hasArg(options::OPT_fapple_kext)); if (Arg *A = Args.getLastArg( options::OPT_msoft_float, options::OPT_mno_soft_float, options::OPT_mimplicit_float, options::OPT_mno_implicit_float)) { const Option &O = A->getOption(); NoImplicitFloat = (O.matches(options::OPT_mno_implicit_float) || O.matches(options::OPT_msoft_float)); } if (NoImplicitFloat) CmdArgs.push_back("-no-implicit-float"); if (Arg *A = Args.getLastArg(options::OPT_masm_EQ)) { StringRef Value = A->getValue(); if (Value == "intel" || Value == "att") { CmdArgs.push_back("-mllvm"); CmdArgs.push_back(Args.MakeArgString("-x86-asm-syntax=" + Value)); } else { getToolChain().getDriver().Diag(diag::err_drv_unsupported_option_argument) << A->getOption().getName() << Value; } } } void Clang::AddHexagonTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const { CmdArgs.push_back("-mqdsp6-compat"); CmdArgs.push_back("-Wreturn-type"); if (const char *v = toolchains::Hexagon_TC::GetSmallDataThreshold(Args)) { std::string SmallDataThreshold = "-hexagon-small-data-threshold="; SmallDataThreshold += v; CmdArgs.push_back("-mllvm"); CmdArgs.push_back(Args.MakeArgString(SmallDataThreshold)); } if (!Args.hasArg(options::OPT_fno_short_enums)) CmdArgs.push_back("-fshort-enums"); if (Args.getLastArg(options::OPT_mieee_rnd_near)) { CmdArgs.push_back("-mllvm"); CmdArgs.push_back("-enable-hexagon-ieee-rnd-near"); } CmdArgs.push_back("-mllvm"); CmdArgs.push_back("-machine-sink-split=0"); } // Decode AArch64 features from string like +[no]featureA+[no]featureB+... 
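// For example, "-march=armv8-a+crc+nocrypto" decodes to {"+crc", "-crypto"},
// while an explicit "+neon"/"+noneon" modifier is rejected with a dedicated
// diagnostic below.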
static bool DecodeAArch64Features(const Driver &D, StringRef text, std::vector<const char *> &Features) { SmallVector<StringRef, 8> Split; text.split(Split, StringRef("+"), -1, false); for (const StringRef Feature : Split) { const char *result = llvm::StringSwitch<const char *>(Feature) .Case("fp", "+fp-armv8") .Case("simd", "+neon") .Case("crc", "+crc") .Case("crypto", "+crypto") .Case("nofp", "-fp-armv8") .Case("nosimd", "-neon") .Case("nocrc", "-crc") .Case("nocrypto", "-crypto") .Default(nullptr); if (result) Features.push_back(result); else if (Feature == "neon" || Feature == "noneon") D.Diag(diag::err_drv_no_neon_modifier); else return false; } return true; } // Check if the CPU name and feature modifiers in -mcpu are legal. If yes, // decode CPU and feature. static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU, std::vector<const char *> &Features) { std::pair<StringRef, StringRef> Split = Mcpu.split("+"); CPU = Split.first; if (CPU == "cyclone" || CPU == "cortex-a53" || CPU == "cortex-a57" || CPU == "cortex-a72") { Features.push_back("+neon"); Features.push_back("+crc"); Features.push_back("+crypto"); } else if (CPU == "generic") { Features.push_back("+neon"); } else { return false; } if (Split.second.size() && !DecodeAArch64Features(D, Split.second, Features)) return false; return true; } static bool getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March, const ArgList &Args, std::vector<const char *> &Features) { std::string MarchLowerCase = March.lower(); std::pair<StringRef, StringRef> Split = StringRef(MarchLowerCase).split("+"); if (Split.first == "armv8-a" || Split.first == "armv8a") { // ok, no additional features. } else if (Split.first == "armv8.1-a" || Split.first == "armv8.1a") { Features.push_back("+v8.1a"); } else { return false; } if (Split.second.size() && !DecodeAArch64Features(D, Split.second, Features)) return false; return true; } static bool getAArch64ArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu, const ArgList &Args, std::vector<const char *> &Features) { StringRef CPU; std::string McpuLowerCase = Mcpu.lower(); if (!DecodeAArch64Mcpu(D, McpuLowerCase, CPU, Features)) return false; return true; } static bool getAArch64MicroArchFeaturesFromMtune(const Driver &D, StringRef Mtune, const ArgList &Args, std::vector<const char *> &Features) { // Handle the case where the CPU name is 'native'. if (Mtune == "native") Mtune = llvm::sys::getHostCPUName(); if (Mtune == "cyclone") { Features.push_back("+zcm"); Features.push_back("+zcz"); } return true; } static bool getAArch64MicroArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu, const ArgList &Args, std::vector<const char *> &Features) { StringRef CPU; std::vector<const char *> DecodedFeature; std::string McpuLowerCase = Mcpu.lower(); if (!DecodeAArch64Mcpu(D, McpuLowerCase, CPU, DecodedFeature)) return false; return getAArch64MicroArchFeaturesFromMtune(D, CPU, Args, Features); } static void getAArch64TargetFeatures(const Driver &D, const ArgList &Args, std::vector<const char *> &Features) { Arg *A; bool success = true; // Enable NEON by default.
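// e.g. '-mcpu=cyclone' contributes "+neon", "+crc" and "+crypto" through
// DecodeAArch64Mcpu above, plus "+zcm"/"+zcz" for tuning via the Mtune path.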
Features.push_back("+neon"); if ((A = Args.getLastArg(options::OPT_march_EQ))) success = getAArch64ArchFeaturesFromMarch(D, A->getValue(), Args, Features); else if ((A = Args.getLastArg(options::OPT_mcpu_EQ))) success = getAArch64ArchFeaturesFromMcpu(D, A->getValue(), Args, Features); else if (Args.hasArg(options::OPT_arch)) success = getAArch64ArchFeaturesFromMcpu(D, getAArch64TargetCPU(Args), Args, Features); if (success && (A = Args.getLastArg(options::OPT_mtune_EQ))) success = getAArch64MicroArchFeaturesFromMtune(D, A->getValue(), Args, Features); else if (success && (A = Args.getLastArg(options::OPT_mcpu_EQ))) success = getAArch64MicroArchFeaturesFromMcpu(D, A->getValue(), Args, Features); else if (Args.hasArg(options::OPT_arch)) success = getAArch64MicroArchFeaturesFromMcpu(D, getAArch64TargetCPU(Args), Args, Features); if (!success) D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args); if (Args.getLastArg(options::OPT_mgeneral_regs_only)) { Features.push_back("-fp-armv8"); Features.push_back("-crypto"); Features.push_back("-neon"); } // En/disable crc if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) { if (A->getOption().matches(options::OPT_mcrc)) Features.push_back("+crc"); else Features.push_back("-crc"); } } static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple, const ArgList &Args, ArgStringList &CmdArgs, bool ForAS) { std::vector Features; switch (Triple.getArch()) { default: break; case llvm::Triple::mips: case llvm::Triple::mipsel: case llvm::Triple::mips64: case llvm::Triple::mips64el: getMIPSTargetFeatures(D, Triple, Args, Features); break; case llvm::Triple::arm: case llvm::Triple::armeb: case llvm::Triple::thumb: case llvm::Triple::thumbeb: getARMTargetFeatures(D, Triple, Args, Features, ForAS); break; case llvm::Triple::ppc: case llvm::Triple::ppc64: case llvm::Triple::ppc64le: getPPCTargetFeatures(Args, Features); break; case llvm::Triple::systemz: getSystemZTargetFeatures(Args, Features); break; case llvm::Triple::aarch64: case llvm::Triple::aarch64_be: getAArch64TargetFeatures(D, Args, Features); break; case llvm::Triple::x86: case llvm::Triple::x86_64: getX86TargetFeatures(D, Triple, Args, Features); break; } // Find the last of each feature. llvm::StringMap LastOpt; for (unsigned I = 0, N = Features.size(); I < N; ++I) { const char *Name = Features[I]; assert(Name[0] == '-' || Name[0] == '+'); LastOpt[Name + 1] = I; } for (unsigned I = 0, N = Features.size(); I < N; ++I) { // If this feature was overridden, ignore it. const char *Name = Features[I]; llvm::StringMap::iterator LastI = LastOpt.find(Name + 1); assert(LastI != LastOpt.end()); unsigned Last = LastI->second; if (Last != I) continue; CmdArgs.push_back("-target-feature"); CmdArgs.push_back(Name); } } static bool shouldUseExceptionTablesForObjCExceptions(const ObjCRuntime &runtime, const llvm::Triple &Triple) { // We use the zero-cost exception tables for Objective-C if the non-fragile // ABI is enabled or when compiling for x86_64 and ARM on Snow Leopard and // later. if (runtime.isNonFragile()) return true; if (!Triple.isMacOSX()) return false; return (!Triple.isMacOSXVersionLT(10, 5) && (Triple.getArch() == llvm::Triple::x86_64 || Triple.getArch() == llvm::Triple::arm)); } /// Adds exception related arguments to the driver command arguments. There's a /// master flag, -fexceptions and also language specific flags to enable/disable /// C++ and Objective-C exceptions. 
This makes it possible to for example /// disable C++ exceptions but enable Objective-C exceptions. static void addExceptionArgs(const ArgList &Args, types::ID InputType, const ToolChain &TC, bool KernelOrKext, const ObjCRuntime &objcRuntime, ArgStringList &CmdArgs) { const Driver &D = TC.getDriver(); const llvm::Triple &Triple = TC.getTriple(); if (KernelOrKext) { // -mkernel and -fapple-kext imply no exceptions, so claim exception related // arguments now to avoid warnings about unused arguments. Args.ClaimAllArgs(options::OPT_fexceptions); Args.ClaimAllArgs(options::OPT_fno_exceptions); Args.ClaimAllArgs(options::OPT_fobjc_exceptions); Args.ClaimAllArgs(options::OPT_fno_objc_exceptions); Args.ClaimAllArgs(options::OPT_fcxx_exceptions); Args.ClaimAllArgs(options::OPT_fno_cxx_exceptions); return; } // See if the user explicitly enabled exceptions. bool EH = Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions, false); // Obj-C exceptions are enabled by default, regardless of -fexceptions. This // is not necessarily sensible, but follows GCC. if (types::isObjC(InputType) && Args.hasFlag(options::OPT_fobjc_exceptions, options::OPT_fno_objc_exceptions, true)) { CmdArgs.push_back("-fobjc-exceptions"); EH |= shouldUseExceptionTablesForObjCExceptions(objcRuntime, Triple); } if (types::isCXX(InputType)) { // Disable C++ EH by default on XCore, PS4, and MSVC. // FIXME: Remove MSVC from this list once things work. bool CXXExceptionsEnabled = Triple.getArch() != llvm::Triple::xcore && !Triple.isPS4CPU() && !Triple.isWindowsMSVCEnvironment(); Arg *ExceptionArg = Args.getLastArg( options::OPT_fcxx_exceptions, options::OPT_fno_cxx_exceptions, options::OPT_fexceptions, options::OPT_fno_exceptions); if (ExceptionArg) CXXExceptionsEnabled = ExceptionArg->getOption().matches(options::OPT_fcxx_exceptions) || ExceptionArg->getOption().matches(options::OPT_fexceptions); if (CXXExceptionsEnabled) { if (Triple.isPS4CPU()) { ToolChain::RTTIMode RTTIMode = TC.getRTTIMode(); assert(ExceptionArg && "On the PS4 exceptions should only be enabled if passing " "an argument"); if (RTTIMode == ToolChain::RM_DisabledExplicitly) { const Arg *RTTIArg = TC.getRTTIArg(); assert(RTTIArg && "RTTI disabled explicitly but no RTTIArg!"); D.Diag(diag::err_drv_argument_not_allowed_with) << RTTIArg->getAsString(Args) << ExceptionArg->getAsString(Args); } else if (RTTIMode == ToolChain::RM_EnabledImplicitly) D.Diag(diag::warn_drv_enabling_rtti_with_exceptions); } else assert(TC.getRTTIMode() != ToolChain::RM_DisabledImplicitly); CmdArgs.push_back("-fcxx-exceptions"); EH = true; } } if (EH) CmdArgs.push_back("-fexceptions"); } static bool ShouldDisableAutolink(const ArgList &Args, const ToolChain &TC) { bool Default = true; if (TC.getTriple().isOSDarwin()) { // The native darwin assembler doesn't support the linker_option directives, // so we disable them if we think the .s file will be passed to it. Default = TC.useIntegratedAs(); } return !Args.hasFlag(options::OPT_fautolink, options::OPT_fno_autolink, Default); } static bool ShouldDisableDwarfDirectory(const ArgList &Args, const ToolChain &TC) { bool UseDwarfDirectory = Args.hasFlag(options::OPT_fdwarf_directory_asm, options::OPT_fno_dwarf_directory_asm, TC.useIntegratedAs()); return !UseDwarfDirectory; } /// \brief Check whether the given input tree contains any compilation actions. 
static bool ContainsCompileAction(const Action *A) { if (isa<CompileJobAction>(A) || isa<BackendJobAction>(A)) return true; for (const auto &Act : *A) if (ContainsCompileAction(Act)) return true; return false; } /// \brief Check if -relax-all should be passed to the internal assembler. /// This is done by default when compiling non-assembler source with -O0. static bool UseRelaxAll(Compilation &C, const ArgList &Args) { bool RelaxDefault = true; if (Arg *A = Args.getLastArg(options::OPT_O_Group)) RelaxDefault = A->getOption().matches(options::OPT_O0); if (RelaxDefault) { RelaxDefault = false; for (const auto &Act : C.getActions()) { if (ContainsCompileAction(Act)) { RelaxDefault = true; break; } } } return Args.hasFlag(options::OPT_mrelax_all, options::OPT_mno_relax_all, RelaxDefault); } static void CollectArgsForIntegratedAssembler(Compilation &C, const ArgList &Args, ArgStringList &CmdArgs, const Driver &D) { if (UseRelaxAll(C, Args)) CmdArgs.push_back("-mrelax-all"); // When passing -I arguments to the assembler we sometimes need to // unconditionally take the next argument. For example, when parsing // '-Wa,-I -Wa,foo' we need to accept the -Wa,foo arg after seeing the // -Wa,-I arg and when parsing '-Wa,-I,foo' we need to accept the 'foo' // arg after parsing the '-I' arg. bool TakeNextArg = false; // When using an integrated assembler, translate -Wa, and -Xassembler // options. bool CompressDebugSections = false; for (const Arg *A : Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) { A->claim(); for (const StringRef Value : A->getValues()) { if (TakeNextArg) { CmdArgs.push_back(Value.data()); TakeNextArg = false; continue; } if (Value == "-force_cpusubtype_ALL") { // Do nothing, this is the default and we don't support anything else. } else if (Value == "-L") { CmdArgs.push_back("-msave-temp-labels"); } else if (Value == "--fatal-warnings") { CmdArgs.push_back("-massembler-fatal-warnings"); } else if (Value == "--noexecstack") { CmdArgs.push_back("-mnoexecstack"); } else if (Value == "-compress-debug-sections" || Value == "--compress-debug-sections") { CompressDebugSections = true; } else if (Value == "-nocompress-debug-sections" || Value == "--nocompress-debug-sections") { CompressDebugSections = false; } else if (Value.startswith("-I")) { CmdArgs.push_back(Value.data()); // We need to consume the next argument if the current arg is a plain // -I. The next arg will be the include directory. if (Value == "-I") TakeNextArg = true; } else if (Value.startswith("-gdwarf-")) { CmdArgs.push_back(Value.data()); } else if (Value.startswith("-mcpu") || Value.startswith("-mfpu") || Value.startswith("-mhwdiv") || Value.startswith("-march")) { // Do nothing, we'll validate it later. } else { D.Diag(diag::err_drv_unsupported_option_argument) << A->getOption().getName() << Value; } } } if (CompressDebugSections) { if (llvm::zlib::isAvailable()) CmdArgs.push_back("-compress-debug-sections"); else D.Diag(diag::warn_debug_compression_unavailable); } } // Until the ARM libraries are built separately, we have them all in one library. static StringRef getArchNameForCompilerRTLib(const ToolChain &TC) { if (TC.getTriple().isWindowsMSVCEnvironment() && TC.getArch() == llvm::Triple::x86) return "i386"; if (TC.getArch() == llvm::Triple::arm || TC.getArch() == llvm::Triple::armeb) return "arm"; return TC.getArchName(); } static SmallString<128> getCompilerRTLibDir(const ToolChain &TC) { // The runtimes are located in the OS-specific resource directory.
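// e.g. for an x86_64-linux driver this resolves to something like
// <ResourceDir>/lib/linux (path shown for illustration only).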
SmallString<128> Res(TC.getDriver().ResourceDir); const llvm::Triple &Triple = TC.getTriple(); // TC.getOS() yield "freebsd10.0" whereas "freebsd" is expected. StringRef OSLibName = (Triple.getOS() == llvm::Triple::FreeBSD) ? "freebsd" : TC.getOS(); llvm::sys::path::append(Res, "lib", OSLibName); return Res; } SmallString<128> tools::getCompilerRT(const ToolChain &TC, StringRef Component, bool Shared) { const char *Env = TC.getTriple().getEnvironment() == llvm::Triple::Android ? "-android" : ""; bool IsOSWindows = TC.getTriple().isOSWindows(); bool IsITANMSVCWindows = TC.getTriple().isWindowsMSVCEnvironment() || TC.getTriple().isWindowsItaniumEnvironment(); StringRef Arch = getArchNameForCompilerRTLib(TC); const char *Prefix = IsITANMSVCWindows ? "" : "lib"; const char *Suffix = Shared ? (IsOSWindows ? ".dll" : ".so") : (IsITANMSVCWindows ? ".lib" : ".a"); SmallString<128> Path = getCompilerRTLibDir(TC); llvm::sys::path::append(Path, Prefix + Twine("clang_rt.") + Component + "-" + Arch + Env + Suffix); return Path; } // This adds the static libclang_rt.builtins-arch.a directly to the command line // FIXME: Make sure we can also emit shared objects if they're requested // and available, check for possible errors, etc. static void addClangRT(const ToolChain &TC, const ArgList &Args, ArgStringList &CmdArgs) { CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, "builtins"))); if (!TC.getTriple().isOSWindows()) { // FIXME: why do we link against gcc when we are using compiler-rt? CmdArgs.push_back("-lgcc_s"); if (TC.getDriver().CCCIsCXX()) CmdArgs.push_back("-lgcc_eh"); } } static void addProfileRT(const ToolChain &TC, const ArgList &Args, ArgStringList &CmdArgs) { if (!(Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs, false) || Args.hasArg(options::OPT_fprofile_generate) || Args.hasArg(options::OPT_fprofile_generate_EQ) || Args.hasArg(options::OPT_fprofile_instr_generate) || Args.hasArg(options::OPT_fprofile_instr_generate_EQ) || Args.hasArg(options::OPT_fcreate_profile) || Args.hasArg(options::OPT_coverage))) return; CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, "profile"))); } namespace { enum OpenMPRuntimeKind { /// An unknown OpenMP runtime. We can't generate effective OpenMP code /// without knowing what runtime to target. OMPRT_Unknown, /// The LLVM OpenMP runtime. When completed and integrated, this will become /// the default for Clang. OMPRT_OMP, /// The GNU OpenMP runtime. Clang doesn't support generating OpenMP code for /// this runtime but can swallow the pragmas, and find and link against the /// runtime library itself. OMPRT_GOMP, /// The legacy name for the LLVM OpenMP runtime from when it was the Intel /// OpenMP runtime. We support this mode for users with existing dependencies /// on this runtime library name. OMPRT_IOMP5 }; } /// Compute the desired OpenMP runtime from the flag provided. static OpenMPRuntimeKind getOpenMPRuntime(const ToolChain &TC, const ArgList &Args) { StringRef RuntimeName(CLANG_DEFAULT_OPENMP_RUNTIME); const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ); if (A) RuntimeName = A->getValue(); auto RT = llvm::StringSwitch(RuntimeName) .Case("libomp", OMPRT_OMP) .Case("libgomp", OMPRT_GOMP) .Case("libiomp5", OMPRT_IOMP5) .Default(OMPRT_Unknown); if (RT == OMPRT_Unknown) { if (A) TC.getDriver().Diag(diag::err_drv_unsupported_option_argument) << A->getOption().getName() << A->getValue(); else // FIXME: We could use a nicer diagnostic here. 
TC.getDriver().Diag(diag::err_drv_unsupported_opt) << "-fopenmp"; } return RT; } +static void addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC, + const ArgList &Args) { + if (!Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ, + options::OPT_fno_openmp, false)) + return; + + switch (getOpenMPRuntime(TC, Args)) { + case OMPRT_OMP: + CmdArgs.push_back("-lomp"); + break; + case OMPRT_GOMP: + CmdArgs.push_back("-lgomp"); + break; + case OMPRT_IOMP5: + CmdArgs.push_back("-liomp5"); + break; + case OMPRT_Unknown: + // Already diagnosed. + break; + } +} + static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args, ArgStringList &CmdArgs, StringRef Sanitizer, bool IsShared) { // Static runtimes must be forced into the executable, so we wrap them in // whole-archive. if (!IsShared) CmdArgs.push_back("-whole-archive"); CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, Sanitizer, IsShared))); if (!IsShared) CmdArgs.push_back("-no-whole-archive"); } // Tries to use a file with the list of dynamic symbols that need to be exported // from the runtime library. Returns true if the file was found. static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args, ArgStringList &CmdArgs, StringRef Sanitizer) { SmallString<128> SanRT = getCompilerRT(TC, Sanitizer); if (llvm::sys::fs::exists(SanRT + ".syms")) { CmdArgs.push_back(Args.MakeArgString("--dynamic-list=" + SanRT + ".syms")); return true; } return false; } static void linkSanitizerRuntimeDeps(const ToolChain &TC, ArgStringList &CmdArgs) { // Force linking against the system libraries the sanitizers depend on // (see PR15823 for why this is necessary). CmdArgs.push_back("--no-as-needed"); CmdArgs.push_back("-lpthread"); CmdArgs.push_back("-lrt"); CmdArgs.push_back("-lm"); // There's no libdl on FreeBSD. if (TC.getTriple().getOS() != llvm::Triple::FreeBSD) CmdArgs.push_back("-ldl"); } static void collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args, SmallVectorImpl<StringRef> &SharedRuntimes, SmallVectorImpl<StringRef> &StaticRuntimes, SmallVectorImpl<StringRef> &HelperStaticRuntimes) { const SanitizerArgs &SanArgs = TC.getSanitizerArgs(); // Collect shared runtimes. if (SanArgs.needsAsanRt() && SanArgs.needsSharedAsanRt()) { SharedRuntimes.push_back("asan"); } // Collect static runtimes. if (Args.hasArg(options::OPT_shared) || (TC.getTriple().getEnvironment() == llvm::Triple::Android)) { // Don't link static runtimes into DSOs or if compiling for Android. return; } if (SanArgs.needsAsanRt()) { if (SanArgs.needsSharedAsanRt()) { HelperStaticRuntimes.push_back("asan-preinit"); } else { StaticRuntimes.push_back("asan"); if (SanArgs.linkCXXRuntimes()) StaticRuntimes.push_back("asan_cxx"); } } if (SanArgs.needsDfsanRt()) StaticRuntimes.push_back("dfsan"); if (SanArgs.needsLsanRt()) StaticRuntimes.push_back("lsan"); if (SanArgs.needsMsanRt()) { StaticRuntimes.push_back("msan"); if (SanArgs.linkCXXRuntimes()) StaticRuntimes.push_back("msan_cxx"); } if (SanArgs.needsTsanRt()) { StaticRuntimes.push_back("tsan"); if (SanArgs.linkCXXRuntimes()) StaticRuntimes.push_back("tsan_cxx"); } if (SanArgs.needsUbsanRt()) { StaticRuntimes.push_back("ubsan_standalone"); if (SanArgs.linkCXXRuntimes()) StaticRuntimes.push_back("ubsan_standalone_cxx"); } if (SanArgs.needsSafeStackRt()) StaticRuntimes.push_back("safestack"); } // Should be called before we add system libraries (C++ ABI, libstdc++/libc++, // C runtime, etc). Returns true if sanitizer system deps need to be linked in.
// Should be called before we add system libraries (C++ ABI, libstdc++/libc++,
// C runtime, etc). Returns true if sanitizer system deps need to be linked in.
static bool addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
                                 ArgStringList &CmdArgs) {
  SmallVector<StringRef, 4> SharedRuntimes, StaticRuntimes,
      HelperStaticRuntimes;
  collectSanitizerRuntimes(TC, Args, SharedRuntimes, StaticRuntimes,
                           HelperStaticRuntimes);
  for (auto RT : SharedRuntimes)
    addSanitizerRuntime(TC, Args, CmdArgs, RT, true);
  for (auto RT : HelperStaticRuntimes)
    addSanitizerRuntime(TC, Args, CmdArgs, RT, false);
  bool AddExportDynamic = false;
  for (auto RT : StaticRuntimes) {
    addSanitizerRuntime(TC, Args, CmdArgs, RT, false);
    AddExportDynamic |= !addSanitizerDynamicList(TC, Args, CmdArgs, RT);
  }
  // If there is a static runtime with no dynamic list, force all the symbols
  // to be dynamic to be sure we export sanitizer interface functions.
  if (AddExportDynamic)
    CmdArgs.push_back("-export-dynamic");
  return !StaticRuntimes.empty();
}

static bool areOptimizationsEnabled(const ArgList &Args) {
  // Find the last -O arg and see if it is non-zero.
  if (Arg *A = Args.getLastArg(options::OPT_O_Group))
    return !A->getOption().matches(options::OPT_O0);
  // Defaults to -O0.
  return false;
}

static bool shouldUseFramePointerForTarget(const ArgList &Args,
                                           const llvm::Triple &Triple) {
  // XCore never wants frame pointers, regardless of OS.
  if (Triple.getArch() == llvm::Triple::xcore) {
    return false;
  }

  if (Triple.isOSLinux()) {
    switch (Triple.getArch()) {
    // Don't use a frame pointer on linux if optimizing for certain targets.
    case llvm::Triple::mips64:
    case llvm::Triple::mips64el:
    case llvm::Triple::mips:
    case llvm::Triple::mipsel:
    case llvm::Triple::systemz:
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      return !areOptimizationsEnabled(Args);
    default:
      return true;
    }
  }

  if (Triple.isOSWindows()) {
    switch (Triple.getArch()) {
    case llvm::Triple::x86:
      return !areOptimizationsEnabled(Args);
    default:
      // All other supported Windows ISAs use xdata unwind information, so
      // frame pointers are not generally useful.
      return false;
    }
  }

  return true;
}

static bool shouldUseFramePointer(const ArgList &Args,
                                  const llvm::Triple &Triple) {
  if (Arg *A = Args.getLastArg(options::OPT_fno_omit_frame_pointer,
                               options::OPT_fomit_frame_pointer))
    return A->getOption().matches(options::OPT_fno_omit_frame_pointer);

  return shouldUseFramePointerForTarget(Args, Triple);
}

static bool shouldUseLeafFramePointer(const ArgList &Args,
                                      const llvm::Triple &Triple) {
  if (Arg *A = Args.getLastArg(options::OPT_mno_omit_leaf_frame_pointer,
                               options::OPT_momit_leaf_frame_pointer))
    return A->getOption().matches(options::OPT_mno_omit_leaf_frame_pointer);

  if (Triple.isPS4CPU())
    return false;

  return shouldUseFramePointerForTarget(Args, Triple);
}
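// For instance (illustrative): "clang -O2" on x86-64 Linux omits the frame
// pointer (no -mdisable-fp-elim is passed), while "-O0", or any optimization
// level on an architecture not listed above (e.g. AArch64 Linux), keeps it.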
/// Add a CC1 option to specify the debug compilation directory.
static void addDebugCompDirArg(const ArgList &Args, ArgStringList &CmdArgs) {
  SmallString<128> cwd;
  if (!llvm::sys::fs::current_path(cwd)) {
    CmdArgs.push_back("-fdebug-compilation-dir");
    CmdArgs.push_back(Args.MakeArgString(cwd));
  }
}

static const char *SplitDebugName(const ArgList &Args, const InputInfo &Input) {
  Arg *FinalOutput = Args.getLastArg(options::OPT_o);
  if (FinalOutput && Args.hasArg(options::OPT_c)) {
    SmallString<128> T(FinalOutput->getValue());
    llvm::sys::path::replace_extension(T, "dwo");
    return Args.MakeArgString(T);
  } else {
    // Use the compilation dir.
    SmallString<128> T(
        Args.getLastArgValue(options::OPT_fdebug_compilation_dir));
    SmallString<128> F(llvm::sys::path::stem(Input.getBaseInput()));
    llvm::sys::path::replace_extension(F, "dwo");
    T += F;
    return Args.MakeArgString(F);
  }
}

static void SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
                           const JobAction &JA, const ArgList &Args,
                           const InputInfo &Output, const char *OutFile) {
  ArgStringList ExtractArgs;
  ExtractArgs.push_back("--extract-dwo");

  ArgStringList StripArgs;
  StripArgs.push_back("--strip-dwo");

  // Grabbing the output of the earlier compile step.
  StripArgs.push_back(Output.getFilename());
  ExtractArgs.push_back(Output.getFilename());
  ExtractArgs.push_back(OutFile);

  const char *Exec = Args.MakeArgString(TC.GetProgramPath("objcopy"));

  // First extract the dwo sections.
  C.addCommand(llvm::make_unique<Command>(JA, T, Exec, ExtractArgs));

  // Then remove them from the original .o file.
  C.addCommand(llvm::make_unique<Command>(JA, T, Exec, StripArgs));
}

/// \brief Vectorize at all optimization levels greater than 1 except for -Oz.
/// For -Oz the loop vectorizer is disabled, while the slp vectorizer is
/// enabled.
static bool shouldEnableVectorizerAtOLevel(const ArgList &Args, bool isSlpVec) {
  if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
    if (A->getOption().matches(options::OPT_O4) ||
        A->getOption().matches(options::OPT_Ofast))
      return true;

    if (A->getOption().matches(options::OPT_O0))
      return false;

    assert(A->getOption().matches(options::OPT_O) && "Must have a -O flag");

    // Vectorize -Os.
    StringRef S(A->getValue());
    if (S == "s")
      return true;

    // Don't vectorize -Oz, unless it's the slp vectorizer.
    if (S == "z")
      return isSlpVec;

    unsigned OptLevel = 0;
    if (S.getAsInteger(10, OptLevel))
      return false;

    return OptLevel > 1;
  }

  return false;
}

/// Add -x lang to \p CmdArgs for \p Input.
static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
                             ArgStringList &CmdArgs) {
  // When using -verify-pch, we don't want to provide the type
  // 'precompiled-header' if it was inferred from the file extension
  if (Args.hasArg(options::OPT_verify_pch) && Input.getType() == types::TY_PCH)
    return;

  CmdArgs.push_back("-x");
  if (Args.hasArg(options::OPT_rewrite_objc))
    CmdArgs.push_back(types::getTypeName(types::TY_PP_ObjCXX));
  else
    CmdArgs.push_back(types::getTypeName(Input.getType()));
}

static VersionTuple getMSCompatibilityVersion(unsigned Version) {
  if (Version < 100)
    return VersionTuple(Version);

  if (Version < 10000)
    return VersionTuple(Version / 100, Version % 100);

  unsigned Build = 0, Factor = 1;
  for (; Version > 10000; Version = Version / 10, Factor = Factor * 10)
    Build = Build + (Version % 10) * Factor;
  return VersionTuple(Version / 100, Version % 100, Build);
}
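// Worked examples (illustrative): -fmsc-version=1800 maps to 18.0, and an
// _MSC_FULL_VER-style value such as -fmsc-version=180030723 peels the
// trailing digits into the build number, yielding 18.0.30723.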
// Claim options we don't want to warn if they are unused. We do this for
// options that build systems might add but are unused when assembling or only
// running the preprocessor for example.
static void claimNoWarnArgs(const ArgList &Args) {
  // Don't warn about unused -f(no-)?lto. This can happen when we're
  // preprocessing, precompiling or assembling.
  Args.ClaimAllArgs(options::OPT_flto);
  Args.ClaimAllArgs(options::OPT_fno_lto);
}

static void appendUserToPath(SmallVectorImpl<char> &Result) {
#ifdef LLVM_ON_UNIX
  const char *Username = getenv("LOGNAME");
#else
  const char *Username = getenv("USERNAME");
#endif
  if (Username) {
    // Validate that the username can be used in a path, and get its length.
    size_t Len = 0;
    for (const char *P = Username; *P; ++P, ++Len) {
      if (!isAlphanumeric(*P) && *P != '_') {
        Username = nullptr;
        break;
      }
    }

    if (Username && Len > 0) {
      Result.append(Username, Username + Len);
      return;
    }
  }

  // Fall back to the user id.
#ifdef LLVM_ON_UNIX
  std::string UID = llvm::utostr(getuid());
#else
  // FIXME: Windows seems to have an 'SID' that might work.
  std::string UID = "9999";
#endif
  Result.append(UID.begin(), UID.end());
}

VersionTuple visualstudio::getMSVCVersion(const Driver *D,
                                          const llvm::Triple &Triple,
                                          const llvm::opt::ArgList &Args,
                                          bool IsWindowsMSVC) {
  if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
                   IsWindowsMSVC) ||
      Args.hasArg(options::OPT_fmsc_version) ||
      Args.hasArg(options::OPT_fms_compatibility_version)) {
    const Arg *MSCVersion = Args.getLastArg(options::OPT_fmsc_version);
    const Arg *MSCompatibilityVersion =
        Args.getLastArg(options::OPT_fms_compatibility_version);

    if (MSCVersion && MSCompatibilityVersion) {
      if (D)
        D->Diag(diag::err_drv_argument_not_allowed_with)
            << MSCVersion->getAsString(Args)
            << MSCompatibilityVersion->getAsString(Args);
      return VersionTuple();
    }

    if (MSCompatibilityVersion) {
      VersionTuple MSVT;
      if (MSVT.tryParse(MSCompatibilityVersion->getValue()) && D)
        D->Diag(diag::err_drv_invalid_value)
            << MSCompatibilityVersion->getAsString(Args)
            << MSCompatibilityVersion->getValue();
      return MSVT;
    }

    if (MSCVersion) {
      unsigned Version = 0;
      if (StringRef(MSCVersion->getValue()).getAsInteger(10, Version) && D)
        D->Diag(diag::err_drv_invalid_value) << MSCVersion->getAsString(Args)
                                             << MSCVersion->getValue();
      return getMSCompatibilityVersion(Version);
    }

    unsigned Major, Minor, Micro;
    Triple.getEnvironmentVersion(Major, Minor, Micro);
    if (Major || Minor || Micro)
      return VersionTuple(Major, Minor, Micro);

    return VersionTuple(18);
  }
  return VersionTuple();
}

static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
                                   const InputInfo &Output, const ArgList &Args,
                                   ArgStringList &CmdArgs) {
  auto *ProfileGenerateArg = Args.getLastArg(
      options::OPT_fprofile_instr_generate,
      options::OPT_fprofile_instr_generate_EQ, options::OPT_fprofile_generate,
      options::OPT_fprofile_generate_EQ);

  auto *ProfileUseArg = Args.getLastArg(
      options::OPT_fprofile_instr_use, options::OPT_fprofile_instr_use_EQ,
      options::OPT_fprofile_use, options::OPT_fprofile_use_EQ);

  if (ProfileGenerateArg && ProfileUseArg)
    D.Diag(diag::err_drv_argument_not_allowed_with)
        << ProfileGenerateArg->getSpelling() << ProfileUseArg->getSpelling();

  if (ProfileGenerateArg &&
      ProfileGenerateArg->getOption().matches(
          options::OPT_fprofile_instr_generate_EQ))
    ProfileGenerateArg->render(Args, CmdArgs);
  else if (ProfileGenerateArg &&
           ProfileGenerateArg->getOption().matches(
               options::OPT_fprofile_generate_EQ)) {
    SmallString<128> Path(ProfileGenerateArg->getValue());
    llvm::sys::path::append(Path, "default.profraw");
    CmdArgs.push_back(
        Args.MakeArgString(Twine("-fprofile-instr-generate=") + Path));
  } else
    Args.AddAllArgs(CmdArgs, options::OPT_fprofile_instr_generate);

  if (ProfileUseArg &&
      ProfileUseArg->getOption().matches(options::OPT_fprofile_instr_use_EQ))
    ProfileUseArg->render(Args, CmdArgs);
  else if (ProfileUseArg &&
           (ProfileUseArg->getOption().matches(options::OPT_fprofile_use_EQ) ||
            ProfileUseArg->getOption().matches(
                options::OPT_fprofile_instr_use))) {
    SmallString<128> Path(ProfileUseArg->getNumValues() == 0
                              ? ""
                              : ProfileUseArg->getValue());
    if (Path.empty() || llvm::sys::fs::is_directory(Path))
      llvm::sys::path::append(Path, "default.profdata");
    CmdArgs.push_back(
        Args.MakeArgString(Twine("-fprofile-instr-use=") + Path));
  }

  if (Args.hasArg(options::OPT_ftest_coverage) ||
      Args.hasArg(options::OPT_coverage))
    CmdArgs.push_back("-femit-coverage-notes");
  if (Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
                   false) ||
      Args.hasArg(options::OPT_coverage))
    CmdArgs.push_back("-femit-coverage-data");

  if (Args.hasArg(options::OPT_fcoverage_mapping) && !ProfileGenerateArg)
    D.Diag(diag::err_drv_argument_only_allowed_with)
        << "-fcoverage-mapping"
        << "-fprofile-instr-generate";

  if (Args.hasArg(options::OPT_fcoverage_mapping))
    CmdArgs.push_back("-fcoverage-mapping");

  if (C.getArgs().hasArg(options::OPT_c) ||
      C.getArgs().hasArg(options::OPT_S)) {
    if (Output.isFilename()) {
      CmdArgs.push_back("-coverage-file");
      SmallString<128> CoverageFilename;
      if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o)) {
        CoverageFilename = FinalOutput->getValue();
      } else {
        CoverageFilename = llvm::sys::path::filename(Output.getBaseInput());
      }
      if (llvm::sys::path::is_relative(CoverageFilename)) {
        SmallString<128> Pwd;
        if (!llvm::sys::fs::current_path(Pwd)) {
          llvm::sys::path::append(Pwd, CoverageFilename);
          CoverageFilename.swap(Pwd);
        }
      }
      CmdArgs.push_back(Args.MakeArgString(CoverageFilename));
    }
  }
}

void Clang::ConstructJob(Compilation &C, const JobAction &JA,
                         const InputInfo &Output, const InputInfoList &Inputs,
                         const ArgList &Args, const char *LinkingOutput) const {
  bool KernelOrKext =
      Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
  const Driver &D = getToolChain().getDriver();
  ArgStringList CmdArgs;

  bool IsWindowsGNU = getToolChain().getTriple().isWindowsGNUEnvironment();
  bool IsWindowsCygnus =
      getToolChain().getTriple().isWindowsCygwinEnvironment();
  bool IsWindowsMSVC = getToolChain().getTriple().isWindowsMSVCEnvironment();

  // Check number of inputs for sanity. We need at least one input.
  assert(Inputs.size() >= 1 && "Must have at least one input.");
  const InputInfo &Input = Inputs[0];
  // CUDA compilation may have multiple inputs (source file + results of
  // device-side compilations). All other jobs are expected to have exactly one
  // input.
  bool IsCuda = types::isCuda(Input.getType());
  assert((IsCuda || Inputs.size() == 1) && "Unable to handle multiple inputs.");

  // Invoke ourselves in -cc1 mode.
  //
  // FIXME: Implement custom jobs for internal actions.
  CmdArgs.push_back("-cc1");

  // Add the "effective" target triple.
  CmdArgs.push_back("-triple");
  std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
  CmdArgs.push_back(Args.MakeArgString(TripleStr));

  const llvm::Triple TT(TripleStr);
  if (TT.isOSWindows() && (TT.getArch() == llvm::Triple::arm ||
                           TT.getArch() == llvm::Triple::thumb)) {
    unsigned Offset = TT.getArch() == llvm::Triple::arm ? 4 : 6;
    unsigned Version;
    TT.getArchName().substr(Offset).getAsInteger(10, Version);
    if (Version < 7)
      D.Diag(diag::err_target_unsupported_arch) << TT.getArchName()
                                                << TripleStr;
  }

  // Push all default warning arguments that are specific to
  // the given target. These come before user provided warning options
  // are provided.
  getToolChain().addClangWarningOptions(CmdArgs);
  // Select the appropriate action.
  RewriteKind rewriteKind = RK_None;

  if (isa<AnalyzeJobAction>(JA)) {
    assert(JA.getType() == types::TY_Plist && "Invalid output type.");
    CmdArgs.push_back("-analyze");
  } else if (isa<MigrateJobAction>(JA)) {
    CmdArgs.push_back("-migrate");
  } else if (isa<PreprocessJobAction>(JA)) {
    if (Output.getType() == types::TY_Dependencies)
      CmdArgs.push_back("-Eonly");
    else {
      CmdArgs.push_back("-E");
      if (Args.hasArg(options::OPT_rewrite_objc) &&
          !Args.hasArg(options::OPT_g_Group))
        CmdArgs.push_back("-P");
    }
  } else if (isa<AssembleJobAction>(JA)) {
    CmdArgs.push_back("-emit-obj");

    CollectArgsForIntegratedAssembler(C, Args, CmdArgs, D);

    // Also ignore explicit -force_cpusubtype_ALL option.
    (void)Args.hasArg(options::OPT_force__cpusubtype__ALL);
  } else if (isa<PrecompileJobAction>(JA)) {
    // Use PCH if the user requested it.
    bool UsePCH = D.CCCUsePCH;

    if (JA.getType() == types::TY_Nothing)
      CmdArgs.push_back("-fsyntax-only");
    else if (UsePCH)
      CmdArgs.push_back("-emit-pch");
    else
      CmdArgs.push_back("-emit-pth");
  } else if (isa<VerifyPCHJobAction>(JA)) {
    CmdArgs.push_back("-verify-pch");
  } else {
    assert((isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)) &&
           "Invalid action for clang tool.");

    if (JA.getType() == types::TY_LTO_IR || JA.getType() == types::TY_LTO_BC) {
      CmdArgs.push_back("-flto");
    }
    if (JA.getType() == types::TY_Nothing) {
      CmdArgs.push_back("-fsyntax-only");
    } else if (JA.getType() == types::TY_LLVM_IR ||
               JA.getType() == types::TY_LTO_IR) {
      CmdArgs.push_back("-emit-llvm");
    } else if (JA.getType() == types::TY_LLVM_BC ||
               JA.getType() == types::TY_LTO_BC) {
      CmdArgs.push_back("-emit-llvm-bc");
    } else if (JA.getType() == types::TY_PP_Asm) {
      CmdArgs.push_back("-S");
    } else if (JA.getType() == types::TY_AST) {
      CmdArgs.push_back("-emit-pch");
    } else if (JA.getType() == types::TY_ModuleFile) {
      CmdArgs.push_back("-module-file-info");
    } else if (JA.getType() == types::TY_RewrittenObjC) {
      CmdArgs.push_back("-rewrite-objc");
      rewriteKind = RK_NonFragile;
    } else if (JA.getType() == types::TY_RewrittenLegacyObjC) {
      CmdArgs.push_back("-rewrite-objc");
      rewriteKind = RK_Fragile;
    } else {
      assert(JA.getType() == types::TY_PP_Asm && "Unexpected output type!");
    }

    // Preserve use-list order by default when emitting bitcode, so that
    // loading the bitcode up in 'opt' or 'llc' and running passes gives the
    // same result as running passes here. For LTO, we don't need to preserve
    // the use-list order, since serialization to bitcode is part of the flow.
    if (JA.getType() == types::TY_LLVM_BC)
      CmdArgs.push_back("-emit-llvm-uselists");
  }
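  // For reference (illustrative): with the integrated assembler, "clang -c"
  // reaches the AssembleJobAction branch and passes "-emit-obj"; "clang -S"
  // is a CompileJobAction producing TY_PP_Asm and passes "-S"; "clang -E" is
  // a PreprocessJobAction passing "-E"; and "clang --analyze" passes
  // "-analyze".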
  // We normally speed up the clang process a bit by skipping destructors at
  // exit, but when we're generating diagnostics we can rely on some of the
  // cleanup.
  if (!C.isForDiagnostics())
    CmdArgs.push_back("-disable-free");

  // Disable the verification pass in -asserts builds.
#ifdef NDEBUG
  CmdArgs.push_back("-disable-llvm-verifier");
#endif

  // Set the main file name, so that debug info works even with
  // -save-temps.
  CmdArgs.push_back("-main-file-name");
  CmdArgs.push_back(getBaseInputName(Args, Input));

  // Some flags which affect the language (via preprocessor
  // defines).
  if (Args.hasArg(options::OPT_static))
    CmdArgs.push_back("-static-define");

  if (isa<AnalyzeJobAction>(JA)) {
    // Enable region store model by default.
    CmdArgs.push_back("-analyzer-store=region");

    // Treat blocks as analysis entry points.
    CmdArgs.push_back("-analyzer-opt-analyze-nested-blocks");

    CmdArgs.push_back("-analyzer-eagerly-assume");

    // Add default argument set.
    if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
      CmdArgs.push_back("-analyzer-checker=core");

      if (!IsWindowsMSVC)
        CmdArgs.push_back("-analyzer-checker=unix");

      if (getToolChain().getTriple().getVendor() == llvm::Triple::Apple)
        CmdArgs.push_back("-analyzer-checker=osx");

      CmdArgs.push_back("-analyzer-checker=deadcode");

      if (types::isCXX(Input.getType()))
        CmdArgs.push_back("-analyzer-checker=cplusplus");

      // Enable the following experimental checkers for testing.
      CmdArgs.push_back(
          "-analyzer-checker=security.insecureAPI.UncheckedReturn");
      CmdArgs.push_back("-analyzer-checker=security.insecureAPI.getpw");
      CmdArgs.push_back("-analyzer-checker=security.insecureAPI.gets");
      CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mktemp");
      CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mkstemp");
      CmdArgs.push_back("-analyzer-checker=security.insecureAPI.vfork");
    }

    // Set the output format. The default is plist, for (lame) historical
    // reasons.
    CmdArgs.push_back("-analyzer-output");
    if (Arg *A = Args.getLastArg(options::OPT__analyzer_output))
      CmdArgs.push_back(A->getValue());
    else
      CmdArgs.push_back("plist");

    // Disable the presentation of standard compiler warnings when
    // using --analyze. We only want to show static analyzer diagnostics
    // or frontend errors.
    CmdArgs.push_back("-w");

    // Add -Xanalyzer arguments when running as analyzer.
    Args.AddAllArgValues(CmdArgs, options::OPT_Xanalyzer);
  }

  CheckCodeGenerationOptions(D, Args);

  bool PIE = getToolChain().isPIEDefault();
  bool PIC = PIE || getToolChain().isPICDefault();
  bool IsPICLevelTwo = PIC;

  // Android-specific defaults for PIC/PIE
  if (getToolChain().getTriple().getEnvironment() == llvm::Triple::Android) {
    switch (getToolChain().getArch()) {
    case llvm::Triple::arm:
    case llvm::Triple::armeb:
    case llvm::Triple::thumb:
    case llvm::Triple::thumbeb:
    case llvm::Triple::aarch64:
    case llvm::Triple::mips:
    case llvm::Triple::mipsel:
    case llvm::Triple::mips64:
    case llvm::Triple::mips64el:
      PIC = true; // "-fpic"
      break;

    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      PIC = true; // "-fPIC"
      IsPICLevelTwo = true;
      break;

    default:
      break;
    }
  }

  // OpenBSD-specific defaults for PIE
  if (getToolChain().getTriple().getOS() == llvm::Triple::OpenBSD) {
    switch (getToolChain().getArch()) {
    case llvm::Triple::mips64:
    case llvm::Triple::mips64el:
    case llvm::Triple::sparcel:
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      IsPICLevelTwo = false; // "-fpie"
      break;

    case llvm::Triple::ppc:
    case llvm::Triple::sparc:
    case llvm::Triple::sparcv9:
      IsPICLevelTwo = true; // "-fPIE"
      break;

    default:
      break;
    }
  }

  // For the PIC and PIE flag options, this logic is different from the
  // legacy logic in very old versions of GCC, as that logic was just
  // a bug no one had ever fixed. This logic is both more rational and
  // consistent with GCC's new logic now that the bugs are fixed. The last
  // argument relating to either PIC or PIE wins, and no other argument is
  // used. If the last argument is any flavor of the '-fno-...' arguments,
  // both PIC and PIE are disabled. Any PIE option implicitly enables PIC
  // at the same level.
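  // For example (illustrative): "-fPIC -fpie" ends up as PIE at level 1
  // ("-pic-level 1 -pie-level 1"), "-fpie -fPIC" as PIC at level 2 with no
  // PIE, and a trailing "-fno-pic" disables both.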
  Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
                                    options::OPT_fpic, options::OPT_fno_pic,
                                    options::OPT_fPIE, options::OPT_fno_PIE,
                                    options::OPT_fpie, options::OPT_fno_pie);

  // Check whether the tool chain trumps the PIC-ness decision. If the PIC-ness
  // is forced, then neither PIC nor PIE flags will have any effect.
  if (!getToolChain().isPICDefaultForced()) {
    if (LastPICArg) {
      Option O = LastPICArg->getOption();
      if (O.matches(options::OPT_fPIC) || O.matches(options::OPT_fpic) ||
          O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie)) {
        PIE = O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie);
        PIC = PIE || O.matches(options::OPT_fPIC) ||
              O.matches(options::OPT_fpic);
        IsPICLevelTwo =
            O.matches(options::OPT_fPIE) || O.matches(options::OPT_fPIC);
      } else {
        PIE = PIC = false;
      }
    }
  }

  // Introduce a Darwin-specific hack. If the default is PIC but the flags
  // specified while enabling PIC enabled level 1 PIC, just force it back to
  // level 2 PIC instead. This matches the behavior of Darwin GCC (based on my
  // informal testing).
  if (PIC && getToolChain().getTriple().isOSDarwin())
    IsPICLevelTwo |= getToolChain().isPICDefault();

  // Note that these flags are trump-cards. Regardless of the order w.r.t. the
  // PIC or PIE options above, if these show up, PIC is disabled.
  llvm::Triple Triple(TripleStr);
  if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6)))
    PIC = PIE = false;
  if (Args.hasArg(options::OPT_static))
    PIC = PIE = false;

  if (Arg *A = Args.getLastArg(options::OPT_mdynamic_no_pic)) {
    // This is a very special mode. It trumps the other modes, almost no one
    // uses it, and it isn't even valid on any OS but Darwin.
    if (!getToolChain().getTriple().isOSDarwin())
      D.Diag(diag::err_drv_unsupported_opt_for_target)
          << A->getSpelling() << getToolChain().getTriple().str();

    // FIXME: Warn when this flag trumps some other PIC or PIE flag.

    CmdArgs.push_back("-mrelocation-model");
    CmdArgs.push_back("dynamic-no-pic");

    // Only a forced PIC mode can cause the actual compile to have PIC defines
    // etc., no flags are sufficient. This behavior was selected to closely
    // match that of llvm-gcc and Apple GCC before that.
    if (getToolChain().isPICDefault() && getToolChain().isPICDefaultForced()) {
      CmdArgs.push_back("-pic-level");
      CmdArgs.push_back("2");
    }
  } else {
    // Currently, LLVM only knows about PIC vs. static; the PIE differences are
    // handled in Clang's IRGen by the -pie-level flag.
    CmdArgs.push_back("-mrelocation-model");
    CmdArgs.push_back(PIC ? "pic" : "static");

    if (PIC) {
      CmdArgs.push_back("-pic-level");
      CmdArgs.push_back(IsPICLevelTwo ? "2" : "1");
      if (PIE) {
        CmdArgs.push_back("-pie-level");
        CmdArgs.push_back(IsPICLevelTwo ? "2" : "1");
      }
    }
  }

  CmdArgs.push_back("-mthread-model");
  if (Arg *A = Args.getLastArg(options::OPT_mthread_model))
    CmdArgs.push_back(A->getValue());
  else
    CmdArgs.push_back(Args.MakeArgString(getToolChain().getThreadModel()));

  Args.AddLastArg(CmdArgs, options::OPT_fveclib);

  if (!Args.hasFlag(options::OPT_fmerge_all_constants,
                    options::OPT_fno_merge_all_constants))
    CmdArgs.push_back("-fno-merge-all-constants");

  // LLVM Code Generator Options.
  if (Args.hasArg(options::OPT_frewrite_map_file) ||
      Args.hasArg(options::OPT_frewrite_map_file_EQ)) {
    for (const Arg *A : Args.filtered(options::OPT_frewrite_map_file,
                                      options::OPT_frewrite_map_file_EQ)) {
      CmdArgs.push_back("-frewrite-map-file");
      CmdArgs.push_back(A->getValue());
      A->claim();
    }
  }

  if (Arg *A = Args.getLastArg(options::OPT_Wframe_larger_than_EQ)) {
    StringRef v = A->getValue();
    CmdArgs.push_back("-mllvm");
    CmdArgs.push_back(Args.MakeArgString("-warn-stack-size=" + v));
    A->claim();
  }

  if (Arg *A = Args.getLastArg(options::OPT_mregparm_EQ)) {
    CmdArgs.push_back("-mregparm");
    CmdArgs.push_back(A->getValue());
  }

  if (Arg *A = Args.getLastArg(options::OPT_fpcc_struct_return,
                               options::OPT_freg_struct_return)) {
    if (getToolChain().getArch() != llvm::Triple::x86) {
      D.Diag(diag::err_drv_unsupported_opt_for_target)
          << A->getSpelling() << getToolChain().getTriple().str();
    } else if (A->getOption().matches(options::OPT_fpcc_struct_return)) {
      CmdArgs.push_back("-fpcc-struct-return");
    } else {
      assert(A->getOption().matches(options::OPT_freg_struct_return));
      CmdArgs.push_back("-freg-struct-return");
    }
  }

  if (Args.hasFlag(options::OPT_mrtd, options::OPT_mno_rtd, false))
    CmdArgs.push_back("-mrtd");

  if (shouldUseFramePointer(Args, getToolChain().getTriple()))
    CmdArgs.push_back("-mdisable-fp-elim");
  if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
                    options::OPT_fno_zero_initialized_in_bss))
    CmdArgs.push_back("-mno-zero-initialized-in-bss");

  bool OFastEnabled = isOptimizationLevelFast(Args);
  // If -Ofast is the optimization level, then -fstrict-aliasing should be
  // enabled.  This alias option is being used to simplify the hasFlag logic.
  OptSpecifier StrictAliasingAliasOption =
      OFastEnabled ? options::OPT_Ofast : options::OPT_fstrict_aliasing;
  // We turn strict aliasing off by default if we're in CL mode, since MSVC
  // doesn't do any TBAA.
  bool TBAAOnByDefault = !getToolChain().getDriver().IsCLMode();
  if (!Args.hasFlag(options::OPT_fstrict_aliasing, StrictAliasingAliasOption,
                    options::OPT_fno_strict_aliasing, TBAAOnByDefault))
    CmdArgs.push_back("-relaxed-aliasing");
  if (!Args.hasFlag(options::OPT_fstruct_path_tbaa,
                    options::OPT_fno_struct_path_tbaa))
    CmdArgs.push_back("-no-struct-path-tbaa");
  if (Args.hasFlag(options::OPT_fstrict_enums, options::OPT_fno_strict_enums,
                   false))
    CmdArgs.push_back("-fstrict-enums");
  if (!Args.hasFlag(options::OPT_foptimize_sibling_calls,
                    options::OPT_fno_optimize_sibling_calls))
    CmdArgs.push_back("-mdisable-tail-calls");

  // Handle segmented stacks.
  if (Args.hasArg(options::OPT_fsplit_stack))
    CmdArgs.push_back("-split-stacks");

  // If -Ofast is the optimization level, then -ffast-math should be enabled.
  // This alias option is being used to simplify the getLastArg logic.
  OptSpecifier FastMathAliasOption =
      OFastEnabled ? options::OPT_Ofast : options::OPT_ffast_math;

  // Handle various floating point optimization flags, mapping them to the
  // appropriate LLVM code generation flags. The pattern for all of these is to
  // default off the codegen optimizations, and if any flag enables them and no
  // flag disables them after the flag enabling them, enable the codegen
  // optimization. This is complicated by several "umbrella" flags.
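  // For example (illustrative): "-ffast-math -fhonor-infinities" leaves
  // infinities honored (no -menable-no-infs is passed) while still passing
  // -menable-no-nans, because the later, more specific flag only re-enables
  // infinity handling.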
  if (Arg *A = Args.getLastArg(
          options::OPT_ffast_math, FastMathAliasOption,
          options::OPT_fno_fast_math, options::OPT_ffinite_math_only,
          options::OPT_fno_finite_math_only, options::OPT_fhonor_infinities,
          options::OPT_fno_honor_infinities))
    if (A->getOption().getID() != options::OPT_fno_fast_math &&
        A->getOption().getID() != options::OPT_fno_finite_math_only &&
        A->getOption().getID() != options::OPT_fhonor_infinities)
      CmdArgs.push_back("-menable-no-infs");

  if (Arg *A = Args.getLastArg(
          options::OPT_ffast_math, FastMathAliasOption,
          options::OPT_fno_fast_math, options::OPT_ffinite_math_only,
          options::OPT_fno_finite_math_only, options::OPT_fhonor_nans,
          options::OPT_fno_honor_nans))
    if (A->getOption().getID() != options::OPT_fno_fast_math &&
        A->getOption().getID() != options::OPT_fno_finite_math_only &&
        A->getOption().getID() != options::OPT_fhonor_nans)
      CmdArgs.push_back("-menable-no-nans");

  // -fmath-errno is the default on some platforms, e.g. BSD-derived OSes.
  bool MathErrno = getToolChain().IsMathErrnoDefault();
  if (Arg *A =
          Args.getLastArg(options::OPT_ffast_math, FastMathAliasOption,
                          options::OPT_fno_fast_math, options::OPT_fmath_errno,
                          options::OPT_fno_math_errno)) {
    // Turning on -ffast-math (with either flag) removes the need for
    // MathErrno. However, turning *off* -ffast-math merely restores the
    // toolchain default (which may be false).
    if (A->getOption().getID() == options::OPT_fno_math_errno ||
        A->getOption().getID() == options::OPT_ffast_math ||
        A->getOption().getID() == options::OPT_Ofast)
      MathErrno = false;
    else if (A->getOption().getID() == options::OPT_fmath_errno)
      MathErrno = true;
  }
  if (MathErrno)
    CmdArgs.push_back("-fmath-errno");

  // There are several flags which require disabling very specific
  // optimizations. Any of these being disabled forces us to turn off the
  // entire set of LLVM optimizations, so collect them through all the flag
  // madness.
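  // For example (illustrative): "-funsafe-math-optimizations" alone turns on
  // associative and reciprocal math and turns off signed zeros and trapping
  // math, but -menable-unsafe-fp-math is only emitted below if math-errno is
  // also off (e.g. via -fno-math-errno or -ffast-math).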
  bool AssociativeMath = false;
  if (Arg *A = Args.getLastArg(
          options::OPT_ffast_math, FastMathAliasOption,
          options::OPT_fno_fast_math, options::OPT_funsafe_math_optimizations,
          options::OPT_fno_unsafe_math_optimizations,
          options::OPT_fassociative_math, options::OPT_fno_associative_math))
    if (A->getOption().getID() != options::OPT_fno_fast_math &&
        A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
        A->getOption().getID() != options::OPT_fno_associative_math)
      AssociativeMath = true;
  bool ReciprocalMath = false;
  if (Arg *A = Args.getLastArg(
          options::OPT_ffast_math, FastMathAliasOption,
          options::OPT_fno_fast_math, options::OPT_funsafe_math_optimizations,
          options::OPT_fno_unsafe_math_optimizations,
          options::OPT_freciprocal_math, options::OPT_fno_reciprocal_math))
    if (A->getOption().getID() != options::OPT_fno_fast_math &&
        A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
        A->getOption().getID() != options::OPT_fno_reciprocal_math)
      ReciprocalMath = true;
  bool SignedZeros = true;
  if (Arg *A = Args.getLastArg(
          options::OPT_ffast_math, FastMathAliasOption,
          options::OPT_fno_fast_math, options::OPT_funsafe_math_optimizations,
          options::OPT_fno_unsafe_math_optimizations,
          options::OPT_fsigned_zeros, options::OPT_fno_signed_zeros))
    if (A->getOption().getID() != options::OPT_fno_fast_math &&
        A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
        A->getOption().getID() != options::OPT_fsigned_zeros)
      SignedZeros = false;
  bool TrappingMath = true;
  if (Arg *A = Args.getLastArg(
          options::OPT_ffast_math, FastMathAliasOption,
          options::OPT_fno_fast_math, options::OPT_funsafe_math_optimizations,
          options::OPT_fno_unsafe_math_optimizations,
          options::OPT_ftrapping_math, options::OPT_fno_trapping_math))
    if (A->getOption().getID() != options::OPT_fno_fast_math &&
        A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
        A->getOption().getID() != options::OPT_ftrapping_math)
      TrappingMath = false;
  if (!MathErrno && AssociativeMath && ReciprocalMath && !SignedZeros &&
      !TrappingMath)
    CmdArgs.push_back("-menable-unsafe-fp-math");

  if (!SignedZeros)
    CmdArgs.push_back("-fno-signed-zeros");

  if (ReciprocalMath)
    CmdArgs.push_back("-freciprocal-math");

  // Validate and pass through -fp-contract option.
  if (Arg *A =
          Args.getLastArg(options::OPT_ffast_math, FastMathAliasOption,
                          options::OPT_fno_fast_math,
                          options::OPT_ffp_contract)) {
    if (A->getOption().getID() == options::OPT_ffp_contract) {
      StringRef Val = A->getValue();
      if (Val == "fast" || Val == "on" || Val == "off") {
        CmdArgs.push_back(Args.MakeArgString("-ffp-contract=" + Val));
      } else {
        D.Diag(diag::err_drv_unsupported_option_argument)
            << A->getOption().getName() << Val;
      }
    } else if (A->getOption().matches(options::OPT_ffast_math) ||
               (OFastEnabled && A->getOption().matches(options::OPT_Ofast))) {
      // If fast-math is set then set the fp-contract mode to fast.
      CmdArgs.push_back(Args.MakeArgString("-ffp-contract=fast"));
    }
  }

  ParseMRecip(getToolChain().getDriver(), Args, CmdArgs);

  // We separately look for the '-ffast-math' and '-ffinite-math-only' flags,
  // and if we find them, tell the frontend to provide the appropriate
  // preprocessor macros. This is distinct from enabling any optimizations as
  // these options induce language changes which must survive serialization
  // and deserialization, etc.
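  // For example (illustrative): "-ffast-math" makes the frontend predefine
  // __FAST_MATH__, and "-ffinite-math-only" sets __FINITE_MATH_ONLY__ to 1;
  // both are language-visible and so must survive PCH/module serialization.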
  if (Arg *A = Args.getLastArg(options::OPT_ffast_math, FastMathAliasOption,
                               options::OPT_fno_fast_math))
    if (!A->getOption().matches(options::OPT_fno_fast_math))
      CmdArgs.push_back("-ffast-math");
  if (Arg *A = Args.getLastArg(options::OPT_ffinite_math_only,
                               options::OPT_fno_fast_math))
    if (A->getOption().matches(options::OPT_ffinite_math_only))
      CmdArgs.push_back("-ffinite-math-only");

  // Decide whether to use verbose asm. Verbose assembly is the default on
  // toolchains which have the integrated assembler on by default.
  bool IsIntegratedAssemblerDefault =
      getToolChain().IsIntegratedAssemblerDefault();
  if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
                   IsIntegratedAssemblerDefault) ||
      Args.hasArg(options::OPT_dA))
    CmdArgs.push_back("-masm-verbose");

  if (!Args.hasFlag(options::OPT_fintegrated_as,
                    options::OPT_fno_integrated_as,
                    IsIntegratedAssemblerDefault))
    CmdArgs.push_back("-no-integrated-as");

  if (Args.hasArg(options::OPT_fdebug_pass_structure)) {
    CmdArgs.push_back("-mdebug-pass");
    CmdArgs.push_back("Structure");
  }
  if (Args.hasArg(options::OPT_fdebug_pass_arguments)) {
    CmdArgs.push_back("-mdebug-pass");
    CmdArgs.push_back("Arguments");
  }

  // Enable -mconstructor-aliases except on darwin, where we have to work
  // around a linker bug.
  if (!getToolChain().getTriple().isOSDarwin())
    CmdArgs.push_back("-mconstructor-aliases");

  // Darwin's kernel doesn't support guard variables; just die if we
  // try to use them.
  if (KernelOrKext && getToolChain().getTriple().isOSDarwin())
    CmdArgs.push_back("-fforbid-guard-variables");

  if (Args.hasArg(options::OPT_mms_bitfields)) {
    CmdArgs.push_back("-mms-bitfields");
  }

  // This is a coarse approximation of what llvm-gcc actually does, both
  // -fasynchronous-unwind-tables and -fnon-call-exceptions interact in more
  // complicated ways.
  bool AsynchronousUnwindTables =
      Args.hasFlag(options::OPT_fasynchronous_unwind_tables,
                   options::OPT_fno_asynchronous_unwind_tables,
                   (getToolChain().IsUnwindTablesDefault() ||
                    getToolChain().getSanitizerArgs().needsUnwindTables()) &&
                       !KernelOrKext);
  if (Args.hasFlag(options::OPT_funwind_tables, options::OPT_fno_unwind_tables,
                   AsynchronousUnwindTables))
    CmdArgs.push_back("-munwind-tables");

  getToolChain().addClangTargetOptions(Args, CmdArgs);

  if (Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
    CmdArgs.push_back("-mlimit-float-precision");
    CmdArgs.push_back(A->getValue());
  }

  // FIXME: Handle -mtune=.
  (void)Args.hasArg(options::OPT_mtune_EQ);

  if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
    CmdArgs.push_back("-mcode-model");
    CmdArgs.push_back(A->getValue());
  }

  // Add the target cpu
  std::string CPU = getCPUName(Args, Triple, /*FromAs*/ false);
  if (!CPU.empty()) {
    CmdArgs.push_back("-target-cpu");
    CmdArgs.push_back(Args.MakeArgString(CPU));
  }

  if (const Arg *A = Args.getLastArg(options::OPT_mfpmath_EQ)) {
    CmdArgs.push_back("-mfpmath");
    CmdArgs.push_back(A->getValue());
  }

  // Add the target features
  getTargetFeatures(D, Triple, Args, CmdArgs, false);
  // Add target specific flags.
  switch (getToolChain().getArch()) {
  default:
    break;

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    AddARMTargetArgs(Args, CmdArgs, KernelOrKext);
    break;

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be:
    AddAArch64TargetArgs(Args, CmdArgs);
    break;

  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    AddMIPSTargetArgs(Args, CmdArgs);
    break;

  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    AddPPCTargetArgs(Args, CmdArgs);
    break;

  case llvm::Triple::sparc:
  case llvm::Triple::sparcel:
  case llvm::Triple::sparcv9:
    AddSparcTargetArgs(Args, CmdArgs);
    break;

  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    AddX86TargetArgs(Args, CmdArgs);
    break;

  case llvm::Triple::hexagon:
    AddHexagonTargetArgs(Args, CmdArgs);
    break;
  }

  // Add clang-cl arguments.
  if (getToolChain().getDriver().IsCLMode())
    AddClangCLArgs(Args, CmdArgs);

  // Pass the linker version in use.
  if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
    CmdArgs.push_back("-target-linker-version");
    CmdArgs.push_back(A->getValue());
  }

  if (!shouldUseLeafFramePointer(Args, getToolChain().getTriple()))
    CmdArgs.push_back("-momit-leaf-frame-pointer");

  // Explicitly error on some things we know we don't support and can't just
  // ignore.
  types::ID InputType = Input.getType();
  if (!Args.hasArg(options::OPT_fallow_unsupported)) {
    Arg *Unsupported;
    if (types::isCXX(InputType) && getToolChain().getTriple().isOSDarwin() &&
        getToolChain().getArch() == llvm::Triple::x86) {
      if ((Unsupported = Args.getLastArg(options::OPT_fapple_kext)) ||
          (Unsupported = Args.getLastArg(options::OPT_mkernel)))
        D.Diag(diag::err_drv_clang_unsupported_opt_cxx_darwin_i386)
            << Unsupported->getOption().getName();
    }
  }

  Args.AddAllArgs(CmdArgs, options::OPT_v);
  Args.AddLastArg(CmdArgs, options::OPT_H);
  if (D.CCPrintHeaders && !D.CCGenDiagnostics) {
    CmdArgs.push_back("-header-include-file");
    CmdArgs.push_back(D.CCPrintHeadersFilename ? D.CCPrintHeadersFilename
                                               : "-");
  }
  Args.AddLastArg(CmdArgs, options::OPT_P);
  Args.AddLastArg(CmdArgs, options::OPT_print_ivar_layout);

  if (D.CCLogDiagnostics && !D.CCGenDiagnostics) {
    CmdArgs.push_back("-diagnostic-log-file");
    CmdArgs.push_back(D.CCLogDiagnosticsFilename ? D.CCLogDiagnosticsFilename
                                                 : "-");
  }

  // Use the last option from "-g" group. "-gline-tables-only" and "-gdwarf-x"
  // are preserved, all other debug options are substituted with "-g".
  Args.ClaimAllArgs(options::OPT_g_Group);
  if (Arg *A = Args.getLastArg(options::OPT_g_Group)) {
    if (A->getOption().matches(options::OPT_gline_tables_only) ||
        A->getOption().matches(options::OPT_g1)) {
      // FIXME: we should support specifying dwarf version with
      // -gline-tables-only.
      CmdArgs.push_back("-gline-tables-only");
      // Default is dwarf-2 for Darwin, OpenBSD, FreeBSD and Solaris.
      const llvm::Triple &Triple = getToolChain().getTriple();
      if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::OpenBSD ||
          Triple.getOS() == llvm::Triple::FreeBSD ||
          Triple.getOS() == llvm::Triple::Solaris)
        CmdArgs.push_back("-gdwarf-2");
    } else if (A->getOption().matches(options::OPT_gdwarf_2))
      CmdArgs.push_back("-gdwarf-2");
    else if (A->getOption().matches(options::OPT_gdwarf_3))
      CmdArgs.push_back("-gdwarf-3");
    else if (A->getOption().matches(options::OPT_gdwarf_4))
      CmdArgs.push_back("-gdwarf-4");
    else if (!A->getOption().matches(options::OPT_g0) &&
             !A->getOption().matches(options::OPT_ggdb0)) {
      // Default is dwarf-2 for Darwin, OpenBSD, FreeBSD and Solaris.
      const llvm::Triple &Triple = getToolChain().getTriple();
      if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::OpenBSD ||
          Triple.getOS() == llvm::Triple::FreeBSD ||
          Triple.getOS() == llvm::Triple::Solaris)
        CmdArgs.push_back("-gdwarf-2");
      else
        CmdArgs.push_back("-g");
    }
  }

  // We ignore flags -gstrict-dwarf and -grecord-gcc-switches for now.
  Args.ClaimAllArgs(options::OPT_g_flags_Group);
  if (Args.hasFlag(options::OPT_gcolumn_info, options::OPT_gno_column_info,
                   /*Default*/ true))
    CmdArgs.push_back("-dwarf-column-info");

  // FIXME: Move backend command line options to the module.
  // -gsplit-dwarf should turn on -g and enable the backend dwarf
  // splitting and extraction.
  // FIXME: Currently only works on Linux.
  if (getToolChain().getTriple().isOSLinux() &&
      Args.hasArg(options::OPT_gsplit_dwarf)) {
    CmdArgs.push_back("-g");
    CmdArgs.push_back("-backend-option");
    CmdArgs.push_back("-split-dwarf=Enable");
  }

  // -ggnu-pubnames turns on gnu style pubnames in the backend.
  if (Args.hasArg(options::OPT_ggnu_pubnames)) {
    CmdArgs.push_back("-backend-option");
    CmdArgs.push_back("-generate-gnu-dwarf-pub-sections");
  }

  // -gdwarf-aranges turns on the emission of the aranges section in the
  // backend.
  if (Args.hasArg(options::OPT_gdwarf_aranges)) {
    CmdArgs.push_back("-backend-option");
    CmdArgs.push_back("-generate-arange-section");
  }

  if (Args.hasFlag(options::OPT_fdebug_types_section,
                   options::OPT_fno_debug_types_section, false)) {
    CmdArgs.push_back("-backend-option");
    CmdArgs.push_back("-generate-type-units");
  }

  // CloudABI uses -ffunction-sections and -fdata-sections by default.
  bool UseSeparateSections = Triple.getOS() == llvm::Triple::CloudABI;

  if (Args.hasFlag(options::OPT_ffunction_sections,
                   options::OPT_fno_function_sections, UseSeparateSections)) {
    CmdArgs.push_back("-ffunction-sections");
  }

  if (Args.hasFlag(options::OPT_fdata_sections, options::OPT_fno_data_sections,
                   UseSeparateSections)) {
    CmdArgs.push_back("-fdata-sections");
  }

  if (!Args.hasFlag(options::OPT_funique_section_names,
                    options::OPT_fno_unique_section_names, true))
    CmdArgs.push_back("-fno-unique-section-names");

  Args.AddAllArgs(CmdArgs, options::OPT_finstrument_functions);

  addPGOAndCoverageFlags(C, D, Output, Args, CmdArgs);

  // Pass options for controlling the default header search paths.
  if (Args.hasArg(options::OPT_nostdinc)) {
    CmdArgs.push_back("-nostdsysteminc");
    CmdArgs.push_back("-nobuiltininc");
  } else {
    if (Args.hasArg(options::OPT_nostdlibinc))
      CmdArgs.push_back("-nostdsysteminc");
    Args.AddLastArg(CmdArgs, options::OPT_nostdincxx);
    Args.AddLastArg(CmdArgs, options::OPT_nobuiltininc);
  }
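  // For example (illustrative): "clang -nostdinc" becomes "-nostdsysteminc
  // -nobuiltininc" at the cc1 level, while "-nostdlibinc" maps to
  // "-nostdsysteminc" alone, leaving the builtin headers available.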
  // Pass the path to compiler resource files.
  CmdArgs.push_back("-resource-dir");
  CmdArgs.push_back(D.ResourceDir.c_str());

  Args.AddLastArg(CmdArgs, options::OPT_working_directory);

  bool ARCMTEnabled = false;
  if (!Args.hasArg(options::OPT_fno_objc_arc, options::OPT_fobjc_arc)) {
    if (const Arg *A = Args.getLastArg(options::OPT_ccc_arcmt_check,
                                       options::OPT_ccc_arcmt_modify,
                                       options::OPT_ccc_arcmt_migrate)) {
      ARCMTEnabled = true;
      switch (A->getOption().getID()) {
      default:
        llvm_unreachable("missed a case");
      case options::OPT_ccc_arcmt_check:
        CmdArgs.push_back("-arcmt-check");
        break;
      case options::OPT_ccc_arcmt_modify:
        CmdArgs.push_back("-arcmt-modify");
        break;
      case options::OPT_ccc_arcmt_migrate:
        CmdArgs.push_back("-arcmt-migrate");
        CmdArgs.push_back("-mt-migrate-directory");
        CmdArgs.push_back(A->getValue());

        Args.AddLastArg(CmdArgs, options::OPT_arcmt_migrate_report_output);
        Args.AddLastArg(CmdArgs, options::OPT_arcmt_migrate_emit_arc_errors);
        break;
      }
    }
  } else {
    Args.ClaimAllArgs(options::OPT_ccc_arcmt_check);
    Args.ClaimAllArgs(options::OPT_ccc_arcmt_modify);
    Args.ClaimAllArgs(options::OPT_ccc_arcmt_migrate);
  }

  if (const Arg *A = Args.getLastArg(options::OPT_ccc_objcmt_migrate)) {
    if (ARCMTEnabled) {
      D.Diag(diag::err_drv_argument_not_allowed_with) << A->getAsString(Args)
                                                      << "-ccc-arcmt-migrate";
    }
    CmdArgs.push_back("-mt-migrate-directory");
    CmdArgs.push_back(A->getValue());

    if (!Args.hasArg(options::OPT_objcmt_migrate_literals,
                     options::OPT_objcmt_migrate_subscripting,
                     options::OPT_objcmt_migrate_property)) {
      // None specified, means enable them all.
      CmdArgs.push_back("-objcmt-migrate-literals");
      CmdArgs.push_back("-objcmt-migrate-subscripting");
      CmdArgs.push_back("-objcmt-migrate-property");
    } else {
      Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_literals);
      Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_subscripting);
      Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_property);
    }
  } else {
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_literals);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_subscripting);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_property);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_all);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_readonly_property);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_readwrite_property);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_property_dot_syntax);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_annotation);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_instancetype);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_nsmacros);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_protocol_conformance);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_atomic_property);
    Args.AddLastArg(CmdArgs,
                    options::OPT_objcmt_returns_innerpointer_property);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_ns_nonatomic_iosonly);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_designated_init);
    Args.AddLastArg(CmdArgs, options::OPT_objcmt_whitelist_dir_path);
  }

  // Add preprocessing options like -I, -D, etc. if we are using the
  // preprocessor.
  //
  // FIXME: Support -fpreprocessed
  if (types::getPreprocessedType(InputType) != types::TY_INVALID)
    AddPreprocessingOptions(C, JA, D, Args, CmdArgs, Output, Inputs);

  // Don't warn about "clang -c -DPIC -fPIC test.i" because libtool.m4 assumes
  // that "The compiler can only warn and ignore the option if not recognized".
  // When building with ccache, it will pass -D options to clang even on
  // preprocessed inputs and configure concludes that -fPIC is not supported.
  Args.ClaimAllArgs(options::OPT_D);

  // Manually translate -O4 to -O3; let clang reject others.
  if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
    if (A->getOption().matches(options::OPT_O4)) {
      CmdArgs.push_back("-O3");
      D.Diag(diag::warn_O4_is_O3);
    } else {
      A->render(Args, CmdArgs);
    }
  }

  // Warn about ignored options to clang.
  for (const Arg *A :
       Args.filtered(options::OPT_clang_ignored_gcc_optimization_f_Group)) {
    D.Diag(diag::warn_ignored_gcc_optimization) << A->getAsString(Args);
  }

  claimNoWarnArgs(Args);

  Args.AddAllArgs(CmdArgs, options::OPT_R_Group);
  Args.AddAllArgs(CmdArgs, options::OPT_W_Group);
  if (Args.hasFlag(options::OPT_pedantic, options::OPT_no_pedantic, false))
    CmdArgs.push_back("-pedantic");
  Args.AddLastArg(CmdArgs, options::OPT_pedantic_errors);
  Args.AddLastArg(CmdArgs, options::OPT_w);

  // Handle -{std, ansi, trigraphs} -- take the last of -{std, ansi}
  // (-ansi is equivalent to -std=c89 or -std=c++98).
  //
  // If a std is supplied, only add -trigraphs if it follows the
  // option.
  bool ImplyVCPPCXXVer = false;
  if (Arg *Std = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi)) {
    if (Std->getOption().matches(options::OPT_ansi))
      if (types::isCXX(InputType))
        CmdArgs.push_back("-std=c++98");
      else
        CmdArgs.push_back("-std=c89");
    else
      Std->render(Args, CmdArgs);

    // If -f(no-)trigraphs appears after the language standard flag, honor it.
    if (Arg *A = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi,
                                 options::OPT_ftrigraphs,
                                 options::OPT_fno_trigraphs))
      if (A != Std)
        A->render(Args, CmdArgs);
  } else {
    // Honor -std-default.
    //
    // FIXME: Clang doesn't correctly handle -std= when the input language
    // doesn't match. For the time being just ignore this for C++ inputs;
    // eventually we want to do all the standard defaulting here instead of
    // splitting it between the driver and clang -cc1.
    if (!types::isCXX(InputType))
      Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ, "-std=",
                                /*Joined=*/true);
    else if (IsWindowsMSVC)
      ImplyVCPPCXXVer = true;

    Args.AddLastArg(CmdArgs, options::OPT_ftrigraphs,
                    options::OPT_fno_trigraphs);
  }

  // GCC's behavior for -Wwrite-strings is a bit strange:
  //  * In C, this "warning flag" changes the types of string literals from
  //    'char[N]' to 'const char[N]', and thus triggers an unrelated warning
  //    for the discarded qualifier.
  //  * In C++, this is just a normal warning flag.
  //
  // Implementing this warning correctly in C is hard, so we follow GCC's
  // behavior for now. FIXME: Directly diagnose uses of a string literal as
  // a non-const char* in C, rather than using this crude hack.
  if (!types::isCXX(InputType)) {
    // FIXME: This should behave just like a warning flag, and thus should also
    // respect -Weverything, -Wno-everything, -Werror=write-strings, and so on.
    Arg *WriteStrings =
        Args.getLastArg(options::OPT_Wwrite_strings,
                        options::OPT_Wno_write_strings, options::OPT_w);
    if (WriteStrings &&
        WriteStrings->getOption().matches(options::OPT_Wwrite_strings))
      CmdArgs.push_back("-fconst-strings");
  }

  // GCC provides a macro definition '__DEPRECATED' when -Wdeprecated is active
  // during C++ compilation, which it is by default. GCC keeps this define even
  // in the presence of '-w', match this behavior bug-for-bug.
  if (types::isCXX(InputType) &&
      Args.hasFlag(options::OPT_Wdeprecated, options::OPT_Wno_deprecated,
                   true)) {
    CmdArgs.push_back("-fdeprecated-macro");
  }

  // Translate GCC's misnamer '-fasm' arguments to '-fgnu-keywords'.
  if (Arg *Asm = Args.getLastArg(options::OPT_fasm, options::OPT_fno_asm)) {
    if (Asm->getOption().matches(options::OPT_fasm))
      CmdArgs.push_back("-fgnu-keywords");
    else
      CmdArgs.push_back("-fno-gnu-keywords");
  }

  if (ShouldDisableDwarfDirectory(Args, getToolChain()))
    CmdArgs.push_back("-fno-dwarf-directory-asm");

  if (ShouldDisableAutolink(Args, getToolChain()))
    CmdArgs.push_back("-fno-autolink");

  // Add in -fdebug-compilation-dir if necessary.
  addDebugCompDirArg(Args, CmdArgs);

  if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_,
                               options::OPT_ftemplate_depth_EQ)) {
    CmdArgs.push_back("-ftemplate-depth");
    CmdArgs.push_back(A->getValue());
  }

  if (Arg *A = Args.getLastArg(options::OPT_foperator_arrow_depth_EQ)) {
    CmdArgs.push_back("-foperator-arrow-depth");
    CmdArgs.push_back(A->getValue());
  }

  if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_depth_EQ)) {
    CmdArgs.push_back("-fconstexpr-depth");
    CmdArgs.push_back(A->getValue());
  }

  if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_steps_EQ)) {
    CmdArgs.push_back("-fconstexpr-steps");
    CmdArgs.push_back(A->getValue());
  }

  if (Arg *A = Args.getLastArg(options::OPT_fbracket_depth_EQ)) {
    CmdArgs.push_back("-fbracket-depth");
    CmdArgs.push_back(A->getValue());
  }

  if (Arg *A = Args.getLastArg(options::OPT_Wlarge_by_value_copy_EQ,
                               options::OPT_Wlarge_by_value_copy_def)) {
    if (A->getNumValues()) {
      StringRef bytes = A->getValue();
      CmdArgs.push_back(Args.MakeArgString("-Wlarge-by-value-copy=" + bytes));
    } else
      CmdArgs.push_back("-Wlarge-by-value-copy=64"); // default value
  }

  if (Args.hasArg(options::OPT_relocatable_pch))
    CmdArgs.push_back("-relocatable-pch");

  if (Arg *A = Args.getLastArg(options::OPT_fconstant_string_class_EQ)) {
    CmdArgs.push_back("-fconstant-string-class");
    CmdArgs.push_back(A->getValue());
  }

  if (Arg *A = Args.getLastArg(options::OPT_ftabstop_EQ)) {
    CmdArgs.push_back("-ftabstop");
    CmdArgs.push_back(A->getValue());
  }

  CmdArgs.push_back("-ferror-limit");
  if (Arg *A = Args.getLastArg(options::OPT_ferror_limit_EQ))
    CmdArgs.push_back(A->getValue());
  else
    CmdArgs.push_back("19");

  if (Arg *A = Args.getLastArg(options::OPT_fmacro_backtrace_limit_EQ)) {
    CmdArgs.push_back("-fmacro-backtrace-limit");
    CmdArgs.push_back(A->getValue());
  }

  if (Arg *A = Args.getLastArg(options::OPT_ftemplate_backtrace_limit_EQ)) {
    CmdArgs.push_back("-ftemplate-backtrace-limit");
    CmdArgs.push_back(A->getValue());
  }

  if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_backtrace_limit_EQ)) {
    CmdArgs.push_back("-fconstexpr-backtrace-limit");
    CmdArgs.push_back(A->getValue());
  }

  if (Arg *A = Args.getLastArg(options::OPT_fspell_checking_limit_EQ)) {
    CmdArgs.push_back("-fspell-checking-limit");
    CmdArgs.push_back(A->getValue());
  }

  // Pass -fmessage-length=.
  CmdArgs.push_back("-fmessage-length");
  if (Arg *A = Args.getLastArg(options::OPT_fmessage_length_EQ)) {
    CmdArgs.push_back(A->getValue());
  } else {
    // If -fmessage-length=N was not specified, determine whether this is a
    // terminal and, if so, implicitly define -fmessage-length appropriately.
    unsigned N = llvm::sys::Process::StandardErrColumns();
    CmdArgs.push_back(Args.MakeArgString(Twine(N)));
  }
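  // For example (illustrative): without an explicit -fmessage-length=N, a
  // compile attached to an 80-column terminal gets "-fmessage-length 80",
  // while a non-terminal stderr yields a width of 0, which disables
  // wrapping. The error limit similarly defaults to "-ferror-limit 19"
  // unless -ferror-limit= is given.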
  // -fvisibility= and -fvisibility-ms-compat are of a piece.
  if (const Arg *A = Args.getLastArg(options::OPT_fvisibility_EQ,
                                     options::OPT_fvisibility_ms_compat)) {
    if (A->getOption().matches(options::OPT_fvisibility_EQ)) {
      CmdArgs.push_back("-fvisibility");
      CmdArgs.push_back(A->getValue());
    } else {
      assert(A->getOption().matches(options::OPT_fvisibility_ms_compat));
      CmdArgs.push_back("-fvisibility");
      CmdArgs.push_back("hidden");
      CmdArgs.push_back("-ftype-visibility");
      CmdArgs.push_back("default");
    }
  }

  Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden);

  Args.AddLastArg(CmdArgs, options::OPT_ftlsmodel_EQ);

  // -fhosted is default.
  if (Args.hasFlag(options::OPT_ffreestanding, options::OPT_fhosted, false) ||
      KernelOrKext)
    CmdArgs.push_back("-ffreestanding");

  // Forward -f (flag) options which we can pass directly.
  Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
  Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
  Args.AddLastArg(CmdArgs, options::OPT_fstandalone_debug);
  Args.AddLastArg(CmdArgs, options::OPT_fno_standalone_debug);
  Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
  // AltiVec-like language extensions aren't relevant for assembling.
  if (!isa<AssembleJobAction>(JA) || Output.getType() != types::TY_PP_Asm) {
    Args.AddLastArg(CmdArgs, options::OPT_faltivec);
    Args.AddLastArg(CmdArgs, options::OPT_fzvector);
  }
  Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_show_template_tree);
  Args.AddLastArg(CmdArgs, options::OPT_fno_elide_type);

  // Forward flags for OpenMP
  if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
                   options::OPT_fno_openmp, false))
    switch (getOpenMPRuntime(getToolChain(), Args)) {
    case OMPRT_OMP:
    case OMPRT_IOMP5:
      // Clang can generate useful OpenMP code for these two runtime libraries.
      CmdArgs.push_back("-fopenmp");

      // If no option regarding the use of TLS in OpenMP code generation is
      // given, decide a default based on the target. Otherwise rely on the
      // options and pass the right information to the frontend.
      if (!Args.hasFlag(options::OPT_fopenmp_use_tls,
                        options::OPT_fnoopenmp_use_tls,
                        getToolChain().getArch() == llvm::Triple::ppc ||
                            getToolChain().getArch() == llvm::Triple::ppc64 ||
                            getToolChain().getArch() == llvm::Triple::ppc64le))
        CmdArgs.push_back("-fnoopenmp-use-tls");
      break;
    default:
      // By default, if Clang doesn't know how to generate useful OpenMP code
      // for a specific runtime library, we just don't pass the '-fopenmp' flag
      // down to the actual compilation.
      // FIXME: It would be better to have a mode which *only* omits IR
      // generation based on the OpenMP support so that we get consistent
      // semantic analysis, etc.
      break;
    }

  const SanitizerArgs &Sanitize = getToolChain().getSanitizerArgs();
  Sanitize.addArgs(getToolChain(), Args, CmdArgs, InputType);

  // Report an error for -faltivec on anything other than PowerPC.
  if (const Arg *A = Args.getLastArg(options::OPT_faltivec)) {
    const llvm::Triple::ArchType Arch = getToolChain().getArch();
    if (!(Arch == llvm::Triple::ppc || Arch == llvm::Triple::ppc64 ||
          Arch == llvm::Triple::ppc64le))
      D.Diag(diag::err_drv_argument_only_allowed_with) << A->getAsString(Args)
                                                       << "ppc/ppc64/ppc64le";
  }

  // -fzvector is incompatible with -faltivec.
  if (Arg *A = Args.getLastArg(options::OPT_fzvector))
    if (Args.hasArg(options::OPT_faltivec))
      D.Diag(diag::err_drv_argument_not_allowed_with) << A->getAsString(Args)
                                                      << "-faltivec";

  if (getToolChain().SupportsProfiling())
    Args.AddLastArg(CmdArgs, options::OPT_pg);
  // -flax-vector-conversions is default.
  if (!Args.hasFlag(options::OPT_flax_vector_conversions,
                    options::OPT_fno_lax_vector_conversions))
    CmdArgs.push_back("-fno-lax-vector-conversions");

  if (Args.getLastArg(options::OPT_fapple_kext))
    CmdArgs.push_back("-fapple-kext");

  Args.AddLastArg(CmdArgs, options::OPT_fobjc_sender_dependent_dispatch);
  Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_print_source_range_info);
  Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_parseable_fixits);
  Args.AddLastArg(CmdArgs, options::OPT_ftime_report);
  Args.AddLastArg(CmdArgs, options::OPT_ftrapv);

  if (Arg *A = Args.getLastArg(options::OPT_ftrapv_handler_EQ)) {
    CmdArgs.push_back("-ftrapv-handler");
    CmdArgs.push_back(A->getValue());
  }

  Args.AddLastArg(CmdArgs, options::OPT_ftrap_function_EQ);

  // -fno-strict-overflow implies -fwrapv if it isn't disabled, but
  // -fstrict-overflow won't turn off an explicitly enabled -fwrapv.
  if (Arg *A = Args.getLastArg(options::OPT_fwrapv, options::OPT_fno_wrapv)) {
    if (A->getOption().matches(options::OPT_fwrapv))
      CmdArgs.push_back("-fwrapv");
  } else if (Arg *A = Args.getLastArg(options::OPT_fstrict_overflow,
                                      options::OPT_fno_strict_overflow)) {
    if (A->getOption().matches(options::OPT_fno_strict_overflow))
      CmdArgs.push_back("-fwrapv");
  }

  if (Arg *A = Args.getLastArg(options::OPT_freroll_loops,
                               options::OPT_fno_reroll_loops))
    if (A->getOption().matches(options::OPT_freroll_loops))
      CmdArgs.push_back("-freroll-loops");

  Args.AddLastArg(CmdArgs, options::OPT_fwritable_strings);
  Args.AddLastArg(CmdArgs, options::OPT_funroll_loops,
                  options::OPT_fno_unroll_loops);

  Args.AddLastArg(CmdArgs, options::OPT_pthread);

  // -stack-protector=0 is default.
  unsigned StackProtectorLevel = 0;
  if (getToolChain().getSanitizerArgs().needsSafeStackRt()) {
    Args.ClaimAllArgs(options::OPT_fno_stack_protector);
    Args.ClaimAllArgs(options::OPT_fstack_protector_all);
    Args.ClaimAllArgs(options::OPT_fstack_protector_strong);
    Args.ClaimAllArgs(options::OPT_fstack_protector);
  } else if (Arg *A = Args.getLastArg(options::OPT_fno_stack_protector,
                                      options::OPT_fstack_protector_all,
                                      options::OPT_fstack_protector_strong,
                                      options::OPT_fstack_protector)) {
    if (A->getOption().matches(options::OPT_fstack_protector)) {
      StackProtectorLevel = std::max(
          LangOptions::SSPOn,
          getToolChain().GetDefaultStackProtectorLevel(KernelOrKext));
    } else if (A->getOption().matches(options::OPT_fstack_protector_strong))
      StackProtectorLevel = LangOptions::SSPStrong;
    else if (A->getOption().matches(options::OPT_fstack_protector_all))
      StackProtectorLevel = LangOptions::SSPReq;
  } else {
    StackProtectorLevel =
        getToolChain().GetDefaultStackProtectorLevel(KernelOrKext);
  }
  if (StackProtectorLevel) {
    CmdArgs.push_back("-stack-protector");
    CmdArgs.push_back(Args.MakeArgString(Twine(StackProtectorLevel)));
  }
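  // For example (illustrative): -fstack-protector selects at least SSPOn (1,
  // or the toolchain default if higher), -fstack-protector-strong maps to
  // SSPStrong (2), and -fstack-protector-all to SSPReq (3); the chosen level
  // is what gets passed via "-stack-protector" above.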
CmdArgs.push_back(Args.MakeArgString(Str.drop_front(16))); } A->claim(); } } // Translate -mstackrealign if (Args.hasFlag(options::OPT_mstackrealign, options::OPT_mno_stackrealign, false)) { CmdArgs.push_back("-backend-option"); CmdArgs.push_back("-force-align-stack"); } if (!Args.hasFlag(options::OPT_mno_stackrealign, options::OPT_mstackrealign, false)) { CmdArgs.push_back(Args.MakeArgString("-mstackrealign")); } if (Args.hasArg(options::OPT_mstack_alignment)) { StringRef alignment = Args.getLastArgValue(options::OPT_mstack_alignment); CmdArgs.push_back(Args.MakeArgString("-mstack-alignment=" + alignment)); } if (Args.hasArg(options::OPT_mstack_probe_size)) { StringRef Size = Args.getLastArgValue(options::OPT_mstack_probe_size); if (!Size.empty()) CmdArgs.push_back(Args.MakeArgString("-mstack-probe-size=" + Size)); else CmdArgs.push_back("-mstack-probe-size=0"); } if (getToolChain().getArch() == llvm::Triple::aarch64 || getToolChain().getArch() == llvm::Triple::aarch64_be) CmdArgs.push_back("-fallow-half-arguments-and-returns"); if (Arg *A = Args.getLastArg(options::OPT_mrestrict_it, options::OPT_mno_restrict_it)) { if (A->getOption().matches(options::OPT_mrestrict_it)) { CmdArgs.push_back("-backend-option"); CmdArgs.push_back("-arm-restrict-it"); } else { CmdArgs.push_back("-backend-option"); CmdArgs.push_back("-arm-no-restrict-it"); } } else if (TT.isOSWindows() && (TT.getArch() == llvm::Triple::arm || TT.getArch() == llvm::Triple::thumb)) { // Windows on ARM expects restricted IT blocks CmdArgs.push_back("-backend-option"); CmdArgs.push_back("-arm-restrict-it"); } // Forward -f options with positive and negative forms; we translate // these by hand. if (Arg *A = Args.getLastArg(options::OPT_fprofile_sample_use_EQ)) { StringRef fname = A->getValue(); if (!llvm::sys::fs::exists(fname)) D.Diag(diag::err_drv_no_such_file) << fname; else A->render(Args, CmdArgs); } if (Args.hasArg(options::OPT_mkernel)) { if (!Args.hasArg(options::OPT_fapple_kext) && types::isCXX(InputType)) CmdArgs.push_back("-fapple-kext"); if (!Args.hasArg(options::OPT_fbuiltin)) CmdArgs.push_back("-fno-builtin"); Args.ClaimAllArgs(options::OPT_fno_builtin); } // -fbuiltin is default. else if (!Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin)) CmdArgs.push_back("-fno-builtin"); if (!Args.hasFlag(options::OPT_fassume_sane_operator_new, options::OPT_fno_assume_sane_operator_new)) CmdArgs.push_back("-fno-assume-sane-operator-new"); // -fblocks=0 is default. if (Args.hasFlag(options::OPT_fblocks, options::OPT_fno_blocks, getToolChain().IsBlocksDefault()) || (Args.hasArg(options::OPT_fgnu_runtime) && Args.hasArg(options::OPT_fobjc_nonfragile_abi) && !Args.hasArg(options::OPT_fno_blocks))) { CmdArgs.push_back("-fblocks"); if (!Args.hasArg(options::OPT_fgnu_runtime) && !getToolChain().hasBlocksRuntime()) CmdArgs.push_back("-fblocks-runtime-optional"); } // -fmodules enables the use of precompiled modules (off by default). // Users can pass -fno-cxx-modules to turn off modules support for // C++/Objective-C++ programs. bool HaveModules = false; if (Args.hasFlag(options::OPT_fmodules, options::OPT_fno_modules, false)) { bool AllowedInCXX = Args.hasFlag(options::OPT_fcxx_modules, options::OPT_fno_cxx_modules, true); if (AllowedInCXX || !types::isCXX(InputType)) { CmdArgs.push_back("-fmodules"); HaveModules = true; } } // -fmodule-maps enables implicit reading of module map files. By default, // this is enabled if we are using precompiled modules. 
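// So, for example, a plain 'clang -fmodules foo.m' passes both '-fmodules' and '-fimplicit-module-maps' down to -cc1, while adding '-fno-implicit-module-maps' suppresses the latter.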
if (Args.hasFlag(options::OPT_fimplicit_module_maps, options::OPT_fno_implicit_module_maps, HaveModules)) { CmdArgs.push_back("-fimplicit-module-maps"); } // -fmodules-decluse checks that modules used are declared so (off by // default). if (Args.hasFlag(options::OPT_fmodules_decluse, options::OPT_fno_modules_decluse, false)) { CmdArgs.push_back("-fmodules-decluse"); } // -fmodules-strict-decluse is like -fmodules-decluse, but also checks that // all #included headers are part of modules. if (Args.hasFlag(options::OPT_fmodules_strict_decluse, options::OPT_fno_modules_strict_decluse, false)) { CmdArgs.push_back("-fmodules-strict-decluse"); } // -fno-implicit-modules turns off implicitly compiling modules on demand. if (!Args.hasFlag(options::OPT_fimplicit_modules, options::OPT_fno_implicit_modules)) { CmdArgs.push_back("-fno-implicit-modules"); } // -fmodule-name specifies the module that is currently being built (or // used for header checking by -fmodule-maps). Args.AddLastArg(CmdArgs, options::OPT_fmodule_name); // -fmodule-map-file can be used to specify files containing module // definitions. Args.AddAllArgs(CmdArgs, options::OPT_fmodule_map_file); // -fmodule-file can be used to specify files containing precompiled modules. Args.AddAllArgs(CmdArgs, options::OPT_fmodule_file); // -fmodules-cache-path specifies where our implicitly-built module files // should be written. SmallString<128> Path; if (Arg *A = Args.getLastArg(options::OPT_fmodules_cache_path)) Path = A->getValue(); if (HaveModules) { if (C.isForDiagnostics()) { // When generating crash reports, we want to emit the modules along with // the reproduction sources, so we ignore any provided module path. Path = Output.getFilename(); llvm::sys::path::replace_extension(Path, ".cache"); llvm::sys::path::append(Path, "modules"); } else if (Path.empty()) { // No module path was provided: use the default. llvm::sys::path::system_temp_directory(/*erasedOnReboot=*/false, Path); llvm::sys::path::append(Path, "org.llvm.clang."); appendUserToPath(Path); llvm::sys::path::append(Path, "ModuleCache"); } const char Arg[] = "-fmodules-cache-path="; Path.insert(Path.begin(), Arg, Arg + strlen(Arg)); CmdArgs.push_back(Args.MakeArgString(Path)); } // When building modules and generating crashdumps, we need to dump a module // dependency VFS alongside the output. if (HaveModules && C.isForDiagnostics()) { SmallString<128> VFSDir(Output.getFilename()); llvm::sys::path::replace_extension(VFSDir, ".cache"); // Add the cache directory as a temp so the crash diagnostics pick it up. C.addTempFile(Args.MakeArgString(VFSDir)); llvm::sys::path::append(VFSDir, "vfs"); CmdArgs.push_back("-module-dependency-dir"); CmdArgs.push_back(Args.MakeArgString(VFSDir)); } if (HaveModules) Args.AddLastArg(CmdArgs, options::OPT_fmodules_user_build_path); // Pass through all -fmodules-ignore-macro arguments.
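// e.g. '-fmodules-ignore-macro=NDEBUG' keeps NDEBUG out of the macro state used to distinguish module variants, so debug and release builds can share one module cache.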
Args.AddAllArgs(CmdArgs, options::OPT_fmodules_ignore_macro); Args.AddLastArg(CmdArgs, options::OPT_fmodules_prune_interval); Args.AddLastArg(CmdArgs, options::OPT_fmodules_prune_after); Args.AddLastArg(CmdArgs, options::OPT_fbuild_session_timestamp); if (Arg *A = Args.getLastArg(options::OPT_fbuild_session_file)) { if (Args.hasArg(options::OPT_fbuild_session_timestamp)) D.Diag(diag::err_drv_argument_not_allowed_with) << A->getAsString(Args) << "-fbuild-session-timestamp"; llvm::sys::fs::file_status Status; if (llvm::sys::fs::status(A->getValue(), Status)) D.Diag(diag::err_drv_no_such_file) << A->getValue(); CmdArgs.push_back(Args.MakeArgString( "-fbuild-session-timestamp=" + Twine((uint64_t)Status.getLastModificationTime().toEpochTime()))); } if (Args.getLastArg(options::OPT_fmodules_validate_once_per_build_session)) { if (!Args.getLastArg(options::OPT_fbuild_session_timestamp, options::OPT_fbuild_session_file)) D.Diag(diag::err_drv_modules_validate_once_requires_timestamp); Args.AddLastArg(CmdArgs, options::OPT_fmodules_validate_once_per_build_session); } Args.AddLastArg(CmdArgs, options::OPT_fmodules_validate_system_headers); // -faccess-control is default. if (Args.hasFlag(options::OPT_fno_access_control, options::OPT_faccess_control, false)) CmdArgs.push_back("-fno-access-control"); // -felide-constructors is the default. if (Args.hasFlag(options::OPT_fno_elide_constructors, options::OPT_felide_constructors, false)) CmdArgs.push_back("-fno-elide-constructors"); ToolChain::RTTIMode RTTIMode = getToolChain().getRTTIMode(); if (KernelOrKext || (types::isCXX(InputType) && (RTTIMode == ToolChain::RM_DisabledExplicitly || RTTIMode == ToolChain::RM_DisabledImplicitly))) CmdArgs.push_back("-fno-rtti"); // -fshort-enums=0 is default for all architectures except Hexagon. if (Args.hasFlag(options::OPT_fshort_enums, options::OPT_fno_short_enums, getToolChain().getArch() == llvm::Triple::hexagon)) CmdArgs.push_back("-fshort-enums"); // -fsigned-char is default. if (Arg *A = Args.getLastArg( options::OPT_fsigned_char, options::OPT_fno_signed_char, options::OPT_funsigned_char, options::OPT_fno_unsigned_char)) { if (A->getOption().matches(options::OPT_funsigned_char) || A->getOption().matches(options::OPT_fno_signed_char)) { CmdArgs.push_back("-fno-signed-char"); } } else if (!isSignedCharDefault(getToolChain().getTriple())) { CmdArgs.push_back("-fno-signed-char"); } // -fuse-cxa-atexit is default. if (!Args.hasFlag(options::OPT_fuse_cxa_atexit, options::OPT_fno_use_cxa_atexit, !IsWindowsCygnus && !IsWindowsGNU && getToolChain().getArch() != llvm::Triple::hexagon && getToolChain().getArch() != llvm::Triple::xcore) || KernelOrKext) CmdArgs.push_back("-fno-use-cxa-atexit"); // -fms-extensions=0 is default. if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions, IsWindowsMSVC)) CmdArgs.push_back("-fms-extensions"); // -fno-use-line-directives is default. if (Args.hasFlag(options::OPT_fuse_line_directives, options::OPT_fno_use_line_directives, false)) CmdArgs.push_back("-fuse-line-directives"); // -fms-compatibility=0 is default. if (Args.hasFlag(options::OPT_fms_compatibility, options::OPT_fno_ms_compatibility, (IsWindowsMSVC && Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions, true)))) CmdArgs.push_back("-fms-compatibility"); // -fms-compatibility-version=18.00 is default. 
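// Rough orientation for the block below: cl 18.00 is Visual C++ 2013 and 19.00 is 2015, so '-fms-compatibility-version=19' makes IsMSVC2015Compatible true and, when ImplyVCPPCXXVer is set, selects -std=c++14 rather than -std=c++11.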
VersionTuple MSVT = visualstudio::getMSVCVersion( &D, getToolChain().getTriple(), Args, IsWindowsMSVC); if (!MSVT.empty()) CmdArgs.push_back( Args.MakeArgString("-fms-compatibility-version=" + MSVT.getAsString())); bool IsMSVC2015Compatible = MSVT.getMajor() >= 19; if (ImplyVCPPCXXVer) { if (IsMSVC2015Compatible) CmdArgs.push_back("-std=c++14"); else CmdArgs.push_back("-std=c++11"); } // -fno-borland-extensions is default. if (Args.hasFlag(options::OPT_fborland_extensions, options::OPT_fno_borland_extensions, false)) CmdArgs.push_back("-fborland-extensions"); // -fthreadsafe-statics is default, except for MSVC compatibility versions less // than 19. if (!Args.hasFlag(options::OPT_fthreadsafe_statics, options::OPT_fno_threadsafe_statics, !IsWindowsMSVC || IsMSVC2015Compatible)) CmdArgs.push_back("-fno-threadsafe-statics"); // -fno-delayed-template-parsing is default, except for Windows where MSVC STL // needs it. if (Args.hasFlag(options::OPT_fdelayed_template_parsing, options::OPT_fno_delayed_template_parsing, IsWindowsMSVC)) CmdArgs.push_back("-fdelayed-template-parsing"); // -fgnu-keywords default varies depending on language; only pass if // specified. if (Arg *A = Args.getLastArg(options::OPT_fgnu_keywords, options::OPT_fno_gnu_keywords)) A->render(Args, CmdArgs); if (Args.hasFlag(options::OPT_fgnu89_inline, options::OPT_fno_gnu89_inline, false)) CmdArgs.push_back("-fgnu89-inline"); if (Args.hasArg(options::OPT_fno_inline)) CmdArgs.push_back("-fno-inline"); if (Args.hasArg(options::OPT_fno_inline_functions)) CmdArgs.push_back("-fno-inline-functions"); ObjCRuntime objcRuntime = AddObjCRuntimeArgs(Args, CmdArgs, rewriteKind); // -fobjc-dispatch-method is only relevant with the nonfragile-abi, and // legacy is the default. Except for a deployment target of 10.5, // the NeXT runtime always uses legacy dispatch and -fno-objc-legacy-dispatch // is silently ignored. if (objcRuntime.isNonFragile()) { if (!Args.hasFlag(options::OPT_fobjc_legacy_dispatch, options::OPT_fno_objc_legacy_dispatch, objcRuntime.isLegacyDispatchDefaultForArch( getToolChain().getArch()))) { if (getToolChain().UseObjCMixedDispatch()) CmdArgs.push_back("-fobjc-dispatch-method=mixed"); else CmdArgs.push_back("-fobjc-dispatch-method=non-legacy"); } } // When the Objective-C legacy runtime is in effect on Mac OS X, // turn on the option to do Array/Dictionary subscripting // by default. if (getToolChain().getArch() == llvm::Triple::x86 && getToolChain().getTriple().isMacOSX() && !getToolChain().getTriple().isMacOSXVersionLT(10, 7) && objcRuntime.getKind() == ObjCRuntime::FragileMacOSX && objcRuntime.isNeXTFamily()) CmdArgs.push_back("-fobjc-subscripting-legacy-runtime"); // -fencode-extended-block-signature=1 is default. if (getToolChain().IsEncodeExtendedBlockSignatureDefault()) { CmdArgs.push_back("-fencode-extended-block-signature"); } // Allow -fno-objc-arr to trump -fobjc-arr/-fobjc-arc. // NOTE: This logic is duplicated in ToolChains.cpp. bool ARC = isObjCAutoRefCount(Args); if (ARC) { getToolChain().CheckObjCARC(); CmdArgs.push_back("-fobjc-arc"); // FIXME: It seems like this entire block, and several around it should be // wrapped in isObjC, but for now we just use it here as this is where it // was being used previously. if (types::isCXX(InputType) && types::isObjC(InputType)) { if (getToolChain().GetCXXStdlibType(Args) == ToolChain::CST_Libcxx) CmdArgs.push_back("-fobjc-arc-cxxlib=libc++"); else CmdArgs.push_back("-fobjc-arc-cxxlib=libstdc++"); } // Allow the user to enable full exceptions code emission.
// We default to off for Objective-C, on for Objective-C++. if (Args.hasFlag(options::OPT_fobjc_arc_exceptions, options::OPT_fno_objc_arc_exceptions, /*default*/ types::isCXX(InputType))) CmdArgs.push_back("-fobjc-arc-exceptions"); } // -fobjc-infer-related-result-type is the default, except in the Objective-C // rewriter. if (rewriteKind != RK_None) CmdArgs.push_back("-fno-objc-infer-related-result-type"); // Handle -fobjc-gc and -fobjc-gc-only. They are exclusive, and -fobjc-gc-only // takes precedence. const Arg *GCArg = Args.getLastArg(options::OPT_fobjc_gc_only); if (!GCArg) GCArg = Args.getLastArg(options::OPT_fobjc_gc); if (GCArg) { if (ARC) { D.Diag(diag::err_drv_objc_gc_arr) << GCArg->getAsString(Args); } else if (getToolChain().SupportsObjCGC()) { GCArg->render(Args, CmdArgs); } else { // FIXME: We should move this to a hard error. D.Diag(diag::warn_drv_objc_gc_unsupported) << GCArg->getAsString(Args); } } if (Args.hasFlag(options::OPT_fapplication_extension, options::OPT_fno_application_extension, false)) CmdArgs.push_back("-fapplication-extension"); // Handle GCC-style exception args. if (!C.getDriver().IsCLMode()) addExceptionArgs(Args, InputType, getToolChain(), KernelOrKext, objcRuntime, CmdArgs); if (getToolChain().UseSjLjExceptions()) CmdArgs.push_back("-fsjlj-exceptions"); // C++ "sane" operator new. if (!Args.hasFlag(options::OPT_fassume_sane_operator_new, options::OPT_fno_assume_sane_operator_new)) CmdArgs.push_back("-fno-assume-sane-operator-new"); // -fsized-deallocation is off by default, as it is an ABI-breaking change for // most platforms. if (Args.hasFlag(options::OPT_fsized_deallocation, options::OPT_fno_sized_deallocation, false)) CmdArgs.push_back("-fsized-deallocation"); // -fconstant-cfstrings is default, and may be subject to argument translation // on Darwin. if (!Args.hasFlag(options::OPT_fconstant_cfstrings, options::OPT_fno_constant_cfstrings) || !Args.hasFlag(options::OPT_mconstant_cfstrings, options::OPT_mno_constant_cfstrings)) CmdArgs.push_back("-fno-constant-cfstrings"); // -fshort-wchar default varies depending on platform; only // pass if specified. if (Arg *A = Args.getLastArg(options::OPT_fshort_wchar, options::OPT_fno_short_wchar)) A->render(Args, CmdArgs); // -fno-pascal-strings is default, only pass non-default. if (Args.hasFlag(options::OPT_fpascal_strings, options::OPT_fno_pascal_strings, false)) CmdArgs.push_back("-fpascal-strings"); // Honor -fpack-struct= and -fpack-struct, if given. Note that // -fno-pack-struct doesn't apply to -fpack-struct=.
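// e.g. '-fpack-struct=2' is forwarded as-is, while bare '-fpack-struct' is canonicalized to '-fpack-struct=1' (pack all members to byte boundaries).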
if (Arg *A = Args.getLastArg(options::OPT_fpack_struct_EQ)) { std::string PackStructStr = "-fpack-struct="; PackStructStr += A->getValue(); CmdArgs.push_back(Args.MakeArgString(PackStructStr)); } else if (Args.hasFlag(options::OPT_fpack_struct, options::OPT_fno_pack_struct, false)) { CmdArgs.push_back("-fpack-struct=1"); } // Handle -fmax-type-align=N and -fno-max-type-align bool SkipMaxTypeAlign = Args.hasArg(options::OPT_fno_max_type_align); if (Arg *A = Args.getLastArg(options::OPT_fmax_type_align_EQ)) { if (!SkipMaxTypeAlign) { std::string MaxTypeAlignStr = "-fmax-type-align="; MaxTypeAlignStr += A->getValue(); CmdArgs.push_back(Args.MakeArgString(MaxTypeAlignStr)); } } else if (getToolChain().getTriple().isOSDarwin()) { if (!SkipMaxTypeAlign) { std::string MaxTypeAlignStr = "-fmax-type-align=16"; CmdArgs.push_back(Args.MakeArgString(MaxTypeAlignStr)); } } if (KernelOrKext || isNoCommonDefault(getToolChain().getTriple())) { if (!Args.hasArg(options::OPT_fcommon)) CmdArgs.push_back("-fno-common"); Args.ClaimAllArgs(options::OPT_fno_common); } // -fcommon is default, only pass non-default. else if (!Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common)) CmdArgs.push_back("-fno-common"); // -fsigned-bitfields is default, and clang doesn't yet support // -funsigned-bitfields. if (!Args.hasFlag(options::OPT_fsigned_bitfields, options::OPT_funsigned_bitfields)) D.Diag(diag::warn_drv_clang_unsupported) << Args.getLastArg(options::OPT_funsigned_bitfields)->getAsString(Args); // -ffor-scope is default, and clang doesn't support -fno-for-scope. if (!Args.hasFlag(options::OPT_ffor_scope, options::OPT_fno_for_scope)) D.Diag(diag::err_drv_clang_unsupported) << Args.getLastArg(options::OPT_fno_for_scope)->getAsString(Args); // -finput-charset=UTF-8 is default. Reject others. if (Arg *inputCharset = Args.getLastArg(options::OPT_finput_charset_EQ)) { StringRef value = inputCharset->getValue(); if (value != "UTF-8") D.Diag(diag::err_drv_invalid_value) << inputCharset->getAsString(Args) << value; } // -fexec-charset=UTF-8 is default. Reject others. if (Arg *execCharset = Args.getLastArg(options::OPT_fexec_charset_EQ)) { StringRef value = execCharset->getValue(); if (value != "UTF-8") D.Diag(diag::err_drv_invalid_value) << execCharset->getAsString(Args) << value; } // -fcaret-diagnostics is default. if (!Args.hasFlag(options::OPT_fcaret_diagnostics, options::OPT_fno_caret_diagnostics, true)) CmdArgs.push_back("-fno-caret-diagnostics"); // -fdiagnostics-fixit-info is default, only pass non-default. if (!Args.hasFlag(options::OPT_fdiagnostics_fixit_info, options::OPT_fno_diagnostics_fixit_info)) CmdArgs.push_back("-fno-diagnostics-fixit-info"); // Enable -fdiagnostics-show-option by default.
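// With this enabled, diagnostics name their controlling flag, e.g. "warning: unused variable 'x' [-Wunused-variable]".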
if (Args.hasFlag(options::OPT_fdiagnostics_show_option, options::OPT_fno_diagnostics_show_option)) CmdArgs.push_back("-fdiagnostics-show-option"); if (const Arg *A = Args.getLastArg(options::OPT_fdiagnostics_show_category_EQ)) { CmdArgs.push_back("-fdiagnostics-show-category"); CmdArgs.push_back(A->getValue()); } if (const Arg *A = Args.getLastArg(options::OPT_fdiagnostics_format_EQ)) { CmdArgs.push_back("-fdiagnostics-format"); CmdArgs.push_back(A->getValue()); } if (Arg *A = Args.getLastArg( options::OPT_fdiagnostics_show_note_include_stack, options::OPT_fno_diagnostics_show_note_include_stack)) { if (A->getOption().matches( options::OPT_fdiagnostics_show_note_include_stack)) CmdArgs.push_back("-fdiagnostics-show-note-include-stack"); else CmdArgs.push_back("-fno-diagnostics-show-note-include-stack"); } // Color diagnostics are the default, unless the terminal doesn't support // them. // Support both clang's -f[no-]color-diagnostics and gcc's // -f[no-]diagnostics-color[=never|always|auto]. enum { Colors_On, Colors_Off, Colors_Auto } ShowColors = Colors_Auto; for (const auto &Arg : Args) { const Option &O = Arg->getOption(); if (!O.matches(options::OPT_fcolor_diagnostics) && !O.matches(options::OPT_fdiagnostics_color) && !O.matches(options::OPT_fno_color_diagnostics) && !O.matches(options::OPT_fno_diagnostics_color) && !O.matches(options::OPT_fdiagnostics_color_EQ)) continue; Arg->claim(); if (O.matches(options::OPT_fcolor_diagnostics) || O.matches(options::OPT_fdiagnostics_color)) { ShowColors = Colors_On; } else if (O.matches(options::OPT_fno_color_diagnostics) || O.matches(options::OPT_fno_diagnostics_color)) { ShowColors = Colors_Off; } else { assert(O.matches(options::OPT_fdiagnostics_color_EQ)); StringRef value(Arg->getValue()); if (value == "always") ShowColors = Colors_On; else if (value == "never") ShowColors = Colors_Off; else if (value == "auto") ShowColors = Colors_Auto; else getToolChain().getDriver().Diag(diag::err_drv_clang_unsupported) << ("-fdiagnostics-color=" + value).str(); } } if (ShowColors == Colors_On || (ShowColors == Colors_Auto && llvm::sys::Process::StandardErrHasColors())) CmdArgs.push_back("-fcolor-diagnostics"); if (Args.hasArg(options::OPT_fansi_escape_codes)) CmdArgs.push_back("-fansi-escape-codes"); if (!Args.hasFlag(options::OPT_fshow_source_location, options::OPT_fno_show_source_location)) CmdArgs.push_back("-fno-show-source-location"); if (!Args.hasFlag(options::OPT_fshow_column, options::OPT_fno_show_column, true)) CmdArgs.push_back("-fno-show-column"); if (!Args.hasFlag(options::OPT_fspell_checking, options::OPT_fno_spell_checking)) CmdArgs.push_back("-fno-spell-checking"); // -fno-asm-blocks is default. if (Args.hasFlag(options::OPT_fasm_blocks, options::OPT_fno_asm_blocks, false)) CmdArgs.push_back("-fasm-blocks"); // -fgnu-inline-asm is default. if (!Args.hasFlag(options::OPT_fgnu_inline_asm, options::OPT_fno_gnu_inline_asm, true)) CmdArgs.push_back("-fno-gnu-inline-asm"); // Enable vectorization by default according to the optimization level // selected. For optimization levels that want vectorization we use the alias // option to simplify the hasFlag logic. bool EnableVec = shouldEnableVectorizerAtOLevel(Args, false); OptSpecifier VectorizeAliasOption = EnableVec ? options::OPT_O_Group : options::OPT_fvectorize; if (Args.hasFlag(options::OPT_fvectorize, VectorizeAliasOption, options::OPT_fno_vectorize, EnableVec)) CmdArgs.push_back("-vectorize-loops"); // -fslp-vectorize is enabled based on the optimization level selected.
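// Roughly: both vectorizers default on at -O2 and above (including -Os); -Oz keeps only the SLP vectorizer; -O0 and -O1 leave both off. Using OPT_O_Group as the alias lets a bare -O flag stand in for the positive option in hasFlag().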
bool EnableSLPVec = shouldEnableVectorizerAtOLevel(Args, true); OptSpecifier SLPVectAliasOption = EnableSLPVec ? options::OPT_O_Group : options::OPT_fslp_vectorize; if (Args.hasFlag(options::OPT_fslp_vectorize, SLPVectAliasOption, options::OPT_fno_slp_vectorize, EnableSLPVec)) CmdArgs.push_back("-vectorize-slp"); // -fno-slp-vectorize-aggressive is default. if (Args.hasFlag(options::OPT_fslp_vectorize_aggressive, options::OPT_fno_slp_vectorize_aggressive, false)) CmdArgs.push_back("-vectorize-slp-aggressive"); if (Arg *A = Args.getLastArg(options::OPT_fshow_overloads_EQ)) A->render(Args, CmdArgs); // -fdollars-in-identifiers default varies depending on platform and // language; only pass if specified. if (Arg *A = Args.getLastArg(options::OPT_fdollars_in_identifiers, options::OPT_fno_dollars_in_identifiers)) { if (A->getOption().matches(options::OPT_fdollars_in_identifiers)) CmdArgs.push_back("-fdollars-in-identifiers"); else CmdArgs.push_back("-fno-dollars-in-identifiers"); } // -funit-at-a-time is default, and we don't support -fno-unit-at-a-time for // practical purposes. if (Arg *A = Args.getLastArg(options::OPT_funit_at_a_time, options::OPT_fno_unit_at_a_time)) { if (A->getOption().matches(options::OPT_fno_unit_at_a_time)) D.Diag(diag::warn_drv_clang_unsupported) << A->getAsString(Args); } if (Args.hasFlag(options::OPT_fapple_pragma_pack, options::OPT_fno_apple_pragma_pack, false)) CmdArgs.push_back("-fapple-pragma-pack"); // le32-specific flags: // -fno-math-builtin: clang should not convert math builtins to intrinsics // by default. if (getToolChain().getArch() == llvm::Triple::le32) { CmdArgs.push_back("-fno-math-builtin"); } // Default to -fno-builtin-str{cat,cpy} on Darwin for ARM. // // FIXME: This is disabled until clang -cc1 supports -fno-builtin-foo. PR4941. #if 0 if (getToolChain().getTriple().isOSDarwin() && (getToolChain().getArch() == llvm::Triple::arm || getToolChain().getArch() == llvm::Triple::thumb)) { if (!Args.hasArg(options::OPT_fbuiltin_strcat)) CmdArgs.push_back("-fno-builtin-strcat"); if (!Args.hasArg(options::OPT_fbuiltin_strcpy)) CmdArgs.push_back("-fno-builtin-strcpy"); } #endif // Enable rewrite includes if the user's asked for it or if we're generating // diagnostics. // TODO: Once -module-dependency-dir works with -frewrite-includes it'd be // nice to enable this when doing a crashdump for modules as well. if (Args.hasFlag(options::OPT_frewrite_includes, options::OPT_fno_rewrite_includes, false) || (C.isForDiagnostics() && !HaveModules)) CmdArgs.push_back("-frewrite-includes"); // Only allow -traditional or -traditional-cpp in preprocessing modes. if (Arg *A = Args.getLastArg(options::OPT_traditional, options::OPT_traditional_cpp)) { if (isa<PreprocessJobAction>(JA)) CmdArgs.push_back("-traditional-cpp"); else D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args); } Args.AddLastArg(CmdArgs, options::OPT_dM); Args.AddLastArg(CmdArgs, options::OPT_dD); // Handle serialized diagnostics. if (Arg *A = Args.getLastArg(options::OPT__serialize_diags)) { CmdArgs.push_back("-serialize-diagnostic-file"); CmdArgs.push_back(Args.MakeArgString(A->getValue())); } if (Args.hasArg(options::OPT_fretain_comments_from_system_headers)) CmdArgs.push_back("-fretain-comments-from-system-headers"); // Forward -fcomment-block-commands to -cc1. Args.AddAllArgs(CmdArgs, options::OPT_fcomment_block_commands); // Forward -fparse-all-comments to -cc1.
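// Both affect documentation-comment parsing: e.g. '-fcomment-block-commands=mycmd' (an illustrative name) registers a custom '\mycmd' doc command, and -fparse-all-comments treats ordinary comments as documentation too.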
Args.AddAllArgs(CmdArgs, options::OPT_fparse_all_comments); // Forward -Xclang arguments to -cc1, and -mllvm arguments to the LLVM option // parser. Args.AddAllArgValues(CmdArgs, options::OPT_Xclang); bool OptDisabled = false; for (const Arg *A : Args.filtered(options::OPT_mllvm)) { A->claim(); // We translate this by hand to the -cc1 argument, since nightly test uses // it and developers have been trained to spell it with -mllvm. if (StringRef(A->getValue(0)) == "-disable-llvm-optzns") { CmdArgs.push_back("-disable-llvm-optzns"); OptDisabled = true; } else A->render(Args, CmdArgs); } // With -save-temps, we want to save the unoptimized bitcode output from the // CompileJobAction, so disable optimizations if they are not already // disabled. if (C.getDriver().isSaveTempsEnabled() && !OptDisabled && isa<CompileJobAction>(JA)) CmdArgs.push_back("-disable-llvm-optzns"); if (Output.getType() == types::TY_Dependencies) { // Handled with other dependency code. } else if (Output.isFilename()) { CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); } else { assert(Output.isNothing() && "Invalid output."); } addDashXForInput(Args, Input, CmdArgs); if (Input.isFilename()) CmdArgs.push_back(Input.getFilename()); else Input.getInputArg().renderAsInput(Args, CmdArgs); Args.AddAllArgs(CmdArgs, options::OPT_undef); const char *Exec = getToolChain().getDriver().getClangProgramPath(); // Optionally embed the -cc1 level arguments into the debug info, for build // analysis. if (getToolChain().UseDwarfDebugFlags()) { ArgStringList OriginalArgs; for (const auto &Arg : Args) Arg->render(Args, OriginalArgs); SmallString<256> Flags; Flags += Exec; for (const char *OriginalArg : OriginalArgs) { SmallString<128> EscapedArg; EscapeSpacesAndBackslashes(OriginalArg, EscapedArg); Flags += " "; Flags += EscapedArg; } CmdArgs.push_back("-dwarf-debug-flags"); CmdArgs.push_back(Args.MakeArgString(Flags)); } // Add the split debug info name to the command lines here so we // can propagate it to the backend. bool SplitDwarf = Args.hasArg(options::OPT_gsplit_dwarf) && getToolChain().getTriple().isOSLinux() && (isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)); const char *SplitDwarfOut; if (SplitDwarf) { CmdArgs.push_back("-split-dwarf-file"); SplitDwarfOut = SplitDebugName(Args, Input); CmdArgs.push_back(SplitDwarfOut); } // Host-side CUDA compilation receives device-side outputs as Inputs[1...]. // Include them with -fcuda-include-gpubinary. if (IsCuda && Inputs.size() > 1) for (InputInfoList::const_iterator it = std::next(Inputs.begin()), ie = Inputs.end(); it != ie; ++it) { CmdArgs.push_back("-fcuda-include-gpubinary"); CmdArgs.push_back(it->getFilename()); } // Finally add the compile command to the compilation. if (Args.hasArg(options::OPT__SLASH_fallback) && Output.getType() == types::TY_Object && (InputType == types::TY_C || InputType == types::TY_CXX)) { auto CLCommand = getCLFallback()->GetCommand(C, JA, Output, Inputs, Args, LinkingOutput); C.addCommand(llvm::make_unique<FallbackCommand>(JA, *this, Exec, CmdArgs, std::move(CLCommand))); } else { C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); } // Handle the debug info splitting at object creation time if we're // creating an object. // TODO: Currently only works on linux with newer objcopy.
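// Sketch of the flow, assuming 'clang -gsplit-dwarf -c foo.c' on Linux: -cc1 receives '-split-dwarf-file foo.dwo', and the SplitDebugInfo step below uses objcopy to move the .dwo sections out of foo.o into foo.dwo, leaving a skeleton compile unit behind.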
if (SplitDwarf && !isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA)) SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output, SplitDwarfOut); if (Arg *A = Args.getLastArg(options::OPT_pg)) if (Args.hasArg(options::OPT_fomit_frame_pointer)) D.Diag(diag::err_drv_argument_not_allowed_with) << "-fomit-frame-pointer" << A->getAsString(Args); // Claim some arguments which clang supports automatically. // -fpch-preprocess is used with gcc to add a special marker in the output to // include the PCH file. Clang's PTH solution is completely transparent, so we // do not need to deal with it at all. Args.ClaimAllArgs(options::OPT_fpch_preprocess); // Claim some arguments which clang doesn't support, but we don't // care to warn the user about. Args.ClaimAllArgs(options::OPT_clang_ignored_f_Group); Args.ClaimAllArgs(options::OPT_clang_ignored_m_Group); // Disable warnings for clang -E -emit-llvm foo.c Args.ClaimAllArgs(options::OPT_emit_llvm); } /// Add options related to the Objective-C runtime/ABI. /// /// Returns the Objective-C runtime that was selected. ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args, ArgStringList &cmdArgs, RewriteKind rewriteKind) const { // Look for the controlling runtime option. Arg *runtimeArg = args.getLastArg(options::OPT_fnext_runtime, options::OPT_fgnu_runtime, options::OPT_fobjc_runtime_EQ); // Just forward -fobjc-runtime= to the frontend. This supersedes // options about fragility. if (runtimeArg && runtimeArg->getOption().matches(options::OPT_fobjc_runtime_EQ)) { ObjCRuntime runtime; StringRef value = runtimeArg->getValue(); if (runtime.tryParse(value)) { getToolChain().getDriver().Diag(diag::err_drv_unknown_objc_runtime) << value; } runtimeArg->render(args, cmdArgs); return runtime; } // Otherwise, we'll need the ABI "version". Version numbers are // slightly confusing for historical reasons: // 1 - Traditional "fragile" ABI // 2 - Non-fragile ABI, version 1 // 3 - Non-fragile ABI, version 2 unsigned objcABIVersion = 1; // If -fobjc-abi-version= is present, use that to set the version. if (Arg *abiArg = args.getLastArg(options::OPT_fobjc_abi_version_EQ)) { StringRef value = abiArg->getValue(); if (value == "1") objcABIVersion = 1; else if (value == "2") objcABIVersion = 2; else if (value == "3") objcABIVersion = 3; else getToolChain().getDriver().Diag(diag::err_drv_clang_unsupported) << value; } else { // Otherwise, determine if we are using the non-fragile ABI. bool nonFragileABIIsDefault = (rewriteKind == RK_NonFragile || (rewriteKind == RK_None && getToolChain().IsObjCNonFragileABIDefault())); if (args.hasFlag(options::OPT_fobjc_nonfragile_abi, options::OPT_fno_objc_nonfragile_abi, nonFragileABIIsDefault)) { // Determine the non-fragile ABI version to use. #ifdef DISABLE_DEFAULT_NONFRAGILEABI_TWO unsigned nonFragileABIVersion = 1; #else unsigned nonFragileABIVersion = 2; #endif if (Arg *abiArg = args.getLastArg(options::OPT_fobjc_nonfragile_abi_version_EQ)) { StringRef value = abiArg->getValue(); if (value == "1") nonFragileABIVersion = 1; else if (value == "2") nonFragileABIVersion = 2; else getToolChain().getDriver().Diag(diag::err_drv_clang_unsupported) << value; } objcABIVersion = 1 + nonFragileABIVersion; } else { objcABIVersion = 1; } } // We don't actually care about the ABI version other than whether // it's non-fragile. bool isNonFragile = objcABIVersion != 1; // If we have no runtime argument, ask the toolchain for its default runtime. // However, the rewriter only really supports the Mac runtime, so assume that.
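// Typical outcomes, for orientation: on Darwin this yields the NeXT runtime keyed to the deployment target; '-fgnu-runtime' below selects GNUstep 1.6 in non-fragile mode or the GCC runtime in fragile mode; an explicit '-fobjc-runtime=gnustep-1.7' was already forwarded verbatim above.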
ObjCRuntime runtime; if (!runtimeArg) { switch (rewriteKind) { case RK_None: runtime = getToolChain().getDefaultObjCRuntime(isNonFragile); break; case RK_Fragile: runtime = ObjCRuntime(ObjCRuntime::FragileMacOSX, VersionTuple()); break; case RK_NonFragile: runtime = ObjCRuntime(ObjCRuntime::MacOSX, VersionTuple()); break; } // -fnext-runtime } else if (runtimeArg->getOption().matches(options::OPT_fnext_runtime)) { // On Darwin, make this use the default behavior for the toolchain. if (getToolChain().getTriple().isOSDarwin()) { runtime = getToolChain().getDefaultObjCRuntime(isNonFragile); // Otherwise, build for a generic macosx port. } else { runtime = ObjCRuntime(ObjCRuntime::MacOSX, VersionTuple()); } // -fgnu-runtime } else { assert(runtimeArg->getOption().matches(options::OPT_fgnu_runtime)); // Legacy behaviour is to target the gnustep runtime if we are in // non-fragile mode or the GCC runtime in fragile mode. if (isNonFragile) runtime = ObjCRuntime(ObjCRuntime::GNUstep, VersionTuple(1, 6)); else runtime = ObjCRuntime(ObjCRuntime::GCC, VersionTuple()); } cmdArgs.push_back( args.MakeArgString("-fobjc-runtime=" + runtime.getAsString())); return runtime; } static bool maybeConsumeDash(const std::string &EH, size_t &I) { bool HaveDash = (I + 1 < EH.size() && EH[I + 1] == '-'); I += HaveDash; return !HaveDash; } struct EHFlags { EHFlags() : Synch(false), Asynch(false), NoExceptC(false) {} bool Synch; bool Asynch; bool NoExceptC; }; /// /EH controls whether to run destructor cleanups when exceptions are /// thrown. There are three modifiers: /// - s: Cleanup after "synchronous" exceptions, aka C++ exceptions. /// - a: Cleanup after "asynchronous" exceptions, aka structured exceptions. /// The 'a' modifier is unimplemented and fundamentally hard in LLVM IR. /// - c: Assume that extern "C" functions are implicitly noexcept. This /// modifier is an optimization, so we ignore it for now. /// The default is /EHs-c-, meaning cleanups are disabled. static EHFlags parseClangCLEHFlags(const Driver &D, const ArgList &Args) { EHFlags EH; std::vector<std::string> EHArgs = Args.getAllArgValues(options::OPT__SLASH_EH); for (auto EHVal : EHArgs) { for (size_t I = 0, E = EHVal.size(); I != E; ++I) { switch (EHVal[I]) { case 'a': EH.Asynch = maybeConsumeDash(EHVal, I); continue; case 'c': EH.NoExceptC = maybeConsumeDash(EHVal, I); continue; case 's': EH.Synch = maybeConsumeDash(EHVal, I); continue; default: break; } D.Diag(clang::diag::err_drv_invalid_value) << "/EH" << EHVal; break; } } // FIXME: Disable C++ EH completely, until it becomes more reliable. Users // can use -Xclang to manually enable C++ EH until then. EH = EHFlags(); return EH; } void Clang::AddClangCLArgs(const ArgList &Args, ArgStringList &CmdArgs) const { unsigned RTOptionID = options::OPT__SLASH_MT; if (Args.hasArg(options::OPT__SLASH_LDd)) // The /LDd option implies /MTd. The dependent lib part can be overridden, // but defining _DEBUG is sticky.
RTOptionID = options::OPT__SLASH_MTd; if (Arg *A = Args.getLastArg(options::OPT__SLASH_M_Group)) RTOptionID = A->getOption().getID(); switch (RTOptionID) { case options::OPT__SLASH_MD: if (Args.hasArg(options::OPT__SLASH_LDd)) CmdArgs.push_back("-D_DEBUG"); CmdArgs.push_back("-D_MT"); CmdArgs.push_back("-D_DLL"); CmdArgs.push_back("--dependent-lib=msvcrt"); break; case options::OPT__SLASH_MDd: CmdArgs.push_back("-D_DEBUG"); CmdArgs.push_back("-D_MT"); CmdArgs.push_back("-D_DLL"); CmdArgs.push_back("--dependent-lib=msvcrtd"); break; case options::OPT__SLASH_MT: if (Args.hasArg(options::OPT__SLASH_LDd)) CmdArgs.push_back("-D_DEBUG"); CmdArgs.push_back("-D_MT"); CmdArgs.push_back("--dependent-lib=libcmt"); break; case options::OPT__SLASH_MTd: CmdArgs.push_back("-D_DEBUG"); CmdArgs.push_back("-D_MT"); CmdArgs.push_back("--dependent-lib=libcmtd"); break; default: llvm_unreachable("Unexpected option ID."); } // This provides POSIX compatibility (maps 'open' to '_open'), which most // users want. The /Za flag to cl.exe turns this off, but it's not // implemented in clang. CmdArgs.push_back("--dependent-lib=oldnames"); // Both /showIncludes and /E (and /EP) write to stdout. Allowing both // would produce interleaved output, so ignore /showIncludes in such cases. if (!Args.hasArg(options::OPT_E) && !Args.hasArg(options::OPT__SLASH_EP)) if (Arg *A = Args.getLastArg(options::OPT_show_includes)) A->render(Args, CmdArgs); // This controls whether or not we emit RTTI data for polymorphic types. if (Args.hasFlag(options::OPT__SLASH_GR_, options::OPT__SLASH_GR, /*default=*/false)) CmdArgs.push_back("-fno-rtti-data"); const Driver &D = getToolChain().getDriver(); EHFlags EH = parseClangCLEHFlags(D, Args); // FIXME: Do something with NoExceptC. if (EH.Synch || EH.Asynch) { CmdArgs.push_back("-fcxx-exceptions"); CmdArgs.push_back("-fexceptions"); } // /EP should expand to -E -P. if (Args.hasArg(options::OPT__SLASH_EP)) { CmdArgs.push_back("-E"); CmdArgs.push_back("-P"); } unsigned VolatileOptionID; if (getToolChain().getArch() == llvm::Triple::x86_64 || getToolChain().getArch() == llvm::Triple::x86) VolatileOptionID = options::OPT__SLASH_volatile_ms; else VolatileOptionID = options::OPT__SLASH_volatile_iso; if (Arg *A = Args.getLastArg(options::OPT__SLASH_volatile_Group)) VolatileOptionID = A->getOption().getID(); if (VolatileOptionID == options::OPT__SLASH_volatile_ms) CmdArgs.push_back("-fms-volatile"); Arg *MostGeneralArg = Args.getLastArg(options::OPT__SLASH_vmg); Arg *BestCaseArg = Args.getLastArg(options::OPT__SLASH_vmb); if (MostGeneralArg && BestCaseArg) D.Diag(clang::diag::err_drv_argument_not_allowed_with) << MostGeneralArg->getAsString(Args) << BestCaseArg->getAsString(Args); if (MostGeneralArg) { Arg *SingleArg = Args.getLastArg(options::OPT__SLASH_vms); Arg *MultipleArg = Args.getLastArg(options::OPT__SLASH_vmm); Arg *VirtualArg = Args.getLastArg(options::OPT__SLASH_vmv); Arg *FirstConflict = SingleArg ? SingleArg : MultipleArg; Arg *SecondConflict = VirtualArg ? 
VirtualArg : MultipleArg; if (FirstConflict && SecondConflict && FirstConflict != SecondConflict) D.Diag(clang::diag::err_drv_argument_not_allowed_with) << FirstConflict->getAsString(Args) << SecondConflict->getAsString(Args); if (SingleArg) CmdArgs.push_back("-fms-memptr-rep=single"); else if (MultipleArg) CmdArgs.push_back("-fms-memptr-rep=multiple"); else CmdArgs.push_back("-fms-memptr-rep=virtual"); } if (Arg *A = Args.getLastArg(options::OPT_vtordisp_mode_EQ)) A->render(Args, CmdArgs); if (!Args.hasArg(options::OPT_fdiagnostics_format_EQ)) { CmdArgs.push_back("-fdiagnostics-format"); if (Args.hasArg(options::OPT__SLASH_fallback)) CmdArgs.push_back("msvc-fallback"); else CmdArgs.push_back("msvc"); } } visualstudio::Compiler *Clang::getCLFallback() const { if (!CLFallback) CLFallback.reset(new visualstudio::Compiler(getToolChain())); return CLFallback.get(); } void ClangAs::AddMIPSTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const { StringRef CPUName; StringRef ABIName; const llvm::Triple &Triple = getToolChain().getTriple(); mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName); CmdArgs.push_back("-target-abi"); CmdArgs.push_back(ABIName.data()); } void ClangAs::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { ArgStringList CmdArgs; assert(Inputs.size() == 1 && "Unexpected number of inputs."); const InputInfo &Input = Inputs[0]; // Don't warn about "clang -w -c foo.s" Args.ClaimAllArgs(options::OPT_w); // and "clang -emit-llvm -c foo.s" Args.ClaimAllArgs(options::OPT_emit_llvm); claimNoWarnArgs(Args); // Invoke ourselves in -cc1as mode. // // FIXME: Implement custom jobs for internal actions. CmdArgs.push_back("-cc1as"); // Add the "effective" target triple. CmdArgs.push_back("-triple"); std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args, Input.getType()); CmdArgs.push_back(Args.MakeArgString(TripleStr)); // Set the output mode, we currently only expect to be used as a real // assembler. CmdArgs.push_back("-filetype"); CmdArgs.push_back("obj"); // Set the main file name, so that debug info works even with // -save-temps or preprocessed assembly. CmdArgs.push_back("-main-file-name"); CmdArgs.push_back(Clang::getBaseInputName(Args, Input)); // Add the target cpu const llvm::Triple Triple(TripleStr); std::string CPU = getCPUName(Args, Triple, /*FromAs*/ true); if (!CPU.empty()) { CmdArgs.push_back("-target-cpu"); CmdArgs.push_back(Args.MakeArgString(CPU)); } // Add the target features const Driver &D = getToolChain().getDriver(); getTargetFeatures(D, Triple, Args, CmdArgs, true); // Ignore explicit -force_cpusubtype_ALL option. (void)Args.hasArg(options::OPT_force__cpusubtype__ALL); // Pass along any -I options so we get proper .include search paths. Args.AddAllArgs(CmdArgs, options::OPT_I_Group); // Determine the original source input. const Action *SourceAction = &JA; while (SourceAction->getKind() != Action::InputClass) { assert(!SourceAction->getInputs().empty() && "unexpected root action!"); SourceAction = SourceAction->getInputs()[0]; } // Forward -g and handle debug info related flags, assuming we are dealing // with an actual assembly file. 
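// e.g. 'clang -g -gdwarf-2 -c foo.s' drives -cc1as with '-g -gdwarf-2' plus a '-dwarf-debug-producer' string carrying the full clang version.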
if (SourceAction->getType() == types::TY_Asm || SourceAction->getType() == types::TY_PP_Asm) { Args.ClaimAllArgs(options::OPT_g_Group); if (Arg *A = Args.getLastArg(options::OPT_g_Group)) if (!A->getOption().matches(options::OPT_g0)) CmdArgs.push_back("-g"); if (Args.hasArg(options::OPT_gdwarf_2)) CmdArgs.push_back("-gdwarf-2"); if (Args.hasArg(options::OPT_gdwarf_3)) CmdArgs.push_back("-gdwarf-3"); if (Args.hasArg(options::OPT_gdwarf_4)) CmdArgs.push_back("-gdwarf-4"); // Add the -fdebug-compilation-dir flag if needed. addDebugCompDirArg(Args, CmdArgs); // Set the AT_producer to the clang version when using the integrated // assembler on assembly source files. CmdArgs.push_back("-dwarf-debug-producer"); CmdArgs.push_back(Args.MakeArgString(getClangFullVersion())); } // Optionally embed the -cc1as level arguments into the debug info, for build // analysis. if (getToolChain().UseDwarfDebugFlags()) { ArgStringList OriginalArgs; for (const auto &Arg : Args) Arg->render(Args, OriginalArgs); SmallString<256> Flags; const char *Exec = getToolChain().getDriver().getClangProgramPath(); Flags += Exec; for (const char *OriginalArg : OriginalArgs) { SmallString<128> EscapedArg; EscapeSpacesAndBackslashes(OriginalArg, EscapedArg); Flags += " "; Flags += EscapedArg; } CmdArgs.push_back("-dwarf-debug-flags"); CmdArgs.push_back(Args.MakeArgString(Flags)); } // FIXME: Add -static support, once we have it. // Add target specific flags. switch (getToolChain().getArch()) { default: break; case llvm::Triple::mips: case llvm::Triple::mipsel: case llvm::Triple::mips64: case llvm::Triple::mips64el: AddMIPSTargetArgs(Args, CmdArgs); break; } // Consume all the warning flags. Usually this would be handled more // gracefully by -cc1 (warning about unknown warning flags, etc) but -cc1as // doesn't handle that so rather than warning about unused flags that are // actually used, we'll lie by omission instead. // FIXME: Stop lying and consume only the appropriate driver flags for (const Arg *A : Args.filtered(options::OPT_W_Group)) A->claim(); CollectArgsForIntegratedAssembler(C, Args, CmdArgs, getToolChain().getDriver()); Args.AddAllArgs(CmdArgs, options::OPT_mllvm); assert(Output.isFilename() && "Unexpected lipo output."); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); assert(Input.isFilename() && "Invalid input."); CmdArgs.push_back(Input.getFilename()); const char *Exec = getToolChain().getDriver().getClangProgramPath(); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); // Handle the debug info splitting at object creation time if we're // creating an object. // TODO: Currently only works on linux with newer objcopy. if (Args.hasArg(options::OPT_gsplit_dwarf) && getToolChain().getTriple().isOSLinux()) SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output, SplitDebugName(Args, Input)); } void GnuTool::anchor() {} void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { const Driver &D = getToolChain().getDriver(); ArgStringList CmdArgs; for (const auto &A : Args) { if (forwardToGCC(A->getOption())) { // Don't forward any -g arguments to assembly steps. if (isa<AssembleJobAction>(JA) && A->getOption().matches(options::OPT_g_Group)) continue; // Don't forward any -W arguments to assembly and link steps.
if ((isa<AssembleJobAction>(JA) || isa<LinkJobAction>(JA)) && A->getOption().matches(options::OPT_W_Group)) continue; // It is unfortunate that we have to claim here, as this means // we will basically never report anything interesting for // platforms using a generic gcc, even if we are just using gcc // to get to the assembler. A->claim(); A->render(Args, CmdArgs); } } RenderExtraToolArgs(JA, CmdArgs); // If using a driver driver, force the arch. if (getToolChain().getTriple().isOSDarwin()) { CmdArgs.push_back("-arch"); CmdArgs.push_back( Args.MakeArgString(getToolChain().getDefaultUniversalArchName())); } // Try to force gcc to match the tool chain we want, if we recognize // the arch. // // FIXME: The triple class should directly provide the information we want // here. const llvm::Triple::ArchType Arch = getToolChain().getArch(); if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::ppc) CmdArgs.push_back("-m32"); else if (Arch == llvm::Triple::x86_64 || Arch == llvm::Triple::ppc64 || Arch == llvm::Triple::ppc64le) CmdArgs.push_back("-m64"); if (Output.isFilename()) { CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); } else { assert(Output.isNothing() && "Unexpected output"); CmdArgs.push_back("-fsyntax-only"); } Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); // Only pass -x if gcc will understand it; otherwise hope gcc // understands the suffix correctly. The main use case this would go // wrong in is for linker inputs if they happened to have an odd // suffix; really the only way to get this to happen is a command // like '-x foobar a.c' which will treat a.c like a linker input. // // FIXME: For the linker case specifically, can we safely convert // inputs into '-Wl,' options? for (const auto &II : Inputs) { // Don't try to pass LLVM or AST inputs to a generic gcc. if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR || II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC) D.Diag(diag::err_drv_no_linker_llvm_support) << getToolChain().getTripleString(); else if (II.getType() == types::TY_AST) D.Diag(diag::err_drv_no_ast_support) << getToolChain().getTripleString(); else if (II.getType() == types::TY_ModuleFile) D.Diag(diag::err_drv_no_module_support) << getToolChain().getTripleString(); if (types::canTypeBeUserSpecified(II.getType())) { CmdArgs.push_back("-x"); CmdArgs.push_back(types::getTypeName(II.getType())); } if (II.isFilename()) CmdArgs.push_back(II.getFilename()); else { const Arg &A = II.getInputArg(); // Reverse translate some rewritten options. if (A.getOption().matches(options::OPT_Z_reserved_lib_stdcxx)) { CmdArgs.push_back("-lstdc++"); continue; } // Don't render as input, we need gcc to do the translations. A.render(Args, CmdArgs); } } const std::string customGCCName = D.getCCCGenericGCCName(); const char *GCCName; if (!customGCCName.empty()) GCCName = customGCCName.c_str(); else if (D.CCCIsCXX()) { GCCName = "g++"; } else GCCName = "gcc"; const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath(GCCName)); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); } void gcc::Preprocessor::RenderExtraToolArgs(const JobAction &JA, ArgStringList &CmdArgs) const { CmdArgs.push_back("-E"); } void gcc::Compiler::RenderExtraToolArgs(const JobAction &JA, ArgStringList &CmdArgs) const { const Driver &D = getToolChain().getDriver(); switch (JA.getType()) { // If -flto, etc. are present then make sure not to force assembly output.
case types::TY_LLVM_IR: case types::TY_LTO_IR: case types::TY_LLVM_BC: case types::TY_LTO_BC: CmdArgs.push_back("-c"); break; case types::TY_PP_Asm: CmdArgs.push_back("-S"); break; case types::TY_Nothing: CmdArgs.push_back("-fsyntax-only"); break; default: D.Diag(diag::err_drv_invalid_gcc_output_type) << types::getTypeName(JA.getType()); } } void gcc::Linker::RenderExtraToolArgs(const JobAction &JA, ArgStringList &CmdArgs) const { // The types are (hopefully) good enough. } // Hexagon tools start. void hexagon::Assembler::RenderExtraToolArgs(const JobAction &JA, ArgStringList &CmdArgs) const {} void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { claimNoWarnArgs(Args); const Driver &D = getToolChain().getDriver(); ArgStringList CmdArgs; std::string MarchString = "-march="; MarchString += toolchains::Hexagon_TC::GetTargetCPU(Args); CmdArgs.push_back(Args.MakeArgString(MarchString)); RenderExtraToolArgs(JA, CmdArgs); if (Output.isFilename()) { CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); } else { assert(Output.isNothing() && "Unexpected output"); CmdArgs.push_back("-fsyntax-only"); } if (const char *v = toolchains::Hexagon_TC::GetSmallDataThreshold(Args)) CmdArgs.push_back(Args.MakeArgString(std::string("-G") + v)); Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); // Only pass -x if gcc will understand it; otherwise hope gcc // understands the suffix correctly. The main use case this would go // wrong in is for linker inputs if they happened to have an odd // suffix; really the only way to get this to happen is a command // like '-x foobar a.c' which will treat a.c like a linker input. // // FIXME: For the linker case specifically, can we safely convert // inputs into '-Wl,' options? for (const auto &II : Inputs) { // Don't try to pass LLVM or AST inputs to a generic gcc. if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR || II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC) D.Diag(clang::diag::err_drv_no_linker_llvm_support) << getToolChain().getTripleString(); else if (II.getType() == types::TY_AST) D.Diag(clang::diag::err_drv_no_ast_support) << getToolChain().getTripleString(); else if (II.getType() == types::TY_ModuleFile) D.Diag(diag::err_drv_no_module_support) << getToolChain().getTripleString(); if (II.isFilename()) CmdArgs.push_back(II.getFilename()); else // Don't render as input, we need gcc to do the translations. // FIXME: Pranav: What is this? II.getInputArg().render(Args, CmdArgs); } const char *GCCName = "hexagon-as"; const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath(GCCName)); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); } void hexagon::Linker::RenderExtraToolArgs(const JobAction &JA, ArgStringList &CmdArgs) const { // The types are (hopefully) good enough.
} static void constructHexagonLinkArgs(Compilation &C, const JobAction &JA, const toolchains::Hexagon_TC &ToolChain, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, ArgStringList &CmdArgs, const char *LinkingOutput) { const Driver &D = ToolChain.getDriver(); //---------------------------------------------------------------------------- // //---------------------------------------------------------------------------- bool hasStaticArg = Args.hasArg(options::OPT_static); bool buildingLib = Args.hasArg(options::OPT_shared); bool buildPIE = Args.hasArg(options::OPT_pie); bool incStdLib = !Args.hasArg(options::OPT_nostdlib); bool incStartFiles = !Args.hasArg(options::OPT_nostartfiles); bool incDefLibs = !Args.hasArg(options::OPT_nodefaultlibs); bool useG0 = false; bool useShared = buildingLib && !hasStaticArg; //---------------------------------------------------------------------------- // Silence warnings for various options //---------------------------------------------------------------------------- Args.ClaimAllArgs(options::OPT_g_Group); Args.ClaimAllArgs(options::OPT_emit_llvm); Args.ClaimAllArgs(options::OPT_w); // Other warning options are already // handled somewhere else. Args.ClaimAllArgs(options::OPT_static_libgcc); //---------------------------------------------------------------------------- // //---------------------------------------------------------------------------- for (const auto &Opt : ToolChain.ExtraOpts) CmdArgs.push_back(Opt.c_str()); std::string MarchString = toolchains::Hexagon_TC::GetTargetCPU(Args); CmdArgs.push_back(Args.MakeArgString("-m" + MarchString)); if (buildingLib) { CmdArgs.push_back("-shared"); CmdArgs.push_back("-call_shared"); // should be the default, but doing as // hexagon-gcc does } if (hasStaticArg) CmdArgs.push_back("-static"); if (buildPIE && !buildingLib) CmdArgs.push_back("-pie"); if (const char *v = toolchains::Hexagon_TC::GetSmallDataThreshold(Args)) { CmdArgs.push_back(Args.MakeArgString(std::string("-G") + v)); useG0 = toolchains::Hexagon_TC::UsesG0(v); } //---------------------------------------------------------------------------- // //---------------------------------------------------------------------------- CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); const std::string MarchSuffix = "/" + MarchString; const std::string G0Suffix = "/G0"; const std::string MarchG0Suffix = MarchSuffix + G0Suffix; const std::string RootDir = toolchains::Hexagon_TC::GetGnuDir(D.InstalledDir, Args) + "/"; const std::string StartFilesDir = RootDir + "hexagon/lib" + (useG0 ? MarchG0Suffix : MarchSuffix); //---------------------------------------------------------------------------- // moslib //---------------------------------------------------------------------------- std::vector<std::string> oslibs; bool hasStandalone = false; for (const Arg *A : Args.filtered(options::OPT_moslib_EQ)) { A->claim(); oslibs.emplace_back(A->getValue()); hasStandalone = hasStandalone || (oslibs.back() == "standalone"); } if (oslibs.empty()) { oslibs.push_back("standalone"); hasStandalone = true; } //---------------------------------------------------------------------------- // Start Files //---------------------------------------------------------------------------- if (incStdLib && incStartFiles) { if (!buildingLib) { if (hasStandalone) { CmdArgs.push_back( Args.MakeArgString(StartFilesDir + "/crt0_standalone.o")); } CmdArgs.push_back(Args.MakeArgString(StartFilesDir + "/crt0.o")); } std::string initObj = useShared ?
"/initS.o" : "/init.o"; CmdArgs.push_back(Args.MakeArgString(StartFilesDir + initObj)); } //---------------------------------------------------------------------------- // Library Search Paths //---------------------------------------------------------------------------- const ToolChain::path_list &LibPaths = ToolChain.getFilePaths(); for (const auto &LibPath : LibPaths) CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath)); //---------------------------------------------------------------------------- // //---------------------------------------------------------------------------- Args.AddAllArgs(CmdArgs, options::OPT_T_Group); Args.AddAllArgs(CmdArgs, options::OPT_e); Args.AddAllArgs(CmdArgs, options::OPT_s); Args.AddAllArgs(CmdArgs, options::OPT_t); Args.AddAllArgs(CmdArgs, options::OPT_u_Group); AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs); //---------------------------------------------------------------------------- // Libraries //---------------------------------------------------------------------------- if (incStdLib && incDefLibs) { if (D.CCCIsCXX()) { ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs); CmdArgs.push_back("-lm"); } CmdArgs.push_back("--start-group"); if (!buildingLib) { for (const std::string &Lib : oslibs) CmdArgs.push_back(Args.MakeArgString("-l" + Lib)); CmdArgs.push_back("-lc"); } CmdArgs.push_back("-lgcc"); CmdArgs.push_back("--end-group"); } //---------------------------------------------------------------------------- // End files //---------------------------------------------------------------------------- if (incStdLib && incStartFiles) { std::string finiObj = useShared ? "/finiS.o" : "/fini.o"; CmdArgs.push_back(Args.MakeArgString(StartFilesDir + finiObj)); } } void hexagon::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { const toolchains::Hexagon_TC &ToolChain = static_cast(getToolChain()); ArgStringList CmdArgs; constructHexagonLinkArgs(C, JA, ToolChain, Output, Inputs, Args, CmdArgs, LinkingOutput); std::string Linker = ToolChain.GetProgramPath("hexagon-ld"); C.addCommand(llvm::make_unique(JA, *this, Args.MakeArgString(Linker), CmdArgs)); } // Hexagon tools end. const std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) { std::string MArch; if (!Arch.empty()) MArch = Arch; else MArch = Triple.getArchName(); MArch = StringRef(MArch).lower(); // Handle -march=native. if (MArch == "native") { std::string CPU = llvm::sys::getHostCPUName(); if (CPU != "generic") { // Translate the native cpu into the architecture suffix for that CPU. const char *Suffix = arm::getLLVMArchSuffixForARM(CPU, MArch); // If there is no valid architecture suffix for this CPU we don't know how // to handle it, so return no architecture. if (strcmp(Suffix, "") == 0) MArch = ""; else MArch = std::string("arm") + Suffix; } } return MArch; } /// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting. const char *arm::getARMCPUForMArch(StringRef Arch, const llvm::Triple &Triple) { std::string MArch = getARMArch(Arch, Triple); // getARMCPUForArch defaults to the triple if MArch is empty, but empty MArch // here means an -march=native that we can't handle, so instead return no CPU. if (MArch.empty()) return ""; // We need to return an empty string here on invalid MArch values as the // various places that call this function can't cope with a null result. 
const char *result = Triple.getARMCPUForArch(MArch); if (result) return result; else return ""; } /// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting. std::string arm::getARMTargetCPU(StringRef CPU, StringRef Arch, const llvm::Triple &Triple) { // FIXME: Warn on inconsistent use of -mcpu and -march. // If we have -mcpu=, use that. if (!CPU.empty()) { std::string MCPU = StringRef(CPU).lower(); // Handle -mcpu=native. if (MCPU == "native") return llvm::sys::getHostCPUName(); else return MCPU; } return getARMCPUForMArch(Arch, Triple); } /// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular /// CPU (or Arch, if CPU is generic). // FIXME: This is redundant with -mcpu, why does LLVM use this. const char *arm::getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch) { if (CPU == "generic" && llvm::ARMTargetParser::parseArch(Arch) == llvm::ARM::AK_ARMV8_1A) return "v8.1a"; unsigned ArchKind = llvm::ARMTargetParser::parseCPUArch(CPU); if (ArchKind == llvm::ARM::AK_INVALID) return ""; return llvm::ARMTargetParser::getSubArch(ArchKind); } void arm::appendEBLinkFlags(const ArgList &Args, ArgStringList &CmdArgs, const llvm::Triple &Triple) { if (Args.hasArg(options::OPT_r)) return; // ARMv7 (and later) and ARMv6-M do not support BE-32, so instruct the linker // to generate BE-8 executables. if (getARMSubArchVersionNumber(Triple) >= 7 || isARMMProfile(Triple)) CmdArgs.push_back("--be8"); } mips::NanEncoding mips::getSupportedNanEncoding(StringRef &CPU) { return (NanEncoding)llvm::StringSwitch<int>(CPU) .Case("mips1", NanLegacy) .Case("mips2", NanLegacy) .Case("mips3", NanLegacy) .Case("mips4", NanLegacy) .Case("mips5", NanLegacy) .Case("mips32", NanLegacy) .Case("mips32r2", NanLegacy) .Case("mips32r3", NanLegacy | Nan2008) .Case("mips32r5", NanLegacy | Nan2008) .Case("mips32r6", Nan2008) .Case("mips64", NanLegacy) .Case("mips64r2", NanLegacy) .Case("mips64r3", NanLegacy | Nan2008) .Case("mips64r5", NanLegacy | Nan2008) .Case("mips64r6", Nan2008) .Default(NanLegacy); } bool mips::hasMipsAbiArg(const ArgList &Args, const char *Value) { Arg *A = Args.getLastArg(options::OPT_mabi_EQ); return A && (A->getValue() == StringRef(Value)); } bool mips::isUCLibc(const ArgList &Args) { Arg *A = Args.getLastArg(options::OPT_m_libc_Group); return A && A->getOption().matches(options::OPT_muclibc); } bool mips::isNaN2008(const ArgList &Args, const llvm::Triple &Triple) { if (Arg *NaNArg = Args.getLastArg(options::OPT_mnan_EQ)) return llvm::StringSwitch<bool>(NaNArg->getValue()) .Case("2008", true) .Case("legacy", false) .Default(false); // NaN2008 is the default for MIPS32r6/MIPS64r6. return llvm::StringSwitch<bool>(getCPUName(Args, Triple)) .Cases("mips32r6", "mips64r6", true) .Default(false); } bool mips::isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName, StringRef ABIName, StringRef FloatABI) { if (Triple.getVendor() != llvm::Triple::ImaginationTechnologies && Triple.getVendor() != llvm::Triple::MipsTechnologies) return false; if (ABIName != "32") return false; // FPXX shouldn't be used if either -msoft-float or -mfloat-abi=soft is // present.
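// (FPXX restricts O32 code to the FPU register subset shared by the FP32 and // FP64 modes, so the same objects can link and run under either; soft-float // builds have no use for it, and single-float builds opt out in // shouldUseFPXX below.)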
if (FloatABI == "soft") return false; return llvm::StringSwitch(CPUName) .Cases("mips2", "mips3", "mips4", "mips5", true) .Cases("mips32", "mips32r2", "mips32r3", "mips32r5", true) .Cases("mips64", "mips64r2", "mips64r3", "mips64r5", true) .Default(false); } bool mips::shouldUseFPXX(const ArgList &Args, const llvm::Triple &Triple, StringRef CPUName, StringRef ABIName, StringRef FloatABI) { bool UseFPXX = isFPXXDefault(Triple, CPUName, ABIName, FloatABI); // FPXX shouldn't be used if -msingle-float is present. if (Arg *A = Args.getLastArg(options::OPT_msingle_float, options::OPT_mdouble_float)) if (A->getOption().matches(options::OPT_msingle_float)) UseFPXX = false; return UseFPXX; } llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) { // See arch(3) and llvm-gcc's driver-driver.c. We don't implement support for // archs which Darwin doesn't use. // The matching this routine does is fairly pointless, since it is neither the // complete architecture list, nor a reasonable subset. The problem is that // historically the driver driver accepts this and also ties its -march= // handling to the architecture name, so we need to be careful before removing // support for it. // This code must be kept in sync with Clang's Darwin specific argument // translation. return llvm::StringSwitch(Str) .Cases("ppc", "ppc601", "ppc603", "ppc604", "ppc604e", llvm::Triple::ppc) .Cases("ppc750", "ppc7400", "ppc7450", "ppc970", llvm::Triple::ppc) .Case("ppc64", llvm::Triple::ppc64) .Cases("i386", "i486", "i486SX", "i586", "i686", llvm::Triple::x86) .Cases("pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4", llvm::Triple::x86) .Cases("x86_64", "x86_64h", llvm::Triple::x86_64) // This is derived from the driver driver. .Cases("arm", "armv4t", "armv5", "armv6", "armv6m", llvm::Triple::arm) .Cases("armv7", "armv7em", "armv7k", "armv7m", llvm::Triple::arm) .Cases("armv7s", "xscale", llvm::Triple::arm) .Case("arm64", llvm::Triple::aarch64) .Case("r600", llvm::Triple::r600) .Case("amdgcn", llvm::Triple::amdgcn) .Case("nvptx", llvm::Triple::nvptx) .Case("nvptx64", llvm::Triple::nvptx64) .Case("amdil", llvm::Triple::amdil) .Case("spir", llvm::Triple::spir) .Default(llvm::Triple::UnknownArch); } void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str) { const llvm::Triple::ArchType Arch = getArchTypeForMachOArchName(Str); T.setArch(Arch); if (Str == "x86_64h") T.setArchName(Str); else if (Str == "armv6m" || Str == "armv7m" || Str == "armv7em") { T.setOS(llvm::Triple::UnknownOS); T.setObjectFormat(llvm::Triple::MachO); } } const char *Clang::getBaseInputName(const ArgList &Args, const InputInfo &Input) { return Args.MakeArgString(llvm::sys::path::filename(Input.getBaseInput())); } const char *Clang::getBaseInputStem(const ArgList &Args, const InputInfoList &Inputs) { const char *Str = getBaseInputName(Args, Inputs[0]); if (const char *End = strrchr(Str, '.')) return Args.MakeArgString(std::string(Str, End)); return Str; } const char *Clang::getDependencyFileName(const ArgList &Args, const InputInfoList &Inputs) { // FIXME: Think about this more. 
std::string Res; if (Arg *OutputOpt = Args.getLastArg(options::OPT_o)) { std::string Str(OutputOpt->getValue()); Res = Str.substr(0, Str.rfind('.')); } else { Res = getBaseInputStem(Args, Inputs); } return Args.MakeArgString(Res + ".d"); } void cloudabi::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { const ToolChain &ToolChain = getToolChain(); const Driver &D = ToolChain.getDriver(); ArgStringList CmdArgs; // Silence warning for "clang -g foo.o -o foo" Args.ClaimAllArgs(options::OPT_g_Group); // and "clang -emit-llvm foo.o -o foo" Args.ClaimAllArgs(options::OPT_emit_llvm); // and for "clang -w foo.o -o foo". Other warning options are already // handled somewhere else. Args.ClaimAllArgs(options::OPT_w); if (!D.SysRoot.empty()) CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot)); // CloudABI only supports static linkage. CmdArgs.push_back("-Bstatic"); CmdArgs.push_back("--eh-frame-hdr"); CmdArgs.push_back("--gc-sections"); if (Output.isFilename()) { CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); } else { assert(Output.isNothing() && "Invalid output."); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbegin.o"))); } Args.AddAllArgs(CmdArgs, options::OPT_L); const ToolChain::path_list &Paths = ToolChain.getFilePaths(); for (const auto &Path : Paths) CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path)); Args.AddAllArgs(CmdArgs, options::OPT_T_Group); Args.AddAllArgs(CmdArgs, options::OPT_e); Args.AddAllArgs(CmdArgs, options::OPT_s); Args.AddAllArgs(CmdArgs, options::OPT_t); Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag); Args.AddAllArgs(CmdArgs, options::OPT_r); if (D.IsUsingLTO(Args)) AddGoldPlugin(ToolChain, Args, CmdArgs); AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { if (D.CCCIsCXX()) ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs); CmdArgs.push_back("-lc"); CmdArgs.push_back("-lcompiler_rt"); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o"))); const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath()); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); } void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { ArgStringList CmdArgs; assert(Inputs.size() == 1 && "Unexpected number of inputs."); const InputInfo &Input = Inputs[0]; // Determine the original source input. const Action *SourceAction = &JA; while (SourceAction->getKind() != Action::InputClass) { assert(!SourceAction->getInputs().empty() && "unexpected root action!"); SourceAction = SourceAction->getInputs()[0]; } // If -fno_integrated_as is used, add -Q to the darwin assembler driver to make // sure it runs its system assembler not clang's integrated assembler. // Applicable to darwin11+ and Xcode 4+. darwin<10 lacked integrated-as.
// FIXME: at run-time detect assembler capabilities or rely on version // information forwarded by -target-assembler-version (future) if (Args.hasArg(options::OPT_fno_integrated_as)) { const llvm::Triple &T(getToolChain().getTriple()); if (!(T.isMacOSX() && T.isMacOSXVersionLT(10, 7))) CmdArgs.push_back("-Q"); } // Forward -g, assuming we are dealing with an actual assembly file. if (SourceAction->getType() == types::TY_Asm || SourceAction->getType() == types::TY_PP_Asm) { if (Args.hasArg(options::OPT_gstabs)) CmdArgs.push_back("--gstabs"); else if (Args.hasArg(options::OPT_g_Group)) CmdArgs.push_back("-g"); } // Derived from asm spec. AddMachOArch(Args, CmdArgs); // Use -force_cpusubtype_ALL on x86 by default. if (getToolChain().getArch() == llvm::Triple::x86 || getToolChain().getArch() == llvm::Triple::x86_64 || Args.hasArg(options::OPT_force__cpusubtype__ALL)) CmdArgs.push_back("-force_cpusubtype_ALL"); if (getToolChain().getArch() != llvm::Triple::x86_64 && (((Args.hasArg(options::OPT_mkernel) || Args.hasArg(options::OPT_fapple_kext)) && getMachOToolChain().isKernelStatic()) || Args.hasArg(options::OPT_static))) CmdArgs.push_back("-static"); Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); assert(Output.isFilename() && "Unexpected lipo output."); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); assert(Input.isFilename() && "Invalid input."); CmdArgs.push_back(Input.getFilename()); // asm_final spec is empty. const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as")); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); } void darwin::MachOTool::anchor() {} void darwin::MachOTool::AddMachOArch(const ArgList &Args, ArgStringList &CmdArgs) const { StringRef ArchName = getMachOToolChain().getMachOArchName(Args); // Derived from darwin_arch spec. CmdArgs.push_back("-arch"); CmdArgs.push_back(Args.MakeArgString(ArchName)); // FIXME: Is this needed anymore? if (ArchName == "arm") CmdArgs.push_back("-force_cpusubtype_ALL"); } bool darwin::Linker::NeedsTempPath(const InputInfoList &Inputs) const { // We only need to generate a temp path for LTO if we aren't compiling object // files. When compiling source files, we run 'dsymutil' after linking. We // don't run 'dsymutil' when compiling object files. for (const auto &Input : Inputs) if (Input.getType() != types::TY_Object) return true; return false; } void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args, ArgStringList &CmdArgs, const InputInfoList &Inputs) const { const Driver &D = getToolChain().getDriver(); const toolchains::MachO &MachOTC = getMachOToolChain(); unsigned Version[3] = {0, 0, 0}; if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) { bool HadExtra; if (!Driver::GetReleaseVersion(A->getValue(), Version[0], Version[1], Version[2], HadExtra) || HadExtra) D.Diag(diag::err_drv_invalid_version_number) << A->getAsString(Args); } // Newer linkers support -demangle. Pass it if supported and not disabled by // the user. if (Version[0] >= 100 && !Args.hasArg(options::OPT_Z_Xlinker__no_demangle)) CmdArgs.push_back("-demangle"); if (Args.hasArg(options::OPT_rdynamic) && Version[0] >= 137) CmdArgs.push_back("-export_dynamic"); // If we are using App Extension restrictions, pass a flag to the linker // telling it that the compiled code has been audited.
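// (hasFlag honors whichever of -fapplication_extension and // -fno-application_extension comes last, and defaults to false when neither // is given.)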
if (Args.hasFlag(options::OPT_fapplication_extension, options::OPT_fno_application_extension, false)) CmdArgs.push_back("-application_extension"); // If we are using LTO, then automatically create a temporary file path for // the linker to use, so that its lifetime will extend past a possible // dsymutil step. if (Version[0] >= 116 && D.IsUsingLTO(Args) && NeedsTempPath(Inputs)) { const char *TmpPath = C.getArgs().MakeArgString( D.GetTemporaryPath("cc", types::getTypeTempSuffix(types::TY_Object))); C.addTempFile(TmpPath); CmdArgs.push_back("-object_path_lto"); CmdArgs.push_back(TmpPath); } // Derived from the "link" spec. Args.AddAllArgs(CmdArgs, options::OPT_static); if (!Args.hasArg(options::OPT_static)) CmdArgs.push_back("-dynamic"); if (Args.hasArg(options::OPT_fgnu_runtime)) { // FIXME: gcc replaces -lobjc in forward args with -lobjc-gnu // here. How do we wish to handle such things? } if (!Args.hasArg(options::OPT_dynamiclib)) { AddMachOArch(Args, CmdArgs); // FIXME: Why do this only on this path? Args.AddLastArg(CmdArgs, options::OPT_force__cpusubtype__ALL); Args.AddLastArg(CmdArgs, options::OPT_bundle); Args.AddAllArgs(CmdArgs, options::OPT_bundle__loader); Args.AddAllArgs(CmdArgs, options::OPT_client__name); Arg *A; if ((A = Args.getLastArg(options::OPT_compatibility__version)) || (A = Args.getLastArg(options::OPT_current__version)) || (A = Args.getLastArg(options::OPT_install__name))) D.Diag(diag::err_drv_argument_only_allowed_with) << A->getAsString(Args) << "-dynamiclib"; Args.AddLastArg(CmdArgs, options::OPT_force__flat__namespace); Args.AddLastArg(CmdArgs, options::OPT_keep__private__externs); Args.AddLastArg(CmdArgs, options::OPT_private__bundle); } else { CmdArgs.push_back("-dylib"); Arg *A; if ((A = Args.getLastArg(options::OPT_bundle)) || (A = Args.getLastArg(options::OPT_bundle__loader)) || (A = Args.getLastArg(options::OPT_client__name)) || (A = Args.getLastArg(options::OPT_force__flat__namespace)) || (A = Args.getLastArg(options::OPT_keep__private__externs)) || (A = Args.getLastArg(options::OPT_private__bundle))) D.Diag(diag::err_drv_argument_not_allowed_with) << A->getAsString(Args) << "-dynamiclib"; Args.AddAllArgsTranslated(CmdArgs, options::OPT_compatibility__version, "-dylib_compatibility_version"); Args.AddAllArgsTranslated(CmdArgs, options::OPT_current__version, "-dylib_current_version"); AddMachOArch(Args, CmdArgs); Args.AddAllArgsTranslated(CmdArgs, options::OPT_install__name, "-dylib_install_name"); } Args.AddLastArg(CmdArgs, options::OPT_all__load); Args.AddAllArgs(CmdArgs, options::OPT_allowable__client); Args.AddLastArg(CmdArgs, options::OPT_bind__at__load); if (MachOTC.isTargetIOSBased()) Args.AddLastArg(CmdArgs, options::OPT_arch__errors__fatal); Args.AddLastArg(CmdArgs, options::OPT_dead__strip); Args.AddLastArg(CmdArgs, options::OPT_no__dead__strip__inits__and__terms); Args.AddAllArgs(CmdArgs, options::OPT_dylib__file); Args.AddLastArg(CmdArgs, options::OPT_dynamic); Args.AddAllArgs(CmdArgs, options::OPT_exported__symbols__list); Args.AddLastArg(CmdArgs, options::OPT_flat__namespace); Args.AddAllArgs(CmdArgs, options::OPT_force__load); Args.AddAllArgs(CmdArgs, options::OPT_headerpad__max__install__names); Args.AddAllArgs(CmdArgs, options::OPT_image__base); Args.AddAllArgs(CmdArgs, options::OPT_init); // Add the deployment target.
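// (Typically something like "-macosx_version_min 10.10" or // "-iphoneos_version_min 8.0", depending on the effective triple.)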
MachOTC.addMinVersionArgs(Args, CmdArgs); Args.AddLastArg(CmdArgs, options::OPT_nomultidefs); Args.AddLastArg(CmdArgs, options::OPT_multi__module); Args.AddLastArg(CmdArgs, options::OPT_single__module); Args.AddAllArgs(CmdArgs, options::OPT_multiply__defined); Args.AddAllArgs(CmdArgs, options::OPT_multiply__defined__unused); if (const Arg *A = Args.getLastArg(options::OPT_fpie, options::OPT_fPIE, options::OPT_fno_pie, options::OPT_fno_PIE)) { if (A->getOption().matches(options::OPT_fpie) || A->getOption().matches(options::OPT_fPIE)) CmdArgs.push_back("-pie"); else CmdArgs.push_back("-no_pie"); } Args.AddLastArg(CmdArgs, options::OPT_prebind); Args.AddLastArg(CmdArgs, options::OPT_noprebind); Args.AddLastArg(CmdArgs, options::OPT_nofixprebinding); Args.AddLastArg(CmdArgs, options::OPT_prebind__all__twolevel__modules); Args.AddLastArg(CmdArgs, options::OPT_read__only__relocs); Args.AddAllArgs(CmdArgs, options::OPT_sectcreate); Args.AddAllArgs(CmdArgs, options::OPT_sectorder); Args.AddAllArgs(CmdArgs, options::OPT_seg1addr); Args.AddAllArgs(CmdArgs, options::OPT_segprot); Args.AddAllArgs(CmdArgs, options::OPT_segaddr); Args.AddAllArgs(CmdArgs, options::OPT_segs__read__only__addr); Args.AddAllArgs(CmdArgs, options::OPT_segs__read__write__addr); Args.AddAllArgs(CmdArgs, options::OPT_seg__addr__table); Args.AddAllArgs(CmdArgs, options::OPT_seg__addr__table__filename); Args.AddAllArgs(CmdArgs, options::OPT_sub__library); Args.AddAllArgs(CmdArgs, options::OPT_sub__umbrella); // Give --sysroot= preference, over the Apple specific behavior to also use // --isysroot as the syslibroot. StringRef sysroot = C.getSysRoot(); if (sysroot != "") { CmdArgs.push_back("-syslibroot"); CmdArgs.push_back(C.getArgs().MakeArgString(sysroot)); } else if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) { CmdArgs.push_back("-syslibroot"); CmdArgs.push_back(A->getValue()); } Args.AddLastArg(CmdArgs, options::OPT_twolevel__namespace); Args.AddLastArg(CmdArgs, options::OPT_twolevel__namespace__hints); Args.AddAllArgs(CmdArgs, options::OPT_umbrella); Args.AddAllArgs(CmdArgs, options::OPT_undefined); Args.AddAllArgs(CmdArgs, options::OPT_unexported__symbols__list); Args.AddAllArgs(CmdArgs, options::OPT_weak__reference__mismatches); Args.AddLastArg(CmdArgs, options::OPT_X_Flag); Args.AddAllArgs(CmdArgs, options::OPT_y); Args.AddLastArg(CmdArgs, options::OPT_w); Args.AddAllArgs(CmdArgs, options::OPT_pagezero__size); Args.AddAllArgs(CmdArgs, options::OPT_segs__read__); Args.AddLastArg(CmdArgs, options::OPT_seglinkedit); Args.AddLastArg(CmdArgs, options::OPT_noseglinkedit); Args.AddAllArgs(CmdArgs, options::OPT_sectalign); Args.AddAllArgs(CmdArgs, options::OPT_sectobjectsymbols); Args.AddAllArgs(CmdArgs, options::OPT_segcreate); Args.AddLastArg(CmdArgs, options::OPT_whyload); Args.AddLastArg(CmdArgs, options::OPT_whatsloaded); Args.AddAllArgs(CmdArgs, options::OPT_dylinker__install__name); Args.AddLastArg(CmdArgs, options::OPT_dylinker); Args.AddLastArg(CmdArgs, options::OPT_Mach); } void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { assert(Output.getType() == types::TY_Image && "Invalid linker output type."); // If the number of arguments surpasses the system limits, we will encode the // input files in a separate file, shortening the command line. To this end, // build a list of input file names that can be passed via a file with the // -filelist linker option. 
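// (That is, rather than passing every object on the command line, the driver // may run "ld ... -filelist <path>" where <path> lists one input file per // line; see the setInputFileList call below.)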
llvm::opt::ArgStringList InputFileList; // The logic here is derived from gcc's behavior, most of which // comes from specs (starting with link_command). Consult gcc for // more information. ArgStringList CmdArgs; /// Hack(tm) to ignore linking errors when we are doing ARC migration. if (Args.hasArg(options::OPT_ccc_arcmt_check, options::OPT_ccc_arcmt_migrate)) { for (const auto &Arg : Args) Arg->claim(); const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("touch")); CmdArgs.push_back(Output.getFilename()); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); return; } // I'm not sure why this particular decomposition exists in gcc, but // we follow suit for ease of comparison. AddLinkArgs(C, Args, CmdArgs, Inputs); Args.AddAllArgs(CmdArgs, options::OPT_d_Flag); Args.AddAllArgs(CmdArgs, options::OPT_s); Args.AddAllArgs(CmdArgs, options::OPT_t); Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag); Args.AddAllArgs(CmdArgs, options::OPT_u_Group); Args.AddLastArg(CmdArgs, options::OPT_e); Args.AddAllArgs(CmdArgs, options::OPT_r); // Forward -ObjC when either -ObjC or -ObjC++ is used, to force loading // members of static archive libraries which implement Objective-C classes or // categories. if (Args.hasArg(options::OPT_ObjC) || Args.hasArg(options::OPT_ObjCXX)) CmdArgs.push_back("-ObjC"); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) getMachOToolChain().addStartObjectFileArgs(Args, CmdArgs); // SafeStack requires its own runtime libraries // These libraries should be linked first, to make sure the // __safestack_init constructor executes before everything else if (getToolChain().getSanitizerArgs().needsSafeStackRt()) { getMachOToolChain().AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.safestack_osx.a", /*AlwaysLink=*/true); } Args.AddAllArgs(CmdArgs, options::OPT_L); - if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ, - options::OPT_fno_openmp, false)) { - switch (getOpenMPRuntime(getToolChain(), Args)) { - case OMPRT_OMP: - CmdArgs.push_back("-lomp"); - break; - case OMPRT_GOMP: - CmdArgs.push_back("-lgomp"); - break; - case OMPRT_IOMP5: - CmdArgs.push_back("-liomp5"); - break; - case OMPRT_Unknown: - // Already diagnosed. - break; - } - } - AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs); // Build the input file for -filelist (list of linker input files) in case we // need it later for (const auto &II : Inputs) { if (!II.isFilename()) { // This is a linker input argument. // We cannot mix input arguments and file names in a -filelist input, thus // we prematurely stop our list (remaining files shall be passed as // arguments). if (InputFileList.size() > 0) break; continue; } InputFileList.push_back(II.getFilename()); } + if (!Args.hasArg(options::OPT_nostdlib) && + !Args.hasArg(options::OPT_nodefaultlibs)) + addOpenMPRuntime(CmdArgs, getToolChain(), Args); + if (isObjCRuntimeLinked(Args) && !Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { // We use arclite library for both ARC and subscripting support. getMachOToolChain().AddLinkARCArgs(Args, CmdArgs); CmdArgs.push_back("-framework"); CmdArgs.push_back("Foundation"); // Link libobjc.
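// (libarclite, added above via AddLinkARCArgs, essentially supplies ARC and // subscripting compatibility shims for older deployment targets; -lobjc below // links the Objective-C runtime itself.)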
CmdArgs.push_back("-lobjc"); } if (LinkingOutput) { CmdArgs.push_back("-arch_multiple"); CmdArgs.push_back("-final_output"); CmdArgs.push_back(LinkingOutput); } if (Args.hasArg(options::OPT_fnested_functions)) CmdArgs.push_back("-allow_stack_execute"); // TODO: It would be nice to use addProfileRT() here, but darwin's compiler-rt // paths are different enough from other toolchains that this needs a fair // amount of refactoring done first. getMachOToolChain().addProfileRTLibs(Args, CmdArgs); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { if (getToolChain().getDriver().CCCIsCXX()) getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs); // link_ssp spec is empty. // Let the tool chain choose which runtime library to link. getMachOToolChain().AddLinkRuntimeLibArgs(Args, CmdArgs); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { // endfile_spec is empty. } Args.AddAllArgs(CmdArgs, options::OPT_T_Group); Args.AddAllArgs(CmdArgs, options::OPT_F); // -iframework should be forwarded as -F. for (const Arg *A : Args.filtered(options::OPT_iframework)) CmdArgs.push_back(Args.MakeArgString(std::string("-F") + A->getValue())); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { if (Arg *A = Args.getLastArg(options::OPT_fveclib)) { if (A->getValue() == StringRef("Accelerate")) { CmdArgs.push_back("-framework"); CmdArgs.push_back("Accelerate"); } } } const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath()); std::unique_ptr Cmd = llvm::make_unique(JA, *this, Exec, CmdArgs); Cmd->setInputFileList(std::move(InputFileList)); C.addCommand(std::move(Cmd)); } void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { ArgStringList CmdArgs; CmdArgs.push_back("-create"); assert(Output.isFilename() && "Unexpected lipo output."); CmdArgs.push_back("-output"); CmdArgs.push_back(Output.getFilename()); for (const auto &II : Inputs) { assert(II.isFilename() && "Unexpected lipo input."); CmdArgs.push_back(II.getFilename()); } const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("lipo")); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { ArgStringList CmdArgs; CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); assert(Inputs.size() == 1 && "Unable to handle multiple inputs."); const InputInfo &Input = Inputs[0]; assert(Input.isFilename() && "Unexpected dsymutil input."); CmdArgs.push_back(Input.getFilename()); const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("dsymutil")); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { ArgStringList CmdArgs; CmdArgs.push_back("--verify"); CmdArgs.push_back("--debug-info"); CmdArgs.push_back("--eh-frame"); CmdArgs.push_back("--quiet"); assert(Inputs.size() == 1 && "Unable to handle multiple inputs."); const InputInfo &Input = Inputs[0]; assert(Input.isFilename() && "Unexpected verify input"); // Grabbing the output of the earlier dsymutil run. 
CmdArgs.push_back(Input.getFilename()); const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("dwarfdump")); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); } void solaris::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { claimNoWarnArgs(Args); ArgStringList CmdArgs; Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); for (const auto &II : Inputs) CmdArgs.push_back(II.getFilename()); const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as")); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); } void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { // FIXME: Find a real GCC, don't hard-code versions here std::string GCCLibPath = "/usr/gcc/4.5/lib/gcc/"; const llvm::Triple &T = getToolChain().getTriple(); std::string LibPath = "/usr/lib/"; const llvm::Triple::ArchType Arch = T.getArch(); switch (Arch) { case llvm::Triple::x86: GCCLibPath += ("i386-" + T.getVendorName() + "-" + T.getOSName()).str() + "/4.5.2/"; break; case llvm::Triple::x86_64: GCCLibPath += ("i386-" + T.getVendorName() + "-" + T.getOSName()).str(); GCCLibPath += "/4.5.2/amd64/"; LibPath += "amd64/"; break; default: llvm_unreachable("Unsupported architecture"); } ArgStringList CmdArgs; // Demangle C++ names in errors CmdArgs.push_back("-C"); if ((!Args.hasArg(options::OPT_nostdlib)) && (!Args.hasArg(options::OPT_shared))) { CmdArgs.push_back("-e"); CmdArgs.push_back("_start"); } if (Args.hasArg(options::OPT_static)) { CmdArgs.push_back("-Bstatic"); CmdArgs.push_back("-dn"); } else { CmdArgs.push_back("-Bdynamic"); if (Args.hasArg(options::OPT_shared)) { CmdArgs.push_back("-shared"); } else { CmdArgs.push_back("--dynamic-linker"); CmdArgs.push_back(Args.MakeArgString(LibPath + "ld.so.1")); } } if (Output.isFilename()) { CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); } else { assert(Output.isNothing() && "Invalid output."); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) { CmdArgs.push_back(Args.MakeArgString(LibPath + "crt1.o")); CmdArgs.push_back(Args.MakeArgString(LibPath + "crti.o")); CmdArgs.push_back(Args.MakeArgString(LibPath + "values-Xa.o")); CmdArgs.push_back(Args.MakeArgString(GCCLibPath + "crtbegin.o")); } else { CmdArgs.push_back(Args.MakeArgString(LibPath + "crti.o")); CmdArgs.push_back(Args.MakeArgString(LibPath + "values-Xa.o")); CmdArgs.push_back(Args.MakeArgString(GCCLibPath + "crtbegin.o")); } if (getToolChain().getDriver().CCCIsCXX()) CmdArgs.push_back(Args.MakeArgString(LibPath + "cxa_finalize.o")); } CmdArgs.push_back(Args.MakeArgString("-L" + GCCLibPath)); Args.AddAllArgs(CmdArgs, options::OPT_L); Args.AddAllArgs(CmdArgs, options::OPT_T_Group); Args.AddAllArgs(CmdArgs, options::OPT_e); Args.AddAllArgs(CmdArgs, options::OPT_r); AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { if (getToolChain().getDriver().CCCIsCXX()) getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs); CmdArgs.push_back("-lgcc_s"); if (!Args.hasArg(options::OPT_shared)) { CmdArgs.push_back("-lgcc"); CmdArgs.push_back("-lc"); CmdArgs.push_back("-lm"); } } if
(!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { CmdArgs.push_back(Args.MakeArgString(GCCLibPath + "crtend.o")); } CmdArgs.push_back(Args.MakeArgString(LibPath + "crtn.o")); addProfileRT(getToolChain(), Args, CmdArgs); const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath()); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); } void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { claimNoWarnArgs(Args); ArgStringList CmdArgs; bool NeedsKPIC = false; switch (getToolChain().getArch()) { case llvm::Triple::x86: // When building 32-bit code on OpenBSD/amd64, we have to explicitly // instruct as in the base system to assemble 32-bit code. CmdArgs.push_back("--32"); break; case llvm::Triple::ppc: CmdArgs.push_back("-mppc"); CmdArgs.push_back("-many"); break; case llvm::Triple::sparc: case llvm::Triple::sparcel: CmdArgs.push_back("-32"); NeedsKPIC = true; break; case llvm::Triple::sparcv9: CmdArgs.push_back("-64"); CmdArgs.push_back("-Av9a"); NeedsKPIC = true; break; case llvm::Triple::mips64: case llvm::Triple::mips64el: { StringRef CPUName; StringRef ABIName; mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName); CmdArgs.push_back("-mabi"); CmdArgs.push_back(getGnuCompatibleMipsABIName(ABIName).data()); if (getToolChain().getArch() == llvm::Triple::mips64) CmdArgs.push_back("-EB"); else CmdArgs.push_back("-EL"); NeedsKPIC = true; break; } default: break; } if (NeedsKPIC) addAssemblerKPIC(Args, CmdArgs); Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); for (const auto &II : Inputs) CmdArgs.push_back(II.getFilename()); const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as")); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); } void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { const Driver &D = getToolChain().getDriver(); ArgStringList CmdArgs; // Silence warning for "clang -g foo.o -o foo" Args.ClaimAllArgs(options::OPT_g_Group); // and "clang -emit-llvm foo.o -o foo" Args.ClaimAllArgs(options::OPT_emit_llvm); // and for "clang -w foo.o -o foo". Other warning options are already // handled somewhere else.
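// (Claiming an option marks it as used, which keeps the driver from emitting // -Wunused-command-line-argument diagnostics for it.)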
Args.ClaimAllArgs(options::OPT_w); if (getToolChain().getArch() == llvm::Triple::mips64) CmdArgs.push_back("-EB"); else if (getToolChain().getArch() == llvm::Triple::mips64el) CmdArgs.push_back("-EL"); if ((!Args.hasArg(options::OPT_nostdlib)) && (!Args.hasArg(options::OPT_shared))) { CmdArgs.push_back("-e"); CmdArgs.push_back("__start"); } if (Args.hasArg(options::OPT_static)) { CmdArgs.push_back("-Bstatic"); } else { if (Args.hasArg(options::OPT_rdynamic)) CmdArgs.push_back("-export-dynamic"); CmdArgs.push_back("--eh-frame-hdr"); CmdArgs.push_back("-Bdynamic"); if (Args.hasArg(options::OPT_shared)) { CmdArgs.push_back("-shared"); } else { CmdArgs.push_back("-dynamic-linker"); CmdArgs.push_back("/usr/libexec/ld.so"); } } if (Args.hasArg(options::OPT_nopie)) CmdArgs.push_back("-nopie"); if (Output.isFilename()) { CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); } else { assert(Output.isNothing() && "Invalid output."); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) { if (Args.hasArg(options::OPT_pg)) CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("gcrt0.o"))); else CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crt0.o"))); CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o"))); } else { CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o"))); } } std::string Triple = getToolChain().getTripleString(); if (Triple.substr(0, 6) == "x86_64") Triple.replace(0, 6, "amd64"); CmdArgs.push_back( Args.MakeArgString("-L/usr/lib/gcc-lib/" + Triple + "/4.2.1")); Args.AddAllArgs(CmdArgs, options::OPT_L); Args.AddAllArgs(CmdArgs, options::OPT_T_Group); Args.AddAllArgs(CmdArgs, options::OPT_e); Args.AddAllArgs(CmdArgs, options::OPT_s); Args.AddAllArgs(CmdArgs, options::OPT_t); Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag); Args.AddAllArgs(CmdArgs, options::OPT_r); AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { if (D.CCCIsCXX()) { getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs); if (Args.hasArg(options::OPT_pg)) CmdArgs.push_back("-lm_p"); else CmdArgs.push_back("-lm"); } // FIXME: For some reason GCC passes -lgcc before adding // the default system libraries. Just mimic this for now. 
CmdArgs.push_back("-lgcc"); if (Args.hasArg(options::OPT_pthread)) { if (!Args.hasArg(options::OPT_shared) && Args.hasArg(options::OPT_pg)) CmdArgs.push_back("-lpthread_p"); else CmdArgs.push_back("-lpthread"); } if (!Args.hasArg(options::OPT_shared)) { if (Args.hasArg(options::OPT_pg)) CmdArgs.push_back("-lc_p"); else CmdArgs.push_back("-lc"); } CmdArgs.push_back("-lgcc"); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtend.o"))); else CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtendS.o"))); } const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath()); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } void bitrig::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { claimNoWarnArgs(Args); ArgStringList CmdArgs; Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); for (const auto &II : Inputs) CmdArgs.push_back(II.getFilename()); const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as")); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } void bitrig::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { const Driver &D = getToolChain().getDriver(); ArgStringList CmdArgs; if ((!Args.hasArg(options::OPT_nostdlib)) && (!Args.hasArg(options::OPT_shared))) { CmdArgs.push_back("-e"); CmdArgs.push_back("__start"); } if (Args.hasArg(options::OPT_static)) { CmdArgs.push_back("-Bstatic"); } else { if (Args.hasArg(options::OPT_rdynamic)) CmdArgs.push_back("-export-dynamic"); CmdArgs.push_back("--eh-frame-hdr"); CmdArgs.push_back("-Bdynamic"); if (Args.hasArg(options::OPT_shared)) { CmdArgs.push_back("-shared"); } else { CmdArgs.push_back("-dynamic-linker"); CmdArgs.push_back("/usr/libexec/ld.so"); } } if (Output.isFilename()) { CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); } else { assert(Output.isNothing() && "Invalid output."); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) { if (Args.hasArg(options::OPT_pg)) CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("gcrt0.o"))); else CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crt0.o"))); CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o"))); } else { CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o"))); } } Args.AddAllArgs(CmdArgs, options::OPT_L); Args.AddAllArgs(CmdArgs, options::OPT_T_Group); Args.AddAllArgs(CmdArgs, options::OPT_e); AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { if (D.CCCIsCXX()) { getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs); if (Args.hasArg(options::OPT_pg)) CmdArgs.push_back("-lm_p"); else CmdArgs.push_back("-lm"); } if (Args.hasArg(options::OPT_pthread)) { if (!Args.hasArg(options::OPT_shared) && Args.hasArg(options::OPT_pg)) CmdArgs.push_back("-lpthread_p"); else CmdArgs.push_back("-lpthread"); } if (!Args.hasArg(options::OPT_shared)) { if (Args.hasArg(options::OPT_pg)) CmdArgs.push_back("-lc_p"); else 
CmdArgs.push_back("-lc"); } StringRef MyArch; switch (getToolChain().getArch()) { case llvm::Triple::arm: MyArch = "arm"; break; case llvm::Triple::x86: MyArch = "i386"; break; case llvm::Triple::x86_64: MyArch = "amd64"; break; default: llvm_unreachable("Unsupported architecture"); } CmdArgs.push_back(Args.MakeArgString("-lclang_rt." + MyArch)); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtend.o"))); else CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtendS.o"))); } const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath()); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { claimNoWarnArgs(Args); ArgStringList CmdArgs; // When building 32-bit code on FreeBSD/amd64, we have to explicitly // instruct as in the base system to assemble 32-bit code. if (getToolChain().getArch() == llvm::Triple::x86) CmdArgs.push_back("--32"); else if (getToolChain().getArch() == llvm::Triple::ppc) CmdArgs.push_back("-a32"); else if (getToolChain().getArch() == llvm::Triple::mips || getToolChain().getArch() == llvm::Triple::mipsel || getToolChain().getArch() == llvm::Triple::mips64 || getToolChain().getArch() == llvm::Triple::mips64el) { StringRef CPUName; StringRef ABIName; mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName); CmdArgs.push_back("-march"); CmdArgs.push_back(CPUName.data()); CmdArgs.push_back("-mabi"); CmdArgs.push_back(getGnuCompatibleMipsABIName(ABIName).data()); if (getToolChain().getArch() == llvm::Triple::mips || getToolChain().getArch() == llvm::Triple::mips64) CmdArgs.push_back("-EB"); else CmdArgs.push_back("-EL"); addAssemblerKPIC(Args, CmdArgs); } else if (getToolChain().getArch() == llvm::Triple::arm || getToolChain().getArch() == llvm::Triple::armeb || getToolChain().getArch() == llvm::Triple::thumb || getToolChain().getArch() == llvm::Triple::thumbeb) { const Driver &D = getToolChain().getDriver(); const llvm::Triple &Triple = getToolChain().getTriple(); StringRef FloatABI = arm::getARMFloatABI(D, Args, Triple); if (FloatABI == "hard") { CmdArgs.push_back("-mfpu=vfp"); } else { CmdArgs.push_back("-mfpu=softvfp"); } switch (getToolChain().getTriple().getEnvironment()) { case llvm::Triple::GNUEABIHF: case llvm::Triple::GNUEABI: case llvm::Triple::EABI: CmdArgs.push_back("-meabi=5"); break; default: CmdArgs.push_back("-matpcs"); } } else if (getToolChain().getArch() == llvm::Triple::sparc || getToolChain().getArch() == llvm::Triple::sparcel || getToolChain().getArch() == llvm::Triple::sparcv9) { if (getToolChain().getArch() == llvm::Triple::sparc) CmdArgs.push_back("-Av8plusa"); else CmdArgs.push_back("-Av9a"); addAssemblerKPIC(Args, CmdArgs); } Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); for (const auto &II : Inputs) CmdArgs.push_back(II.getFilename()); const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as")); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { const toolchains::FreeBSD 
&ToolChain = static_cast(getToolChain()); const Driver &D = ToolChain.getDriver(); const llvm::Triple::ArchType Arch = ToolChain.getArch(); const bool IsPIE = !Args.hasArg(options::OPT_shared) && (Args.hasArg(options::OPT_pie) || ToolChain.isPIEDefault()); ArgStringList CmdArgs; // Silence warning for "clang -g foo.o -o foo" Args.ClaimAllArgs(options::OPT_g_Group); // and "clang -emit-llvm foo.o -o foo" Args.ClaimAllArgs(options::OPT_emit_llvm); // and for "clang -w foo.o -o foo". Other warning options are already // handled somewhere else. Args.ClaimAllArgs(options::OPT_w); if (!D.SysRoot.empty()) CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot)); if (IsPIE) CmdArgs.push_back("-pie"); if (Args.hasArg(options::OPT_static)) { CmdArgs.push_back("-Bstatic"); } else { if (Args.hasArg(options::OPT_rdynamic)) CmdArgs.push_back("-export-dynamic"); CmdArgs.push_back("--eh-frame-hdr"); if (Args.hasArg(options::OPT_shared)) { CmdArgs.push_back("-Bshareable"); } else { CmdArgs.push_back("-dynamic-linker"); CmdArgs.push_back("/libexec/ld-elf.so.1"); } if (ToolChain.getTriple().getOSMajorVersion() >= 9) { if (Arch == llvm::Triple::arm || Arch == llvm::Triple::sparc || Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) { CmdArgs.push_back("--hash-style=both"); } } CmdArgs.push_back("--enable-new-dtags"); } // When building 32-bit code on FreeBSD/amd64, we have to explicitly // instruct ld in the base system to link 32-bit code. if (Arch == llvm::Triple::x86) { CmdArgs.push_back("-m"); CmdArgs.push_back("elf_i386_fbsd"); } if (Arch == llvm::Triple::ppc) { CmdArgs.push_back("-m"); CmdArgs.push_back("elf32ppc_fbsd"); } if (Output.isFilename()) { CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); } else { assert(Output.isNothing() && "Invalid output."); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { const char *crt1 = nullptr; if (!Args.hasArg(options::OPT_shared)) { if (Args.hasArg(options::OPT_pg)) crt1 = "gcrt1.o"; else if (IsPIE) crt1 = "Scrt1.o"; else crt1 = "crt1.o"; } if (crt1) CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1))); CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o"))); const char *crtbegin = nullptr; if (Args.hasArg(options::OPT_static)) crtbegin = "crtbeginT.o"; else if (Args.hasArg(options::OPT_shared) || IsPIE) crtbegin = "crtbeginS.o"; else crtbegin = "crtbegin.o"; CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin))); } Args.AddAllArgs(CmdArgs, options::OPT_L); const ToolChain::path_list &Paths = ToolChain.getFilePaths(); for (const auto &Path : Paths) CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path)); Args.AddAllArgs(CmdArgs, options::OPT_T_Group); Args.AddAllArgs(CmdArgs, options::OPT_e); Args.AddAllArgs(CmdArgs, options::OPT_s); Args.AddAllArgs(CmdArgs, options::OPT_t); Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag); Args.AddAllArgs(CmdArgs, options::OPT_r); if (D.IsUsingLTO(Args)) AddGoldPlugin(ToolChain, Args, CmdArgs); bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs); AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { + addOpenMPRuntime(CmdArgs, ToolChain, Args); if (D.CCCIsCXX()) { ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs); if (Args.hasArg(options::OPT_pg)) CmdArgs.push_back("-lm_p"); else CmdArgs.push_back("-lm"); } if (NeedsSanitizerDeps) linkSanitizerRuntimeDeps(ToolChain, CmdArgs); // FIXME: For some reason GCC passes 
-lgcc and -lgcc_s before adding // the default system libraries. Just mimic this for now. if (Args.hasArg(options::OPT_pg)) CmdArgs.push_back("-lgcc_p"); else CmdArgs.push_back("-lgcc"); if (Args.hasArg(options::OPT_static)) { CmdArgs.push_back("-lgcc_eh"); } else if (Args.hasArg(options::OPT_pg)) { CmdArgs.push_back("-lgcc_eh_p"); } else { CmdArgs.push_back("--as-needed"); CmdArgs.push_back("-lgcc_s"); CmdArgs.push_back("--no-as-needed"); } if (Args.hasArg(options::OPT_pthread)) { if (Args.hasArg(options::OPT_pg)) CmdArgs.push_back("-lpthread_p"); else CmdArgs.push_back("-lpthread"); } if (Args.hasArg(options::OPT_pg)) { if (Args.hasArg(options::OPT_shared)) CmdArgs.push_back("-lc"); else CmdArgs.push_back("-lc_p"); CmdArgs.push_back("-lgcc_p"); } else { CmdArgs.push_back("-lc"); CmdArgs.push_back("-lgcc"); } if (Args.hasArg(options::OPT_static)) { CmdArgs.push_back("-lgcc_eh"); } else if (Args.hasArg(options::OPT_pg)) { CmdArgs.push_back("-lgcc_eh_p"); } else { CmdArgs.push_back("--as-needed"); CmdArgs.push_back("-lgcc_s"); CmdArgs.push_back("--no-as-needed"); } } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (Args.hasArg(options::OPT_shared) || IsPIE) CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o"))); else CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o"))); CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o"))); } addProfileRT(ToolChain, Args, CmdArgs); const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath()); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); } void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { claimNoWarnArgs(Args); ArgStringList CmdArgs; // GNU as needs different flags for creating the correct output format // on architectures with different ABIs or optional feature sets.
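// (Unlike the integrated assembler, GNU as only learns the intended sub-ABI // from explicit flags such as --32/--64, -march, or -mabi, so those are // derived from the triple and arguments below.)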
switch (getToolChain().getArch()) { case llvm::Triple::x86: CmdArgs.push_back("--32"); break; case llvm::Triple::arm: case llvm::Triple::armeb: case llvm::Triple::thumb: case llvm::Triple::thumbeb: { StringRef MArch, MCPU; getARMArchCPUFromArgs(Args, MArch, MCPU, /*FromAs*/ true); std::string Arch = arm::getARMTargetCPU(MCPU, MArch, getToolChain().getTriple()); CmdArgs.push_back(Args.MakeArgString("-mcpu=" + Arch)); break; } case llvm::Triple::mips: case llvm::Triple::mipsel: case llvm::Triple::mips64: case llvm::Triple::mips64el: { StringRef CPUName; StringRef ABIName; mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName); CmdArgs.push_back("-march"); CmdArgs.push_back(CPUName.data()); CmdArgs.push_back("-mabi"); CmdArgs.push_back(getGnuCompatibleMipsABIName(ABIName).data()); if (getToolChain().getArch() == llvm::Triple::mips || getToolChain().getArch() == llvm::Triple::mips64) CmdArgs.push_back("-EB"); else CmdArgs.push_back("-EL"); addAssemblerKPIC(Args, CmdArgs); break; } case llvm::Triple::sparc: case llvm::Triple::sparcel: CmdArgs.push_back("-32"); addAssemblerKPIC(Args, CmdArgs); break; case llvm::Triple::sparcv9: CmdArgs.push_back("-64"); CmdArgs.push_back("-Av9"); addAssemblerKPIC(Args, CmdArgs); break; default: break; } Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); for (const auto &II : Inputs) CmdArgs.push_back(II.getFilename()); const char *Exec = Args.MakeArgString((getToolChain().GetProgramPath("as"))); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); } void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { const Driver &D = getToolChain().getDriver(); ArgStringList CmdArgs; if (!D.SysRoot.empty()) CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot)); CmdArgs.push_back("--eh-frame-hdr"); if (Args.hasArg(options::OPT_static)) { CmdArgs.push_back("-Bstatic"); } else { if (Args.hasArg(options::OPT_rdynamic)) CmdArgs.push_back("-export-dynamic"); if (Args.hasArg(options::OPT_shared)) { CmdArgs.push_back("-Bshareable"); } else { CmdArgs.push_back("-dynamic-linker"); CmdArgs.push_back("/libexec/ld.elf_so"); } } // Many NetBSD architectures support more than one ABI. // Determine the correct emulation for ld.
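// ("ld -m <emulation>" selects the output format and ABI; "ld -V" lists the // emulations a given linker binary supports.)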
switch (getToolChain().getArch()) { case llvm::Triple::x86: CmdArgs.push_back("-m"); CmdArgs.push_back("elf_i386"); break; case llvm::Triple::arm: case llvm::Triple::thumb: CmdArgs.push_back("-m"); switch (getToolChain().getTriple().getEnvironment()) { case llvm::Triple::EABI: case llvm::Triple::GNUEABI: CmdArgs.push_back("armelf_nbsd_eabi"); break; case llvm::Triple::EABIHF: case llvm::Triple::GNUEABIHF: CmdArgs.push_back("armelf_nbsd_eabihf"); break; default: CmdArgs.push_back("armelf_nbsd"); break; } break; case llvm::Triple::armeb: case llvm::Triple::thumbeb: arm::appendEBLinkFlags( Args, CmdArgs, llvm::Triple(getToolChain().ComputeEffectiveClangTriple(Args))); CmdArgs.push_back("-m"); switch (getToolChain().getTriple().getEnvironment()) { case llvm::Triple::EABI: case llvm::Triple::GNUEABI: CmdArgs.push_back("armelfb_nbsd_eabi"); break; case llvm::Triple::EABIHF: case llvm::Triple::GNUEABIHF: CmdArgs.push_back("armelfb_nbsd_eabihf"); break; default: CmdArgs.push_back("armelfb_nbsd"); break; } break; case llvm::Triple::mips64: case llvm::Triple::mips64el: if (mips::hasMipsAbiArg(Args, "32")) { CmdArgs.push_back("-m"); if (getToolChain().getArch() == llvm::Triple::mips64) CmdArgs.push_back("elf32btsmip"); else CmdArgs.push_back("elf32ltsmip"); } else if (mips::hasMipsAbiArg(Args, "64")) { CmdArgs.push_back("-m"); if (getToolChain().getArch() == llvm::Triple::mips64) CmdArgs.push_back("elf64btsmip"); else CmdArgs.push_back("elf64ltsmip"); } break; case llvm::Triple::ppc: CmdArgs.push_back("-m"); CmdArgs.push_back("elf32ppc_nbsd"); break; case llvm::Triple::ppc64: case llvm::Triple::ppc64le: CmdArgs.push_back("-m"); CmdArgs.push_back("elf64ppc"); break; case llvm::Triple::sparc: CmdArgs.push_back("-m"); CmdArgs.push_back("elf32_sparc"); break; case llvm::Triple::sparcv9: CmdArgs.push_back("-m"); CmdArgs.push_back("elf64_sparc"); break; default: break; } if (Output.isFilename()) { CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); } else { assert(Output.isNothing() && "Invalid output."); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) { CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crt0.o"))); CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crti.o"))); CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o"))); } else { CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crti.o"))); CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o"))); } } Args.AddAllArgs(CmdArgs, options::OPT_L); Args.AddAllArgs(CmdArgs, options::OPT_T_Group); Args.AddAllArgs(CmdArgs, options::OPT_e); Args.AddAllArgs(CmdArgs, options::OPT_s); Args.AddAllArgs(CmdArgs, options::OPT_t); Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag); Args.AddAllArgs(CmdArgs, options::OPT_r); AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs); unsigned Major, Minor, Micro; getToolChain().getTriple().getOSVersion(Major, Minor, Micro); bool useLibgcc = true; if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 49) || Major == 0) { switch (getToolChain().getArch()) { case llvm::Triple::aarch64: case llvm::Triple::arm: case llvm::Triple::armeb: case llvm::Triple::thumb: case llvm::Triple::thumbeb: case llvm::Triple::ppc: case llvm::Triple::ppc64: case llvm::Triple::ppc64le: case llvm::Triple::x86: case llvm::Triple::x86_64: useLibgcc = false; break; default: break; } } if (!Args.hasArg(options::OPT_nostdlib) && 
!Args.hasArg(options::OPT_nodefaultlibs)) { + addOpenMPRuntime(CmdArgs, getToolChain(), Args); if (D.CCCIsCXX()) { getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs); CmdArgs.push_back("-lm"); } if (Args.hasArg(options::OPT_pthread)) CmdArgs.push_back("-lpthread"); CmdArgs.push_back("-lc"); if (useLibgcc) { if (Args.hasArg(options::OPT_static)) { // libgcc_eh depends on libc, so resolve as much as possible, // pull in any new requirements from libc and then get the rest // of libgcc. CmdArgs.push_back("-lgcc_eh"); CmdArgs.push_back("-lc"); CmdArgs.push_back("-lgcc"); } else { CmdArgs.push_back("-lgcc"); CmdArgs.push_back("--as-needed"); CmdArgs.push_back("-lgcc_s"); CmdArgs.push_back("--no-as-needed"); } } } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtend.o"))); else CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtendS.o"))); CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o"))); } addProfileRT(getToolChain(), Args, CmdArgs); const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath()); C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); } void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { claimNoWarnArgs(Args); ArgStringList CmdArgs; bool NeedsKPIC = false; switch (getToolChain().getArch()) { default: break; // Add --32/--64 to make sure we get the format we want. // This is incomplete case llvm::Triple::x86: CmdArgs.push_back("--32"); break; case llvm::Triple::x86_64: if (getToolChain().getTriple().getEnvironment() == llvm::Triple::GNUX32) CmdArgs.push_back("--x32"); else CmdArgs.push_back("--64"); break; case llvm::Triple::ppc: CmdArgs.push_back("-a32"); CmdArgs.push_back("-mppc"); CmdArgs.push_back("-many"); break; case llvm::Triple::ppc64: CmdArgs.push_back("-a64"); CmdArgs.push_back("-mppc64"); CmdArgs.push_back("-many"); break; case llvm::Triple::ppc64le: CmdArgs.push_back("-a64"); CmdArgs.push_back("-mppc64"); CmdArgs.push_back("-many"); CmdArgs.push_back("-mlittle-endian"); break; case llvm::Triple::sparc: case llvm::Triple::sparcel: CmdArgs.push_back("-32"); CmdArgs.push_back("-Av8plusa"); NeedsKPIC = true; break; case llvm::Triple::sparcv9: CmdArgs.push_back("-64"); CmdArgs.push_back("-Av9a"); NeedsKPIC = true; break; case llvm::Triple::arm: case llvm::Triple::armeb: case llvm::Triple::thumb: case llvm::Triple::thumbeb: { const llvm::Triple &Triple = getToolChain().getTriple(); switch (Triple.getSubArch()) { case llvm::Triple::ARMSubArch_v7: CmdArgs.push_back("-mfpu=neon"); break; case llvm::Triple::ARMSubArch_v8: CmdArgs.push_back("-mfpu=crypto-neon-fp-armv8"); break; default: break; } StringRef ARMFloatABI = tools::arm::getARMFloatABI( getToolChain().getDriver(), Args, llvm::Triple(getToolChain().ComputeEffectiveClangTriple(Args))); CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=" + ARMFloatABI)); Args.AddLastArg(CmdArgs, options::OPT_march_EQ); // FIXME: remove krait check when GNU tools support krait cpu // for now replace it with -march=armv7-a to avoid a lower // march from being picked in the absence of a cpu flag.
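// (Krait is a Qualcomm ARMv7-A core, so armv7-a is a safe stand-in until the // GNU assembler learns the CPU name.)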
Arg *A; if ((A = Args.getLastArg(options::OPT_mcpu_EQ)) && StringRef(A->getValue()).lower() == "krait") CmdArgs.push_back("-march=armv7-a"); else Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ); Args.AddLastArg(CmdArgs, options::OPT_mfpu_EQ); break; } case llvm::Triple::mips: case llvm::Triple::mipsel: case llvm::Triple::mips64: case llvm::Triple::mips64el: { StringRef CPUName; StringRef ABIName; mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName); ABIName = getGnuCompatibleMipsABIName(ABIName); CmdArgs.push_back("-march"); CmdArgs.push_back(CPUName.data()); CmdArgs.push_back("-mabi"); CmdArgs.push_back(ABIName.data()); // -mno-shared should be emitted unless -fpic, -fpie, -fPIC, -fPIE, // or -mshared (not implemented) is in effect. bool IsPicOrPie = false; if (Arg *A = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC, options::OPT_fpic, options::OPT_fno_pic, options::OPT_fPIE, options::OPT_fno_PIE, options::OPT_fpie, options::OPT_fno_pie)) { if (A->getOption().matches(options::OPT_fPIC) || A->getOption().matches(options::OPT_fpic) || A->getOption().matches(options::OPT_fPIE) || A->getOption().matches(options::OPT_fpie)) IsPicOrPie = true; } if (!IsPicOrPie) CmdArgs.push_back("-mno-shared"); // LLVM doesn't support -mplt yet and acts as if it is always given. // However, -mplt has no effect with the N64 ABI. CmdArgs.push_back(ABIName == "64" ? "-KPIC" : "-call_nonpic"); if (getToolChain().getArch() == llvm::Triple::mips || getToolChain().getArch() == llvm::Triple::mips64) CmdArgs.push_back("-EB"); else CmdArgs.push_back("-EL"); if (Arg *A = Args.getLastArg(options::OPT_mnan_EQ)) { if (StringRef(A->getValue()) == "2008") CmdArgs.push_back(Args.MakeArgString("-mnan=2008")); } // Add the last -mfp32/-mfpxx/-mfp64 or -mfpxx if it is enabled by default. StringRef MIPSFloatABI = getMipsFloatABI(getToolChain().getDriver(), Args); if (Arg *A = Args.getLastArg(options::OPT_mfp32, options::OPT_mfpxx, options::OPT_mfp64)) { A->claim(); A->render(Args, CmdArgs); } else if (mips::shouldUseFPXX(Args, getToolChain().getTriple(), CPUName, ABIName, MIPSFloatABI)) CmdArgs.push_back("-mfpxx"); // Pass on -mmips16 or -mno-mips16. However, the assembler equivalent of // -mno-mips16 is actually -no-mips16. if (Arg *A = Args.getLastArg(options::OPT_mips16, options::OPT_mno_mips16)) { if (A->getOption().matches(options::OPT_mips16)) { A->claim(); A->render(Args, CmdArgs); } else { A->claim(); CmdArgs.push_back("-no-mips16"); } } Args.AddLastArg(CmdArgs, options::OPT_mmicromips, options::OPT_mno_micromips); Args.AddLastArg(CmdArgs, options::OPT_mdsp, options::OPT_mno_dsp); Args.AddLastArg(CmdArgs, options::OPT_mdspr2, options::OPT_mno_dspr2); if (Arg *A = Args.getLastArg(options::OPT_mmsa, options::OPT_mno_msa)) { // Do not use AddLastArg because not all versions of MIPS assembler // support -mmsa / -mno-msa options. if (A->getOption().matches(options::OPT_mmsa)) CmdArgs.push_back(Args.MakeArgString("-mmsa")); } Args.AddLastArg(CmdArgs, options::OPT_mhard_float, options::OPT_msoft_float); Args.AddLastArg(CmdArgs, options::OPT_mdouble_float, options::OPT_msingle_float); Args.AddLastArg(CmdArgs, options::OPT_modd_spreg, options::OPT_mno_odd_spreg); NeedsKPIC = true; break; } case llvm::Triple::systemz: { // Always pass an -march option, since our default of z10 is later // than the GNU assembler's default. 
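// (In other words, an explicit -march, e.g. "-march=z10" for the default CPU, // is always emitted so the assembler never silently falls back to its own // older default.)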
    StringRef CPUName = getSystemZTargetCPU(Args);
    CmdArgs.push_back(Args.MakeArgString("-march=" + CPUName));
    break;
  }
  }

  if (NeedsKPIC)
    addAssemblerKPIC(Args, CmdArgs);

  Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);

  CmdArgs.push_back("-o");
  CmdArgs.push_back(Output.getFilename());

  for (const auto &II : Inputs)
    CmdArgs.push_back(II.getFilename());

  const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
  C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));

  // Handle the debug info splitting at object creation time if we're
  // creating an object.
  // TODO: Currently only works on linux with newer objcopy.
  if (Args.hasArg(options::OPT_gsplit_dwarf) &&
      getToolChain().getTriple().isOSLinux())
    SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
                   SplitDebugName(Args, Inputs[0]));
}

static void AddLibgcc(const llvm::Triple &Triple, const Driver &D,
                      ArgStringList &CmdArgs, const ArgList &Args) {
  bool isAndroid = Triple.getEnvironment() == llvm::Triple::Android;
  bool isCygMing = Triple.isOSCygMing();
  bool StaticLibgcc = Args.hasArg(options::OPT_static_libgcc) ||
                      Args.hasArg(options::OPT_static);
  if (!D.CCCIsCXX())
    CmdArgs.push_back("-lgcc");

  if (StaticLibgcc || isAndroid) {
    if (D.CCCIsCXX())
      CmdArgs.push_back("-lgcc");
  } else {
    if (!D.CCCIsCXX() && !isCygMing)
      CmdArgs.push_back("--as-needed");
    CmdArgs.push_back("-lgcc_s");
    if (!D.CCCIsCXX() && !isCygMing)
      CmdArgs.push_back("--no-as-needed");
  }

  if (StaticLibgcc && !isAndroid)
    CmdArgs.push_back("-lgcc_eh");
  else if (!Args.hasArg(options::OPT_shared) && D.CCCIsCXX())
    CmdArgs.push_back("-lgcc");

  // According to Android ABI, we have to link with libdl if we are
  // linking with non-static libgcc.
  //
  // NOTE: This fixes a link error on Android MIPS as well. The non-static
  // libgcc for MIPS relies on _Unwind_Find_FDE and dl_iterate_phdr from libdl.
  if (isAndroid && !StaticLibgcc)
    CmdArgs.push_back("-ldl");
}

static std::string getLinuxDynamicLinker(const ArgList &Args,
                                         const toolchains::Linux &ToolChain) {
  const llvm::Triple::ArchType Arch = ToolChain.getArch();

  if (ToolChain.getTriple().getEnvironment() == llvm::Triple::Android) {
    if (ToolChain.getTriple().isArch64Bit())
      return "/system/bin/linker64";
    else
      return "/system/bin/linker";
  } else if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::sparc ||
             Arch == llvm::Triple::sparcel)
    return "/lib/ld-linux.so.2";
  else if (Arch == llvm::Triple::aarch64)
    return "/lib/ld-linux-aarch64.so.1";
  else if (Arch == llvm::Triple::aarch64_be)
    return "/lib/ld-linux-aarch64_be.so.1";
  else if (Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb) {
    if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF ||
        tools::arm::getARMFloatABI(ToolChain.getDriver(), Args,
                                   ToolChain.getTriple()) == "hard")
      return "/lib/ld-linux-armhf.so.3";
    else
      return "/lib/ld-linux.so.3";
  } else if (Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumbeb) {
    // TODO: check which dynamic linker name.
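// ---- begin illustrative sketch (editorial addition, not part of this diff;
// ---- helper name invented) ----
// The ARM branch above picks the loader purely from the float ABI: hard-float
// links get the multiarch "armhf" loader name so that soft-float and
// hard-float libraries can coexist on one filesystem. In isolation:
#include <string>

std::string armLinuxLoader(bool HardFloat) {
  return HardFloat ? "/lib/ld-linux-armhf.so.3" : "/lib/ld-linux.so.3";
}
// ---- end illustrative sketch ----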
if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF || tools::arm::getARMFloatABI(ToolChain.getDriver(), Args, ToolChain.getTriple()) == "hard") return "/lib/ld-linux-armhf.so.3"; else return "/lib/ld-linux.so.3"; } else if (Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel || Arch == llvm::Triple::mips64 || Arch == llvm::Triple::mips64el) { StringRef CPUName; StringRef ABIName; mips::getMipsCPUAndABI(Args, ToolChain.getTriple(), CPUName, ABIName); bool IsNaN2008 = mips::isNaN2008(Args, ToolChain.getTriple()); StringRef LibDir = llvm::StringSwitch(ABIName) .Case("o32", "/lib") .Case("n32", "/lib32") .Case("n64", "/lib64") .Default("/lib"); StringRef LibName; if (mips::isUCLibc(Args)) LibName = IsNaN2008 ? "ld-uClibc-mipsn8.so.0" : "ld-uClibc.so.0"; else LibName = IsNaN2008 ? "ld-linux-mipsn8.so.1" : "ld.so.1"; return (LibDir + "/" + LibName).str(); } else if (Arch == llvm::Triple::ppc) return "/lib/ld.so.1"; else if (Arch == llvm::Triple::ppc64) { if (ppc::hasPPCAbiArg(Args, "elfv2")) return "/lib64/ld64.so.2"; return "/lib64/ld64.so.1"; } else if (Arch == llvm::Triple::ppc64le) { if (ppc::hasPPCAbiArg(Args, "elfv1")) return "/lib64/ld64.so.1"; return "/lib64/ld64.so.2"; } else if (Arch == llvm::Triple::systemz) return "/lib64/ld64.so.1"; else if (Arch == llvm::Triple::sparcv9) return "/lib64/ld-linux.so.2"; else if (Arch == llvm::Triple::x86_64 && ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUX32) return "/libx32/ld-linux-x32.so.2"; else return "/lib64/ld-linux-x86-64.so.2"; } static void AddRunTimeLibs(const ToolChain &TC, const Driver &D, ArgStringList &CmdArgs, const ArgList &Args) { // Make use of compiler-rt if --rtlib option is used ToolChain::RuntimeLibType RLT = TC.GetRuntimeLibType(Args); switch (RLT) { case ToolChain::RLT_CompilerRT: switch (TC.getTriple().getOS()) { default: llvm_unreachable("unsupported OS"); case llvm::Triple::Win32: case llvm::Triple::Linux: addClangRT(TC, Args, CmdArgs); break; } break; case ToolChain::RLT_Libgcc: AddLibgcc(TC.getTriple(), D, CmdArgs, Args); break; } } static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) { switch (T.getArch()) { case llvm::Triple::x86: return "elf_i386"; case llvm::Triple::aarch64: return "aarch64linux"; case llvm::Triple::aarch64_be: return "aarch64_be_linux"; case llvm::Triple::arm: case llvm::Triple::thumb: return "armelf_linux_eabi"; case llvm::Triple::armeb: case llvm::Triple::thumbeb: return "armebelf_linux_eabi"; /* TODO: check which NAME. 
*/ case llvm::Triple::ppc: return "elf32ppclinux"; case llvm::Triple::ppc64: return "elf64ppc"; case llvm::Triple::ppc64le: return "elf64lppc"; case llvm::Triple::sparc: case llvm::Triple::sparcel: return "elf32_sparc"; case llvm::Triple::sparcv9: return "elf64_sparc"; case llvm::Triple::mips: return "elf32btsmip"; case llvm::Triple::mipsel: return "elf32ltsmip"; case llvm::Triple::mips64: if (mips::hasMipsAbiArg(Args, "n32")) return "elf32btsmipn32"; return "elf64btsmip"; case llvm::Triple::mips64el: if (mips::hasMipsAbiArg(Args, "n32")) return "elf32ltsmipn32"; return "elf64ltsmip"; case llvm::Triple::systemz: return "elf64_s390"; case llvm::Triple::x86_64: if (T.getEnvironment() == llvm::Triple::GNUX32) return "elf32_x86_64"; return "elf_x86_64"; default: llvm_unreachable("Unexpected arch"); } } void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { const toolchains::Linux &ToolChain = static_cast(getToolChain()); const Driver &D = ToolChain.getDriver(); const llvm::Triple::ArchType Arch = ToolChain.getArch(); const bool isAndroid = ToolChain.getTriple().getEnvironment() == llvm::Triple::Android; const bool IsPIE = !Args.hasArg(options::OPT_shared) && !Args.hasArg(options::OPT_static) && (Args.hasArg(options::OPT_pie) || ToolChain.isPIEDefault()); ArgStringList CmdArgs; // Silence warning for "clang -g foo.o -o foo" Args.ClaimAllArgs(options::OPT_g_Group); // and "clang -emit-llvm foo.o -o foo" Args.ClaimAllArgs(options::OPT_emit_llvm); // and for "clang -w foo.o -o foo". Other warning options are already // handled somewhere else. Args.ClaimAllArgs(options::OPT_w); if (!D.SysRoot.empty()) CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot)); if (IsPIE) CmdArgs.push_back("-pie"); if (Args.hasArg(options::OPT_rdynamic)) CmdArgs.push_back("-export-dynamic"); if (Args.hasArg(options::OPT_s)) CmdArgs.push_back("-s"); if (Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumbeb) arm::appendEBLinkFlags( Args, CmdArgs, llvm::Triple(getToolChain().ComputeEffectiveClangTriple(Args))); for (const auto &Opt : ToolChain.ExtraOpts) CmdArgs.push_back(Opt.c_str()); if (!Args.hasArg(options::OPT_static)) { CmdArgs.push_back("--eh-frame-hdr"); } CmdArgs.push_back("-m"); CmdArgs.push_back(getLDMOption(ToolChain.getTriple(), Args)); if (Args.hasArg(options::OPT_static)) { if (Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb) CmdArgs.push_back("-Bstatic"); else CmdArgs.push_back("-static"); } else if (Args.hasArg(options::OPT_shared)) { CmdArgs.push_back("-shared"); } if (Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb || (!Args.hasArg(options::OPT_static) && !Args.hasArg(options::OPT_shared))) { CmdArgs.push_back("-dynamic-linker"); CmdArgs.push_back(Args.MakeArgString( D.DyldPrefix + getLinuxDynamicLinker(Args, ToolChain))); } CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!isAndroid) { const char *crt1 = nullptr; if (!Args.hasArg(options::OPT_shared)) { if (Args.hasArg(options::OPT_pg)) crt1 = "gcrt1.o"; else if (IsPIE) crt1 = "Scrt1.o"; else crt1 = "crt1.o"; } if (crt1) CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1))); CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o"))); } 
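// ---- begin illustrative sketch (editorial addition, not part of this diff;
// ---- helper name invented) ----
// The startup-object selection just performed is a small decision table:
// profiling (-pg) takes gcrt1.o, PIE links take Scrt1.o, plain executables
// take crt1.o, and shared libraries link no crt1 at all.
#include <string>

// Returns the crt1 variant to link, or "" for a shared library.
std::string pickCrt1(bool Shared, bool Profiling, bool PIE) {
  if (Shared)
    return ""; // DSOs have no program entry object.
  if (Profiling)
    return "gcrt1.o"; // gprof-instrumented startup.
  if (PIE)
    return "Scrt1.o"; // position-independent startup.
  return "crt1.o";
}
// ---- end illustrative sketch ----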
    const char *crtbegin;
    if (Args.hasArg(options::OPT_static))
      crtbegin = isAndroid ? "crtbegin_static.o" : "crtbeginT.o";
    else if (Args.hasArg(options::OPT_shared))
      crtbegin = isAndroid ? "crtbegin_so.o" : "crtbeginS.o";
    else if (IsPIE)
      crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbeginS.o";
    else
      crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbegin.o";
    CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));

    // Add crtfastmath.o if available and fast math is enabled.
    ToolChain.AddFastMathRuntimeIfAvailable(Args, CmdArgs);
  }

  Args.AddAllArgs(CmdArgs, options::OPT_L);
  Args.AddAllArgs(CmdArgs, options::OPT_u);

  const ToolChain::path_list &Paths = ToolChain.getFilePaths();
  for (const auto &Path : Paths)
    CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path));

  if (D.IsUsingLTO(Args))
    AddGoldPlugin(ToolChain, Args, CmdArgs);

  if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
    CmdArgs.push_back("--no-demangle");

  bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
  AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
  // The profile runtime also needs access to system libraries.
  addProfileRT(getToolChain(), Args, CmdArgs);

  if (D.CCCIsCXX() && !Args.hasArg(options::OPT_nostdlib) &&
      !Args.hasArg(options::OPT_nodefaultlibs)) {
    bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
                               !Args.hasArg(options::OPT_static);
    if (OnlyLibstdcxxStatic)
      CmdArgs.push_back("-Bstatic");
    ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
    if (OnlyLibstdcxxStatic)
      CmdArgs.push_back("-Bdynamic");
    CmdArgs.push_back("-lm");
  }
  // Silence warnings when linking C code with a C++ '-stdlib' argument.
  Args.ClaimAllArgs(options::OPT_stdlib_EQ);

  if (!Args.hasArg(options::OPT_nostdlib)) {
    if (!Args.hasArg(options::OPT_nodefaultlibs)) {
      if (Args.hasArg(options::OPT_static))
        CmdArgs.push_back("--start-group");

      if (NeedsSanitizerDeps)
        linkSanitizerRuntimeDeps(ToolChain, CmdArgs);

      bool WantPthread = Args.hasArg(options::OPT_pthread) ||
                         Args.hasArg(options::OPT_pthreads);

      if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
                       options::OPT_fno_openmp, false)) {
        // The OpenMP runtimes imply pthreads when using the GNU toolchain.
        // FIXME: Does this really make sense for all GNU toolchains?
        WantPthread = true;

        // Also link the particular OpenMP runtimes.
        switch (getOpenMPRuntime(ToolChain, Args)) {
        case OMPRT_OMP:
          CmdArgs.push_back("-lomp");
          break;
        case OMPRT_GOMP:
          CmdArgs.push_back("-lgomp");

          // FIXME: Exclude this for platforms with libgomp that don't require
          // librt. Most modern Linux platforms require it, but some may not.
          CmdArgs.push_back("-lrt");
          break;
        case OMPRT_IOMP5:
          CmdArgs.push_back("-liomp5");
          break;
        case OMPRT_Unknown:
          // Already diagnosed.
          break;
        }
      }

      AddRunTimeLibs(ToolChain, D, CmdArgs, Args);

      if (WantPthread && !isAndroid)
        CmdArgs.push_back("-lpthread");

      CmdArgs.push_back("-lc");

      if (Args.hasArg(options::OPT_static))
        CmdArgs.push_back("--end-group");
      else
        AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
    }

    if (!Args.hasArg(options::OPT_nostartfiles)) {
      const char *crtend;
      if (Args.hasArg(options::OPT_shared))
        crtend = isAndroid ? "crtend_so.o" : "crtendS.o";
      else if (IsPIE)
        crtend = isAndroid ? "crtend_android.o" : "crtendS.o";
      else
        crtend = isAndroid ? "crtend_android.o" : "crtend.o";
      CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
      if (!isAndroid)
        CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
    }
  }

  C.addCommand(
      llvm::make_unique<Command>(JA, *this, ToolChain.Linker.c_str(), CmdArgs));
}

// NaCl ARM assembly (inline or standalone) can be written with a set of macros
// for the various SFI requirements like register masking. The assembly tool
// inserts the file containing the macros as an input into all the assembly
// jobs.
void nacltools::AssemblerARM::ConstructJob(Compilation &C, const JobAction &JA,
                                           const InputInfo &Output,
                                           const InputInfoList &Inputs,
                                           const ArgList &Args,
                                           const char *LinkingOutput) const {
  const toolchains::NaCl_TC &ToolChain =
      static_cast<const toolchains::NaCl_TC &>(getToolChain());
  InputInfo NaClMacros(ToolChain.GetNaClArmMacrosPath(), types::TY_PP_Asm,
                       "nacl-arm-macros.s");
  InputInfoList NewInputs;
  NewInputs.push_back(NaClMacros);
  NewInputs.append(Inputs.begin(), Inputs.end());
  gnutools::Assembler::ConstructJob(C, JA, Output, NewInputs, Args,
                                    LinkingOutput);
}

// This is quite similar to gnutools::Linker::ConstructJob, except that we use
// static linking by default, do not yet support sanitizers or LTO, and differ
// in a few other details. Eventually we can support more of that and hopefully
// migrate back to gnutools::Linker.
void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
                                     const InputInfo &Output,
                                     const InputInfoList &Inputs,
                                     const ArgList &Args,
                                     const char *LinkingOutput) const {
  const toolchains::NaCl_TC &ToolChain =
      static_cast<const toolchains::NaCl_TC &>(getToolChain());
  const Driver &D = ToolChain.getDriver();
  const llvm::Triple::ArchType Arch = ToolChain.getArch();
  const bool IsStatic =
      !Args.hasArg(options::OPT_dynamic) && !Args.hasArg(options::OPT_shared);

  ArgStringList CmdArgs;

  // Silence warning for "clang -g foo.o -o foo"
  Args.ClaimAllArgs(options::OPT_g_Group);
  // and "clang -emit-llvm foo.o -o foo"
  Args.ClaimAllArgs(options::OPT_emit_llvm);
  // and for "clang -w foo.o -o foo". Other warning options are already
  // handled somewhere else.
  Args.ClaimAllArgs(options::OPT_w);

  if (!D.SysRoot.empty())
    CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));

  if (Args.hasArg(options::OPT_rdynamic))
    CmdArgs.push_back("-export-dynamic");

  if (Args.hasArg(options::OPT_s))
    CmdArgs.push_back("-s");

  // NaCl_TC doesn't have ExtraOpts like Linux; the only relevant flag from
  // there is --build-id, which we do want.
CmdArgs.push_back("--build-id"); if (!IsStatic) CmdArgs.push_back("--eh-frame-hdr"); CmdArgs.push_back("-m"); if (Arch == llvm::Triple::x86) CmdArgs.push_back("elf_i386_nacl"); else if (Arch == llvm::Triple::arm) CmdArgs.push_back("armelf_nacl"); else if (Arch == llvm::Triple::x86_64) CmdArgs.push_back("elf_x86_64_nacl"); else if (Arch == llvm::Triple::mipsel) CmdArgs.push_back("mipselelf_nacl"); else D.Diag(diag::err_target_unsupported_arch) << ToolChain.getArchName() << "Native Client"; if (IsStatic) CmdArgs.push_back("-static"); else if (Args.hasArg(options::OPT_shared)) CmdArgs.push_back("-shared"); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt1.o"))); CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o"))); const char *crtbegin; if (IsStatic) crtbegin = "crtbeginT.o"; else if (Args.hasArg(options::OPT_shared)) crtbegin = "crtbeginS.o"; else crtbegin = "crtbegin.o"; CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin))); } Args.AddAllArgs(CmdArgs, options::OPT_L); Args.AddAllArgs(CmdArgs, options::OPT_u); const ToolChain::path_list &Paths = ToolChain.getFilePaths(); for (const auto &Path : Paths) CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path)); if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle)) CmdArgs.push_back("--no-demangle"); AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs); if (D.CCCIsCXX() && !Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) && !IsStatic; if (OnlyLibstdcxxStatic) CmdArgs.push_back("-Bstatic"); ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs); if (OnlyLibstdcxxStatic) CmdArgs.push_back("-Bdynamic"); CmdArgs.push_back("-lm"); } if (!Args.hasArg(options::OPT_nostdlib)) { if (!Args.hasArg(options::OPT_nodefaultlibs)) { // Always use groups, since it has no effect on dynamic libraries. CmdArgs.push_back("--start-group"); CmdArgs.push_back("-lc"); // NaCl's libc++ currently requires libpthread, so just always include it // in the group for C++. if (Args.hasArg(options::OPT_pthread) || Args.hasArg(options::OPT_pthreads) || D.CCCIsCXX()) { // Gold, used by Mips, handles nested groups differently than ld, and // without '-lnacl' it prefers symbols from libpthread.a over libnacl.a, // which is not a desired behaviour here. // See https://sourceware.org/ml/binutils/2015-03/msg00034.html if (getToolChain().getArch() == llvm::Triple::mipsel) CmdArgs.push_back("-lnacl"); CmdArgs.push_back("-lpthread"); } CmdArgs.push_back("-lgcc"); CmdArgs.push_back("--as-needed"); if (IsStatic) CmdArgs.push_back("-lgcc_eh"); else CmdArgs.push_back("-lgcc_s"); CmdArgs.push_back("--no-as-needed"); // Mips needs to create and use pnacl_legacy library that contains // definitions from bitcode/pnaclmm.c and definitions for // __nacl_tp_tls_offset() and __nacl_tp_tdb_offset(). 
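// ---- begin illustrative sketch (editorial addition, not part of this diff;
// ---- helper name invented) ----
// Flattened out, the default-library group assembled above has a fixed shape;
// for a static C++ link on mipsel it comes to roughly:
//   --start-group -lc -lnacl -lpthread -lgcc --as-needed -lgcc_eh
//   --no-as-needed -lpnacl_legacy --end-group
#include <string>
#include <vector>

std::vector<std::string> naclLibGroup(bool IsStatic, bool WantPthread,
                                      bool IsMipsel) {
  std::vector<std::string> Out = {"--start-group", "-lc"};
  if (WantPthread) {
    if (IsMipsel)
      Out.push_back("-lnacl"); // keep gold from preferring libpthread symbols
    Out.push_back("-lpthread");
  }
  Out.push_back("-lgcc");
  Out.push_back("--as-needed");
  Out.push_back(IsStatic ? "-lgcc_eh" : "-lgcc_s");
  Out.push_back("--no-as-needed");
  if (IsMipsel)
    Out.push_back("-lpnacl_legacy");
  Out.push_back("--end-group");
  return Out;
}
// ---- end illustrative sketch ----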
if (getToolChain().getArch() == llvm::Triple::mipsel) CmdArgs.push_back("-lpnacl_legacy"); CmdArgs.push_back("--end-group"); } if (!Args.hasArg(options::OPT_nostartfiles)) { const char *crtend; if (Args.hasArg(options::OPT_shared)) crtend = "crtendS.o"; else crtend = "crtend.o"; CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend))); CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o"))); } } C.addCommand( llvm::make_unique(JA, *this, ToolChain.Linker.c_str(), CmdArgs)); } void minix::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { claimNoWarnArgs(Args); ArgStringList CmdArgs; Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); for (const auto &II : Inputs) CmdArgs.push_back(II.getFilename()); const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as")); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } void minix::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { const Driver &D = getToolChain().getDriver(); ArgStringList CmdArgs; if (Output.isFilename()) { CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); } else { assert(Output.isNothing() && "Invalid output."); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crt1.o"))); CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o"))); CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o"))); CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o"))); } Args.AddAllArgs(CmdArgs, options::OPT_L); Args.AddAllArgs(CmdArgs, options::OPT_T_Group); Args.AddAllArgs(CmdArgs, options::OPT_e); AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs); addProfileRT(getToolChain(), Args, CmdArgs); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { if (D.CCCIsCXX()) { getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs); CmdArgs.push_back("-lm"); } } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (Args.hasArg(options::OPT_pthread)) CmdArgs.push_back("-lpthread"); CmdArgs.push_back("-lc"); CmdArgs.push_back("-lCompilerRT-Generic"); CmdArgs.push_back("-L/usr/pkg/compiler-rt/lib"); CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtend.o"))); } const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath()); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } /// DragonFly Tools // For now, DragonFly Assemble does just about the same as for // FreeBSD, but this may change soon. void dragonfly::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { claimNoWarnArgs(Args); ArgStringList CmdArgs; // When building 32-bit code on DragonFly/pc64, we have to explicitly // instruct as in the base system to assemble 32-bit code. 
if (getToolChain().getArch() == llvm::Triple::x86) CmdArgs.push_back("--32"); Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); for (const auto &II : Inputs) CmdArgs.push_back(II.getFilename()); const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as")); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { const Driver &D = getToolChain().getDriver(); ArgStringList CmdArgs; bool UseGCC47 = llvm::sys::fs::exists("/usr/lib/gcc47"); if (!D.SysRoot.empty()) CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot)); CmdArgs.push_back("--eh-frame-hdr"); if (Args.hasArg(options::OPT_static)) { CmdArgs.push_back("-Bstatic"); } else { if (Args.hasArg(options::OPT_rdynamic)) CmdArgs.push_back("-export-dynamic"); if (Args.hasArg(options::OPT_shared)) CmdArgs.push_back("-Bshareable"); else { CmdArgs.push_back("-dynamic-linker"); CmdArgs.push_back("/usr/libexec/ld-elf.so.2"); } CmdArgs.push_back("--hash-style=both"); } // When building 32-bit code on DragonFly/pc64, we have to explicitly // instruct ld in the base system to link 32-bit code. if (getToolChain().getArch() == llvm::Triple::x86) { CmdArgs.push_back("-m"); CmdArgs.push_back("elf_i386"); } if (Output.isFilename()) { CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); } else { assert(Output.isNothing() && "Invalid output."); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) { if (Args.hasArg(options::OPT_pg)) CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("gcrt1.o"))); else { if (Args.hasArg(options::OPT_pie)) CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("Scrt1.o"))); else CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crt1.o"))); } } CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o"))); if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie)) CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o"))); else CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o"))); } Args.AddAllArgs(CmdArgs, options::OPT_L); Args.AddAllArgs(CmdArgs, options::OPT_T_Group); Args.AddAllArgs(CmdArgs, options::OPT_e); AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { // FIXME: GCC passes on -lgcc, -lgcc_pic and a whole lot of // rpaths if (UseGCC47) CmdArgs.push_back("-L/usr/lib/gcc47"); else CmdArgs.push_back("-L/usr/lib/gcc44"); if (!Args.hasArg(options::OPT_static)) { if (UseGCC47) { CmdArgs.push_back("-rpath"); CmdArgs.push_back("/usr/lib/gcc47"); } else { CmdArgs.push_back("-rpath"); CmdArgs.push_back("/usr/lib/gcc44"); } } if (D.CCCIsCXX()) { getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs); CmdArgs.push_back("-lm"); } if (Args.hasArg(options::OPT_pthread)) CmdArgs.push_back("-lpthread"); if (!Args.hasArg(options::OPT_nolibc)) { CmdArgs.push_back("-lc"); } if (UseGCC47) { if (Args.hasArg(options::OPT_static) || Args.hasArg(options::OPT_static_libgcc)) { CmdArgs.push_back("-lgcc"); CmdArgs.push_back("-lgcc_eh"); } else { if (Args.hasArg(options::OPT_shared_libgcc)) { CmdArgs.push_back("-lgcc_pic"); if (!Args.hasArg(options::OPT_shared)) 
CmdArgs.push_back("-lgcc"); } else { CmdArgs.push_back("-lgcc"); CmdArgs.push_back("--as-needed"); CmdArgs.push_back("-lgcc_pic"); CmdArgs.push_back("--no-as-needed"); } } } else { if (Args.hasArg(options::OPT_shared)) { CmdArgs.push_back("-lgcc_pic"); } else { CmdArgs.push_back("-lgcc"); } } } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie)) CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtendS.o"))); else CmdArgs.push_back( Args.MakeArgString(getToolChain().GetFilePath("crtend.o"))); CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o"))); } addProfileRT(getToolChain(), Args, CmdArgs); const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath()); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } // Try to find Exe from a Visual Studio distribution. This first tries to find // an installed copy of Visual Studio and, failing that, looks in the PATH, // making sure that whatever executable that's found is not a same-named exe // from clang itself to prevent clang from falling back to itself. static std::string FindVisualStudioExecutable(const ToolChain &TC, const char *Exe, const char *ClangProgramPath) { const auto &MSVC = static_cast(TC); std::string visualStudioBinDir; if (MSVC.getVisualStudioBinariesFolder(ClangProgramPath, visualStudioBinDir)) { SmallString<128> FilePath(visualStudioBinDir); llvm::sys::path::append(FilePath, Exe); if (llvm::sys::fs::can_execute(FilePath.c_str())) return FilePath.str(); } return Exe; } void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { ArgStringList CmdArgs; const ToolChain &TC = getToolChain(); assert((Output.isFilename() || Output.isNothing()) && "invalid output"); if (Output.isFilename()) CmdArgs.push_back( Args.MakeArgString(std::string("-out:") + Output.getFilename())); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles) && !C.getDriver().IsCLMode()) CmdArgs.push_back("-defaultlib:libcmt"); if (!llvm::sys::Process::GetEnv("LIB")) { // If the VC environment hasn't been configured (perhaps because the user // did not run vcvarsall), try to build a consistent link environment. If // the environment variable is set however, assume the user knows what // they're doing. 
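// ---- begin illustrative sketch (editorial addition, not part of this diff;
// ---- helper name invented) ----
// FindVisualStudioExecutable above is a two-step probe: prefer the detected
// Visual Studio bin directory, and only then fall back to the bare name so
// the PATH search resolves it. The same shape in miniature:
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include <string>

std::string probeTool(const std::string &PreferredDir, const char *Exe) {
  llvm::SmallString<128> Candidate(PreferredDir);
  llvm::sys::path::append(Candidate, Exe);
  if (llvm::sys::fs::can_execute(Candidate))
    return Candidate.str().str();
  return Exe; // Let the PATH lookup find it instead.
}
// ---- end illustrative sketch ----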
std::string VisualStudioDir; const auto &MSVC = static_cast(TC); if (MSVC.getVisualStudioInstallDir(VisualStudioDir)) { SmallString<128> LibDir(VisualStudioDir); llvm::sys::path::append(LibDir, "VC", "lib"); switch (MSVC.getArch()) { case llvm::Triple::x86: // x86 just puts the libraries directly in lib break; case llvm::Triple::x86_64: llvm::sys::path::append(LibDir, "amd64"); break; case llvm::Triple::arm: llvm::sys::path::append(LibDir, "arm"); break; default: break; } CmdArgs.push_back( Args.MakeArgString(std::string("-libpath:") + LibDir.c_str())); } std::string WindowsSdkLibPath; if (MSVC.getWindowsSDKLibraryPath(WindowsSdkLibPath)) CmdArgs.push_back(Args.MakeArgString(std::string("-libpath:") + WindowsSdkLibPath.c_str())); } CmdArgs.push_back("-nologo"); if (Args.hasArg(options::OPT_g_Group)) CmdArgs.push_back("-debug"); bool DLL = Args.hasArg(options::OPT__SLASH_LD, options::OPT__SLASH_LDd, options::OPT_shared); if (DLL) { CmdArgs.push_back(Args.MakeArgString("-dll")); SmallString<128> ImplibName(Output.getFilename()); llvm::sys::path::replace_extension(ImplibName, "lib"); CmdArgs.push_back(Args.MakeArgString(std::string("-implib:") + ImplibName)); } if (TC.getSanitizerArgs().needsAsanRt()) { CmdArgs.push_back(Args.MakeArgString("-debug")); CmdArgs.push_back(Args.MakeArgString("-incremental:no")); if (Args.hasArg(options::OPT__SLASH_MD, options::OPT__SLASH_MDd)) { static const char *CompilerRTComponents[] = { "asan_dynamic", "asan_dynamic_runtime_thunk", }; for (const auto &Component : CompilerRTComponents) CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, Component))); // Make sure the dynamic runtime thunk is not optimized out at link time // to ensure proper SEH handling. CmdArgs.push_back(Args.MakeArgString("-include:___asan_seh_interceptor")); } else if (DLL) { CmdArgs.push_back( Args.MakeArgString(getCompilerRT(TC, "asan_dll_thunk"))); } else { static const char *CompilerRTComponents[] = { "asan", "asan_cxx", }; for (const auto &Component : CompilerRTComponents) CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, Component))); } } Args.AddAllArgValues(CmdArgs, options::OPT__SLASH_link); // Add filenames, libraries, and other linker inputs. for (const auto &Input : Inputs) { if (Input.isFilename()) { CmdArgs.push_back(Input.getFilename()); continue; } const Arg &A = Input.getInputArg(); // Render -l options differently for the MSVC linker. if (A.getOption().matches(options::OPT_l)) { StringRef Lib = A.getValue(); const char *LinkLibArg; if (Lib.endswith(".lib")) LinkLibArg = Args.MakeArgString(Lib); else LinkLibArg = Args.MakeArgString(Lib + ".lib"); CmdArgs.push_back(LinkLibArg); continue; } // Otherwise, this is some other kind of linker input option like -Wl, -z, // or -L. Render it, even if MSVC doesn't understand it. A.renderAsInput(Args, CmdArgs); } // We need to special case some linker paths. In the case of lld, we need to // translate 'lld' into 'lld-link', and in the case of the regular msvc // linker, we need to use a special search algorithm. llvm::SmallString<128> linkPath; StringRef Linker = Args.getLastArgValue(options::OPT_fuse_ld_EQ, "link"); if (Linker.equals_lower("lld")) Linker = "lld-link"; if (Linker.equals_lower("link")) { // If we're using the MSVC linker, it's not sufficient to just use link // from the program PATH, because other environments like GnuWin32 install // their own link.exe which may come first. 
    linkPath = FindVisualStudioExecutable(TC, "link.exe",
                                          C.getDriver().getClangProgramPath());
  } else {
    linkPath = Linker;
    llvm::sys::path::replace_extension(linkPath, "exe");
    linkPath = TC.GetProgramPath(linkPath.c_str());
  }

  const char *Exec = Args.MakeArgString(linkPath);
  C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}

void visualstudio::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
                                          const InputInfo &Output,
                                          const InputInfoList &Inputs,
                                          const ArgList &Args,
                                          const char *LinkingOutput) const {
  C.addCommand(GetCommand(C, JA, Output, Inputs, Args, LinkingOutput));
}

std::unique_ptr<Command> visualstudio::Compiler::GetCommand(
    Compilation &C, const JobAction &JA, const InputInfo &Output,
    const InputInfoList &Inputs, const ArgList &Args,
    const char *LinkingOutput) const {
  ArgStringList CmdArgs;
  CmdArgs.push_back("/nologo");
  CmdArgs.push_back("/c");  // Compile only.
  CmdArgs.push_back("/W0"); // No warnings.

  // The goal is to be able to invoke this tool correctly based on
  // any flag accepted by clang-cl.

  // These are spelled the same way in clang and cl.exe.
  Args.AddAllArgs(CmdArgs, options::OPT_D, options::OPT_U);
  Args.AddAllArgs(CmdArgs, options::OPT_I);

  // Optimization level.
  if (Arg *A = Args.getLastArg(options::OPT_O, options::OPT_O0)) {
    if (A->getOption().getID() == options::OPT_O0) {
      CmdArgs.push_back("/Od");
    } else {
      StringRef OptLevel = A->getValue();
      if (OptLevel == "1" || OptLevel == "2" || OptLevel == "s")
        A->render(Args, CmdArgs);
      else if (OptLevel == "3")
        CmdArgs.push_back("/Ox");
    }
  }

  // Flags for which clang-cl has an alias.
  // FIXME: How can we ensure this stays in sync with relevant clang-cl options?

  if (Args.hasFlag(options::OPT__SLASH_GR_, options::OPT__SLASH_GR,
                   /*default=*/false))
    CmdArgs.push_back("/GR-");

  if (Arg *A = Args.getLastArg(options::OPT_ffunction_sections,
                               options::OPT_fno_function_sections))
    CmdArgs.push_back(A->getOption().getID() == options::OPT_ffunction_sections
                          ? "/Gy"
                          : "/Gy-");

  if (Arg *A = Args.getLastArg(options::OPT_fdata_sections,
                               options::OPT_fno_data_sections))
    CmdArgs.push_back(
        A->getOption().getID() == options::OPT_fdata_sections ? "/Gw" : "/Gw-");

  if (Args.hasArg(options::OPT_fsyntax_only))
    CmdArgs.push_back("/Zs");

  if (Args.hasArg(options::OPT_g_Flag, options::OPT_gline_tables_only))
    CmdArgs.push_back("/Z7");

  std::vector<std::string> Includes =
      Args.getAllArgValues(options::OPT_include);
  for (const auto &Include : Includes)
    CmdArgs.push_back(Args.MakeArgString(std::string("/FI") + Include));

  // Flags that can simply be passed through.
  Args.AddAllArgs(CmdArgs, options::OPT__SLASH_LD);
  Args.AddAllArgs(CmdArgs, options::OPT__SLASH_LDd);
  Args.AddAllArgs(CmdArgs, options::OPT__SLASH_EH);

  // The order of these flags is relevant, so pick the last one.
  if (Arg *A = Args.getLastArg(options::OPT__SLASH_MD, options::OPT__SLASH_MDd,
                               options::OPT__SLASH_MT, options::OPT__SLASH_MTd))
    A->render(Args, CmdArgs);

  // Input filename.
  assert(Inputs.size() == 1);
  const InputInfo &II = Inputs[0];
  assert(II.getType() == types::TY_C || II.getType() == types::TY_CXX);
  CmdArgs.push_back(II.getType() == types::TY_C ? "/Tc" : "/Tp");
  if (II.isFilename())
    CmdArgs.push_back(II.getFilename());
  else
    II.getInputArg().renderAsInput(Args, CmdArgs);

  // Output filename.
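// ---- begin illustrative sketch (editorial addition, not part of this diff;
// ---- helper name invented) ----
// The /O translation above in table form: -O0 becomes /Od, -O1/-O2/-Os pass
// through with cl.exe's own spelling, and -O3, which cl.exe lacks, is
// approximated by /Ox. Unknown levels are silently dropped, matching the
// fall-through above.
#include <string>

std::string clOptFlag(const std::string &Level) {
  if (Level == "0")
    return "/Od";
  if (Level == "1" || Level == "2" || Level == "s")
    return "/O" + Level;
  if (Level == "3")
    return "/Ox";
  return "";
}
// ---- end illustrative sketch ----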
assert(Output.getType() == types::TY_Object); const char *Fo = Args.MakeArgString(std::string("/Fo") + Output.getFilename()); CmdArgs.push_back(Fo); const Driver &D = getToolChain().getDriver(); std::string Exec = FindVisualStudioExecutable(getToolChain(), "cl.exe", D.getClangProgramPath()); return llvm::make_unique(JA, *this, Args.MakeArgString(Exec), CmdArgs); } /// MinGW Tools void MinGW::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { claimNoWarnArgs(Args); ArgStringList CmdArgs; if (getToolChain().getArch() == llvm::Triple::x86) { CmdArgs.push_back("--32"); } else if (getToolChain().getArch() == llvm::Triple::x86_64) { CmdArgs.push_back("--64"); } Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); for (const auto &II : Inputs) CmdArgs.push_back(II.getFilename()); const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as")); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); if (Args.hasArg(options::OPT_gsplit_dwarf)) SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output, SplitDebugName(Args, Inputs[0])); } void MinGW::Linker::AddLibGCC(const ArgList &Args, ArgStringList &CmdArgs) const { if (Args.hasArg(options::OPT_mthreads)) CmdArgs.push_back("-lmingwthrd"); CmdArgs.push_back("-lmingw32"); // Add libgcc or compiler-rt. AddRunTimeLibs(getToolChain(), getToolChain().getDriver(), CmdArgs, Args); CmdArgs.push_back("-lmoldname"); CmdArgs.push_back("-lmingwex"); CmdArgs.push_back("-lmsvcrt"); } void MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { const ToolChain &TC = getToolChain(); const Driver &D = TC.getDriver(); // const SanitizerArgs &Sanitize = TC.getSanitizerArgs(); ArgStringList CmdArgs; // Silence warning for "clang -g foo.o -o foo" Args.ClaimAllArgs(options::OPT_g_Group); // and "clang -emit-llvm foo.o -o foo" Args.ClaimAllArgs(options::OPT_emit_llvm); // and for "clang -w foo.o -o foo". Other warning options are already // handled somewhere else. 
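// ---- begin illustrative sketch (editorial addition, not part of this diff;
// ---- helper name invented) ----
// A few lines below, the PE linker emulation passed with -m is keyed off the
// target arch; as a plain table:
#include <string>

std::string mingwEmulation(const std::string &Arch) {
  if (Arch == "x86")
    return "i386pe";
  if (Arch == "x86_64")
    return "i386pep"; // 64-bit PE+, despite the i386 prefix
  if (Arch == "arm")
    return "thumb2pe";
  return "";
}
// ---- end illustrative sketch ----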
Args.ClaimAllArgs(options::OPT_w); StringRef LinkerName = Args.getLastArgValue(options::OPT_fuse_ld_EQ, "ld"); if (LinkerName.equals_lower("lld")) { CmdArgs.push_back("-flavor"); CmdArgs.push_back("gnu"); } if (!D.SysRoot.empty()) CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot)); if (Args.hasArg(options::OPT_s)) CmdArgs.push_back("-s"); CmdArgs.push_back("-m"); if (TC.getArch() == llvm::Triple::x86) CmdArgs.push_back("i386pe"); if (TC.getArch() == llvm::Triple::x86_64) CmdArgs.push_back("i386pep"); if (TC.getArch() == llvm::Triple::arm) CmdArgs.push_back("thumb2pe"); if (Args.hasArg(options::OPT_mwindows)) { CmdArgs.push_back("--subsystem"); CmdArgs.push_back("windows"); } else if (Args.hasArg(options::OPT_mconsole)) { CmdArgs.push_back("--subsystem"); CmdArgs.push_back("console"); } if (Args.hasArg(options::OPT_static)) CmdArgs.push_back("-Bstatic"); else { if (Args.hasArg(options::OPT_mdll)) CmdArgs.push_back("--dll"); else if (Args.hasArg(options::OPT_shared)) CmdArgs.push_back("--shared"); CmdArgs.push_back("-Bdynamic"); if (Args.hasArg(options::OPT_mdll) || Args.hasArg(options::OPT_shared)) { CmdArgs.push_back("-e"); if (TC.getArch() == llvm::Triple::x86) CmdArgs.push_back("_DllMainCRTStartup@12"); else CmdArgs.push_back("DllMainCRTStartup"); CmdArgs.push_back("--enable-auto-image-base"); } } CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); Args.AddAllArgs(CmdArgs, options::OPT_e); // FIXME: add -N, -n flags Args.AddLastArg(CmdArgs, options::OPT_r); Args.AddLastArg(CmdArgs, options::OPT_s); Args.AddLastArg(CmdArgs, options::OPT_t); Args.AddAllArgs(CmdArgs, options::OPT_u_Group); Args.AddLastArg(CmdArgs, options::OPT_Z_Flag); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_mdll)) { CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("dllcrt2.o"))); } else { if (Args.hasArg(options::OPT_municode)) CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crt2u.o"))); else CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crt2.o"))); } if (Args.hasArg(options::OPT_pg)) CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("gcrt2.o"))); CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtbegin.o"))); } Args.AddAllArgs(CmdArgs, options::OPT_L); const ToolChain::path_list Paths = TC.getFilePaths(); for (const auto &Path : Paths) CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path)); AddLinkerInputs(TC, Inputs, Args, CmdArgs); // TODO: Add ASan stuff here // TODO: Add profile stuff here if (D.CCCIsCXX() && !Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) && !Args.hasArg(options::OPT_static); if (OnlyLibstdcxxStatic) CmdArgs.push_back("-Bstatic"); TC.AddCXXStdlibLibArgs(Args, CmdArgs); if (OnlyLibstdcxxStatic) CmdArgs.push_back("-Bdynamic"); } if (!Args.hasArg(options::OPT_nostdlib)) { if (!Args.hasArg(options::OPT_nodefaultlibs)) { if (Args.hasArg(options::OPT_static)) CmdArgs.push_back("--start-group"); if (Args.hasArg(options::OPT_fstack_protector) || Args.hasArg(options::OPT_fstack_protector_strong) || Args.hasArg(options::OPT_fstack_protector_all)) { CmdArgs.push_back("-lssp_nonshared"); CmdArgs.push_back("-lssp"); } if (Args.hasArg(options::OPT_fopenmp)) CmdArgs.push_back("-lgomp"); AddLibGCC(Args, CmdArgs); if (Args.hasArg(options::OPT_pg)) CmdArgs.push_back("-lgmon"); if (Args.hasArg(options::OPT_pthread)) CmdArgs.push_back("-lpthread"); // 
      // add system libraries
      if (Args.hasArg(options::OPT_mwindows)) {
        CmdArgs.push_back("-lgdi32");
        CmdArgs.push_back("-lcomdlg32");
      }
      CmdArgs.push_back("-ladvapi32");
      CmdArgs.push_back("-lshell32");
      CmdArgs.push_back("-luser32");
      CmdArgs.push_back("-lkernel32");

      if (Args.hasArg(options::OPT_static))
        CmdArgs.push_back("--end-group");
      else if (!LinkerName.equals_lower("lld"))
        AddLibGCC(Args, CmdArgs);
    }

    if (!Args.hasArg(options::OPT_nostartfiles)) {
      // Add crtfastmath.o if available and fast math is enabled.
      TC.AddFastMathRuntimeIfAvailable(Args, CmdArgs);

      CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtend.o")));
    }
  }

  const char *Exec = Args.MakeArgString(TC.GetProgramPath(LinkerName.data()));
  C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}

/// XCore Tools
// We pass assemble and link construction to the xcc tool.

void XCore::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
                                    const InputInfo &Output,
                                    const InputInfoList &Inputs,
                                    const ArgList &Args,
                                    const char *LinkingOutput) const {
  claimNoWarnArgs(Args);
  ArgStringList CmdArgs;

  CmdArgs.push_back("-o");
  CmdArgs.push_back(Output.getFilename());

  CmdArgs.push_back("-c");

  if (Args.hasArg(options::OPT_v))
    CmdArgs.push_back("-v");

  if (Arg *A = Args.getLastArg(options::OPT_g_Group))
    if (!A->getOption().matches(options::OPT_g0))
      CmdArgs.push_back("-g");

  if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
                   false))
    CmdArgs.push_back("-fverbose-asm");

  Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);

  for (const auto &II : Inputs)
    CmdArgs.push_back(II.getFilename());

  const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
  C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
}

void XCore::Linker::ConstructJob(Compilation &C, const JobAction &JA,
                                 const InputInfo &Output,
                                 const InputInfoList &Inputs,
                                 const ArgList &Args,
                                 const char *LinkingOutput) const {
  ArgStringList CmdArgs;

  if (Output.isFilename()) {
    CmdArgs.push_back("-o");
    CmdArgs.push_back(Output.getFilename());
  } else {
    assert(Output.isNothing() && "Invalid output.");
  }

  if (Args.hasArg(options::OPT_v))
    CmdArgs.push_back("-v");

  // Pass -fexceptions through to the linker if it was present.
if (Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions, false)) CmdArgs.push_back("-fexceptions"); AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs); const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc")); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } void CrossWindows::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { claimNoWarnArgs(Args); const auto &TC = static_cast(getToolChain()); ArgStringList CmdArgs; const char *Exec; switch (TC.getArch()) { default: llvm_unreachable("unsupported architecture"); case llvm::Triple::arm: case llvm::Triple::thumb: break; case llvm::Triple::x86: CmdArgs.push_back("--32"); break; case llvm::Triple::x86_64: CmdArgs.push_back("--64"); break; } Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); for (const auto &Input : Inputs) CmdArgs.push_back(Input.getFilename()); const std::string Assembler = TC.GetProgramPath("as"); Exec = Args.MakeArgString(Assembler); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } void CrossWindows::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { const auto &TC = static_cast(getToolChain()); const llvm::Triple &T = TC.getTriple(); const Driver &D = TC.getDriver(); SmallString<128> EntryPoint; ArgStringList CmdArgs; const char *Exec; // Silence warning for "clang -g foo.o -o foo" Args.ClaimAllArgs(options::OPT_g_Group); // and "clang -emit-llvm foo.o -o foo" Args.ClaimAllArgs(options::OPT_emit_llvm); // and for "clang -w foo.o -o foo" Args.ClaimAllArgs(options::OPT_w); // Other warning options are already handled somewhere else. if (!D.SysRoot.empty()) CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot)); if (Args.hasArg(options::OPT_pie)) CmdArgs.push_back("-pie"); if (Args.hasArg(options::OPT_rdynamic)) CmdArgs.push_back("-export-dynamic"); if (Args.hasArg(options::OPT_s)) CmdArgs.push_back("--strip-all"); CmdArgs.push_back("-m"); switch (TC.getArch()) { default: llvm_unreachable("unsupported architecture"); case llvm::Triple::arm: case llvm::Triple::thumb: // FIXME: this is incorrect for WinCE CmdArgs.push_back("thumb2pe"); break; case llvm::Triple::x86: CmdArgs.push_back("i386pe"); EntryPoint.append("_"); break; case llvm::Triple::x86_64: CmdArgs.push_back("i386pep"); break; } if (Args.hasArg(options::OPT_shared)) { switch (T.getArch()) { default: llvm_unreachable("unsupported architecture"); case llvm::Triple::arm: case llvm::Triple::thumb: case llvm::Triple::x86_64: EntryPoint.append("_DllMainCRTStartup"); break; case llvm::Triple::x86: EntryPoint.append("_DllMainCRTStartup@12"); break; } CmdArgs.push_back("-shared"); CmdArgs.push_back("-Bdynamic"); CmdArgs.push_back("--enable-auto-image-base"); CmdArgs.push_back("--entry"); CmdArgs.push_back(Args.MakeArgString(EntryPoint)); } else { EntryPoint.append("mainCRTStartup"); CmdArgs.push_back(Args.hasArg(options::OPT_static) ? "-Bstatic" : "-Bdynamic"); if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { CmdArgs.push_back("--entry"); CmdArgs.push_back(Args.MakeArgString(EntryPoint)); } // FIXME: handle subsystem } // NOTE: deal with multiple definitions on Windows (e.g. 
COMDAT) CmdArgs.push_back("--allow-multiple-definition"); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_rdynamic)) { SmallString<261> ImpLib(Output.getFilename()); llvm::sys::path::replace_extension(ImpLib, ".lib"); CmdArgs.push_back("--out-implib"); CmdArgs.push_back(Args.MakeArgString(ImpLib)); } if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { const std::string CRTPath(D.SysRoot + "/usr/lib/"); const char *CRTBegin; CRTBegin = Args.hasArg(options::OPT_shared) ? "crtbeginS.obj" : "crtbegin.obj"; CmdArgs.push_back(Args.MakeArgString(CRTPath + CRTBegin)); } Args.AddAllArgs(CmdArgs, options::OPT_L); const auto &Paths = TC.getFilePaths(); for (const auto &Path : Paths) CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path)); AddLinkerInputs(TC, Inputs, Args, CmdArgs); if (D.CCCIsCXX() && !Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { bool StaticCXX = Args.hasArg(options::OPT_static_libstdcxx) && !Args.hasArg(options::OPT_static); if (StaticCXX) CmdArgs.push_back("-Bstatic"); TC.AddCXXStdlibLibArgs(Args, CmdArgs); if (StaticCXX) CmdArgs.push_back("-Bdynamic"); } if (!Args.hasArg(options::OPT_nostdlib)) { if (!Args.hasArg(options::OPT_nodefaultlibs)) { // TODO handle /MT[d] /MD[d] CmdArgs.push_back("-lmsvcrt"); AddRunTimeLibs(TC, D, CmdArgs, Args); } } const std::string Linker = TC.GetProgramPath("ld"); Exec = Args.MakeArgString(Linker); C.addCommand(llvm::make_unique(JA, *this, Exec, CmdArgs)); } void tools::SHAVE::Compiler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { ArgStringList CmdArgs; assert(Inputs.size() == 1); const InputInfo &II = Inputs[0]; assert(II.getType() == types::TY_C || II.getType() == types::TY_CXX); assert(Output.getType() == types::TY_PP_Asm); // Require preprocessed asm. // Append all -I, -iquote, -isystem paths. Args.AddAllArgs(CmdArgs, options::OPT_clang_i_Group); // These are spelled the same way in clang and moviCompile. Args.AddAllArgs(CmdArgs, options::OPT_D, options::OPT_U); CmdArgs.push_back("-DMYRIAD2"); CmdArgs.push_back("-mcpu=myriad2"); CmdArgs.push_back("-S"); // Any -O option passes through without translation. What about -Ofast ? if (Arg *A = Args.getLastArg(options::OPT_O_Group)) A->render(Args, CmdArgs); if (Args.hasFlag(options::OPT_ffunction_sections, options::OPT_fno_function_sections)) { CmdArgs.push_back("-ffunction-sections"); } if (Args.hasArg(options::OPT_fno_inline_functions)) CmdArgs.push_back("-fno-inline-functions"); CmdArgs.push_back("-fno-exceptions"); // Always do this even if unspecified. CmdArgs.push_back(II.getFilename()); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); std::string Exec = Args.MakeArgString(getToolChain().GetProgramPath("moviCompile")); C.addCommand( llvm::make_unique(JA, *this, Args.MakeArgString(Exec), CmdArgs)); } void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { ArgStringList CmdArgs; assert(Inputs.size() == 1); const InputInfo &II = Inputs[0]; assert(II.getType() == types::TY_PP_Asm); // Require preprocessed asm input. assert(Output.getType() == types::TY_Object); CmdArgs.push_back("-no6thSlotCompression"); CmdArgs.push_back("-cv:myriad2"); // Chip Version ? 
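// ---- begin illustrative sketch (editorial addition, not part of this diff;
// ---- helper name invented) ----
// moviAsm spells include directories as a single "-i:<dir>" token rather than
// the usual "-I <dir>" pair; the loop just below performs exactly that
// rewrite. In isolation:
#include <string>
#include <vector>

std::vector<std::string>
moviAsmIncludes(const std::vector<std::string> &Dirs) {
  std::vector<std::string> Out;
  for (const auto &Dir : Dirs)
    Out.push_back("-i:" + Dir);
  return Out;
}
// ---- end illustrative sketch ----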
CmdArgs.push_back("-noSPrefixing"); CmdArgs.push_back("-a"); // Mystery option. for (auto Arg : Args.filtered(options::OPT_I)) { Arg->claim(); CmdArgs.push_back( Args.MakeArgString(std::string("-i:") + Arg->getValue(0))); } CmdArgs.push_back("-elf"); // Output format. CmdArgs.push_back(II.getFilename()); CmdArgs.push_back( Args.MakeArgString(std::string("-o:") + Output.getFilename())); std::string Exec = Args.MakeArgString(getToolChain().GetProgramPath("moviAsm")); C.addCommand( llvm::make_unique(JA, *this, Args.MakeArgString(Exec), CmdArgs)); } Index: vendor/clang/dist/lib/Sema/SemaExprCXX.cpp =================================================================== --- vendor/clang/dist/lib/Sema/SemaExprCXX.cpp (revision 292727) +++ vendor/clang/dist/lib/Sema/SemaExprCXX.cpp (revision 292728) @@ -1,6774 +1,6776 @@ //===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// \file /// \brief Implements semantic analysis for C++ expressions. /// //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "TreeTransform.h" #include "TypeLocBuilder.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/CharUnits.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaLambda.h" #include "clang/Sema/TemplateDeduction.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/ErrorHandling.h" using namespace clang; using namespace sema; /// \brief Handle the result of the special case name lookup for inheriting /// constructor declarations. 'NS::X::X' and 'NS::X<...>::X' are treated as /// constructor names in member using declarations, even if 'X' is not the /// name of the corresponding type. ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name) { NestedNameSpecifier *NNS = SS.getScopeRep(); // Convert the nested-name-specifier into a type. QualType Type; switch (NNS->getKind()) { case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: Type = QualType(NNS->getAsType(), 0); break; case NestedNameSpecifier::Identifier: // Strip off the last layer of the nested-name-specifier and build a // typename type for it. assert(NNS->getAsIdentifier() == &Name && "not a constructor name"); Type = Context.getDependentNameType(ETK_None, NNS->getPrefix(), NNS->getAsIdentifier()); break; case NestedNameSpecifier::Global: case NestedNameSpecifier::Super: case NestedNameSpecifier::Namespace: case NestedNameSpecifier::NamespaceAlias: llvm_unreachable("Nested name specifier is not a type for inheriting ctor"); } // This reference to the type is located entirely at the location of the // final identifier in the qualified-id. 
return CreateParsedType(Type, Context.getTrivialTypeSourceInfo(Type, NameLoc)); } ParsedType Sema::getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectTypePtr, bool EnteringContext) { // Determine where to perform name lookup. // FIXME: This area of the standard is very messy, and the current // wording is rather unclear about which scopes we search for the // destructor name; see core issues 399 and 555. Issue 399 in // particular shows where the current description of destructor name // lookup is completely out of line with existing practice, e.g., // this appears to be ill-formed: // // namespace N { // template struct S { // ~S(); // }; // } // // void f(N::S* s) { // s->N::S::~S(); // } // // See also PR6358 and PR6359. // For this reason, we're currently only doing the C++03 version of this // code; the C++0x version has to wait until we get a proper spec. QualType SearchType; DeclContext *LookupCtx = nullptr; bool isDependent = false; bool LookInScope = false; if (SS.isInvalid()) return ParsedType(); // If we have an object type, it's because we are in a // pseudo-destructor-expression or a member access expression, and // we know what type we're looking for. if (ObjectTypePtr) SearchType = GetTypeFromParser(ObjectTypePtr); if (SS.isSet()) { NestedNameSpecifier *NNS = SS.getScopeRep(); bool AlreadySearched = false; bool LookAtPrefix = true; // C++11 [basic.lookup.qual]p6: // If a pseudo-destructor-name (5.2.4) contains a nested-name-specifier, // the type-names are looked up as types in the scope designated by the // nested-name-specifier. Similarly, in a qualified-id of the form: // // nested-name-specifier[opt] class-name :: ~ class-name // // the second class-name is looked up in the same scope as the first. // // Here, we determine whether the code below is permitted to look at the // prefix of the nested-name-specifier. DeclContext *DC = computeDeclContext(SS, EnteringContext); if (DC && DC->isFileContext()) { AlreadySearched = true; LookupCtx = DC; isDependent = false; } else if (DC && isa(DC)) { LookAtPrefix = false; LookInScope = true; } // The second case from the C++03 rules quoted further above. NestedNameSpecifier *Prefix = nullptr; if (AlreadySearched) { // Nothing left to do. } else if (LookAtPrefix && (Prefix = NNS->getPrefix())) { CXXScopeSpec PrefixSS; PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data())); LookupCtx = computeDeclContext(PrefixSS, EnteringContext); isDependent = isDependentScopeSpecifier(PrefixSS); } else if (ObjectTypePtr) { LookupCtx = computeDeclContext(SearchType); isDependent = SearchType->isDependentType(); } else { LookupCtx = computeDeclContext(SS, EnteringContext); isDependent = LookupCtx && LookupCtx->isDependentContext(); } } else if (ObjectTypePtr) { // C++ [basic.lookup.classref]p3: // If the unqualified-id is ~type-name, the type-name is looked up // in the context of the entire postfix-expression. If the type T // of the object expression is of a class type C, the type-name is // also looked up in the scope of class C. At least one of the // lookups shall find a name that refers to (possibly // cv-qualified) T. LookupCtx = computeDeclContext(SearchType); isDependent = SearchType->isDependentType(); assert((isDependent || !SearchType->isIncompleteType()) && "Caller should have completed object type"); LookInScope = true; } else { // Perform lookup into the current scope (only). 
LookInScope = true; } TypeDecl *NonMatchingTypeDecl = nullptr; LookupResult Found(*this, &II, NameLoc, LookupOrdinaryName); for (unsigned Step = 0; Step != 2; ++Step) { // Look for the name first in the computed lookup context (if we // have one) and, if that fails to find a match, in the scope (if // we're allowed to look there). Found.clear(); if (Step == 0 && LookupCtx) LookupQualifiedName(Found, LookupCtx); else if (Step == 1 && LookInScope && S) LookupName(Found, S); else continue; // FIXME: Should we be suppressing ambiguities here? if (Found.isAmbiguous()) return ParsedType(); if (TypeDecl *Type = Found.getAsSingle()) { QualType T = Context.getTypeDeclType(Type); MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false); if (SearchType.isNull() || SearchType->isDependentType() || Context.hasSameUnqualifiedType(T, SearchType)) { // We found our type! return CreateParsedType(T, Context.getTrivialTypeSourceInfo(T, NameLoc)); } if (!SearchType.isNull()) NonMatchingTypeDecl = Type; } // If the name that we found is a class template name, and it is // the same name as the template name in the last part of the // nested-name-specifier (if present) or the object type, then // this is the destructor for that class. // FIXME: This is a workaround until we get real drafting for core // issue 399, for which there isn't even an obvious direction. if (ClassTemplateDecl *Template = Found.getAsSingle()) { QualType MemberOfType; if (SS.isSet()) { if (DeclContext *Ctx = computeDeclContext(SS, EnteringContext)) { // Figure out the type of the context, if it has one. if (CXXRecordDecl *Record = dyn_cast(Ctx)) MemberOfType = Context.getTypeDeclType(Record); } } if (MemberOfType.isNull()) MemberOfType = SearchType; if (MemberOfType.isNull()) continue; // We're referring into a class template specialization. If the // class template we found is the same as the template being // specialized, we found what we are looking for. if (const RecordType *Record = MemberOfType->getAs()) { if (ClassTemplateSpecializationDecl *Spec = dyn_cast(Record->getDecl())) { if (Spec->getSpecializedTemplate()->getCanonicalDecl() == Template->getCanonicalDecl()) return CreateParsedType( MemberOfType, Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc)); } continue; } // We're referring to an unresolved class template // specialization. Determine whether we class template we found // is the same as the template being specialized or, if we don't // know which template is being specialized, that it at least // has the same name. if (const TemplateSpecializationType *SpecType = MemberOfType->getAs()) { TemplateName SpecName = SpecType->getTemplateName(); // The class template we found is the same template being // specialized. if (TemplateDecl *SpecTemplate = SpecName.getAsTemplateDecl()) { if (SpecTemplate->getCanonicalDecl() == Template->getCanonicalDecl()) return CreateParsedType( MemberOfType, Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc)); continue; } // The class template we found has the same name as the // (dependent) template name being specialized. if (DependentTemplateName *DepTemplate = SpecName.getAsDependentTemplateName()) { if (DepTemplate->isIdentifier() && DepTemplate->getIdentifier() == Template->getIdentifier()) return CreateParsedType( MemberOfType, Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc)); continue; } } } } if (isDependent) { // We didn't find our type, but that's okay: it's dependent // anyway. // FIXME: What if we have no nested-name-specifier? 
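// ---- begin illustrative sketch (editorial addition, not part of this diff) ----
// A user-level view of the two-step lookup above: the name after '~' is found
// via the object type even when it is spelled through a typedef, so this is
// well-formed and calls S::~S().
#include <new>

struct S {
  ~S() {}
};
typedef S T;

int main() {
  alignas(S) unsigned char Buf[sizeof(S)];
  S *P = new (Buf) S;
  P->~T(); // ~T is looked up in the scope of S (and the enclosing context).
}
// ---- end illustrative sketch ----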
QualType T = CheckTypenameType(ETK_None, SourceLocation(), SS.getWithLocInContext(Context), II, NameLoc); return ParsedType::make(T); } if (NonMatchingTypeDecl) { QualType T = Context.getTypeDeclType(NonMatchingTypeDecl); Diag(NameLoc, diag::err_destructor_expr_type_mismatch) << T << SearchType; Diag(NonMatchingTypeDecl->getLocation(), diag::note_destructor_type_here) << T; } else if (ObjectTypePtr) Diag(NameLoc, diag::err_ident_in_dtor_not_a_type) << &II; else { SemaDiagnosticBuilder DtorDiag = Diag(NameLoc, diag::err_destructor_class_name); if (S) { const DeclContext *Ctx = S->getEntity(); if (const CXXRecordDecl *Class = dyn_cast_or_null(Ctx)) DtorDiag << FixItHint::CreateReplacement(SourceRange(NameLoc), Class->getNameAsString()); } } return ParsedType(); } ParsedType Sema::getDestructorType(const DeclSpec& DS, ParsedType ObjectType) { if (DS.getTypeSpecType() == DeclSpec::TST_error || !ObjectType) return ParsedType(); assert(DS.getTypeSpecType() == DeclSpec::TST_decltype && "only get destructor types from declspecs"); QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc()); QualType SearchType = GetTypeFromParser(ObjectType); if (SearchType->isDependentType() || Context.hasSameUnqualifiedType(SearchType, T)) { return ParsedType::make(T); } Diag(DS.getTypeSpecTypeLoc(), diag::err_destructor_expr_type_mismatch) << T << SearchType; return ParsedType(); } bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Name) { assert(Name.getKind() == UnqualifiedId::IK_LiteralOperatorId); if (!SS.isValid()) return false; switch (SS.getScopeRep()->getKind()) { case NestedNameSpecifier::Identifier: case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: // Per C++11 [over.literal]p2, literal operators can only be declared at // namespace scope. Therefore, this unqualified-id cannot name anything. // Reject it early, because we have no AST representation for this in the // case where the scope is dependent. Diag(Name.getLocStart(), diag::err_literal_operator_id_outside_namespace) << SS.getScopeRep(); return true; case NestedNameSpecifier::Global: case NestedNameSpecifier::Super: case NestedNameSpecifier::Namespace: case NestedNameSpecifier::NamespaceAlias: return false; } llvm_unreachable("unknown nested name specifier kind"); } /// \brief Build a C++ typeid expression with a type operand. ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc) { // C++ [expr.typeid]p4: // The top-level cv-qualifiers of the lvalue expression or the type-id // that is the operand of typeid are always ignored. // If the type of the type-id is a class type or a reference to a class // type, the class shall be completely-defined. Qualifiers Quals; QualType T = Context.getUnqualifiedArrayType(Operand->getType().getNonReferenceType(), Quals); if (T->getAs() && RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid)) return ExprError(); if (T->isVariablyModifiedType()) return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid) << T); return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), Operand, SourceRange(TypeidLoc, RParenLoc)); } /// \brief Build a C++ typeid expression with an expression operand. 
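/// For example (illustrative):
/// @code
///   struct B { virtual ~B(); };
///   extern B &b;
///   typeid(b);     // glvalue of polymorphic class type: evaluated at run time
///   typeid(1 + 2); // otherwise: an unevaluated operand
/// @endcode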
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
                                SourceLocation TypeidLoc,
                                Expr *E,
                                SourceLocation RParenLoc) {
  bool WasEvaluated = false;
  if (E && !E->isTypeDependent()) {
    if (E->getType()->isPlaceholderType()) {
      ExprResult result = CheckPlaceholderExpr(E);
      if (result.isInvalid()) return ExprError();
      E = result.get();
    }

    QualType T = E->getType();
    if (const RecordType *RecordT = T->getAs<RecordType>()) {
      CXXRecordDecl *RecordD = cast<CXXRecordDecl>(RecordT->getDecl());
      // C++ [expr.typeid]p3:
      //   [...] If the type of the expression is a class type, the class
      //   shall be completely-defined.
      if (RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
        return ExprError();

      // C++ [expr.typeid]p3:
      //   When typeid is applied to an expression other than a glvalue of a
      //   polymorphic class type [...] [the] expression is an unevaluated
      //   operand. [...]
      if (RecordD->isPolymorphic() && E->isGLValue()) {
        // The subexpression is potentially evaluated; switch the context
        // and recheck the subexpression.
        ExprResult Result = TransformToPotentiallyEvaluated(E);
        if (Result.isInvalid()) return ExprError();
        E = Result.get();

        // We require a vtable to query the type at run time.
        MarkVTableUsed(TypeidLoc, RecordD);
        WasEvaluated = true;
      }
    }

    // C++ [expr.typeid]p4:
    //   [...] If the type of the type-id is a reference to a possibly
    //   cv-qualified type, the result of the typeid expression refers to a
    //   std::type_info object representing the cv-unqualified referenced
    //   type.
    Qualifiers Quals;
    QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals);
    if (!Context.hasSameType(T, UnqualT)) {
      T = UnqualT;
      E = ImpCastExprToType(E, UnqualT, CK_NoOp, E->getValueKind()).get();
    }
  }

  if (E->getType()->isVariablyModifiedType())
    return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid)
                     << E->getType());
  else if (ActiveTemplateInstantiations.empty() &&
           E->HasSideEffects(Context, WasEvaluated)) {
    // The expression operand for typeid is in an unevaluated expression
    // context, so side effects could result in unintended consequences.
    Diag(E->getExprLoc(), WasEvaluated
                              ? diag::warn_side_effects_typeid
                              : diag::warn_side_effects_unevaluated_context);
  }

  return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), E,
                                     SourceRange(TypeidLoc, RParenLoc));
}

/// ActOnCXXTypeidOfType - Parse typeid( type-id ) or typeid (expression);
ExprResult Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                                bool isType, void *TyOrExpr,
                                SourceLocation RParenLoc) {
  // Find the std::type_info type.
  if (!getStdNamespace())
    return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));

  if (!CXXTypeInfoDecl) {
    IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get("type_info");
    LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
    LookupQualifiedName(R, getStdNamespace());
    CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
    // Microsoft's typeinfo doesn't have type_info in std but in the global
    // namespace if _HAS_EXCEPTIONS is defined to 0. See PR13153.
    if (!CXXTypeInfoDecl && LangOpts.MSVCCompat) {
      LookupQualifiedName(R, Context.getTranslationUnitDecl());
      CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
    }

    if (!CXXTypeInfoDecl)
      return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
  }

  if (!getLangOpts().RTTI) {
    return ExprError(Diag(OpLoc, diag::err_no_typeid_with_fno_rtti));
  }

  QualType TypeInfoType = Context.getTypeDeclType(CXXTypeInfoDecl);

  if (isType) {
    // The operand is a type; handle it as such.
TypeSourceInfo *TInfo = nullptr; QualType T = GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrExpr), &TInfo); if (T.isNull()) return ExprError(); if (!TInfo) TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc); return BuildCXXTypeId(TypeInfoType, OpLoc, TInfo, RParenLoc); } // The operand is an expression. return BuildCXXTypeId(TypeInfoType, OpLoc, (Expr*)TyOrExpr, RParenLoc); } /// \brief Build a Microsoft __uuidof expression with a type operand. ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc) { if (!Operand->getType()->isDependentType()) { bool HasMultipleGUIDs = false; if (!CXXUuidofExpr::GetUuidAttrOfType(Operand->getType(), &HasMultipleGUIDs)) { if (HasMultipleGUIDs) return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids)); else return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid)); } } return new (Context) CXXUuidofExpr(TypeInfoType.withConst(), Operand, SourceRange(TypeidLoc, RParenLoc)); } /// \brief Build a Microsoft __uuidof expression with an expression operand. ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *E, SourceLocation RParenLoc) { if (!E->getType()->isDependentType()) { bool HasMultipleGUIDs = false; if (!CXXUuidofExpr::GetUuidAttrOfType(E->getType(), &HasMultipleGUIDs) && !E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) { if (HasMultipleGUIDs) return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids)); else return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid)); } } return new (Context) CXXUuidofExpr(TypeInfoType.withConst(), E, SourceRange(TypeidLoc, RParenLoc)); } /// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression); ExprResult Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc) { // If MSVCGuidDecl has not been cached, do the lookup. if (!MSVCGuidDecl) { IdentifierInfo *GuidII = &PP.getIdentifierTable().get("_GUID"); LookupResult R(*this, GuidII, SourceLocation(), LookupTagName); LookupQualifiedName(R, Context.getTranslationUnitDecl()); MSVCGuidDecl = R.getAsSingle(); if (!MSVCGuidDecl) return ExprError(Diag(OpLoc, diag::err_need_header_before_ms_uuidof)); } QualType GuidType = Context.getTypeDeclType(MSVCGuidDecl); if (isType) { // The operand is a type; handle it as such. TypeSourceInfo *TInfo = nullptr; QualType T = GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrExpr), &TInfo); if (T.isNull()) return ExprError(); if (!TInfo) TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc); return BuildCXXUuidof(GuidType, OpLoc, TInfo, RParenLoc); } // The operand is an expression. return BuildCXXUuidof(GuidType, OpLoc, (Expr*)TyOrExpr, RParenLoc); } /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) { assert((Kind == tok::kw_true || Kind == tok::kw_false) && "Unknown C++ Boolean value!"); return new (Context) CXXBoolLiteralExpr(Kind == tok::kw_true, Context.BoolTy, OpLoc); } /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) { return new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc); } /// ActOnCXXThrow - Parse throw expressions. 
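/// For example (illustrative, 'SomeClass' is a hypothetical type):
/// @code
///   void f() { SomeClass local; throw local; }
/// @endcode
/// Here the copy/move into the exception object may be elided, since
/// 'local' is a non-volatile automatic object; see the rules quoted below.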
ExprResult Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) { bool IsThrownVarInScope = false; if (Ex) { // C++0x [class.copymove]p31: // When certain criteria are met, an implementation is allowed to omit the // copy/move construction of a class object [...] // // - in a throw-expression, when the operand is the name of a // non-volatile automatic object (other than a function or catch- // clause parameter) whose scope does not extend beyond the end of the // innermost enclosing try-block (if there is one), the copy/move // operation from the operand to the exception object (15.1) can be // omitted by constructing the automatic object directly into the // exception object if (DeclRefExpr *DRE = dyn_cast(Ex->IgnoreParens())) if (VarDecl *Var = dyn_cast(DRE->getDecl())) { if (Var->hasLocalStorage() && !Var->getType().isVolatileQualified()) { for( ; S; S = S->getParent()) { if (S->isDeclScope(Var)) { IsThrownVarInScope = true; break; } if (S->getFlags() & (Scope::FnScope | Scope::ClassScope | Scope::BlockScope | Scope::FunctionPrototypeScope | Scope::ObjCMethodScope | Scope::TryScope)) break; } } } } return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope); } ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope) { // Don't report an error if 'throw' is used in system headers. if (!getLangOpts().CXXExceptions && !getSourceManager().isInSystemHeader(OpLoc)) Diag(OpLoc, diag::err_exceptions_disabled) << "throw"; if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope()) Diag(OpLoc, diag::err_omp_simd_region_cannot_use_stmt) << "throw"; if (Ex && !Ex->isTypeDependent()) { QualType ExceptionObjectTy = Context.getExceptionObjectType(Ex->getType()); if (CheckCXXThrowOperand(OpLoc, ExceptionObjectTy, Ex)) return ExprError(); // Initialize the exception result. This implicitly weeds out // abstract types or types with inaccessible copy constructors. // C++0x [class.copymove]p31: // When certain criteria are met, an implementation is allowed to omit the // copy/move construction of a class object [...] // // - in a throw-expression, when the operand is the name of a // non-volatile automatic object (other than a function or // catch-clause // parameter) whose scope does not extend beyond the end of the // innermost enclosing try-block (if there is one), the copy/move // operation from the operand to the exception object (15.1) can be // omitted by constructing the automatic object directly into the // exception object const VarDecl *NRVOVariable = nullptr; if (IsThrownVarInScope) NRVOVariable = getCopyElisionCandidate(QualType(), Ex, false); InitializedEntity Entity = InitializedEntity::InitializeException( OpLoc, ExceptionObjectTy, /*NRVO=*/NRVOVariable != nullptr); ExprResult Res = PerformMoveOrCopyInitialization( Entity, NRVOVariable, QualType(), Ex, IsThrownVarInScope); if (Res.isInvalid()) return ExprError(); Ex = Res.get(); } return new (Context) CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope); } static void collectPublicBases(CXXRecordDecl *RD, llvm::DenseMap &SubobjectsSeen, llvm::SmallPtrSetImpl &VBases, llvm::SetVector &PublicSubobjectsSeen, bool ParentIsPublic) { for (const CXXBaseSpecifier &BS : RD->bases()) { CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl(); bool NewSubobject; // Virtual bases constitute the same subobject. Non-virtual bases are // always distinct subobjects. 
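    // For illustration (hypothetical hierarchy):
    //
    //   struct V {};
    //   struct A : virtual V {};
    //   struct B : virtual V {};
    //   struct C : A, B {};  // V is one shared subobject, counted once
    //
    // A non-virtual V repeated on both paths would instead be counted
    // twice and therefore treated as ambiguous below.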
if (BS.isVirtual()) NewSubobject = VBases.insert(BaseDecl).second; else NewSubobject = true; if (NewSubobject) ++SubobjectsSeen[BaseDecl]; // Only add subobjects which have public access throughout the entire chain. bool PublicPath = ParentIsPublic && BS.getAccessSpecifier() == AS_public; if (PublicPath) PublicSubobjectsSeen.insert(BaseDecl); // Recurse on to each base subobject. collectPublicBases(BaseDecl, SubobjectsSeen, VBases, PublicSubobjectsSeen, PublicPath); } } static void getUnambiguousPublicSubobjects( CXXRecordDecl *RD, llvm::SmallVectorImpl &Objects) { llvm::DenseMap SubobjectsSeen; llvm::SmallSet VBases; llvm::SetVector PublicSubobjectsSeen; SubobjectsSeen[RD] = 1; PublicSubobjectsSeen.insert(RD); collectPublicBases(RD, SubobjectsSeen, VBases, PublicSubobjectsSeen, /*ParentIsPublic=*/true); for (CXXRecordDecl *PublicSubobject : PublicSubobjectsSeen) { // Skip ambiguous objects. if (SubobjectsSeen[PublicSubobject] > 1) continue; Objects.push_back(PublicSubobject); } } /// CheckCXXThrowOperand - Validate the operand of a throw. bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ExceptionObjectTy, Expr *E) { // If the type of the exception would be an incomplete type or a pointer // to an incomplete type other than (cv) void the program is ill-formed. QualType Ty = ExceptionObjectTy; bool isPointer = false; if (const PointerType* Ptr = Ty->getAs()) { Ty = Ptr->getPointeeType(); isPointer = true; } if (!isPointer || !Ty->isVoidType()) { if (RequireCompleteType(ThrowLoc, Ty, isPointer ? diag::err_throw_incomplete_ptr : diag::err_throw_incomplete, E->getSourceRange())) return true; if (RequireNonAbstractType(ThrowLoc, ExceptionObjectTy, diag::err_throw_abstract_type, E)) return true; } // If the exception has class type, we need additional handling. CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); if (!RD) return false; // If we are throwing a polymorphic class type or pointer thereof, // exception handling will make use of the vtable. MarkVTableUsed(ThrowLoc, RD); // If a pointer is thrown, the referenced object will not be destroyed. if (isPointer) return false; // If the class has a destructor, we must be able to call it. if (!RD->hasIrrelevantDestructor()) { if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) { MarkFunctionReferenced(E->getExprLoc(), Destructor); CheckDestructorAccess(E->getExprLoc(), Destructor, PDiag(diag::err_access_dtor_exception) << Ty); if (DiagnoseUseOfDecl(Destructor, E->getExprLoc())) return true; } } // The MSVC ABI creates a list of all types which can catch the exception // object. This list also references the appropriate copy constructor to call // if the object is caught by value and has a non-trivial copy constructor. if (Context.getTargetInfo().getCXXABI().isMicrosoft()) { // We are only interested in the public, unambiguous bases contained within // the exception object. Bases which are ambiguous or otherwise // inaccessible are not catchable types. llvm::SmallVector UnambiguousPublicSubobjects; getUnambiguousPublicSubobjects(RD, UnambiguousPublicSubobjects); for (CXXRecordDecl *Subobject : UnambiguousPublicSubobjects) { // Attempt to lookup the copy constructor. Various pieces of machinery // will spring into action, like template instantiation, which means this // cannot be a simple walk of the class's decls. Instead, we must perform // lookup and overload resolution. CXXConstructorDecl *CD = LookupCopyingConstructor(Subobject, 0); if (!CD) continue; // Mark the constructor referenced as it is used by this throw expression. 
MarkFunctionReferenced(E->getExprLoc(), CD); // Skip this copy constructor if it is trivial, we don't need to record it // in the catchable type data. if (CD->isTrivial()) continue; // The copy constructor is non-trivial, create a mapping from this class // type to this constructor. // N.B. The selection of copy constructor is not sensitive to this // particular throw-site. Lookup will be performed at the catch-site to // ensure that the copy constructor is, in fact, accessible (via // friendship or any other means). Context.addCopyConstructorForExceptionObject(Subobject, CD); // We don't keep the instantiated default argument expressions around so // we must rebuild them here. for (unsigned I = 1, E = CD->getNumParams(); I != E; ++I) { // Skip any default arguments that we've already instantiated. if (Context.getDefaultArgExprForConstructor(CD, I)) continue; Expr *DefaultArg = BuildCXXDefaultArgExpr(ThrowLoc, CD, CD->getParamDecl(I)).get(); Context.addDefaultArgExprForConstructor(CD, I, DefaultArg); } } } return false; } QualType Sema::getCurrentThisType() { DeclContext *DC = getFunctionLevelDeclContext(); QualType ThisTy = CXXThisTypeOverride; if (CXXMethodDecl *method = dyn_cast(DC)) { if (method && method->isInstance()) ThisTy = method->getThisType(Context); } if (ThisTy.isNull()) { if (isGenericLambdaCallOperatorSpecialization(CurContext) && CurContext->getParent()->getParent()->isRecord()) { // This is a generic lambda call operator that is being instantiated // within a default initializer - so use the enclosing class as 'this'. // There is no enclosing member function to retrieve the 'this' pointer // from. QualType ClassTy = Context.getTypeDeclType( cast(CurContext->getParent()->getParent())); // There are no cv-qualifiers for 'this' within default initializers, // per [expr.prim.general]p4. return Context.getPointerType(ClassTy); } } return ThisTy; } Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled) : S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false) { if (!Enabled || !ContextDecl) return; CXXRecordDecl *Record = nullptr; if (ClassTemplateDecl *Template = dyn_cast(ContextDecl)) Record = Template->getTemplatedDecl(); else Record = cast(ContextDecl); S.CXXThisTypeOverride = S.Context.getPointerType( S.Context.getRecordType(Record).withCVRQualifiers(CXXThisTypeQuals)); this->Enabled = true; } Sema::CXXThisScopeRAII::~CXXThisScopeRAII() { if (Enabled) { S.CXXThisTypeOverride = OldCXXThisTypeOverride; } } static Expr *captureThis(ASTContext &Context, RecordDecl *RD, QualType ThisTy, SourceLocation Loc) { FieldDecl *Field = FieldDecl::Create(Context, RD, Loc, Loc, nullptr, ThisTy, Context.getTrivialTypeSourceInfo(ThisTy, Loc), nullptr, false, ICIS_NoInit); Field->setImplicit(true); Field->setAccess(AS_private); RD->addDecl(Field); return new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit*/true); } bool Sema::CheckCXXThisCapture(SourceLocation Loc, bool Explicit, bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt) { // We don't need to capture this in an unevaluated context. if (isUnevaluatedContext() && !Explicit) return true; const unsigned MaxFunctionScopesIndex = FunctionScopeIndexToStopAt ? *FunctionScopeIndexToStopAt : FunctionScopes.size() - 1; // Otherwise, check that we can capture 'this'. 
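  // For illustration (hypothetical code):
  //
  //   struct S {
  //     int x;
  //     void m() { auto l = [=] { return x; }; }  // implicitly captures 'this'
  //   };
  //
  // Every intervening closure between the use of 'this' and the enclosing
  // member function must be able to capture it, implicitly or explicitly.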
unsigned NumClosures = 0; for (unsigned idx = MaxFunctionScopesIndex; idx != 0; idx--) { if (CapturingScopeInfo *CSI = dyn_cast(FunctionScopes[idx])) { if (CSI->CXXThisCaptureIndex != 0) { // 'this' is already being captured; there isn't anything more to do. break; } LambdaScopeInfo *LSI = dyn_cast(CSI); if (LSI && isGenericLambdaCallOperatorSpecialization(LSI->CallOperator)) { // This context can't implicitly capture 'this'; fail out. if (BuildAndDiagnose) Diag(Loc, diag::err_this_capture) << Explicit; return true; } if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref || CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval || CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_Block || CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_CapturedRegion || Explicit) { // This closure can capture 'this'; continue looking upwards. NumClosures++; Explicit = false; continue; } // This context can't implicitly capture 'this'; fail out. if (BuildAndDiagnose) Diag(Loc, diag::err_this_capture) << Explicit; return true; } break; } if (!BuildAndDiagnose) return false; // Mark that we're implicitly capturing 'this' in all the scopes we skipped. // FIXME: We need to delay this marking in PotentiallyPotentiallyEvaluated // contexts. for (unsigned idx = MaxFunctionScopesIndex; NumClosures; --idx, --NumClosures) { CapturingScopeInfo *CSI = cast(FunctionScopes[idx]); Expr *ThisExpr = nullptr; QualType ThisTy = getCurrentThisType(); if (LambdaScopeInfo *LSI = dyn_cast(CSI)) // For lambda expressions, build a field and an initializing expression. ThisExpr = captureThis(Context, LSI->Lambda, ThisTy, Loc); else if (CapturedRegionScopeInfo *RSI = dyn_cast(FunctionScopes[idx])) ThisExpr = captureThis(Context, RSI->TheRecordDecl, ThisTy, Loc); bool isNested = NumClosures > 1; CSI->addThisCapture(isNested, Loc, ThisTy, ThisExpr); } return false; } ExprResult Sema::ActOnCXXThis(SourceLocation Loc) { /// C++ 9.3.2: In the body of a non-static member function, the keyword this /// is a non-lvalue expression whose value is the address of the object for /// which the function is called. QualType ThisTy = getCurrentThisType(); if (ThisTy.isNull()) return Diag(Loc, diag::err_invalid_this_use); CheckCXXThisCapture(Loc); return new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit=*/false); } bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) { // If we're outside the body of a member function, then we'll have a specified // type for 'this'. if (CXXThisTypeOverride.isNull()) return false; // Determine whether we're looking into a class that's currently being // defined. CXXRecordDecl *Class = BaseType->getAsCXXRecordDecl(); return Class && Class->isBeingDefined(); } ExprResult Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg exprs, SourceLocation RParenLoc) { if (!TypeRep) return ExprError(); TypeSourceInfo *TInfo; QualType Ty = GetTypeFromParser(TypeRep, &TInfo); if (!TInfo) TInfo = Context.getTrivialTypeSourceInfo(Ty, SourceLocation()); return BuildCXXTypeConstructExpr(TInfo, LParenLoc, exprs, RParenLoc); } /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). 
ExprResult Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc) { QualType Ty = TInfo->getType(); SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc(); if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs)) { return CXXUnresolvedConstructExpr::Create(Context, TInfo, LParenLoc, Exprs, RParenLoc); } bool ListInitialization = LParenLoc.isInvalid(); assert((!ListInitialization || (Exprs.size() == 1 && isa(Exprs[0]))) && "List initialization must have initializer list as expression."); SourceRange FullRange = SourceRange(TyBeginLoc, ListInitialization ? Exprs[0]->getSourceRange().getEnd() : RParenLoc); // C++ [expr.type.conv]p1: // If the expression list is a single expression, the type conversion // expression is equivalent (in definedness, and if defined in meaning) to the // corresponding cast expression. if (Exprs.size() == 1 && !ListInitialization) { Expr *Arg = Exprs[0]; return BuildCXXFunctionalCastExpr(TInfo, LParenLoc, Arg, RParenLoc); } QualType ElemTy = Ty; if (Ty->isArrayType()) { if (!ListInitialization) return ExprError(Diag(TyBeginLoc, diag::err_value_init_for_array_type) << FullRange); ElemTy = Context.getBaseElementType(Ty); } if (!Ty->isVoidType() && RequireCompleteType(TyBeginLoc, ElemTy, diag::err_invalid_incomplete_type_use, FullRange)) return ExprError(); if (RequireNonAbstractType(TyBeginLoc, Ty, diag::err_allocation_of_abstract_type)) return ExprError(); InitializedEntity Entity = InitializedEntity::InitializeTemporary(TInfo); InitializationKind Kind = Exprs.size() ? ListInitialization ? InitializationKind::CreateDirectList(TyBeginLoc) : InitializationKind::CreateDirect(TyBeginLoc, LParenLoc, RParenLoc) : InitializationKind::CreateValue(TyBeginLoc, LParenLoc, RParenLoc); InitializationSequence InitSeq(*this, Entity, Kind, Exprs); ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Exprs); if (Result.isInvalid() || !ListInitialization) return Result; Expr *Inner = Result.get(); if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null(Inner)) Inner = BTE->getSubExpr(); if (!isa(Inner)) { // If we created a CXXTemporaryObjectExpr, that node also represents the // functional cast. Otherwise, create an explicit cast to represent // the syntactic form of a functional-style cast that was used here. // // FIXME: Creating a CXXFunctionalCastExpr around a CXXConstructExpr // would give a more consistent AST representation than using a // CXXTemporaryObjectExpr. It's also weird that the functional cast // is sometimes handled by initialization and sometimes not. QualType ResultType = Result.get()->getType(); Result = CXXFunctionalCastExpr::Create( Context, ResultType, Expr::getValueKindForType(TInfo->getType()), TInfo, CK_NoOp, Result.get(), /*Path=*/nullptr, LParenLoc, RParenLoc); } return Result; } /// doesUsualArrayDeleteWantSize - Answers whether the usual /// operator delete[] for the given type has a size_t parameter. static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc, QualType allocType) { const RecordType *record = allocType->getBaseElementTypeUnsafe()->getAs(); if (!record) return false; // Try to find an operator delete[] in class scope. DeclarationName deleteName = S.Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete); LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName); S.LookupQualifiedName(ops, record->getDecl()); // We're just doing this for information. ops.suppressDiagnostics(); // Very likely: there's no operator delete[]. 
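  // For illustration (hypothetical class): the usual two-parameter form
  //
  //   struct X { void operator delete[](void *, std::size_t); };
  //
  // "wants" the size, so array allocations of X must store enough
  // information to reproduce it at deletion time.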
if (ops.empty()) return false; // If it's ambiguous, it should be illegal to call operator delete[] // on this thing, so it doesn't matter if we allocate extra space or not. if (ops.isAmbiguous()) return false; LookupResult::Filter filter = ops.makeFilter(); while (filter.hasNext()) { NamedDecl *del = filter.next()->getUnderlyingDecl(); // C++0x [basic.stc.dynamic.deallocation]p2: // A template instance is never a usual deallocation function, // regardless of its signature. if (isa(del)) { filter.erase(); continue; } // C++0x [basic.stc.dynamic.deallocation]p2: // If class T does not declare [an operator delete[] with one // parameter] but does declare a member deallocation function // named operator delete[] with exactly two parameters, the // second of which has type std::size_t, then this function // is a usual deallocation function. if (!cast(del)->isUsualDeallocationFunction()) { filter.erase(); continue; } } filter.done(); if (!ops.isSingleResult()) return false; const FunctionDecl *del = cast(ops.getFoundDecl()); return (del->getNumParams() == 2); } /// \brief Parsed a C++ 'new' expression (C++ 5.3.4). /// /// E.g.: /// @code new (memory) int[size][4] @endcode /// or /// @code ::new Foo(23, "hello") @endcode /// /// \param StartLoc The first location of the expression. /// \param UseGlobal True if 'new' was prefixed with '::'. /// \param PlacementLParen Opening paren of the placement arguments. /// \param PlacementArgs Placement new arguments. /// \param PlacementRParen Closing paren of the placement arguments. /// \param TypeIdParens If the type is in parens, the source range. /// \param D The type to be allocated, as well as array dimensions. /// \param Initializer The initializing expression or initializer-list, or null /// if there is none. ExprResult Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer) { bool TypeContainsAuto = D.getDeclSpec().containsPlaceholderType(); Expr *ArraySize = nullptr; // If the specified type is an array, unwrap it and save the expression. if (D.getNumTypeObjects() > 0 && D.getTypeObject(0).Kind == DeclaratorChunk::Array) { DeclaratorChunk &Chunk = D.getTypeObject(0); if (TypeContainsAuto) return ExprError(Diag(Chunk.Loc, diag::err_new_array_of_auto) << D.getSourceRange()); if (Chunk.Arr.hasStatic) return ExprError(Diag(Chunk.Loc, diag::err_static_illegal_in_new) << D.getSourceRange()); if (!Chunk.Arr.NumElts) return ExprError(Diag(Chunk.Loc, diag::err_array_new_needs_size) << D.getSourceRange()); ArraySize = static_cast(Chunk.Arr.NumElts); D.DropFirstTypeObject(); } // Every dimension shall be of constant size. if (ArraySize) { for (unsigned I = 0, N = D.getNumTypeObjects(); I < N; ++I) { if (D.getTypeObject(I).Kind != DeclaratorChunk::Array) break; DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(I).Arr; if (Expr *NumElts = (Expr *)Array.NumElts) { if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) { if (getLangOpts().CPlusPlus14) { // C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator // shall be a converted constant expression (5.19) of type std::size_t // and shall evaluate to a strictly positive value. 
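            // For illustration: in 'new int[n][4]' only the outermost bound
            // may be a run-time value; the inner '4' is checked here as a
            // converted constant expression (C++14) or an ICE otherwise.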
unsigned IntWidth = Context.getTargetInfo().getIntWidth(); assert(IntWidth && "Builtin type of size 0?"); llvm::APSInt Value(IntWidth); Array.NumElts = CheckConvertedConstantExpression(NumElts, Context.getSizeType(), Value, CCEK_NewExpr) .get(); } else { Array.NumElts = VerifyIntegerConstantExpression(NumElts, nullptr, diag::err_new_array_nonconst) .get(); } if (!Array.NumElts) return ExprError(); } } } } TypeSourceInfo *TInfo = GetTypeForDeclarator(D, /*Scope=*/nullptr); QualType AllocType = TInfo->getType(); if (D.isInvalidType()) return ExprError(); SourceRange DirectInitRange; if (ParenListExpr *List = dyn_cast_or_null(Initializer)) DirectInitRange = List->getSourceRange(); return BuildCXXNew(SourceRange(StartLoc, D.getLocEnd()), UseGlobal, PlacementLParen, PlacementArgs, PlacementRParen, TypeIdParens, AllocType, TInfo, ArraySize, DirectInitRange, Initializer, TypeContainsAuto); } static bool isLegalArrayNewInitializer(CXXNewExpr::InitializationStyle Style, Expr *Init) { if (!Init) return true; if (ParenListExpr *PLE = dyn_cast(Init)) return PLE->getNumExprs() == 0; if (isa(Init)) return true; else if (CXXConstructExpr *CCE = dyn_cast(Init)) return !CCE->isListInitialization() && CCE->getConstructor()->isDefaultConstructor(); else if (Style == CXXNewExpr::ListInit) { assert(isa(Init) && "Shouldn't create list CXXConstructExprs for arrays."); return true; } return false; } ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto) { SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange(); SourceLocation StartLoc = Range.getBegin(); CXXNewExpr::InitializationStyle initStyle; if (DirectInitRange.isValid()) { assert(Initializer && "Have parens but no initializer."); initStyle = CXXNewExpr::CallInit; } else if (Initializer && isa(Initializer)) initStyle = CXXNewExpr::ListInit; else { assert((!Initializer || isa(Initializer) || isa(Initializer)) && "Initializer expression that cannot have been implicitly created."); initStyle = CXXNewExpr::NoInit; } Expr **Inits = &Initializer; unsigned NumInits = Initializer ? 1 : 0; if (ParenListExpr *List = dyn_cast_or_null(Initializer)) { assert(initStyle == CXXNewExpr::CallInit && "paren init for non-call init"); Inits = List->getExprs(); NumInits = List->getNumExprs(); } // C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for. 
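  // For illustration:
  //
  //   new auto(42);   // OK, allocates an int
  //   new auto;       // error: no initializer to deduce from
  //   new auto(1, 2); // error: multiple expressions
  //
  // (List-initialized 'new auto{...}' is likewise rejected below.)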
if (TypeMayContainAuto && AllocType->isUndeducedType()) { if (initStyle == CXXNewExpr::NoInit || NumInits == 0) return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg) << AllocType << TypeRange); if (initStyle == CXXNewExpr::ListInit || (NumInits == 1 && isa(Inits[0]))) return ExprError(Diag(Inits[0]->getLocStart(), diag::err_auto_new_list_init) << AllocType << TypeRange); if (NumInits > 1) { Expr *FirstBad = Inits[1]; return ExprError(Diag(FirstBad->getLocStart(), diag::err_auto_new_ctor_multiple_expressions) << AllocType << TypeRange); } Expr *Deduce = Inits[0]; QualType DeducedType; if (DeduceAutoType(AllocTypeInfo, Deduce, DeducedType) == DAR_Failed) return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure) << AllocType << Deduce->getType() << TypeRange << Deduce->getSourceRange()); if (DeducedType.isNull()) return ExprError(); AllocType = DeducedType; } // Per C++0x [expr.new]p5, the type being constructed may be a // typedef of an array type. if (!ArraySize) { if (const ConstantArrayType *Array = Context.getAsConstantArrayType(AllocType)) { ArraySize = IntegerLiteral::Create(Context, Array->getSize(), Context.getSizeType(), TypeRange.getEnd()); AllocType = Array->getElementType(); } } if (CheckAllocatedType(AllocType, TypeRange.getBegin(), TypeRange)) return ExprError(); if (initStyle == CXXNewExpr::ListInit && isStdInitializerList(AllocType, nullptr)) { Diag(AllocTypeInfo->getTypeLoc().getBeginLoc(), diag::warn_dangling_std_initializer_list) << /*at end of FE*/0 << Inits[0]->getSourceRange(); } // In ARC, infer 'retaining' for the allocated if (getLangOpts().ObjCAutoRefCount && AllocType.getObjCLifetime() == Qualifiers::OCL_None && AllocType->isObjCLifetimeType()) { AllocType = Context.getLifetimeQualifiedType(AllocType, AllocType->getObjCARCImplicitLifetime()); } QualType ResultType = Context.getPointerType(AllocType); if (ArraySize && ArraySize->getType()->isNonOverloadPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(ArraySize); if (result.isInvalid()) return ExprError(); ArraySize = result.get(); } // C++98 5.3.4p6: "The expression in a direct-new-declarator shall have // integral or enumeration type with a non-negative value." // C++11 [expr.new]p6: The expression [...] shall be of integral or unscoped // enumeration type, or a class type for which a single non-explicit // conversion function to integral or unscoped enumeration type exists. // C++1y [expr.new]p6: The expression [...] is implicitly converted to // std::size_t. if (ArraySize && !ArraySize->isTypeDependent()) { ExprResult ConvertedSize; if (getLangOpts().CPlusPlus14) { assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?"); ConvertedSize = PerformImplicitConversion(ArraySize, Context.getSizeType(), AA_Converting); if (!ConvertedSize.isInvalid() && ArraySize->getType()->getAs()) // Diagnose the compatibility of this conversion. 
Diag(StartLoc, diag::warn_cxx98_compat_array_size_conversion) << ArraySize->getType() << 0 << "'size_t'"; } else { class SizeConvertDiagnoser : public ICEConvertDiagnoser { protected: Expr *ArraySize; public: SizeConvertDiagnoser(Expr *ArraySize) : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, false, false), ArraySize(ArraySize) {} SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_array_size_not_integral) << S.getLangOpts().CPlusPlus11 << T; } SemaDiagnosticBuilder diagnoseIncomplete( Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_array_size_incomplete_type) << T << ArraySize->getSourceRange(); } SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { return S.Diag(Loc, diag::err_array_size_explicit_conversion) << T << ConvTy; } SemaDiagnosticBuilder noteExplicitConv( Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return S.Diag(Conv->getLocation(), diag::note_array_size_conversion) << ConvTy->isEnumeralType() << ConvTy; } SemaDiagnosticBuilder diagnoseAmbiguous( Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_array_size_ambiguous_conversion) << T; } SemaDiagnosticBuilder noteAmbiguous( Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return S.Diag(Conv->getLocation(), diag::note_array_size_conversion) << ConvTy->isEnumeralType() << ConvTy; } SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { return S.Diag(Loc, S.getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_array_size_conversion : diag::ext_array_size_conversion) << T << ConvTy->isEnumeralType() << ConvTy; } } SizeDiagnoser(ArraySize); ConvertedSize = PerformContextualImplicitConversion(StartLoc, ArraySize, SizeDiagnoser); } if (ConvertedSize.isInvalid()) return ExprError(); ArraySize = ConvertedSize.get(); QualType SizeType = ArraySize->getType(); if (!SizeType->isIntegralOrUnscopedEnumerationType()) return ExprError(); // C++98 [expr.new]p7: // The expression in a direct-new-declarator shall have integral type // with a non-negative value. // // Let's see if this is a constant < 0. If so, we reject it out of // hand. Otherwise, if it's not a constant, we must have an unparenthesized // array type. // // Note: such a construct has well-defined semantics in C++11: it throws // std::bad_array_new_length. if (!ArraySize->isValueDependent()) { llvm::APSInt Value; // We've already performed any required implicit conversion to integer or // unscoped enumeration type. 
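      // For illustration: 'new int[-1]' has a constant negative size; in
      // C++11 it would throw std::bad_array_new_length at run time, so it
      // is only warned about below, while C++98 rejects it outright.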
if (ArraySize->isIntegerConstantExpr(Value, Context)) { if (Value < llvm::APSInt( llvm::APInt::getNullValue(Value.getBitWidth()), Value.isUnsigned())) { if (getLangOpts().CPlusPlus11) Diag(ArraySize->getLocStart(), diag::warn_typecheck_negative_array_new_size) << ArraySize->getSourceRange(); else return ExprError(Diag(ArraySize->getLocStart(), diag::err_typecheck_negative_array_size) << ArraySize->getSourceRange()); } else if (!AllocType->isDependentType()) { unsigned ActiveSizeBits = ConstantArrayType::getNumAddressingBits(Context, AllocType, Value); if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) { if (getLangOpts().CPlusPlus11) Diag(ArraySize->getLocStart(), diag::warn_array_new_too_large) << Value.toString(10) << ArraySize->getSourceRange(); else return ExprError(Diag(ArraySize->getLocStart(), diag::err_array_too_large) << Value.toString(10) << ArraySize->getSourceRange()); } } } else if (TypeIdParens.isValid()) { // Can't have dynamic array size when the type-id is in parentheses. Diag(ArraySize->getLocStart(), diag::ext_new_paren_array_nonconst) << ArraySize->getSourceRange() << FixItHint::CreateRemoval(TypeIdParens.getBegin()) << FixItHint::CreateRemoval(TypeIdParens.getEnd()); TypeIdParens = SourceRange(); } } // Note that we do *not* convert the argument in any way. It can // be signed, larger than size_t, whatever. } FunctionDecl *OperatorNew = nullptr; FunctionDecl *OperatorDelete = nullptr; if (!AllocType->isDependentType() && !Expr::hasAnyTypeDependentArguments(PlacementArgs) && FindAllocationFunctions(StartLoc, SourceRange(PlacementLParen, PlacementRParen), UseGlobal, AllocType, ArraySize, PlacementArgs, OperatorNew, OperatorDelete)) return ExprError(); // If this is an array allocation, compute whether the usual array // deallocation function for the type has a size_t parameter. bool UsualArrayDeleteWantsSize = false; if (ArraySize && !AllocType->isDependentType()) UsualArrayDeleteWantsSize = doesUsualArrayDeleteWantSize(*this, StartLoc, AllocType); SmallVector AllPlaceArgs; if (OperatorNew) { const FunctionProtoType *Proto = OperatorNew->getType()->getAs(); VariadicCallType CallType = Proto->isVariadic() ? VariadicFunction : VariadicDoesNotApply; // We've already converted the placement args, just fill in any default // arguments. Skip the first parameter because we don't have a corresponding // argument. if (GatherArgumentsForCall(PlacementLParen, OperatorNew, Proto, 1, PlacementArgs, AllPlaceArgs, CallType)) return ExprError(); if (!AllPlaceArgs.empty()) PlacementArgs = AllPlaceArgs; // FIXME: This is wrong: PlacementArgs misses out the first (size) argument. DiagnoseSentinelCalls(OperatorNew, PlacementLParen, PlacementArgs); // FIXME: Missing call to CheckFunctionCall or equivalent } // Warn if the type is over-aligned and is being allocated by global operator // new. if (PlacementArgs.empty() && OperatorNew && (OperatorNew->isImplicit() || getSourceManager().isInSystemHeader(OperatorNew->getLocStart()))) { if (unsigned Align = Context.getPreferredTypeAlign(AllocType.getTypePtr())){ unsigned SuitableAlign = Context.getTargetInfo().getSuitableAlign(); if (Align > SuitableAlign) Diag(StartLoc, diag::warn_overaligned_type) << AllocType << unsigned(Align / Context.getCharWidth()) << unsigned(SuitableAlign / Context.getCharWidth()); } } QualType InitType = AllocType; // Array 'new' can't have any initializers except empty parentheses. // Initializer lists are also allowed, in C++11. Rely on the parser for the // dialect distinction. 
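  // For illustration:
  //
  //   new int[3]();     // OK: value-initialization
  //   new int[3]{1, 2}; // OK in C++11: list-initialization
  //   new int[3](1, 2); // error: parenthesized arguments for an array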
if (ResultType->isArrayType() || ArraySize) { if (!isLegalArrayNewInitializer(initStyle, Initializer)) { SourceRange InitRange(Inits[0]->getLocStart(), Inits[NumInits - 1]->getLocEnd()); Diag(StartLoc, diag::err_new_array_init_args) << InitRange; return ExprError(); } if (InitListExpr *ILE = dyn_cast_or_null(Initializer)) { // We do the initialization typechecking against the array type // corresponding to the number of initializers + 1 (to also check // default-initialization). unsigned NumElements = ILE->getNumInits() + 1; InitType = Context.getConstantArrayType(AllocType, llvm::APInt(Context.getTypeSize(Context.getSizeType()), NumElements), ArrayType::Normal, 0); } } // If we can perform the initialization, and we've not already done so, // do it now. if (!AllocType->isDependentType() && !Expr::hasAnyTypeDependentArguments( llvm::makeArrayRef(Inits, NumInits))) { // C++11 [expr.new]p15: // A new-expression that creates an object of type T initializes that // object as follows: InitializationKind Kind // - If the new-initializer is omitted, the object is default- // initialized (8.5); if no initialization is performed, // the object has indeterminate value = initStyle == CXXNewExpr::NoInit ? InitializationKind::CreateDefault(TypeRange.getBegin()) // - Otherwise, the new-initializer is interpreted according to the // initialization rules of 8.5 for direct-initialization. : initStyle == CXXNewExpr::ListInit ? InitializationKind::CreateDirectList(TypeRange.getBegin()) : InitializationKind::CreateDirect(TypeRange.getBegin(), DirectInitRange.getBegin(), DirectInitRange.getEnd()); InitializedEntity Entity = InitializedEntity::InitializeNew(StartLoc, InitType); InitializationSequence InitSeq(*this, Entity, Kind, MultiExprArg(Inits, NumInits)); ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind, MultiExprArg(Inits, NumInits)); if (FullInit.isInvalid()) return ExprError(); // FullInit is our initializer; strip off CXXBindTemporaryExprs, because // we don't want the initialized object to be destructed. if (CXXBindTemporaryExpr *Binder = dyn_cast_or_null(FullInit.get())) FullInit = Binder->getSubExpr(); Initializer = FullInit.get(); } // Mark the new and delete operators as referenced. if (OperatorNew) { if (DiagnoseUseOfDecl(OperatorNew, StartLoc)) return ExprError(); MarkFunctionReferenced(StartLoc, OperatorNew); } if (OperatorDelete) { if (DiagnoseUseOfDecl(OperatorDelete, StartLoc)) return ExprError(); MarkFunctionReferenced(StartLoc, OperatorDelete); } // C++0x [expr.new]p17: // If the new expression creates an array of objects of class type, // access and ambiguity control are done for the destructor. QualType BaseAllocType = Context.getBaseElementType(AllocType); if (ArraySize && !BaseAllocType->isDependentType()) { if (const RecordType *BaseRecordType = BaseAllocType->getAs()) { if (CXXDestructorDecl *dtor = LookupDestructor( cast(BaseRecordType->getDecl()))) { MarkFunctionReferenced(StartLoc, dtor); CheckDestructorAccess(StartLoc, dtor, PDiag(diag::err_access_dtor) << BaseAllocType); if (DiagnoseUseOfDecl(dtor, StartLoc)) return ExprError(); } } } return new (Context) CXXNewExpr(Context, UseGlobal, OperatorNew, OperatorDelete, UsualArrayDeleteWantsSize, PlacementArgs, TypeIdParens, ArraySize, initStyle, Initializer, ResultType, AllocTypeInfo, Range, DirectInitRange); } /// \brief Checks that a type is suitable as the allocated type /// in a new-expression. 
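/// For example (illustrative), these are rejected:
/// @code
///   typedef void F();
///   new F;     // function type
///   new int&;  // reference type
/// @endcode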
bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R) { // C++ 5.3.4p1: "[The] type shall be a complete object type, but not an // abstract class type or array thereof. if (AllocType->isFunctionType()) return Diag(Loc, diag::err_bad_new_type) << AllocType << 0 << R; else if (AllocType->isReferenceType()) return Diag(Loc, diag::err_bad_new_type) << AllocType << 1 << R; else if (!AllocType->isDependentType() && RequireCompleteType(Loc, AllocType, diag::err_new_incomplete_type,R)) return true; else if (RequireNonAbstractType(Loc, AllocType, diag::err_allocation_of_abstract_type)) return true; else if (AllocType->isVariablyModifiedType()) return Diag(Loc, diag::err_variably_modified_new_type) << AllocType; else if (unsigned AddressSpace = AllocType.getAddressSpace()) return Diag(Loc, diag::err_address_space_qualified_new) << AllocType.getUnqualifiedType() << AddressSpace; else if (getLangOpts().ObjCAutoRefCount) { if (const ArrayType *AT = Context.getAsArrayType(AllocType)) { QualType BaseAllocType = Context.getBaseElementType(AT); if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None && BaseAllocType->isObjCLifetimeType()) return Diag(Loc, diag::err_arc_new_array_without_ownership) << BaseAllocType; } } return false; } /// \brief Determine whether the given function is a non-placement /// deallocation function. static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) { if (FD->isInvalidDecl()) return false; if (CXXMethodDecl *Method = dyn_cast(FD)) return Method->isUsualDeallocationFunction(); if (FD->getOverloadedOperator() != OO_Delete && FD->getOverloadedOperator() != OO_Array_Delete) return false; if (FD->getNumParams() == 1) return true; return S.getLangOpts().SizedDeallocation && FD->getNumParams() == 2 && S.Context.hasSameUnqualifiedType(FD->getParamDecl(1)->getType(), S.Context.getSizeType()); } /// FindAllocationFunctions - Finds the overloads of operator new and delete /// that are appropriate for the allocation. bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete) { // --- Choosing an allocation function --- // C++ 5.3.4p8 - 14 & 18 // 1) If UseGlobal is true, only look in the global scope. Else, also look // in the scope of the allocated class. // 2) If an array size is given, look for operator new[], else look for // operator new. // 3) The first argument is always size_t. Append the arguments from the // placement form. SmallVector AllocArgs(1 + PlaceArgs.size()); // We don't care about the actual value of this argument. // FIXME: Should the Sema create the expression and embed it in the syntax // tree? Or should the consumer just recalculate the value? IntegerLiteral Size(Context, llvm::APInt::getNullValue( Context.getTargetInfo().getPointerWidth(0)), Context.getSizeType(), SourceLocation()); AllocArgs[0] = &Size; std::copy(PlaceArgs.begin(), PlaceArgs.end(), AllocArgs.begin() + 1); // C++ [expr.new]p8: // If the allocated type is a non-array type, the allocation // function's name is operator new and the deallocation function's // name is operator delete. If the allocated type is an array // type, the allocation function's name is operator new[] and the // deallocation function's name is operator delete[]. DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName( IsArray ? 
OO_Array_New : OO_New); DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName( IsArray ? OO_Array_Delete : OO_Delete); QualType AllocElemType = Context.getBaseElementType(AllocType); if (AllocElemType->isRecordType() && !UseGlobal) { CXXRecordDecl *Record = cast(AllocElemType->getAs()->getDecl()); if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, Record, /*AllowMissing=*/true, OperatorNew)) return true; } if (!OperatorNew) { // Didn't find a member overload. Look for a global one. DeclareGlobalNewDelete(); DeclContext *TUDecl = Context.getTranslationUnitDecl(); bool FallbackEnabled = IsArray && Context.getLangOpts().MSVCCompat; if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, TUDecl, /*AllowMissing=*/FallbackEnabled, OperatorNew, /*Diagnose=*/!FallbackEnabled)) { if (!FallbackEnabled) return true; // MSVC will fall back on trying to find a matching global operator new // if operator new[] cannot be found. Also, MSVC will leak by not // generating a call to operator delete or operator delete[], but we // will not replicate that bug. NewName = Context.DeclarationNames.getCXXOperatorName(OO_New); DeleteName = Context.DeclarationNames.getCXXOperatorName(OO_Delete); if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, TUDecl, /*AllowMissing=*/false, OperatorNew)) return true; } } // We don't need an operator delete if we're running under // -fno-exceptions. if (!getLangOpts().Exceptions) { OperatorDelete = nullptr; return false; } // C++ [expr.new]p19: // // If the new-expression begins with a unary :: operator, the // deallocation function's name is looked up in the global // scope. Otherwise, if the allocated type is a class type T or an // array thereof, the deallocation function's name is looked up in // the scope of T. If this lookup fails to find the name, or if // the allocated type is not a class type or array thereof, the // deallocation function's name is looked up in the global scope. LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName); if (AllocElemType->isRecordType() && !UseGlobal) { CXXRecordDecl *RD = cast(AllocElemType->getAs()->getDecl()); LookupQualifiedName(FoundDelete, RD); } if (FoundDelete.isAmbiguous()) return true; // FIXME: clean up expressions? if (FoundDelete.empty()) { DeclareGlobalNewDelete(); LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl()); } FoundDelete.suppressDiagnostics(); SmallVector, 2> Matches; // Whether we're looking for a placement operator delete is dictated // by whether we selected a placement operator new, not by whether // we had explicit placement arguments. This matters for things like // struct A { void *operator new(size_t, int = 0); ... }; // A *a = new A() bool isPlacementNew = (!PlaceArgs.empty() || OperatorNew->param_size() != 1); if (isPlacementNew) { // C++ [expr.new]p20: // A declaration of a placement deallocation function matches the // declaration of a placement allocation function if it has the // same number of parameters and, after parameter transformations // (8.3.5), all parameter types except the first are // identical. [...] // // To perform this comparison, we compute the function type that // the deallocation function should have, and use that type both // for template argument deduction and for comparison purposes. // // FIXME: this comparison should ignore CC and the like. 
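    // For illustration (hypothetical class):
    //
    //   struct A {
    //     void *operator new(std::size_t, int);
    //     void operator delete(void *, int);  // matching placement delete
    //   };
    //   new (0) A;  // this delete is called if construction throws
    //
    // The expected signature 'void (void *, int)' is computed below.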
QualType ExpectedFunctionType; { const FunctionProtoType *Proto = OperatorNew->getType()->getAs(); SmallVector ArgTypes; ArgTypes.push_back(Context.VoidPtrTy); for (unsigned I = 1, N = Proto->getNumParams(); I < N; ++I) ArgTypes.push_back(Proto->getParamType(I)); FunctionProtoType::ExtProtoInfo EPI; EPI.Variadic = Proto->isVariadic(); ExpectedFunctionType = Context.getFunctionType(Context.VoidTy, ArgTypes, EPI); } for (LookupResult::iterator D = FoundDelete.begin(), DEnd = FoundDelete.end(); D != DEnd; ++D) { FunctionDecl *Fn = nullptr; if (FunctionTemplateDecl *FnTmpl = dyn_cast((*D)->getUnderlyingDecl())) { // Perform template argument deduction to try to match the // expected function type. TemplateDeductionInfo Info(StartLoc); if (DeduceTemplateArguments(FnTmpl, nullptr, ExpectedFunctionType, Fn, Info)) continue; } else Fn = cast((*D)->getUnderlyingDecl()); if (Context.hasSameType(Fn->getType(), ExpectedFunctionType)) Matches.push_back(std::make_pair(D.getPair(), Fn)); } } else { // C++ [expr.new]p20: // [...] Any non-placement deallocation function matches a // non-placement allocation function. [...] for (LookupResult::iterator D = FoundDelete.begin(), DEnd = FoundDelete.end(); D != DEnd; ++D) { if (FunctionDecl *Fn = dyn_cast((*D)->getUnderlyingDecl())) if (isNonPlacementDeallocationFunction(*this, Fn)) Matches.push_back(std::make_pair(D.getPair(), Fn)); } // C++1y [expr.new]p22: // For a non-placement allocation function, the normal deallocation // function lookup is used // C++1y [expr.delete]p?: // If [...] deallocation function lookup finds both a usual deallocation // function with only a pointer parameter and a usual deallocation // function with both a pointer parameter and a size parameter, then the // selected deallocation function shall be the one with two parameters. // Otherwise, the selected deallocation function shall be the function // with one parameter. if (getLangOpts().SizedDeallocation && Matches.size() == 2) { if (Matches[0].second->getNumParams() == 1) Matches.erase(Matches.begin()); else Matches.erase(Matches.begin() + 1); assert(Matches[0].second->getNumParams() == 2 && "found an unexpected usual deallocation function"); } } // C++ [expr.new]p20: // [...] If the lookup finds a single matching deallocation // function, that function will be called; otherwise, no // deallocation function will be called. if (Matches.size() == 1) { OperatorDelete = Matches[0].second; // C++0x [expr.new]p20: // If the lookup finds the two-parameter form of a usual // deallocation function (3.7.4.2) and that function, considered // as a placement deallocation function, would have been // selected as a match for the allocation function, the program // is ill-formed. if (!PlaceArgs.empty() && getLangOpts().CPlusPlus11 && isNonPlacementDeallocationFunction(*this, OperatorDelete)) { Diag(StartLoc, diag::err_placement_new_non_placement_delete) << SourceRange(PlaceArgs.front()->getLocStart(), PlaceArgs.back()->getLocEnd()); if (!OperatorDelete->isImplicit()) Diag(OperatorDelete->getLocation(), diag::note_previous_decl) << DeleteName; } else { CheckAllocationAccess(StartLoc, Range, FoundDelete.getNamingClass(), Matches[0].first); } } return false; } /// \brief Find an fitting overload for the allocation function /// in the specified scope. /// /// \param StartLoc The location of the 'new' token. /// \param Range The range of the placement arguments. /// \param Name The name of the function ('operator new' or 'operator new[]'). /// \param Args The placement arguments specified. 
/// \param Ctx The scope in which we should search; either a class scope or the /// translation unit. /// \param AllowMissing If \c true, report an error if we can't find any /// allocation functions. Otherwise, succeed but don't fill in \p /// Operator. /// \param Operator Filled in with the found allocation function. Unchanged if /// no allocation function was found. /// \param Diagnose If \c true, issue errors if the allocation function is not /// usable. bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose) { LookupResult R(*this, Name, StartLoc, LookupOrdinaryName); LookupQualifiedName(R, Ctx); if (R.empty()) { if (AllowMissing || !Diagnose) return false; return Diag(StartLoc, diag::err_ovl_no_viable_function_in_call) << Name << Range; } if (R.isAmbiguous()) return true; R.suppressDiagnostics(); OverloadCandidateSet Candidates(StartLoc, OverloadCandidateSet::CSK_Normal); for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end(); Alloc != AllocEnd; ++Alloc) { // Even member operator new/delete are implicitly treated as // static, so don't use AddMemberCandidate. NamedDecl *D = (*Alloc)->getUnderlyingDecl(); if (FunctionTemplateDecl *FnTemplate = dyn_cast(D)) { AddTemplateOverloadCandidate(FnTemplate, Alloc.getPair(), /*ExplicitTemplateArgs=*/nullptr, Args, Candidates, /*SuppressUserConversions=*/false); continue; } FunctionDecl *Fn = cast(D); AddOverloadCandidate(Fn, Alloc.getPair(), Args, Candidates, /*SuppressUserConversions=*/false); } // Do the resolution. OverloadCandidateSet::iterator Best; switch (Candidates.BestViableFunction(*this, StartLoc, Best)) { case OR_Success: { // Got one! FunctionDecl *FnDecl = Best->Function; if (CheckAllocationAccess(StartLoc, Range, R.getNamingClass(), Best->FoundDecl, Diagnose) == AR_inaccessible) return true; Operator = FnDecl; return false; } case OR_No_Viable_Function: if (Diagnose) { Diag(StartLoc, diag::err_ovl_no_viable_function_in_call) << Name << Range; Candidates.NoteCandidates(*this, OCD_AllCandidates, Args); } return true; case OR_Ambiguous: if (Diagnose) { Diag(StartLoc, diag::err_ovl_ambiguous_call) << Name << Range; Candidates.NoteCandidates(*this, OCD_ViableCandidates, Args); } return true; case OR_Deleted: { if (Diagnose) { Diag(StartLoc, diag::err_ovl_deleted_call) << Best->Function->isDeleted() << Name << getDeletedOrUnavailableSuffix(Best->Function) << Range; Candidates.NoteCandidates(*this, OCD_AllCandidates, Args); } return true; } } llvm_unreachable("Unreachable, bad result from BestViableFunction"); } /// DeclareGlobalNewDelete - Declare the global forms of operator new and /// delete. 
/// delete. These are:
/// @code
///   // C++03:
///   void* operator new(std::size_t) throw(std::bad_alloc);
///   void* operator new[](std::size_t) throw(std::bad_alloc);
///   void operator delete(void *) throw();
///   void operator delete[](void *) throw();
///   // C++11:
///   void* operator new(std::size_t);
///   void* operator new[](std::size_t);
///   void operator delete(void *) noexcept;
///   void operator delete[](void *) noexcept;
///   // C++1y:
///   void* operator new(std::size_t);
///   void* operator new[](std::size_t);
///   void operator delete(void *) noexcept;
///   void operator delete[](void *) noexcept;
///   void operator delete(void *, std::size_t) noexcept;
///   void operator delete[](void *, std::size_t) noexcept;
/// @endcode
/// Note that the placement and nothrow forms of new are *not* implicitly
/// declared. Their use requires including \<new\>.
void Sema::DeclareGlobalNewDelete() {
  if (GlobalNewDeleteDeclared)
    return;

  // C++ [basic.std.dynamic]p2:
  //   [...] The following allocation and deallocation functions (18.4) are
  //   implicitly declared in global scope in each translation unit of a
  //   program
  //
  //     C++03:
  //     void* operator new(std::size_t) throw(std::bad_alloc);
  //     void* operator new[](std::size_t) throw(std::bad_alloc);
  //     void  operator delete(void*) throw();
  //     void  operator delete[](void*) throw();
  //     C++11:
  //     void* operator new(std::size_t);
  //     void* operator new[](std::size_t);
  //     void  operator delete(void*) noexcept;
  //     void  operator delete[](void*) noexcept;
  //     C++1y:
  //     void* operator new(std::size_t);
  //     void* operator new[](std::size_t);
  //     void  operator delete(void*) noexcept;
  //     void  operator delete[](void*) noexcept;
  //     void  operator delete(void*, std::size_t) noexcept;
  //     void  operator delete[](void*, std::size_t) noexcept;
  //
  //   These implicit declarations introduce only the function names operator
  //   new, operator new[], operator delete, operator delete[].
  //
  // Here, we need to refer to std::bad_alloc, so we will implicitly declare
  // "std" or "bad_alloc" as necessary to form the exception specification.
  // However, we do not make these implicit declarations visible to name
  // lookup.
  if (!StdBadAlloc && !getLangOpts().CPlusPlus11) {
    // The "std::bad_alloc" class has not yet been declared, so build it
    // implicitly.
StdBadAlloc = CXXRecordDecl::Create(Context, TTK_Class, getOrCreateStdNamespace(), SourceLocation(), SourceLocation(), &PP.getIdentifierTable().get("bad_alloc"), nullptr); getStdBadAlloc()->setImplicit(true); } GlobalNewDeleteDeclared = true; QualType VoidPtr = Context.getPointerType(Context.VoidTy); QualType SizeT = Context.getSizeType(); bool AssumeSaneOperatorNew = getLangOpts().AssumeSaneOperatorNew; DeclareGlobalAllocationFunction( Context.DeclarationNames.getCXXOperatorName(OO_New), VoidPtr, SizeT, QualType(), AssumeSaneOperatorNew); DeclareGlobalAllocationFunction( Context.DeclarationNames.getCXXOperatorName(OO_Array_New), VoidPtr, SizeT, QualType(), AssumeSaneOperatorNew); DeclareGlobalAllocationFunction( Context.DeclarationNames.getCXXOperatorName(OO_Delete), Context.VoidTy, VoidPtr); DeclareGlobalAllocationFunction( Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete), Context.VoidTy, VoidPtr); if (getLangOpts().SizedDeallocation) { DeclareGlobalAllocationFunction( Context.DeclarationNames.getCXXOperatorName(OO_Delete), Context.VoidTy, VoidPtr, Context.getSizeType()); DeclareGlobalAllocationFunction( Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete), Context.VoidTy, VoidPtr, Context.getSizeType()); } } /// DeclareGlobalAllocationFunction - Declares a single implicit global /// allocation function if it doesn't already exist. void Sema::DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2, bool AddRestrictAttr) { DeclContext *GlobalCtx = Context.getTranslationUnitDecl(); unsigned NumParams = Param2.isNull() ? 1 : 2; // Check if this function is already declared. DeclContext::lookup_result R = GlobalCtx->lookup(Name); for (DeclContext::lookup_iterator Alloc = R.begin(), AllocEnd = R.end(); Alloc != AllocEnd; ++Alloc) { // Only look at non-template functions, as it is the predefined, // non-templated allocation function we are trying to declare here. if (FunctionDecl *Func = dyn_cast(*Alloc)) { if (Func->getNumParams() == NumParams) { QualType InitialParam1Type = Context.getCanonicalType(Func->getParamDecl(0) ->getType().getUnqualifiedType()); QualType InitialParam2Type = NumParams == 2 ? Context.getCanonicalType(Func->getParamDecl(1) ->getType().getUnqualifiedType()) : QualType(); // FIXME: Do we need to check for default arguments here? if (InitialParam1Type == Param1 && (NumParams == 1 || InitialParam2Type == Param2)) { if (AddRestrictAttr && !Func->hasAttr()) Func->addAttr(RestrictAttr::CreateImplicit( Context, RestrictAttr::GNU_malloc)); // Make the function visible to name lookup, even if we found it in // an unimported module. It either is an implicitly-declared global // allocation function, or is suppressing that function. Func->setHidden(false); return; } } } } FunctionProtoType::ExtProtoInfo EPI; QualType BadAllocType; bool HasBadAllocExceptionSpec = (Name.getCXXOverloadedOperator() == OO_New || Name.getCXXOverloadedOperator() == OO_Array_New); if (HasBadAllocExceptionSpec) { if (!getLangOpts().CPlusPlus11) { BadAllocType = Context.getTypeDeclType(getStdBadAlloc()); assert(StdBadAlloc && "Must have std::bad_alloc declared"); EPI.ExceptionSpec.Type = EST_Dynamic; EPI.ExceptionSpec.Exceptions = llvm::makeArrayRef(BadAllocType); } } else { EPI.ExceptionSpec = getLangOpts().CPlusPlus11 ? 
EST_BasicNoexcept : EST_DynamicNone; } QualType Params[] = { Param1, Param2 }; QualType FnType = Context.getFunctionType( Return, llvm::makeArrayRef(Params, NumParams), EPI); FunctionDecl *Alloc = FunctionDecl::Create(Context, GlobalCtx, SourceLocation(), SourceLocation(), Name, FnType, /*TInfo=*/nullptr, SC_None, false, true); Alloc->setImplicit(); // Implicit sized deallocation functions always have default visibility. Alloc->addAttr(VisibilityAttr::CreateImplicit(Context, VisibilityAttr::Default)); if (AddRestrictAttr) Alloc->addAttr( RestrictAttr::CreateImplicit(Context, RestrictAttr::GNU_malloc)); ParmVarDecl *ParamDecls[2]; for (unsigned I = 0; I != NumParams; ++I) { ParamDecls[I] = ParmVarDecl::Create(Context, Alloc, SourceLocation(), SourceLocation(), nullptr, Params[I], /*TInfo=*/nullptr, SC_None, nullptr); ParamDecls[I]->setImplicit(); } Alloc->setParams(llvm::makeArrayRef(ParamDecls, NumParams)); Context.getTranslationUnitDecl()->addDecl(Alloc); IdResolver.tryAddTopLevelDecl(Alloc, Name); } FunctionDecl *Sema::FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name) { DeclareGlobalNewDelete(); LookupResult FoundDelete(*this, Name, StartLoc, LookupOrdinaryName); LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl()); // C++ [expr.new]p20: // [...] Any non-placement deallocation function matches a // non-placement allocation function. [...] llvm::SmallVector Matches; for (LookupResult::iterator D = FoundDelete.begin(), DEnd = FoundDelete.end(); D != DEnd; ++D) { if (FunctionDecl *Fn = dyn_cast(*D)) if (isNonPlacementDeallocationFunction(*this, Fn)) Matches.push_back(Fn); } // C++1y [expr.delete]p?: // If the type is complete and deallocation function lookup finds both a // usual deallocation function with only a pointer parameter and a usual // deallocation function with both a pointer parameter and a size // parameter, then the selected deallocation function shall be the one // with two parameters. Otherwise, the selected deallocation function // shall be the function with one parameter. if (getLangOpts().SizedDeallocation && Matches.size() == 2) { unsigned NumArgs = CanProvideSize ? 2 : 1; if (Matches[0]->getNumParams() != NumArgs) Matches.erase(Matches.begin()); else Matches.erase(Matches.begin() + 1); assert(Matches[0]->getNumParams() == NumArgs && "found an unexpected usual deallocation function"); } assert(Matches.size() == 1 && "unexpectedly have multiple usual deallocation functions"); return Matches.front(); } bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose) { LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName); // Try to find operator delete/operator delete[] in class scope. LookupQualifiedName(Found, RD); if (Found.isAmbiguous()) return true; Found.suppressDiagnostics(); SmallVector Matches; for (LookupResult::iterator F = Found.begin(), FEnd = Found.end(); F != FEnd; ++F) { NamedDecl *ND = (*F)->getUnderlyingDecl(); // Ignore template operator delete members from the check for a usual // deallocation function. if (isa(ND)) continue; if (cast(ND)->isUsualDeallocationFunction()) Matches.push_back(F.getPair()); } // There's exactly one suitable operator; pick it. 
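  // Illustrative (hypothetical) example of the single-match case: given
  //
  //   struct S {
  //     void operator delete(void *, const std::nothrow_t &) noexcept; // placement
  //     void operator delete(void *) noexcept;                         // usual
  //   };
  //
  // only the one-parameter form is a usual deallocation function, so Matches
  // ends up with exactly one entry here.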
  if (Matches.size() == 1) {
    Operator = cast<CXXMethodDecl>(Matches[0]->getUnderlyingDecl());

    if (Operator->isDeleted()) {
      if (Diagnose) {
        Diag(StartLoc, diag::err_deleted_function_use);
        NoteDeletedFunction(Operator);
      }
      return true;
    }

    if (CheckAllocationAccess(StartLoc, SourceRange(), Found.getNamingClass(),
                              Matches[0], Diagnose) == AR_inaccessible)
      return true;

    return false;

  // We found multiple suitable operators; complain about the ambiguity.
  } else if (!Matches.empty()) {
    if (Diagnose) {
      Diag(StartLoc, diag::err_ambiguous_suitable_delete_member_function_found)
        << Name << RD;

      for (SmallVectorImpl<DeclAccessPair>::iterator
             F = Matches.begin(), FEnd = Matches.end(); F != FEnd; ++F)
        Diag((*F)->getUnderlyingDecl()->getLocation(),
             diag::note_member_declared_here) << Name;
    }
    return true;
  }

  // We did find operator delete/operator delete[] declarations, but
  // none of them were suitable.
  if (!Found.empty()) {
    if (Diagnose) {
      Diag(StartLoc, diag::err_no_suitable_delete_member_function_found)
        << Name << RD;

      for (LookupResult::iterator F = Found.begin(), FEnd = Found.end();
           F != FEnd; ++F)
        Diag((*F)->getUnderlyingDecl()->getLocation(),
             diag::note_member_declared_here) << Name;
    }
    return true;
  }

  Operator = nullptr;
  return false;
}

namespace {
/// \brief Checks whether a delete-expression and the new-expression used for
/// initializing the deletee have the same array form.
class MismatchingNewDeleteDetector {
public:
  enum MismatchResult {
    /// Indicates that there is no mismatch or a mismatch cannot be proven.
    NoMismatch,
    /// Indicates that variable is initialized with mismatching form of \a new.
    VarInitMismatches,
    /// Indicates that member is initialized with mismatching form of \a new.
    MemberInitMismatches,
    /// Indicates that 1 or more constructors' definitions could not be
    /// analyzed, and they will be checked again at the end of translation
    /// unit.
    AnalyzeLater
  };

  /// \param EndOfTU True, if this is the final analysis at the end of
  /// translation unit. False, if this is the initial analysis at the point
  /// delete-expression was encountered.
  explicit MismatchingNewDeleteDetector(bool EndOfTU)
      : IsArrayForm(false), Field(nullptr), EndOfTU(EndOfTU),
        HasUndefinedConstructors(false) {}

  /// \brief Checks whether pointee of a delete-expression is initialized with
  /// matching form of new-expression.
  ///
  /// If return value is \c VarInitMismatches or \c MemberInitMismatches at the
  /// point where delete-expression is encountered, then a warning will be
  /// issued immediately. If return value is \c AnalyzeLater at the point where
  /// delete-expression is seen, then member will be analyzed at the end of
  /// translation unit. \c AnalyzeLater is returned iff at least one constructor
  /// couldn't be analyzed. If at least one constructor initializes the member
  /// with matching type of new, the return value is \c NoMismatch.
  MismatchResult analyzeDeleteExpr(const CXXDeleteExpr *DE);
  /// \brief Analyzes a class member.
  /// \param Field Class member to analyze.
  /// \param DeleteWasArrayForm Array form-ness of the delete-expression used
  /// for deleting the \p Field.
  MismatchResult analyzeField(FieldDecl *Field, bool DeleteWasArrayForm);
  /// List of mismatching new-expressions used for initialization of the pointee
  llvm::SmallVector<const CXXNewExpr *, 4> NewExprs;
  /// Indicates whether delete-expression was in array form.
  bool IsArrayForm;
  FieldDecl *Field;

private:
  const bool EndOfTU;
  /// \brief Indicates that there is at least one constructor without body.
  bool HasUndefinedConstructors;
  /// \brief Returns \c CXXNewExpr from given initialization expression.
  /// \param E Expression used for initializing pointee in delete-expression.
  /// E can be a single-element \c InitListExpr consisting of new-expression.
  const CXXNewExpr *getNewExprFromInitListOrExpr(const Expr *E);
  /// \brief Returns whether member is initialized with mismatching form of
  /// \c new either by the member initializer or in-class initialization.
  ///
  /// If bodies of all constructors are not visible at the end of translation
  /// unit or at least one constructor initializes member with the matching
  /// form of \c new, mismatch cannot be proven, and this function will return
  /// \c NoMismatch.
  MismatchResult analyzeMemberExpr(const MemberExpr *ME);
  /// \brief Returns whether variable is initialized with mismatching form of
  /// \c new.
  ///
  /// If variable is initialized with matching form of \c new, or variable is
  /// not initialized with a \c new expression, this function will return true.
  /// If variable is initialized with mismatching form of \c new, returns false.
  /// \param D Variable to analyze.
  bool hasMatchingVarInit(const DeclRefExpr *D);
  /// \brief Checks whether the constructor initializes pointee with mismatching
  /// form of \c new.
  ///
  /// Returns true, if member is initialized with matching form of \c new in
  /// member initializer list. Returns false, if member is initialized with the
  /// mismatching form of \c new in this constructor's initializer, or given
  /// constructor isn't defined at the point where delete-expression is seen,
  /// or member isn't initialized by the constructor.
  bool hasMatchingNewInCtor(const CXXConstructorDecl *CD);
  /// \brief Checks whether member is initialized with matching form of
  /// \c new in member initializer list.
  bool hasMatchingNewInCtorInit(const CXXCtorInitializer *CI);
  /// Checks whether member is initialized with mismatching form of \c new by
  /// in-class initializer.
  MismatchResult analyzeInClassInitializer();
};
}

MismatchingNewDeleteDetector::MismatchResult
MismatchingNewDeleteDetector::analyzeDeleteExpr(const CXXDeleteExpr *DE) {
  NewExprs.clear();
  assert(DE && "Expected delete-expression");
  IsArrayForm = DE->isArrayForm();
  const Expr *E = DE->getArgument()->IgnoreParenImpCasts();
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
    return analyzeMemberExpr(ME);
  } else if (const DeclRefExpr *D = dyn_cast<DeclRefExpr>(E)) {
    if (!hasMatchingVarInit(D))
      return VarInitMismatches;
  }
  return NoMismatch;
}

const CXXNewExpr *
MismatchingNewDeleteDetector::getNewExprFromInitListOrExpr(const Expr *E) {
  assert(E != nullptr && "Expected a valid initializer expression");
  E = E->IgnoreParenImpCasts();
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits() == 1)
      E = dyn_cast<CXXNewExpr>(ILE->getInit(0)->IgnoreParenImpCasts());
  }

  return dyn_cast_or_null<CXXNewExpr>(E);
}

bool MismatchingNewDeleteDetector::hasMatchingNewInCtorInit(
    const CXXCtorInitializer *CI) {
  const CXXNewExpr *NE = nullptr;
  if (Field == CI->getMember() &&
      (NE = getNewExprFromInitListOrExpr(CI->getInit()))) {
    if (NE->isArray() == IsArrayForm)
      return true;
    else
      NewExprs.push_back(NE);
  }
  return false;
}

bool MismatchingNewDeleteDetector::hasMatchingNewInCtor(
    const CXXConstructorDecl *CD) {
  if (CD->isImplicit())
    return false;
  const FunctionDecl *Definition = CD;
  if (!CD->isThisDeclarationADefinition() && !CD->isDefined(Definition)) {
    HasUndefinedConstructors = true;
    return EndOfTU;
  }
  for (const auto *CI : cast<CXXConstructorDecl>(Definition)->inits()) {
    if (hasMatchingNewInCtorInit(CI))
      return true;
  }
  return false;
}

MismatchingNewDeleteDetector::MismatchResult
MismatchingNewDeleteDetector::analyzeInClassInitializer() {
  assert(Field != nullptr && "This should be called only for members");
-  if (const CXXNewExpr *NE =
-          getNewExprFromInitListOrExpr(Field->getInClassInitializer())) {
+  const Expr *InitExpr = Field->getInClassInitializer();
+  if (!InitExpr)
+    return EndOfTU ? NoMismatch : AnalyzeLater;
+  if (const CXXNewExpr *NE = getNewExprFromInitListOrExpr(InitExpr)) {
    if (NE->isArray() != IsArrayForm) {
      NewExprs.push_back(NE);
      return MemberInitMismatches;
    }
  }
  return NoMismatch;
}

MismatchingNewDeleteDetector::MismatchResult
MismatchingNewDeleteDetector::analyzeField(FieldDecl *Field,
                                           bool DeleteWasArrayForm) {
  assert(Field != nullptr && "Analysis requires a valid class member.");
  this->Field = Field;
  IsArrayForm = DeleteWasArrayForm;
  const CXXRecordDecl *RD = cast<CXXRecordDecl>(Field->getParent());
  for (const auto *CD : RD->ctors()) {
    if (hasMatchingNewInCtor(CD))
      return NoMismatch;
  }
  if (HasUndefinedConstructors)
    return EndOfTU ? NoMismatch : AnalyzeLater;
  if (!NewExprs.empty())
    return MemberInitMismatches;
  return Field->hasInClassInitializer() ?
             analyzeInClassInitializer() : NoMismatch;
}

MismatchingNewDeleteDetector::MismatchResult
MismatchingNewDeleteDetector::analyzeMemberExpr(const MemberExpr *ME) {
  assert(ME != nullptr && "Expected a member expression");
  if (FieldDecl *F = dyn_cast<FieldDecl>(ME->getMemberDecl()))
    return analyzeField(F, IsArrayForm);
  return NoMismatch;
}

bool MismatchingNewDeleteDetector::hasMatchingVarInit(const DeclRefExpr *D) {
  const CXXNewExpr *NE = nullptr;
  if (const VarDecl *VD = dyn_cast<VarDecl>(D->getDecl())) {
    if (VD->hasInit() && (NE = getNewExprFromInitListOrExpr(VD->getInit())) &&
        NE->isArray() != IsArrayForm) {
      NewExprs.push_back(NE);
    }
  }
  return NewExprs.empty();
}

static void
DiagnoseMismatchedNewDelete(Sema &SemaRef, SourceLocation DeleteLoc,
                            const MismatchingNewDeleteDetector &Detector) {
  SourceLocation EndOfDelete = SemaRef.getLocForEndOfToken(DeleteLoc);
  FixItHint H;
  if (!Detector.IsArrayForm)
    H = FixItHint::CreateInsertion(EndOfDelete, "[]");
  else {
    SourceLocation RSquare = Lexer::findLocationAfterToken(
        DeleteLoc, tok::l_square, SemaRef.getSourceManager(),
        SemaRef.getLangOpts(), true);
    if (RSquare.isValid())
      H = FixItHint::CreateRemoval(SourceRange(EndOfDelete, RSquare));
  }
  SemaRef.Diag(DeleteLoc, diag::warn_mismatched_delete_new)
      << Detector.IsArrayForm << H;

  for (const auto *NE : Detector.NewExprs)
    SemaRef.Diag(NE->getExprLoc(), diag::note_allocated_here)
        << Detector.IsArrayForm;
}

void Sema::AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE) {
  if (Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation()))
    return;
  MismatchingNewDeleteDetector Detector(/*EndOfTU=*/false);
  switch (Detector.analyzeDeleteExpr(DE)) {
  case MismatchingNewDeleteDetector::VarInitMismatches:
  case MismatchingNewDeleteDetector::MemberInitMismatches: {
    DiagnoseMismatchedNewDelete(*this, DE->getLocStart(), Detector);
    break;
  }
  case MismatchingNewDeleteDetector::AnalyzeLater: {
    DeleteExprs[Detector.Field].push_back(
        std::make_pair(DE->getLocStart(), DE->isArrayForm()));
    break;
  }
  case MismatchingNewDeleteDetector::NoMismatch:
    break;
  }
}

void Sema::AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                                     bool DeleteWasArrayForm) {
  MismatchingNewDeleteDetector Detector(/*EndOfTU=*/true);
  switch (Detector.analyzeField(Field, DeleteWasArrayForm)) {
  case MismatchingNewDeleteDetector::VarInitMismatches:
    llvm_unreachable("This analysis should have been done for class members.");
  case MismatchingNewDeleteDetector::AnalyzeLater:
    llvm_unreachable("Analysis cannot be postponed any point beyond end of "
                     "translation unit.");
  case MismatchingNewDeleteDetector::MemberInitMismatches:
    DiagnoseMismatchedNewDelete(*this, DeleteLoc, Detector);
    break;
  case MismatchingNewDeleteDetector::NoMismatch:
    break;
  }
}

/// ActOnCXXDelete - Parsed a C++ 'delete' expression (C++ 5.3.5), as in:
/// @code ::delete ptr; @endcode
/// or
/// @code delete [] ptr; @endcode
ExprResult
Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
                     bool ArrayForm, Expr *ExE) {
  // C++ [expr.delete]p1:
  //   The operand shall have a pointer type, or a class type having a single
  //   non-explicit conversion function to a pointer type. The result has type
  //   void.
  //
  // DR599 amends "pointer type" to "pointer to object type" in both cases.

  ExprResult Ex = ExE;
  FunctionDecl *OperatorDelete = nullptr;
  bool ArrayFormAsWritten = ArrayForm;
  bool UsualArrayDeleteWantsSize = false;

  if (!Ex.get()->isTypeDependent()) {
    // Perform lvalue-to-rvalue cast, if needed.
Ex = DefaultLvalueConversion(Ex.get()); if (Ex.isInvalid()) return ExprError(); QualType Type = Ex.get()->getType(); class DeleteConverter : public ContextualImplicitConverter { public: DeleteConverter() : ContextualImplicitConverter(false, true) {} bool match(QualType ConvType) override { // FIXME: If we have an operator T* and an operator void*, we must pick // the operator T*. if (const PointerType *ConvPtrType = ConvType->getAs()) if (ConvPtrType->getPointeeType()->isIncompleteOrObjectType()) return true; return false; } SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_delete_operand) << T; } SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_delete_incomplete_class_type) << T; } SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { return S.Diag(Loc, diag::err_delete_explicit_conversion) << T << ConvTy; } SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return S.Diag(Conv->getLocation(), diag::note_delete_conversion) << ConvTy; } SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_ambiguous_delete_operand) << T; } SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return S.Diag(Conv->getLocation(), diag::note_delete_conversion) << ConvTy; } SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { llvm_unreachable("conversion functions are permitted"); } } Converter; Ex = PerformContextualImplicitConversion(StartLoc, Ex.get(), Converter); if (Ex.isInvalid()) return ExprError(); Type = Ex.get()->getType(); if (!Converter.match(Type)) // FIXME: PerformContextualImplicitConversion should return ExprError // itself in this case. return ExprError(); QualType Pointee = Type->getAs()->getPointeeType(); QualType PointeeElem = Context.getBaseElementType(Pointee); if (unsigned AddressSpace = Pointee.getAddressSpace()) return Diag(Ex.get()->getLocStart(), diag::err_address_space_qualified_delete) << Pointee.getUnqualifiedType() << AddressSpace; CXXRecordDecl *PointeeRD = nullptr; if (Pointee->isVoidType() && !isSFINAEContext()) { // The C++ standard bans deleting a pointer to a non-object type, which // effectively bans deletion of "void*". However, most compilers support // this, so we treat it as a warning unless we're in a SFINAE context. Diag(StartLoc, diag::ext_delete_void_ptr_operand) << Type << Ex.get()->getSourceRange(); } else if (Pointee->isFunctionType() || Pointee->isVoidType()) { return ExprError(Diag(StartLoc, diag::err_delete_operand) << Type << Ex.get()->getSourceRange()); } else if (!Pointee->isDependentType()) { if (!RequireCompleteType(StartLoc, Pointee, diag::warn_delete_incomplete, Ex.get())) { if (const RecordType *RT = PointeeElem->getAs()) PointeeRD = cast(RT->getDecl()); } } if (Pointee->isArrayType() && !ArrayForm) { Diag(StartLoc, diag::warn_delete_array_type) << Type << Ex.get()->getSourceRange() << FixItHint::CreateInsertion(PP.getLocForEndOfToken(StartLoc), "[]"); ArrayForm = true; } DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName( ArrayForm ? 
OO_Array_Delete : OO_Delete); if (PointeeRD) { if (!UseGlobal && FindDeallocationFunction(StartLoc, PointeeRD, DeleteName, OperatorDelete)) return ExprError(); // If we're allocating an array of records, check whether the // usual operator delete[] has a size_t parameter. if (ArrayForm) { // If the user specifically asked to use the global allocator, // we'll need to do the lookup into the class. if (UseGlobal) UsualArrayDeleteWantsSize = doesUsualArrayDeleteWantSize(*this, StartLoc, PointeeElem); // Otherwise, the usual operator delete[] should be the // function we just found. else if (OperatorDelete && isa(OperatorDelete)) UsualArrayDeleteWantsSize = (OperatorDelete->getNumParams() == 2); } if (!PointeeRD->hasIrrelevantDestructor()) if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) { MarkFunctionReferenced(StartLoc, const_cast(Dtor)); if (DiagnoseUseOfDecl(Dtor, StartLoc)) return ExprError(); } // C++ [expr.delete]p3: // In the first alternative (delete object), if the static type of the // object to be deleted is different from its dynamic type, the static // type shall be a base class of the dynamic type of the object to be // deleted and the static type shall have a virtual destructor or the // behavior is undefined. // // Note: a final class cannot be derived from, no issue there if (PointeeRD->isPolymorphic() && !PointeeRD->hasAttr()) { CXXDestructorDecl *dtor = PointeeRD->getDestructor(); if (dtor && !dtor->isVirtual()) { if (PointeeRD->isAbstract()) { // If the class is abstract, we warn by default, because we're // sure the code has undefined behavior. Diag(StartLoc, diag::warn_delete_abstract_non_virtual_dtor) << PointeeElem; } else if (!ArrayForm) { // Otherwise, if this is not an array delete, it's a bit suspect, // but not necessarily wrong. Diag(StartLoc, diag::warn_delete_non_virtual_dtor) << PointeeElem; } } } } if (!OperatorDelete) // Look for a global declaration. OperatorDelete = FindUsualDeallocationFunction( StartLoc, !RequireCompleteType(StartLoc, Pointee, 0) && (!ArrayForm || UsualArrayDeleteWantsSize || Pointee.isDestructedType()), DeleteName); MarkFunctionReferenced(StartLoc, OperatorDelete); // Check access and ambiguity of operator delete and destructor. if (PointeeRD) { if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) { CheckDestructorAccess(Ex.get()->getExprLoc(), Dtor, PDiag(diag::err_access_dtor) << PointeeElem); } } } CXXDeleteExpr *Result = new (Context) CXXDeleteExpr( Context.VoidTy, UseGlobal, ArrayForm, ArrayFormAsWritten, UsualArrayDeleteWantsSize, OperatorDelete, Ex.get(), StartLoc); AnalyzeDeleteExprMismatch(Result); return Result; } /// \brief Check the use of the given variable as a C++ condition in an if, /// while, do-while, or switch statement. ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean) { if (ConditionVar->isInvalidDecl()) return ExprError(); QualType T = ConditionVar->getType(); // C++ [stmt.select]p2: // The declarator shall not specify a function or an array. 
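  // For example (illustrative only):
  //   if (int f())  {}   // ill-formed: the condition declares a function
  //   if (int a[4]) {}   // ill-formed: the condition declares an array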
if (T->isFunctionType()) return ExprError(Diag(ConditionVar->getLocation(), diag::err_invalid_use_of_function_type) << ConditionVar->getSourceRange()); else if (T->isArrayType()) return ExprError(Diag(ConditionVar->getLocation(), diag::err_invalid_use_of_array_type) << ConditionVar->getSourceRange()); ExprResult Condition = DeclRefExpr::Create( Context, NestedNameSpecifierLoc(), SourceLocation(), ConditionVar, /*enclosing*/ false, ConditionVar->getLocation(), ConditionVar->getType().getNonReferenceType(), VK_LValue); MarkDeclRefReferenced(cast(Condition.get())); if (ConvertToBoolean) { Condition = CheckBooleanCondition(Condition.get(), StmtLoc); if (Condition.isInvalid()) return ExprError(); } return Condition; } /// CheckCXXBooleanCondition - Returns true if a conversion to bool is invalid. ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr) { // C++ 6.4p4: // The value of a condition that is an initialized declaration in a statement // other than a switch statement is the value of the declared variable // implicitly converted to type bool. If that conversion is ill-formed, the // program is ill-formed. // The value of a condition that is an expression is the value of the // expression, implicitly converted to bool. // return PerformContextuallyConvertToBool(CondExpr); } /// Helper function to determine whether this is the (deprecated) C++ /// conversion from a string literal to a pointer to non-const char or /// non-const wchar_t (for narrow and wide string literals, /// respectively). bool Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) { // Look inside the implicit cast, if it exists. if (ImplicitCastExpr *Cast = dyn_cast(From)) From = Cast->getSubExpr(); // A string literal (2.13.4) that is not a wide string literal can // be converted to an rvalue of type "pointer to char"; a wide // string literal can be converted to an rvalue of type "pointer // to wchar_t" (C++ 4.2p2). if (StringLiteral *StrLit = dyn_cast(From->IgnoreParens())) if (const PointerType *ToPtrType = ToType->getAs()) if (const BuiltinType *ToPointeeType = ToPtrType->getPointeeType()->getAs()) { // This conversion is considered only when there is an // explicit appropriate pointer target type (C++ 4.2p2). 
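        // Illustrative (hypothetical) examples:
        //   char *p = "abc";     // deprecated conversion, considered here
        //   char *q = u8"abc";   // never allowed for UTF-8 literals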
if (!ToPtrType->getPointeeType().hasQualifiers()) { switch (StrLit->getKind()) { case StringLiteral::UTF8: case StringLiteral::UTF16: case StringLiteral::UTF32: // We don't allow UTF literals to be implicitly converted break; case StringLiteral::Ascii: return (ToPointeeType->getKind() == BuiltinType::Char_U || ToPointeeType->getKind() == BuiltinType::Char_S); case StringLiteral::Wide: return ToPointeeType->isWideCharType(); } } } return false; } static ExprResult BuildCXXCastArgument(Sema &S, SourceLocation CastLoc, QualType Ty, CastKind Kind, CXXMethodDecl *Method, DeclAccessPair FoundDecl, bool HadMultipleCandidates, Expr *From) { switch (Kind) { default: llvm_unreachable("Unhandled cast kind!"); case CK_ConstructorConversion: { CXXConstructorDecl *Constructor = cast(Method); SmallVector ConstructorArgs; if (S.RequireNonAbstractType(CastLoc, Ty, diag::err_allocation_of_abstract_type)) return ExprError(); if (S.CompleteConstructorCall(Constructor, From, CastLoc, ConstructorArgs)) return ExprError(); S.CheckConstructorAccess(CastLoc, Constructor, InitializedEntity::InitializeTemporary(Ty), Constructor->getAccess()); if (S.DiagnoseUseOfDecl(Method, CastLoc)) return ExprError(); ExprResult Result = S.BuildCXXConstructExpr( CastLoc, Ty, cast(Method), ConstructorArgs, HadMultipleCandidates, /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false, CXXConstructExpr::CK_Complete, SourceRange()); if (Result.isInvalid()) return ExprError(); return S.MaybeBindToTemporary(Result.getAs()); } case CK_UserDefinedConversion: { assert(!From->getType()->isPointerType() && "Arg can't have pointer type!"); S.CheckMemberOperatorAccess(CastLoc, From, /*arg*/ nullptr, FoundDecl); if (S.DiagnoseUseOfDecl(Method, CastLoc)) return ExprError(); // Create an implicit call expr that calls it. CXXConversionDecl *Conv = cast(Method); ExprResult Result = S.BuildCXXMemberCallExpr(From, FoundDecl, Conv, HadMultipleCandidates); if (Result.isInvalid()) return ExprError(); // Record usage of conversion in an implicit cast. Result = ImplicitCastExpr::Create(S.Context, Result.get()->getType(), CK_UserDefinedConversion, Result.get(), nullptr, Result.get()->getValueKind()); return S.MaybeBindToTemporary(Result.get()); } } } /// PerformImplicitConversion - Perform an implicit conversion of the /// expression From to the type ToType using the pre-computed implicit /// conversion sequence ICS. Returns the converted /// expression. Action is the kind of conversion we're performing, /// used in the error message. ExprResult Sema::PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence &ICS, AssignmentAction Action, CheckedConversionKind CCK) { switch (ICS.getKind()) { case ImplicitConversionSequence::StandardConversion: { ExprResult Res = PerformImplicitConversion(From, ToType, ICS.Standard, Action, CCK); if (Res.isInvalid()) return ExprError(); From = Res.get(); break; } case ImplicitConversionSequence::UserDefinedConversion: { FunctionDecl *FD = ICS.UserDefined.ConversionFunction; CastKind CastKind; QualType BeforeToType; assert(FD && "no conversion function for user-defined conversion seq"); if (const CXXConversionDecl *Conv = dyn_cast(FD)) { CastKind = CK_UserDefinedConversion; // If the user-defined conversion is specified by a conversion function, // the initial standard conversion sequence converts the source type to // the implicit object parameter of the conversion function. 
BeforeToType = Context.getTagDeclType(Conv->getParent()); } else { const CXXConstructorDecl *Ctor = cast(FD); CastKind = CK_ConstructorConversion; // Do no conversion if dealing with ... for the first conversion. if (!ICS.UserDefined.EllipsisConversion) { // If the user-defined conversion is specified by a constructor, the // initial standard conversion sequence converts the source type to // the type required by the argument of the constructor BeforeToType = Ctor->getParamDecl(0)->getType().getNonReferenceType(); } } // Watch out for ellipsis conversion. if (!ICS.UserDefined.EllipsisConversion) { ExprResult Res = PerformImplicitConversion(From, BeforeToType, ICS.UserDefined.Before, AA_Converting, CCK); if (Res.isInvalid()) return ExprError(); From = Res.get(); } ExprResult CastArg = BuildCXXCastArgument(*this, From->getLocStart(), ToType.getNonReferenceType(), CastKind, cast(FD), ICS.UserDefined.FoundConversionFunction, ICS.UserDefined.HadMultipleCandidates, From); if (CastArg.isInvalid()) return ExprError(); From = CastArg.get(); return PerformImplicitConversion(From, ToType, ICS.UserDefined.After, AA_Converting, CCK); } case ImplicitConversionSequence::AmbiguousConversion: ICS.DiagnoseAmbiguousConversion(*this, From->getExprLoc(), PDiag(diag::err_typecheck_ambiguous_condition) << From->getSourceRange()); return ExprError(); case ImplicitConversionSequence::EllipsisConversion: llvm_unreachable("Cannot perform an ellipsis conversion"); case ImplicitConversionSequence::BadConversion: return ExprError(); } // Everything went well. return From; } /// PerformImplicitConversion - Perform an implicit conversion of the /// expression From to the type ToType by following the standard /// conversion sequence SCS. Returns the converted /// expression. Flavor is the context in which we're performing this /// conversion, for use in error messages. ExprResult Sema::PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK) { bool CStyle = (CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast); // Overall FIXME: we are recomputing too many types here and doing far too // much extra work. What this means is that we need to keep track of more // information that is computed when we try the implicit conversion initially, // so that we don't need to recompute anything here. QualType FromType = From->getType(); if (SCS.CopyConstructor) { // FIXME: When can ToType be a reference type? assert(!ToType->isReferenceType()); if (SCS.Second == ICK_Derived_To_Base) { SmallVector ConstructorArgs; if (CompleteConstructorCall(cast(SCS.CopyConstructor), From, /*FIXME:ConstructLoc*/SourceLocation(), ConstructorArgs)) return ExprError(); return BuildCXXConstructExpr( /*FIXME:ConstructLoc*/ SourceLocation(), ToType, SCS.CopyConstructor, ConstructorArgs, /*HadMultipleCandidates*/ false, /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false, CXXConstructExpr::CK_Complete, SourceRange()); } return BuildCXXConstructExpr( /*FIXME:ConstructLoc*/ SourceLocation(), ToType, SCS.CopyConstructor, From, /*HadMultipleCandidates*/ false, /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false, CXXConstructExpr::CK_Complete, SourceRange()); } // Resolve overloaded function references. 
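  // Illustrative (hypothetical) example:
  //   void f(int);
  //   void f(double);
  //   void (*fp)(int) = f;  // 'f' has the built-in overload type until the
  //                         // target type selects a candidate just below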
if (Context.hasSameType(FromType, Context.OverloadTy)) { DeclAccessPair Found; FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(From, ToType, true, Found); if (!Fn) return ExprError(); if (DiagnoseUseOfDecl(Fn, From->getLocStart())) return ExprError(); From = FixOverloadedFunctionReference(From, Found, Fn); FromType = From->getType(); } // If we're converting to an atomic type, first convert to the corresponding // non-atomic type. QualType ToAtomicType; if (const AtomicType *ToAtomic = ToType->getAs()) { ToAtomicType = ToType; ToType = ToAtomic->getValueType(); } // Perform the first implicit conversion. switch (SCS.First) { case ICK_Identity: if (const AtomicType *FromAtomic = FromType->getAs()) { FromType = FromAtomic->getValueType().getUnqualifiedType(); From = ImplicitCastExpr::Create(Context, FromType, CK_AtomicToNonAtomic, From, /*BasePath=*/nullptr, VK_RValue); } break; case ICK_Lvalue_To_Rvalue: { assert(From->getObjectKind() != OK_ObjCProperty); ExprResult FromRes = DefaultLvalueConversion(From); assert(!FromRes.isInvalid() && "Can't perform deduced conversion?!"); From = FromRes.get(); FromType = From->getType(); break; } case ICK_Array_To_Pointer: FromType = Context.getArrayDecayedType(FromType); From = ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Function_To_Pointer: FromType = Context.getPointerType(FromType); From = ImpCastExprToType(From, FromType, CK_FunctionToPointerDecay, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; default: llvm_unreachable("Improper first standard conversion"); } // Perform the second implicit conversion switch (SCS.Second) { case ICK_Identity: // C++ [except.spec]p5: // [For] assignment to and initialization of pointers to functions, // pointers to member functions, and references to functions: the // target entity shall allow at least the exceptions allowed by the // source value in the assignment or initialization. switch (Action) { case AA_Assigning: case AA_Initializing: // Note, function argument passing and returning are initialization. case AA_Passing: case AA_Returning: case AA_Sending: case AA_Passing_CFAudited: if (CheckExceptionSpecCompatibility(From, ToType)) return ExprError(); break; case AA_Casting: case AA_Converting: // Casts and implicit conversions are not initialization, so are not // checked for exception specification mismatches. break; } // Nothing else to do. break; case ICK_NoReturn_Adjustment: // If both sides are functions (or pointers/references to them), there could // be incompatible exception declarations. 
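    // Illustrative (hypothetical) example, assuming dynamic exception
    // specifications are enabled:
    //   void g() throw(int);
    //   void (*ok)() throw(int, long) = &g;  // fine: target allows more
    //   void (*bad)() throw() = &g;          // rejected: target allows fewer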
if (CheckExceptionSpecCompatibility(From, ToType)) return ExprError(); From = ImpCastExprToType(From, ToType, CK_NoOp, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Integral_Promotion: case ICK_Integral_Conversion: if (ToType->isBooleanType()) { assert(FromType->castAs()->getDecl()->isFixed() && SCS.Second == ICK_Integral_Promotion && "only enums with fixed underlying type can promote to bool"); From = ImpCastExprToType(From, ToType, CK_IntegralToBoolean, VK_RValue, /*BasePath=*/nullptr, CCK).get(); } else { From = ImpCastExprToType(From, ToType, CK_IntegralCast, VK_RValue, /*BasePath=*/nullptr, CCK).get(); } break; case ICK_Floating_Promotion: case ICK_Floating_Conversion: From = ImpCastExprToType(From, ToType, CK_FloatingCast, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Complex_Promotion: case ICK_Complex_Conversion: { QualType FromEl = From->getType()->getAs()->getElementType(); QualType ToEl = ToType->getAs()->getElementType(); CastKind CK; if (FromEl->isRealFloatingType()) { if (ToEl->isRealFloatingType()) CK = CK_FloatingComplexCast; else CK = CK_FloatingComplexToIntegralComplex; } else if (ToEl->isRealFloatingType()) { CK = CK_IntegralComplexToFloatingComplex; } else { CK = CK_IntegralComplexCast; } From = ImpCastExprToType(From, ToType, CK, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; } case ICK_Floating_Integral: if (ToType->isRealFloatingType()) From = ImpCastExprToType(From, ToType, CK_IntegralToFloating, VK_RValue, /*BasePath=*/nullptr, CCK).get(); else From = ImpCastExprToType(From, ToType, CK_FloatingToIntegral, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Compatible_Conversion: From = ImpCastExprToType(From, ToType, CK_NoOp, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Writeback_Conversion: case ICK_Pointer_Conversion: { if (SCS.IncompatibleObjC && Action != AA_Casting) { // Diagnose incompatible Objective-C conversions if (Action == AA_Initializing || Action == AA_Assigning) Diag(From->getLocStart(), diag::ext_typecheck_convert_incompatible_pointer) << ToType << From->getType() << Action << From->getSourceRange() << 0; else Diag(From->getLocStart(), diag::ext_typecheck_convert_incompatible_pointer) << From->getType() << ToType << Action << From->getSourceRange() << 0; if (From->getType()->isObjCObjectPointerType() && ToType->isObjCObjectPointerType()) EmitRelatedResultTypeNote(From); } else if (getLangOpts().ObjCAutoRefCount && !CheckObjCARCUnavailableWeakConversion(ToType, From->getType())) { if (Action == AA_Initializing) Diag(From->getLocStart(), diag::err_arc_weak_unavailable_assign); else Diag(From->getLocStart(), diag::err_arc_convesion_of_weak_unavailable) << (Action == AA_Casting) << From->getType() << ToType << From->getSourceRange(); } CastKind Kind = CK_Invalid; CXXCastPath BasePath; if (CheckPointerConversion(From, ToType, Kind, BasePath, CStyle)) return ExprError(); // Make sure we extend blocks if necessary. // FIXME: doing this here is really ugly. 
if (Kind == CK_BlockPointerToObjCPointerCast) { ExprResult E = From; (void) PrepareCastToObjCObjectPointer(E); From = E.get(); } if (getLangOpts().ObjCAutoRefCount) CheckObjCARCConversion(SourceRange(), ToType, From, CCK); From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK) .get(); break; } case ICK_Pointer_Member: { CastKind Kind = CK_Invalid; CXXCastPath BasePath; if (CheckMemberPointerConversion(From, ToType, Kind, BasePath, CStyle)) return ExprError(); if (CheckExceptionSpecCompatibility(From, ToType)) return ExprError(); // We may not have been able to figure out what this member pointer resolved // to up until this exact point. Attempt to lock-in it's inheritance model. if (Context.getTargetInfo().getCXXABI().isMicrosoft()) { RequireCompleteType(From->getExprLoc(), From->getType(), 0); RequireCompleteType(From->getExprLoc(), ToType, 0); } From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK) .get(); break; } case ICK_Boolean_Conversion: // Perform half-to-boolean conversion via float. if (From->getType()->isHalfType()) { From = ImpCastExprToType(From, Context.FloatTy, CK_FloatingCast).get(); FromType = Context.FloatTy; } From = ImpCastExprToType(From, Context.BoolTy, ScalarTypeToBooleanCastKind(FromType), VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Derived_To_Base: { CXXCastPath BasePath; if (CheckDerivedToBaseConversion(From->getType(), ToType.getNonReferenceType(), From->getLocStart(), From->getSourceRange(), &BasePath, CStyle)) return ExprError(); From = ImpCastExprToType(From, ToType.getNonReferenceType(), CK_DerivedToBase, From->getValueKind(), &BasePath, CCK).get(); break; } case ICK_Vector_Conversion: From = ImpCastExprToType(From, ToType, CK_BitCast, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Vector_Splat: // Vector splat from any arithmetic type to a vector. // Cast to the element type. { QualType elType = ToType->getAs()->getElementType(); if (elType != From->getType()) { ExprResult E = From; From = ImpCastExprToType(From, elType, PrepareScalarCast(E, elType)).get(); } From = ImpCastExprToType(From, ToType, CK_VectorSplat, VK_RValue, /*BasePath=*/nullptr, CCK).get(); } break; case ICK_Complex_Real: // Case 1. x -> _Complex y if (const ComplexType *ToComplex = ToType->getAs()) { QualType ElType = ToComplex->getElementType(); bool isFloatingComplex = ElType->isRealFloatingType(); // x -> y if (Context.hasSameUnqualifiedType(ElType, From->getType())) { // do nothing } else if (From->getType()->isRealFloatingType()) { From = ImpCastExprToType(From, ElType, isFloatingComplex ? CK_FloatingCast : CK_FloatingToIntegral).get(); } else { assert(From->getType()->isIntegerType()); From = ImpCastExprToType(From, ElType, isFloatingComplex ? CK_IntegralToFloating : CK_IntegralCast).get(); } // y -> _Complex y From = ImpCastExprToType(From, ToType, isFloatingComplex ? CK_FloatingRealToComplex : CK_IntegralRealToComplex).get(); // Case 2. _Complex x -> y } else { const ComplexType *FromComplex = From->getType()->getAs(); assert(FromComplex); QualType ElType = FromComplex->getElementType(); bool isFloatingComplex = ElType->isRealFloatingType(); // _Complex x -> x From = ImpCastExprToType(From, ElType, isFloatingComplex ? CK_FloatingComplexToReal : CK_IntegralComplexToReal, VK_RValue, /*BasePath=*/nullptr, CCK).get(); // x -> y if (Context.hasSameUnqualifiedType(ElType, ToType)) { // do nothing } else if (ToType->isRealFloatingType()) { From = ImpCastExprToType(From, ToType, isFloatingComplex ? 
CK_FloatingCast : CK_IntegralToFloating, VK_RValue, /*BasePath=*/nullptr, CCK).get(); } else { assert(ToType->isIntegerType()); From = ImpCastExprToType(From, ToType, isFloatingComplex ? CK_FloatingToIntegral : CK_IntegralCast, VK_RValue, /*BasePath=*/nullptr, CCK).get(); } } break; case ICK_Block_Pointer_Conversion: { From = ImpCastExprToType(From, ToType.getUnqualifiedType(), CK_BitCast, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; } case ICK_TransparentUnionConversion: { ExprResult FromRes = From; Sema::AssignConvertType ConvTy = CheckTransparentUnionArgumentConstraints(ToType, FromRes); if (FromRes.isInvalid()) return ExprError(); From = FromRes.get(); assert ((ConvTy == Sema::Compatible) && "Improper transparent union conversion"); (void)ConvTy; break; } case ICK_Zero_Event_Conversion: From = ImpCastExprToType(From, ToType, CK_ZeroToOCLEvent, From->getValueKind()).get(); break; case ICK_Lvalue_To_Rvalue: case ICK_Array_To_Pointer: case ICK_Function_To_Pointer: case ICK_Qualification: case ICK_Num_Conversion_Kinds: llvm_unreachable("Improper second standard conversion"); } switch (SCS.Third) { case ICK_Identity: // Nothing to do. break; case ICK_Qualification: { // The qualification keeps the category of the inner expression, unless the // target type isn't a reference. ExprValueKind VK = ToType->isReferenceType() ? From->getValueKind() : VK_RValue; From = ImpCastExprToType(From, ToType.getNonLValueExprType(Context), CK_NoOp, VK, /*BasePath=*/nullptr, CCK).get(); if (SCS.DeprecatedStringLiteralToCharPtr && !getLangOpts().WritableStrings) { Diag(From->getLocStart(), getLangOpts().CPlusPlus11 ? diag::ext_deprecated_string_literal_conversion : diag::warn_deprecated_string_literal_conversion) << ToType.getNonReferenceType(); } break; } default: llvm_unreachable("Improper third standard conversion"); } // If this conversion sequence involved a scalar -> atomic conversion, perform // that conversion now. if (!ToAtomicType.isNull()) { assert(Context.hasSameType( ToAtomicType->castAs()->getValueType(), From->getType())); From = ImpCastExprToType(From, ToAtomicType, CK_NonAtomicToAtomic, VK_RValue, nullptr, CCK).get(); } return From; } /// \brief Check the completeness of a type in a unary type trait. /// /// If the particular type trait requires a complete type, tries to complete /// it. If completing the type fails, a diagnostic is emitted and false /// returned. If completing the type succeeds or no completion was required, /// returns true. static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT, SourceLocation Loc, QualType ArgTy) { // C++0x [meta.unary.prop]p3: // For all of the class templates X declared in this Clause, instantiating // that template with a template argument that is a class template // specialization may result in the implicit instantiation of the template // argument if and only if the semantics of X require that the argument // must be a complete type. // We apply this rule to all the type trait expressions used to implement // these class templates. We also try to follow any GCC documented behavior // in these expressions to ensure portability of standard libraries. switch (UTT) { default: llvm_unreachable("not a UTT"); // is_complete_type somewhat obviously cannot require a complete type. case UTT_IsCompleteType: // Fall-through // These traits are modeled on the type predicates in C++0x // [meta.unary.cat] and [meta.unary.comp]. 
They are not specified as // requiring a complete type, as whether or not they return true cannot be // impacted by the completeness of the type. case UTT_IsVoid: case UTT_IsIntegral: case UTT_IsFloatingPoint: case UTT_IsArray: case UTT_IsPointer: case UTT_IsLvalueReference: case UTT_IsRvalueReference: case UTT_IsMemberFunctionPointer: case UTT_IsMemberObjectPointer: case UTT_IsEnum: case UTT_IsUnion: case UTT_IsClass: case UTT_IsFunction: case UTT_IsReference: case UTT_IsArithmetic: case UTT_IsFundamental: case UTT_IsObject: case UTT_IsScalar: case UTT_IsCompound: case UTT_IsMemberPointer: // Fall-through // These traits are modeled on type predicates in C++0x [meta.unary.prop] // which requires some of its traits to have the complete type. However, // the completeness of the type cannot impact these traits' semantics, and // so they don't require it. This matches the comments on these traits in // Table 49. case UTT_IsConst: case UTT_IsVolatile: case UTT_IsSigned: case UTT_IsUnsigned: return true; // C++0x [meta.unary.prop] Table 49 requires the following traits to be // applied to a complete type. case UTT_IsTrivial: case UTT_IsTriviallyCopyable: case UTT_IsStandardLayout: case UTT_IsPOD: case UTT_IsLiteral: case UTT_IsEmpty: case UTT_IsPolymorphic: case UTT_IsAbstract: case UTT_IsInterfaceClass: case UTT_IsDestructible: case UTT_IsNothrowDestructible: // Fall-through // These traits require a complete type. case UTT_IsFinal: case UTT_IsSealed: // These trait expressions are designed to help implement predicates in // [meta.unary.prop] despite not being named the same. They are specified // by both GCC and the Embarcadero C++ compiler, and require the complete // type due to the overarching C++0x type predicates being implemented // requiring the complete type. case UTT_HasNothrowAssign: case UTT_HasNothrowMoveAssign: case UTT_HasNothrowConstructor: case UTT_HasNothrowCopy: case UTT_HasTrivialAssign: case UTT_HasTrivialMoveAssign: case UTT_HasTrivialDefaultConstructor: case UTT_HasTrivialMoveConstructor: case UTT_HasTrivialCopy: case UTT_HasTrivialDestructor: case UTT_HasVirtualDestructor: // Arrays of unknown bound are expressly allowed. QualType ElTy = ArgTy; if (ArgTy->isIncompleteArrayType()) ElTy = S.Context.getAsArrayType(ArgTy)->getElementType(); // The void type is expressly allowed. 
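    // (Illustrative: e.g. __has_trivial_destructor(void) and
    // __has_trivial_destructor(int[]) pass this completeness check, even
    // though both operand types are incomplete.)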
if (ElTy->isVoidType()) return true; return !S.RequireCompleteType( Loc, ElTy, diag::err_incomplete_type_used_in_type_trait_expr); } } static bool HasNoThrowOperator(const RecordType *RT, OverloadedOperatorKind Op, Sema &Self, SourceLocation KeyLoc, ASTContext &C, bool (CXXRecordDecl::*HasTrivial)() const, bool (CXXRecordDecl::*HasNonTrivial)() const, bool (CXXMethodDecl::*IsDesiredOp)() const) { CXXRecordDecl *RD = cast(RT->getDecl()); if ((RD->*HasTrivial)() && !(RD->*HasNonTrivial)()) return true; DeclarationName Name = C.DeclarationNames.getCXXOperatorName(Op); DeclarationNameInfo NameInfo(Name, KeyLoc); LookupResult Res(Self, NameInfo, Sema::LookupOrdinaryName); if (Self.LookupQualifiedName(Res, RD)) { bool FoundOperator = false; Res.suppressDiagnostics(); for (LookupResult::iterator Op = Res.begin(), OpEnd = Res.end(); Op != OpEnd; ++Op) { if (isa(*Op)) continue; CXXMethodDecl *Operator = cast(*Op); if((Operator->*IsDesiredOp)()) { FoundOperator = true; const FunctionProtoType *CPT = Operator->getType()->getAs(); CPT = Self.ResolveExceptionSpec(KeyLoc, CPT); if (!CPT || !CPT->isNothrow(C)) return false; } } return FoundOperator; } return false; } static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT, SourceLocation KeyLoc, QualType T) { assert(!T->isDependentType() && "Cannot evaluate traits of dependent type"); ASTContext &C = Self.Context; switch(UTT) { default: llvm_unreachable("not a UTT"); // Type trait expressions corresponding to the primary type category // predicates in C++0x [meta.unary.cat]. case UTT_IsVoid: return T->isVoidType(); case UTT_IsIntegral: return T->isIntegralType(C); case UTT_IsFloatingPoint: return T->isFloatingType(); case UTT_IsArray: return T->isArrayType(); case UTT_IsPointer: return T->isPointerType(); case UTT_IsLvalueReference: return T->isLValueReferenceType(); case UTT_IsRvalueReference: return T->isRValueReferenceType(); case UTT_IsMemberFunctionPointer: return T->isMemberFunctionPointerType(); case UTT_IsMemberObjectPointer: return T->isMemberDataPointerType(); case UTT_IsEnum: return T->isEnumeralType(); case UTT_IsUnion: return T->isUnionType(); case UTT_IsClass: return T->isClassType() || T->isStructureType() || T->isInterfaceType(); case UTT_IsFunction: return T->isFunctionType(); // Type trait expressions which correspond to the convenient composition // predicates in C++0x [meta.unary.comp]. case UTT_IsReference: return T->isReferenceType(); case UTT_IsArithmetic: return T->isArithmeticType() && !T->isEnumeralType(); case UTT_IsFundamental: return T->isFundamentalType(); case UTT_IsObject: return T->isObjectType(); case UTT_IsScalar: // Note: semantic analysis depends on Objective-C lifetime types to be // considered scalar types. However, such types do not actually behave // like scalar types at run time (since they may require retain/release // operations), so we report them as non-scalar. if (T->isObjCLifetimeType()) { switch (T.getObjCLifetime()) { case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: return true; case Qualifiers::OCL_Strong: case Qualifiers::OCL_Weak: case Qualifiers::OCL_Autoreleasing: return false; } } return T->isScalarType(); case UTT_IsCompound: return T->isCompoundType(); case UTT_IsMemberPointer: return T->isMemberPointerType(); // Type trait expressions which correspond to the type property predicates // in C++0x [meta.unary.prop]. 
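  // A few illustrative evaluations (added for exposition):
  //   __is_const(const int)       -> true
  //   __is_volatile(volatile int) -> true
  //   __is_signed(unsigned int)   -> false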
  case UTT_IsConst:
    return T.isConstQualified();
  case UTT_IsVolatile:
    return T.isVolatileQualified();
  case UTT_IsTrivial:
    return T.isTrivialType(Self.Context);
  case UTT_IsTriviallyCopyable:
    return T.isTriviallyCopyableType(Self.Context);
  case UTT_IsStandardLayout:
    return T->isStandardLayoutType();
  case UTT_IsPOD:
    return T.isPODType(Self.Context);
  case UTT_IsLiteral:
    return T->isLiteralType(Self.Context);
  case UTT_IsEmpty:
    if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
      return !RD->isUnion() && RD->isEmpty();
    return false;
  case UTT_IsPolymorphic:
    if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
      return RD->isPolymorphic();
    return false;
  case UTT_IsAbstract:
    if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
      return RD->isAbstract();
    return false;
  case UTT_IsInterfaceClass:
    if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
      return RD->isInterface();
    return false;
  case UTT_IsFinal:
    if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
      return RD->hasAttr<FinalAttr>();
    return false;
  case UTT_IsSealed:
    if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
      if (FinalAttr *FA = RD->getAttr<FinalAttr>())
        return FA->isSpelledAsSealed();
    return false;
  case UTT_IsSigned:
    return T->isSignedIntegerType();
  case UTT_IsUnsigned:
    return T->isUnsignedIntegerType();

    // Type trait expressions which query classes regarding their construction,
    // destruction, and copying. Rather than being based directly on the
    // related type predicates in the standard, they are specified by both
    // GCC[1] and the Embarcadero C++ compiler[2], and Clang implements those
    // specifications.
    //
    //   1: http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html
    //   2: http://docwiki.embarcadero.com/RADStudio/XE/en/Type_Trait_Functions_(C%2B%2B0x)_Index
    //
    // Note that these builtins do not behave as documented in g++: if a class
    // has both a trivial and a non-trivial special member of a particular kind,
    // they return false! For now, we emulate this behavior.
    // FIXME: This appears to be a g++ bug: more complex cases reveal that it
    // does not correctly compute triviality in the presence of multiple special
    // members of the same kind. Revisit this once the g++ bug is fixed.
  case UTT_HasTrivialDefaultConstructor:
    // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
    //   If __is_pod (type) is true then the trait is true, else if type is
    //   a cv class or union type (or array thereof) with a trivial default
    //   constructor ([class.ctor]) then the trait is true, else it is false.
    if (T.isPODType(Self.Context))
      return true;
    if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
      return RD->hasTrivialDefaultConstructor() &&
             !RD->hasNonTrivialDefaultConstructor();
    return false;
  case UTT_HasTrivialMoveConstructor:
    // This trait is implemented by MSVC 2012 and needed to parse the
    // standard library headers. Specifically this is used as the logic
    // behind std::is_trivially_move_constructible (20.9.4.3).
    if (T.isPODType(Self.Context))
      return true;
    if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
      return RD->hasTrivialMoveConstructor() &&
             !RD->hasNonTrivialMoveConstructor();
    return false;
  case UTT_HasTrivialCopy:
    // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
    //   If __is_pod (type) is true or type is a reference type then
    //   the trait is true, else if type is a cv class or union type
    //   with a trivial copy constructor ([class.copy]) then the trait
    //   is true, else it is false.
if (T.isPODType(Self.Context) || T->isReferenceType()) return true; if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) return RD->hasTrivialCopyConstructor() && !RD->hasNonTrivialCopyConstructor(); return false; case UTT_HasTrivialMoveAssign: // This trait is implemented by MSVC 2012 and needed to parse the // standard library headers. Specifically it is used as the logic // behind std::is_trivially_move_assignable (20.9.4.3) if (T.isPODType(Self.Context)) return true; if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl()) return RD->hasTrivialMoveAssignment() && !RD->hasNonTrivialMoveAssignment(); return false; case UTT_HasTrivialAssign: // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html: // If type is const qualified or is a reference type then the // trait is false. Otherwise if __is_pod (type) is true then the // trait is true, else if type is a cv class or union type with // a trivial copy assignment ([class.copy]) then the trait is // true, else it is false. // Note: the const and reference restrictions are interesting, // given that const and reference members don't prevent a class // from having a trivial copy assignment operator (but do cause // errors if the copy assignment operator is actually used, q.v. // [class.copy]p12). if (T.isConstQualified()) return false; if (T.isPODType(Self.Context)) return true; if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) return RD->hasTrivialCopyAssignment() && !RD->hasNonTrivialCopyAssignment(); return false; case UTT_IsDestructible: case UTT_IsNothrowDestructible: // FIXME: Implement UTT_IsDestructible and UTT_IsNothrowDestructible. // For now, let's fall through. case UTT_HasTrivialDestructor: // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html // If __is_pod (type) is true or type is a reference type // then the trait is true, else if type is a cv class or union // type (or array thereof) with a trivial destructor // ([class.dtor]) then the trait is true, else it is // false. if (T.isPODType(Self.Context) || T->isReferenceType()) return true; // Objective-C++ ARC: autorelease types don't require destruction. if (T->isObjCLifetimeType() && T.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) return true; if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl()) return RD->hasTrivialDestructor(); return false; // TODO: Propagate nothrowness for implicitly declared special members. case UTT_HasNothrowAssign: // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html: // If type is const qualified or is a reference type then the // trait is false. Otherwise if __has_trivial_assign (type) // is true then the trait is true, else if type is a cv class // or union type with copy assignment operators that are known // not to throw an exception then the trait is true, else it is // false. if (C.getBaseElementType(T).isConstQualified()) return false; if (T->isReferenceType()) return false; if (T.isPODType(Self.Context) || T->isObjCLifetimeType()) return true; if (const RecordType *RT = T->getAs()) return HasNoThrowOperator(RT, OO_Equal, Self, KeyLoc, C, &CXXRecordDecl::hasTrivialCopyAssignment, &CXXRecordDecl::hasNonTrivialCopyAssignment, &CXXMethodDecl::isCopyAssignmentOperator); return false; case UTT_HasNothrowMoveAssign: // This trait is implemented by MSVC 2012 and needed to parse the // standard library headers. Specifically this is used as the logic // behind std::is_nothrow_move_assignable (20.9.4.3). 
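    // Hedged sketch, assuming the __has_nothrow_move_assign spelling that
    // maps onto this trait:
    //   struct S { S &operator=(S &&) noexcept; };
    //   struct T { T &operator=(T &&); };
    //   __has_nothrow_move_assign(S) // true: move assignment cannot throw
    //   __has_nothrow_move_assign(T) // false: no non-throwing guarantee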
if (T.isPODType(Self.Context)) return true; if (const RecordType *RT = C.getBaseElementType(T)->getAs()) return HasNoThrowOperator(RT, OO_Equal, Self, KeyLoc, C, &CXXRecordDecl::hasTrivialMoveAssignment, &CXXRecordDecl::hasNonTrivialMoveAssignment, &CXXMethodDecl::isMoveAssignmentOperator); return false; case UTT_HasNothrowCopy: // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html: // If __has_trivial_copy (type) is true then the trait is true, else // if type is a cv class or union type with copy constructors that are // known not to throw an exception then the trait is true, else it is // false. if (T.isPODType(C) || T->isReferenceType() || T->isObjCLifetimeType()) return true; if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) { if (RD->hasTrivialCopyConstructor() && !RD->hasNonTrivialCopyConstructor()) return true; bool FoundConstructor = false; unsigned FoundTQs; DeclContext::lookup_result R = Self.LookupConstructors(RD); for (DeclContext::lookup_iterator Con = R.begin(), ConEnd = R.end(); Con != ConEnd; ++Con) { // A template constructor is never a copy constructor. // FIXME: However, it may actually be selected at the actual overload // resolution point. if (isa(*Con)) continue; CXXConstructorDecl *Constructor = cast(*Con); if (Constructor->isCopyConstructor(FoundTQs)) { FoundConstructor = true; const FunctionProtoType *CPT = Constructor->getType()->getAs(); CPT = Self.ResolveExceptionSpec(KeyLoc, CPT); if (!CPT) return false; // TODO: check whether evaluating default arguments can throw. // For now, we'll be conservative and assume that they can throw. if (!CPT->isNothrow(Self.Context) || CPT->getNumParams() > 1) return false; } } return FoundConstructor; } return false; case UTT_HasNothrowConstructor: // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html // If __has_trivial_constructor (type) is true then the trait is // true, else if type is a cv class or union type (or array // thereof) with a default constructor that is known not to // throw an exception then the trait is true, else it is false. if (T.isPODType(C) || T->isObjCLifetimeType()) return true; if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl()) { if (RD->hasTrivialDefaultConstructor() && !RD->hasNonTrivialDefaultConstructor()) return true; bool FoundConstructor = false; DeclContext::lookup_result R = Self.LookupConstructors(RD); for (DeclContext::lookup_iterator Con = R.begin(), ConEnd = R.end(); Con != ConEnd; ++Con) { // FIXME: In C++0x, a constructor template can be a default constructor. if (isa(*Con)) continue; CXXConstructorDecl *Constructor = cast(*Con); if (Constructor->isDefaultConstructor()) { FoundConstructor = true; const FunctionProtoType *CPT = Constructor->getType()->getAs(); CPT = Self.ResolveExceptionSpec(KeyLoc, CPT); if (!CPT) return false; // FIXME: check whether evaluating default arguments can throw. // For now, we'll be conservative and assume that they can throw. if (!CPT->isNothrow(Self.Context) || CPT->getNumParams() > 0) return false; } } return FoundConstructor; } return false; case UTT_HasVirtualDestructor: // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html: // If type is a class type with a virtual destructor ([class.dtor]) // then the trait is true, else it is false. 
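    // Illustrative behavior of the destructor query below (hedged example):
    //   struct B { virtual ~B(); };
    //   struct D { ~D(); };
    //   __has_virtual_destructor(B)   // true: destructor is virtual
    //   __has_virtual_destructor(D)   // false: destructor is not virtual
    //   __has_virtual_destructor(int) // false: not a class type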
if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) if (CXXDestructorDecl *Destructor = Self.LookupDestructor(RD)) return Destructor->isVirtual(); return false; // These type trait expressions are modeled on the specifications for the // Embarcadero C++0x type trait functions: // http://docwiki.embarcadero.com/RADStudio/XE/en/Type_Trait_Functions_(C%2B%2B0x)_Index case UTT_IsCompleteType: // http://docwiki.embarcadero.com/RADStudio/XE/en/Is_complete_type_(typename_T_): // Returns True if and only if T is a complete type at the point of the // function call. return !T->isIncompleteType(); } } /// \brief Determine whether T has a non-trivial Objective-C lifetime in /// ARC mode. static bool hasNontrivialObjCLifetime(QualType T) { switch (T.getObjCLifetime()) { case Qualifiers::OCL_ExplicitNone: return false; case Qualifiers::OCL_Strong: case Qualifiers::OCL_Weak: case Qualifiers::OCL_Autoreleasing: return true; case Qualifiers::OCL_None: return T->isObjCLifetimeType(); } llvm_unreachable("Unknown ObjC lifetime qualifier"); } static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT, QualType RhsT, SourceLocation KeyLoc); static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc, ArrayRef Args, SourceLocation RParenLoc) { if (Kind <= UTT_Last) return EvaluateUnaryTypeTrait(S, Kind, KWLoc, Args[0]->getType()); if (Kind <= BTT_Last) return EvaluateBinaryTypeTrait(S, Kind, Args[0]->getType(), Args[1]->getType(), RParenLoc); switch (Kind) { case clang::TT_IsConstructible: case clang::TT_IsNothrowConstructible: case clang::TT_IsTriviallyConstructible: { // C++11 [meta.unary.prop]: // is_trivially_constructible is defined as: // // is_constructible::value is true and the variable // definition for is_constructible, as defined below, is known to call // no operation that is not trivial. // // The predicate condition for a template specialization // is_constructible shall be satisfied if and only if the // following variable definition would be well-formed for some invented // variable t: // // T t(create()...); assert(!Args.empty()); // Precondition: T and all types in the parameter pack Args shall be // complete types, (possibly cv-qualified) void, or arrays of // unknown bound. for (unsigned I = 0, N = Args.size(); I != N; ++I) { QualType ArgTy = Args[I]->getType(); if (ArgTy->isVoidType() || ArgTy->isIncompleteArrayType()) continue; if (S.RequireCompleteType(KWLoc, ArgTy, diag::err_incomplete_type_used_in_type_trait_expr)) return false; } // Make sure the first argument is a complete type. if (Args[0]->getType()->isIncompleteType()) return false; // Make sure the first argument is not an abstract type. CXXRecordDecl *RD = Args[0]->getType()->getAsCXXRecordDecl(); if (RD && RD->isAbstract()) return false; SmallVector OpaqueArgExprs; SmallVector ArgExprs; ArgExprs.reserve(Args.size() - 1); for (unsigned I = 1, N = Args.size(); I != N; ++I) { QualType T = Args[I]->getType(); if (T->isObjectType() || T->isFunctionType()) T = S.Context.getRValueReferenceType(T); OpaqueArgExprs.push_back( OpaqueValueExpr(Args[I]->getTypeLoc().getLocStart(), T.getNonLValueExprType(S.Context), Expr::getValueKindForType(T))); } for (Expr &E : OpaqueArgExprs) ArgExprs.push_back(&E); // Perform the initialization in an unevaluated context within a SFINAE // trap at translation unit scope. 
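    // Note: the SFINAETrap below suppresses all diagnostics from the
    // attempted initialization; a failure is observed only through
    // Init.Failed() or SFINAE.hasErrorOccurred() and simply yields a 'false'
    // trait result, mirroring substitution failure rather than a hard error.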
EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated); Sema::SFINAETrap SFINAE(S, /*AccessCheckingSFINAE=*/true); Sema::ContextRAII TUContext(S, S.Context.getTranslationUnitDecl()); InitializedEntity To(InitializedEntity::InitializeTemporary(Args[0])); InitializationKind InitKind(InitializationKind::CreateDirect(KWLoc, KWLoc, RParenLoc)); InitializationSequence Init(S, To, InitKind, ArgExprs); if (Init.Failed()) return false; ExprResult Result = Init.Perform(S, To, InitKind, ArgExprs); if (Result.isInvalid() || SFINAE.hasErrorOccurred()) return false; if (Kind == clang::TT_IsConstructible) return true; if (Kind == clang::TT_IsNothrowConstructible) return S.canThrow(Result.get()) == CT_Cannot; if (Kind == clang::TT_IsTriviallyConstructible) { // Under Objective-C ARC, if the destination has non-trivial Objective-C // lifetime, this is a non-trivial construction. if (S.getLangOpts().ObjCAutoRefCount && hasNontrivialObjCLifetime(Args[0]->getType().getNonReferenceType())) return false; // The initialization succeeded; now make sure there are no non-trivial // calls. return !Result.get()->hasNonTrivialCall(S.Context); } llvm_unreachable("unhandled type trait"); return false; } default: llvm_unreachable("not a TT"); } return false; } ExprResult Sema::BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef Args, SourceLocation RParenLoc) { QualType ResultType = Context.getLogicalOperationType(); if (Kind <= UTT_Last && !CheckUnaryTypeTraitTypeCompleteness( *this, Kind, KWLoc, Args[0]->getType())) return ExprError(); bool Dependent = false; for (unsigned I = 0, N = Args.size(); I != N; ++I) { if (Args[I]->getType()->isDependentType()) { Dependent = true; break; } } bool Result = false; if (!Dependent) Result = evaluateTypeTrait(*this, Kind, KWLoc, Args, RParenLoc); return TypeTraitExpr::Create(Context, ResultType, KWLoc, Kind, Args, RParenLoc, Result); } ExprResult Sema::ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef Args, SourceLocation RParenLoc) { SmallVector ConvertedArgs; ConvertedArgs.reserve(Args.size()); for (unsigned I = 0, N = Args.size(); I != N; ++I) { TypeSourceInfo *TInfo; QualType T = GetTypeFromParser(Args[I], &TInfo); if (!TInfo) TInfo = Context.getTrivialTypeSourceInfo(T, KWLoc); ConvertedArgs.push_back(TInfo); } return BuildTypeTrait(Kind, KWLoc, ConvertedArgs, RParenLoc); } static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT, QualType RhsT, SourceLocation KeyLoc) { assert(!LhsT->isDependentType() && !RhsT->isDependentType() && "Cannot evaluate traits of dependent types"); switch(BTT) { case BTT_IsBaseOf: { // C++0x [meta.rel]p2 // Base is a base class of Derived without regard to cv-qualifiers or // Base and Derived are not unions and name the same class type without // regard to cv-qualifiers. const RecordType *lhsRecord = LhsT->getAs(); if (!lhsRecord) return false; const RecordType *rhsRecord = RhsT->getAs(); if (!rhsRecord) return false; assert(Self.Context.hasSameUnqualifiedType(LhsT, RhsT) == (lhsRecord == rhsRecord)); if (lhsRecord == rhsRecord) return !lhsRecord->getDecl()->isUnion(); // C++0x [meta.rel]p2: // If Base and Derived are class types and are different types // (ignoring possible cv-qualifiers) then Derived shall be a // complete type. 
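    // Illustrative uses of __is_base_of as evaluated here (hedged examples):
    //   struct B {}; struct D : B {};
    //   __is_base_of(B, D) // true: B is a base class of D
    //   __is_base_of(D, D) // true: same non-union class type
    //   union U {};
    //   __is_base_of(U, U) // false: unions never qualify
    // If the two classes differ, Derived must be complete, as checked below.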
if (Self.RequireCompleteType(KeyLoc, RhsT, diag::err_incomplete_type_used_in_type_trait_expr)) return false; return cast(rhsRecord->getDecl()) ->isDerivedFrom(cast(lhsRecord->getDecl())); } case BTT_IsSame: return Self.Context.hasSameType(LhsT, RhsT); case BTT_TypeCompatible: return Self.Context.typesAreCompatible(LhsT.getUnqualifiedType(), RhsT.getUnqualifiedType()); case BTT_IsConvertible: case BTT_IsConvertibleTo: { // C++0x [meta.rel]p4: // Given the following function prototype: // // template // typename add_rvalue_reference::type create(); // // the predicate condition for a template specialization // is_convertible shall be satisfied if and only if // the return expression in the following code would be // well-formed, including any implicit conversions to the return // type of the function: // // To test() { // return create(); // } // // Access checking is performed as if in a context unrelated to To and // From. Only the validity of the immediate context of the expression // of the return-statement (including conversions to the return type) // is considered. // // We model the initialization as a copy-initialization of a temporary // of the appropriate type, which for this expression is identical to the // return statement (since NRVO doesn't apply). // Functions aren't allowed to return function or array types. if (RhsT->isFunctionType() || RhsT->isArrayType()) return false; // A return statement in a void function must have void type. if (RhsT->isVoidType()) return LhsT->isVoidType(); // A function definition requires a complete, non-abstract return type. if (Self.RequireCompleteType(KeyLoc, RhsT, 0) || Self.RequireNonAbstractType(KeyLoc, RhsT, 0)) return false; // Compute the result of add_rvalue_reference. if (LhsT->isObjectType() || LhsT->isFunctionType()) LhsT = Self.Context.getRValueReferenceType(LhsT); // Build a fake source and destination for initialization. InitializedEntity To(InitializedEntity::InitializeTemporary(RhsT)); OpaqueValueExpr From(KeyLoc, LhsT.getNonLValueExprType(Self.Context), Expr::getValueKindForType(LhsT)); Expr *FromPtr = &From; InitializationKind Kind(InitializationKind::CreateCopy(KeyLoc, SourceLocation())); // Perform the initialization in an unevaluated context within a SFINAE // trap at translation unit scope. EnterExpressionEvaluationContext Unevaluated(Self, Sema::Unevaluated); Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true); Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl()); InitializationSequence Init(Self, To, Kind, FromPtr); if (Init.Failed()) return false; ExprResult Result = Init.Perform(Self, To, Kind, FromPtr); return !Result.isInvalid() && !SFINAE.hasErrorOccurred(); } case BTT_IsNothrowAssignable: case BTT_IsTriviallyAssignable: { // C++11 [meta.unary.prop]p3: // is_trivially_assignable is defined as: // is_assignable::value is true and the assignment, as defined by // is_assignable, is known to call no operation that is not trivial // // is_assignable is defined as: // The expression declval() = declval() is well-formed when // treated as an unevaluated operand (Clause 5). // // For both, T and U shall be complete types, (possibly cv-qualified) // void, or arrays of unknown bound. 
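    // Hedged sketch of the assignability traits evaluated below:
    //   __is_trivially_assignable(int &, int)       // true: trivial scalar store
    //   __is_trivially_assignable(const int &, int) // false: assignment ill-formed
    //   struct S { S &operator=(const S &) noexcept; };
    //   __is_nothrow_assignable(S &, const S &)     // true: cannot throw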
if (!LhsT->isVoidType() && !LhsT->isIncompleteArrayType() && Self.RequireCompleteType(KeyLoc, LhsT, diag::err_incomplete_type_used_in_type_trait_expr)) return false; if (!RhsT->isVoidType() && !RhsT->isIncompleteArrayType() && Self.RequireCompleteType(KeyLoc, RhsT, diag::err_incomplete_type_used_in_type_trait_expr)) return false; // cv void is never assignable. if (LhsT->isVoidType() || RhsT->isVoidType()) return false; // Build expressions that emulate the effect of declval() and // declval(). if (LhsT->isObjectType() || LhsT->isFunctionType()) LhsT = Self.Context.getRValueReferenceType(LhsT); if (RhsT->isObjectType() || RhsT->isFunctionType()) RhsT = Self.Context.getRValueReferenceType(RhsT); OpaqueValueExpr Lhs(KeyLoc, LhsT.getNonLValueExprType(Self.Context), Expr::getValueKindForType(LhsT)); OpaqueValueExpr Rhs(KeyLoc, RhsT.getNonLValueExprType(Self.Context), Expr::getValueKindForType(RhsT)); // Attempt the assignment in an unevaluated context within a SFINAE // trap at translation unit scope. EnterExpressionEvaluationContext Unevaluated(Self, Sema::Unevaluated); Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true); Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl()); ExprResult Result = Self.BuildBinOp(/*S=*/nullptr, KeyLoc, BO_Assign, &Lhs, &Rhs); if (Result.isInvalid() || SFINAE.hasErrorOccurred()) return false; if (BTT == BTT_IsNothrowAssignable) return Self.canThrow(Result.get()) == CT_Cannot; if (BTT == BTT_IsTriviallyAssignable) { // Under Objective-C ARC, if the destination has non-trivial Objective-C // lifetime, this is a non-trivial assignment. if (Self.getLangOpts().ObjCAutoRefCount && hasNontrivialObjCLifetime(LhsT.getNonReferenceType())) return false; return !Result.get()->hasNonTrivialCall(Self.Context); } llvm_unreachable("unhandled type trait"); return false; } default: llvm_unreachable("not a BTT"); } llvm_unreachable("Unknown type trait or not implemented"); } ExprResult Sema::ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType Ty, Expr* DimExpr, SourceLocation RParen) { TypeSourceInfo *TSInfo; QualType T = GetTypeFromParser(Ty, &TSInfo); if (!TSInfo) TSInfo = Context.getTrivialTypeSourceInfo(T); return BuildArrayTypeTrait(ATT, KWLoc, TSInfo, DimExpr, RParen); } static uint64_t EvaluateArrayTypeTrait(Sema &Self, ArrayTypeTrait ATT, QualType T, Expr *DimExpr, SourceLocation KeyLoc) { assert(!T->isDependentType() && "Cannot evaluate traits of dependent type"); switch(ATT) { case ATT_ArrayRank: if (T->isArrayType()) { unsigned Dim = 0; while (const ArrayType *AT = Self.Context.getAsArrayType(T)) { ++Dim; T = AT->getElementType(); } return Dim; } return 0; case ATT_ArrayExtent: { llvm::APSInt Value; uint64_t Dim; if (Self.VerifyIntegerConstantExpression(DimExpr, &Value, diag::err_dimension_expr_not_constant_integer, false).isInvalid()) return 0; if (Value.isSigned() && Value.isNegative()) { Self.Diag(KeyLoc, diag::err_dimension_expr_not_constant_integer) << DimExpr->getSourceRange(); return 0; } Dim = Value.getLimitedValue(); if (T->isArrayType()) { unsigned D = 0; bool Matched = false; while (const ArrayType *AT = Self.Context.getAsArrayType(T)) { if (Dim == D) { Matched = true; break; } ++D; T = AT->getElementType(); } if (Matched && T->isArrayType()) { if (const ConstantArrayType *CAT = Self.Context.getAsConstantArrayType(T)) return CAT->getSize().getLimitedValue(); } } return 0; } } llvm_unreachable("Unknown type trait or not implemented"); } ExprResult Sema::BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation 
KWLoc, TypeSourceInfo *TSInfo, Expr* DimExpr, SourceLocation RParen) { QualType T = TSInfo->getType(); // FIXME: This should likely be tracked as an APInt to remove any host // assumptions about the width of size_t on the target. uint64_t Value = 0; if (!T->isDependentType()) Value = EvaluateArrayTypeTrait(*this, ATT, T, DimExpr, KWLoc); // While the specification for these traits from the Embarcadero C++ // compiler's documentation says the return type is 'unsigned int', Clang // returns 'size_t'. On Windows, the primary platform for the Embarcadero // compiler, there is no difference. On several other platforms this is an // important distinction. return new (Context) ArrayTypeTraitExpr(KWLoc, ATT, TSInfo, Value, DimExpr, RParen, Context.getSizeType()); } ExprResult Sema::ActOnExpressionTrait(ExpressionTrait ET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen) { // If error parsing the expression, ignore. if (!Queried) return ExprError(); ExprResult Result = BuildExpressionTrait(ET, KWLoc, Queried, RParen); return Result; } static bool EvaluateExpressionTrait(ExpressionTrait ET, Expr *E) { switch (ET) { case ET_IsLValueExpr: return E->isLValue(); case ET_IsRValueExpr: return E->isRValue(); } llvm_unreachable("Expression trait not covered by switch"); } ExprResult Sema::BuildExpressionTrait(ExpressionTrait ET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen) { if (Queried->isTypeDependent()) { // Delay type-checking for type-dependent expressions. } else if (Queried->getType()->isPlaceholderType()) { ExprResult PE = CheckPlaceholderExpr(Queried); if (PE.isInvalid()) return ExprError(); return BuildExpressionTrait(ET, KWLoc, PE.get(), RParen); } bool Value = EvaluateExpressionTrait(ET, Queried); return new (Context) ExpressionTraitExpr(KWLoc, ET, Queried, Value, RParen, Context.BoolTy); } QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation Loc, bool isIndirect) { assert(!LHS.get()->getType()->isPlaceholderType() && !RHS.get()->getType()->isPlaceholderType() && "placeholders should have been weeded out by now"); // The LHS undergoes lvalue conversions if this is ->*. if (isIndirect) { LHS = DefaultLvalueConversion(LHS.get()); if (LHS.isInvalid()) return QualType(); } // The RHS always undergoes lvalue conversions. RHS = DefaultLvalueConversion(RHS.get()); if (RHS.isInvalid()) return QualType(); const char *OpSpelling = isIndirect ? "->*" : ".*"; // C++ 5.5p2 // The binary operator .* [p3: ->*] binds its second operand, which shall // be of type "pointer to member of T" (where T is a completely-defined // class type) [...] QualType RHSType = RHS.get()->getType(); const MemberPointerType *MemPtr = RHSType->getAs(); if (!MemPtr) { Diag(Loc, diag::err_bad_memptr_rhs) << OpSpelling << RHSType << RHS.get()->getSourceRange(); return QualType(); } QualType Class(MemPtr->getClass(), 0); // Note: C++ [expr.mptr.oper]p2-3 says that the class type into which the // member pointer points must be completely-defined. However, there is no // reason for this semantic distinction, and the rule is not enforced by // other compilers. Therefore, we do not check this property, as it is // likely to be considered a defect. // C++ 5.5p2 // [...] to its first operand, which shall be of class T or of a class of // which T is an unambiguous and accessible base class. 
  //   [p3: a pointer to such a class]
  QualType LHSType = LHS.get()->getType();
  if (isIndirect) {
    if (const PointerType *Ptr = LHSType->getAs<PointerType>())
      LHSType = Ptr->getPointeeType();
    else {
      Diag(Loc, diag::err_bad_memptr_lhs)
        << OpSpelling << 1 << LHSType
        << FixItHint::CreateReplacement(SourceRange(Loc), ".*");
      return QualType();
    }
  }

  if (!Context.hasSameUnqualifiedType(Class, LHSType)) {
    // If we want to check the hierarchy, we need a complete type.
    if (RequireCompleteType(Loc, LHSType, diag::err_bad_memptr_lhs,
                            OpSpelling, (int)isIndirect)) {
      return QualType();
    }

    if (!IsDerivedFrom(LHSType, Class)) {
      Diag(Loc, diag::err_bad_memptr_lhs) << OpSpelling
        << (int)isIndirect << LHS.get()->getType();
      return QualType();
    }

    CXXCastPath BasePath;
    if (CheckDerivedToBaseConversion(LHSType, Class, Loc,
                                     SourceRange(LHS.get()->getLocStart(),
                                                 RHS.get()->getLocEnd()),
                                     &BasePath))
      return QualType();

    // Cast LHS to type of use.
    QualType UseType = isIndirect ? Context.getPointerType(Class) : Class;
    ExprValueKind VK = isIndirect ? VK_RValue : LHS.get()->getValueKind();
    LHS = ImpCastExprToType(LHS.get(), UseType, CK_DerivedToBase, VK,
                            &BasePath);
  }

  if (isa<CXXScalarValueInitExpr>(RHS.get()->IgnoreParens())) {
    // Diagnose use of pointer-to-member type which when used as
    // the functional cast in a pointer-to-member expression.
    Diag(Loc, diag::err_pointer_to_member_type) << isIndirect;
    return QualType();
  }

  // C++ 5.5p2
  //   The result is an object or a function of the type specified by the
  //   second operand.
  // The cv qualifiers are the union of those in the pointer and the left side,
  // in accordance with 5.5p5 and 5.2.5.
  QualType Result = MemPtr->getPointeeType();
  Result = Context.getCVRQualifiedType(Result, LHSType.getCVRQualifiers());

  // C++0x [expr.mptr.oper]p6:
  //   In a .* expression whose object expression is an rvalue, the program is
  //   ill-formed if the second operand is a pointer to member function with
  //   ref-qualifier &. In a ->* expression or in a .* expression whose object
  //   expression is an lvalue, the program is ill-formed if the second operand
  //   is a pointer to member function with ref-qualifier &&.
  if (const FunctionProtoType *Proto = Result->getAs<FunctionProtoType>()) {
    switch (Proto->getRefQualifier()) {
    case RQ_None:
      // Do nothing
      break;

    case RQ_LValue:
      if (!isIndirect && !LHS.get()->Classify(Context).isLValue())
        Diag(Loc, diag::err_pointer_to_member_oper_value_classify)
          << RHSType << 1 << LHS.get()->getSourceRange();
      break;

    case RQ_RValue:
      if (isIndirect || !LHS.get()->Classify(Context).isRValue())
        Diag(Loc, diag::err_pointer_to_member_oper_value_classify)
          << RHSType << 0 << LHS.get()->getSourceRange();
      break;
    }
  }

  // C++ [expr.mptr.oper]p6:
  //   The result of a .* expression whose second operand is a pointer
  //   to a data member is of the same value category as its
  //   first operand. The result of a .* expression whose second
  //   operand is a pointer to a member function is a prvalue. The
  //   result of an ->* expression is an lvalue if its second operand
  //   is a pointer to data member and a prvalue otherwise.
  if (Result->isFunctionType()) {
    VK = VK_RValue;
    return Context.BoundMemberTy;
  } else if (isIndirect) {
    VK = VK_LValue;
  } else {
    VK = LHS.get()->getValueKind();
  }

  return Result;
}

/// \brief Try to convert a type to another according to C++0x 5.16p3.
///
/// This is part of the parameter validation for the ? operator. If either
/// value operand is a class type, the two operands are attempted to be
/// converted to each other. This function does the conversion in one direction.
/// It returns true if the program is ill-formed and has already been diagnosed /// as such. static bool TryClassUnification(Sema &Self, Expr *From, Expr *To, SourceLocation QuestionLoc, bool &HaveConversion, QualType &ToType) { HaveConversion = false; ToType = To->getType(); InitializationKind Kind = InitializationKind::CreateCopy(To->getLocStart(), SourceLocation()); // C++0x 5.16p3 // The process for determining whether an operand expression E1 of type T1 // can be converted to match an operand expression E2 of type T2 is defined // as follows: // -- If E2 is an lvalue: bool ToIsLvalue = To->isLValue(); if (ToIsLvalue) { // E1 can be converted to match E2 if E1 can be implicitly converted to // type "lvalue reference to T2", subject to the constraint that in the // conversion the reference must bind directly to E1. QualType T = Self.Context.getLValueReferenceType(ToType); InitializedEntity Entity = InitializedEntity::InitializeTemporary(T); InitializationSequence InitSeq(Self, Entity, Kind, From); if (InitSeq.isDirectReferenceBinding()) { ToType = T; HaveConversion = true; return false; } if (InitSeq.isAmbiguous()) return InitSeq.Diagnose(Self, Entity, Kind, From); } // -- If E2 is an rvalue, or if the conversion above cannot be done: // -- if E1 and E2 have class type, and the underlying class types are // the same or one is a base class of the other: QualType FTy = From->getType(); QualType TTy = To->getType(); const RecordType *FRec = FTy->getAs(); const RecordType *TRec = TTy->getAs(); bool FDerivedFromT = FRec && TRec && FRec != TRec && Self.IsDerivedFrom(FTy, TTy); if (FRec && TRec && (FRec == TRec || FDerivedFromT || Self.IsDerivedFrom(TTy, FTy))) { // E1 can be converted to match E2 if the class of T2 is the // same type as, or a base class of, the class of T1, and // [cv2 > cv1]. if (FRec == TRec || FDerivedFromT) { if (TTy.isAtLeastAsQualifiedAs(FTy)) { InitializedEntity Entity = InitializedEntity::InitializeTemporary(TTy); InitializationSequence InitSeq(Self, Entity, Kind, From); if (InitSeq) { HaveConversion = true; return false; } if (InitSeq.isAmbiguous()) return InitSeq.Diagnose(Self, Entity, Kind, From); } } return false; } // -- Otherwise: E1 can be converted to match E2 if E1 can be // implicitly converted to the type that expression E2 would have // if E2 were converted to an rvalue (or the type it has, if E2 is // an rvalue). // // This actually refers very narrowly to the lvalue-to-rvalue conversion, not // to the array-to-pointer or function-to-pointer conversions. if (!TTy->getAs()) TTy = TTy.getUnqualifiedType(); InitializedEntity Entity = InitializedEntity::InitializeTemporary(TTy); InitializationSequence InitSeq(Self, Entity, Kind, From); HaveConversion = !InitSeq.Failed(); ToType = TTy; if (InitSeq.isAmbiguous()) return InitSeq.Diagnose(Self, Entity, Kind, From); return false; } /// \brief Try to find a common type for two according to C++0x 5.16p5. /// /// This is part of the parameter validation for the ? operator. If either /// value operand is a class type, overload resolution is used to find a /// conversion to a common type. 
static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc) { Expr *Args[2] = { LHS.get(), RHS.get() }; OverloadCandidateSet CandidateSet(QuestionLoc, OverloadCandidateSet::CSK_Operator); Self.AddBuiltinOperatorCandidates(OO_Conditional, QuestionLoc, Args, CandidateSet); OverloadCandidateSet::iterator Best; switch (CandidateSet.BestViableFunction(Self, QuestionLoc, Best)) { case OR_Success: { // We found a match. Perform the conversions on the arguments and move on. ExprResult LHSRes = Self.PerformImplicitConversion(LHS.get(), Best->BuiltinTypes.ParamTypes[0], Best->Conversions[0], Sema::AA_Converting); if (LHSRes.isInvalid()) break; LHS = LHSRes; ExprResult RHSRes = Self.PerformImplicitConversion(RHS.get(), Best->BuiltinTypes.ParamTypes[1], Best->Conversions[1], Sema::AA_Converting); if (RHSRes.isInvalid()) break; RHS = RHSRes; if (Best->Function) Self.MarkFunctionReferenced(QuestionLoc, Best->Function); return false; } case OR_No_Viable_Function: // Emit a better diagnostic if one of the expressions is a null pointer // constant and the other is a pointer type. In this case, the user most // likely forgot to take the address of the other expression. if (Self.DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc)) return true; Self.Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return true; case OR_Ambiguous: Self.Diag(QuestionLoc, diag::err_conditional_ambiguous_ovl) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); // FIXME: Print the possible common types by printing the return types of // the viable candidates. break; case OR_Deleted: llvm_unreachable("Conditional operator has only built-in overloads"); } return true; } /// \brief Perform an "extended" implicit conversion as returned by /// TryClassUnification. static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) { InitializedEntity Entity = InitializedEntity::InitializeTemporary(T); InitializationKind Kind = InitializationKind::CreateCopy(E.get()->getLocStart(), SourceLocation()); Expr *Arg = E.get(); InitializationSequence InitSeq(Self, Entity, Kind, Arg); ExprResult Result = InitSeq.Perform(Self, Entity, Kind, Arg); if (Result.isInvalid()) return true; E = Result; return false; } /// \brief Check the operands of ?: under C++ semantics. /// /// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y /// extension. In this case, LHS == Cond. (But they're not aliases.) QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc) { // FIXME: Handle C99's complex types, vector types, block pointers and Obj-C++ // interface pointers. // C++11 [expr.cond]p1 // The first expression is contextually converted to bool. if (!Cond.get()->isTypeDependent()) { ExprResult CondRes = CheckCXXBooleanCondition(Cond.get()); if (CondRes.isInvalid()) return QualType(); Cond = CondRes; } // Assume r-value. VK = VK_RValue; OK = OK_Ordinary; // Either of the arguments dependent? if (LHS.get()->isTypeDependent() || RHS.get()->isTypeDependent()) return Context.DependentTy; // C++11 [expr.cond]p2 // If either the second or the third operand has type (cv) void, ... 
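  // Hedged examples of the (cv) void cases handled below:
  //   cond ? throw 0 : 42      // one operand throws: result has type 'int'
  //                            // and the value category of the other arm
  //   cond ? (void)0 : (void)1 // both are void: result is a void prvalue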
QualType LTy = LHS.get()->getType(); QualType RTy = RHS.get()->getType(); bool LVoid = LTy->isVoidType(); bool RVoid = RTy->isVoidType(); if (LVoid || RVoid) { // ... one of the following shall hold: // -- The second or the third operand (but not both) is a (possibly // parenthesized) throw-expression; the result is of the type // and value category of the other. bool LThrow = isa(LHS.get()->IgnoreParenImpCasts()); bool RThrow = isa(RHS.get()->IgnoreParenImpCasts()); if (LThrow != RThrow) { Expr *NonThrow = LThrow ? RHS.get() : LHS.get(); VK = NonThrow->getValueKind(); // DR (no number yet): the result is a bit-field if the // non-throw-expression operand is a bit-field. OK = NonThrow->getObjectKind(); return NonThrow->getType(); } // -- Both the second and third operands have type void; the result is of // type void and is a prvalue. if (LVoid && RVoid) return Context.VoidTy; // Neither holds, error. Diag(QuestionLoc, diag::err_conditional_void_nonvoid) << (LVoid ? RTy : LTy) << (LVoid ? 0 : 1) << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } // Neither is void. // C++11 [expr.cond]p3 // Otherwise, if the second and third operand have different types, and // either has (cv) class type [...] an attempt is made to convert each of // those operands to the type of the other. if (!Context.hasSameType(LTy, RTy) && (LTy->isRecordType() || RTy->isRecordType())) { // These return true if a single direction is already ambiguous. QualType L2RType, R2LType; bool HaveL2R, HaveR2L; if (TryClassUnification(*this, LHS.get(), RHS.get(), QuestionLoc, HaveL2R, L2RType)) return QualType(); if (TryClassUnification(*this, RHS.get(), LHS.get(), QuestionLoc, HaveR2L, R2LType)) return QualType(); // If both can be converted, [...] the program is ill-formed. if (HaveL2R && HaveR2L) { Diag(QuestionLoc, diag::err_conditional_ambiguous) << LTy << RTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } // If exactly one conversion is possible, that conversion is applied to // the chosen operand and the converted operands are used in place of the // original operands for the remainder of this section. if (HaveL2R) { if (ConvertForConditional(*this, LHS, L2RType) || LHS.isInvalid()) return QualType(); LTy = LHS.get()->getType(); } else if (HaveR2L) { if (ConvertForConditional(*this, RHS, R2LType) || RHS.isInvalid()) return QualType(); RTy = RHS.get()->getType(); } } // C++11 [expr.cond]p3 // if both are glvalues of the same value category and the same type except // for cv-qualification, an attempt is made to convert each of those // operands to the type of the other. ExprValueKind LVK = LHS.get()->getValueKind(); ExprValueKind RVK = RHS.get()->getValueKind(); if (!Context.hasSameType(LTy, RTy) && Context.hasSameUnqualifiedType(LTy, RTy) && LVK == RVK && LVK != VK_RValue) { // Since the unqualified types are reference-related and we require the // result to be as if a reference bound directly, the only conversion // we can perform is to add cv-qualifiers. 
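  // Hedged example of the qualification adjustment performed below:
  //   int x; const int cx = 0;
  //   cond ? x : cx // both lvalues of the same type modulo cv: the less
  //                 // qualified operand is converted by adding qualifiers,
  //                 // yielding an lvalue of type 'const int'.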
Qualifiers LCVR = Qualifiers::fromCVRMask(LTy.getCVRQualifiers()); Qualifiers RCVR = Qualifiers::fromCVRMask(RTy.getCVRQualifiers()); if (RCVR.isStrictSupersetOf(LCVR)) { LHS = ImpCastExprToType(LHS.get(), RTy, CK_NoOp, LVK); LTy = LHS.get()->getType(); } else if (LCVR.isStrictSupersetOf(RCVR)) { RHS = ImpCastExprToType(RHS.get(), LTy, CK_NoOp, RVK); RTy = RHS.get()->getType(); } } // C++11 [expr.cond]p4 // If the second and third operands are glvalues of the same value // category and have the same type, the result is of that type and // value category and it is a bit-field if the second or the third // operand is a bit-field, or if both are bit-fields. // We only extend this to bitfields, not to the crazy other kinds of // l-values. bool Same = Context.hasSameType(LTy, RTy); if (Same && LVK == RVK && LVK != VK_RValue && LHS.get()->isOrdinaryOrBitFieldObject() && RHS.get()->isOrdinaryOrBitFieldObject()) { VK = LHS.get()->getValueKind(); if (LHS.get()->getObjectKind() == OK_BitField || RHS.get()->getObjectKind() == OK_BitField) OK = OK_BitField; return LTy; } // C++11 [expr.cond]p5 // Otherwise, the result is a prvalue. If the second and third operands // do not have the same type, and either has (cv) class type, ... if (!Same && (LTy->isRecordType() || RTy->isRecordType())) { // ... overload resolution is used to determine the conversions (if any) // to be applied to the operands. If the overload resolution fails, the // program is ill-formed. if (FindConditionalOverload(*this, LHS, RHS, QuestionLoc)) return QualType(); } // C++11 [expr.cond]p6 // Lvalue-to-rvalue, array-to-pointer, and function-to-pointer standard // conversions are performed on the second and third operands. LHS = DefaultFunctionArrayLvalueConversion(LHS.get()); RHS = DefaultFunctionArrayLvalueConversion(RHS.get()); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); LTy = LHS.get()->getType(); RTy = RHS.get()->getType(); // After those conversions, one of the following shall hold: // -- The second and third operands have the same type; the result // is of that type. If the operands have class type, the result // is a prvalue temporary of the result type, which is // copy-initialized from either the second operand or the third // operand depending on the value of the first operand. if (Context.getCanonicalType(LTy) == Context.getCanonicalType(RTy)) { if (LTy->isRecordType()) { // The operands have class type. Make a temporary copy. if (RequireNonAbstractType(QuestionLoc, LTy, diag::err_allocation_of_abstract_type)) return QualType(); InitializedEntity Entity = InitializedEntity::InitializeTemporary(LTy); ExprResult LHSCopy = PerformCopyInitialization(Entity, SourceLocation(), LHS); if (LHSCopy.isInvalid()) return QualType(); ExprResult RHSCopy = PerformCopyInitialization(Entity, SourceLocation(), RHS); if (RHSCopy.isInvalid()) return QualType(); LHS = LHSCopy; RHS = RHSCopy; } return LTy; } // Extension: conditional operator involving vector types. if (LTy->isVectorType() || RTy->isVectorType()) return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false, /*AllowBothBool*/true, /*AllowBoolConversions*/false); // -- The second and third operands have arithmetic or enumeration type; // the usual arithmetic conversions are performed to bring them to a // common type, and the result is of that type. 
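  // For example (illustrative): 'cond ? 1 : 2.0' applies the usual arithmetic
  // conversions to int and double and yields a prvalue of type 'double';
  // 'cond ? 'a' : 1L' likewise converts char and long to the common type long.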
if (LTy->isArithmeticType() && RTy->isArithmeticType()) { QualType ResTy = UsualArithmeticConversions(LHS, RHS); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); LHS = ImpCastExprToType(LHS.get(), ResTy, PrepareScalarCast(LHS, ResTy)); RHS = ImpCastExprToType(RHS.get(), ResTy, PrepareScalarCast(RHS, ResTy)); return ResTy; } // -- The second and third operands have pointer type, or one has pointer // type and the other is a null pointer constant, or both are null // pointer constants, at least one of which is non-integral; pointer // conversions and qualification conversions are performed to bring them // to their composite pointer type. The result is of the composite // pointer type. // -- The second and third operands have pointer to member type, or one has // pointer to member type and the other is a null pointer constant; // pointer to member conversions and qualification conversions are // performed to bring them to a common type, whose cv-qualification // shall match the cv-qualification of either the second or the third // operand. The result is of the common type. bool NonStandardCompositeType = false; QualType Composite = FindCompositePointerType(QuestionLoc, LHS, RHS, isSFINAEContext() ? nullptr : &NonStandardCompositeType); if (!Composite.isNull()) { if (NonStandardCompositeType) Diag(QuestionLoc, diag::ext_typecheck_cond_incompatible_operands_nonstandard) << LTy << RTy << Composite << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return Composite; } // Similarly, attempt to find composite type of two objective-c pointers. Composite = FindCompositeObjCPointerType(LHS, RHS, QuestionLoc); if (!Composite.isNull()) return Composite; // Check if we are using a null with a non-pointer type. if (DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc)) return QualType(); Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } /// \brief Find a merged pointer type and convert the two expressions to it. /// /// This finds the composite pointer type (or member pointer type) for @p E1 /// and @p E2 according to C++11 5.9p2. It converts both expressions to this /// type and returns it. /// It does not emit diagnostics. /// /// \param Loc The location of the operator requiring these two expressions to /// be converted to the composite pointer type. /// /// If \p NonStandardCompositeType is non-NULL, then we are permitted to find /// a non-standard (but still sane) composite type to which both expressions /// can be converted. When such a type is chosen, \c *NonStandardCompositeType /// will be set true. QualType Sema::FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType) { if (NonStandardCompositeType) *NonStandardCompositeType = false; assert(getLangOpts().CPlusPlus && "This function assumes C++"); QualType T1 = E1->getType(), T2 = E2->getType(); // C++11 5.9p2 // Pointer conversions and qualification conversions are performed on // pointer operands to bring them to their composite pointer type. If // one operand is a null pointer constant, the composite pointer type is // std::nullptr_t if the other operand is also a null pointer constant or, // if the other operand is a pointer, the type of the other operand. 
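  // Hedged examples of composite pointer types as computed below:
  //   int *p; const int *q;
  //   cond ? p : q       // composite pointer type: 'const int *'
  //   cond ? p : nullptr // null pointer constant: composite type is 'int *'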
  if (!T1->isAnyPointerType() && !T1->isMemberPointerType() &&
      !T2->isAnyPointerType() && !T2->isMemberPointerType()) {
    if (T1->isNullPtrType() &&
        E2->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
      E2 = ImpCastExprToType(E2, T1, CK_NullToPointer).get();
      return T1;
    }
    if (T2->isNullPtrType() &&
        E1->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
      E1 = ImpCastExprToType(E1, T2, CK_NullToPointer).get();
      return T2;
    }
    return QualType();
  }

  if (E1->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
    if (T2->isMemberPointerType())
      E1 = ImpCastExprToType(E1, T2, CK_NullToMemberPointer).get();
    else
      E1 = ImpCastExprToType(E1, T2, CK_NullToPointer).get();
    return T2;
  }
  if (E2->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
    if (T1->isMemberPointerType())
      E2 = ImpCastExprToType(E2, T1, CK_NullToMemberPointer).get();
    else
      E2 = ImpCastExprToType(E2, T1, CK_NullToPointer).get();
    return T1;
  }

  // Now both have to be pointers or member pointers.
  if ((!T1->isPointerType() && !T1->isMemberPointerType()) ||
      (!T2->isPointerType() && !T2->isMemberPointerType()))
    return QualType();

  //   Otherwise, if one of the operands has type "pointer to cv1 void," then
  //   the other has type "pointer to cv2 T" and the composite pointer type is
  //   "pointer to cv12 void," where cv12 is the union of cv1 and cv2.
  //   Otherwise, the composite pointer type is a pointer type similar to the
  //   type of one of the operands, with a cv-qualification signature that is
  //   the union of the cv-qualification signatures of the operand types.
  // In practice, the first part here is redundant; it's subsumed by the second.
  // What we do here is, we build the two possible composite types, and try the
  // conversions in both directions. If only one works, or if the two composite
  // types are the same, we have succeeded.
  // FIXME: extended qualifiers?
  typedef SmallVector<unsigned, 4> QualifierVector;
  QualifierVector QualifierUnion;
  typedef SmallVector<std::pair<const Type *, const Type *>, 4>
      ContainingClassVector;
  ContainingClassVector MemberOfClass;
  QualType Composite1 = Context.getCanonicalType(T1),
           Composite2 = Context.getCanonicalType(T2);
  unsigned NeedConstBefore = 0;
  do {
    const PointerType *Ptr1, *Ptr2;
    if ((Ptr1 = Composite1->getAs<PointerType>()) &&
        (Ptr2 = Composite2->getAs<PointerType>())) {
      Composite1 = Ptr1->getPointeeType();
      Composite2 = Ptr2->getPointeeType();

      // If we're allowed to create a non-standard composite type, keep track
      // of where we need to fill in additional 'const' qualifiers.
      if (NonStandardCompositeType &&
          Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
        NeedConstBefore = QualifierUnion.size();

      QualifierUnion.push_back(
          Composite1.getCVRQualifiers() | Composite2.getCVRQualifiers());
      MemberOfClass.push_back(std::make_pair(nullptr, nullptr));
      continue;
    }

    const MemberPointerType *MemPtr1, *MemPtr2;
    if ((MemPtr1 = Composite1->getAs<MemberPointerType>()) &&
        (MemPtr2 = Composite2->getAs<MemberPointerType>())) {
      Composite1 = MemPtr1->getPointeeType();
      Composite2 = MemPtr2->getPointeeType();

      // If we're allowed to create a non-standard composite type, keep track
      // of where we need to fill in additional 'const' qualifiers.
      if (NonStandardCompositeType &&
          Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
        NeedConstBefore = QualifierUnion.size();

      QualifierUnion.push_back(
          Composite1.getCVRQualifiers() | Composite2.getCVRQualifiers());
      MemberOfClass.push_back(std::make_pair(MemPtr1->getClass(),
                                             MemPtr2->getClass()));
      continue;
    }

    // FIXME: block pointer types?

    // Cannot unwrap any more types.
break; } while (true); if (NeedConstBefore && NonStandardCompositeType) { // Extension: Add 'const' to qualifiers that come before the first qualifier // mismatch, so that our (non-standard!) composite type meets the // requirements of C++ [conv.qual]p4 bullet 3. for (unsigned I = 0; I != NeedConstBefore; ++I) { if ((QualifierUnion[I] & Qualifiers::Const) == 0) { QualifierUnion[I] = QualifierUnion[I] | Qualifiers::Const; *NonStandardCompositeType = true; } } } // Rewrap the composites as pointers or member pointers with the union CVRs. ContainingClassVector::reverse_iterator MOC = MemberOfClass.rbegin(); for (QualifierVector::reverse_iterator I = QualifierUnion.rbegin(), E = QualifierUnion.rend(); I != E; (void)++I, ++MOC) { Qualifiers Quals = Qualifiers::fromCVRMask(*I); if (MOC->first && MOC->second) { // Rebuild member pointer type Composite1 = Context.getMemberPointerType( Context.getQualifiedType(Composite1, Quals), MOC->first); Composite2 = Context.getMemberPointerType( Context.getQualifiedType(Composite2, Quals), MOC->second); } else { // Rebuild pointer type Composite1 = Context.getPointerType(Context.getQualifiedType(Composite1, Quals)); Composite2 = Context.getPointerType(Context.getQualifiedType(Composite2, Quals)); } } // Try to convert to the first composite pointer type. InitializedEntity Entity1 = InitializedEntity::InitializeTemporary(Composite1); InitializationKind Kind = InitializationKind::CreateCopy(Loc, SourceLocation()); InitializationSequence E1ToC1(*this, Entity1, Kind, E1); InitializationSequence E2ToC1(*this, Entity1, Kind, E2); if (E1ToC1 && E2ToC1) { // Conversion to Composite1 is viable. if (!Context.hasSameType(Composite1, Composite2)) { // Composite2 is a different type from Composite1. Check whether // Composite2 is also viable. InitializedEntity Entity2 = InitializedEntity::InitializeTemporary(Composite2); InitializationSequence E1ToC2(*this, Entity2, Kind, E1); InitializationSequence E2ToC2(*this, Entity2, Kind, E2); if (E1ToC2 && E2ToC2) { // Both Composite1 and Composite2 are viable and are different; // this is an ambiguity. return QualType(); } } // Convert E1 to Composite1 ExprResult E1Result = E1ToC1.Perform(*this, Entity1, Kind, E1); if (E1Result.isInvalid()) return QualType(); E1 = E1Result.getAs(); // Convert E2 to Composite1 ExprResult E2Result = E2ToC1.Perform(*this, Entity1, Kind, E2); if (E2Result.isInvalid()) return QualType(); E2 = E2Result.getAs(); return Composite1; } // Check whether Composite2 is viable. InitializedEntity Entity2 = InitializedEntity::InitializeTemporary(Composite2); InitializationSequence E1ToC2(*this, Entity2, Kind, E1); InitializationSequence E2ToC2(*this, Entity2, Kind, E2); if (!E1ToC2 || !E2ToC2) return QualType(); // Convert E1 to Composite2 ExprResult E1Result = E1ToC2.Perform(*this, Entity2, Kind, E1); if (E1Result.isInvalid()) return QualType(); E1 = E1Result.getAs(); // Convert E2 to Composite2 ExprResult E2Result = E2ToC2.Perform(*this, Entity2, Kind, E2); if (E2Result.isInvalid()) return QualType(); E2 = E2Result.getAs(); return Composite2; } ExprResult Sema::MaybeBindToTemporary(Expr *E) { if (!E) return ExprError(); assert(!isa(E) && "Double-bound temporary?"); // If the result is a glvalue, we shouldn't bind it. if (!E->isRValue()) return E; // In ARC, calls that return a retainable type can return retained, // in which case we have to insert a consuming cast. 
if (getLangOpts().ObjCAutoRefCount && E->getType()->isObjCRetainableType()) { bool ReturnsRetained; // For actual calls, we compute this by examining the type of the // called value. if (CallExpr *Call = dyn_cast(E)) { Expr *Callee = Call->getCallee()->IgnoreParens(); QualType T = Callee->getType(); if (T == Context.BoundMemberTy) { // Handle pointer-to-members. if (BinaryOperator *BinOp = dyn_cast(Callee)) T = BinOp->getRHS()->getType(); else if (MemberExpr *Mem = dyn_cast(Callee)) T = Mem->getMemberDecl()->getType(); } if (const PointerType *Ptr = T->getAs()) T = Ptr->getPointeeType(); else if (const BlockPointerType *Ptr = T->getAs()) T = Ptr->getPointeeType(); else if (const MemberPointerType *MemPtr = T->getAs()) T = MemPtr->getPointeeType(); const FunctionType *FTy = T->getAs(); assert(FTy && "call to value not of function type?"); ReturnsRetained = FTy->getExtInfo().getProducesResult(); // ActOnStmtExpr arranges things so that StmtExprs of retainable // type always produce a +1 object. } else if (isa(E)) { ReturnsRetained = true; // We hit this case with the lambda conversion-to-block optimization; // we don't want any extra casts here. } else if (isa(E) && isa(cast(E)->getSubExpr())) { return E; // For message sends and property references, we try to find an // actual method. FIXME: we should infer retention by selector in // cases where we don't have an actual method. } else { ObjCMethodDecl *D = nullptr; if (ObjCMessageExpr *Send = dyn_cast(E)) { D = Send->getMethodDecl(); } else if (ObjCBoxedExpr *BoxedExpr = dyn_cast(E)) { D = BoxedExpr->getBoxingMethod(); } else if (ObjCArrayLiteral *ArrayLit = dyn_cast(E)) { D = ArrayLit->getArrayWithObjectsMethod(); } else if (ObjCDictionaryLiteral *DictLit = dyn_cast(E)) { D = DictLit->getDictWithObjectsMethod(); } ReturnsRetained = (D && D->hasAttr()); // Don't do reclaims on performSelector calls; despite their // return type, the invoked method doesn't necessarily actually // return an object. if (!ReturnsRetained && D && D->getMethodFamily() == OMF_performSelector) return E; } // Don't reclaim an object of Class type. if (!ReturnsRetained && E->getType()->isObjCARCImplicitlyUnretainedType()) return E; ExprNeedsCleanups = true; CastKind ck = (ReturnsRetained ? CK_ARCConsumeObject : CK_ARCReclaimReturnedObject); return ImplicitCastExpr::Create(Context, E->getType(), ck, E, nullptr, VK_RValue); } if (!getLangOpts().CPlusPlus) return E; // Search for the base element type (cf. ASTContext::getBaseElementType) with // a fast path for the common case that the type is directly a RecordType. const Type *T = Context.getCanonicalType(E->getType().getTypePtr()); const RecordType *RT = nullptr; while (!RT) { switch (T->getTypeClass()) { case Type::Record: RT = cast(T); break; case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: case Type::DependentSizedArray: T = cast(T)->getElementType().getTypePtr(); break; default: return E; } } // That should be enough to guarantee that this type is complete, if we're // not processing a decltype expression. CXXRecordDecl *RD = cast(RT->getDecl()); if (RD->isInvalidDecl() || RD->isDependentContext()) return E; bool IsDecltype = ExprEvalContexts.back().IsDecltype; CXXDestructorDecl *Destructor = IsDecltype ? 
nullptr : LookupDestructor(RD); if (Destructor) { MarkFunctionReferenced(E->getExprLoc(), Destructor); CheckDestructorAccess(E->getExprLoc(), Destructor, PDiag(diag::err_access_dtor_temp) << E->getType()); if (DiagnoseUseOfDecl(Destructor, E->getExprLoc())) return ExprError(); // If destructor is trivial, we can avoid the extra copy. if (Destructor->isTrivial()) return E; // We need a cleanup, but we don't need to remember the temporary. ExprNeedsCleanups = true; } CXXTemporary *Temp = CXXTemporary::Create(Context, Destructor); CXXBindTemporaryExpr *Bind = CXXBindTemporaryExpr::Create(Context, Temp, E); if (IsDecltype) ExprEvalContexts.back().DelayedDecltypeBinds.push_back(Bind); return Bind; } ExprResult Sema::MaybeCreateExprWithCleanups(ExprResult SubExpr) { if (SubExpr.isInvalid()) return ExprError(); return MaybeCreateExprWithCleanups(SubExpr.get()); } Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) { assert(SubExpr && "subexpression can't be null!"); CleanupVarDeclMarking(); unsigned FirstCleanup = ExprEvalContexts.back().NumCleanupObjects; assert(ExprCleanupObjects.size() >= FirstCleanup); assert(ExprNeedsCleanups || ExprCleanupObjects.size() == FirstCleanup); if (!ExprNeedsCleanups) return SubExpr; auto Cleanups = llvm::makeArrayRef(ExprCleanupObjects.begin() + FirstCleanup, ExprCleanupObjects.size() - FirstCleanup); Expr *E = ExprWithCleanups::Create(Context, SubExpr, Cleanups); DiscardCleanupsInEvaluationContext(); return E; } Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) { assert(SubStmt && "sub-statement can't be null!"); CleanupVarDeclMarking(); if (!ExprNeedsCleanups) return SubStmt; // FIXME: In order to attach the temporaries, wrap the statement into // a StmtExpr; currently this is only used for asm statements. // This is hacky, either create a new CXXStmtWithTemporaries statement or // a new AsmStmtWithTemporaries. CompoundStmt *CompStmt = new (Context) CompoundStmt(Context, SubStmt, SourceLocation(), SourceLocation()); Expr *E = new (Context) StmtExpr(CompStmt, Context.VoidTy, SourceLocation(), SourceLocation()); return MaybeCreateExprWithCleanups(E); } /// Process the expression contained within a decltype. For such expressions, /// certain semantic checks on temporaries are delayed until this point, and /// are omitted for the 'topmost' call in the decltype expression. If the /// topmost call bound a temporary, strip that temporary off the expression. ExprResult Sema::ActOnDecltypeExpression(Expr *E) { assert(ExprEvalContexts.back().IsDecltype && "not in a decltype expression"); // C++11 [expr.call]p11: // If a function call is a prvalue of object type, // -- if the function call is either // -- the operand of a decltype-specifier, or // -- the right operand of a comma operator that is the operand of a // decltype-specifier, // a temporary object is not introduced for the prvalue. // Recursively rebuild ParenExprs and comma expressions to strip out the // outermost CXXBindTemporaryExpr, if any. 
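  // Hedged illustration of the [expr.call]p11 rule implemented below:
  //   struct Incomplete;
  //   Incomplete get();
  //   using T = decltype(get()); // OK: no temporary is introduced for the
  //                              // topmost call, so the return type need not
  //                              // be complete or destructible here.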
if (ParenExpr *PE = dyn_cast(E)) { ExprResult SubExpr = ActOnDecltypeExpression(PE->getSubExpr()); if (SubExpr.isInvalid()) return ExprError(); if (SubExpr.get() == PE->getSubExpr()) return E; return ActOnParenExpr(PE->getLParen(), PE->getRParen(), SubExpr.get()); } if (BinaryOperator *BO = dyn_cast(E)) { if (BO->getOpcode() == BO_Comma) { ExprResult RHS = ActOnDecltypeExpression(BO->getRHS()); if (RHS.isInvalid()) return ExprError(); if (RHS.get() == BO->getRHS()) return E; return new (Context) BinaryOperator( BO->getLHS(), RHS.get(), BO_Comma, BO->getType(), BO->getValueKind(), BO->getObjectKind(), BO->getOperatorLoc(), BO->isFPContractable()); } } CXXBindTemporaryExpr *TopBind = dyn_cast(E); CallExpr *TopCall = TopBind ? dyn_cast(TopBind->getSubExpr()) : nullptr; if (TopCall) E = TopCall; else TopBind = nullptr; // Disable the special decltype handling now. ExprEvalContexts.back().IsDecltype = false; // In MS mode, don't perform any extra checking of call return types within a // decltype expression. if (getLangOpts().MSVCCompat) return E; // Perform the semantic checks we delayed until this point. for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeCalls.size(); I != N; ++I) { CallExpr *Call = ExprEvalContexts.back().DelayedDecltypeCalls[I]; if (Call == TopCall) continue; if (CheckCallReturnType(Call->getCallReturnType(Context), Call->getLocStart(), Call, Call->getDirectCallee())) return ExprError(); } // Now all relevant types are complete, check the destructors are accessible // and non-deleted, and annotate them on the temporaries. for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeBinds.size(); I != N; ++I) { CXXBindTemporaryExpr *Bind = ExprEvalContexts.back().DelayedDecltypeBinds[I]; if (Bind == TopBind) continue; CXXTemporary *Temp = Bind->getTemporary(); CXXRecordDecl *RD = Bind->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl(); CXXDestructorDecl *Destructor = LookupDestructor(RD); Temp->setDestructor(Destructor); MarkFunctionReferenced(Bind->getExprLoc(), Destructor); CheckDestructorAccess(Bind->getExprLoc(), Destructor, PDiag(diag::err_access_dtor_temp) << Bind->getType()); if (DiagnoseUseOfDecl(Destructor, Bind->getExprLoc())) return ExprError(); // We need a cleanup, but we don't need to remember the temporary. ExprNeedsCleanups = true; } // Possibly strip off the top CXXBindTemporaryExpr. return E; } /// Note a set of 'operator->' functions that were used for a member access. static void noteOperatorArrows(Sema &S, ArrayRef OperatorArrows) { unsigned SkipStart = OperatorArrows.size(), SkipCount = 0; // FIXME: Make this configurable? unsigned Limit = 9; if (OperatorArrows.size() > Limit) { // Produce Limit-1 normal notes and one 'skipping' note. SkipStart = (Limit - 1) / 2 + (Limit - 1) % 2; SkipCount = OperatorArrows.size() - (Limit - 1); } for (unsigned I = 0; I < OperatorArrows.size(); /**/) { if (I == SkipStart) { S.Diag(OperatorArrows[I]->getLocation(), diag::note_operator_arrows_suppressed) << SkipCount; I += SkipCount; } else { S.Diag(OperatorArrows[I]->getLocation(), diag::note_operator_arrow_here) << OperatorArrows[I]->getCallResultType(); ++I; } } } ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor) { // Since this might be a postfix expression, get rid of ParenListExprs. 
ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Base); if (Result.isInvalid()) return ExprError(); Base = Result.get(); Result = CheckPlaceholderExpr(Base); if (Result.isInvalid()) return ExprError(); Base = Result.get(); QualType BaseType = Base->getType(); MayBePseudoDestructor = false; if (BaseType->isDependentType()) { // If we have a pointer to a dependent type and are using the -> operator, // the object type is the type that the pointer points to. We might still // have enough information about that type to do something useful. if (OpKind == tok::arrow) if (const PointerType *Ptr = BaseType->getAs()) BaseType = Ptr->getPointeeType(); ObjectType = ParsedType::make(BaseType); MayBePseudoDestructor = true; return Base; } // C++ [over.match.oper]p8: // [...] When operator->returns, the operator-> is applied to the value // returned, with the original second operand. if (OpKind == tok::arrow) { QualType StartingType = BaseType; bool NoArrowOperatorFound = false; bool FirstIteration = true; FunctionDecl *CurFD = dyn_cast(CurContext); // The set of types we've considered so far. llvm::SmallPtrSet CTypes; SmallVector OperatorArrows; CTypes.insert(Context.getCanonicalType(BaseType)); while (BaseType->isRecordType()) { if (OperatorArrows.size() >= getLangOpts().ArrowDepth) { Diag(OpLoc, diag::err_operator_arrow_depth_exceeded) << StartingType << getLangOpts().ArrowDepth << Base->getSourceRange(); noteOperatorArrows(*this, OperatorArrows); Diag(OpLoc, diag::note_operator_arrow_depth) << getLangOpts().ArrowDepth; return ExprError(); } Result = BuildOverloadedArrowExpr( S, Base, OpLoc, // When in a template specialization and on the first loop iteration, // potentially give the default diagnostic (with the fixit in a // separate note) instead of having the error reported back to here // and giving a diagnostic with a fixit attached to the error itself. (FirstIteration && CurFD && CurFD->isFunctionTemplateSpecialization()) ? nullptr : &NoArrowOperatorFound); if (Result.isInvalid()) { if (NoArrowOperatorFound) { if (FirstIteration) { Diag(OpLoc, diag::err_typecheck_member_reference_suggestion) << BaseType << 1 << Base->getSourceRange() << FixItHint::CreateReplacement(OpLoc, "."); OpKind = tok::period; break; } Diag(OpLoc, diag::err_typecheck_member_reference_arrow) << BaseType << Base->getSourceRange(); CallExpr *CE = dyn_cast(Base); if (Decl *CD = (CE ? CE->getCalleeDecl() : nullptr)) { Diag(CD->getLocStart(), diag::note_member_reference_arrow_from_operator_arrow); } } return ExprError(); } Base = Result.get(); if (CXXOperatorCallExpr *OpCall = dyn_cast(Base)) OperatorArrows.push_back(OpCall->getDirectCallee()); BaseType = Base->getType(); CanQualType CBaseType = Context.getCanonicalType(BaseType); if (!CTypes.insert(CBaseType).second) { Diag(OpLoc, diag::err_operator_arrow_circular) << StartingType; noteOperatorArrows(*this, OperatorArrows); return ExprError(); } FirstIteration = false; } if (OpKind == tok::arrow && (BaseType->isPointerType() || BaseType->isObjCObjectPointerType())) BaseType = BaseType->getPointeeType(); } // Objective-C properties allow "." access on Objective-C pointer types, // so adjust the base type to the object type itself. if (BaseType->isObjCObjectPointerType()) BaseType = BaseType->getPointeeType(); // C++ [basic.lookup.classref]p2: // [...] If the type of the object expression is of pointer to scalar // type, the unqualified-id is looked up in the context of the complete // postfix-expression. 
// // This also indicates that we could be parsing a pseudo-destructor-name. // Note that Objective-C class and object types can be pseudo-destructor // expressions or normal member (ivar or property) access expressions. if (BaseType->isObjCObjectOrInterfaceType()) { MayBePseudoDestructor = true; } else if (!BaseType->isRecordType()) { ObjectType = ParsedType(); MayBePseudoDestructor = true; return Base; } // The object type must be complete (or dependent), or // C++11 [expr.prim.general]p3: // Unlike the object expression in other contexts, *this is not required to // be of complete type for purposes of class member access (5.2.5) outside // the member function body. if (!BaseType->isDependentType() && !isThisOutsideMemberFunctionBody(BaseType) && RequireCompleteType(OpLoc, BaseType, diag::err_incomplete_member_access)) return ExprError(); // C++ [basic.lookup.classref]p2: // If the id-expression in a class member access (5.2.5) is an // unqualified-id, and the type of the object expression is of a class // type C (or of pointer to a class type C), the unqualified-id is looked // up in the scope of class C. [...] ObjectType = ParsedType::make(BaseType); return Base; } static bool CheckArrow(Sema& S, QualType& ObjectType, Expr *&Base, tok::TokenKind& OpKind, SourceLocation OpLoc) { if (Base->hasPlaceholderType()) { ExprResult result = S.CheckPlaceholderExpr(Base); if (result.isInvalid()) return true; Base = result.get(); } ObjectType = Base->getType(); // C++ [expr.pseudo]p2: // The left-hand side of the dot operator shall be of scalar type. The // left-hand side of the arrow operator shall be of pointer to scalar type. // This scalar type is the object type. // Note that this is rather different from the normal handling for the // arrow operator. if (OpKind == tok::arrow) { if (const PointerType *Ptr = ObjectType->getAs()) { ObjectType = Ptr->getPointeeType(); } else if (!Base->isTypeDependent()) { // The user wrote "p->" when she probably meant "p."; fix it. S.Diag(OpLoc, diag::err_typecheck_member_reference_suggestion) << ObjectType << true << FixItHint::CreateReplacement(OpLoc, "."); if (S.isSFINAEContext()) return true; OpKind = tok::period; } } return false; } ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeTypeInfo, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage Destructed) { TypeSourceInfo *DestructedTypeInfo = Destructed.getTypeSourceInfo(); QualType ObjectType; if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc)) return ExprError(); if (!ObjectType->isDependentType() && !ObjectType->isScalarType() && !ObjectType->isVectorType()) { if (getLangOpts().MSVCCompat && ObjectType->isVoidType()) Diag(OpLoc, diag::ext_pseudo_dtor_on_void) << Base->getSourceRange(); else { Diag(OpLoc, diag::err_pseudo_dtor_base_not_scalar) << ObjectType << Base->getSourceRange(); return ExprError(); } } // C++ [expr.pseudo]p2: // [...] The cv-unqualified versions of the object type and of the type // designated by the pseudo-destructor-name shall be the same type. 
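[Editorial note: a small example of the rule just quoted; Int and Chr are illustrative typedefs:]

    typedef int  Int;
    typedef char Chr;

    void reset(Int *p) {
      p->~Int();    // OK: pseudo-destructor call on a scalar type; no effect
      // p->~Chr(); // error (err_pseudo_dtor_type_mismatch): 'char' is not the
                    // cv-unqualified object type 'int'
    }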
if (DestructedTypeInfo) { QualType DestructedType = DestructedTypeInfo->getType(); SourceLocation DestructedTypeStart = DestructedTypeInfo->getTypeLoc().getLocalSourceRange().getBegin(); if (!DestructedType->isDependentType() && !ObjectType->isDependentType()) { if (!Context.hasSameUnqualifiedType(DestructedType, ObjectType)) { Diag(DestructedTypeStart, diag::err_pseudo_dtor_type_mismatch) << ObjectType << DestructedType << Base->getSourceRange() << DestructedTypeInfo->getTypeLoc().getLocalSourceRange(); // Recover by setting the destructed type to the object type. DestructedType = ObjectType; DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType, DestructedTypeStart); Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo); } else if (DestructedType.getObjCLifetime() != ObjectType.getObjCLifetime()) { if (DestructedType.getObjCLifetime() == Qualifiers::OCL_None) { // Okay: just pretend that the user provided the correctly-qualified // type. } else { Diag(DestructedTypeStart, diag::err_arc_pseudo_dtor_inconstant_quals) << ObjectType << DestructedType << Base->getSourceRange() << DestructedTypeInfo->getTypeLoc().getLocalSourceRange(); } // Recover by setting the destructed type to the object type. DestructedType = ObjectType; DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType, DestructedTypeStart); Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo); } } } // C++ [expr.pseudo]p2: // [...] Furthermore, the two type-names in a pseudo-destructor-name of the // form // // ::[opt] nested-name-specifier[opt] type-name :: ~ type-name // // shall designate the same scalar type. if (ScopeTypeInfo) { QualType ScopeType = ScopeTypeInfo->getType(); if (!ScopeType->isDependentType() && !ObjectType->isDependentType() && !Context.hasSameUnqualifiedType(ScopeType, ObjectType)) { Diag(ScopeTypeInfo->getTypeLoc().getLocalSourceRange().getBegin(), diag::err_pseudo_dtor_type_mismatch) << ObjectType << ScopeType << Base->getSourceRange() << ScopeTypeInfo->getTypeLoc().getLocalSourceRange(); ScopeType = QualType(); ScopeTypeInfo = nullptr; } } Expr *Result = new (Context) CXXPseudoDestructorExpr(Context, Base, OpKind == tok::arrow, OpLoc, SS.getWithLocInContext(Context), ScopeTypeInfo, CCLoc, TildeLoc, Destructed); return Result; } ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName) { assert((FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId || FirstTypeName.getKind() == UnqualifiedId::IK_Identifier) && "Invalid first type name in pseudo-destructor"); assert((SecondTypeName.getKind() == UnqualifiedId::IK_TemplateId || SecondTypeName.getKind() == UnqualifiedId::IK_Identifier) && "Invalid second type name in pseudo-destructor"); QualType ObjectType; if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc)) return ExprError(); // Compute the object type that we should use for name lookup purposes. Only // record types and dependent types matter. ParsedType ObjectTypePtrForLookup; if (!SS.isSet()) { if (ObjectType->isRecordType()) ObjectTypePtrForLookup = ParsedType::make(ObjectType); else if (ObjectType->isDependentType()) ObjectTypePtrForLookup = ParsedType::make(Context.DependentTy); } // Convert the name of the type being destructed (following the ~) into a // type (with source-location information). 
QualType DestructedType; TypeSourceInfo *DestructedTypeInfo = nullptr; PseudoDestructorTypeStorage Destructed; if (SecondTypeName.getKind() == UnqualifiedId::IK_Identifier) { ParsedType T = getTypeName(*SecondTypeName.Identifier, SecondTypeName.StartLocation, S, &SS, true, false, ObjectTypePtrForLookup); if (!T && ((SS.isSet() && !computeDeclContext(SS, false)) || (!SS.isSet() && ObjectType->isDependentType()))) { // The name of the type being destroyed is a dependent name, and we // couldn't find anything useful in scope. Just store the identifier and // it's location, and we'll perform (qualified) name lookup again at // template instantiation time. Destructed = PseudoDestructorTypeStorage(SecondTypeName.Identifier, SecondTypeName.StartLocation); } else if (!T) { Diag(SecondTypeName.StartLocation, diag::err_pseudo_dtor_destructor_non_type) << SecondTypeName.Identifier << ObjectType; if (isSFINAEContext()) return ExprError(); // Recover by assuming we had the right type all along. DestructedType = ObjectType; } else DestructedType = GetTypeFromParser(T, &DestructedTypeInfo); } else { // Resolve the template-id to a type. TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId; ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(), TemplateId->NumArgs); TypeResult T = ActOnTemplateIdType(TemplateId->SS, TemplateId->TemplateKWLoc, TemplateId->Template, TemplateId->TemplateNameLoc, TemplateId->LAngleLoc, TemplateArgsPtr, TemplateId->RAngleLoc); if (T.isInvalid() || !T.get()) { // Recover by assuming we had the right type all along. DestructedType = ObjectType; } else DestructedType = GetTypeFromParser(T.get(), &DestructedTypeInfo); } // If we've performed some kind of recovery, (re-)build the type source // information. if (!DestructedType.isNull()) { if (!DestructedTypeInfo) DestructedTypeInfo = Context.getTrivialTypeSourceInfo(DestructedType, SecondTypeName.StartLocation); Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo); } // Convert the name of the scope type (the type prior to '::') into a type. TypeSourceInfo *ScopeTypeInfo = nullptr; QualType ScopeType; if (FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId || FirstTypeName.Identifier) { if (FirstTypeName.getKind() == UnqualifiedId::IK_Identifier) { ParsedType T = getTypeName(*FirstTypeName.Identifier, FirstTypeName.StartLocation, S, &SS, true, false, ObjectTypePtrForLookup); if (!T) { Diag(FirstTypeName.StartLocation, diag::err_pseudo_dtor_destructor_non_type) << FirstTypeName.Identifier << ObjectType; if (isSFINAEContext()) return ExprError(); // Just drop this type. It's unnecessary anyway. ScopeType = QualType(); } else ScopeType = GetTypeFromParser(T, &ScopeTypeInfo); } else { // Resolve the template-id to a type. TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId; ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(), TemplateId->NumArgs); TypeResult T = ActOnTemplateIdType(TemplateId->SS, TemplateId->TemplateKWLoc, TemplateId->Template, TemplateId->TemplateNameLoc, TemplateId->LAngleLoc, TemplateArgsPtr, TemplateId->RAngleLoc); if (T.isInvalid() || !T.get()) { // Recover by dropping this type. 
ScopeType = QualType(); } else ScopeType = GetTypeFromParser(T.get(), &ScopeTypeInfo); } } if (!ScopeType.isNull() && !ScopeTypeInfo) ScopeTypeInfo = Context.getTrivialTypeSourceInfo(ScopeType, FirstTypeName.StartLocation); return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS, ScopeTypeInfo, CCLoc, TildeLoc, Destructed); } ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS) { QualType ObjectType; if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc)) return ExprError(); QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc(), false); TypeLocBuilder TLB; DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T); DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc()); TypeSourceInfo *DestructedTypeInfo = TLB.getTypeSourceInfo(Context, T); PseudoDestructorTypeStorage Destructed(DestructedTypeInfo); return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, CXXScopeSpec(), nullptr, SourceLocation(), TildeLoc, Destructed); } ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates) { if (Method->getParent()->isLambda() && Method->getConversionType()->isBlockPointerType()) { // This is a lambda conversion to block pointer; check if the argument // is a LambdaExpr. Expr *SubE = E; CastExpr *CE = dyn_cast<CastExpr>(SubE); if (CE && CE->getCastKind() == CK_NoOp) SubE = CE->getSubExpr(); SubE = SubE->IgnoreParens(); if (CXXBindTemporaryExpr *BE = dyn_cast<CXXBindTemporaryExpr>(SubE)) SubE = BE->getSubExpr(); if (isa<LambdaExpr>(SubE)) { // For the conversion to block pointer on a lambda expression, we // construct a special BlockLiteral instead; this doesn't really make // a difference in ARC, but outside of ARC the resulting block literal // follows the normal lifetime rules for block literals instead of being // autoreleased. DiagnosticErrorTrap Trap(Diags); ExprResult Exp = BuildBlockForLambdaConversion(E->getExprLoc(), E->getExprLoc(), Method, E); if (Exp.isInvalid()) Diag(E->getExprLoc(), diag::note_lambda_to_block_conv); return Exp; } } ExprResult Exp = PerformObjectArgumentInitialization(E, /*Qualifier=*/nullptr, FoundDecl, Method); if (Exp.isInvalid()) return true; MemberExpr *ME = new (Context) MemberExpr( Exp.get(), /*IsArrow=*/false, SourceLocation(), Method, SourceLocation(), Context.BoundMemberTy, VK_RValue, OK_Ordinary); if (HadMultipleCandidates) ME->setHadMultipleCandidates(true); MarkMemberReferenced(ME); QualType ResultType = Method->getReturnType(); ExprValueKind VK = Expr::getValueKindForType(ResultType); ResultType = ResultType.getNonLValueExprType(Context); CXXMemberCallExpr *CE = new (Context) CXXMemberCallExpr(Context, ME, None, ResultType, VK, Exp.get()->getLocEnd()); return CE; } ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen) { // If the operand is an unresolved lookup expression, the expression is ill- // formed per [over.over]p1, because overloaded function names cannot be used // without arguments except in explicit contexts. ExprResult R = CheckPlaceholderExpr(Operand); if (R.isInvalid()) return R; // The operand may have been modified when checking the placeholder type. Operand = R.get(); if (ActiveTemplateInstantiations.empty() && Operand->HasSideEffects(Context, false)) { // The expression operand for noexcept is in an unevaluated expression // context, so side effects could result in unintended consequences.
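[Editorial note: a minimal sketch of an operand that triggers the warning emitted just below; 'bump' and 'counter' are hypothetical:]

    int counter = 0;
    int bump() noexcept { return ++counter; }

    bool a = noexcept(bump());     // fine: queries whether the call can throw
    bool b = noexcept(++counter);  // warns: the increment can never execute,
                                   // because the operand is unevaluated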
Diag(Operand->getExprLoc(), diag::warn_side_effects_unevaluated_context); } CanThrowResult CanThrow = canThrow(Operand); return new (Context) CXXNoexceptExpr(Context.BoolTy, Operand, CanThrow, KeyLoc, RParen); } ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation, Expr *Operand, SourceLocation RParen) { return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen); } static bool IsSpecialDiscardedValue(Expr *E) { // In C++11, discarded-value expressions of a certain form are special, // according to [expr]p10: // The lvalue-to-rvalue conversion (4.1) is applied only if the // expression is an lvalue of volatile-qualified type and it has // one of the following forms: E = E->IgnoreParens(); // - id-expression (5.1.1), if (isa(E)) return true; // - subscripting (5.2.1), if (isa(E)) return true; // - class member access (5.2.5), if (isa(E)) return true; // - indirection (5.3.1), if (UnaryOperator *UO = dyn_cast(E)) if (UO->getOpcode() == UO_Deref) return true; if (BinaryOperator *BO = dyn_cast(E)) { // - pointer-to-member operation (5.5), if (BO->isPtrMemOp()) return true; // - comma expression (5.18) where the right operand is one of the above. if (BO->getOpcode() == BO_Comma) return IsSpecialDiscardedValue(BO->getRHS()); } // - conditional expression (5.16) where both the second and the third // operands are one of the above, or if (ConditionalOperator *CO = dyn_cast(E)) return IsSpecialDiscardedValue(CO->getTrueExpr()) && IsSpecialDiscardedValue(CO->getFalseExpr()); // The related edge case of "*x ?: *x". if (BinaryConditionalOperator *BCO = dyn_cast(E)) { if (OpaqueValueExpr *OVE = dyn_cast(BCO->getTrueExpr())) return IsSpecialDiscardedValue(OVE->getSourceExpr()) && IsSpecialDiscardedValue(BCO->getFalseExpr()); } // Objective-C++ extensions to the rule. if (isa(E) || isa(E)) return true; return false; } /// Perform the conversions required for an expression used in a /// context that ignores the result. ExprResult Sema::IgnoredValueConversions(Expr *E) { if (E->hasPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(E); if (result.isInvalid()) return E; E = result.get(); } // C99 6.3.2.1: // [Except in specific positions,] an lvalue that does not have // array type is converted to the value stored in the // designated object (and is no longer an lvalue). if (E->isRValue()) { // In C, function designators (i.e. expressions of function type) // are r-values, but we still want to do function-to-pointer decay // on them. This is both technically correct and convenient for // some clients. if (!getLangOpts().CPlusPlus && E->getType()->isFunctionType()) return DefaultFunctionArrayConversion(E); return E; } if (getLangOpts().CPlusPlus) { // The C++11 standard defines the notion of a discarded-value expression; // normally, we don't need to do anything to handle it, but if it is a // volatile lvalue with a special form, we perform an lvalue-to-rvalue // conversion. if (getLangOpts().CPlusPlus11 && E->isGLValue() && E->getType().isVolatileQualified() && IsSpecialDiscardedValue(E)) { ExprResult Res = DefaultLvalueConversion(E); if (Res.isInvalid()) return E; E = Res.get(); } return E; } // GCC seems to also exclude expressions of incomplete enum type. if (const EnumType *T = E->getType()->getAs()) { if (!T->getDecl()->isComplete()) { // FIXME: stupid workaround for a codegen bug! 
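[Editorial note: IsSpecialDiscardedValue above gates the C++11 volatile-load guarantee; a sketch of the special forms it accepts:]

    volatile int v = 0;
    volatile int *p = &v;

    void poll() {
      v;        // id-expression: lvalue-to-rvalue conversion applies, so a
                // volatile load is emitted
      *p;       // indirection: also one of the listed forms
      (v, *p);  // comma whose right operand is one of the listed forms
    }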
E = ImpCastExprToType(E, Context.VoidTy, CK_ToVoid).get(); return E; } } ExprResult Res = DefaultFunctionArrayLvalueConversion(E); if (Res.isInvalid()) return E; E = Res.get(); if (!E->getType()->isVoidType()) RequireCompleteType(E->getExprLoc(), E->getType(), diag::err_incomplete_type); return E; } // If we can unambiguously determine whether Var can never be used // in a constant expression, return true. // - if the variable and its initializer are non-dependent, then // we can unambiguously check if the variable is a constant expression. // - if the initializer is not value dependent - we can determine whether // it can be used to initialize a constant expression. If Init cannot // be used to initialize a constant expression we conclude that Var can // never be a constant expression. // - FIXME: if the initializer is dependent, we can still do some analysis and // identify certain cases unambiguously as non-const by using a Visitor: // - such as those that involve odr-use of a ParmVarDecl, involve a // new/delete, lambda-expr, dynamic-cast, reinterpret-cast etc... static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var, ASTContext &Context) { if (isa<ParmVarDecl>(Var)) return true; const VarDecl *DefVD = nullptr; // If there is no initializer - this cannot be a constant expression. if (!Var->getAnyInitializer(DefVD)) return true; assert(DefVD); if (DefVD->isWeak()) return false; EvaluatedStmt *Eval = DefVD->ensureEvaluatedStmt(); Expr *Init = cast<Expr>(Eval->Value); if (Var->getType()->isDependentType() || Init->isValueDependent()) { // FIXME: Teach the constant evaluator to deal with the non-dependent parts // of value-dependent expressions, and use it here to determine whether the // initializer is a potential constant expression. return false; } return !IsVariableAConstantExpression(Var, Context); } /// \brief Check if the current lambda has any potential captures /// that must be captured by any of its enclosing lambdas that are ready to /// capture. If there is a lambda that can capture a nested /// potential-capture, go ahead and do so. Also, check to see if any /// variables are uncapturable or do not involve an odr-use so do not /// need to be captured. static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures( Expr *const FE, LambdaScopeInfo *const CurrentLSI, Sema &S) { assert(!S.isUnevaluatedContext()); assert(S.CurContext->isDependentContext()); assert(CurrentLSI->CallOperator == S.CurContext && "The current call operator must be synchronized with Sema's CurContext"); const bool IsFullExprInstantiationDependent = FE->isInstantiationDependent(); ArrayRef<const FunctionScopeInfo *> FunctionScopesArrayRef( S.FunctionScopes.data(), S.FunctionScopes.size()); // All the potentially capturable variables in the current nested // lambda (within a generic outer lambda) must be captured by an // outer lambda that is enclosed within a non-dependent context. const unsigned NumPotentialCaptures = CurrentLSI->getNumPotentialVariableCaptures(); for (unsigned I = 0; I != NumPotentialCaptures; ++I) { Expr *VarExpr = nullptr; VarDecl *Var = nullptr; CurrentLSI->getPotentialVariableCapture(I, Var, VarExpr); // If the variable is clearly identified as non-odr-used and the full // expression is not instantiation dependent, only then do we not // need to check enclosing lambdas for speculative captures. // For e.g.: // Even though 'x' is not odr-used, it should be captured.
// int test() { // const int x = 10; // auto L = [=](auto a) { // (void) +x + a; // }; // } if (CurrentLSI->isVariableExprMarkedAsNonODRUsed(VarExpr) && !IsFullExprInstantiationDependent) continue; // If we have a capture-capable lambda for the variable, go ahead and // capture the variable in that lambda (and all its enclosing lambdas). if (const Optional<unsigned> Index = getStackIndexOfNearestEnclosingCaptureCapableLambda( FunctionScopesArrayRef, Var, S)) { const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue(); MarkVarDeclODRUsed(Var, VarExpr->getExprLoc(), S, &FunctionScopeIndexOfCapturableLambda); } const bool IsVarNeverAConstantExpression = VariableCanNeverBeAConstantExpression(Var, S.Context); if (!IsFullExprInstantiationDependent || IsVarNeverAConstantExpression) { // This full expression is not instantiation dependent or the variable // cannot be used in a constant expression - which means // this variable must be odr-used here, so diagnose a // capture violation early, if the variable is un-capturable. // This is purely for diagnosing errors early. Otherwise, this // error would get diagnosed when the lambda becomes capture ready. QualType CaptureType, DeclRefType; SourceLocation ExprLoc = VarExpr->getExprLoc(); if (S.tryCaptureVariable(Var, ExprLoc, S.TryCapture_Implicit, /*EllipsisLoc*/ SourceLocation(), /*BuildAndDiagnose*/false, CaptureType, DeclRefType, nullptr)) { // We will never be able to capture this variable, and we need // to be able to in any and all instantiations, so diagnose it. S.tryCaptureVariable(Var, ExprLoc, S.TryCapture_Implicit, /*EllipsisLoc*/ SourceLocation(), /*BuildAndDiagnose*/true, CaptureType, DeclRefType, nullptr); } } } // Check if 'this' needs to be captured. if (CurrentLSI->hasPotentialThisCapture()) { // If we have a capture-capable lambda for 'this', go ahead and capture // 'this' in that lambda (and all its enclosing lambdas). if (const Optional<unsigned> Index = getStackIndexOfNearestEnclosingCaptureCapableLambda( FunctionScopesArrayRef, /*0 is 'this'*/ nullptr, S)) { const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue(); S.CheckCXXThisCapture(CurrentLSI->PotentialThisCaptureLocation, /*Explicit*/ false, /*BuildAndDiagnose*/ true, &FunctionScopeIndexOfCapturableLambda); } } // Reset all the potential captures at the end of each full-expression. CurrentLSI->clearPotentialCaptures(); } static ExprResult attemptRecovery(Sema &SemaRef, const TypoCorrectionConsumer &Consumer, TypoCorrection TC) { LookupResult R(SemaRef, Consumer.getLookupResult().getLookupNameInfo(), Consumer.getLookupResult().getLookupKind()); const CXXScopeSpec *SS = Consumer.getSS(); CXXScopeSpec NewSS; // Use an appropriate CXXScopeSpec for building the expr. if (auto *NNS = TC.getCorrectionSpecifier()) NewSS.MakeTrivial(SemaRef.Context, NNS, TC.getCorrectionRange()); else if (SS && !TC.WillReplaceSpecifier()) NewSS = *SS; if (auto *ND = TC.getCorrectionDecl()) { R.setLookupName(ND->getDeclName()); R.addDecl(ND); if (ND->isCXXClassMember()) { // Figure out the correct naming class to add to the LookupResult. CXXRecordDecl *Record = nullptr; if (auto *NNS = TC.getCorrectionSpecifier()) Record = NNS->getAsType()->getAsCXXRecordDecl(); if (!Record) Record = dyn_cast<CXXRecordDecl>(ND->getDeclContext()->getRedeclContext()); if (Record) R.setNamingClass(Record); // Detect and handle the case where the decl might be an implicit // member.
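[Editorial note: the implicit-member path below matters for hypothetical corrections like this one, where the corrected name must be rebuilt as an access through 'this':]

    struct Widget {
      int frame_count;
      int next() { return frme_count + 1; }  // typo: corrected to 'frame_count'
                                             // and rebuilt as an implicit
                                             // member access
    };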
bool MightBeImplicitMember; if (!Consumer.isAddressOfOperand()) MightBeImplicitMember = true; else if (!NewSS.isEmpty()) MightBeImplicitMember = false; else if (R.isOverloadedResult()) MightBeImplicitMember = false; else if (R.isUnresolvableResult()) MightBeImplicitMember = true; else MightBeImplicitMember = isa(ND) || isa(ND) || isa(ND); if (MightBeImplicitMember) return SemaRef.BuildPossibleImplicitMemberExpr( NewSS, /*TemplateKWLoc*/ SourceLocation(), R, /*TemplateArgs*/ nullptr); } else if (auto *Ivar = dyn_cast(ND)) { return SemaRef.LookupInObjCMethod(R, Consumer.getScope(), Ivar->getIdentifier()); } } return SemaRef.BuildDeclarationNameExpr(NewSS, R, /*NeedsADL*/ false, /*AcceptInvalidDecl*/ true); } namespace { class FindTypoExprs : public RecursiveASTVisitor { llvm::SmallSetVector &TypoExprs; public: explicit FindTypoExprs(llvm::SmallSetVector &TypoExprs) : TypoExprs(TypoExprs) {} bool VisitTypoExpr(TypoExpr *TE) { TypoExprs.insert(TE); return true; } }; class TransformTypos : public TreeTransform { typedef TreeTransform BaseTransform; VarDecl *InitDecl; // A decl to avoid as a correction because it is in the // process of being initialized. llvm::function_ref ExprFilter; llvm::SmallSetVector TypoExprs, AmbiguousTypoExprs; llvm::SmallDenseMap TransformCache; llvm::SmallDenseMap OverloadResolution; /// \brief Emit diagnostics for all of the TypoExprs encountered. /// If the TypoExprs were successfully corrected, then the diagnostics should /// suggest the corrections. Otherwise the diagnostics will not suggest /// anything (having been passed an empty TypoCorrection). void EmitAllDiagnostics() { for (auto E : TypoExprs) { TypoExpr *TE = cast(E); auto &State = SemaRef.getTypoExprState(TE); if (State.DiagHandler) { TypoCorrection TC = State.Consumer->getCurrentCorrection(); ExprResult Replacement = TransformCache[TE]; // Extract the NamedDecl from the transformed TypoExpr and add it to the // TypoCorrection, replacing the existing decls. This ensures the right // NamedDecl is used in diagnostics e.g. in the case where overload // resolution was used to select one from several possible decls that // had been stored in the TypoCorrection. if (auto *ND = getDeclFromExpr( Replacement.isInvalid() ? nullptr : Replacement.get())) TC.setCorrectionDecl(ND); State.DiagHandler(TC); } SemaRef.clearDelayedTypo(TE); } } /// \brief If corrections for the first TypoExpr have been exhausted for a /// given combination of the other TypoExprs, retry those corrections against /// the next combination of substitutions for the other TypoExprs by advancing /// to the next potential correction of the second TypoExpr. For the second /// and subsequent TypoExprs, if its stream of corrections has been exhausted, /// the stream is reset and the next TypoExpr's stream is advanced by one (a /// TypoExpr's correction stream is advanced by removing the TypoExpr from the /// TransformCache). Returns true if there is still any untried combinations /// of corrections. 
bool CheckAndAdvanceTypoExprCorrectionStreams() { for (auto TE : TypoExprs) { auto &State = SemaRef.getTypoExprState(TE); TransformCache.erase(TE); if (!State.Consumer->finished()) return true; State.Consumer->resetCorrectionStream(); } return false; } NamedDecl *getDeclFromExpr(Expr *E) { if (auto *OE = dyn_cast_or_null<OverloadExpr>(E)) E = OverloadResolution[OE]; if (!E) return nullptr; if (auto *DRE = dyn_cast<DeclRefExpr>(E)) return DRE->getDecl(); if (auto *ME = dyn_cast<MemberExpr>(E)) return ME->getMemberDecl(); // FIXME: Add any other expr types that could be seen by the delayed typo // correction TreeTransform for which the corresponding TypoCorrection could // contain multiple decls. return nullptr; } ExprResult TryTransform(Expr *E) { Sema::SFINAETrap Trap(SemaRef); ExprResult Res = TransformExpr(E); if (Trap.hasErrorOccurred() || Res.isInvalid()) return ExprError(); return ExprFilter(Res.get()); } public: TransformTypos(Sema &SemaRef, VarDecl *InitDecl, llvm::function_ref<ExprResult(Expr *)> Filter) : BaseTransform(SemaRef), InitDecl(InitDecl), ExprFilter(Filter) {} ExprResult RebuildCallExpr(Expr *Callee, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig = nullptr) { auto Result = BaseTransform::RebuildCallExpr(Callee, LParenLoc, Args, RParenLoc, ExecConfig); if (auto *OE = dyn_cast<OverloadExpr>(Callee)) { if (Result.isUsable()) { Expr *ResultCall = Result.get(); if (auto *BE = dyn_cast<CXXBindTemporaryExpr>(ResultCall)) ResultCall = BE->getSubExpr(); if (auto *CE = dyn_cast<CallExpr>(ResultCall)) OverloadResolution[OE] = CE->getCallee(); } } return Result; } ExprResult TransformLambdaExpr(LambdaExpr *E) { return Owned(E); } ExprResult Transform(Expr *E) { ExprResult Res; while (true) { Res = TryTransform(E); // Exit if either the transform was valid or if there were no TypoExprs // to transform that still have any untried correction candidates. if (!Res.isInvalid() || !CheckAndAdvanceTypoExprCorrectionStreams()) break; } // Ensure none of the TypoExprs have multiple typo correction candidates // with the same edit length that pass all the checks and filters. // TODO: Properly handle various permutations of possible corrections when // there is more than one potentially ambiguous typo correction. // Also, disable typo correction while attempting the transform when // handling potentially ambiguous typo corrections as any new TypoExprs will // have been introduced by the application of one of the correction // candidates and add little to no value if corrected. SemaRef.DisableTypoCorrection = true; while (!AmbiguousTypoExprs.empty()) { auto TE = AmbiguousTypoExprs.back(); auto Cached = TransformCache[TE]; auto &State = SemaRef.getTypoExprState(TE); State.Consumer->saveCurrentPosition(); TransformCache.erase(TE); if (!TryTransform(E).isInvalid()) { State.Consumer->resetCorrectionStream(); TransformCache.erase(TE); Res = ExprError(); break; } AmbiguousTypoExprs.remove(TE); State.Consumer->restoreSavedPosition(); TransformCache[TE] = Cached; } SemaRef.DisableTypoCorrection = false; // Ensure that all of the TypoExprs within the current Expr have been found. if (!Res.isUsable()) FindTypoExprs(TypoExprs).TraverseStmt(E); EmitAllDiagnostics(); return Res; } ExprResult TransformTypoExpr(TypoExpr *E) { // If the TypoExpr hasn't been seen before, record it. Otherwise, return the // cached transformation result if there is one and the TypoExpr isn't the // first one that was encountered.
auto &CacheEntry = TransformCache[E]; if (!TypoExprs.insert(E) && !CacheEntry.isUnset()) { return CacheEntry; } auto &State = SemaRef.getTypoExprState(E); assert(State.Consumer && "Cannot transform a cleared TypoExpr"); // For the first TypoExpr and an uncached TypoExpr, find the next likely // typo correction and return it. while (TypoCorrection TC = State.Consumer->getNextCorrection()) { if (InitDecl && TC.getCorrectionDecl() == InitDecl) continue; ExprResult NE = State.RecoveryHandler ? State.RecoveryHandler(SemaRef, E, TC) : attemptRecovery(SemaRef, *State.Consumer, TC); if (!NE.isInvalid()) { // Check whether there may be a second viable correction with the same // edit distance; if so, remember this TypoExpr may have an ambiguous // correction so it can be more thoroughly vetted later. TypoCorrection Next; if ((Next = State.Consumer->peekNextCorrection()) && Next.getEditDistance(false) == TC.getEditDistance(false)) { AmbiguousTypoExprs.insert(E); } else { AmbiguousTypoExprs.remove(E); } assert(!NE.isUnset() && "Typo was transformed into a valid-but-null ExprResult"); return CacheEntry = NE; } } return CacheEntry = ExprError(); } }; } ExprResult Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl, llvm::function_ref Filter) { // If the current evaluation context indicates there are uncorrected typos // and the current expression isn't guaranteed to not have typos, try to // resolve any TypoExpr nodes that might be in the expression. if (E && !ExprEvalContexts.empty() && ExprEvalContexts.back().NumTypos && (E->isTypeDependent() || E->isValueDependent() || E->isInstantiationDependent())) { auto TyposInContext = ExprEvalContexts.back().NumTypos; assert(TyposInContext < ~0U && "Recursive call of CorrectDelayedTyposInExpr"); ExprEvalContexts.back().NumTypos = ~0U; auto TyposResolved = DelayedTypos.size(); auto Result = TransformTypos(*this, InitDecl, Filter).Transform(E); ExprEvalContexts.back().NumTypos = TyposInContext; TyposResolved -= DelayedTypos.size(); if (Result.isInvalid() || Result.get() != E) { ExprEvalContexts.back().NumTypos -= TyposResolved; return Result; } assert(TyposResolved == 0 && "Corrected typo but got same Expr back?"); } return E; } ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC, bool DiscardedValue, bool IsConstexpr, bool IsLambdaInitCaptureInitializer) { ExprResult FullExpr = FE; if (!FullExpr.get()) return ExprError(); // If we are an init-expression in a lambdas init-capture, we should not // diagnose an unexpanded pack now (will be diagnosed once lambda-expr // containing full-expression is done). // template void test(Ts ... t) { // test([&a(t)]() { <-- (t) is an init-expr that shouldn't be diagnosed now. // return a; // }() ...); // } // FIXME: This is a hack. It would be better if we pushed the lambda scope // when we parse the lambda introducer, and teach capturing (but not // unexpanded pack detection) to walk over LambdaScopeInfos which don't have a // corresponding class yet (that is, have LambdaScopeInfo either represent a // lambda where we've entered the introducer but not the body, or represent a // lambda where we've entered the body, depending on where the // parser/instantiation has got to). if (!IsLambdaInitCaptureInitializer && DiagnoseUnexpandedParameterPack(FullExpr.get())) return ExprError(); // Top-level expressions default to 'id' when we're in a debugger. 
if (DiscardedValue && getLangOpts().DebuggerCastResultToId && FullExpr.get()->getType() == Context.UnknownAnyTy) { FullExpr = forceUnknownAnyToType(FullExpr.get(), Context.getObjCIdType()); if (FullExpr.isInvalid()) return ExprError(); } if (DiscardedValue) { FullExpr = CheckPlaceholderExpr(FullExpr.get()); if (FullExpr.isInvalid()) return ExprError(); FullExpr = IgnoredValueConversions(FullExpr.get()); if (FullExpr.isInvalid()) return ExprError(); } FullExpr = CorrectDelayedTyposInExpr(FullExpr.get()); if (FullExpr.isInvalid()) return ExprError(); CheckCompletedExpr(FullExpr.get(), CC, IsConstexpr); // At the end of this full expression (which could be a deeply nested // lambda), if there is a potential capture within the nested lambda, // have the outer capture-able lambda try and capture it. // Consider the following code: // void f(int, int); // void f(const int&, double); // void foo() { // const int x = 10, y = 20; // auto L = [=](auto a) { // auto M = [=](auto b) { // f(x, b); <-- requires x to be captured by L and M // f(y, a); <-- requires y to be captured by L, but not all Ms // }; // }; // } // FIXME: Also consider what happens for something like this that involves // the gnu-extension statement-expressions or even lambda-init-captures: // void f() { // const int n = 0; // auto L = [&](auto a) { // +n + ({ 0; a; }); // }; // } // // Here, we see +n, and then the full-expression 0; ends, so we don't // capture n (and instead remove it from our list of potential captures), // and then the full-expression +n + ({ 0; }); ends, but it's too late // for us to see that we need to capture n after all. LambdaScopeInfo *const CurrentLSI = getCurLambda(); // FIXME: PR 17877 showed that getCurLambda() can return a valid pointer // even if CurContext is not a lambda call operator. Refer to that Bug Report // for an example of the code that might cause this asynchrony. // By ensuring we are in the context of a lambda's call operator // we can fix the bug (we only need to check whether we need to capture // if we are within a lambda's body); but per the comments in that // PR, a proper fix would entail : // "Alternative suggestion: // - Add to Sema an integer holding the smallest (outermost) scope // index that we are *lexically* within, and save/restore/set to // FunctionScopes.size() in InstantiatingTemplate's // constructor/destructor. // - Teach the handful of places that iterate over FunctionScopes to // stop at the outermost enclosing lexical scope." const bool IsInLambdaDeclContext = isLambdaCallOperator(CurContext); if (IsInLambdaDeclContext && CurrentLSI && CurrentLSI->hasPotentialCaptures() && !FullExpr.isInvalid()) CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(FE, CurrentLSI, *this); return MaybeCreateExprWithCleanups(FullExpr); } StmtResult Sema::ActOnFinishFullStmt(Stmt *FullStmt) { if (!FullStmt) return StmtError(); return MaybeCreateStmtWithCleanups(FullStmt); } Sema::IfExistsResult Sema::CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo) { DeclarationName TargetName = TargetNameInfo.getName(); if (!TargetName) return IER_DoesNotExist; // If the name itself is dependent, then the result is dependent. if (TargetName.isDependentName()) return IER_Dependent; // Do the redeclaration lookup in the current scope. 
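[Editorial note: a sketch of the Microsoft extension these lookups service, assuming -fms-extensions; 'Config' is hypothetical:]

    struct Config { int retries; };

    int retries_or_zero(Config &c) {
      __if_exists(Config::retries)     { return c.retries; }
      __if_not_exists(Config::retries) { return 0; }
    }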
LookupResult R(*this, TargetNameInfo, Sema::LookupAnyName, Sema::NotForRedeclaration); LookupParsedName(R, S, &SS); R.suppressDiagnostics(); switch (R.getResultKind()) { case LookupResult::Found: case LookupResult::FoundOverloaded: case LookupResult::FoundUnresolvedValue: case LookupResult::Ambiguous: return IER_Exists; case LookupResult::NotFound: return IER_DoesNotExist; case LookupResult::NotFoundInCurrentInstantiation: return IER_Dependent; } llvm_unreachable("Invalid LookupResult Kind!"); } Sema::IfExistsResult Sema::CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name) { DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name); // Check for unexpanded parameter packs. SmallVector Unexpanded; collectUnexpandedParameterPacks(SS, Unexpanded); collectUnexpandedParameterPacks(TargetNameInfo, Unexpanded); if (!Unexpanded.empty()) { DiagnoseUnexpandedParameterPacks(KeywordLoc, IsIfExists? UPPC_IfExists : UPPC_IfNotExists, Unexpanded); return IER_Error; } return CheckMicrosoftIfExistsSymbol(S, SS, TargetNameInfo); } Index: vendor/clang/dist/test/CodeGen/libcalls-fno-builtin.c =================================================================== --- vendor/clang/dist/test/CodeGen/libcalls-fno-builtin.c (revision 292727) +++ vendor/clang/dist/test/CodeGen/libcalls-fno-builtin.c (revision 292728) @@ -1,125 +1,126 @@ // RUN: %clang_cc1 -S -O3 -fno-builtin -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-w64-mingw32 -S -O3 -fno-builtin -o - %s | FileCheck %s // rdar://10551066 typedef __SIZE_TYPE__ size_t; double ceil(double x); double copysign(double,double); double cos(double x); double fabs(double x); double floor(double x); char *strcat(char *s1, const char *s2); char *strncat(char *s1, const char *s2, size_t n); char *strchr(const char *s, int c); char *strrchr(const char *s, int c); int strcmp(const char *s1, const char *s2); int strncmp(const char *s1, const char *s2, size_t n); char *strcpy(char *s1, const char *s2); char *stpcpy(char *s1, const char *s2); char *strncpy(char *s1, const char *s2, size_t n); size_t strlen(const char *s); char *strpbrk(const char *s1, const char *s2); size_t strspn(const char *s1, const char *s2); double strtod(const char *nptr, char **endptr); float strtof(const char *nptr, char **endptr); long double strtold(const char *nptr, char **endptr); long int strtol(const char *nptr, char **endptr, int base); long long int strtoll(const char *nptr, char **endptr, int base); unsigned long int strtoul(const char *nptr, char **endptr, int base); unsigned long long int strtoull(const char *nptr, char **endptr, int base); double t1(double x) { return ceil(x); } // CHECK: t1 // CHECK: ceil double t2(double x, double y) { return copysign(x,y); } // CHECK: t2 // CHECK: copysign double t3(double x) { return cos(x); } // CHECK: t3 // CHECK: cos double t4(double x) { return fabs(x); } // CHECK: t4 // CHECK: fabs double t5(double x) { return floor(x); } // CHECK: t5 // CHECK: floor char *t6(char *x) { return strcat(x, ""); } // CHECK: t6 // CHECK: strcat char *t7(char *x) { return strncat(x, "", 1); } // CHECK: t7 // CHECK: strncat char *t8(void) { return strchr("hello, world", 'w'); } // CHECK: t8 // CHECK: strchr char *t9(void) { return strrchr("hello, world", 'w'); } // CHECK: t9 // CHECK: strrchr int t10(void) { return strcmp("foo", "bar"); } // CHECK: t10 // CHECK: strcmp int t11(void) { return strncmp("foo", "bar", 3); } // CHECK: t11 // CHECK: strncmp char *t12(char *x) { return 
strcpy(x, "foo"); } // CHECK: t12 // CHECK: strcpy char *t13(char *x) { return stpcpy(x, "foo"); } // CHECK: t13 // CHECK: stpcpy char *t14(char *x) { return strncpy(x, "foo", 3); } // CHECK: t14 // CHECK: strncpy size_t t15(void) { return strlen("foo"); } // CHECK: t15 // CHECK: strlen char *t16(char *x) { return strpbrk(x, ""); } // CHECK: t16 // CHECK: strpbrk size_t t17(char *x) { return strspn(x, ""); } // CHECK: t17 // CHECK: strspn double t18(char **x) { return strtod("123.4", x); } // CHECK: t18 // CHECK: strtod float t19(char **x) { return strtof("123.4", x); } // CHECK: t19 // CHECK: strtof long double t20(char **x) { return strtold("123.4", x); } // CHECK: t20 // CHECK: strtold long int t21(char **x) { return strtol("1234", x, 10); } // CHECK: t21 // CHECK: strtol long int t22(char **x) { return strtoll("1234", x, 10); } // CHECK: t22 // CHECK: strtoll long int t23(char **x) { return strtoul("1234", x, 10); } // CHECK: t23 // CHECK: strtoul long int t24(char **x) { return strtoull("1234", x, 10); } // CHECK: t24 // CHECK: strtoull Index: vendor/clang/dist/test/Driver/fopenmp.c =================================================================== --- vendor/clang/dist/test/Driver/fopenmp.c (revision 292727) +++ vendor/clang/dist/test/Driver/fopenmp.c (revision 292728) @@ -1,33 +1,78 @@ // RUN: %clang -target x86_64-linux-gnu -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP // RUN: %clang -target x86_64-linux-gnu -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP // RUN: %clang -target x86_64-linux-gnu -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP +// RUN: %clang -target x86_64-apple-darwin -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP +// RUN: %clang -target x86_64-apple-darwin -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP +// RUN: %clang -target x86_64-apple-darwin -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP +// RUN: %clang -target x86_64-freebsd -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP +// RUN: %clang -target x86_64-freebsd -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP +// RUN: %clang -target x86_64-freebsd -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP +// RUN: %clang -target x86_64-netbsd -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP +// RUN: %clang -target x86_64-netbsd -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP +// RUN: %clang -target x86_64-netbsd -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP // // CHECK-CC1-OPENMP: "-cc1" // CHECK-CC1-OPENMP: "-fopenmp" // // CHECK-CC1-NO-OPENMP: "-cc1" // CHECK-CC1-NO-OPENMP-NOT: "-fopenmp" // // RUN: %clang -target x86_64-linux-gnu -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP // RUN: %clang -target x86_64-linux-gnu -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP // RUN: %clang -target x86_64-linux-gnu -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5 // +// RUN: %clang -nostdlib -target x86_64-linux-gnu -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP +// RUN: %clang -nostdlib -target x86_64-linux-gnu -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP +// RUN: %clang 
-nostdlib -target x86_64-linux-gnu -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5 +// +// RUN: %clang -target x86_64-darwin -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP +// RUN: %clang -target x86_64-darwin -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP +// RUN: %clang -target x86_64-darwin -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5 +// +// RUN: %clang -nostdlib -target x86_64-darwin -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP +// RUN: %clang -nostdlib -target x86_64-darwin -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP +// RUN: %clang -nostdlib -target x86_64-darwin -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5 +// +// RUN: %clang -target x86_64-netbsd -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP +// RUN: %clang -target x86_64-netbsd -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP +// RUN: %clang -target x86_64-netbsd -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5 +// +// RUN: %clang -nostdlib -target x86_64-freebsd -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP +// RUN: %clang -nostdlib -target x86_64-freebsd -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP +// RUN: %clang -nostdlib -target x86_64-freebsd -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5 +// +// RUN: %clang -nostdlib -target x86_64-netbsd -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP +// RUN: %clang -nostdlib -target x86_64-netbsd -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP +// RUN: %clang -nostdlib -target x86_64-netbsd -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5 +// // CHECK-LD-OMP: "{{.*}}ld{{(.exe)?}}" // CHECK-LD-OMP: "-lomp" // // CHECK-LD-GOMP: "{{.*}}ld{{(.exe)?}}" // CHECK-LD-GOMP: "-lgomp" // // CHECK-LD-IOMP5: "{{.*}}ld{{(.exe)?}}" // CHECK-LD-IOMP5: "-liomp5" // +// CHECK-NO-OMP: "{{.*}}ld{{(.exe)?}}" +// CHECK-NO-OMP-NOT: "-lomp" +// +// CHECK-NO-GOMP: "{{.*}}ld{{(.exe)?}}" +// CHECK-NO-GOMP-NOT: "-lgomp" +// +// CHECK-NO-IOMP5: "{{.*}}ld{{(.exe)?}}" +// CHECK-NO-IOMP5-NOT: "-liomp5" +// // We'd like to check that the default is sane, but until we have the ability // to *always* semantically analyze OpenMP without always generating runtime // calls (in the event of an unsupported runtime), we don't have a good way to // test the CC1 invocation. Instead, just ensure we do eventually link *some* // OpenMP runtime. 
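[Editorial note: a minimal program showing why some runtime must end up on the link line; every parallel construct lowers to calls into the selected OpenMP library:]

    #include <stdio.h>

    int main(void) {
      #pragma omp parallel
      printf("hello from one of the threads\n");
      return 0;
    }

    // e.g. "clang -fopenmp=libomp omp.c" compiles the pragma with -fopenmp and
    // appends -lomp at link time, which is what the RUN lines in this test verify.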
// // RUN: %clang -target x86_64-linux-gnu -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY +// RUN: %clang -target x86_64-darwin -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY +// RUN: %clang -target x86_64-freebsd -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY +// RUN: %clang -target x86_64-netbsd -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY // // CHECK-LD-ANY: "{{.*}}ld{{(.exe)?}}" // CHECK-LD-ANY: "-l{{(omp|gomp|iomp5)}}" Index: vendor/clang/dist/test/Sema/attr-self-alias.c =================================================================== --- vendor/clang/dist/test/Sema/attr-self-alias.c (nonexistent) +++ vendor/clang/dist/test/Sema/attr-self-alias.c (revision 292728) @@ -0,0 +1,4 @@ +// RUN: %clang_cc1 -triple x86_64-pc-linux -fsyntax-only -verify -emit-llvm-only %s + +int self_alias(void) __attribute__((weak, alias("self_alias"))); // expected-error {{alias definition is part of a cycle}} + Property changes on: vendor/clang/dist/test/Sema/attr-self-alias.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/clang/dist/test/SemaCXX/delete.cpp =================================================================== --- vendor/clang/dist/test/SemaCXX/delete.cpp (revision 292727) +++ vendor/clang/dist/test/SemaCXX/delete.cpp (revision 292728) @@ -1,130 +1,146 @@ // Test without PCH // RUN: %clang_cc1 -fsyntax-only -include %S/delete-mismatch.h -fdiagnostics-parseable-fixits -std=c++11 %s 2>&1 | FileCheck %s // Test with PCH // RUN: %clang_cc1 -x c++-header -std=c++11 -emit-pch -o %t %S/delete-mismatch.h // RUN: %clang_cc1 -std=c++11 -include-pch %t -DWITH_PCH -fsyntax-only -verify %s -ast-dump void f(int a[10][20]) { delete a; // expected-warning {{'delete' applied to a pointer-to-array type}} // CHECK: fix-it:"{{.*}}":{[[@LINE-1]]:9-[[@LINE-1]]:9}:"[]" } namespace MemberCheck { struct S { int *a = new int[5]; // expected-note4 {{allocated with 'new[]' here}} int *b; int *c; static int *d; S(); S(int); ~S() { delete a; // expected-warning {{'delete' applied to a pointer that was allocated with 'new[]'; did you mean 'delete[]'?}} delete b; // expected-warning {{'delete' applied to a pointer that was allocated with 'new[]'; did you mean 'delete[]'?}} delete[] c; // expected-warning {{'delete[]' applied to a pointer that was allocated with 'new'; did you mean 'delete'?}} } void f(); }; void S::f() { delete a; // expected-warning {{'delete' applied to a pointer that was allocated with 'new[]'; did you mean 'delete[]'?}} delete b; // expected-warning {{'delete' applied to a pointer that was allocated with 'new[]'; did you mean 'delete[]'?}} } S::S() : b(new int[1]), c(new int) {} // expected-note3 {{allocated with 'new[]' here}} // expected-note@-1 {{allocated with 'new' here}} S::S(int i) : b(new int[i]), c(new int) {} // expected-note3 {{allocated with 'new[]' here}} // expected-note@-1 {{allocated with 'new' here}} struct S2 : S { ~S2() { delete a; // expected-warning {{'delete' applied to a pointer that was allocated with 'new[]'; did you mean 'delete[]'?}} } }; int *S::d = new int[42]; // expected-note {{allocated with 'new[]' here}} void f(S *s) { int *a = new int[1]; // expected-note {{allocated with 'new[]' here}} delete a; // 
expected-warning {{'delete' applied to a pointer that was allocated with 'new[]'; did you mean 'delete[]'?}} delete s->a; // expected-warning {{'delete' applied to a pointer that was allocated with 'new[]'; did you mean 'delete[]'?}} delete s->b; // expected-warning {{'delete' applied to a pointer that was allocated with 'new[]'; did you mean 'delete[]'?}} delete s->c; delete s->d; delete S::d; // expected-warning {{'delete' applied to a pointer that was allocated with 'new[]'; did you mean 'delete[]'?}} } // At least one constructor initializes field with matching form of 'new'. struct MatchingNewIsOK { int *p; bool is_array_; MatchingNewIsOK() : p{new int}, is_array_(false) {} explicit MatchingNewIsOK(unsigned c) : p{new int[c]}, is_array_(true) {} ~MatchingNewIsOK() { if (is_array_) delete[] p; else delete p; } }; // At least one constructor's body is missing; no proof of mismatch. struct CantProve_MissingCtorDefinition { int *p; CantProve_MissingCtorDefinition(); CantProve_MissingCtorDefinition(int); ~CantProve_MissingCtorDefinition(); }; CantProve_MissingCtorDefinition::CantProve_MissingCtorDefinition() : p(new int) { } CantProve_MissingCtorDefinition::~CantProve_MissingCtorDefinition() { delete[] p; } struct base {}; struct derived : base {}; struct InitList { base *p, *p2 = nullptr, *p3{nullptr}, *p4; InitList(unsigned c) : p(new derived[c]), p4(nullptr) {} // expected-note {{allocated with 'new[]' here}} InitList(unsigned c, unsigned) : p{new derived[c]}, p4{nullptr} {} // expected-note {{allocated with 'new[]' here}} ~InitList() { delete p; // expected-warning {{'delete' applied to a pointer that was allocated with 'new[]'; did you mean 'delete[]'?}} delete [] p; delete p2; delete [] p3; delete p4; } }; } namespace NonMemberCheck { #define DELETE_ARRAY(x) delete[] (x) #define DELETE(x) delete (x) void f() { int *a = new int(5); // expected-note2 {{allocated with 'new' here}} delete[] a; // expected-warning {{'delete[]' applied to a pointer that was allocated with 'new'; did you mean 'delete'?}} int *b = new int; delete b; int *c{new int}; // expected-note {{allocated with 'new' here}} int *d{new int[1]}; // expected-note2 {{allocated with 'new[]' here}} delete [ ] c; // expected-warning {{'delete[]' applied to a pointer that was allocated with 'new'; did you mean 'delete'?}} // CHECK: fix-it:"{{.*}}":{[[@LINE-1]]:9-[[@LINE-1]]:17}:"" delete d; // expected-warning {{'delete' applied to a pointer that was allocated with 'new[]'; did you mean 'delete[]'?}} // CHECK: fix-it:"{{.*}}":{[[@LINE-1]]:9-[[@LINE-1]]:9}:"[]" DELETE_ARRAY(a); // expected-warning {{'delete[]' applied to a pointer that was allocated with 'new'; did you mean 'delete'?}} DELETE(d); // expected-warning {{'delete' applied to a pointer that was allocated with 'new[]'; did you mean 'delete[]'?}} } } + +namespace MissingInitializer { +template +struct Base { + struct S { + const T *p1 = nullptr; + const T *p2 = new T[3]; + }; +}; + +void null_init(Base::S s) { + delete s.p1; + delete s.p2; +} +} + #ifndef WITH_PCH pch_test::X::X() : a(new int[1]) // expected-note{{allocated with 'new[]' here}} { } pch_test::X::X(int i) : a(new int[i]) // expected-note{{allocated with 'new[]' here}} { } #endif
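[Editorial note: the core pattern these tests exercise, in isolation; 'Holder' is illustrative. The MissingInitializer case added in this revision additionally checks that fields of a not-yet-instantiated class template do not confuse the checker:]

    struct Holder {
      int *many = new int[8];  // note: allocated with 'new[]' here
      ~Holder() {
        delete many;           // warning: 'delete' applied to a pointer that was
                               // allocated with 'new[]'; the fix-it inserts "[]"
      }
    };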