diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTContext.h b/contrib/llvm-project/clang/include/clang/AST/ASTContext.h index 9020e6629d08..3700d0101daf 100644 --- a/contrib/llvm-project/clang/include/clang/AST/ASTContext.h +++ b/contrib/llvm-project/clang/include/clang/AST/ASTContext.h @@ -1,3158 +1,3166 @@ //===- ASTContext.h - Context to hold long-lived AST nodes ------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// Defines the clang::ASTContext interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_ASTCONTEXT_H #define LLVM_CLANG_AST_ASTCONTEXT_H #include "clang/AST/ASTContextAllocate.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/CanonicalType.h" #include "clang/AST/CommentCommandTraits.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclBase.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/RawCommentList.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/Basic/AddressSpaces.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Linkage.h" #include "clang/Basic/OperatorKinds.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/SanitizerBlacklist.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/XRayLists.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/None.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/PointerUnion.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/ADT/Triple.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/AlignOf.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TypeSize.h" #include #include #include #include #include #include #include #include #include namespace llvm { struct fltSemantics; template class SmallPtrSet; } // namespace llvm namespace clang { class APFixedPoint; class APValue; class ASTMutationListener; class ASTRecordLayout; class AtomicExpr; class BlockExpr; class BuiltinTemplateDecl; class CharUnits; class ConceptDecl; class CXXABI; class CXXConstructorDecl; class CXXMethodDecl; class CXXRecordDecl; class DiagnosticsEngine; class ParentMapContext; class DynTypedNode; class DynTypedNodeList; class Expr; class FixedPointSemantics; class GlobalDecl; class MangleContext; class MangleNumberingContext; class MaterializeTemporaryExpr; class MemberSpecializationInfo; class Module; struct MSGuidDeclParts; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; class ObjCMethodDecl; class ObjCPropertyDecl; class 
ObjCPropertyImplDecl; class ObjCProtocolDecl; class ObjCTypeParamDecl; class OMPTraitInfo; struct ParsedTargetAttr; class Preprocessor; class Stmt; class StoredDeclsMap; class TargetAttr; class TargetInfo; class TemplateDecl; class TemplateParameterList; class TemplateTemplateParmDecl; class TemplateTypeParmDecl; class UnresolvedSetIterator; class UsingShadowDecl; class VarTemplateDecl; class VTableContextBase; struct BlockVarCopyInit; namespace Builtin { class Context; } // namespace Builtin enum BuiltinTemplateKind : int; enum OpenCLTypeKind : uint8_t; namespace comments { class FullComment; } // namespace comments namespace interp { class Context; } // namespace interp namespace serialization { template class AbstractTypeReader; } // namespace serialization struct TypeInfo { uint64_t Width = 0; unsigned Align = 0; bool AlignIsRequired : 1; TypeInfo() : AlignIsRequired(false) {} TypeInfo(uint64_t Width, unsigned Align, bool AlignIsRequired) : Width(Width), Align(Align), AlignIsRequired(AlignIsRequired) {} }; /// Holds long-lived AST nodes (such as types and decls) that can be /// referred to throughout the semantic analysis of a file. class ASTContext : public RefCountedBase { friend class NestedNameSpecifier; mutable SmallVector Types; mutable llvm::FoldingSet ExtQualNodes; mutable llvm::FoldingSet ComplexTypes; mutable llvm::FoldingSet PointerTypes; mutable llvm::FoldingSet AdjustedTypes; mutable llvm::FoldingSet BlockPointerTypes; mutable llvm::FoldingSet LValueReferenceTypes; mutable llvm::FoldingSet RValueReferenceTypes; mutable llvm::FoldingSet MemberPointerTypes; mutable llvm::ContextualFoldingSet ConstantArrayTypes; mutable llvm::FoldingSet IncompleteArrayTypes; mutable std::vector VariableArrayTypes; mutable llvm::FoldingSet DependentSizedArrayTypes; mutable llvm::FoldingSet DependentSizedExtVectorTypes; mutable llvm::FoldingSet DependentAddressSpaceTypes; mutable llvm::FoldingSet VectorTypes; mutable llvm::FoldingSet DependentVectorTypes; mutable llvm::FoldingSet MatrixTypes; mutable llvm::FoldingSet DependentSizedMatrixTypes; mutable llvm::FoldingSet FunctionNoProtoTypes; mutable llvm::ContextualFoldingSet FunctionProtoTypes; mutable llvm::FoldingSet DependentTypeOfExprTypes; mutable llvm::FoldingSet DependentDecltypeTypes; mutable llvm::FoldingSet TemplateTypeParmTypes; mutable llvm::FoldingSet ObjCTypeParamTypes; mutable llvm::FoldingSet SubstTemplateTypeParmTypes; mutable llvm::FoldingSet SubstTemplateTypeParmPackTypes; mutable llvm::ContextualFoldingSet TemplateSpecializationTypes; mutable llvm::FoldingSet ParenTypes; mutable llvm::FoldingSet ElaboratedTypes; mutable llvm::FoldingSet DependentNameTypes; mutable llvm::ContextualFoldingSet DependentTemplateSpecializationTypes; llvm::FoldingSet PackExpansionTypes; mutable llvm::FoldingSet ObjCObjectTypes; mutable llvm::FoldingSet ObjCObjectPointerTypes; mutable llvm::FoldingSet DependentUnaryTransformTypes; mutable llvm::ContextualFoldingSet AutoTypes; mutable llvm::FoldingSet DeducedTemplateSpecializationTypes; mutable llvm::FoldingSet AtomicTypes; llvm::FoldingSet AttributedTypes; mutable llvm::FoldingSet PipeTypes; mutable llvm::FoldingSet ExtIntTypes; mutable llvm::FoldingSet DependentExtIntTypes; mutable llvm::FoldingSet QualifiedTemplateNames; mutable llvm::FoldingSet DependentTemplateNames; mutable llvm::FoldingSet SubstTemplateTemplateParms; mutable llvm::ContextualFoldingSet SubstTemplateTemplateParmPacks; /// The set of nested name specifiers. /// /// This set is managed by the NestedNameSpecifier class. 
mutable llvm::FoldingSet NestedNameSpecifiers; mutable NestedNameSpecifier *GlobalNestedNameSpecifier = nullptr; /// A cache mapping from RecordDecls to ASTRecordLayouts. /// /// This is lazily created. This is intentionally not serialized. mutable llvm::DenseMap ASTRecordLayouts; mutable llvm::DenseMap ObjCLayouts; /// A cache from types to size and alignment information. using TypeInfoMap = llvm::DenseMap; mutable TypeInfoMap MemoizedTypeInfo; /// A cache from types to unadjusted alignment information. Only ARM and /// AArch64 targets need this information, keeping it separate prevents /// imposing overhead on TypeInfo size. using UnadjustedAlignMap = llvm::DenseMap; mutable UnadjustedAlignMap MemoizedUnadjustedAlign; /// A cache mapping from CXXRecordDecls to key functions. llvm::DenseMap KeyFunctions; /// Mapping from ObjCContainers to their ObjCImplementations. llvm::DenseMap ObjCImpls; /// Mapping from ObjCMethod to its duplicate declaration in the same /// interface. llvm::DenseMap ObjCMethodRedecls; /// Mapping from __block VarDecls to BlockVarCopyInit. llvm::DenseMap BlockVarCopyInits; /// Mapping from GUIDs to the corresponding MSGuidDecl. mutable llvm::FoldingSet MSGuidDecls; /// Used to cleanups APValues stored in the AST. mutable llvm::SmallVector APValueCleanups; /// A cache mapping a string value to a StringLiteral object with the same /// value. /// /// This is lazily created. This is intentionally not serialized. mutable llvm::StringMap StringLiteralCache; /// Representation of a "canonical" template template parameter that /// is used in canonical template names. class CanonicalTemplateTemplateParm : public llvm::FoldingSetNode { TemplateTemplateParmDecl *Parm; public: CanonicalTemplateTemplateParm(TemplateTemplateParmDecl *Parm) : Parm(Parm) {} TemplateTemplateParmDecl *getParam() const { return Parm; } void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &C) { Profile(ID, C, Parm); } static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &C, TemplateTemplateParmDecl *Parm); }; mutable llvm::ContextualFoldingSet CanonTemplateTemplateParms; TemplateTemplateParmDecl * getCanonicalTemplateTemplateParmDecl(TemplateTemplateParmDecl *TTP) const; /// The typedef for the __int128_t type. mutable TypedefDecl *Int128Decl = nullptr; /// The typedef for the __uint128_t type. mutable TypedefDecl *UInt128Decl = nullptr; /// The typedef for the target specific predefined /// __builtin_va_list type. mutable TypedefDecl *BuiltinVaListDecl = nullptr; /// The typedef for the predefined \c __builtin_ms_va_list type. mutable TypedefDecl *BuiltinMSVaListDecl = nullptr; /// The typedef for the predefined \c id type. mutable TypedefDecl *ObjCIdDecl = nullptr; /// The typedef for the predefined \c SEL type. mutable TypedefDecl *ObjCSelDecl = nullptr; /// The typedef for the predefined \c Class type. mutable TypedefDecl *ObjCClassDecl = nullptr; /// The typedef for the predefined \c Protocol class in Objective-C. mutable ObjCInterfaceDecl *ObjCProtocolClassDecl = nullptr; /// The typedef for the predefined 'BOOL' type. mutable TypedefDecl *BOOLDecl = nullptr; // Typedefs which may be provided defining the structure of Objective-C // pseudo-builtins QualType ObjCIdRedefinitionType; QualType ObjCClassRedefinitionType; QualType ObjCSelRedefinitionType; /// The identifier 'bool'. mutable IdentifierInfo *BoolName = nullptr; /// The identifier 'NSObject'. mutable IdentifierInfo *NSObjectName = nullptr; /// The identifier 'NSCopying'. 
IdentifierInfo *NSCopyingName = nullptr; /// The identifier '__make_integer_seq'. mutable IdentifierInfo *MakeIntegerSeqName = nullptr; /// The identifier '__type_pack_element'. mutable IdentifierInfo *TypePackElementName = nullptr; QualType ObjCConstantStringType; mutable RecordDecl *CFConstantStringTagDecl = nullptr; mutable TypedefDecl *CFConstantStringTypeDecl = nullptr; mutable QualType ObjCSuperType; QualType ObjCNSStringType; /// The typedef declaration for the Objective-C "instancetype" type. TypedefDecl *ObjCInstanceTypeDecl = nullptr; /// The type for the C FILE type. TypeDecl *FILEDecl = nullptr; /// The type for the C jmp_buf type. TypeDecl *jmp_bufDecl = nullptr; /// The type for the C sigjmp_buf type. TypeDecl *sigjmp_bufDecl = nullptr; /// The type for the C ucontext_t type. TypeDecl *ucontext_tDecl = nullptr; /// Type for the Block descriptor for Blocks CodeGen. /// /// Since this is only used for generation of debug info, it is not /// serialized. mutable RecordDecl *BlockDescriptorType = nullptr; /// Type for the Block descriptor for Blocks CodeGen. /// /// Since this is only used for generation of debug info, it is not /// serialized. mutable RecordDecl *BlockDescriptorExtendedType = nullptr; /// Declaration for the CUDA cudaConfigureCall function. FunctionDecl *cudaConfigureCallDecl = nullptr; /// Keeps track of all declaration attributes. /// /// Since so few decls have attrs, we keep them in a hash map instead of /// wasting space in the Decl class. llvm::DenseMap DeclAttrs; /// A mapping from non-redeclarable declarations in modules that were /// merged with other declarations to the canonical declaration that they were /// merged into. llvm::DenseMap MergedDecls; /// A mapping from a defining declaration to a list of modules (other /// than the owning module of the declaration) that contain merged /// definitions of that entity. llvm::DenseMap> MergedDefModules; /// Initializers for a module, in order. Each Decl will be either /// something that has a semantic effect on startup (such as a variable with /// a non-constant initializer), or an ImportDecl (which recursively triggers /// initialization of another module). struct PerModuleInitializers { llvm::SmallVector Initializers; llvm::SmallVector LazyInitializers; void resolve(ASTContext &Ctx); }; llvm::DenseMap ModuleInitializers; ASTContext &this_() { return *this; } public: /// A type synonym for the TemplateOrInstantiation mapping. using TemplateOrSpecializationInfo = llvm::PointerUnion; private: friend class ASTDeclReader; friend class ASTReader; friend class ASTWriter; template friend class serialization::AbstractTypeReader; friend class CXXRecordDecl; /// A mapping to contain the template or declaration that /// a variable declaration describes or was instantiated from, /// respectively. /// /// For non-templates, this value will be NULL. For variable /// declarations that describe a variable template, this will be a /// pointer to a VarTemplateDecl. For static data members /// of class template specializations, this will be the /// MemberSpecializationInfo referring to the member variable that was /// instantiated or specialized. Thus, the mapping will keep track of /// the static data member templates from which static data members of /// class template specializations were instantiated. 
/// /// Given the following example: /// /// \code /// template /// struct X { /// static T value; /// }; /// /// template /// T X::value = T(17); /// /// int *x = &X::value; /// \endcode /// /// This mapping will contain an entry that maps from the VarDecl for /// X::value to the corresponding VarDecl for X::value (within the /// class template X) and will be marked TSK_ImplicitInstantiation. llvm::DenseMap TemplateOrInstantiation; /// Keeps track of the declaration from which a using declaration was /// created during instantiation. /// /// The source and target declarations are always a UsingDecl, an /// UnresolvedUsingValueDecl, or an UnresolvedUsingTypenameDecl. /// /// For example: /// \code /// template /// struct A { /// void f(); /// }; /// /// template /// struct B : A { /// using A::f; /// }; /// /// template struct B; /// \endcode /// /// This mapping will contain an entry that maps from the UsingDecl in /// B to the UnresolvedUsingDecl in B. llvm::DenseMap InstantiatedFromUsingDecl; llvm::DenseMap InstantiatedFromUsingShadowDecl; llvm::DenseMap InstantiatedFromUnnamedFieldDecl; /// Mapping that stores the methods overridden by a given C++ /// member function. /// /// Since most C++ member functions aren't virtual and therefore /// don't override anything, we store the overridden functions in /// this map on the side rather than within the CXXMethodDecl structure. using CXXMethodVector = llvm::TinyPtrVector; llvm::DenseMap OverriddenMethods; /// Mapping from each declaration context to its corresponding /// mangling numbering context (used for constructs like lambdas which /// need to be consistently numbered for the mangler). llvm::DenseMap> MangleNumberingContexts; llvm::DenseMap> ExtraMangleNumberingContexts; /// Side-table of mangling numbers for declarations which rarely /// need them (like static local vars). llvm::MapVector MangleNumbers; llvm::MapVector StaticLocalNumbers; /// Mapping that stores parameterIndex values for ParmVarDecls when /// that value exceeds the bitfield size of ParmVarDeclBits.ParameterIndex. using ParameterIndexTable = llvm::DenseMap; ParameterIndexTable ParamIndices; ImportDecl *FirstLocalImport = nullptr; ImportDecl *LastLocalImport = nullptr; TranslationUnitDecl *TUDecl; mutable ExternCContextDecl *ExternCContext = nullptr; mutable BuiltinTemplateDecl *MakeIntegerSeqDecl = nullptr; mutable BuiltinTemplateDecl *TypePackElementDecl = nullptr; /// The associated SourceManager object. SourceManager &SourceMgr; /// The language options used to create the AST associated with /// this ASTContext object. LangOptions &LangOpts; /// Blacklist object that is used by sanitizers to decide which /// entities should not be instrumented. std::unique_ptr SanitizerBL; /// Function filtering mechanism to determine whether a given function /// should be imbued with the XRay "always" or "never" attributes. std::unique_ptr XRayFilter; /// The allocator used to create AST objects. /// /// AST objects are never destructed; rather, all memory associated with the /// AST objects will be released when the ASTContext itself is destroyed. mutable llvm::BumpPtrAllocator BumpAlloc; /// Allocator for partial diagnostics. PartialDiagnostic::StorageAllocator DiagAllocator; /// The current C++ ABI. std::unique_ptr ABI; CXXABI *createCXXABI(const TargetInfo &T); /// The logical -> physical address space map. const LangASMap *AddrSpaceMap = nullptr; /// Address space map mangling must be used with language specific /// address spaces (e.g. 
OpenCL/CUDA) bool AddrSpaceMapMangling; const TargetInfo *Target = nullptr; const TargetInfo *AuxTarget = nullptr; clang::PrintingPolicy PrintingPolicy; std::unique_ptr InterpContext; std::unique_ptr ParentMapCtx; public: IdentifierTable &Idents; SelectorTable &Selectors; Builtin::Context &BuiltinInfo; mutable DeclarationNameTable DeclarationNames; IntrusiveRefCntPtr ExternalSource; ASTMutationListener *Listener = nullptr; /// Returns the clang bytecode interpreter context. interp::Context &getInterpContext(); /// Returns the dynamic AST node parent map context. ParentMapContext &getParentMapContext(); // A traversal scope limits the parts of the AST visible to certain analyses. // RecursiveASTVisitor::TraverseAST will only visit reachable nodes, and // getParents() will only observe reachable parent edges. // // The scope is defined by a set of "top-level" declarations. // Initially, it is the entire TU: {getTranslationUnitDecl()}. // Changing the scope clears the parent cache, which is expensive to rebuild. std::vector getTraversalScope() const { return TraversalScope; } void setTraversalScope(const std::vector &); /// Forwards to get node parents from the ParentMapContext. New callers should /// use ParentMapContext::getParents() directly. template DynTypedNodeList getParents(const NodeT &Node); const clang::PrintingPolicy &getPrintingPolicy() const { return PrintingPolicy; } void setPrintingPolicy(const clang::PrintingPolicy &Policy) { PrintingPolicy = Policy; } SourceManager& getSourceManager() { return SourceMgr; } const SourceManager& getSourceManager() const { return SourceMgr; } llvm::BumpPtrAllocator &getAllocator() const { return BumpAlloc; } void *Allocate(size_t Size, unsigned Align = 8) const { return BumpAlloc.Allocate(Size, Align); } template T *Allocate(size_t Num = 1) const { return static_cast(Allocate(Num * sizeof(T), alignof(T))); } void Deallocate(void *Ptr) const {} /// Return the total amount of physical memory allocated for representing /// AST nodes and type information. size_t getASTAllocatedMemory() const { return BumpAlloc.getTotalMemory(); } /// Return the total memory used for various side tables. size_t getSideTableAllocatedMemory() const; PartialDiagnostic::StorageAllocator &getDiagAllocator() { return DiagAllocator; } const TargetInfo &getTargetInfo() const { return *Target; } const TargetInfo *getAuxTargetInfo() const { return AuxTarget; } /// getIntTypeForBitwidth - /// sets integer QualTy according to specified details: /// bitwidth, signed/unsigned. /// Returns empty type if there is no appropriate target types. QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const; /// getRealTypeForBitwidth - /// sets floating point QualTy according to specified bitwidth. /// Returns empty type if there is no appropriate target types. QualType getRealTypeForBitwidth(unsigned DestWidth, bool ExplicitIEEE) const; bool AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const; const LangOptions& getLangOpts() const { return LangOpts; } const SanitizerBlacklist &getSanitizerBlacklist() const { return *SanitizerBL; } const XRayFunctionFilter &getXRayFilter() const { return *XRayFilter; } DiagnosticsEngine &getDiagnostics() const; FullSourceLoc getFullLoc(SourceLocation Loc) const { return FullSourceLoc(Loc,SourceMgr); } /// All comments in this translation unit. RawCommentList Comments; /// True if comments are already loaded from ExternalASTSource. mutable bool CommentsLoaded = false; /// Mapping from declaration to directly attached comment. 
/// /// Raw comments are owned by Comments list. This mapping is populated /// lazily. mutable llvm::DenseMap DeclRawComments; /// Mapping from canonical declaration to the first redeclaration in chain /// that has a comment attached. /// /// Raw comments are owned by Comments list. This mapping is populated /// lazily. mutable llvm::DenseMap RedeclChainComments; /// Keeps track of redeclaration chains that don't have any comment attached. /// Mapping from canonical declaration to redeclaration chain that has no /// comments attached to any redeclaration. Specifically it's mapping to /// the last redeclaration we've checked. /// /// Shall not contain declarations that have comments attached to any /// redeclaration in their chain. mutable llvm::DenseMap CommentlessRedeclChains; /// Mapping from declarations to parsed comments attached to any /// redeclaration. mutable llvm::DenseMap ParsedComments; /// Attaches \p Comment to \p OriginalD and to its redeclaration chain /// and removes the redeclaration chain from the set of commentless chains. /// /// Don't do anything if a comment has already been attached to \p OriginalD /// or its redeclaration chain. void cacheRawCommentForDecl(const Decl &OriginalD, const RawComment &Comment) const; /// \returns searches \p CommentsInFile for doc comment for \p D. /// /// \p RepresentativeLocForDecl is used as a location for searching doc /// comments. \p CommentsInFile is a mapping offset -> comment of files in the /// same file where \p RepresentativeLocForDecl is. RawComment *getRawCommentForDeclNoCacheImpl( const Decl *D, const SourceLocation RepresentativeLocForDecl, const std::map &CommentsInFile) const; /// Return the documentation comment attached to a given declaration, /// without looking into cache. RawComment *getRawCommentForDeclNoCache(const Decl *D) const; public: void addComment(const RawComment &RC); /// Return the documentation comment attached to a given declaration. /// Returns nullptr if no comment is attached. /// /// \param OriginalDecl if not nullptr, is set to declaration AST node that /// had the comment, if the comment we found comes from a redeclaration. const RawComment * getRawCommentForAnyRedecl(const Decl *D, const Decl **OriginalDecl = nullptr) const; /// Searches existing comments for doc comments that should be attached to \p /// Decls. If any doc comment is found, it is parsed. /// /// Requirement: All \p Decls are in the same file. /// /// If the last comment in the file is already attached we assume /// there are not comments left to be attached to \p Decls. void attachCommentsToJustParsedDecls(ArrayRef Decls, const Preprocessor *PP); /// Return parsed documentation comment attached to a given declaration. /// Returns nullptr if no comment is attached. /// /// \param PP the Preprocessor used with this TU. Could be nullptr if /// preprocessor is not available. comments::FullComment *getCommentForDecl(const Decl *D, const Preprocessor *PP) const; /// Return parsed documentation comment attached to a given declaration. /// Returns nullptr if no comment is attached. Does not look at any /// redeclarations of the declaration. comments::FullComment *getLocalCommentForDeclUncached(const Decl *D) const; comments::FullComment *cloneFullComment(comments::FullComment *FC, const Decl *D) const; private: mutable comments::CommandTraits CommentCommandTraits; /// Iterator that visits import declarations. 
class import_iterator { ImportDecl *Import = nullptr; public: using value_type = ImportDecl *; using reference = ImportDecl *; using pointer = ImportDecl *; using difference_type = int; using iterator_category = std::forward_iterator_tag; import_iterator() = default; explicit import_iterator(ImportDecl *Import) : Import(Import) {} reference operator*() const { return Import; } pointer operator->() const { return Import; } import_iterator &operator++() { Import = ASTContext::getNextLocalImport(Import); return *this; } import_iterator operator++(int) { import_iterator Other(*this); ++(*this); return Other; } friend bool operator==(import_iterator X, import_iterator Y) { return X.Import == Y.Import; } friend bool operator!=(import_iterator X, import_iterator Y) { return X.Import != Y.Import; } }; public: comments::CommandTraits &getCommentCommandTraits() const { return CommentCommandTraits; } /// Retrieve the attributes for the given declaration. AttrVec& getDeclAttrs(const Decl *D); /// Erase the attributes corresponding to the given declaration. void eraseDeclAttrs(const Decl *D); /// If this variable is an instantiated static data member of a /// class template specialization, returns the templated static data member /// from which it was instantiated. // FIXME: Remove ? MemberSpecializationInfo *getInstantiatedFromStaticDataMember( const VarDecl *Var); TemplateOrSpecializationInfo getTemplateOrSpecializationInfo(const VarDecl *Var); /// Note that the static data member \p Inst is an instantiation of /// the static data member template \p Tmpl of a class template. void setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl, TemplateSpecializationKind TSK, SourceLocation PointOfInstantiation = SourceLocation()); void setTemplateOrSpecializationInfo(VarDecl *Inst, TemplateOrSpecializationInfo TSI); /// If the given using decl \p Inst is an instantiation of a /// (possibly unresolved) using decl from a template instantiation, /// return it. NamedDecl *getInstantiatedFromUsingDecl(NamedDecl *Inst); /// Remember that the using decl \p Inst is an instantiation /// of the using decl \p Pattern of a class template. void setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern); void setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst, UsingShadowDecl *Pattern); UsingShadowDecl *getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst); FieldDecl *getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field); void setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, FieldDecl *Tmpl); // Access to the set of methods overridden by the given C++ method. using overridden_cxx_method_iterator = CXXMethodVector::const_iterator; overridden_cxx_method_iterator overridden_methods_begin(const CXXMethodDecl *Method) const; overridden_cxx_method_iterator overridden_methods_end(const CXXMethodDecl *Method) const; unsigned overridden_methods_size(const CXXMethodDecl *Method) const; using overridden_method_range = llvm::iterator_range; overridden_method_range overridden_methods(const CXXMethodDecl *Method) const; /// Note that the given C++ \p Method overrides the given \p /// Overridden method. void addOverriddenMethod(const CXXMethodDecl *Method, const CXXMethodDecl *Overridden); /// Return C++ or ObjC overridden methods for the given \p Method. /// /// An ObjC method is considered to override any method in the class's /// base classes, its protocols, or its categories' protocols, that has /// the same selector and is of the same kind (class or instance). 
/// A method in an implementation is not considered as overriding the same /// method in the interface or its categories. void getOverriddenMethods( const NamedDecl *Method, SmallVectorImpl &Overridden) const; /// Notify the AST context that a new import declaration has been /// parsed or implicitly created within this translation unit. void addedLocalImportDecl(ImportDecl *Import); static ImportDecl *getNextLocalImport(ImportDecl *Import) { return Import->getNextLocalImport(); } using import_range = llvm::iterator_range; import_range local_imports() const { return import_range(import_iterator(FirstLocalImport), import_iterator()); } Decl *getPrimaryMergedDecl(Decl *D) { Decl *Result = MergedDecls.lookup(D); return Result ? Result : D; } void setPrimaryMergedDecl(Decl *D, Decl *Primary) { MergedDecls[D] = Primary; } /// Note that the definition \p ND has been merged into module \p M, /// and should be visible whenever \p M is visible. void mergeDefinitionIntoModule(NamedDecl *ND, Module *M, bool NotifyListeners = true); /// Clean up the merged definition list. Call this if you might have /// added duplicates into the list. void deduplicateMergedDefinitonsFor(NamedDecl *ND); /// Get the additional modules in which the definition \p Def has /// been merged. ArrayRef getModulesWithMergedDefinition(const NamedDecl *Def); /// Add a declaration to the list of declarations that are initialized /// for a module. This will typically be a global variable (with internal /// linkage) that runs module initializers, such as the iostream initializer, /// or an ImportDecl nominating another module that has initializers. void addModuleInitializer(Module *M, Decl *Init); void addLazyModuleInitializers(Module *M, ArrayRef IDs); /// Get the initializations to perform when importing a module, if any. ArrayRef getModuleInitializers(Module *M); TranslationUnitDecl *getTranslationUnitDecl() const { return TUDecl; } ExternCContextDecl *getExternCContextDecl() const; BuiltinTemplateDecl *getMakeIntegerSeqDecl() const; BuiltinTemplateDecl *getTypePackElementDecl() const; // Builtin Types. CanQualType VoidTy; CanQualType BoolTy; CanQualType CharTy; CanQualType WCharTy; // [C++ 3.9.1p5]. CanQualType WideCharTy; // Same as WCharTy in C++, integer type in C99. CanQualType WIntTy; // [C99 7.24.1], integer type unchanged by default promotions. CanQualType Char8Ty; // [C++20 proposal] CanQualType Char16Ty; // [C++0x 3.9.1p5], integer type in C99. CanQualType Char32Ty; // [C++0x 3.9.1p5], integer type in C99. 
CanQualType SignedCharTy, ShortTy, IntTy, LongTy, LongLongTy, Int128Ty; CanQualType UnsignedCharTy, UnsignedShortTy, UnsignedIntTy, UnsignedLongTy; CanQualType UnsignedLongLongTy, UnsignedInt128Ty; CanQualType FloatTy, DoubleTy, LongDoubleTy, Float128Ty; CanQualType ShortAccumTy, AccumTy, LongAccumTy; // ISO/IEC JTC1 SC22 WG14 N1169 Extension CanQualType UnsignedShortAccumTy, UnsignedAccumTy, UnsignedLongAccumTy; CanQualType ShortFractTy, FractTy, LongFractTy; CanQualType UnsignedShortFractTy, UnsignedFractTy, UnsignedLongFractTy; CanQualType SatShortAccumTy, SatAccumTy, SatLongAccumTy; CanQualType SatUnsignedShortAccumTy, SatUnsignedAccumTy, SatUnsignedLongAccumTy; CanQualType SatShortFractTy, SatFractTy, SatLongFractTy; CanQualType SatUnsignedShortFractTy, SatUnsignedFractTy, SatUnsignedLongFractTy; CanQualType HalfTy; // [OpenCL 6.1.1.1], ARM NEON CanQualType BFloat16Ty; CanQualType Float16Ty; // C11 extension ISO/IEC TS 18661-3 CanQualType FloatComplexTy, DoubleComplexTy, LongDoubleComplexTy; CanQualType Float128ComplexTy; CanQualType VoidPtrTy, NullPtrTy; CanQualType DependentTy, OverloadTy, BoundMemberTy, UnknownAnyTy; CanQualType BuiltinFnTy; CanQualType PseudoObjectTy, ARCUnbridgedCastTy; CanQualType ObjCBuiltinIdTy, ObjCBuiltinClassTy, ObjCBuiltinSelTy; CanQualType ObjCBuiltinBoolTy; #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ CanQualType SingletonId; #include "clang/Basic/OpenCLImageTypes.def" CanQualType OCLSamplerTy, OCLEventTy, OCLClkEventTy; CanQualType OCLQueueTy, OCLReserveIDTy; CanQualType IncompleteMatrixIdxTy; CanQualType OMPArraySectionTy, OMPArrayShapingTy, OMPIteratorTy; #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ CanQualType Id##Ty; #include "clang/Basic/OpenCLExtensionTypes.def" #define SVE_TYPE(Name, Id, SingletonId) \ CanQualType SingletonId; #include "clang/Basic/AArch64SVEACLETypes.def" // Types for deductions in C++0x [stmt.ranged]'s desugaring. Built on demand. mutable QualType AutoDeductTy; // Deduction against 'auto'. mutable QualType AutoRRefDeductTy; // Deduction against 'auto &&'. // Decl used to help define __builtin_va_list for some targets. // The decl is built when constructing 'BuiltinVaListDecl'. mutable Decl *VaListTagDecl = nullptr; // Implicitly-declared type 'struct _GUID'. mutable TagDecl *MSGuidTagDecl = nullptr; ASTContext(LangOptions &LOpts, SourceManager &SM, IdentifierTable &idents, SelectorTable &sels, Builtin::Context &builtins); ASTContext(const ASTContext &) = delete; ASTContext &operator=(const ASTContext &) = delete; ~ASTContext(); /// Attach an external AST source to the AST context. /// /// The external AST source provides the ability to load parts of /// the abstract syntax tree as needed from some external storage, /// e.g., a precompiled header. void setExternalSource(IntrusiveRefCntPtr Source); /// Retrieve a pointer to the external AST source associated /// with this AST context, if any. ExternalASTSource *getExternalSource() const { return ExternalSource.get(); } /// Attach an AST mutation listener to the AST context. /// /// The AST mutation listener provides the ability to track modifications to /// the abstract syntax tree entities committed after they were initially /// created. void setASTMutationListener(ASTMutationListener *Listener) { this->Listener = Listener; } /// Retrieve a pointer to the AST mutation listener associated /// with this AST context, if any. 
ASTMutationListener *getASTMutationListener() const { return Listener; } void PrintStats() const; const SmallVectorImpl& getTypes() const { return Types; } BuiltinTemplateDecl *buildBuiltinTemplateDecl(BuiltinTemplateKind BTK, const IdentifierInfo *II) const; /// Create a new implicit TU-level CXXRecordDecl or RecordDecl /// declaration. RecordDecl *buildImplicitRecord(StringRef Name, RecordDecl::TagKind TK = TTK_Struct) const; /// Create a new implicit TU-level typedef declaration. TypedefDecl *buildImplicitTypedef(QualType T, StringRef Name) const; /// Retrieve the declaration for the 128-bit signed integer type. TypedefDecl *getInt128Decl() const; /// Retrieve the declaration for the 128-bit unsigned integer type. TypedefDecl *getUInt128Decl() const; //===--------------------------------------------------------------------===// // Type Constructors //===--------------------------------------------------------------------===// private: /// Return a type with extended qualifiers. QualType getExtQualType(const Type *Base, Qualifiers Quals) const; QualType getTypeDeclTypeSlow(const TypeDecl *Decl) const; QualType getPipeType(QualType T, bool ReadOnly) const; public: /// Return the uniqued reference to the type for an address space /// qualified type with the specified type and address space. /// /// The resulting type has a union of the qualifiers from T and the address /// space. If T already has an address space specifier, it is silently /// replaced. QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const; /// Remove any existing address space on the type and returns the type /// with qualifiers intact (or that's the idea anyway) /// /// The return type should be T with all prior qualifiers minus the address /// space. QualType removeAddrSpaceQualType(QualType T) const; /// Apply Objective-C protocol qualifiers to the given type. /// \param allowOnPointerType specifies if we can apply protocol /// qualifiers on ObjCObjectPointerType. It can be set to true when /// constructing the canonical type of a Objective-C type parameter. QualType applyObjCProtocolQualifiers(QualType type, ArrayRef protocols, bool &hasError, bool allowOnPointerType = false) const; /// Return the uniqued reference to the type for an Objective-C /// gc-qualified type. /// /// The resulting type has a union of the qualifiers from T and the gc /// attribute. QualType getObjCGCQualType(QualType T, Qualifiers::GC gcAttr) const; /// Remove the existing address space on the type if it is a pointer size /// address space and return the type with qualifiers intact. QualType removePtrSizeAddrSpace(QualType T) const; /// Return the uniqued reference to the type for a \c restrict /// qualified type. /// /// The resulting type has a union of the qualifiers from \p T and /// \c restrict. QualType getRestrictType(QualType T) const { return T.withFastQualifiers(Qualifiers::Restrict); } /// Return the uniqued reference to the type for a \c volatile /// qualified type. /// /// The resulting type has a union of the qualifiers from \p T and /// \c volatile. QualType getVolatileType(QualType T) const { return T.withFastQualifiers(Qualifiers::Volatile); } /// Return the uniqued reference to the type for a \c const /// qualified type. /// /// The resulting type has a union of the qualifiers from \p T and \c const. /// /// It can be reasonably expected that this will always be equivalent to /// calling T.withConst(). QualType getConstType(QualType T) const { return T.withConst(); } /// Change the ExtInfo on a function type. 
const FunctionType *adjustFunctionType(const FunctionType *Fn, FunctionType::ExtInfo EInfo); /// Adjust the given function result type. CanQualType getCanonicalFunctionResultType(QualType ResultType) const; /// Change the result type of a function type once it is deduced. void adjustDeducedFunctionResultType(FunctionDecl *FD, QualType ResultType); /// Get a function type and produce the equivalent function type with the /// specified exception specification. Type sugar that can be present on a /// declaration of a function with an exception specification is permitted /// and preserved. Other type sugar (for instance, typedefs) is not. QualType getFunctionTypeWithExceptionSpec( QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine whether two function types are the same, ignoring /// exception specifications in cases where they're part of the type. bool hasSameFunctionTypeIgnoringExceptionSpec(QualType T, QualType U); /// Change the exception specification on a function once it is /// delay-parsed, instantiated, or computed. void adjustExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, bool AsWritten = false); /// Get a function type and produce the equivalent function type where /// pointer size address spaces in the return type and parameter tyeps are /// replaced with the default address space. QualType getFunctionTypeWithoutPtrSizes(QualType T); /// Determine whether two function types are the same, ignoring pointer sizes /// in the return type and parameter types. bool hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U); /// Return the uniqued reference to the type for a complex /// number with the specified element type. QualType getComplexType(QualType T) const; CanQualType getComplexType(CanQualType T) const { return CanQualType::CreateUnsafe(getComplexType((QualType) T)); } /// Return the uniqued reference to the type for a pointer to /// the specified type. QualType getPointerType(QualType T) const; CanQualType getPointerType(CanQualType T) const { return CanQualType::CreateUnsafe(getPointerType((QualType) T)); } /// Return the uniqued reference to a type adjusted from the original /// type to a new type. QualType getAdjustedType(QualType Orig, QualType New) const; CanQualType getAdjustedType(CanQualType Orig, CanQualType New) const { return CanQualType::CreateUnsafe( getAdjustedType((QualType)Orig, (QualType)New)); } /// Return the uniqued reference to the decayed version of the given /// type. Can only be called on array and function types which decay to /// pointer types. QualType getDecayedType(QualType T) const; CanQualType getDecayedType(CanQualType T) const { return CanQualType::CreateUnsafe(getDecayedType((QualType) T)); } /// Return the uniqued reference to the atomic type for the specified /// type. QualType getAtomicType(QualType T) const; /// Return the uniqued reference to the type for a block of the /// specified type. QualType getBlockPointerType(QualType T) const; /// Gets the struct used to keep track of the descriptor for pointer to /// blocks. QualType getBlockDescriptorType() const; /// Return a read_only pipe type for the specified type. QualType getReadPipeType(QualType T) const; /// Return a write_only pipe type for the specified type. QualType getWritePipeType(QualType T) const; /// Return an extended integer type with the specified signedness and bit /// count. 
QualType getExtIntType(bool Unsigned, unsigned NumBits) const; /// Return a dependent extended integer type with the specified signedness and /// bit count. QualType getDependentExtIntType(bool Unsigned, Expr *BitsExpr) const; /// Gets the struct used to keep track of the extended descriptor for /// pointer to blocks. QualType getBlockDescriptorExtendedType() const; /// Map an AST Type to an OpenCLTypeKind enum value. OpenCLTypeKind getOpenCLTypeKind(const Type *T) const; /// Get address space for OpenCL type. LangAS getOpenCLTypeAddrSpace(const Type *T) const; void setcudaConfigureCallDecl(FunctionDecl *FD) { cudaConfigureCallDecl = FD; } FunctionDecl *getcudaConfigureCallDecl() { return cudaConfigureCallDecl; } /// Returns true iff we need copy/dispose helpers for the given type. bool BlockRequiresCopying(QualType Ty, const VarDecl *D); /// Returns true, if given type has a known lifetime. HasByrefExtendedLayout /// is set to false in this case. If HasByrefExtendedLayout returns true, /// byref variable has extended lifetime. bool getByrefLifetime(QualType Ty, Qualifiers::ObjCLifetime &Lifetime, bool &HasByrefExtendedLayout) const; /// Return the uniqued reference to the type for an lvalue reference /// to the specified type. QualType getLValueReferenceType(QualType T, bool SpelledAsLValue = true) const; /// Return the uniqued reference to the type for an rvalue reference /// to the specified type. QualType getRValueReferenceType(QualType T) const; /// Return the uniqued reference to the type for a member pointer to /// the specified type in the specified class. /// /// The class \p Cls is a \c Type because it could be a dependent name. QualType getMemberPointerType(QualType T, const Type *Cls) const; /// Return a non-unique reference to the type for a variable array of /// the specified element type. QualType getVariableArrayType(QualType EltTy, Expr *NumElts, ArrayType::ArraySizeModifier ASM, unsigned IndexTypeQuals, SourceRange Brackets) const; /// Return a non-unique reference to the type for a dependently-sized /// array of the specified element type. /// /// FIXME: We will need these to be uniqued, or at least comparable, at some /// point. QualType getDependentSizedArrayType(QualType EltTy, Expr *NumElts, ArrayType::ArraySizeModifier ASM, unsigned IndexTypeQuals, SourceRange Brackets) const; /// Return a unique reference to the type for an incomplete array of /// the specified element type. QualType getIncompleteArrayType(QualType EltTy, ArrayType::ArraySizeModifier ASM, unsigned IndexTypeQuals) const; /// Return the unique reference to the type for a constant array of /// the specified element type. QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArrayType::ArraySizeModifier ASM, unsigned IndexTypeQuals) const; /// Return a type for a constant array for a string literal of the /// specified element type and length. QualType getStringLiteralArrayType(QualType EltTy, unsigned Length) const; /// Returns a vla type where known sizes are replaced with [*]. QualType getVariableArrayDecayedType(QualType Ty) const; // Convenience struct to return information about a builtin vector type. 
struct BuiltinVectorTypeInfo { QualType ElementType; llvm::ElementCount EC; unsigned NumVectors; BuiltinVectorTypeInfo(QualType ElementType, llvm::ElementCount EC, unsigned NumVectors) : ElementType(ElementType), EC(EC), NumVectors(NumVectors) {} }; /// Returns the element type, element count and number of vectors /// (in case of tuple) for a builtin vector type. BuiltinVectorTypeInfo getBuiltinVectorTypeInfo(const BuiltinType *VecTy) const; /// Return the unique reference to a scalable vector type of the specified /// element type and scalable number of elements. /// /// \pre \p EltTy must be a built-in type. QualType getScalableVectorType(QualType EltTy, unsigned NumElts) const; /// Return the unique reference to a vector type of the specified /// element type and size. /// /// \pre \p VectorType must be a built-in type. QualType getVectorType(QualType VectorType, unsigned NumElts, VectorType::VectorKind VecKind) const; /// Return the unique reference to the type for a dependently sized vector of /// the specified element type. QualType getDependentVectorType(QualType VectorType, Expr *SizeExpr, SourceLocation AttrLoc, VectorType::VectorKind VecKind) const; /// Return the unique reference to an extended vector type /// of the specified element type and size. /// /// \pre \p VectorType must be a built-in type. QualType getExtVectorType(QualType VectorType, unsigned NumElts) const; /// \pre Return a non-unique reference to the type for a dependently-sized /// vector of the specified element type. /// /// FIXME: We will need these to be uniqued, or at least comparable, at some /// point. QualType getDependentSizedExtVectorType(QualType VectorType, Expr *SizeExpr, SourceLocation AttrLoc) const; /// Return the unique reference to the matrix type of the specified element /// type and size /// /// \pre \p ElementType must be a valid matrix element type (see /// MatrixType::isValidElementType). QualType getConstantMatrixType(QualType ElementType, unsigned NumRows, unsigned NumColumns) const; /// Return the unique reference to the matrix type of the specified element /// type and size QualType getDependentSizedMatrixType(QualType ElementType, Expr *RowExpr, Expr *ColumnExpr, SourceLocation AttrLoc) const; QualType getDependentAddressSpaceType(QualType PointeeType, Expr *AddrSpaceExpr, SourceLocation AttrLoc) const; /// Return a K&R style C function type like 'int()'. QualType getFunctionNoProtoType(QualType ResultTy, const FunctionType::ExtInfo &Info) const; QualType getFunctionNoProtoType(QualType ResultTy) const { return getFunctionNoProtoType(ResultTy, FunctionType::ExtInfo()); } /// Return a normal function type with a typed argument list. QualType getFunctionType(QualType ResultTy, ArrayRef Args, const FunctionProtoType::ExtProtoInfo &EPI) const { return getFunctionTypeInternal(ResultTy, Args, EPI, false); } QualType adjustStringLiteralBaseType(QualType StrLTy) const; private: /// Return a normal function type with a typed argument list. QualType getFunctionTypeInternal(QualType ResultTy, ArrayRef Args, const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const; public: /// Return the unique reference to the type for the specified type /// declaration. 
  QualType getTypeDeclType(const TypeDecl *Decl,
                           const TypeDecl *PrevDecl = nullptr) const {
    assert(Decl && "Passed null for Decl param");
    if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

    if (PrevDecl) {
      assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
      Decl->TypeForDecl = PrevDecl->TypeForDecl;
      return QualType(PrevDecl->TypeForDecl, 0);
    }

    return getTypeDeclTypeSlow(Decl);
  }

  /// Return the unique reference to the type for the specified
  /// typedef-name decl.
  QualType getTypedefType(const TypedefNameDecl *Decl,
                          QualType Canon = QualType()) const;

  QualType getRecordType(const RecordDecl *Decl) const;

  QualType getEnumType(const EnumDecl *Decl) const;

  QualType getInjectedClassNameType(CXXRecordDecl *Decl, QualType TST) const;

  QualType getAttributedType(attr::Kind attrKind, QualType modifiedType,
                             QualType equivalentType);

  QualType getSubstTemplateTypeParmType(const TemplateTypeParmType *Replaced,
                                        QualType Replacement) const;
  QualType getSubstTemplateTypeParmPackType(
      const TemplateTypeParmType *Replaced, const TemplateArgument &ArgPack);

  QualType getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                   bool ParameterPack,
                                   TemplateTypeParmDecl *ParmDecl = nullptr) const;

  QualType getTemplateSpecializationType(TemplateName T,
                                         ArrayRef<TemplateArgument> Args,
                                         QualType Canon = QualType()) const;

  QualType
  getCanonicalTemplateSpecializationType(TemplateName T,
                                         ArrayRef<TemplateArgument> Args) const;

  QualType getTemplateSpecializationType(TemplateName T,
                                         const TemplateArgumentListInfo &Args,
                                         QualType Canon = QualType()) const;

  TypeSourceInfo *
  getTemplateSpecializationTypeInfo(TemplateName T, SourceLocation TLoc,
                                    const TemplateArgumentListInfo &Args,
                                    QualType Canon = QualType()) const;

  QualType getParenType(QualType NamedType) const;

  QualType getMacroQualifiedType(QualType UnderlyingTy,
                                 const IdentifierInfo *MacroII) const;

  QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                             NestedNameSpecifier *NNS, QualType NamedType,
                             TagDecl *OwnedTagDecl = nullptr) const;
  QualType getDependentNameType(ElaboratedTypeKeyword Keyword,
                                NestedNameSpecifier *NNS,
                                const IdentifierInfo *Name,
                                QualType Canon = QualType()) const;

  QualType getDependentTemplateSpecializationType(
      ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
      const IdentifierInfo *Name,
      const TemplateArgumentListInfo &Args) const;
  QualType getDependentTemplateSpecializationType(
      ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
      const IdentifierInfo *Name, ArrayRef<TemplateArgument> Args) const;

  TemplateArgument getInjectedTemplateArg(NamedDecl *ParamDecl);

  /// Get a template argument list with one argument per template parameter
  /// in a template parameter list, such as for the injected class name of
  /// a class template.
  void getInjectedTemplateArgs(const TemplateParameterList *Params,
                               SmallVectorImpl<TemplateArgument> &Args);

+  /// Form a pack expansion type with the given pattern.
+  /// \param NumExpansions The number of expansions for the pack, if known.
+  /// \param ExpectPackInType If \c false, we should not expect \p Pattern to
+  ///        contain an unexpanded pack. This only makes sense if the pack
+  ///        expansion is used in a context where the arity is inferred from
+  ///        elsewhere, such as if the pattern contains a placeholder type or
+  ///        if this is the canonical type of another pack expansion type.
  QualType getPackExpansionType(QualType Pattern,
-                                Optional<unsigned> NumExpansions);
+                                Optional<unsigned> NumExpansions,
+                                bool ExpectPackInType = true);

  QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
                                ObjCInterfaceDecl *PrevDecl = nullptr) const;

  /// Legacy interface: cannot provide type arguments or __kindof.
  QualType getObjCObjectType(QualType Base,
                             ObjCProtocolDecl * const *Protocols,
                             unsigned NumProtocols) const;

  QualType getObjCObjectType(QualType Base, ArrayRef<QualType> typeArgs,
                             ArrayRef<ObjCProtocolDecl *> protocols,
                             bool isKindOf) const;

  QualType getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                ArrayRef<ObjCProtocolDecl *> protocols) const;
  void adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
                                    ObjCTypeParamDecl *New) const;

  bool ObjCObjectAdoptsQTypeProtocols(QualType QT, ObjCInterfaceDecl *Decl);

  /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
  /// QT's qualified-id protocol list adopt all protocols in IDecl's list
  /// of protocols.
  bool QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
                                            ObjCInterfaceDecl *IDecl);

  /// Return a ObjCObjectPointerType type for the given ObjCObjectType.
  QualType getObjCObjectPointerType(QualType OIT) const;

  /// GCC extension.
  QualType getTypeOfExprType(Expr *e) const;
  QualType getTypeOfType(QualType t) const;

  /// C++11 decltype.
  QualType getDecltypeType(Expr *e, QualType UnderlyingType) const;

  /// Unary type transforms
  QualType getUnaryTransformType(QualType BaseType, QualType UnderlyingType,
                                 UnaryTransformType::UTTKind UKind) const;

  /// C++11 deduced auto type.
  QualType getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
                       bool IsDependent, bool IsPack = false,
                       ConceptDecl *TypeConstraintConcept = nullptr,
                       ArrayRef<TemplateArgument> TypeConstraintArgs = {}) const;

  /// C++11 deduction pattern for 'auto' type.
  QualType getAutoDeductType() const;

  /// C++11 deduction pattern for 'auto &&' type.
  QualType getAutoRRefDeductType() const;

  /// C++17 deduced class template specialization type.
  QualType getDeducedTemplateSpecializationType(TemplateName Template,
                                                QualType DeducedType,
                                                bool IsDependent) const;

  /// Return the unique reference to the type for the specified TagDecl
  /// (struct/union/class/enum) decl.
  QualType getTagDeclType(const TagDecl *Decl) const;

  /// Return the unique type for "size_t" (C99 7.17), defined in
  /// <stddef.h>.
  ///
  /// The sizeof operator requires this (C99 6.5.3.4p4).
  CanQualType getSizeType() const;

  /// Return the unique signed counterpart of
  /// the integer type corresponding to size_t.
  CanQualType getSignedSizeType() const;

  /// Return the unique type for "intmax_t" (C99 7.18.1.5), defined in
  /// <stdint.h>.
  CanQualType getIntMaxType() const;

  /// Return the unique type for "uintmax_t" (C99 7.18.1.5), defined in
  /// <stdint.h>.
  CanQualType getUIntMaxType() const;

  /// Return the unique wchar_t type available in C++ (and available as
  /// __wchar_t as a Microsoft extension).
  QualType getWCharType() const { return WCharTy; }

  /// Return the type of wide characters. In C++, this returns the
  /// unique wchar_t type. In C99, this returns a type compatible with the type
  /// defined in <stddef.h> as defined by the target.
  QualType getWideCharType() const { return WideCharTy; }

  /// Return the type of "signed wchar_t".
  ///
  /// Used when in C++, as a GCC extension.
  QualType getSignedWCharType() const;

  /// Return the type of "unsigned wchar_t".
  ///
  /// Used when in C++, as a GCC extension.
  QualType getUnsignedWCharType() const;

  /// In C99, this returns a type compatible with the type
  /// defined in <wchar.h> as defined by the target.
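  // -------------------------------------------------------------------------
  // Editorial aside, not part of the patch: a minimal, hypothetical caller
  // sketch for the new ExpectPackInType parameter added to
  // getPackExpansionType() above. The helper name and its setup are invented;
  // only the getPackExpansionType() signature comes from this header.
  //
  // #include "clang/AST/ASTContext.h"
  // #include "llvm/ADT/None.h"
  //
  // clang::QualType buildInferredPackExpansion(clang::ASTContext &Ctx,
  //                                            clang::QualType Pattern) {
  //   // Here Pattern is assumed to be a placeholder type, so it contains no
  //   // unexpanded parameter pack. Passing ExpectPackInType = false tells
  //   // the context not to expect one; the arity is left unknown by passing
  //   // NumExpansions = llvm::None.
  //   return Ctx.getPackExpansionType(Pattern, /*NumExpansions=*/llvm::None,
  //                                   /*ExpectPackInType=*/false);
  // }
  // -------------------------------------------------------------------------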
QualType getWIntType() const { return WIntTy; } /// Return a type compatible with "intptr_t" (C99 7.18.1.4), /// as defined by the target. QualType getIntPtrType() const; /// Return a type compatible with "uintptr_t" (C99 7.18.1.4), /// as defined by the target. QualType getUIntPtrType() const; /// Return the unique type for "ptrdiff_t" (C99 7.17) defined in /// . Pointer - pointer requires this (C99 6.5.6p9). QualType getPointerDiffType() const; /// Return the unique unsigned counterpart of "ptrdiff_t" /// integer type. The standard (C11 7.21.6.1p7) refers to this type /// in the definition of %tu format specifier. QualType getUnsignedPointerDiffType() const; /// Return the unique type for "pid_t" defined in /// . We need this to compute the correct type for vfork(). QualType getProcessIDType() const; /// Return the C structure type used to represent constant CFStrings. QualType getCFConstantStringType() const; /// Returns the C struct type for objc_super QualType getObjCSuperType() const; void setObjCSuperType(QualType ST) { ObjCSuperType = ST; } /// Get the structure type used to representation CFStrings, or NULL /// if it hasn't yet been built. QualType getRawCFConstantStringType() const { if (CFConstantStringTypeDecl) return getTypedefType(CFConstantStringTypeDecl); return QualType(); } void setCFConstantStringType(QualType T); TypedefDecl *getCFConstantStringDecl() const; RecordDecl *getCFConstantStringTagDecl() const; // This setter/getter represents the ObjC type for an NSConstantString. void setObjCConstantStringInterface(ObjCInterfaceDecl *Decl); QualType getObjCConstantStringInterface() const { return ObjCConstantStringType; } QualType getObjCNSStringType() const { return ObjCNSStringType; } void setObjCNSStringType(QualType T) { ObjCNSStringType = T; } /// Retrieve the type that \c id has been defined to, which may be /// different from the built-in \c id if \c id has been typedef'd. QualType getObjCIdRedefinitionType() const { if (ObjCIdRedefinitionType.isNull()) return getObjCIdType(); return ObjCIdRedefinitionType; } /// Set the user-written type that redefines \c id. void setObjCIdRedefinitionType(QualType RedefType) { ObjCIdRedefinitionType = RedefType; } /// Retrieve the type that \c Class has been defined to, which may be /// different from the built-in \c Class if \c Class has been typedef'd. QualType getObjCClassRedefinitionType() const { if (ObjCClassRedefinitionType.isNull()) return getObjCClassType(); return ObjCClassRedefinitionType; } /// Set the user-written type that redefines 'SEL'. void setObjCClassRedefinitionType(QualType RedefType) { ObjCClassRedefinitionType = RedefType; } /// Retrieve the type that 'SEL' has been defined to, which may be /// different from the built-in 'SEL' if 'SEL' has been typedef'd. QualType getObjCSelRedefinitionType() const { if (ObjCSelRedefinitionType.isNull()) return getObjCSelType(); return ObjCSelRedefinitionType; } /// Set the user-written type that redefines 'SEL'. void setObjCSelRedefinitionType(QualType RedefType) { ObjCSelRedefinitionType = RedefType; } /// Retrieve the identifier 'NSObject'. IdentifierInfo *getNSObjectName() const { if (!NSObjectName) { NSObjectName = &Idents.get("NSObject"); } return NSObjectName; } /// Retrieve the identifier 'NSCopying'. IdentifierInfo *getNSCopyingName() { if (!NSCopyingName) { NSCopyingName = &Idents.get("NSCopying"); } return NSCopyingName; } CanQualType getNSUIntegerType() const; CanQualType getNSIntegerType() const; /// Retrieve the identifier 'bool'. 
IdentifierInfo *getBoolName() const { if (!BoolName) BoolName = &Idents.get("bool"); return BoolName; } IdentifierInfo *getMakeIntegerSeqName() const { if (!MakeIntegerSeqName) MakeIntegerSeqName = &Idents.get("__make_integer_seq"); return MakeIntegerSeqName; } IdentifierInfo *getTypePackElementName() const { if (!TypePackElementName) TypePackElementName = &Idents.get("__type_pack_element"); return TypePackElementName; } /// Retrieve the Objective-C "instancetype" type, if already known; /// otherwise, returns a NULL type; QualType getObjCInstanceType() { return getTypeDeclType(getObjCInstanceTypeDecl()); } /// Retrieve the typedef declaration corresponding to the Objective-C /// "instancetype" type. TypedefDecl *getObjCInstanceTypeDecl(); /// Set the type for the C FILE type. void setFILEDecl(TypeDecl *FILEDecl) { this->FILEDecl = FILEDecl; } /// Retrieve the C FILE type. QualType getFILEType() const { if (FILEDecl) return getTypeDeclType(FILEDecl); return QualType(); } /// Set the type for the C jmp_buf type. void setjmp_bufDecl(TypeDecl *jmp_bufDecl) { this->jmp_bufDecl = jmp_bufDecl; } /// Retrieve the C jmp_buf type. QualType getjmp_bufType() const { if (jmp_bufDecl) return getTypeDeclType(jmp_bufDecl); return QualType(); } /// Set the type for the C sigjmp_buf type. void setsigjmp_bufDecl(TypeDecl *sigjmp_bufDecl) { this->sigjmp_bufDecl = sigjmp_bufDecl; } /// Retrieve the C sigjmp_buf type. QualType getsigjmp_bufType() const { if (sigjmp_bufDecl) return getTypeDeclType(sigjmp_bufDecl); return QualType(); } /// Set the type for the C ucontext_t type. void setucontext_tDecl(TypeDecl *ucontext_tDecl) { this->ucontext_tDecl = ucontext_tDecl; } /// Retrieve the C ucontext_t type. QualType getucontext_tType() const { if (ucontext_tDecl) return getTypeDeclType(ucontext_tDecl); return QualType(); } /// The result type of logical operations, '<', '>', '!=', etc. QualType getLogicalOperationType() const { return getLangOpts().CPlusPlus ? BoolTy : IntTy; } /// Emit the Objective-CC type encoding for the given type \p T into /// \p S. /// /// If \p Field is specified then record field names are also encoded. void getObjCEncodingForType(QualType T, std::string &S, const FieldDecl *Field=nullptr, QualType *NotEncodedT=nullptr) const; /// Emit the Objective-C property type encoding for the given /// type \p T into \p S. void getObjCEncodingForPropertyType(QualType T, std::string &S) const; void getLegacyIntegralTypeEncoding(QualType &t) const; /// Put the string version of the type qualifiers \p QT into \p S. void getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, std::string &S) const; /// Emit the encoded type for the function \p Decl into \p S. /// /// This is in the same format as Objective-C method encodings. /// /// \returns true if an error occurred (e.g., because one of the parameter /// types is incomplete), false otherwise. std::string getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const; /// Emit the encoded type for the method declaration \p Decl into /// \p S. std::string getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, bool Extended = false) const; /// Return the encoded type for this block declaration. std::string getObjCEncodingForBlock(const BlockExpr *blockExpr) const; /// getObjCEncodingForPropertyDecl - Return the encoded type for /// this method declaration. If non-NULL, Container must be either /// an ObjCCategoryImplDecl or ObjCImplementationDecl; it should /// only be NULL when getting encodings for protocol properties. 
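  /// Illustrative usage sketch, not part of this header: the result type of
  /// relational and logical operators differs between C and C++. \c Ctx is a
  /// hypothetical ASTContext reference.
  /// \code
  ///   // 'a < b' is typed 'bool' in C++ and 'int' in C.
  ///   QualType CmpTy = Ctx.getLogicalOperationType();
  /// \endcode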
std::string getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, const Decl *Container) const; bool ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, ObjCProtocolDecl *rProto) const; ObjCPropertyImplDecl *getObjCPropertyImplDeclForPropertyDecl( const ObjCPropertyDecl *PD, const Decl *Container) const; /// Return the size of type \p T for Objective-C encoding purpose, /// in characters. CharUnits getObjCEncodingTypeSize(QualType T) const; /// Retrieve the typedef corresponding to the predefined \c id type /// in Objective-C. TypedefDecl *getObjCIdDecl() const; /// Represents the Objective-CC \c id type. /// /// This is set up lazily, by Sema. \c id is always a (typedef for a) /// pointer type, a pointer to a struct. QualType getObjCIdType() const { return getTypeDeclType(getObjCIdDecl()); } /// Retrieve the typedef corresponding to the predefined 'SEL' type /// in Objective-C. TypedefDecl *getObjCSelDecl() const; /// Retrieve the type that corresponds to the predefined Objective-C /// 'SEL' type. QualType getObjCSelType() const { return getTypeDeclType(getObjCSelDecl()); } /// Retrieve the typedef declaration corresponding to the predefined /// Objective-C 'Class' type. TypedefDecl *getObjCClassDecl() const; /// Represents the Objective-C \c Class type. /// /// This is set up lazily, by Sema. \c Class is always a (typedef for a) /// pointer type, a pointer to a struct. QualType getObjCClassType() const { return getTypeDeclType(getObjCClassDecl()); } /// Retrieve the Objective-C class declaration corresponding to /// the predefined \c Protocol class. ObjCInterfaceDecl *getObjCProtocolDecl() const; /// Retrieve declaration of 'BOOL' typedef TypedefDecl *getBOOLDecl() const { return BOOLDecl; } /// Save declaration of 'BOOL' typedef void setBOOLDecl(TypedefDecl *TD) { BOOLDecl = TD; } /// type of 'BOOL' type. QualType getBOOLType() const { return getTypeDeclType(getBOOLDecl()); } /// Retrieve the type of the Objective-C \c Protocol class. QualType getObjCProtoType() const { return getObjCInterfaceType(getObjCProtocolDecl()); } /// Retrieve the C type declaration corresponding to the predefined /// \c __builtin_va_list type. TypedefDecl *getBuiltinVaListDecl() const; /// Retrieve the type of the \c __builtin_va_list type. QualType getBuiltinVaListType() const { return getTypeDeclType(getBuiltinVaListDecl()); } /// Retrieve the C type declaration corresponding to the predefined /// \c __va_list_tag type used to help define the \c __builtin_va_list type /// for some targets. Decl *getVaListTagDecl() const; /// Retrieve the C type declaration corresponding to the predefined /// \c __builtin_ms_va_list type. TypedefDecl *getBuiltinMSVaListDecl() const; /// Retrieve the type of the \c __builtin_ms_va_list type. QualType getBuiltinMSVaListType() const { return getTypeDeclType(getBuiltinMSVaListDecl()); } /// Retrieve the implicitly-predeclared 'struct _GUID' declaration. TagDecl *getMSGuidTagDecl() const { return MSGuidTagDecl; } /// Retrieve the implicitly-predeclared 'struct _GUID' type. QualType getMSGuidType() const { assert(MSGuidTagDecl && "asked for GUID type but MS extensions disabled"); return getTagDeclType(MSGuidTagDecl); } /// Return whether a declaration to a builtin is allowed to be /// overloaded/redeclared. bool canBuiltinBeRedeclared(const FunctionDecl *) const; /// Return a type with additional \c const, \c volatile, or /// \c restrict qualifiers. 
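  /// Illustrative usage sketch, not part of this header: the predefined
  /// Objective-C types are set up lazily by Sema and retrieved through their
  /// typedef declarations. \c Ctx is a hypothetical ASTContext reference.
  /// \code
  ///   QualType IdTy    = Ctx.getObjCIdType();    // typedef for 'id'
  ///   QualType ClassTy = Ctx.getObjCClassType(); // typedef for 'Class'
  ///   QualType SelTy   = Ctx.getObjCSelType();   // typedef for 'SEL'
  /// \endcode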
QualType getCVRQualifiedType(QualType T, unsigned CVR) const { return getQualifiedType(T, Qualifiers::fromCVRMask(CVR)); } /// Un-split a SplitQualType. QualType getQualifiedType(SplitQualType split) const { return getQualifiedType(split.Ty, split.Quals); } /// Return a type with additional qualifiers. QualType getQualifiedType(QualType T, Qualifiers Qs) const { if (!Qs.hasNonFastQualifiers()) return T.withFastQualifiers(Qs.getFastQualifiers()); QualifierCollector Qc(Qs); const Type *Ptr = Qc.strip(T); return getExtQualType(Ptr, Qc); } /// Return a type with additional qualifiers. QualType getQualifiedType(const Type *T, Qualifiers Qs) const { if (!Qs.hasNonFastQualifiers()) return QualType(T, Qs.getFastQualifiers()); return getExtQualType(T, Qs); } /// Return a type with the given lifetime qualifier. /// /// \pre Neither type.ObjCLifetime() nor \p lifetime may be \c OCL_None. QualType getLifetimeQualifiedType(QualType type, Qualifiers::ObjCLifetime lifetime) { assert(type.getObjCLifetime() == Qualifiers::OCL_None); assert(lifetime != Qualifiers::OCL_None); Qualifiers qs; qs.addObjCLifetime(lifetime); return getQualifiedType(type, qs); } /// getUnqualifiedObjCPointerType - Returns version of /// Objective-C pointer type with lifetime qualifier removed. QualType getUnqualifiedObjCPointerType(QualType type) const { if (!type.getTypePtr()->isObjCObjectPointerType() || !type.getQualifiers().hasObjCLifetime()) return type; Qualifiers Qs = type.getQualifiers(); Qs.removeObjCLifetime(); return getQualifiedType(type.getUnqualifiedType(), Qs); } unsigned char getFixedPointScale(QualType Ty) const; unsigned char getFixedPointIBits(QualType Ty) const; FixedPointSemantics getFixedPointSemantics(QualType Ty) const; APFixedPoint getFixedPointMax(QualType Ty) const; APFixedPoint getFixedPointMin(QualType Ty) const; DeclarationNameInfo getNameForTemplate(TemplateName Name, SourceLocation NameLoc) const; TemplateName getOverloadedTemplateName(UnresolvedSetIterator Begin, UnresolvedSetIterator End) const; TemplateName getAssumedTemplateName(DeclarationName Name) const; TemplateName getQualifiedTemplateName(NestedNameSpecifier *NNS, bool TemplateKeyword, TemplateDecl *Template) const; TemplateName getDependentTemplateName(NestedNameSpecifier *NNS, const IdentifierInfo *Name) const; TemplateName getDependentTemplateName(NestedNameSpecifier *NNS, OverloadedOperatorKind Operator) const; TemplateName getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, TemplateName replacement) const; TemplateName getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, const TemplateArgument &ArgPack) const; enum GetBuiltinTypeError { /// No error GE_None, /// Missing a type GE_Missing_type, /// Missing a type from GE_Missing_stdio, /// Missing a type from GE_Missing_setjmp, /// Missing a type from GE_Missing_ucontext }; /// Return the type for the specified builtin. /// /// If \p IntegerConstantArgs is non-null, it is filled in with a bitmask of /// arguments to the builtin that are required to be integer constant /// expressions. QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs = nullptr) const; /// Types and expressions required to build C++2a three-way comparisons /// using operator<=>, including the values return by builtin <=> operators. 
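  /// Illustrative usage sketch, not part of this header: qualifiers are added
  /// through the context so that non-fast qualifiers get uniqued ExtQuals
  /// nodes. \c Ctx and \c T are hypothetical.
  /// \code
  ///   QualType ConstT = Ctx.getCVRQualifiedType(T, Qualifiers::Const);
  ///   Qualifiers Qs;
  ///   Qs.addVolatile();
  ///   QualType CVT = Ctx.getQualifiedType(ConstT, Qs); // 'const volatile T'
  /// \endcode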
ComparisonCategories CompCategories; private: CanQualType getFromTargetType(unsigned Type) const; TypeInfo getTypeInfoImpl(const Type *T) const; //===--------------------------------------------------------------------===// // Type Predicates. //===--------------------------------------------------------------------===// public: /// Return one of the GCNone, Weak or Strong Objective-C garbage /// collection attributes. Qualifiers::GC getObjCGCAttrKind(QualType Ty) const; /// Return true if the given vector types are of the same unqualified /// type or if they are equivalent to the same GCC vector type. /// /// \note This ignores whether they are target-specific (AltiVec or Neon) /// types. bool areCompatibleVectorTypes(QualType FirstVec, QualType SecondVec); /// Return true if the type has been explicitly qualified with ObjC ownership. /// A type may be implicitly qualified with ownership under ObjC ARC, and in /// some cases the compiler treats these differently. bool hasDirectOwnershipQualifier(QualType Ty) const; /// Return true if this is an \c NSObject object with its \c NSObject /// attribute set. static bool isObjCNSObjectType(QualType Ty) { return Ty->isObjCNSObjectType(); } //===--------------------------------------------------------------------===// // Type Sizing and Analysis //===--------------------------------------------------------------------===// /// Return the APFloat 'semantics' for the specified scalar floating /// point type. const llvm::fltSemantics &getFloatTypeSemantics(QualType T) const; /// Get the size and alignment of the specified complete type in bits. TypeInfo getTypeInfo(const Type *T) const; TypeInfo getTypeInfo(QualType T) const { return getTypeInfo(T.getTypePtr()); } /// Get default simd alignment of the specified complete type in bits. unsigned getOpenMPDefaultSimdAlign(QualType T) const; /// Return the size of the specified (complete) type \p T, in bits. uint64_t getTypeSize(QualType T) const { return getTypeInfo(T).Width; } uint64_t getTypeSize(const Type *T) const { return getTypeInfo(T).Width; } /// Return the size of the character type, in bits. uint64_t getCharWidth() const { return getTypeSize(CharTy); } /// Convert a size in bits to a size in characters. CharUnits toCharUnitsFromBits(int64_t BitSize) const; /// Convert a size in characters to a size in bits. int64_t toBits(CharUnits CharSize) const; /// Return the size of the specified (complete) type \p T, in /// characters. CharUnits getTypeSizeInChars(QualType T) const; CharUnits getTypeSizeInChars(const Type *T) const; Optional getTypeSizeInCharsIfKnown(QualType Ty) const { if (Ty->isIncompleteType() || Ty->isDependentType()) return None; return getTypeSizeInChars(Ty); } Optional getTypeSizeInCharsIfKnown(const Type *Ty) const { return getTypeSizeInCharsIfKnown(QualType(Ty, 0)); } /// Return the ABI-specified alignment of a (complete) type \p T, in /// bits. unsigned getTypeAlign(QualType T) const { return getTypeInfo(T).Align; } unsigned getTypeAlign(const Type *T) const { return getTypeInfo(T).Align; } /// Return the ABI-specified natural alignment of a (complete) type \p T, /// before alignment adjustments, in bits. /// /// This alignment is curently used only by ARM and AArch64 when passing /// arguments of a composite type. 
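  /// Illustrative usage sketch, not part of this header: size and alignment
  /// queries, and the conversion between bits and character units. \c Ctx and
  /// \c T are hypothetical; \c T must be a complete type.
  /// \code
  ///   uint64_t Bits   = Ctx.getTypeSize(T);        // size in bits
  ///   CharUnits Chars = Ctx.getTypeSizeInChars(T); // size in characters
  ///   // Chars matches Ctx.toCharUnitsFromBits(Bits) for complete, non-dependent T.
  ///   unsigned AlignBits = Ctx.getTypeAlign(T);    // ABI alignment in bits
  /// \endcode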
unsigned getTypeUnadjustedAlign(QualType T) const { return getTypeUnadjustedAlign(T.getTypePtr()); } unsigned getTypeUnadjustedAlign(const Type *T) const; /// Return the ABI-specified alignment of a type, in bits, or 0 if /// the type is incomplete and we cannot determine the alignment (for /// example, from alignment attributes). unsigned getTypeAlignIfKnown(QualType T) const; /// Return the ABI-specified alignment of a (complete) type \p T, in /// characters. CharUnits getTypeAlignInChars(QualType T) const; CharUnits getTypeAlignInChars(const Type *T) const; /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a type, /// in characters, before alignment adjustments. This method does not work on /// incomplete types. CharUnits getTypeUnadjustedAlignInChars(QualType T) const; CharUnits getTypeUnadjustedAlignInChars(const Type *T) const; // getTypeInfoDataSizeInChars - Return the size of a type, in chars. If the // type is a record, its data size is returned. std::pair getTypeInfoDataSizeInChars(QualType T) const; std::pair getTypeInfoInChars(const Type *T) const; std::pair getTypeInfoInChars(QualType T) const; /// Determine if the alignment the type has was required using an /// alignment attribute. bool isAlignmentRequired(const Type *T) const; bool isAlignmentRequired(QualType T) const; /// Return the "preferred" alignment of the specified type \p T for /// the current target, in bits. /// /// This can be different than the ABI alignment in cases where it is /// beneficial for performance to overalign a data type. unsigned getPreferredTypeAlign(const Type *T) const; /// Return the default alignment for __attribute__((aligned)) on /// this target, to be used if no alignment value is specified. unsigned getTargetDefaultAlignForAttributeAligned() const; /// Return the alignment in bits that should be given to a /// global variable with type \p T. unsigned getAlignOfGlobalVar(QualType T) const; /// Return the alignment in characters that should be given to a /// global variable with type \p T. CharUnits getAlignOfGlobalVarInChars(QualType T) const; /// Return a conservative estimate of the alignment of the specified /// decl \p D. /// /// \pre \p D must not be a bitfield type, as bitfields do not have a valid /// alignment. /// /// If \p ForAlignof, references are treated like their underlying type /// and large arrays don't get any special treatment. If not \p ForAlignof /// it computes the value expected by CodeGen: references are treated like /// pointers and large arrays get extra alignment. CharUnits getDeclAlign(const Decl *D, bool ForAlignof = false) const; /// Return the alignment (in bytes) of the thrown exception object. This is /// only meaningful for targets that allocate C++ exceptions in a system /// runtime, such as those using the Itanium C++ ABI. CharUnits getExnObjectAlignment() const; /// Get or compute information about the layout of the specified /// record (struct/union/class) \p D, which indicates its size and field /// position information. const ASTRecordLayout &getASTRecordLayout(const RecordDecl *D) const; /// Get or compute information about the layout of the specified /// Objective-C interface. const ASTRecordLayout &getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const; void DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS, bool Simple = false) const; /// Get or compute information about the layout of the specified /// Objective-C implementation. /// /// This may differ from the interface if synthesized ivars are present. 
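  /// Illustrative usage sketch, not part of this header: record layout is
  /// computed on demand and cached by the context. \c Ctx and \c RD are
  /// hypothetical, and the ASTRecordLayout accessors named below are assumed
  /// from that class.
  /// \code
  ///   const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
  ///   CharUnits Size  = Layout.getSize();      // overall size, including padding
  ///   CharUnits Align = Layout.getAlignment(); // required alignment
  /// \endcode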
const ASTRecordLayout & getASTObjCImplementationLayout(const ObjCImplementationDecl *D) const; /// Get our current best idea for the key function of the /// given record decl, or nullptr if there isn't one. /// /// The key function is, according to the Itanium C++ ABI section 5.2.3: /// ...the first non-pure virtual function that is not inline at the /// point of class definition. /// /// Other ABIs use the same idea. However, the ARM C++ ABI ignores /// virtual functions that are defined 'inline', which means that /// the result of this computation can change. const CXXMethodDecl *getCurrentKeyFunction(const CXXRecordDecl *RD); /// Observe that the given method cannot be a key function. /// Checks the key-function cache for the method's class and clears it /// if matches the given declaration. /// /// This is used in ABIs where out-of-line definitions marked /// inline are not considered to be key functions. /// /// \param method should be the declaration from the class definition void setNonKeyFunction(const CXXMethodDecl *method); /// Loading virtual member pointers using the virtual inheritance model /// always results in an adjustment using the vbtable even if the index is /// zero. /// /// This is usually OK because the first slot in the vbtable points /// backwards to the top of the MDC. However, the MDC might be reusing a /// vbptr from an nv-base. In this case, the first slot in the vbtable /// points to the start of the nv-base which introduced the vbptr and *not* /// the MDC. Modify the NonVirtualBaseAdjustment to account for this. CharUnits getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const; /// Get the offset of a FieldDecl or IndirectFieldDecl, in bits. uint64_t getFieldOffset(const ValueDecl *FD) const; /// Get the offset of an ObjCIvarDecl in bits. uint64_t lookupFieldBitOffset(const ObjCInterfaceDecl *OID, const ObjCImplementationDecl *ID, const ObjCIvarDecl *Ivar) const; bool isNearlyEmpty(const CXXRecordDecl *RD) const; VTableContextBase *getVTableContext(); /// If \p T is null pointer, assume the target in ASTContext. MangleContext *createMangleContext(const TargetInfo *T = nullptr); void DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, bool leafClass, SmallVectorImpl &Ivars) const; unsigned CountNonClassIvars(const ObjCInterfaceDecl *OI) const; void CollectInheritedProtocols(const Decl *CDecl, llvm::SmallPtrSet &Protocols); /// Return true if the specified type has unique object representations /// according to (C++17 [meta.unary.prop]p9) bool hasUniqueObjectRepresentations(QualType Ty) const; //===--------------------------------------------------------------------===// // Type Operators //===--------------------------------------------------------------------===// /// Return the canonical (structural) type corresponding to the /// specified potentially non-canonical type \p T. /// /// The non-canonical version of a type may have many "decorated" versions of /// types. Decorators can include typedefs, 'typeof' operators, etc. The /// returned type is guaranteed to be free of any of these, allowing two /// canonical types to be compared for exact equality with a simple pointer /// comparison. CanQualType getCanonicalType(QualType T) const { return CanQualType::CreateUnsafe(T.getCanonicalType()); } const Type *getCanonicalType(const Type *T) const { return T->getCanonicalTypeInternal().getTypePtr(); } /// Return the canonical parameter type corresponding to the specific /// potentially non-canonical one. 
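  /// Illustrative usage sketch, not part of this header: canonical types strip
  /// sugar (typedefs, typeof, etc.) so structural equality reduces to pointer
  /// equality. \c Ctx, \c T1 and \c T2 are hypothetical.
  /// \code
  ///   // Given 'typedef int MyInt;', MyInt and int share one canonical type.
  ///   if (Ctx.getCanonicalType(T1) == Ctx.getCanonicalType(T2)) {
  ///     // T1 and T2 denote the same type, ignoring sugar.
  ///   }
  /// \endcode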
/// /// Qualifiers are stripped off, functions are turned into function /// pointers, and arrays decay one level into pointers. CanQualType getCanonicalParamType(QualType T) const; /// Determine whether the given types \p T1 and \p T2 are equivalent. bool hasSameType(QualType T1, QualType T2) const { return getCanonicalType(T1) == getCanonicalType(T2); } bool hasSameType(const Type *T1, const Type *T2) const { return getCanonicalType(T1) == getCanonicalType(T2); } /// Return this type as a completely-unqualified array type, /// capturing the qualifiers in \p Quals. /// /// This will remove the minimal amount of sugaring from the types, similar /// to the behavior of QualType::getUnqualifiedType(). /// /// \param T is the qualified type, which may be an ArrayType /// /// \param Quals will receive the full set of qualifiers that were /// applied to the array. /// /// \returns if this is an array type, the completely unqualified array type /// that corresponds to it. Otherwise, returns T.getUnqualifiedType(). QualType getUnqualifiedArrayType(QualType T, Qualifiers &Quals); /// Determine whether the given types are equivalent after /// cvr-qualifiers have been removed. bool hasSameUnqualifiedType(QualType T1, QualType T2) const { return getCanonicalType(T1).getTypePtr() == getCanonicalType(T2).getTypePtr(); } bool hasSameNullabilityTypeQualifier(QualType SubT, QualType SuperT, bool IsParam) const { auto SubTnullability = SubT->getNullability(*this); auto SuperTnullability = SuperT->getNullability(*this); if (SubTnullability.hasValue() == SuperTnullability.hasValue()) { // Neither has nullability; return true if (!SubTnullability) return true; // Both have nullability qualifier. if (*SubTnullability == *SuperTnullability || *SubTnullability == NullabilityKind::Unspecified || *SuperTnullability == NullabilityKind::Unspecified) return true; if (IsParam) { // Ok for the superclass method parameter to be "nonnull" and the subclass // method parameter to be "nullable" return (*SuperTnullability == NullabilityKind::NonNull && *SubTnullability == NullabilityKind::Nullable); } else { // For the return type, it's okay for the superclass method to specify // "nullable" and the subclass method specify "nonnull" return (*SuperTnullability == NullabilityKind::Nullable && *SubTnullability == NullabilityKind::NonNull); } } return true; } bool ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl, const ObjCMethodDecl *MethodImp); bool UnwrapSimilarTypes(QualType &T1, QualType &T2); bool UnwrapSimilarArrayTypes(QualType &T1, QualType &T2); /// Determine if two types are similar, according to the C++ rules. That is, /// determine if they are the same other than qualifiers on the initial /// sequence of pointer / pointer-to-member / array (and in Clang, object /// pointer) types and their element types. /// /// Clang offers a number of qualifiers in addition to the C++ qualifiers; /// those qualifiers are also ignored in the 'similarity' check. bool hasSimilarType(QualType T1, QualType T2); /// Determine if two types are similar, ignoring only CVR qualifiers. bool hasCvrSimilarType(QualType T1, QualType T2); /// Retrieves the "canonical" nested name specifier for a /// given nested name specifier. /// /// The canonical nested name specifier is a nested name specifier /// that uniquely identifies a type or namespace within the type /// system. 
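  /// Illustrative usage sketch, not part of this header: hasSameType compares
  /// canonical types including qualifiers, while hasSameUnqualifiedType
  /// ignores cvr-qualifiers. \c Ctx is a hypothetical ASTContext reference.
  /// \code
  ///   QualType CI = Ctx.getCVRQualifiedType(Ctx.IntTy, Qualifiers::Const);
  ///   Ctx.hasSameType(CI, Ctx.IntTy);            // false: 'const int' vs 'int'
  ///   Ctx.hasSameUnqualifiedType(CI, Ctx.IntTy); // true
  /// \endcode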
For example, given: /// /// \code /// namespace N { /// struct S { /// template struct X { typename T* type; }; /// }; /// } /// /// template struct Y { /// typename N::S::X::type member; /// }; /// \endcode /// /// Here, the nested-name-specifier for N::S::X:: will be /// S::X, since 'S' and 'X' are uniquely defined /// by declarations in the type system and the canonical type for /// the template type parameter 'T' is template-param-0-0. NestedNameSpecifier * getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const; /// Retrieves the default calling convention for the current target. CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod, bool IsBuiltin = false) const; /// Retrieves the "canonical" template name that refers to a /// given template. /// /// The canonical template name is the simplest expression that can /// be used to refer to a given template. For most templates, this /// expression is just the template declaration itself. For example, /// the template std::vector can be referred to via a variety of /// names---std::vector, \::std::vector, vector (if vector is in /// scope), etc.---but all of these names map down to the same /// TemplateDecl, which is used to form the canonical template name. /// /// Dependent template names are more interesting. Here, the /// template name could be something like T::template apply or /// std::allocator::template rebind, where the nested name /// specifier itself is dependent. In this case, the canonical /// template name uses the shortest form of the dependent /// nested-name-specifier, which itself contains all canonical /// types, values, and templates. TemplateName getCanonicalTemplateName(TemplateName Name) const; /// Determine whether the given template names refer to the same /// template. bool hasSameTemplateName(TemplateName X, TemplateName Y); /// Retrieve the "canonical" template argument. /// /// The canonical template argument is the simplest template argument /// (which may be a type, value, expression, or declaration) that /// expresses the value of the argument. TemplateArgument getCanonicalTemplateArgument(const TemplateArgument &Arg) const; /// Type Query functions. If the type is an instance of the specified class, /// return the Type pointer for the underlying maximally pretty type. This /// is a member of ASTContext because this may need to do some amount of /// canonicalization, e.g. to move type qualifiers into the element type. const ArrayType *getAsArrayType(QualType T) const; const ConstantArrayType *getAsConstantArrayType(QualType T) const { return dyn_cast_or_null(getAsArrayType(T)); } const VariableArrayType *getAsVariableArrayType(QualType T) const { return dyn_cast_or_null(getAsArrayType(T)); } const IncompleteArrayType *getAsIncompleteArrayType(QualType T) const { return dyn_cast_or_null(getAsArrayType(T)); } const DependentSizedArrayType *getAsDependentSizedArrayType(QualType T) const { return dyn_cast_or_null(getAsArrayType(T)); } /// Return the innermost element type of an array type. /// /// For example, will return "int" for int[m][n] QualType getBaseElementType(const ArrayType *VAT) const; /// Return the innermost element type of a type (which needn't /// actually be an array type). QualType getBaseElementType(QualType QT) const; /// Return number of constant array elements. uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const; /// Perform adjustment on the parameter type of a function. 
/// /// This routine adjusts the given parameter type @p T to the actual /// parameter type used by semantic analysis (C99 6.7.5.3p[7,8], /// C++ [dcl.fct]p3). The adjusted parameter type is returned. QualType getAdjustedParameterType(QualType T) const; /// Retrieve the parameter type as adjusted for use in the signature /// of a function, decaying array and function types and removing top-level /// cv-qualifiers. QualType getSignatureParameterType(QualType T) const; QualType getExceptionObjectType(QualType T) const; /// Return the properly qualified result of decaying the specified /// array type to a pointer. /// /// This operation is non-trivial when handling typedefs etc. The canonical /// type of \p T must be an array type, this returns a pointer to a properly /// qualified element of the array. /// /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. QualType getArrayDecayedType(QualType T) const; /// Return the type that \p PromotableType will promote to: C99 /// 6.3.1.1p2, assuming that \p PromotableType is a promotable integer type. QualType getPromotedIntegerType(QualType PromotableType) const; /// Recurses in pointer/array types until it finds an Objective-C /// retainable type and returns its ownership. Qualifiers::ObjCLifetime getInnerObjCOwnership(QualType T) const; /// Whether this is a promotable bitfield reference according /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). /// /// \returns the type this bit-field will promote to, or NULL if no /// promotion occurs. QualType isPromotableBitField(Expr *E) const; /// Return the highest ranked integer type, see C99 6.3.1.8p1. /// /// If \p LHS > \p RHS, returns 1. If \p LHS == \p RHS, returns 0. If /// \p LHS < \p RHS, return -1. int getIntegerTypeOrder(QualType LHS, QualType RHS) const; /// Compare the rank of the two specified floating point types, /// ignoring the domain of the type (i.e. 'double' == '_Complex double'). /// /// If \p LHS > \p RHS, returns 1. If \p LHS == \p RHS, returns 0. If /// \p LHS < \p RHS, return -1. int getFloatingTypeOrder(QualType LHS, QualType RHS) const; /// Compare the rank of two floating point types as above, but compare equal /// if both types have the same floating-point semantics on the target (i.e. /// long double and double on AArch64 will return 0). int getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const; /// Return a real floating point or a complex type (based on /// \p typeDomain/\p typeSize). /// /// \param typeDomain a real floating point or complex type. /// \param typeSize a real floating point or complex type. QualType getFloatingTypeOfSizeWithinDomain(QualType typeSize, QualType typeDomain) const; unsigned getTargetAddressSpace(QualType T) const { return getTargetAddressSpace(T.getQualifiers()); } unsigned getTargetAddressSpace(Qualifiers Q) const { return getTargetAddressSpace(Q.getAddressSpace()); } unsigned getTargetAddressSpace(LangAS AS) const; LangAS getLangASForBuiltinAddressSpace(unsigned AS) const; /// Get target-dependent integer value for null pointer which is used for /// constant folding. 
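  /// Illustrative usage sketch, not part of this header: array decay and
  /// integer promotion as performed during semantic analysis. \c Ctx is
  /// hypothetical and \c ArrTy names some array type.
  /// \code
  ///   QualType Decayed  = Ctx.getArrayDecayedType(ArrTy);          // 'int[10]' -> 'int *'
  ///   QualType Promoted = Ctx.getPromotedIntegerType(Ctx.ShortTy); // 'short' -> 'int'
  /// \endcode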
uint64_t getTargetNullPointerValue(QualType QT) const; bool addressSpaceMapManglingFor(LangAS AS) const { return AddrSpaceMapMangling || isTargetAddressSpace(AS); } private: // Helper for integer ordering unsigned getIntegerRank(const Type *T) const; public: //===--------------------------------------------------------------------===// // Type Compatibility Predicates //===--------------------------------------------------------------------===// /// Compatibility predicates used to check assignment expressions. bool typesAreCompatible(QualType T1, QualType T2, bool CompareUnqualified = false); // C99 6.2.7p1 bool propertyTypesAreCompatible(QualType, QualType); bool typesAreBlockPointerCompatible(QualType, QualType); bool isObjCIdType(QualType T) const { return T == getObjCIdType(); } bool isObjCClassType(QualType T) const { return T == getObjCClassType(); } bool isObjCSelType(QualType T) const { return T == getObjCSelType(); } bool ObjCQualifiedIdTypesAreCompatible(const ObjCObjectPointerType *LHS, const ObjCObjectPointerType *RHS, bool ForCompare); bool ObjCQualifiedClassTypesAreCompatible(const ObjCObjectPointerType *LHS, const ObjCObjectPointerType *RHS); // Check the safety of assignment from LHS to RHS bool canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, const ObjCObjectPointerType *RHSOPT); bool canAssignObjCInterfaces(const ObjCObjectType *LHS, const ObjCObjectType *RHS); bool canAssignObjCInterfacesInBlockPointer( const ObjCObjectPointerType *LHSOPT, const ObjCObjectPointerType *RHSOPT, bool BlockReturnType); bool areComparableObjCPointerTypes(QualType LHS, QualType RHS); QualType areCommonBaseCompatible(const ObjCObjectPointerType *LHSOPT, const ObjCObjectPointerType *RHSOPT); bool canBindObjCObjectType(QualType To, QualType From); // Functions for calculating composite types QualType mergeTypes(QualType, QualType, bool OfBlockPointer=false, bool Unqualified = false, bool BlockReturnType = false); QualType mergeFunctionTypes(QualType, QualType, bool OfBlockPointer=false, bool Unqualified = false, bool AllowCXX = false); QualType mergeFunctionParameterTypes(QualType, QualType, bool OfBlockPointer = false, bool Unqualified = false); QualType mergeTransparentUnionType(QualType, QualType, bool OfBlockPointer=false, bool Unqualified = false); QualType mergeObjCGCQualifiers(QualType, QualType); /// This function merges the ExtParameterInfo lists of two functions. It /// returns true if the lists are compatible. The merged list is returned in /// NewParamInfos. /// /// \param FirstFnType The type of the first function. /// /// \param SecondFnType The type of the second function. /// /// \param CanUseFirst This flag is set to true if the first function's /// ExtParameterInfo list can be used as the composite list of /// ExtParameterInfo. /// /// \param CanUseSecond This flag is set to true if the second function's /// ExtParameterInfo list can be used as the composite list of /// ExtParameterInfo. /// /// \param NewParamInfos The composite list of ExtParameterInfo. The list is /// empty if none of the flags are set. /// bool mergeExtParameterInfo( const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, bool &CanUseFirst, bool &CanUseSecond, SmallVectorImpl &NewParamInfos); void ResetObjCLayout(const ObjCContainerDecl *CD); //===--------------------------------------------------------------------===// // Integer Predicates //===--------------------------------------------------------------------===// // The width of an integer, as defined in C99 6.2.6.2. 
This is the number // of bits in an integer type excluding any padding bits. unsigned getIntWidth(QualType T) const; // Per C99 6.2.5p6, for every signed integer type, there is a corresponding // unsigned integer type. This method takes a signed type, and returns the // corresponding unsigned integer type. // With the introduction of fixed point types in ISO N1169, this method also // accepts fixed point types and returns the corresponding unsigned type for // a given fixed point type. QualType getCorrespondingUnsignedType(QualType T) const; // Per ISO N1169, this method accepts fixed point types and returns the // corresponding saturated type for a given fixed point type. QualType getCorrespondingSaturatedType(QualType Ty) const; // This method accepts fixed point types and returns the corresponding signed // type. Unlike getCorrespondingUnsignedType(), this only accepts unsigned // fixed point types because there are unsigned integer types like bool and // char8_t that don't have signed equivalents. QualType getCorrespondingSignedFixedPointType(QualType Ty) const; //===--------------------------------------------------------------------===// // Integer Values //===--------------------------------------------------------------------===// /// Make an APSInt of the appropriate width and signedness for the /// given \p Value and integer \p Type. llvm::APSInt MakeIntValue(uint64_t Value, QualType Type) const { // If Type is a signed integer type larger than 64 bits, we need to be sure // to sign extend Res appropriately. llvm::APSInt Res(64, !Type->isSignedIntegerOrEnumerationType()); Res = Value; unsigned Width = getIntWidth(Type); if (Width != Res.getBitWidth()) return Res.extOrTrunc(Width); return Res; } bool isSentinelNullExpr(const Expr *E); /// Get the implementation of the ObjCInterfaceDecl \p D, or nullptr if /// none exists. ObjCImplementationDecl *getObjCImplementation(ObjCInterfaceDecl *D); /// Get the implementation of the ObjCCategoryDecl \p D, or nullptr if /// none exists. ObjCCategoryImplDecl *getObjCImplementation(ObjCCategoryDecl *D); /// Return true if there is at least one \@implementation in the TU. bool AnyObjCImplementation() { return !ObjCImpls.empty(); } /// Set the implementation of ObjCInterfaceDecl. void setObjCImplementation(ObjCInterfaceDecl *IFaceD, ObjCImplementationDecl *ImplD); /// Set the implementation of ObjCCategoryDecl. void setObjCImplementation(ObjCCategoryDecl *CatD, ObjCCategoryImplDecl *ImplD); /// Get the duplicate declaration of a ObjCMethod in the same /// interface, or null if none exists. const ObjCMethodDecl * getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const; void setObjCMethodRedeclaration(const ObjCMethodDecl *MD, const ObjCMethodDecl *Redecl); /// Returns the Objective-C interface that \p ND belongs to if it is /// an Objective-C method/property/ivar etc. that is part of an interface, /// otherwise returns null. const ObjCInterfaceDecl *getObjContainingInterface(const NamedDecl *ND) const; /// Set the copy initialization expression of a block var decl. \p CanThrow /// indicates whether the copy expression can throw or not. void setBlockVarCopyInit(const VarDecl* VD, Expr *CopyExpr, bool CanThrow); /// Get the copy initialization expression of the VarDecl \p VD, or /// nullptr if none exists. BlockVarCopyInit getBlockVarCopyInit(const VarDecl* VD) const; /// Allocate an uninitialized TypeSourceInfo. /// /// The caller should initialize the memory held by TypeSourceInfo using /// the TypeLoc wrappers. 
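  /// Illustrative usage sketch, not part of this header: MakeIntValue yields
  /// an APSInt whose width and signedness match the given integer type.
  /// \c Ctx is a hypothetical ASTContext reference.
  /// \code
  ///   llvm::APSInt V = Ctx.MakeIntValue(42, Ctx.UnsignedCharTy);
  ///   // V.getBitWidth() == Ctx.getIntWidth(Ctx.UnsignedCharTy) (typically 8)
  ///   // V.isUnsigned()  == true
  /// \endcode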
/// /// \param T the type that will be the basis for type source info. This type /// should refer to how the declarator was written in source code, not to /// what type semantic analysis resolved the declarator to. /// /// \param Size the size of the type info to create, or 0 if the size /// should be calculated based on the type. TypeSourceInfo *CreateTypeSourceInfo(QualType T, unsigned Size = 0) const; /// Allocate a TypeSourceInfo where all locations have been /// initialized to a given location, which defaults to the empty /// location. TypeSourceInfo * getTrivialTypeSourceInfo(QualType T, SourceLocation Loc = SourceLocation()) const; /// Add a deallocation callback that will be invoked when the /// ASTContext is destroyed. /// /// \param Callback A callback function that will be invoked on destruction. /// /// \param Data Pointer data that will be provided to the callback function /// when it is called. void AddDeallocation(void (*Callback)(void *), void *Data) const; /// If T isn't trivially destructible, calls AddDeallocation to register it /// for destruction. template void addDestruction(T *Ptr) const { if (!std::is_trivially_destructible::value) { auto DestroyPtr = [](void *V) { static_cast(V)->~T(); }; AddDeallocation(DestroyPtr, Ptr); } } GVALinkage GetGVALinkageForFunction(const FunctionDecl *FD) const; GVALinkage GetGVALinkageForVariable(const VarDecl *VD); /// Determines if the decl can be CodeGen'ed or deserialized from PCH /// lazily, only when used; this is only relevant for function or file scoped /// var definitions. /// /// \returns true if the function/var must be CodeGen'ed/deserialized even if /// it is not used. bool DeclMustBeEmitted(const Decl *D); /// Visits all versions of a multiversioned function with the passed /// predicate. void forEachMultiversionedFunctionVersion( const FunctionDecl *FD, llvm::function_ref Pred) const; const CXXConstructorDecl * getCopyConstructorForExceptionObject(CXXRecordDecl *RD); void addCopyConstructorForExceptionObject(CXXRecordDecl *RD, CXXConstructorDecl *CD); void addTypedefNameForUnnamedTagDecl(TagDecl *TD, TypedefNameDecl *TND); TypedefNameDecl *getTypedefNameForUnnamedTagDecl(const TagDecl *TD); void addDeclaratorForUnnamedTagDecl(TagDecl *TD, DeclaratorDecl *DD); DeclaratorDecl *getDeclaratorForUnnamedTagDecl(const TagDecl *TD); void setManglingNumber(const NamedDecl *ND, unsigned Number); unsigned getManglingNumber(const NamedDecl *ND) const; void setStaticLocalNumber(const VarDecl *VD, unsigned Number); unsigned getStaticLocalNumber(const VarDecl *VD) const; /// Retrieve the context for computing mangling numbers in the given /// DeclContext. MangleNumberingContext &getManglingNumberContext(const DeclContext *DC); enum NeedExtraManglingDecl_t { NeedExtraManglingDecl }; MangleNumberingContext &getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D); std::unique_ptr createMangleNumberingContext() const; /// Used by ParmVarDecl to store on the side the /// index of the parameter when it exceeds the size of the normal bitfield. void setParameterIndex(const ParmVarDecl *D, unsigned index); /// Used by ParmVarDecl to retrieve on the side the /// index of the parameter when it exceeds the size of the normal bitfield. unsigned getParameterIndex(const ParmVarDecl *D) const; /// Return a string representing the human readable name for the specified /// function declaration or file name. Used by SourceLocExpr and /// PredefinedExpr to cache evaluated results. 
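  /// Illustrative usage sketch, not part of this header: objects allocated in
  /// the context's arena are never freed individually, so non-trivially
  /// destructible ones should register their destructor. \c Ctx and \c MyNode
  /// are hypothetical.
  /// \code
  ///   auto *Node = new (Ctx) MyNode(/*args*/);
  ///   Ctx.addDestruction(Node); // ~MyNode() runs when the ASTContext is destroyed
  /// \endcode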
StringLiteral *getPredefinedStringLiteralFromCache(StringRef Key) const; /// Return a declaration for the global GUID object representing the given /// GUID value. MSGuidDecl *getMSGuidDecl(MSGuidDeclParts Parts) const; /// Parses the target attributes passed in, and returns only the ones that are /// valid feature names. ParsedTargetAttr filterFunctionTargetAttrs(const TargetAttr *TD) const; void getFunctionFeatureMap(llvm::StringMap &FeatureMap, const FunctionDecl *) const; void getFunctionFeatureMap(llvm::StringMap &FeatureMap, GlobalDecl GD) const; //===--------------------------------------------------------------------===// // Statistics //===--------------------------------------------------------------------===// /// The number of implicitly-declared default constructors. unsigned NumImplicitDefaultConstructors = 0; /// The number of implicitly-declared default constructors for /// which declarations were built. unsigned NumImplicitDefaultConstructorsDeclared = 0; /// The number of implicitly-declared copy constructors. unsigned NumImplicitCopyConstructors = 0; /// The number of implicitly-declared copy constructors for /// which declarations were built. unsigned NumImplicitCopyConstructorsDeclared = 0; /// The number of implicitly-declared move constructors. unsigned NumImplicitMoveConstructors = 0; /// The number of implicitly-declared move constructors for /// which declarations were built. unsigned NumImplicitMoveConstructorsDeclared = 0; /// The number of implicitly-declared copy assignment operators. unsigned NumImplicitCopyAssignmentOperators = 0; /// The number of implicitly-declared copy assignment operators for /// which declarations were built. unsigned NumImplicitCopyAssignmentOperatorsDeclared = 0; /// The number of implicitly-declared move assignment operators. unsigned NumImplicitMoveAssignmentOperators = 0; /// The number of implicitly-declared move assignment operators for /// which declarations were built. unsigned NumImplicitMoveAssignmentOperatorsDeclared = 0; /// The number of implicitly-declared destructors. unsigned NumImplicitDestructors = 0; /// The number of implicitly-declared destructors for which /// declarations were built. unsigned NumImplicitDestructorsDeclared = 0; public: /// Initialize built-in types. /// /// This routine may only be invoked once for a given ASTContext object. /// It is normally invoked after ASTContext construction. /// /// \param Target The target void InitBuiltinTypes(const TargetInfo &Target, const TargetInfo *AuxTarget = nullptr); private: void InitBuiltinType(CanQualType &R, BuiltinType::Kind K); class ObjCEncOptions { unsigned Bits; ObjCEncOptions(unsigned Bits) : Bits(Bits) {} public: ObjCEncOptions() : Bits(0) {} ObjCEncOptions(const ObjCEncOptions &RHS) : Bits(RHS.Bits) {} #define OPT_LIST(V) \ V(ExpandPointedToStructures, 0) \ V(ExpandStructures, 1) \ V(IsOutermostType, 2) \ V(EncodingProperty, 3) \ V(IsStructField, 4) \ V(EncodeBlockParameters, 5) \ V(EncodeClassNames, 6) \ #define V(N,I) ObjCEncOptions& set##N() { Bits |= 1 << I; return *this; } OPT_LIST(V) #undef V #define V(N,I) bool N() const { return Bits & 1 << I; } OPT_LIST(V) #undef V #undef OPT_LIST LLVM_NODISCARD ObjCEncOptions keepingOnly(ObjCEncOptions Mask) const { return Bits & Mask.Bits; } LLVM_NODISCARD ObjCEncOptions forComponentType() const { ObjCEncOptions Mask = ObjCEncOptions() .setIsOutermostType() .setIsStructField(); return Bits & ~Mask.Bits; } }; // Return the Objective-C type encoding for a given type. 
void getObjCEncodingForTypeImpl(QualType t, std::string &S, ObjCEncOptions Options, const FieldDecl *Field, QualType *NotEncodedT = nullptr) const; // Adds the encoding of the structure's members. void getObjCEncodingForStructureImpl(RecordDecl *RD, std::string &S, const FieldDecl *Field, bool includeVBases = true, QualType *NotEncodedT=nullptr) const; public: // Adds the encoding of a method parameter or return type. void getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, QualType T, std::string& S, bool Extended) const; /// Returns true if this is an inline-initialized static data member /// which is treated as a definition for MSVC compatibility. bool isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const; enum class InlineVariableDefinitionKind { /// Not an inline variable. None, /// Weak definition of inline variable. Weak, /// Weak for now, might become strong later in this TU. WeakUnknown, /// Strong definition. Strong }; /// Determine whether a definition of this inline variable should /// be treated as a weak or strong definition. For compatibility with /// C++14 and before, for a constexpr static data member, if there is an /// out-of-line declaration of the member, we may promote it from weak to /// strong. InlineVariableDefinitionKind getInlineVariableDefinitionKind(const VarDecl *VD) const; private: friend class DeclarationNameTable; friend class DeclContext; const ASTRecordLayout & getObjCLayout(const ObjCInterfaceDecl *D, const ObjCImplementationDecl *Impl) const; /// A set of deallocations that should be performed when the /// ASTContext is destroyed. // FIXME: We really should have a better mechanism in the ASTContext to // manage running destructors for types which do variable sized allocation // within the AST. In some places we thread the AST bump pointer allocator // into the datastructures which avoids this mess during deallocation but is // wasteful of memory, and here we require a lot of error prone book keeping // in order to track and run destructors while we're tearing things down. using DeallocationFunctionsAndArguments = llvm::SmallVector, 16>; mutable DeallocationFunctionsAndArguments Deallocations; // FIXME: This currently contains the set of StoredDeclMaps used // by DeclContext objects. This probably should not be in ASTContext, // but we include it here so that ASTContext can quickly deallocate them. llvm::PointerIntPair LastSDM; std::vector TraversalScope; std::unique_ptr VTContext; void ReleaseDeclContextMaps(); public: enum PragmaSectionFlag : unsigned { PSF_None = 0, PSF_Read = 0x1, PSF_Write = 0x2, PSF_Execute = 0x4, PSF_Implicit = 0x8, PSF_ZeroInit = 0x10, PSF_Invalid = 0x80000000U, }; struct SectionInfo { DeclaratorDecl *Decl; SourceLocation PragmaSectionLocation; int SectionFlags; SectionInfo() = default; SectionInfo(DeclaratorDecl *Decl, SourceLocation PragmaSectionLocation, int SectionFlags) : Decl(Decl), PragmaSectionLocation(PragmaSectionLocation), SectionFlags(SectionFlags) {} }; llvm::StringMap SectionInfos; /// Return a new OMPTraitInfo object owned by this context. OMPTraitInfo &getNewOMPTraitInfo(); private: /// All OMPTraitInfo objects live in this collection, one per /// `pragma omp [begin] declare variant` directive. SmallVector, 4> OMPTraitInfoVector; }; /// Insertion operator for diagnostics. const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB, const ASTContext::SectionInfo &Section); /// Utility function for constructing a nullary selector. 
inline Selector GetNullarySelector(StringRef name, ASTContext &Ctx) { IdentifierInfo* II = &Ctx.Idents.get(name); return Ctx.Selectors.getSelector(0, &II); } /// Utility function for constructing an unary selector. inline Selector GetUnarySelector(StringRef name, ASTContext &Ctx) { IdentifierInfo* II = &Ctx.Idents.get(name); return Ctx.Selectors.getSelector(1, &II); } } // namespace clang // operator new and delete aren't allowed inside namespaces. /// Placement new for using the ASTContext's allocator. /// /// This placement form of operator new uses the ASTContext's allocator for /// obtaining memory. /// /// IMPORTANT: These are also declared in clang/AST/ASTContextAllocate.h! /// Any changes here need to also be made there. /// /// We intentionally avoid using a nothrow specification here so that the calls /// to this operator will not perform a null check on the result -- the /// underlying allocator never returns null pointers. /// /// Usage looks like this (assuming there's an ASTContext 'Context' in scope): /// @code /// // Default alignment (8) /// IntegerLiteral *Ex = new (Context) IntegerLiteral(arguments); /// // Specific alignment /// IntegerLiteral *Ex2 = new (Context, 4) IntegerLiteral(arguments); /// @endcode /// Memory allocated through this placement new operator does not need to be /// explicitly freed, as ASTContext will free all of this memory when it gets /// destroyed. Please note that you cannot use delete on the pointer. /// /// @param Bytes The number of bytes to allocate. Calculated by the compiler. /// @param C The ASTContext that provides the allocator. /// @param Alignment The alignment of the allocated memory (if the underlying /// allocator supports it). /// @return The allocated memory. Could be nullptr. inline void *operator new(size_t Bytes, const clang::ASTContext &C, size_t Alignment /* = 8 */) { return C.Allocate(Bytes, Alignment); } /// Placement delete companion to the new above. /// /// This operator is just a companion to the new above. There is no way of /// invoking it directly; see the new operator for more details. This operator /// is called implicitly by the compiler if a placement new expression using /// the ASTContext throws in the object constructor. inline void operator delete(void *Ptr, const clang::ASTContext &C, size_t) { C.Deallocate(Ptr); } /// This placement form of operator new[] uses the ASTContext's allocator for /// obtaining memory. /// /// We intentionally avoid using a nothrow specification here so that the calls /// to this operator will not perform a null check on the result -- the /// underlying allocator never returns null pointers. /// /// Usage looks like this (assuming there's an ASTContext 'Context' in scope): /// @code /// // Default alignment (8) /// char *data = new (Context) char[10]; /// // Specific alignment /// char *data = new (Context, 4) char[10]; /// @endcode /// Memory allocated through this placement new[] operator does not need to be /// explicitly freed, as ASTContext will free all of this memory when it gets /// destroyed. Please note that you cannot use delete on the pointer. /// /// @param Bytes The number of bytes to allocate. Calculated by the compiler. /// @param C The ASTContext that provides the allocator. /// @param Alignment The alignment of the allocated memory (if the underlying /// allocator supports it). /// @return The allocated memory. Could be nullptr. 
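/// Illustrative usage sketch, not part of this header: building selectors with
/// the helpers above. \c Ctx is a hypothetical ASTContext reference.
/// \code
///   Selector InitSel = GetNullarySelector("init", Ctx); // 'init'
///   Selector SetXSel = GetUnarySelector("setX", Ctx);   // 'setX:'
/// \endcode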
inline void *operator new[](size_t Bytes, const clang::ASTContext& C, size_t Alignment /* = 8 */) { return C.Allocate(Bytes, Alignment); } /// Placement delete[] companion to the new[] above. /// /// This operator is just a companion to the new[] above. There is no way of /// invoking it directly; see the new[] operator for more details. This operator /// is called implicitly by the compiler if a placement new[] expression using /// the ASTContext throws in the object constructor. inline void operator delete[](void *Ptr, const clang::ASTContext &C, size_t) { C.Deallocate(Ptr); } /// Create the representation of a LazyGenerationalUpdatePtr. template typename clang::LazyGenerationalUpdatePtr::ValueType clang::LazyGenerationalUpdatePtr::makeValue( const clang::ASTContext &Ctx, T Value) { // Note, this is implemented here so that ExternalASTSource.h doesn't need to // include ASTContext.h. We explicitly instantiate it for all relevant types // in ASTContext.cpp. if (auto *Source = Ctx.getExternalSource()) return new (Ctx) LazyData(Source, Value); return Value; } #endif // LLVM_CLANG_AST_ASTCONTEXT_H diff --git a/contrib/llvm-project/clang/include/clang/AST/Type.h b/contrib/llvm-project/clang/include/clang/AST/Type.h index 736f08651c84..74e4a578cb55 100644 --- a/contrib/llvm-project/clang/include/clang/AST/Type.h +++ b/contrib/llvm-project/clang/include/clang/AST/Type.h @@ -1,7258 +1,7255 @@ //===- Type.h - C Language Family Type Representation -----------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// C Language Family Type Representation /// /// This file defines the clang::Type interface and subclasses, used to /// represent types for languages in the C family. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_TYPE_H #define LLVM_CLANG_AST_TYPE_H #include "clang/AST/DependenceFlags.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/TemplateName.h" #include "clang/Basic/AddressSpaces.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/Linkage.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/Visibility.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/None.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/PointerUnion.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/PointerLikeTypeTraits.h" #include "llvm/Support/TrailingObjects.h" #include "llvm/Support/type_traits.h" #include #include #include #include #include #include #include namespace clang { class ExtQuals; class QualType; class ConceptDecl; class TagDecl; class Type; enum { TypeAlignmentInBits = 4, TypeAlignment = 1 << TypeAlignmentInBits }; namespace serialization { template class AbstractTypeReader; template class AbstractTypeWriter; } } // namespace clang namespace llvm { template struct PointerLikeTypeTraits; template<> struct PointerLikeTypeTraits< ::clang::Type*> { static inline void *getAsVoidPointer(::clang::Type *P) { return P; } static inline ::clang::Type *getFromVoidPointer(void *P) { return static_cast< ::clang::Type*>(P); } static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits; }; template<> struct PointerLikeTypeTraits< ::clang::ExtQuals*> { static inline void *getAsVoidPointer(::clang::ExtQuals *P) { return P; } static inline ::clang::ExtQuals *getFromVoidPointer(void *P) { return static_cast< ::clang::ExtQuals*>(P); } static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits; }; } // namespace llvm namespace clang { class ASTContext; template class CanQual; class CXXRecordDecl; class DeclContext; class EnumDecl; class Expr; class ExtQualsTypeCommonBase; class FunctionDecl; class IdentifierInfo; class NamedDecl; class ObjCInterfaceDecl; class ObjCProtocolDecl; class ObjCTypeParamDecl; struct PrintingPolicy; class RecordDecl; class Stmt; class TagDecl; class TemplateArgument; class TemplateArgumentListInfo; class TemplateArgumentLoc; class TemplateTypeParmDecl; class TypedefNameDecl; class UnresolvedUsingTypenameDecl; using CanQualType = CanQual; // Provide forward declarations for all of the *Type classes. #define TYPE(Class, Base) class Class##Type; #include "clang/AST/TypeNodes.inc" /// The collection of all-type qualifiers we support. /// Clang supports five independent qualifiers: /// * C99: const, volatile, and restrict /// * MS: __unaligned /// * Embedded C (TR18037): address spaces /// * Objective C: the GC attributes (none, weak, or strong) class Qualifiers { public: enum TQ { // NOTE: These flags must be kept in sync with DeclSpec::TQ. Const = 0x1, Restrict = 0x2, Volatile = 0x4, CVRMask = Const | Volatile | Restrict }; enum GC { GCNone = 0, Weak, Strong }; enum ObjCLifetime { /// There is no lifetime qualification on this type. 
OCL_None, /// This object can be modified without requiring retains or /// releases. OCL_ExplicitNone, /// Assigning into this object requires the old value to be /// released and the new value to be retained. The timing of the /// release of the old value is inexact: it may be moved to /// immediately after the last known point where the value is /// live. OCL_Strong, /// Reading or writing from this object requires a barrier call. OCL_Weak, /// Assigning into this object requires a lifetime extension. OCL_Autoreleasing }; enum { /// The maximum supported address space number. /// 23 bits should be enough for anyone. MaxAddressSpace = 0x7fffffu, /// The width of the "fast" qualifier mask. FastWidth = 3, /// The fast qualifier mask. FastMask = (1 << FastWidth) - 1 }; /// Returns the common set of qualifiers while removing them from /// the given sets. static Qualifiers removeCommonQualifiers(Qualifiers &L, Qualifiers &R) { // If both are only CVR-qualified, bit operations are sufficient. if (!(L.Mask & ~CVRMask) && !(R.Mask & ~CVRMask)) { Qualifiers Q; Q.Mask = L.Mask & R.Mask; L.Mask &= ~Q.Mask; R.Mask &= ~Q.Mask; return Q; } Qualifiers Q; unsigned CommonCRV = L.getCVRQualifiers() & R.getCVRQualifiers(); Q.addCVRQualifiers(CommonCRV); L.removeCVRQualifiers(CommonCRV); R.removeCVRQualifiers(CommonCRV); if (L.getObjCGCAttr() == R.getObjCGCAttr()) { Q.setObjCGCAttr(L.getObjCGCAttr()); L.removeObjCGCAttr(); R.removeObjCGCAttr(); } if (L.getObjCLifetime() == R.getObjCLifetime()) { Q.setObjCLifetime(L.getObjCLifetime()); L.removeObjCLifetime(); R.removeObjCLifetime(); } if (L.getAddressSpace() == R.getAddressSpace()) { Q.setAddressSpace(L.getAddressSpace()); L.removeAddressSpace(); R.removeAddressSpace(); } return Q; } static Qualifiers fromFastMask(unsigned Mask) { Qualifiers Qs; Qs.addFastQualifiers(Mask); return Qs; } static Qualifiers fromCVRMask(unsigned CVR) { Qualifiers Qs; Qs.addCVRQualifiers(CVR); return Qs; } static Qualifiers fromCVRUMask(unsigned CVRU) { Qualifiers Qs; Qs.addCVRUQualifiers(CVRU); return Qs; } // Deserialize qualifiers from an opaque representation. static Qualifiers fromOpaqueValue(unsigned opaque) { Qualifiers Qs; Qs.Mask = opaque; return Qs; } // Serialize these qualifiers into an opaque representation. 
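  // Illustrative example (not part of the header itself): because the opaque
  // value is simply the internal Mask, serialization round-trips losslessly:
  //   Qualifiers Q = Qualifiers::fromCVRMask(Qualifiers::Const);
  //   assert(Qualifiers::fromOpaqueValue(Q.getAsOpaqueValue()) == Q);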
unsigned getAsOpaqueValue() const { return Mask; } bool hasConst() const { return Mask & Const; } bool hasOnlyConst() const { return Mask == Const; } void removeConst() { Mask &= ~Const; } void addConst() { Mask |= Const; } bool hasVolatile() const { return Mask & Volatile; } bool hasOnlyVolatile() const { return Mask == Volatile; } void removeVolatile() { Mask &= ~Volatile; } void addVolatile() { Mask |= Volatile; } bool hasRestrict() const { return Mask & Restrict; } bool hasOnlyRestrict() const { return Mask == Restrict; } void removeRestrict() { Mask &= ~Restrict; } void addRestrict() { Mask |= Restrict; } bool hasCVRQualifiers() const { return getCVRQualifiers(); } unsigned getCVRQualifiers() const { return Mask & CVRMask; } unsigned getCVRUQualifiers() const { return Mask & (CVRMask | UMask); } void setCVRQualifiers(unsigned mask) { assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits"); Mask = (Mask & ~CVRMask) | mask; } void removeCVRQualifiers(unsigned mask) { assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits"); Mask &= ~mask; } void removeCVRQualifiers() { removeCVRQualifiers(CVRMask); } void addCVRQualifiers(unsigned mask) { assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits"); Mask |= mask; } void addCVRUQualifiers(unsigned mask) { assert(!(mask & ~CVRMask & ~UMask) && "bitmask contains non-CVRU bits"); Mask |= mask; } bool hasUnaligned() const { return Mask & UMask; } void setUnaligned(bool flag) { Mask = (Mask & ~UMask) | (flag ? UMask : 0); } void removeUnaligned() { Mask &= ~UMask; } void addUnaligned() { Mask |= UMask; } bool hasObjCGCAttr() const { return Mask & GCAttrMask; } GC getObjCGCAttr() const { return GC((Mask & GCAttrMask) >> GCAttrShift); } void setObjCGCAttr(GC type) { Mask = (Mask & ~GCAttrMask) | (type << GCAttrShift); } void removeObjCGCAttr() { setObjCGCAttr(GCNone); } void addObjCGCAttr(GC type) { assert(type); setObjCGCAttr(type); } Qualifiers withoutObjCGCAttr() const { Qualifiers qs = *this; qs.removeObjCGCAttr(); return qs; } Qualifiers withoutObjCLifetime() const { Qualifiers qs = *this; qs.removeObjCLifetime(); return qs; } Qualifiers withoutAddressSpace() const { Qualifiers qs = *this; qs.removeAddressSpace(); return qs; } bool hasObjCLifetime() const { return Mask & LifetimeMask; } ObjCLifetime getObjCLifetime() const { return ObjCLifetime((Mask & LifetimeMask) >> LifetimeShift); } void setObjCLifetime(ObjCLifetime type) { Mask = (Mask & ~LifetimeMask) | (type << LifetimeShift); } void removeObjCLifetime() { setObjCLifetime(OCL_None); } void addObjCLifetime(ObjCLifetime type) { assert(type); assert(!hasObjCLifetime()); Mask |= (type << LifetimeShift); } /// True if the lifetime is neither None or ExplicitNone. bool hasNonTrivialObjCLifetime() const { ObjCLifetime lifetime = getObjCLifetime(); return (lifetime > OCL_ExplicitNone); } /// True if the lifetime is either strong or weak. bool hasStrongOrWeakObjCLifetime() const { ObjCLifetime lifetime = getObjCLifetime(); return (lifetime == OCL_Strong || lifetime == OCL_Weak); } bool hasAddressSpace() const { return Mask & AddressSpaceMask; } LangAS getAddressSpace() const { return static_cast(Mask >> AddressSpaceShift); } bool hasTargetSpecificAddressSpace() const { return isTargetAddressSpace(getAddressSpace()); } /// Get the address space attribute value to be printed by diagnostics. unsigned getAddressSpaceAttributePrintValue() const { auto Addr = getAddressSpace(); // This function is not supposed to be used with language specific // address spaces. 
If that happens, the diagnostic message should consider // printing the QualType instead of the address space value. assert(Addr == LangAS::Default || hasTargetSpecificAddressSpace()); if (Addr != LangAS::Default) return toTargetAddressSpace(Addr); // TODO: The diagnostic messages where Addr may be 0 should be fixed // since it cannot differentiate the situation where 0 denotes the default // address space or user specified __attribute__((address_space(0))). return 0; } void setAddressSpace(LangAS space) { assert((unsigned)space <= MaxAddressSpace); Mask = (Mask & ~AddressSpaceMask) | (((uint32_t) space) << AddressSpaceShift); } void removeAddressSpace() { setAddressSpace(LangAS::Default); } void addAddressSpace(LangAS space) { assert(space != LangAS::Default); setAddressSpace(space); } // Fast qualifiers are those that can be allocated directly // on a QualType object. bool hasFastQualifiers() const { return getFastQualifiers(); } unsigned getFastQualifiers() const { return Mask & FastMask; } void setFastQualifiers(unsigned mask) { assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits"); Mask = (Mask & ~FastMask) | mask; } void removeFastQualifiers(unsigned mask) { assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits"); Mask &= ~mask; } void removeFastQualifiers() { removeFastQualifiers(FastMask); } void addFastQualifiers(unsigned mask) { assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits"); Mask |= mask; } /// Return true if the set contains any qualifiers which require an ExtQuals /// node to be allocated. bool hasNonFastQualifiers() const { return Mask & ~FastMask; } Qualifiers getNonFastQualifiers() const { Qualifiers Quals = *this; Quals.setFastQualifiers(0); return Quals; } /// Return true if the set contains any qualifiers. bool hasQualifiers() const { return Mask; } bool empty() const { return !Mask; } /// Add the qualifiers from the given set to this set. void addQualifiers(Qualifiers Q) { // If the other set doesn't have any non-boolean qualifiers, just // bit-or it in. if (!(Q.Mask & ~CVRMask)) Mask |= Q.Mask; else { Mask |= (Q.Mask & CVRMask); if (Q.hasAddressSpace()) addAddressSpace(Q.getAddressSpace()); if (Q.hasObjCGCAttr()) addObjCGCAttr(Q.getObjCGCAttr()); if (Q.hasObjCLifetime()) addObjCLifetime(Q.getObjCLifetime()); } } /// Remove the qualifiers from the given set from this set. void removeQualifiers(Qualifiers Q) { // If the other set doesn't have any non-boolean qualifiers, just // bit-and the inverse in. if (!(Q.Mask & ~CVRMask)) Mask &= ~Q.Mask; else { Mask &= ~(Q.Mask & CVRMask); if (getObjCGCAttr() == Q.getObjCGCAttr()) removeObjCGCAttr(); if (getObjCLifetime() == Q.getObjCLifetime()) removeObjCLifetime(); if (getAddressSpace() == Q.getAddressSpace()) removeAddressSpace(); } } /// Add the qualifiers from the given set to this set, given that /// they don't conflict. void addConsistentQualifiers(Qualifiers qs) { assert(getAddressSpace() == qs.getAddressSpace() || !hasAddressSpace() || !qs.hasAddressSpace()); assert(getObjCGCAttr() == qs.getObjCGCAttr() || !hasObjCGCAttr() || !qs.hasObjCGCAttr()); assert(getObjCLifetime() == qs.getObjCLifetime() || !hasObjCLifetime() || !qs.hasObjCLifetime()); Mask |= qs.Mask; } /// Returns true if address space A is equal to or a superset of B. /// OpenCL v2.0 defines conversion rules (OpenCLC v2.0 s6.5.5) and notion of /// overlapping address spaces. /// CL1.1 or CL1.2: /// every address space is a superset of itself. 
/// CL2.0 adds: /// __generic is a superset of any address space except for __constant. static bool isAddressSpaceSupersetOf(LangAS A, LangAS B) { // Address spaces must match exactly. return A == B || // Otherwise in OpenCLC v2.0 s6.5.5: every address space except // for __constant can be used as __generic. (A == LangAS::opencl_generic && B != LangAS::opencl_constant) || // Consider pointer size address spaces to be equivalent to default. ((isPtrSizeAddressSpace(A) || A == LangAS::Default) && (isPtrSizeAddressSpace(B) || B == LangAS::Default)); } /// Returns true if the address space in these qualifiers is equal to or /// a superset of the address space in the argument qualifiers. bool isAddressSpaceSupersetOf(Qualifiers other) const { return isAddressSpaceSupersetOf(getAddressSpace(), other.getAddressSpace()); } /// Determines if these qualifiers compatibly include another set. /// Generally this answers the question of whether an object with the other /// qualifiers can be safely used as an object with these qualifiers. bool compatiblyIncludes(Qualifiers other) const { return isAddressSpaceSupersetOf(other) && // ObjC GC qualifiers can match, be added, or be removed, but can't // be changed. (getObjCGCAttr() == other.getObjCGCAttr() || !hasObjCGCAttr() || !other.hasObjCGCAttr()) && // ObjC lifetime qualifiers must match exactly. getObjCLifetime() == other.getObjCLifetime() && // CVR qualifiers may subset. (((Mask & CVRMask) | (other.Mask & CVRMask)) == (Mask & CVRMask)) && // U qualifier may superset. (!other.hasUnaligned() || hasUnaligned()); } /// Determines if these qualifiers compatibly include another set of /// qualifiers from the narrow perspective of Objective-C ARC lifetime. /// /// One set of Objective-C lifetime qualifiers compatibly includes the other /// if the lifetime qualifiers match, or if both are non-__weak and the /// including set also contains the 'const' qualifier, or both are non-__weak /// and one is None (which can only happen in non-ARC modes). bool compatiblyIncludesObjCLifetime(Qualifiers other) const { if (getObjCLifetime() == other.getObjCLifetime()) return true; if (getObjCLifetime() == OCL_Weak || other.getObjCLifetime() == OCL_Weak) return false; if (getObjCLifetime() == OCL_None || other.getObjCLifetime() == OCL_None) return true; return hasConst(); } /// Determine whether this set of qualifiers is a strict superset of /// another set of qualifiers, not considering qualifier compatibility. bool isStrictSupersetOf(Qualifiers Other) const; bool operator==(Qualifiers Other) const { return Mask == Other.Mask; } bool operator!=(Qualifiers Other) const { return Mask != Other.Mask; } explicit operator bool() const { return hasQualifiers(); } Qualifiers &operator+=(Qualifiers R) { addQualifiers(R); return *this; } // Union two qualifier sets. If an enumerated qualifier appears // in both sets, use the one from the right. friend Qualifiers operator+(Qualifiers L, Qualifiers R) { L += R; return L; } Qualifiers &operator-=(Qualifiers R) { removeQualifiers(R); return *this; } /// Compute the difference between two qualifier sets. 
friend Qualifiers operator-(Qualifiers L, Qualifiers R) { L -= R; return L; } std::string getAsString() const; std::string getAsString(const PrintingPolicy &Policy) const; static std::string getAddrSpaceAsString(LangAS AS); bool isEmptyWhenPrinted(const PrintingPolicy &Policy) const; void print(raw_ostream &OS, const PrintingPolicy &Policy, bool appendSpaceIfNonEmpty = false) const; void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Mask); } private: // bits: |0 1 2|3|4 .. 5|6 .. 8|9 ... 31| // |C R V|U|GCAttr|Lifetime|AddressSpace| uint32_t Mask = 0; static const uint32_t UMask = 0x8; static const uint32_t UShift = 3; static const uint32_t GCAttrMask = 0x30; static const uint32_t GCAttrShift = 4; static const uint32_t LifetimeMask = 0x1C0; static const uint32_t LifetimeShift = 6; static const uint32_t AddressSpaceMask = ~(CVRMask | UMask | GCAttrMask | LifetimeMask); static const uint32_t AddressSpaceShift = 9; }; /// A std::pair-like structure for storing a qualified type split /// into its local qualifiers and its locally-unqualified type. struct SplitQualType { /// The locally-unqualified type. const Type *Ty = nullptr; /// The local qualifiers. Qualifiers Quals; SplitQualType() = default; SplitQualType(const Type *ty, Qualifiers qs) : Ty(ty), Quals(qs) {} SplitQualType getSingleStepDesugaredType() const; // end of this file // Make std::tie work. std::pair asPair() const { return std::pair(Ty, Quals); } friend bool operator==(SplitQualType a, SplitQualType b) { return a.Ty == b.Ty && a.Quals == b.Quals; } friend bool operator!=(SplitQualType a, SplitQualType b) { return a.Ty != b.Ty || a.Quals != b.Quals; } }; /// The kind of type we are substituting Objective-C type arguments into. /// /// The kind of substitution affects the replacement of type parameters when /// no concrete type information is provided, e.g., when dealing with an /// unspecialized type. enum class ObjCSubstitutionContext { /// An ordinary type. Ordinary, /// The result type of a method or function. Result, /// The parameter type of a method or function. Parameter, /// The type of a property. Property, /// The superclass of a type. Superclass, }; /// A (possibly-)qualified type. /// /// For efficiency, we don't store CV-qualified types as nodes on their /// own: instead each reference to a type stores the qualifiers. This /// greatly reduces the number of nodes we need to allocate for types (for /// example we only need one for 'int', 'const int', 'volatile int', /// 'const volatile int', etc). /// /// As an added efficiency bonus, instead of making this a pair, we /// just store the two bits we care about in the low bits of the /// pointer. To handle the packing/unpacking, we make QualType be a /// simple wrapper class that acts like a smart pointer. A third bit /// indicates whether there are extended qualifiers present, in which /// case the pointer points to a special structure. class QualType { friend class QualifierCollector; // Thankfully, these are efficiently composable. 
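  // Illustrative note (not part of the header itself): the fast CVR bits ride
  // in the low bits of the pointer stored below, while the rarer qualifiers
  // require a separately allocated ExtQuals node. Assuming a QualType CI for
  // 'const int' and AS for an address_space-qualified int:
  //   CI.isLocalConstQualified();      // true  - Const is a fast bit
  //   CI.hasLocalNonFastQualifiers();  // false - no ExtQuals node needed
  //   AS.hasLocalNonFastQualifiers();  // true  - address space lives in ExtQuals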
llvm::PointerIntPair, Qualifiers::FastWidth> Value; const ExtQuals *getExtQualsUnsafe() const { return Value.getPointer().get(); } const Type *getTypePtrUnsafe() const { return Value.getPointer().get(); } const ExtQualsTypeCommonBase *getCommonPtr() const { assert(!isNull() && "Cannot retrieve a NULL type pointer"); auto CommonPtrVal = reinterpret_cast(Value.getOpaqueValue()); CommonPtrVal &= ~(uintptr_t)((1 << TypeAlignmentInBits) - 1); return reinterpret_cast(CommonPtrVal); } public: QualType() = default; QualType(const Type *Ptr, unsigned Quals) : Value(Ptr, Quals) {} QualType(const ExtQuals *Ptr, unsigned Quals) : Value(Ptr, Quals) {} unsigned getLocalFastQualifiers() const { return Value.getInt(); } void setLocalFastQualifiers(unsigned Quals) { Value.setInt(Quals); } /// Retrieves a pointer to the underlying (unqualified) type. /// /// This function requires that the type not be NULL. If the type might be /// NULL, use the (slightly less efficient) \c getTypePtrOrNull(). const Type *getTypePtr() const; const Type *getTypePtrOrNull() const; /// Retrieves a pointer to the name of the base type. const IdentifierInfo *getBaseTypeIdentifier() const; /// Divides a QualType into its unqualified type and a set of local /// qualifiers. SplitQualType split() const; void *getAsOpaquePtr() const { return Value.getOpaqueValue(); } static QualType getFromOpaquePtr(const void *Ptr) { QualType T; T.Value.setFromOpaqueValue(const_cast(Ptr)); return T; } const Type &operator*() const { return *getTypePtr(); } const Type *operator->() const { return getTypePtr(); } bool isCanonical() const; bool isCanonicalAsParam() const; /// Return true if this QualType doesn't point to a type yet. bool isNull() const { return Value.getPointer().isNull(); } /// Determine whether this particular QualType instance has the /// "const" qualifier set, without looking through typedefs that may have /// added "const" at a different level. bool isLocalConstQualified() const { return (getLocalFastQualifiers() & Qualifiers::Const); } /// Determine whether this type is const-qualified. bool isConstQualified() const; /// Determine whether this particular QualType instance has the /// "restrict" qualifier set, without looking through typedefs that may have /// added "restrict" at a different level. bool isLocalRestrictQualified() const { return (getLocalFastQualifiers() & Qualifiers::Restrict); } /// Determine whether this type is restrict-qualified. bool isRestrictQualified() const; /// Determine whether this particular QualType instance has the /// "volatile" qualifier set, without looking through typedefs that may have /// added "volatile" at a different level. bool isLocalVolatileQualified() const { return (getLocalFastQualifiers() & Qualifiers::Volatile); } /// Determine whether this type is volatile-qualified. bool isVolatileQualified() const; /// Determine whether this particular QualType instance has any /// qualifiers, without looking through any typedefs that might add /// qualifiers at a different level. bool hasLocalQualifiers() const { return getLocalFastQualifiers() || hasLocalNonFastQualifiers(); } /// Determine whether this type has any qualifiers. bool hasQualifiers() const; /// Determine whether this particular QualType instance has any /// "non-fast" qualifiers, e.g., those that are stored in an ExtQualType /// instance. 
bool hasLocalNonFastQualifiers() const { return Value.getPointer().is(); } /// Retrieve the set of qualifiers local to this particular QualType /// instance, not including any qualifiers acquired through typedefs or /// other sugar. Qualifiers getLocalQualifiers() const; /// Retrieve the set of qualifiers applied to this type. Qualifiers getQualifiers() const; /// Retrieve the set of CVR (const-volatile-restrict) qualifiers /// local to this particular QualType instance, not including any qualifiers /// acquired through typedefs or other sugar. unsigned getLocalCVRQualifiers() const { return getLocalFastQualifiers(); } /// Retrieve the set of CVR (const-volatile-restrict) qualifiers /// applied to this type. unsigned getCVRQualifiers() const; bool isConstant(const ASTContext& Ctx) const { return QualType::isConstant(*this, Ctx); } /// Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10). bool isPODType(const ASTContext &Context) const; /// Return true if this is a POD type according to the rules of the C++98 /// standard, regardless of the current compilation's language. bool isCXX98PODType(const ASTContext &Context) const; /// Return true if this is a POD type according to the more relaxed rules /// of the C++11 standard, regardless of the current compilation's language. /// (C++0x [basic.types]p9). Note that, unlike /// CXXRecordDecl::isCXX11StandardLayout, this takes DRs into account. bool isCXX11PODType(const ASTContext &Context) const; /// Return true if this is a trivial type per (C++0x [basic.types]p9) bool isTrivialType(const ASTContext &Context) const; /// Return true if this is a trivially copyable type (C++0x [basic.types]p9) bool isTriviallyCopyableType(const ASTContext &Context) const; /// Returns true if it is a class and it might be dynamic. bool mayBeDynamicClass() const; /// Returns true if it is not a class or if the class might not be dynamic. bool mayBeNotDynamicClass() const; // Don't promise in the API that anything besides 'const' can be // easily added. /// Add the `const` type qualifier to this QualType. void addConst() { addFastQualifiers(Qualifiers::Const); } QualType withConst() const { return withFastQualifiers(Qualifiers::Const); } /// Add the `volatile` type qualifier to this QualType. void addVolatile() { addFastQualifiers(Qualifiers::Volatile); } QualType withVolatile() const { return withFastQualifiers(Qualifiers::Volatile); } /// Add the `restrict` qualifier to this QualType. void addRestrict() { addFastQualifiers(Qualifiers::Restrict); } QualType withRestrict() const { return withFastQualifiers(Qualifiers::Restrict); } QualType withCVRQualifiers(unsigned CVR) const { return withFastQualifiers(CVR); } void addFastQualifiers(unsigned TQs) { assert(!(TQs & ~Qualifiers::FastMask) && "non-fast qualifier bits set in mask!"); Value.setInt(Value.getInt() | TQs); } void removeLocalConst(); void removeLocalVolatile(); void removeLocalRestrict(); void removeLocalCVRQualifiers(unsigned Mask); void removeLocalFastQualifiers() { Value.setInt(0); } void removeLocalFastQualifiers(unsigned Mask) { assert(!(Mask & ~Qualifiers::FastMask) && "mask has non-fast qualifiers"); Value.setInt(Value.getInt() & ~Mask); } // Creates a type with the given qualifiers in addition to any // qualifiers already on this type. QualType withFastQualifiers(unsigned TQs) const { QualType T = *this; T.addFastQualifiers(TQs); return T; } // Creates a type with exactly the given fast qualifiers, removing // any existing fast qualifiers. 
QualType withExactLocalFastQualifiers(unsigned TQs) const { return withoutLocalFastQualifiers().withFastQualifiers(TQs); } // Removes fast qualifiers, but leaves any extended qualifiers in place. QualType withoutLocalFastQualifiers() const { QualType T = *this; T.removeLocalFastQualifiers(); return T; } QualType getCanonicalType() const; /// Return this type with all of the instance-specific qualifiers /// removed, but without removing any qualifiers that may have been applied /// through typedefs. QualType getLocalUnqualifiedType() const { return QualType(getTypePtr(), 0); } /// Retrieve the unqualified variant of the given type, /// removing as little sugar as possible. /// /// This routine looks through various kinds of sugar to find the /// least-desugared type that is unqualified. For example, given: /// /// \code /// typedef int Integer; /// typedef const Integer CInteger; /// typedef CInteger DifferenceType; /// \endcode /// /// Executing \c getUnqualifiedType() on the type \c DifferenceType will /// desugar until we hit the type \c Integer, which has no qualifiers on it. /// /// The resulting type might still be qualified if it's sugar for an array /// type. To strip qualifiers even from within a sugared array type, use /// ASTContext::getUnqualifiedArrayType. inline QualType getUnqualifiedType() const; /// Retrieve the unqualified variant of the given type, removing as little /// sugar as possible. /// /// Like getUnqualifiedType(), but also returns the set of /// qualifiers that were built up. /// /// The resulting type might still be qualified if it's sugar for an array /// type. To strip qualifiers even from within a sugared array type, use /// ASTContext::getUnqualifiedArrayType. inline SplitQualType getSplitUnqualifiedType() const; /// Determine whether this type is more qualified than the other /// given type, requiring exact equality for non-CVR qualifiers. bool isMoreQualifiedThan(QualType Other) const; /// Determine whether this type is at least as qualified as the other /// given type, requiring exact equality for non-CVR qualifiers. bool isAtLeastAsQualifiedAs(QualType Other) const; QualType getNonReferenceType() const; /// Determine the type of a (typically non-lvalue) expression with the /// specified result type. /// /// This routine should be used for expressions for which the return type is /// explicitly specified (e.g., in a cast or call) and isn't necessarily /// an lvalue. It removes a top-level reference (since there are no /// expressions of reference type) and deletes top-level cvr-qualifiers /// from non-class types (in C++) or all types (in C). QualType getNonLValueExprType(const ASTContext &Context) const; /// Remove an outer pack expansion type (if any) from this type. Used as part /// of converting the type of a declaration to the type of an expression that /// references that expression. It's meaningless for an expression to have a /// pack expansion type. QualType getNonPackExpansionType() const; /// Return the specified type with any "sugar" removed from /// the type. This takes off typedefs, typeof's etc. If the outer level of /// the type is already concrete, it returns it unmodified. This is similar /// to getting the canonical type, but it doesn't remove *all* typedefs. For /// example, it returns "T*" as "T*", (not as "int*"), because the pointer is /// concrete. /// /// Qualifiers are left in place. 
QualType getDesugaredType(const ASTContext &Context) const { return getDesugaredType(*this, Context); } SplitQualType getSplitDesugaredType() const { return getSplitDesugaredType(*this); } /// Return the specified type with one level of "sugar" removed from /// the type. /// /// This routine takes off the first typedef, typeof, etc. If the outer level /// of the type is already concrete, it returns it unmodified. QualType getSingleStepDesugaredType(const ASTContext &Context) const { return getSingleStepDesugaredTypeImpl(*this, Context); } /// Returns the specified type after dropping any /// outer-level parentheses. QualType IgnoreParens() const { if (isa(*this)) return QualType::IgnoreParens(*this); return *this; } /// Indicate whether the specified types and qualifiers are identical. friend bool operator==(const QualType &LHS, const QualType &RHS) { return LHS.Value == RHS.Value; } friend bool operator!=(const QualType &LHS, const QualType &RHS) { return LHS.Value != RHS.Value; } friend bool operator<(const QualType &LHS, const QualType &RHS) { return LHS.Value < RHS.Value; } static std::string getAsString(SplitQualType split, const PrintingPolicy &Policy) { return getAsString(split.Ty, split.Quals, Policy); } static std::string getAsString(const Type *ty, Qualifiers qs, const PrintingPolicy &Policy); std::string getAsString() const; std::string getAsString(const PrintingPolicy &Policy) const; void print(raw_ostream &OS, const PrintingPolicy &Policy, const Twine &PlaceHolder = Twine(), unsigned Indentation = 0) const; static void print(SplitQualType split, raw_ostream &OS, const PrintingPolicy &policy, const Twine &PlaceHolder, unsigned Indentation = 0) { return print(split.Ty, split.Quals, OS, policy, PlaceHolder, Indentation); } static void print(const Type *ty, Qualifiers qs, raw_ostream &OS, const PrintingPolicy &policy, const Twine &PlaceHolder, unsigned Indentation = 0); void getAsStringInternal(std::string &Str, const PrintingPolicy &Policy) const; static void getAsStringInternal(SplitQualType split, std::string &out, const PrintingPolicy &policy) { return getAsStringInternal(split.Ty, split.Quals, out, policy); } static void getAsStringInternal(const Type *ty, Qualifiers qs, std::string &out, const PrintingPolicy &policy); class StreamedQualTypeHelper { const QualType &T; const PrintingPolicy &Policy; const Twine &PlaceHolder; unsigned Indentation; public: StreamedQualTypeHelper(const QualType &T, const PrintingPolicy &Policy, const Twine &PlaceHolder, unsigned Indentation) : T(T), Policy(Policy), PlaceHolder(PlaceHolder), Indentation(Indentation) {} friend raw_ostream &operator<<(raw_ostream &OS, const StreamedQualTypeHelper &SQT) { SQT.T.print(OS, SQT.Policy, SQT.PlaceHolder, SQT.Indentation); return OS; } }; StreamedQualTypeHelper stream(const PrintingPolicy &Policy, const Twine &PlaceHolder = Twine(), unsigned Indentation = 0) const { return StreamedQualTypeHelper(*this, Policy, PlaceHolder, Indentation); } void dump(const char *s) const; void dump() const; void dump(llvm::raw_ostream &OS, const ASTContext &Context) const; void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddPointer(getAsOpaquePtr()); } /// Check if this type has any address space qualifier. inline bool hasAddressSpace() const; /// Return the address space of this type. inline LangAS getAddressSpace() const; /// Returns true if address space qualifiers overlap with T address space /// qualifiers. 
/// OpenCL C defines conversion rules for pointers to different address spaces /// and notion of overlapping address spaces. /// CL1.1 or CL1.2: /// address spaces overlap iff they are they same. /// OpenCL C v2.0 s6.5.5 adds: /// __generic overlaps with any address space except for __constant. bool isAddressSpaceOverlapping(QualType T) const { Qualifiers Q = getQualifiers(); Qualifiers TQ = T.getQualifiers(); // Address spaces overlap if at least one of them is a superset of another return Q.isAddressSpaceSupersetOf(TQ) || TQ.isAddressSpaceSupersetOf(Q); } /// Returns gc attribute of this type. inline Qualifiers::GC getObjCGCAttr() const; /// true when Type is objc's weak. bool isObjCGCWeak() const { return getObjCGCAttr() == Qualifiers::Weak; } /// true when Type is objc's strong. bool isObjCGCStrong() const { return getObjCGCAttr() == Qualifiers::Strong; } /// Returns lifetime attribute of this type. Qualifiers::ObjCLifetime getObjCLifetime() const { return getQualifiers().getObjCLifetime(); } bool hasNonTrivialObjCLifetime() const { return getQualifiers().hasNonTrivialObjCLifetime(); } bool hasStrongOrWeakObjCLifetime() const { return getQualifiers().hasStrongOrWeakObjCLifetime(); } // true when Type is objc's weak and weak is enabled but ARC isn't. bool isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const; enum PrimitiveDefaultInitializeKind { /// The type does not fall into any of the following categories. Note that /// this case is zero-valued so that values of this enum can be used as a /// boolean condition for non-triviality. PDIK_Trivial, /// The type is an Objective-C retainable pointer type that is qualified /// with the ARC __strong qualifier. PDIK_ARCStrong, /// The type is an Objective-C retainable pointer type that is qualified /// with the ARC __weak qualifier. PDIK_ARCWeak, /// The type is a struct containing a field whose type is not PCK_Trivial. PDIK_Struct }; /// Functions to query basic properties of non-trivial C struct types. /// Check if this is a non-trivial type that would cause a C struct /// transitively containing this type to be non-trivial to default initialize /// and return the kind. PrimitiveDefaultInitializeKind isNonTrivialToPrimitiveDefaultInitialize() const; enum PrimitiveCopyKind { /// The type does not fall into any of the following categories. Note that /// this case is zero-valued so that values of this enum can be used as a /// boolean condition for non-triviality. PCK_Trivial, /// The type would be trivial except that it is volatile-qualified. Types /// that fall into one of the other non-trivial cases may additionally be /// volatile-qualified. PCK_VolatileTrivial, /// The type is an Objective-C retainable pointer type that is qualified /// with the ARC __strong qualifier. PCK_ARCStrong, /// The type is an Objective-C retainable pointer type that is qualified /// with the ARC __weak qualifier. PCK_ARCWeak, /// The type is a struct containing a field whose type is neither /// PCK_Trivial nor PCK_VolatileTrivial. /// Note that a C++ struct type does not necessarily match this; C++ copying /// semantics are too complex to express here, in part because they depend /// on the exact constructor or assignment operator that is chosen by /// overload resolution to do the copy. PCK_Struct }; /// Check if this is a non-trivial type that would cause a C struct /// transitively containing this type to be non-trivial to copy and return the /// kind. 
PrimitiveCopyKind isNonTrivialToPrimitiveCopy() const; /// Check if this is a non-trivial type that would cause a C struct /// transitively containing this type to be non-trivial to destructively /// move and return the kind. Destructive move in this context is a C++-style /// move in which the source object is placed in a valid but unspecified state /// after it is moved, as opposed to a truly destructive move in which the /// source object is placed in an uninitialized state. PrimitiveCopyKind isNonTrivialToPrimitiveDestructiveMove() const; enum DestructionKind { DK_none, DK_cxx_destructor, DK_objc_strong_lifetime, DK_objc_weak_lifetime, DK_nontrivial_c_struct }; /// Returns a nonzero value if objects of this type require /// non-trivial work to clean up after. Non-zero because it's /// conceivable that qualifiers (objc_gc(weak)?) could make /// something require destruction. DestructionKind isDestructedType() const { return isDestructedTypeImpl(*this); } /// Check if this is or contains a C union that is non-trivial to /// default-initialize, which is a union that has a member that is non-trivial /// to default-initialize. If this returns true, /// isNonTrivialToPrimitiveDefaultInitialize returns PDIK_Struct. bool hasNonTrivialToPrimitiveDefaultInitializeCUnion() const; /// Check if this is or contains a C union that is non-trivial to destruct, /// which is a union that has a member that is non-trivial to destruct. If /// this returns true, isDestructedType returns DK_nontrivial_c_struct. bool hasNonTrivialToPrimitiveDestructCUnion() const; /// Check if this is or contains a C union that is non-trivial to copy, which /// is a union that has a member that is non-trivial to copy. If this returns /// true, isNonTrivialToPrimitiveCopy returns PCK_Struct. bool hasNonTrivialToPrimitiveCopyCUnion() const; /// Determine whether expressions of the given type are forbidden /// from being lvalues in C. /// /// The expression types that are forbidden to be lvalues are: /// - 'void', but not qualified void /// - function types /// /// The exact rule here is C99 6.3.2.1: /// An lvalue is an expression with an object type or an incomplete /// type other than void. bool isCForbiddenLValueType() const; /// Substitute type arguments for the Objective-C type parameters used in the /// subject type. /// /// \param ctx ASTContext in which the type exists. /// /// \param typeArgs The type arguments that will be substituted for the /// Objective-C type parameters in the subject type, which are generally /// computed via \c Type::getObjCSubstitutions. If empty, the type /// parameters will be replaced with their bounds or id/Class, as appropriate /// for the context. /// /// \param context The context in which the subject type was written. /// /// \returns the resulting type. QualType substObjCTypeArgs(ASTContext &ctx, ArrayRef typeArgs, ObjCSubstitutionContext context) const; /// Substitute type arguments from an object type for the Objective-C type /// parameters used in the subject type. /// /// This operation combines the computation of type arguments for /// substitution (\c Type::getObjCSubstitutions) with the actual process of /// substitution (\c QualType::substObjCTypeArgs) for the convenience of /// callers that need to perform a single substitution in isolation. /// /// \param objectType The type of the object whose member type we're /// substituting into. For example, this might be the receiver of a message /// or the base of a property access. 
/// /// \param dc The declaration context from which the subject type was /// retrieved, which indicates (for example) which type parameters should /// be substituted. /// /// \param context The context in which the subject type was written. /// /// \returns the subject type after replacing all of the Objective-C type /// parameters with their corresponding arguments. QualType substObjCMemberType(QualType objectType, const DeclContext *dc, ObjCSubstitutionContext context) const; /// Strip Objective-C "__kindof" types from the given type. QualType stripObjCKindOfType(const ASTContext &ctx) const; /// Remove all qualifiers including _Atomic. QualType getAtomicUnqualifiedType() const; private: // These methods are implemented in a separate translation unit; // "static"-ize them to avoid creating temporary QualTypes in the // caller. static bool isConstant(QualType T, const ASTContext& Ctx); static QualType getDesugaredType(QualType T, const ASTContext &Context); static SplitQualType getSplitDesugaredType(QualType T); static SplitQualType getSplitUnqualifiedTypeImpl(QualType type); static QualType getSingleStepDesugaredTypeImpl(QualType type, const ASTContext &C); static QualType IgnoreParens(QualType T); static DestructionKind isDestructedTypeImpl(QualType type); /// Check if \param RD is or contains a non-trivial C union. static bool hasNonTrivialToPrimitiveDefaultInitializeCUnion(const RecordDecl *RD); static bool hasNonTrivialToPrimitiveDestructCUnion(const RecordDecl *RD); static bool hasNonTrivialToPrimitiveCopyCUnion(const RecordDecl *RD); }; } // namespace clang namespace llvm { /// Implement simplify_type for QualType, so that we can dyn_cast from QualType /// to a specific Type class. template<> struct simplify_type< ::clang::QualType> { using SimpleType = const ::clang::Type *; static SimpleType getSimplifiedValue(::clang::QualType Val) { return Val.getTypePtr(); } }; // Teach SmallPtrSet that QualType is "basically a pointer". template<> struct PointerLikeTypeTraits { static inline void *getAsVoidPointer(clang::QualType P) { return P.getAsOpaquePtr(); } static inline clang::QualType getFromVoidPointer(void *P) { return clang::QualType::getFromOpaquePtr(P); } // Various qualifiers go in low bits. static constexpr int NumLowBitsAvailable = 0; }; } // namespace llvm namespace clang { /// Base class that is common to both the \c ExtQuals and \c Type /// classes, which allows \c QualType to access the common fields between the /// two. class ExtQualsTypeCommonBase { friend class ExtQuals; friend class QualType; friend class Type; /// The "base" type of an extended qualifiers type (\c ExtQuals) or /// a self-referential pointer (for \c Type). /// /// This pointer allows an efficient mapping from a QualType to its /// underlying type pointer. const Type *const BaseType; /// The canonical type of this type. A QualType. QualType CanonicalType; ExtQualsTypeCommonBase(const Type *baseType, QualType canon) : BaseType(baseType), CanonicalType(canon) {} }; /// We can encode up to four bits in the low bits of a /// type pointer, but there are many more type qualifiers that we want /// to be able to apply to an arbitrary type. Therefore we have this /// struct, intended to be heap-allocated and used by QualType to /// store qualifiers. /// /// The current design tags the 'const', 'restrict', and 'volatile' qualifiers /// in three low bits on the QualType pointer; a fourth bit records whether /// the pointer is an ExtQuals node. 
The extended qualifiers (address spaces, /// Objective-C GC attributes) are much more rare. class ExtQuals : public ExtQualsTypeCommonBase, public llvm::FoldingSetNode { // NOTE: changing the fast qualifiers should be straightforward as // long as you don't make 'const' non-fast. // 1. Qualifiers: // a) Modify the bitmasks (Qualifiers::TQ and DeclSpec::TQ). // Fast qualifiers must occupy the low-order bits. // b) Update Qualifiers::FastWidth and FastMask. // 2. QualType: // a) Update is{Volatile,Restrict}Qualified(), defined inline. // b) Update remove{Volatile,Restrict}, defined near the end of // this header. // 3. ASTContext: // a) Update get{Volatile,Restrict}Type. /// The immutable set of qualifiers applied by this node. Always contains /// extended qualifiers. Qualifiers Quals; ExtQuals *this_() { return this; } public: ExtQuals(const Type *baseType, QualType canon, Qualifiers quals) : ExtQualsTypeCommonBase(baseType, canon.isNull() ? QualType(this_(), 0) : canon), Quals(quals) { assert(Quals.hasNonFastQualifiers() && "ExtQuals created with no fast qualifiers"); assert(!Quals.hasFastQualifiers() && "ExtQuals created with fast qualifiers"); } Qualifiers getQualifiers() const { return Quals; } bool hasObjCGCAttr() const { return Quals.hasObjCGCAttr(); } Qualifiers::GC getObjCGCAttr() const { return Quals.getObjCGCAttr(); } bool hasObjCLifetime() const { return Quals.hasObjCLifetime(); } Qualifiers::ObjCLifetime getObjCLifetime() const { return Quals.getObjCLifetime(); } bool hasAddressSpace() const { return Quals.hasAddressSpace(); } LangAS getAddressSpace() const { return Quals.getAddressSpace(); } const Type *getBaseType() const { return BaseType; } public: void Profile(llvm::FoldingSetNodeID &ID) const { Profile(ID, getBaseType(), Quals); } static void Profile(llvm::FoldingSetNodeID &ID, const Type *BaseType, Qualifiers Quals) { assert(!Quals.hasFastQualifiers() && "fast qualifiers in ExtQuals hash!"); ID.AddPointer(BaseType); Quals.Profile(ID); } }; /// The kind of C++11 ref-qualifier associated with a function type. /// This determines whether a member function's "this" object can be an /// lvalue, rvalue, or neither. enum RefQualifierKind { /// No ref-qualifier was provided. RQ_None = 0, /// An lvalue ref-qualifier was provided (\c &). RQ_LValue, /// An rvalue ref-qualifier was provided (\c &&). RQ_RValue }; /// Which keyword(s) were used to create an AutoType. enum class AutoTypeKeyword { /// auto Auto, /// decltype(auto) DecltypeAuto, /// __auto_type (GNU extension) GNUAutoType }; /// The base class of the type hierarchy. /// /// A central concept with types is that each type always has a canonical /// type. A canonical type is the type with any typedef names stripped out /// of it or the types it references. For example, consider: /// /// typedef int foo; /// typedef foo* bar; /// 'int *' 'foo *' 'bar' /// /// There will be a Type object created for 'int'. Since int is canonical, its /// CanonicalType pointer points to itself. There is also a Type for 'foo' (a /// TypedefType). Its CanonicalType pointer points to the 'int' Type. Next /// there is a PointerType that represents 'int*', which, like 'int', is /// canonical. Finally, there is a PointerType type for 'foo*' whose canonical /// type is 'int*', and there is a TypedefType for 'bar', whose canonical type /// is also 'int*'. /// /// Non-canonical types are useful for emitting diagnostics, without losing /// information about typedefs being used. 
Canonical types are useful for type /// comparisons (they allow by-pointer equality tests) and useful for reasoning /// about whether something has a particular form (e.g. is a function type), /// because they implicitly, recursively, strip all typedefs out of a type. /// /// Types, once created, are immutable. /// class alignas(8) Type : public ExtQualsTypeCommonBase { public: enum TypeClass { #define TYPE(Class, Base) Class, #define LAST_TYPE(Class) TypeLast = Class #define ABSTRACT_TYPE(Class, Base) #include "clang/AST/TypeNodes.inc" }; private: /// Bitfields required by the Type class. class TypeBitfields { friend class Type; template friend class TypePropertyCache; /// TypeClass bitfield - Enum that specifies what subclass this belongs to. unsigned TC : 8; /// Store information on the type dependency. unsigned Dependence : llvm::BitWidth; /// True if the cache (i.e. the bitfields here starting with /// 'Cache') is valid. mutable unsigned CacheValid : 1; /// Linkage of this type. mutable unsigned CachedLinkage : 3; /// Whether this type involves and local or unnamed types. mutable unsigned CachedLocalOrUnnamed : 1; /// Whether this type comes from an AST file. mutable unsigned FromAST : 1; bool isCacheValid() const { return CacheValid; } Linkage getLinkage() const { assert(isCacheValid() && "getting linkage from invalid cache"); return static_cast(CachedLinkage); } bool hasLocalOrUnnamedType() const { assert(isCacheValid() && "getting linkage from invalid cache"); return CachedLocalOrUnnamed; } }; enum { NumTypeBits = 8 + llvm::BitWidth + 6 }; protected: // These classes allow subclasses to somewhat cleanly pack bitfields // into Type. class ArrayTypeBitfields { friend class ArrayType; unsigned : NumTypeBits; /// CVR qualifiers from declarations like /// 'int X[static restrict 4]'. For function parameters only. unsigned IndexTypeQuals : 3; /// Storage class qualifiers from declarations like /// 'int X[static restrict 4]'. For function parameters only. /// Actually an ArrayType::ArraySizeModifier. unsigned SizeModifier : 3; }; class ConstantArrayTypeBitfields { friend class ConstantArrayType; unsigned : NumTypeBits + 3 + 3; /// Whether we have a stored size expression. unsigned HasStoredSizeExpr : 1; }; class BuiltinTypeBitfields { friend class BuiltinType; unsigned : NumTypeBits; /// The kind (BuiltinType::Kind) of builtin type this is. unsigned Kind : 8; }; /// FunctionTypeBitfields store various bits belonging to FunctionProtoType. /// Only common bits are stored here. Additional uncommon bits are stored /// in a trailing object after FunctionProtoType. class FunctionTypeBitfields { friend class FunctionProtoType; friend class FunctionType; unsigned : NumTypeBits; /// Extra information which affects how the function is called, like /// regparm and the calling convention. unsigned ExtInfo : 13; /// The ref-qualifier associated with a \c FunctionProtoType. /// /// This is a value of type \c RefQualifierKind. unsigned RefQualifier : 2; /// Used only by FunctionProtoType, put here to pack with the /// other bitfields. /// The qualifiers are part of FunctionProtoType because... /// /// C++ 8.3.5p4: The return type, the parameter type list and the /// cv-qualifier-seq, [...], are part of the function type. unsigned FastTypeQuals : Qualifiers::FastWidth; /// Whether this function has extended Qualifiers. unsigned HasExtQuals : 1; /// The number of parameters this function has, not counting '...'. 
/// According to [implimits] 8 bits should be enough here but this is /// somewhat easy to exceed with metaprogramming and so we would like to /// keep NumParams as wide as reasonably possible. unsigned NumParams : 16; /// The type of exception specification this function has. unsigned ExceptionSpecType : 4; /// Whether this function has extended parameter information. unsigned HasExtParameterInfos : 1; /// Whether the function is variadic. unsigned Variadic : 1; /// Whether this function has a trailing return type. unsigned HasTrailingReturn : 1; }; class ObjCObjectTypeBitfields { friend class ObjCObjectType; unsigned : NumTypeBits; /// The number of type arguments stored directly on this object type. unsigned NumTypeArgs : 7; /// The number of protocols stored directly on this object type. unsigned NumProtocols : 6; /// Whether this is a "kindof" type. unsigned IsKindOf : 1; }; class ReferenceTypeBitfields { friend class ReferenceType; unsigned : NumTypeBits; /// True if the type was originally spelled with an lvalue sigil. /// This is never true of rvalue references but can also be false /// on lvalue references because of C++0x [dcl.typedef]p9, /// as follows: /// /// typedef int &ref; // lvalue, spelled lvalue /// typedef int &&rvref; // rvalue /// ref &a; // lvalue, inner ref, spelled lvalue /// ref &&a; // lvalue, inner ref /// rvref &a; // lvalue, inner ref, spelled lvalue /// rvref &&a; // rvalue, inner ref unsigned SpelledAsLValue : 1; /// True if the inner type is a reference type. This only happens /// in non-canonical forms. unsigned InnerRef : 1; }; class TypeWithKeywordBitfields { friend class TypeWithKeyword; unsigned : NumTypeBits; /// An ElaboratedTypeKeyword. 8 bits for efficient access. unsigned Keyword : 8; }; enum { NumTypeWithKeywordBits = 8 }; class ElaboratedTypeBitfields { friend class ElaboratedType; unsigned : NumTypeBits; unsigned : NumTypeWithKeywordBits; /// Whether the ElaboratedType has a trailing OwnedTagDecl. unsigned HasOwnedTagDecl : 1; }; class VectorTypeBitfields { friend class VectorType; friend class DependentVectorType; unsigned : NumTypeBits; /// The kind of vector, either a generic vector type or some /// target-specific vector type such as for AltiVec or Neon. unsigned VecKind : 3; /// The number of elements in the vector. uint32_t NumElements; }; class ConstantMatrixTypeBitfields { friend class ConstantMatrixType; unsigned : NumTypeBits; /// Number of rows and columns. Using 20 bits allows supporting very large /// matrixes, while keeping 24 bits to accommodate NumTypeBits. unsigned NumRows : 20; unsigned NumColumns : 20; static constexpr uint32_t MaxElementsPerDimension = (1 << 20) - 1; }; class AttributedTypeBitfields { friend class AttributedType; unsigned : NumTypeBits; /// An AttributedType::Kind unsigned AttrKind : 32 - NumTypeBits; }; class AutoTypeBitfields { friend class AutoType; unsigned : NumTypeBits; /// Was this placeholder type spelled as 'auto', 'decltype(auto)', /// or '__auto_type'? AutoTypeKeyword value. unsigned Keyword : 2; /// The number of template arguments in the type-constraints, which is /// expected to be able to hold at least 1024 according to [implimits]. /// However as this limit is somewhat easy to hit with template /// metaprogramming we'd prefer to keep it as large as possible. /// At the moment it has been left as a non-bitfield since this type /// safely fits in 64 bits as an unsigned, so there is no reason to /// introduce the performance impact of a bitfield. 
unsigned NumArgs; }; class SubstTemplateTypeParmPackTypeBitfields { friend class SubstTemplateTypeParmPackType; unsigned : NumTypeBits; /// The number of template arguments in \c Arguments, which is /// expected to be able to hold at least 1024 according to [implimits]. /// However as this limit is somewhat easy to hit with template /// metaprogramming we'd prefer to keep it as large as possible. /// At the moment it has been left as a non-bitfield since this type /// safely fits in 64 bits as an unsigned, so there is no reason to /// introduce the performance impact of a bitfield. unsigned NumArgs; }; class TemplateSpecializationTypeBitfields { friend class TemplateSpecializationType; unsigned : NumTypeBits; /// Whether this template specialization type is a substituted type alias. unsigned TypeAlias : 1; /// The number of template arguments named in this class template /// specialization, which is expected to be able to hold at least 1024 /// according to [implimits]. However, as this limit is somewhat easy to /// hit with template metaprogramming we'd prefer to keep it as large /// as possible. At the moment it has been left as a non-bitfield since /// this type safely fits in 64 bits as an unsigned, so there is no reason /// to introduce the performance impact of a bitfield. unsigned NumArgs; }; class DependentTemplateSpecializationTypeBitfields { friend class DependentTemplateSpecializationType; unsigned : NumTypeBits; unsigned : NumTypeWithKeywordBits; /// The number of template arguments named in this class template /// specialization, which is expected to be able to hold at least 1024 /// according to [implimits]. However, as this limit is somewhat easy to /// hit with template metaprogramming we'd prefer to keep it as large /// as possible. At the moment it has been left as a non-bitfield since /// this type safely fits in 64 bits as an unsigned, so there is no reason /// to introduce the performance impact of a bitfield. unsigned NumArgs; }; class PackExpansionTypeBitfields { friend class PackExpansionType; unsigned : NumTypeBits; /// The number of expansions that this pack expansion will /// generate when substituted (+1), which is expected to be able to /// hold at least 1024 according to [implimits]. However, as this limit /// is somewhat easy to hit with template metaprogramming we'd prefer to /// keep it as large as possible. At the moment it has been left as a /// non-bitfield since this type safely fits in 64 bits as an unsigned, so /// there is no reason to introduce the performance impact of a bitfield. /// /// This field will only have a non-zero value when some of the parameter /// packs that occur within the pattern have been substituted but others /// have not. 
unsigned NumExpansions; }; union { TypeBitfields TypeBits; ArrayTypeBitfields ArrayTypeBits; ConstantArrayTypeBitfields ConstantArrayTypeBits; AttributedTypeBitfields AttributedTypeBits; AutoTypeBitfields AutoTypeBits; BuiltinTypeBitfields BuiltinTypeBits; FunctionTypeBitfields FunctionTypeBits; ObjCObjectTypeBitfields ObjCObjectTypeBits; ReferenceTypeBitfields ReferenceTypeBits; TypeWithKeywordBitfields TypeWithKeywordBits; ElaboratedTypeBitfields ElaboratedTypeBits; VectorTypeBitfields VectorTypeBits; ConstantMatrixTypeBitfields ConstantMatrixTypeBits; SubstTemplateTypeParmPackTypeBitfields SubstTemplateTypeParmPackTypeBits; TemplateSpecializationTypeBitfields TemplateSpecializationTypeBits; DependentTemplateSpecializationTypeBitfields DependentTemplateSpecializationTypeBits; PackExpansionTypeBitfields PackExpansionTypeBits; }; static_assert(sizeof(TypeBitfields) <= 8, "TypeBitfields is larger than 8 bytes!"); static_assert(sizeof(ArrayTypeBitfields) <= 8, "ArrayTypeBitfields is larger than 8 bytes!"); static_assert(sizeof(AttributedTypeBitfields) <= 8, "AttributedTypeBitfields is larger than 8 bytes!"); static_assert(sizeof(AutoTypeBitfields) <= 8, "AutoTypeBitfields is larger than 8 bytes!"); static_assert(sizeof(BuiltinTypeBitfields) <= 8, "BuiltinTypeBitfields is larger than 8 bytes!"); static_assert(sizeof(FunctionTypeBitfields) <= 8, "FunctionTypeBitfields is larger than 8 bytes!"); static_assert(sizeof(ObjCObjectTypeBitfields) <= 8, "ObjCObjectTypeBitfields is larger than 8 bytes!"); static_assert(sizeof(ReferenceTypeBitfields) <= 8, "ReferenceTypeBitfields is larger than 8 bytes!"); static_assert(sizeof(TypeWithKeywordBitfields) <= 8, "TypeWithKeywordBitfields is larger than 8 bytes!"); static_assert(sizeof(ElaboratedTypeBitfields) <= 8, "ElaboratedTypeBitfields is larger than 8 bytes!"); static_assert(sizeof(VectorTypeBitfields) <= 8, "VectorTypeBitfields is larger than 8 bytes!"); static_assert(sizeof(SubstTemplateTypeParmPackTypeBitfields) <= 8, "SubstTemplateTypeParmPackTypeBitfields is larger" " than 8 bytes!"); static_assert(sizeof(TemplateSpecializationTypeBitfields) <= 8, "TemplateSpecializationTypeBitfields is larger" " than 8 bytes!"); static_assert(sizeof(DependentTemplateSpecializationTypeBitfields) <= 8, "DependentTemplateSpecializationTypeBitfields is larger" " than 8 bytes!"); static_assert(sizeof(PackExpansionTypeBitfields) <= 8, "PackExpansionTypeBitfields is larger than 8 bytes"); private: template friend class TypePropertyCache; /// Set whether this type comes from an AST file. void setFromAST(bool V = true) const { TypeBits.FromAST = V; } protected: friend class ASTContext; Type(TypeClass tc, QualType canon, TypeDependence Dependence) : ExtQualsTypeCommonBase(this, canon.isNull() ? 
QualType(this_(), 0) : canon) { TypeBits.TC = tc; TypeBits.Dependence = static_cast(Dependence); TypeBits.CacheValid = false; TypeBits.CachedLocalOrUnnamed = false; TypeBits.CachedLinkage = NoLinkage; TypeBits.FromAST = false; } // silence VC++ warning C4355: 'this' : used in base member initializer list Type *this_() { return this; } void setDependence(TypeDependence D) { TypeBits.Dependence = static_cast(D); } void addDependence(TypeDependence D) { setDependence(getDependence() | D); } public: friend class ASTReader; friend class ASTWriter; template friend class serialization::AbstractTypeReader; template friend class serialization::AbstractTypeWriter; Type(const Type &) = delete; Type(Type &&) = delete; Type &operator=(const Type &) = delete; Type &operator=(Type &&) = delete; TypeClass getTypeClass() const { return static_cast(TypeBits.TC); } /// Whether this type comes from an AST file. bool isFromAST() const { return TypeBits.FromAST; } /// Whether this type is or contains an unexpanded parameter /// pack, used to support C++0x variadic templates. /// /// A type that contains a parameter pack shall be expanded by the /// ellipsis operator at some point. For example, the typedef in the /// following example contains an unexpanded parameter pack 'T': /// /// \code /// template /// struct X { /// typedef T* pointer_types; // ill-formed; T is a parameter pack. /// }; /// \endcode /// /// Note that this routine does not specify which bool containsUnexpandedParameterPack() const { return getDependence() & TypeDependence::UnexpandedPack; } /// Determines if this type would be canonical if it had no further /// qualification. bool isCanonicalUnqualified() const { return CanonicalType == QualType(this, 0); } /// Pull a single level of sugar off of this locally-unqualified type. /// Users should generally prefer SplitQualType::getSingleStepDesugaredType() /// or QualType::getSingleStepDesugaredType(const ASTContext&). QualType getLocallyUnqualifiedSingleStepDesugaredType() const; /// As an extension, we classify types as one of "sized" or "sizeless"; /// every type is one or the other. Standard types are all sized; /// sizeless types are purely an extension. /// /// Sizeless types contain data with no specified size, alignment, /// or layout. bool isSizelessType() const; bool isSizelessBuiltinType() const; /// Types are partitioned into 3 broad categories (C99 6.2.5p1): /// object types, function types, and incomplete types. /// Return true if this is an incomplete type. /// A type that can describe objects, but which lacks information needed to /// determine its size (e.g. void, or a fwd declared struct). Clients of this /// routine will need to determine if the size is actually required. /// /// Def If non-null, and the type refers to some kind of declaration /// that can be completed (such as a C struct, C++ class, or Objective-C /// class), will be set to the declaration. bool isIncompleteType(NamedDecl **Def = nullptr) const; /// Return true if this is an incomplete or object /// type, in other words, not a function type. bool isIncompleteOrObjectType() const { return !isFunctionType(); } /// Determine whether this type is an object type. bool isObjectType() const { // C++ [basic.types]p8: // An object type is a (possibly cv-qualified) type that is not a // function type, not a reference type, and not a void type. 
return !isReferenceType() && !isFunctionType() && !isVoidType(); } /// Return true if this is a literal type /// (C++11 [basic.types]p10) bool isLiteralType(const ASTContext &Ctx) const; /// Test if this type is a standard-layout type. /// (C++0x [basic.type]p9) bool isStandardLayoutType() const; /// Helper methods to distinguish type categories. All type predicates /// operate on the canonical type, ignoring typedefs and qualifiers. /// Returns true if the type is a builtin type. bool isBuiltinType() const; /// Test for a particular builtin type. bool isSpecificBuiltinType(unsigned K) const; /// Test for a type which does not represent an actual type-system type but /// is instead used as a placeholder for various convenient purposes within /// Clang. All such types are BuiltinTypes. bool isPlaceholderType() const; const BuiltinType *getAsPlaceholderType() const; /// Test for a specific placeholder type. bool isSpecificPlaceholderType(unsigned K) const; /// Test for a placeholder type other than Overload; see /// BuiltinType::isNonOverloadPlaceholderType. bool isNonOverloadPlaceholderType() const; /// isIntegerType() does *not* include complex integers (a GCC extension). /// isComplexIntegerType() can be used to test for complex integers. bool isIntegerType() const; // C99 6.2.5p17 (int, char, bool, enum) bool isEnumeralType() const; /// Determine whether this type is a scoped enumeration type. bool isScopedEnumeralType() const; bool isBooleanType() const; bool isCharType() const; bool isWideCharType() const; bool isChar8Type() const; bool isChar16Type() const; bool isChar32Type() const; bool isAnyCharacterType() const; bool isIntegralType(const ASTContext &Ctx) const; /// Determine whether this type is an integral or enumeration type. bool isIntegralOrEnumerationType() const; /// Determine whether this type is an integral or unscoped enumeration type. bool isIntegralOrUnscopedEnumerationType() const; bool isUnscopedEnumerationType() const; /// Floating point categories. bool isRealFloatingType() const; // C99 6.2.5p10 (float, double, long double) /// isComplexType() does *not* include complex integers (a GCC extension). /// isComplexIntegerType() can be used to test for complex integers. bool isComplexType() const; // C99 6.2.5p11 (complex) bool isAnyComplexType() const; // C99 6.2.5p11 (complex) + Complex Int. bool isFloatingType() const; // C99 6.2.5p11 (real floating + complex) bool isHalfType() const; // OpenCL 6.1.1.1, NEON (IEEE 754-2008 half) bool isFloat16Type() const; // C11 extension ISO/IEC TS 18661 bool isBFloat16Type() const; bool isFloat128Type() const; bool isRealType() const; // C99 6.2.5p17 (real floating + integer) bool isArithmeticType() const; // C99 6.2.5p18 (integer + floating) bool isVoidType() const; // C99 6.2.5p19 bool isScalarType() const; // C99 6.2.5p21 (arithmetic + pointers) bool isAggregateType() const; bool isFundamentalType() const; bool isCompoundType() const; // Type Predicates: Check to see if this type is structurally the specified // type, ignoring typedefs and qualifiers. 
  bool isFunctionType() const;
  bool isFunctionNoProtoType() const { return getAs<FunctionNoProtoType>(); }
  bool isFunctionProtoType() const { return getAs<FunctionProtoType>(); }
  bool isPointerType() const;
  bool isAnyPointerType() const;            // Any C pointer or ObjC object pointer
  bool isBlockPointerType() const;
  bool isVoidPointerType() const;
  bool isReferenceType() const;
  bool isLValueReferenceType() const;
  bool isRValueReferenceType() const;
  bool isObjectPointerType() const;
  bool isFunctionPointerType() const;
  bool isFunctionReferenceType() const;
  bool isMemberPointerType() const;
  bool isMemberFunctionPointerType() const;
  bool isMemberDataPointerType() const;
  bool isArrayType() const;
  bool isConstantArrayType() const;
  bool isIncompleteArrayType() const;
  bool isVariableArrayType() const;
  bool isDependentSizedArrayType() const;
  bool isRecordType() const;
  bool isClassType() const;
  bool isStructureType() const;
  bool isObjCBoxableRecordType() const;
  bool isInterfaceType() const;
  bool isStructureOrClassType() const;
  bool isUnionType() const;
  bool isComplexIntegerType() const;        // GCC _Complex integer type.
  bool isVectorType() const;                // GCC vector type.
  bool isExtVectorType() const;             // Extended vector type.
  bool isMatrixType() const;                // Matrix type.
  bool isConstantMatrixType() const;        // Constant matrix type.
  bool isDependentAddressSpaceType() const; // value-dependent address space qualifier
  bool isObjCObjectPointerType() const;     // pointer to ObjC object
  bool isObjCRetainableType() const;        // ObjC object or block pointer
  bool isObjCLifetimeType() const;          // (array of)* retainable type
  bool isObjCIndirectLifetimeType() const;  // (pointer to)* lifetime type
  bool isObjCNSObjectType() const;          // __attribute__((NSObject))
  bool isObjCIndependentClassType() const;  // __attribute__((objc_independent_class))
  // FIXME: change this to 'raw' interface type, so we can use 'interface' type
  // for the common case.
  bool isObjCObjectType() const;            // NSString or typeof(*(id<foo>)0)
  bool isObjCQualifiedInterfaceType() const; // NSString<foo>
  bool isObjCQualifiedIdType() const;       // id<foo>
  bool isObjCQualifiedClassType() const;    // Class<foo>
  bool isObjCObjectOrInterfaceType() const;
  bool isObjCIdType() const;                // id
  bool isDecltypeType() const;

  /// Was this type written with the special inert-in-ARC __unsafe_unretained
  /// qualifier?
  ///
  /// This approximates the answer to the following question: if this
  /// translation unit were compiled in ARC, would this type be qualified
  /// with __unsafe_unretained?
  bool isObjCInertUnsafeUnretainedType() const {
    return hasAttr(attr::ObjCInertUnsafeUnretained);
  }

  /// Whether the type is Objective-C 'id' or a __kindof type of an
  /// object type, e.g., __kindof NSView * or __kindof id <A>.
  ///
  /// \param bound Will be set to the bound on non-id subtype types,
  /// which will be (possibly specialized) Objective-C class type, or
  /// null for 'id'.
  bool isObjCIdOrObjectKindOfType(const ASTContext &ctx,
                                  const ObjCObjectType *&bound) const;

  bool isObjCClassType() const;             // Class

  /// Whether the type is Objective-C 'Class' or a __kindof type of an
  /// Class type, e.g., __kindof Class <P>.
  ///
  /// Unlike \c isObjCIdOrObjectKindOfType, there is no relevant bound
  /// here because Objective-C's type system cannot express "a class
  /// object for a subclass of NSFoo."
bool isObjCClassOrClassKindOfType() const; bool isBlockCompatibleObjCPointerType(ASTContext &ctx) const; bool isObjCSelType() const; // Class bool isObjCBuiltinType() const; // 'id' or 'Class' bool isObjCARCBridgableType() const; bool isCARCBridgableType() const; bool isTemplateTypeParmType() const; // C++ template type parameter bool isNullPtrType() const; // C++11 std::nullptr_t bool isNothrowT() const; // C++ std::nothrow_t bool isAlignValT() const; // C++17 std::align_val_t bool isStdByteType() const; // C++17 std::byte bool isAtomicType() const; // C11 _Atomic() bool isUndeducedAutoType() const; // C++11 auto or // C++14 decltype(auto) #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ bool is##Id##Type() const; #include "clang/Basic/OpenCLImageTypes.def" bool isImageType() const; // Any OpenCL image type bool isSamplerT() const; // OpenCL sampler_t bool isEventT() const; // OpenCL event_t bool isClkEventT() const; // OpenCL clk_event_t bool isQueueT() const; // OpenCL queue_t bool isReserveIDT() const; // OpenCL reserve_id_t #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ bool is##Id##Type() const; #include "clang/Basic/OpenCLExtensionTypes.def" // Type defined in cl_intel_device_side_avc_motion_estimation OpenCL extension bool isOCLIntelSubgroupAVCType() const; bool isOCLExtOpaqueType() const; // Any OpenCL extension type bool isPipeType() const; // OpenCL pipe type bool isExtIntType() const; // Extended Int Type bool isOpenCLSpecificType() const; // Any OpenCL specific type /// Determines if this type, which must satisfy /// isObjCLifetimeType(), is implicitly __unsafe_unretained rather /// than implicitly __strong. bool isObjCARCImplicitlyUnretainedType() const; /// Check if the type is the CUDA device builtin surface type. bool isCUDADeviceBuiltinSurfaceType() const; /// Check if the type is the CUDA device builtin texture type. bool isCUDADeviceBuiltinTextureType() const; /// Return the implicit lifetime for this type, which must not be dependent. Qualifiers::ObjCLifetime getObjCARCImplicitLifetime() const; enum ScalarTypeKind { STK_CPointer, STK_BlockPointer, STK_ObjCObjectPointer, STK_MemberPointer, STK_Bool, STK_Integral, STK_Floating, STK_IntegralComplex, STK_FloatingComplex, STK_FixedPoint }; /// Given that this is a scalar type, classify it. ScalarTypeKind getScalarTypeKind() const; TypeDependence getDependence() const { return static_cast(TypeBits.Dependence); } /// Whether this type is an error type. bool containsErrors() const { return getDependence() & TypeDependence::Error; } /// Whether this type is a dependent type, meaning that its definition /// somehow depends on a template parameter (C++ [temp.dep.type]). bool isDependentType() const { return getDependence() & TypeDependence::Dependent; } /// Determine whether this type is an instantiation-dependent type, /// meaning that the type involves a template parameter (even if the /// definition does not actually depend on the type substituted for that /// template parameter). bool isInstantiationDependentType() const { return getDependence() & TypeDependence::Instantiation; } /// Determine whether this type is an undeduced type, meaning that /// it somehow involves a C++11 'auto' type or similar which has not yet been /// deduced. bool isUndeducedType() const; /// Whether this type is a variably-modified type (C99 6.7.5). bool isVariablyModifiedType() const { return getDependence() & TypeDependence::VariablyModified; } /// Whether this type involves a variable-length array type /// with a definite size. 
bool hasSizedVLAType() const; /// Whether this type is or contains a local or unnamed type. bool hasUnnamedOrLocalType() const; bool isOverloadableType() const; /// Determine wither this type is a C++ elaborated-type-specifier. bool isElaboratedTypeSpecifier() const; bool canDecayToPointerType() const; /// Whether this type is represented natively as a pointer. This includes /// pointers, references, block pointers, and Objective-C interface, /// qualified id, and qualified interface types, as well as nullptr_t. bool hasPointerRepresentation() const; /// Whether this type can represent an objective pointer type for the /// purpose of GC'ability bool hasObjCPointerRepresentation() const; /// Determine whether this type has an integer representation /// of some sort, e.g., it is an integer type or a vector. bool hasIntegerRepresentation() const; /// Determine whether this type has an signed integer representation /// of some sort, e.g., it is an signed integer type or a vector. bool hasSignedIntegerRepresentation() const; /// Determine whether this type has an unsigned integer representation /// of some sort, e.g., it is an unsigned integer type or a vector. bool hasUnsignedIntegerRepresentation() const; /// Determine whether this type has a floating-point representation /// of some sort, e.g., it is a floating-point type or a vector thereof. bool hasFloatingRepresentation() const; // Type Checking Functions: Check to see if this type is structurally the // specified type, ignoring typedefs and qualifiers, and return a pointer to // the best type we can. const RecordType *getAsStructureType() const; /// NOTE: getAs*ArrayType are methods on ASTContext. const RecordType *getAsUnionType() const; const ComplexType *getAsComplexIntegerType() const; // GCC complex int type. const ObjCObjectType *getAsObjCInterfaceType() const; // The following is a convenience method that returns an ObjCObjectPointerType // for object declared using an interface. const ObjCObjectPointerType *getAsObjCInterfacePointerType() const; const ObjCObjectPointerType *getAsObjCQualifiedIdType() const; const ObjCObjectPointerType *getAsObjCQualifiedClassType() const; const ObjCObjectType *getAsObjCQualifiedInterfaceType() const; /// Retrieves the CXXRecordDecl that this type refers to, either /// because the type is a RecordType or because it is the injected-class-name /// type of a class template or class template partial specialization. CXXRecordDecl *getAsCXXRecordDecl() const; /// Retrieves the RecordDecl this type refers to. RecordDecl *getAsRecordDecl() const; /// Retrieves the TagDecl that this type refers to, either /// because the type is a TagType or because it is the injected-class-name /// type of a class template or class template partial specialization. TagDecl *getAsTagDecl() const; /// If this is a pointer or reference to a RecordType, return the /// CXXRecordDecl that the type refers to. /// /// If this is not a pointer or reference, or the type being pointed to does /// not refer to a CXXRecordDecl, returns NULL. const CXXRecordDecl *getPointeeCXXRecordDecl() const; /// Get the DeducedType whose type will be deduced for a variable with /// an initializer of this type. This looks through declarators like pointer /// types, but not through decltype or typedefs. DeducedType *getContainedDeducedType() const; /// Get the AutoType whose type will be deduced for a variable with /// an initializer of this type. This looks through declarators like pointer /// types, but not through decltype or typedefs. 
  AutoType *getContainedAutoType() const {
    return dyn_cast_or_null<AutoType>(getContainedDeducedType());
  }

  /// Determine whether this type was written with a leading 'auto'
  /// corresponding to a trailing return type (possibly for a nested
  /// function type within a pointer to function type or similar).
  bool hasAutoForTrailingReturnType() const;

  /// Member-template getAs<specific type>'. Look through sugar for
  /// an instance of \<specific type>. This scheme will eventually
  /// replace the specific getAsXXXX methods above.
  ///
  /// There are some specializations of this member template listed
  /// immediately following this class.
  template <typename T> const T *getAs() const;

  /// Member-template getAsAdjusted<specific type>. Look through specific kinds
  /// of sugar (parens, attributes, etc) for an instance of \<specific type>.
  /// This is used when you need to walk over sugar nodes that represent some
  /// kind of type adjustment from a type that was written as a \<specific type>
  /// to another type that is still canonically a \<specific type>.
  template <typename T> const T *getAsAdjusted() const;

  /// A variant of getAs<> for array types which silently discards
  /// qualifiers from the outermost type.
  const ArrayType *getAsArrayTypeUnsafe() const;

  /// Member-template castAs<specific type>. Look through sugar for
  /// the underlying instance of \<specific type>.
  ///
  /// This method has the same relationship to getAs as cast has
  /// to dyn_cast; which is to say, the underlying type *must*
  /// have the intended type, and this method will never return null.
  template <typename T> const T *castAs() const;

  /// A variant of castAs<> for array type which silently discards
  /// qualifiers from the outermost type.
  const ArrayType *castAsArrayTypeUnsafe() const;

  /// Determine whether this type had the specified attribute applied to it
  /// (looking through top-level type sugar).
  bool hasAttr(attr::Kind AK) const;

  /// Get the base element type of this type, potentially discarding type
  /// qualifiers. This should never be used when type qualifiers
  /// are meaningful.
  const Type *getBaseElementTypeUnsafe() const;

  /// If this is an array type, return the element type of the array,
  /// potentially with type qualifiers missing.
  /// This should never be used when type qualifiers are meaningful.
  const Type *getArrayElementTypeNoTypeQual() const;

  /// If this is a pointer type, return the pointee type.
  /// If this is an array type, return the array element type.
  /// This should never be used when type qualifiers are meaningful.
  const Type *getPointeeOrArrayElementType() const;

  /// If this is a pointer, ObjC object pointer, or block
  /// pointer, this returns the respective pointee.
  QualType getPointeeType() const;

  /// Return the specified type with any "sugar" removed from the type,
  /// removing any typedefs, typeofs, etc., as well as any qualifiers.
  const Type *getUnqualifiedDesugaredType() const;

  /// More type predicates useful for type checking/promotion
  bool isPromotableIntegerType() const; // C99 6.3.1.1p2

  /// Return true if this is an integer type that is
  /// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
  /// or an enum decl which has a signed representation.
  bool isSignedIntegerType() const;

  /// Return true if this is an integer type that is
  /// unsigned, according to C99 6.2.5p6 [which returns true for _Bool],
  /// or an enum decl which has an unsigned representation.
  bool isUnsignedIntegerType() const;

  /// Determines whether this is an integer type that is signed or an
  /// enumeration type whose underlying type is a signed integer type.
bool isSignedIntegerOrEnumerationType() const; /// Determines whether this is an integer type that is unsigned or an /// enumeration types whose underlying type is a unsigned integer type. bool isUnsignedIntegerOrEnumerationType() const; /// Return true if this is a fixed point type according to /// ISO/IEC JTC1 SC22 WG14 N1169. bool isFixedPointType() const; /// Return true if this is a fixed point or integer type. bool isFixedPointOrIntegerType() const; /// Return true if this is a saturated fixed point type according to /// ISO/IEC JTC1 SC22 WG14 N1169. This type can be signed or unsigned. bool isSaturatedFixedPointType() const; /// Return true if this is a saturated fixed point type according to /// ISO/IEC JTC1 SC22 WG14 N1169. This type can be signed or unsigned. bool isUnsaturatedFixedPointType() const; /// Return true if this is a fixed point type that is signed according /// to ISO/IEC JTC1 SC22 WG14 N1169. This type can also be saturated. bool isSignedFixedPointType() const; /// Return true if this is a fixed point type that is unsigned according /// to ISO/IEC JTC1 SC22 WG14 N1169. This type can also be saturated. bool isUnsignedFixedPointType() const; /// Return true if this is not a variable sized type, /// according to the rules of C99 6.7.5p3. It is not legal to call this on /// incomplete types. bool isConstantSizeType() const; /// Returns true if this type can be represented by some /// set of type specifiers. bool isSpecifierType() const; /// Determine the linkage of this type. Linkage getLinkage() const; /// Determine the visibility of this type. Visibility getVisibility() const { return getLinkageAndVisibility().getVisibility(); } /// Return true if the visibility was explicitly set is the code. bool isVisibilityExplicit() const { return getLinkageAndVisibility().isVisibilityExplicit(); } /// Determine the linkage and visibility of this type. LinkageInfo getLinkageAndVisibility() const; /// True if the computed linkage is valid. Used for consistency /// checking. Should always return true. bool isLinkageValid() const; /// Determine the nullability of the given type. /// /// Note that nullability is only captured as sugar within the type /// system, not as part of the canonical type, so nullability will /// be lost by canonicalization and desugaring. Optional getNullability(const ASTContext &context) const; /// Determine whether the given type can have a nullability /// specifier applied to it, i.e., if it is any kind of pointer type. /// /// \param ResultIfUnknown The value to return if we don't yet know whether /// this type can have nullability because it is dependent. bool canHaveNullability(bool ResultIfUnknown = true) const; /// Retrieve the set of substitutions required when accessing a member /// of the Objective-C receiver type that is declared in the given context. /// /// \c *this is the type of the object we're operating on, e.g., the /// receiver for a message send or the base of a property access, and is /// expected to be of some object or object pointer type. /// /// \param dc The declaration context for which we are building up a /// substitution mapping, which should be an Objective-C class, extension, /// category, or method within. /// /// \returns an array of type arguments that can be substituted for /// the type parameters of the given declaration context in any type described /// within that context, or an empty optional to indicate that no /// substitution is required. 
Optional> getObjCSubstitutions(const DeclContext *dc) const; /// Determines if this is an ObjC interface type that may accept type /// parameters. bool acceptsObjCTypeParams() const; const char *getTypeClassName() const; QualType getCanonicalTypeInternal() const { return CanonicalType; } CanQualType getCanonicalTypeUnqualified() const; // in CanonicalType.h void dump() const; void dump(llvm::raw_ostream &OS, const ASTContext &Context) const; }; /// This will check for a TypedefType by removing any existing sugar /// until it reaches a TypedefType or a non-sugared type. template <> const TypedefType *Type::getAs() const; /// This will check for a TemplateSpecializationType by removing any /// existing sugar until it reaches a TemplateSpecializationType or a /// non-sugared type. template <> const TemplateSpecializationType *Type::getAs() const; /// This will check for an AttributedType by removing any existing sugar /// until it reaches an AttributedType or a non-sugared type. template <> const AttributedType *Type::getAs() const; // We can do canonical leaf types faster, because we don't have to // worry about preserving child type decoration. #define TYPE(Class, Base) #define LEAF_TYPE(Class) \ template <> inline const Class##Type *Type::getAs() const { \ return dyn_cast(CanonicalType); \ } \ template <> inline const Class##Type *Type::castAs() const { \ return cast(CanonicalType); \ } #include "clang/AST/TypeNodes.inc" /// This class is used for builtin types like 'int'. Builtin /// types are always canonical and have a literal name field. class BuiltinType : public Type { public: enum Kind { // OpenCL image types #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) Id, #include "clang/Basic/OpenCLImageTypes.def" // OpenCL extension types #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) Id, #include "clang/Basic/OpenCLExtensionTypes.def" // SVE Types #define SVE_TYPE(Name, Id, SingletonId) Id, #include "clang/Basic/AArch64SVEACLETypes.def" // All other builtin types #define BUILTIN_TYPE(Id, SingletonId) Id, #define LAST_BUILTIN_TYPE(Id) LastKind = Id #include "clang/AST/BuiltinTypes.def" }; private: friend class ASTContext; // ASTContext creates these. BuiltinType(Kind K) : Type(Builtin, QualType(), K == Dependent ? TypeDependence::DependentInstantiation : TypeDependence::None) { BuiltinTypeBits.Kind = K; } public: Kind getKind() const { return static_cast(BuiltinTypeBits.Kind); } StringRef getName(const PrintingPolicy &Policy) const; const char *getNameAsCString(const PrintingPolicy &Policy) const { // The StringRef is null-terminated. StringRef str = getName(Policy); assert(!str.empty() && str.data()[str.size()] == '\0'); return str.data(); } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } bool isInteger() const { return getKind() >= Bool && getKind() <= Int128; } bool isSignedInteger() const { return getKind() >= Char_S && getKind() <= Int128; } bool isUnsignedInteger() const { return getKind() >= Bool && getKind() <= UInt128; } bool isFloatingPoint() const { return getKind() >= Half && getKind() <= Float128; } /// Determines whether the given kind corresponds to a placeholder type. static bool isPlaceholderTypeKind(Kind K) { return K >= Overload; } /// Determines whether this type is a placeholder type, i.e. a type /// which cannot appear in arbitrary positions in a fully-formed /// expression. 
bool isPlaceholderType() const { return isPlaceholderTypeKind(getKind()); } /// Determines whether this type is a placeholder type other than /// Overload. Most placeholder types require only syntactic /// information about their context in order to be resolved (e.g. /// whether it is a call expression), which means they can (and /// should) be resolved in an earlier "phase" of analysis. /// Overload expressions sometimes pick up further information /// from their context, like whether the context expects a /// specific function-pointer type, and so frequently need /// special treatment. bool isNonOverloadPlaceholderType() const { return getKind() > Overload; } static bool classof(const Type *T) { return T->getTypeClass() == Builtin; } }; /// Complex values, per C99 6.2.5p11. This supports the C99 complex /// types (_Complex float etc) as well as the GCC integer complex extensions. class ComplexType : public Type, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these. QualType ElementType; ComplexType(QualType Element, QualType CanonicalPtr) : Type(Complex, CanonicalPtr, Element->getDependence()), ElementType(Element) {} public: QualType getElementType() const { return ElementType; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getElementType()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType Element) { ID.AddPointer(Element.getAsOpaquePtr()); } static bool classof(const Type *T) { return T->getTypeClass() == Complex; } }; /// Sugar for parentheses used when specifying types. class ParenType : public Type, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these. QualType Inner; ParenType(QualType InnerType, QualType CanonType) : Type(Paren, CanonType, InnerType->getDependence()), Inner(InnerType) {} public: QualType getInnerType() const { return Inner; } bool isSugared() const { return true; } QualType desugar() const { return getInnerType(); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getInnerType()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType Inner) { Inner.Profile(ID); } static bool classof(const Type *T) { return T->getTypeClass() == Paren; } }; /// PointerType - C99 6.7.5.1 - Pointer Declarators. class PointerType : public Type, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these. QualType PointeeType; PointerType(QualType Pointee, QualType CanonicalPtr) : Type(Pointer, CanonicalPtr, Pointee->getDependence()), PointeeType(Pointee) {} public: QualType getPointeeType() const { return PointeeType; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getPointeeType()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) { ID.AddPointer(Pointee.getAsOpaquePtr()); } static bool classof(const Type *T) { return T->getTypeClass() == Pointer; } }; /// Represents a type which was implicitly adjusted by the semantic /// engine for arbitrary reasons. For example, array and function types can /// decay, and function types can have their calling conventions adjusted. class AdjustedType : public Type, public llvm::FoldingSetNode { QualType OriginalTy; QualType AdjustedTy; protected: friend class ASTContext; // ASTContext creates these. 
AdjustedType(TypeClass TC, QualType OriginalTy, QualType AdjustedTy, QualType CanonicalPtr) : Type(TC, CanonicalPtr, OriginalTy->getDependence()), OriginalTy(OriginalTy), AdjustedTy(AdjustedTy) {} public: QualType getOriginalType() const { return OriginalTy; } QualType getAdjustedType() const { return AdjustedTy; } bool isSugared() const { return true; } QualType desugar() const { return AdjustedTy; } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, OriginalTy, AdjustedTy); } static void Profile(llvm::FoldingSetNodeID &ID, QualType Orig, QualType New) { ID.AddPointer(Orig.getAsOpaquePtr()); ID.AddPointer(New.getAsOpaquePtr()); } static bool classof(const Type *T) { return T->getTypeClass() == Adjusted || T->getTypeClass() == Decayed; } }; /// Represents a pointer type decayed from an array or function type. class DecayedType : public AdjustedType { friend class ASTContext; // ASTContext creates these. inline DecayedType(QualType OriginalType, QualType Decayed, QualType Canonical); public: QualType getDecayedType() const { return getAdjustedType(); } inline QualType getPointeeType() const; static bool classof(const Type *T) { return T->getTypeClass() == Decayed; } }; /// Pointer to a block type. /// This type is to represent types syntactically represented as /// "void (^)(int)", etc. Pointee is required to always be a function type. class BlockPointerType : public Type, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these. // Block is some kind of pointer type QualType PointeeType; BlockPointerType(QualType Pointee, QualType CanonicalCls) : Type(BlockPointer, CanonicalCls, Pointee->getDependence()), PointeeType(Pointee) {} public: // Get the pointee type. Pointee is required to always be a function type. QualType getPointeeType() const { return PointeeType; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getPointeeType()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) { ID.AddPointer(Pointee.getAsOpaquePtr()); } static bool classof(const Type *T) { return T->getTypeClass() == BlockPointer; } }; /// Base for LValueReferenceType and RValueReferenceType class ReferenceType : public Type, public llvm::FoldingSetNode { QualType PointeeType; protected: ReferenceType(TypeClass tc, QualType Referencee, QualType CanonicalRef, bool SpelledAsLValue) : Type(tc, CanonicalRef, Referencee->getDependence()), PointeeType(Referencee) { ReferenceTypeBits.SpelledAsLValue = SpelledAsLValue; ReferenceTypeBits.InnerRef = Referencee->isReferenceType(); } public: bool isSpelledAsLValue() const { return ReferenceTypeBits.SpelledAsLValue; } bool isInnerRef() const { return ReferenceTypeBits.InnerRef; } QualType getPointeeTypeAsWritten() const { return PointeeType; } QualType getPointeeType() const { // FIXME: this might strip inner qualifiers; okay? const ReferenceType *T = this; while (T->isInnerRef()) T = T->PointeeType->castAs(); return T->PointeeType; } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, PointeeType, isSpelledAsLValue()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType Referencee, bool SpelledAsLValue) { ID.AddPointer(Referencee.getAsOpaquePtr()); ID.AddBoolean(SpelledAsLValue); } static bool classof(const Type *T) { return T->getTypeClass() == LValueReference || T->getTypeClass() == RValueReference; } }; /// An lvalue reference type, per C++11 [dcl.ref]. 
class LValueReferenceType : public ReferenceType { friend class ASTContext; // ASTContext creates these LValueReferenceType(QualType Referencee, QualType CanonicalRef, bool SpelledAsLValue) : ReferenceType(LValueReference, Referencee, CanonicalRef, SpelledAsLValue) {} public: bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == LValueReference; } }; /// An rvalue reference type, per C++11 [dcl.ref]. class RValueReferenceType : public ReferenceType { friend class ASTContext; // ASTContext creates these RValueReferenceType(QualType Referencee, QualType CanonicalRef) : ReferenceType(RValueReference, Referencee, CanonicalRef, false) {} public: bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == RValueReference; } }; /// A pointer to member type per C++ 8.3.3 - Pointers to members. /// /// This includes both pointers to data members and pointer to member functions. class MemberPointerType : public Type, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these. QualType PointeeType; /// The class of which the pointee is a member. Must ultimately be a /// RecordType, but could be a typedef or a template parameter too. const Type *Class; MemberPointerType(QualType Pointee, const Type *Cls, QualType CanonicalPtr) : Type(MemberPointer, CanonicalPtr, (Cls->getDependence() & ~TypeDependence::VariablyModified) | Pointee->getDependence()), PointeeType(Pointee), Class(Cls) {} public: QualType getPointeeType() const { return PointeeType; } /// Returns true if the member type (i.e. the pointee type) is a /// function type rather than a data-member type. bool isMemberFunctionPointer() const { return PointeeType->isFunctionProtoType(); } /// Returns true if the member type (i.e. the pointee type) is a /// data type rather than a function type. bool isMemberDataPointer() const { return !PointeeType->isFunctionProtoType(); } const Type *getClass() const { return Class; } CXXRecordDecl *getMostRecentCXXRecordDecl() const; bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getPointeeType(), getClass()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee, const Type *Class) { ID.AddPointer(Pointee.getAsOpaquePtr()); ID.AddPointer(Class); } static bool classof(const Type *T) { return T->getTypeClass() == MemberPointer; } }; /// Represents an array type, per C99 6.7.5.2 - Array Declarators. class ArrayType : public Type, public llvm::FoldingSetNode { public: /// Capture whether this is a normal array (e.g. int X[4]) /// an array with a static size (e.g. int X[static 4]), or an array /// with a star size (e.g. int X[*]). /// 'static' is only allowed on function parameters. enum ArraySizeModifier { Normal, Static, Star }; private: /// The element type of the array. QualType ElementType; protected: friend class ASTContext; // ASTContext creates these. 
ArrayType(TypeClass tc, QualType et, QualType can, ArraySizeModifier sm, unsigned tq, const Expr *sz = nullptr); public: QualType getElementType() const { return ElementType; } ArraySizeModifier getSizeModifier() const { return ArraySizeModifier(ArrayTypeBits.SizeModifier); } Qualifiers getIndexTypeQualifiers() const { return Qualifiers::fromCVRMask(getIndexTypeCVRQualifiers()); } unsigned getIndexTypeCVRQualifiers() const { return ArrayTypeBits.IndexTypeQuals; } static bool classof(const Type *T) { return T->getTypeClass() == ConstantArray || T->getTypeClass() == VariableArray || T->getTypeClass() == IncompleteArray || T->getTypeClass() == DependentSizedArray; } }; /// Represents the canonical version of C arrays with a specified constant size. /// For example, the canonical type for 'int A[4 + 4*100]' is a /// ConstantArrayType where the element type is 'int' and the size is 404. class ConstantArrayType final : public ArrayType, private llvm::TrailingObjects { friend class ASTContext; // ASTContext creates these. friend TrailingObjects; llvm::APInt Size; // Allows us to unique the type. ConstantArrayType(QualType et, QualType can, const llvm::APInt &size, const Expr *sz, ArraySizeModifier sm, unsigned tq) : ArrayType(ConstantArray, et, can, sm, tq, sz), Size(size) { ConstantArrayTypeBits.HasStoredSizeExpr = sz != nullptr; if (ConstantArrayTypeBits.HasStoredSizeExpr) { assert(!can.isNull() && "canonical constant array should not have size"); *getTrailingObjects() = sz; } } unsigned numTrailingObjects(OverloadToken) const { return ConstantArrayTypeBits.HasStoredSizeExpr; } public: const llvm::APInt &getSize() const { return Size; } const Expr *getSizeExpr() const { return ConstantArrayTypeBits.HasStoredSizeExpr ? *getTrailingObjects() : nullptr; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } /// Determine the number of bits required to address a member of // an array with the given element type and number of elements. static unsigned getNumAddressingBits(const ASTContext &Context, QualType ElementType, const llvm::APInt &NumElements); /// Determine the maximum number of active bits that an array's size /// can require, which limits the maximum size of the array. static unsigned getMaxSizeBits(const ASTContext &Context); void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) { Profile(ID, Ctx, getElementType(), getSize(), getSizeExpr(), getSizeModifier(), getIndexTypeCVRQualifiers()); } static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx, QualType ET, const llvm::APInt &ArraySize, const Expr *SizeExpr, ArraySizeModifier SizeMod, unsigned TypeQuals); static bool classof(const Type *T) { return T->getTypeClass() == ConstantArray; } }; /// Represents a C array with an unspecified size. For example 'int A[]' has /// an IncompleteArrayType where the element type is 'int' and the size is /// unspecified. class IncompleteArrayType : public ArrayType { friend class ASTContext; // ASTContext creates these. 
IncompleteArrayType(QualType et, QualType can, ArraySizeModifier sm, unsigned tq) : ArrayType(IncompleteArray, et, can, sm, tq) {} public: friend class StmtIteratorBase; bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == IncompleteArray; } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getElementType(), getSizeModifier(), getIndexTypeCVRQualifiers()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType ET, ArraySizeModifier SizeMod, unsigned TypeQuals) { ID.AddPointer(ET.getAsOpaquePtr()); ID.AddInteger(SizeMod); ID.AddInteger(TypeQuals); } }; /// Represents a C array with a specified size that is not an /// integer-constant-expression. For example, 'int s[x+foo()]'. /// Since the size expression is an arbitrary expression, we store it as such. /// /// Note: VariableArrayType's aren't uniqued (since the expressions aren't) and /// should not be: two lexically equivalent variable array types could mean /// different things, for example, these variables do not have the same type /// dynamically: /// /// void foo(int x) { /// int Y[x]; /// ++x; /// int Z[x]; /// } class VariableArrayType : public ArrayType { friend class ASTContext; // ASTContext creates these. /// An assignment-expression. VLA's are only permitted within /// a function block. Stmt *SizeExpr; /// The range spanned by the left and right array brackets. SourceRange Brackets; VariableArrayType(QualType et, QualType can, Expr *e, ArraySizeModifier sm, unsigned tq, SourceRange brackets) : ArrayType(VariableArray, et, can, sm, tq, e), SizeExpr((Stmt*) e), Brackets(brackets) {} public: friend class StmtIteratorBase; Expr *getSizeExpr() const { // We use C-style casts instead of cast<> here because we do not wish // to have a dependency of Type.h on Stmt.h/Expr.h. return (Expr*) SizeExpr; } SourceRange getBracketsRange() const { return Brackets; } SourceLocation getLBracketLoc() const { return Brackets.getBegin(); } SourceLocation getRBracketLoc() const { return Brackets.getEnd(); } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == VariableArray; } void Profile(llvm::FoldingSetNodeID &ID) { llvm_unreachable("Cannot unique VariableArrayTypes."); } }; /// Represents an array type in C++ whose size is a value-dependent expression. /// /// For example: /// \code /// template /// class array { /// T data[Size]; /// }; /// \endcode /// /// For these types, we won't actually know what the array bound is /// until template instantiation occurs, at which point this will /// become either a ConstantArrayType or a VariableArrayType. class DependentSizedArrayType : public ArrayType { friend class ASTContext; // ASTContext creates these. const ASTContext &Context; /// An assignment expression that will instantiate to the /// size of the array. /// /// The expression itself might be null, in which case the array /// type will have its size deduced from an initializer. Stmt *SizeExpr; /// The range spanned by the left and right array brackets. SourceRange Brackets; DependentSizedArrayType(const ASTContext &Context, QualType et, QualType can, Expr *e, ArraySizeModifier sm, unsigned tq, SourceRange brackets); public: friend class StmtIteratorBase; Expr *getSizeExpr() const { // We use C-style casts instead of cast<> here because we do not wish // to have a dependency of Type.h on Stmt.h/Expr.h. 
return (Expr*) SizeExpr; } SourceRange getBracketsRange() const { return Brackets; } SourceLocation getLBracketLoc() const { return Brackets.getBegin(); } SourceLocation getRBracketLoc() const { return Brackets.getEnd(); } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == DependentSizedArray; } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Context, getElementType(), getSizeModifier(), getIndexTypeCVRQualifiers(), getSizeExpr()); } static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType ET, ArraySizeModifier SizeMod, unsigned TypeQuals, Expr *E); }; /// Represents an extended address space qualifier where the input address space /// value is dependent. Non-dependent address spaces are not represented with a /// special Type subclass; they are stored on an ExtQuals node as part of a QualType. /// /// For example: /// \code /// template /// class AddressSpace { /// typedef T __attribute__((address_space(AddrSpace))) type; /// } /// \endcode class DependentAddressSpaceType : public Type, public llvm::FoldingSetNode { friend class ASTContext; const ASTContext &Context; Expr *AddrSpaceExpr; QualType PointeeType; SourceLocation loc; DependentAddressSpaceType(const ASTContext &Context, QualType PointeeType, QualType can, Expr *AddrSpaceExpr, SourceLocation loc); public: Expr *getAddrSpaceExpr() const { return AddrSpaceExpr; } QualType getPointeeType() const { return PointeeType; } SourceLocation getAttributeLoc() const { return loc; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == DependentAddressSpace; } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Context, getPointeeType(), getAddrSpaceExpr()); } static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType PointeeType, Expr *AddrSpaceExpr); }; /// Represents an extended vector type where either the type or size is /// dependent. /// /// For example: /// \code /// template /// class vector { /// typedef T __attribute__((ext_vector_type(Size))) type; /// } /// \endcode class DependentSizedExtVectorType : public Type, public llvm::FoldingSetNode { friend class ASTContext; const ASTContext &Context; Expr *SizeExpr; /// The element type of the array. QualType ElementType; SourceLocation loc; DependentSizedExtVectorType(const ASTContext &Context, QualType ElementType, QualType can, Expr *SizeExpr, SourceLocation loc); public: Expr *getSizeExpr() const { return SizeExpr; } QualType getElementType() const { return ElementType; } SourceLocation getAttributeLoc() const { return loc; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == DependentSizedExtVector; } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Context, getElementType(), getSizeExpr()); } static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType ElementType, Expr *SizeExpr); }; /// Represents a GCC generic vector type. This type is created using /// __attribute__((vector_size(n)), where "n" specifies the vector size in /// bytes; or from an Altivec __vector or vector declaration. /// Since the constructor takes the number of vector elements, the /// client is responsible for converting the size into the number of elements. 
class VectorType : public Type, public llvm::FoldingSetNode { public: enum VectorKind { /// not a target-specific vector type GenericVector, /// is AltiVec vector AltiVecVector, /// is AltiVec 'vector Pixel' AltiVecPixel, /// is AltiVec 'vector bool ...' AltiVecBool, /// is ARM Neon vector NeonVector, /// is ARM Neon polynomial vector NeonPolyVector }; protected: friend class ASTContext; // ASTContext creates these. /// The element type of the vector. QualType ElementType; VectorType(QualType vecType, unsigned nElements, QualType canonType, VectorKind vecKind); VectorType(TypeClass tc, QualType vecType, unsigned nElements, QualType canonType, VectorKind vecKind); public: QualType getElementType() const { return ElementType; } unsigned getNumElements() const { return VectorTypeBits.NumElements; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } VectorKind getVectorKind() const { return VectorKind(VectorTypeBits.VecKind); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getElementType(), getNumElements(), getTypeClass(), getVectorKind()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType, unsigned NumElements, TypeClass TypeClass, VectorKind VecKind) { ID.AddPointer(ElementType.getAsOpaquePtr()); ID.AddInteger(NumElements); ID.AddInteger(TypeClass); ID.AddInteger(VecKind); } static bool classof(const Type *T) { return T->getTypeClass() == Vector || T->getTypeClass() == ExtVector; } }; /// Represents a vector type where either the type or size is dependent. //// /// For example: /// \code /// template /// class vector { /// typedef T __attribute__((vector_size(Size))) type; /// } /// \endcode class DependentVectorType : public Type, public llvm::FoldingSetNode { friend class ASTContext; const ASTContext &Context; QualType ElementType; Expr *SizeExpr; SourceLocation Loc; DependentVectorType(const ASTContext &Context, QualType ElementType, QualType CanonType, Expr *SizeExpr, SourceLocation Loc, VectorType::VectorKind vecKind); public: Expr *getSizeExpr() const { return SizeExpr; } QualType getElementType() const { return ElementType; } SourceLocation getAttributeLoc() const { return Loc; } VectorType::VectorKind getVectorKind() const { return VectorType::VectorKind(VectorTypeBits.VecKind); } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == DependentVector; } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Context, getElementType(), getSizeExpr(), getVectorKind()); } static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType ElementType, const Expr *SizeExpr, VectorType::VectorKind VecKind); }; /// ExtVectorType - Extended vector type. This type is created using /// __attribute__((ext_vector_type(n)), where "n" is the number of elements. /// Unlike vector_size, ext_vector_type is only allowed on typedef's. This /// class enables syntactic extensions, like Vector Components for accessing /// points (as .xyzw), colors (as .rgba), and textures (modeled after OpenGL /// Shading Language). class ExtVectorType : public VectorType { friend class ASTContext; // ASTContext creates these. 
ExtVectorType(QualType vecType, unsigned nElements, QualType canonType) : VectorType(ExtVector, vecType, nElements, canonType, GenericVector) {} public: static int getPointAccessorIdx(char c) { switch (c) { default: return -1; case 'x': case 'r': return 0; case 'y': case 'g': return 1; case 'z': case 'b': return 2; case 'w': case 'a': return 3; } } static int getNumericAccessorIdx(char c) { switch (c) { default: return -1; case '0': return 0; case '1': return 1; case '2': return 2; case '3': return 3; case '4': return 4; case '5': return 5; case '6': return 6; case '7': return 7; case '8': return 8; case '9': return 9; case 'A': case 'a': return 10; case 'B': case 'b': return 11; case 'C': case 'c': return 12; case 'D': case 'd': return 13; case 'E': case 'e': return 14; case 'F': case 'f': return 15; } } static int getAccessorIdx(char c, bool isNumericAccessor) { if (isNumericAccessor) return getNumericAccessorIdx(c); else return getPointAccessorIdx(c); } bool isAccessorWithinNumElements(char c, bool isNumericAccessor) const { if (int idx = getAccessorIdx(c, isNumericAccessor)+1) return unsigned(idx-1) < getNumElements(); return false; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == ExtVector; } }; /// Represents a matrix type, as defined in the Matrix Types clang extensions. /// __attribute__((matrix_type(rows, columns))), where "rows" specifies /// number of rows and "columns" specifies the number of columns. class MatrixType : public Type, public llvm::FoldingSetNode { protected: friend class ASTContext; /// The element type of the matrix. QualType ElementType; MatrixType(QualType ElementTy, QualType CanonElementTy); MatrixType(TypeClass TypeClass, QualType ElementTy, QualType CanonElementTy, const Expr *RowExpr = nullptr, const Expr *ColumnExpr = nullptr); public: /// Returns type of the elements being stored in the matrix QualType getElementType() const { return ElementType; } /// Valid elements types are the following: /// * an integer type (as in C2x 6.2.5p19), but excluding enumerated types /// and _Bool /// * the standard floating types float or double /// * a half-precision floating point type, if one is supported on the target static bool isValidElementType(QualType T) { return T->isDependentType() || (T->isRealType() && !T->isBooleanType() && !T->isEnumeralType()); } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == ConstantMatrix || T->getTypeClass() == DependentSizedMatrix; } }; /// Represents a concrete matrix type with constant number of rows and columns class ConstantMatrixType final : public MatrixType { protected: friend class ASTContext; /// The element type of the matrix. QualType ElementType; ConstantMatrixType(QualType MatrixElementType, unsigned NRows, unsigned NColumns, QualType CanonElementType); ConstantMatrixType(TypeClass typeClass, QualType MatrixType, unsigned NRows, unsigned NColumns, QualType CanonElementType); public: /// Returns the number of rows in the matrix. unsigned getNumRows() const { return ConstantMatrixTypeBits.NumRows; } /// Returns the number of columns in the matrix. unsigned getNumColumns() const { return ConstantMatrixTypeBits.NumColumns; } /// Returns the number of elements required to embed the matrix into a vector. 
unsigned getNumElementsFlattened() const { return ConstantMatrixTypeBits.NumRows * ConstantMatrixTypeBits.NumColumns; } /// Returns true if \p NumElements is a valid matrix dimension. static bool isDimensionValid(uint64_t NumElements) { return NumElements > 0 && NumElements <= ConstantMatrixTypeBitfields::MaxElementsPerDimension; } /// Returns the maximum number of elements per dimension. static unsigned getMaxElementsPerDimension() { return ConstantMatrixTypeBitfields::MaxElementsPerDimension; } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getElementType(), getNumRows(), getNumColumns(), getTypeClass()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType, unsigned NumRows, unsigned NumColumns, TypeClass TypeClass) { ID.AddPointer(ElementType.getAsOpaquePtr()); ID.AddInteger(NumRows); ID.AddInteger(NumColumns); ID.AddInteger(TypeClass); } static bool classof(const Type *T) { return T->getTypeClass() == ConstantMatrix; } }; /// Represents a matrix type where the type and the number of rows and columns /// is dependent on a template. class DependentSizedMatrixType final : public MatrixType { friend class ASTContext; const ASTContext &Context; Expr *RowExpr; Expr *ColumnExpr; SourceLocation loc; DependentSizedMatrixType(const ASTContext &Context, QualType ElementType, QualType CanonicalType, Expr *RowExpr, Expr *ColumnExpr, SourceLocation loc); public: QualType getElementType() const { return ElementType; } Expr *getRowExpr() const { return RowExpr; } Expr *getColumnExpr() const { return ColumnExpr; } SourceLocation getAttributeLoc() const { return loc; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == DependentSizedMatrix; } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Context, getElementType(), getRowExpr(), getColumnExpr()); } static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType ElementType, Expr *RowExpr, Expr *ColumnExpr); }; /// FunctionType - C99 6.7.5.3 - Function Declarators. This is the common base /// class of FunctionNoProtoType and FunctionProtoType. class FunctionType : public Type { // The type returned by the function. QualType ResultType; public: /// Interesting information about a specific parameter that can't simply /// be reflected in parameter's type. This is only used by FunctionProtoType /// but is in FunctionType to make this class available during the /// specification of the bases of FunctionProtoType. /// /// It makes sense to model language features this way when there's some /// sort of parameter-specific override (such as an attribute) that /// affects how the function is called. For example, the ARC ns_consumed /// attribute changes whether a parameter is passed at +0 (the default) /// or +1 (ns_consumed). This must be reflected in the function type, /// but isn't really a change to the parameter type. /// /// One serious disadvantage of modelling language features this way is /// that they generally do not work with language features that attempt /// to destructure types. For example, template argument deduction will /// not be able to match a parameter declared as /// T (*)(U) /// against an argument of type /// void (*)(__attribute__((ns_consumed)) id) /// because the substitution of T=void, U=id into the former will /// not produce the latter. 
class ExtParameterInfo { enum { ABIMask = 0x0F, IsConsumed = 0x10, HasPassObjSize = 0x20, IsNoEscape = 0x40, }; unsigned char Data = 0; public: ExtParameterInfo() = default; /// Return the ABI treatment of this parameter. ParameterABI getABI() const { return ParameterABI(Data & ABIMask); } ExtParameterInfo withABI(ParameterABI kind) const { ExtParameterInfo copy = *this; copy.Data = (copy.Data & ~ABIMask) | unsigned(kind); return copy; } /// Is this parameter considered "consumed" by Objective-C ARC? /// Consumed parameters must have retainable object type. bool isConsumed() const { return (Data & IsConsumed); } ExtParameterInfo withIsConsumed(bool consumed) const { ExtParameterInfo copy = *this; if (consumed) copy.Data |= IsConsumed; else copy.Data &= ~IsConsumed; return copy; } bool hasPassObjectSize() const { return Data & HasPassObjSize; } ExtParameterInfo withHasPassObjectSize() const { ExtParameterInfo Copy = *this; Copy.Data |= HasPassObjSize; return Copy; } bool isNoEscape() const { return Data & IsNoEscape; } ExtParameterInfo withIsNoEscape(bool NoEscape) const { ExtParameterInfo Copy = *this; if (NoEscape) Copy.Data |= IsNoEscape; else Copy.Data &= ~IsNoEscape; return Copy; } unsigned char getOpaqueValue() const { return Data; } static ExtParameterInfo getFromOpaqueValue(unsigned char data) { ExtParameterInfo result; result.Data = data; return result; } friend bool operator==(ExtParameterInfo lhs, ExtParameterInfo rhs) { return lhs.Data == rhs.Data; } friend bool operator!=(ExtParameterInfo lhs, ExtParameterInfo rhs) { return lhs.Data != rhs.Data; } }; /// A class which abstracts out some details necessary for /// making a call. /// /// It is not actually used directly for storing this information in /// a FunctionType, although FunctionType does currently use the /// same bit-pattern. /// // If you add a field (say Foo), other than the obvious places (both, // constructors, compile failures), what you need to update is // * Operator== // * getFoo // * withFoo // * functionType. Add Foo, getFoo. // * ASTContext::getFooType // * ASTContext::mergeFunctionTypes // * FunctionNoProtoType::Profile // * FunctionProtoType::Profile // * TypePrinter::PrintFunctionProto // * AST read and write // * Codegen class ExtInfo { friend class FunctionType; // Feel free to rearrange or add bits, but if you go over 16, you'll need to // adjust the Bits field below, and if you add bits, you'll need to adjust // Type::FunctionTypeBitfields::ExtInfo as well. // | CC |noreturn|produces|nocallersavedregs|regparm|nocfcheck|cmsenscall| // |0 .. 4| 5 | 6 | 7 |8 .. 10| 11 | 12 | // // regparm is either 0 (no regparm attribute) or the regparm value+1. enum { CallConvMask = 0x1F }; enum { NoReturnMask = 0x20 }; enum { ProducesResultMask = 0x40 }; enum { NoCallerSavedRegsMask = 0x80 }; enum { RegParmMask = 0x700, RegParmOffset = 8 }; enum { NoCfCheckMask = 0x800 }; enum { CmseNSCallMask = 0x1000 }; uint16_t Bits = CC_C; ExtInfo(unsigned Bits) : Bits(static_cast(Bits)) {} public: // Constructor with no defaults. Use this when you know that you // have all the elements (when reading an AST file for example). ExtInfo(bool noReturn, bool hasRegParm, unsigned regParm, CallingConv cc, bool producesResult, bool noCallerSavedRegs, bool NoCfCheck, bool cmseNSCall) { assert((!hasRegParm || regParm < 7) && "Invalid regparm value"); Bits = ((unsigned)cc) | (noReturn ? NoReturnMask : 0) | (producesResult ? ProducesResultMask : 0) | (noCallerSavedRegs ? NoCallerSavedRegsMask : 0) | (hasRegParm ? 
((regParm + 1) << RegParmOffset) : 0) | (NoCfCheck ? NoCfCheckMask : 0) | (cmseNSCall ? CmseNSCallMask : 0); } // Constructor with all defaults. Use when for example creating a // function known to use defaults. ExtInfo() = default; // Constructor with just the calling convention, which is an important part // of the canonical type. ExtInfo(CallingConv CC) : Bits(CC) {} bool getNoReturn() const { return Bits & NoReturnMask; } bool getProducesResult() const { return Bits & ProducesResultMask; } bool getCmseNSCall() const { return Bits & CmseNSCallMask; } bool getNoCallerSavedRegs() const { return Bits & NoCallerSavedRegsMask; } bool getNoCfCheck() const { return Bits & NoCfCheckMask; } bool getHasRegParm() const { return ((Bits & RegParmMask) >> RegParmOffset) != 0; } unsigned getRegParm() const { unsigned RegParm = (Bits & RegParmMask) >> RegParmOffset; if (RegParm > 0) --RegParm; return RegParm; } CallingConv getCC() const { return CallingConv(Bits & CallConvMask); } bool operator==(ExtInfo Other) const { return Bits == Other.Bits; } bool operator!=(ExtInfo Other) const { return Bits != Other.Bits; } // Note that we don't have setters. That is by design, use // the following with methods instead of mutating these objects. ExtInfo withNoReturn(bool noReturn) const { if (noReturn) return ExtInfo(Bits | NoReturnMask); else return ExtInfo(Bits & ~NoReturnMask); } ExtInfo withProducesResult(bool producesResult) const { if (producesResult) return ExtInfo(Bits | ProducesResultMask); else return ExtInfo(Bits & ~ProducesResultMask); } ExtInfo withCmseNSCall(bool cmseNSCall) const { if (cmseNSCall) return ExtInfo(Bits | CmseNSCallMask); else return ExtInfo(Bits & ~CmseNSCallMask); } ExtInfo withNoCallerSavedRegs(bool noCallerSavedRegs) const { if (noCallerSavedRegs) return ExtInfo(Bits | NoCallerSavedRegsMask); else return ExtInfo(Bits & ~NoCallerSavedRegsMask); } ExtInfo withNoCfCheck(bool noCfCheck) const { if (noCfCheck) return ExtInfo(Bits | NoCfCheckMask); else return ExtInfo(Bits & ~NoCfCheckMask); } ExtInfo withRegParm(unsigned RegParm) const { assert(RegParm < 7 && "Invalid regparm value"); return ExtInfo((Bits & ~RegParmMask) | ((RegParm + 1) << RegParmOffset)); } ExtInfo withCallingConv(CallingConv cc) const { return ExtInfo((Bits & ~CallConvMask) | (unsigned) cc); } void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Bits); } }; /// A simple holder for a QualType representing a type in an /// exception specification. Unfortunately needed by FunctionProtoType /// because TrailingObjects cannot handle repeated types. struct ExceptionType { QualType Type; }; /// A simple holder for various uncommon bits which do not fit in /// FunctionTypeBitfields. Aligned to alignof(void *) to maintain the /// alignment of subsequent objects in TrailingObjects. You must update /// hasExtraBitfields in FunctionProtoType after adding extra data here. struct alignas(void *) FunctionTypeExtraBitfields { /// The number of types in the exception specification. /// A whole unsigned is not needed here and according to /// [implimits] 8 bits would be enough here. 
unsigned NumExceptionType; }; protected: FunctionType(TypeClass tc, QualType res, QualType Canonical, TypeDependence Dependence, ExtInfo Info) : Type(tc, Canonical, Dependence), ResultType(res) { FunctionTypeBits.ExtInfo = Info.Bits; } Qualifiers getFastTypeQuals() const { return Qualifiers::fromFastMask(FunctionTypeBits.FastTypeQuals); } public: QualType getReturnType() const { return ResultType; } bool getHasRegParm() const { return getExtInfo().getHasRegParm(); } unsigned getRegParmType() const { return getExtInfo().getRegParm(); } /// Determine whether this function type includes the GNU noreturn /// attribute. The C++11 [[noreturn]] attribute does not affect the function /// type. bool getNoReturnAttr() const { return getExtInfo().getNoReturn(); } bool getCmseNSCallAttr() const { return getExtInfo().getCmseNSCall(); } CallingConv getCallConv() const { return getExtInfo().getCC(); } ExtInfo getExtInfo() const { return ExtInfo(FunctionTypeBits.ExtInfo); } static_assert((~Qualifiers::FastMask & Qualifiers::CVRMask) == 0, "Const, volatile and restrict are assumed to be a subset of " "the fast qualifiers."); bool isConst() const { return getFastTypeQuals().hasConst(); } bool isVolatile() const { return getFastTypeQuals().hasVolatile(); } bool isRestrict() const { return getFastTypeQuals().hasRestrict(); } /// Determine the type of an expression that calls a function of /// this type. QualType getCallResultType(const ASTContext &Context) const { return getReturnType().getNonLValueExprType(Context); } static StringRef getNameForCallConv(CallingConv CC); static bool classof(const Type *T) { return T->getTypeClass() == FunctionNoProto || T->getTypeClass() == FunctionProto; } }; /// Represents a K&R-style 'int foo()' function, which has /// no information available about its arguments. class FunctionNoProtoType : public FunctionType, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these. FunctionNoProtoType(QualType Result, QualType Canonical, ExtInfo Info) : FunctionType(FunctionNoProto, Result, Canonical, Result->getDependence() & ~(TypeDependence::DependentInstantiation | TypeDependence::UnexpandedPack), Info) {} public: // No additional state past what FunctionType provides. bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getReturnType(), getExtInfo()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType ResultType, ExtInfo Info) { Info.Profile(ID); ID.AddPointer(ResultType.getAsOpaquePtr()); } static bool classof(const Type *T) { return T->getTypeClass() == FunctionNoProto; } }; /// Represents a prototype with parameter type info, e.g. /// 'int foo(int)' or 'int foo(void)'. 'void' is represented as having no /// parameters, not as having a single void parameter. Such a type can have /// an exception specification, but this specification is not part of the /// canonical type. FunctionProtoType has several trailing objects, some of /// which optional. For more information about the trailing objects see /// the first comment inside FunctionProtoType. class FunctionProtoType final : public FunctionType, public llvm::FoldingSetNode, private llvm::TrailingObjects< FunctionProtoType, QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, FunctionType::ExceptionType, Expr *, FunctionDecl *, FunctionType::ExtParameterInfo, Qualifiers> { friend class ASTContext; // ASTContext creates these. 
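  // Illustrative C declarations and the function type nodes they produce
  // (a non-normative summary; note that in C++ 'int f()' already declares a
  // prototype):
  //   int f();          // FunctionNoProtoType: no parameter information
  //   int f(void);      // FunctionProtoType with zero parameters
  //   int f(int, ...);  // variadic FunctionProtoType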
friend TrailingObjects; // FunctionProtoType is followed by several trailing objects, some of // which optional. They are in order: // // * An array of getNumParams() QualType holding the parameter types. // Always present. Note that for the vast majority of FunctionProtoType, // these will be the only trailing objects. // // * Optionally if the function is variadic, the SourceLocation of the // ellipsis. // // * Optionally if some extra data is stored in FunctionTypeExtraBitfields // (see FunctionTypeExtraBitfields and FunctionTypeBitfields): // a single FunctionTypeExtraBitfields. Present if and only if // hasExtraBitfields() is true. // // * Optionally exactly one of: // * an array of getNumExceptions() ExceptionType, // * a single Expr *, // * a pair of FunctionDecl *, // * a single FunctionDecl * // used to store information about the various types of exception // specification. See getExceptionSpecSize for the details. // // * Optionally an array of getNumParams() ExtParameterInfo holding // an ExtParameterInfo for each of the parameters. Present if and // only if hasExtParameterInfos() is true. // // * Optionally a Qualifiers object to represent extra qualifiers that can't // be represented by FunctionTypeBitfields.FastTypeQuals. Present if and only // if hasExtQualifiers() is true. // // The optional FunctionTypeExtraBitfields has to be before the data // related to the exception specification since it contains the number // of exception types. // // We put the ExtParameterInfos last. If all were equal, it would make // more sense to put these before the exception specification, because // it's much easier to skip past them compared to the elaborate switch // required to skip the exception specification. However, all is not // equal; ExtParameterInfos are used to model very uncommon features, // and it's better not to burden the more common paths. public: /// Holds information about the various types of exception specification. /// ExceptionSpecInfo is not stored as such in FunctionProtoType but is /// used to group together the various bits of information about the /// exception specification. struct ExceptionSpecInfo { /// The kind of exception specification this is. ExceptionSpecificationType Type = EST_None; /// Explicitly-specified list of exception types. ArrayRef Exceptions; /// Noexcept expression, if this is a computed noexcept specification. Expr *NoexceptExpr = nullptr; /// The function whose exception specification this is, for /// EST_Unevaluated and EST_Uninstantiated. FunctionDecl *SourceDecl = nullptr; /// The function template whose exception specification this is instantiated /// from, for EST_Uninstantiated. FunctionDecl *SourceTemplate = nullptr; ExceptionSpecInfo() = default; ExceptionSpecInfo(ExceptionSpecificationType EST) : Type(EST) {} }; /// Extra information about a function prototype. ExtProtoInfo is not /// stored as such in FunctionProtoType but is used to group together /// the various bits of extra information about a function prototype. 
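/// A minimal, hedged sketch of typical use (assuming an ASTContext &Ctx and a
/// QualType IntTy are in scope; ASTContext::getFunctionType is the usual
/// consumer of this struct):
/// \code
///   FunctionProtoType::ExtProtoInfo EPI;
///   EPI.Variadic = true;
///   EPI.ExtInfo = EPI.ExtInfo.withNoReturn(true);
///   QualType FnTy = Ctx.getFunctionType(IntTy, {IntTy}, EPI);
/// \endcode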
struct ExtProtoInfo { FunctionType::ExtInfo ExtInfo; bool Variadic : 1; bool HasTrailingReturn : 1; Qualifiers TypeQuals; RefQualifierKind RefQualifier = RQ_None; ExceptionSpecInfo ExceptionSpec; const ExtParameterInfo *ExtParameterInfos = nullptr; SourceLocation EllipsisLoc; ExtProtoInfo() : Variadic(false), HasTrailingReturn(false) {} ExtProtoInfo(CallingConv CC) : ExtInfo(CC), Variadic(false), HasTrailingReturn(false) {} ExtProtoInfo withExceptionSpec(const ExceptionSpecInfo &ESI) { ExtProtoInfo Result(*this); Result.ExceptionSpec = ESI; return Result; } }; private: unsigned numTrailingObjects(OverloadToken) const { return getNumParams(); } unsigned numTrailingObjects(OverloadToken) const { return isVariadic(); } unsigned numTrailingObjects(OverloadToken) const { return hasExtraBitfields(); } unsigned numTrailingObjects(OverloadToken) const { return getExceptionSpecSize().NumExceptionType; } unsigned numTrailingObjects(OverloadToken) const { return getExceptionSpecSize().NumExprPtr; } unsigned numTrailingObjects(OverloadToken) const { return getExceptionSpecSize().NumFunctionDeclPtr; } unsigned numTrailingObjects(OverloadToken) const { return hasExtParameterInfos() ? getNumParams() : 0; } /// Determine whether there are any argument types that /// contain an unexpanded parameter pack. static bool containsAnyUnexpandedParameterPack(const QualType *ArgArray, unsigned numArgs) { for (unsigned Idx = 0; Idx < numArgs; ++Idx) if (ArgArray[Idx]->containsUnexpandedParameterPack()) return true; return false; } FunctionProtoType(QualType result, ArrayRef params, QualType canonical, const ExtProtoInfo &epi); /// This struct is returned by getExceptionSpecSize and is used to /// translate an ExceptionSpecificationType to the number and kind /// of trailing objects related to the exception specification. struct ExceptionSpecSizeHolder { unsigned NumExceptionType; unsigned NumExprPtr; unsigned NumFunctionDeclPtr; }; /// Return the number and kind of trailing objects /// related to the exception specification. static ExceptionSpecSizeHolder getExceptionSpecSize(ExceptionSpecificationType EST, unsigned NumExceptions) { switch (EST) { case EST_None: case EST_DynamicNone: case EST_MSAny: case EST_BasicNoexcept: case EST_Unparsed: case EST_NoThrow: return {0, 0, 0}; case EST_Dynamic: return {NumExceptions, 0, 0}; case EST_DependentNoexcept: case EST_NoexceptFalse: case EST_NoexceptTrue: return {0, 1, 0}; case EST_Uninstantiated: return {0, 0, 2}; case EST_Unevaluated: return {0, 0, 1}; } llvm_unreachable("bad exception specification kind"); } /// Return the number and kind of trailing objects /// related to the exception specification. ExceptionSpecSizeHolder getExceptionSpecSize() const { return getExceptionSpecSize(getExceptionSpecType(), getNumExceptions()); } /// Whether the trailing FunctionTypeExtraBitfields is present. static bool hasExtraBitfields(ExceptionSpecificationType EST) { // If the exception spec type is EST_Dynamic then we have > 0 exception // types and the exact number is stored in FunctionTypeExtraBitfields. return EST == EST_Dynamic; } /// Whether the trailing FunctionTypeExtraBitfields is present. 
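  // For reference, a non-normative mapping from source forms to the
  // ExceptionSpecificationType values handled by getExceptionSpecSize above:
  //   void f();                // EST_None
  //   void f() throw();        // EST_DynamicNone
  //   void f() throw(int);     // EST_Dynamic (one trailing ExceptionType)
  //   void f() throw(...);     // EST_MSAny (MSVC extension)
  //   void f() noexcept;       // EST_BasicNoexcept
  //   void f() noexcept(E);    // EST_NoexceptTrue/NoexceptFalse, or
  //                            // EST_DependentNoexcept if E is dependent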
bool hasExtraBitfields() const { return hasExtraBitfields(getExceptionSpecType()); } bool hasExtQualifiers() const { return FunctionTypeBits.HasExtQuals; } public: unsigned getNumParams() const { return FunctionTypeBits.NumParams; } QualType getParamType(unsigned i) const { assert(i < getNumParams() && "invalid parameter index"); return param_type_begin()[i]; } ArrayRef getParamTypes() const { return llvm::makeArrayRef(param_type_begin(), param_type_end()); } ExtProtoInfo getExtProtoInfo() const { ExtProtoInfo EPI; EPI.ExtInfo = getExtInfo(); EPI.Variadic = isVariadic(); EPI.EllipsisLoc = getEllipsisLoc(); EPI.HasTrailingReturn = hasTrailingReturn(); EPI.ExceptionSpec = getExceptionSpecInfo(); EPI.TypeQuals = getMethodQuals(); EPI.RefQualifier = getRefQualifier(); EPI.ExtParameterInfos = getExtParameterInfosOrNull(); return EPI; } /// Get the kind of exception specification on this function. ExceptionSpecificationType getExceptionSpecType() const { return static_cast( FunctionTypeBits.ExceptionSpecType); } /// Return whether this function has any kind of exception spec. bool hasExceptionSpec() const { return getExceptionSpecType() != EST_None; } /// Return whether this function has a dynamic (throw) exception spec. bool hasDynamicExceptionSpec() const { return isDynamicExceptionSpec(getExceptionSpecType()); } /// Return whether this function has a noexcept exception spec. bool hasNoexceptExceptionSpec() const { return isNoexceptExceptionSpec(getExceptionSpecType()); } /// Return whether this function has a dependent exception spec. bool hasDependentExceptionSpec() const; /// Return whether this function has an instantiation-dependent exception /// spec. bool hasInstantiationDependentExceptionSpec() const; /// Return all the available information about this type's exception spec. ExceptionSpecInfo getExceptionSpecInfo() const { ExceptionSpecInfo Result; Result.Type = getExceptionSpecType(); if (Result.Type == EST_Dynamic) { Result.Exceptions = exceptions(); } else if (isComputedNoexcept(Result.Type)) { Result.NoexceptExpr = getNoexceptExpr(); } else if (Result.Type == EST_Uninstantiated) { Result.SourceDecl = getExceptionSpecDecl(); Result.SourceTemplate = getExceptionSpecTemplate(); } else if (Result.Type == EST_Unevaluated) { Result.SourceDecl = getExceptionSpecDecl(); } return Result; } /// Return the number of types in the exception specification. unsigned getNumExceptions() const { return getExceptionSpecType() == EST_Dynamic ? getTrailingObjects() ->NumExceptionType : 0; } /// Return the ith exception type, where 0 <= i < getNumExceptions(). QualType getExceptionType(unsigned i) const { assert(i < getNumExceptions() && "Invalid exception number!"); return exception_begin()[i]; } /// Return the expression inside noexcept(expression), or a null pointer /// if there is none (because the exception spec is not of this form). Expr *getNoexceptExpr() const { if (!isComputedNoexcept(getExceptionSpecType())) return nullptr; return *getTrailingObjects(); } /// If this function type has an exception specification which hasn't /// been determined yet (either because it has not been evaluated or because /// it has not been instantiated), this is the function whose exception /// specification is represented by this type. 
FunctionDecl *getExceptionSpecDecl() const { if (getExceptionSpecType() != EST_Uninstantiated && getExceptionSpecType() != EST_Unevaluated) return nullptr; return getTrailingObjects()[0]; } /// If this function type has an uninstantiated exception /// specification, this is the function whose exception specification /// should be instantiated to find the exception specification for /// this type. FunctionDecl *getExceptionSpecTemplate() const { if (getExceptionSpecType() != EST_Uninstantiated) return nullptr; return getTrailingObjects()[1]; } /// Determine whether this function type has a non-throwing exception /// specification. CanThrowResult canThrow() const; /// Determine whether this function type has a non-throwing exception /// specification. If this depends on template arguments, returns /// \c ResultIfDependent. bool isNothrow(bool ResultIfDependent = false) const { return ResultIfDependent ? canThrow() != CT_Can : canThrow() == CT_Cannot; } /// Whether this function prototype is variadic. bool isVariadic() const { return FunctionTypeBits.Variadic; } SourceLocation getEllipsisLoc() const { return isVariadic() ? *getTrailingObjects() : SourceLocation(); } /// Determines whether this function prototype contains a /// parameter pack at the end. /// /// A function template whose last parameter is a parameter pack can be /// called with an arbitrary number of arguments, much like a variadic /// function. bool isTemplateVariadic() const; /// Whether this function prototype has a trailing return type. bool hasTrailingReturn() const { return FunctionTypeBits.HasTrailingReturn; } Qualifiers getMethodQuals() const { if (hasExtQualifiers()) return *getTrailingObjects(); else return getFastTypeQuals(); } /// Retrieve the ref-qualifier associated with this function type. RefQualifierKind getRefQualifier() const { return static_cast(FunctionTypeBits.RefQualifier); } using param_type_iterator = const QualType *; using param_type_range = llvm::iterator_range; param_type_range param_types() const { return param_type_range(param_type_begin(), param_type_end()); } param_type_iterator param_type_begin() const { return getTrailingObjects(); } param_type_iterator param_type_end() const { return param_type_begin() + getNumParams(); } using exception_iterator = const QualType *; ArrayRef exceptions() const { return llvm::makeArrayRef(exception_begin(), exception_end()); } exception_iterator exception_begin() const { return reinterpret_cast( getTrailingObjects()); } exception_iterator exception_end() const { return exception_begin() + getNumExceptions(); } /// Is there any interesting extra information for any of the parameters /// of this function type? bool hasExtParameterInfos() const { return FunctionTypeBits.HasExtParameterInfos; } ArrayRef getExtParameterInfos() const { assert(hasExtParameterInfos()); return ArrayRef(getTrailingObjects(), getNumParams()); } /// Return a pointer to the beginning of the array of extra parameter /// information, if present, or else null if none of the parameters /// carry it. This is equivalent to getExtProtoInfo().ExtParameterInfos. 
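  // A hedged usage sketch (assuming a 'const FunctionProtoType *FPT' is in
  // scope) combining the accessors defined in this class:
  //   for (unsigned I = 0, N = FPT->getNumParams(); I != N; ++I) {
  //     QualType ParamTy = FPT->getParamType(I);
  //     if (FPT->getExtParameterInfo(I).isConsumed()) { /* ... */ }
  //   }
  //   bool NeverThrows = FPT->isNothrow();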
const ExtParameterInfo *getExtParameterInfosOrNull() const { if (!hasExtParameterInfos()) return nullptr; return getTrailingObjects(); } ExtParameterInfo getExtParameterInfo(unsigned I) const { assert(I < getNumParams() && "parameter index out of range"); if (hasExtParameterInfos()) return getTrailingObjects()[I]; return ExtParameterInfo(); } ParameterABI getParameterABI(unsigned I) const { assert(I < getNumParams() && "parameter index out of range"); if (hasExtParameterInfos()) return getTrailingObjects()[I].getABI(); return ParameterABI::Ordinary; } bool isParamConsumed(unsigned I) const { assert(I < getNumParams() && "parameter index out of range"); if (hasExtParameterInfos()) return getTrailingObjects()[I].isConsumed(); return false; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void printExceptionSpecification(raw_ostream &OS, const PrintingPolicy &Policy) const; static bool classof(const Type *T) { return T->getTypeClass() == FunctionProto; } void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx); static void Profile(llvm::FoldingSetNodeID &ID, QualType Result, param_type_iterator ArgTys, unsigned NumArgs, const ExtProtoInfo &EPI, const ASTContext &Context, bool Canonical); }; /// Represents the dependent type named by a dependently-scoped /// typename using declaration, e.g. /// using typename Base::foo; /// /// Template instantiation turns these into the underlying type. class UnresolvedUsingType : public Type { friend class ASTContext; // ASTContext creates these. UnresolvedUsingTypenameDecl *Decl; UnresolvedUsingType(const UnresolvedUsingTypenameDecl *D) : Type(UnresolvedUsing, QualType(), TypeDependence::DependentInstantiation), Decl(const_cast(D)) {} public: UnresolvedUsingTypenameDecl *getDecl() const { return Decl; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == UnresolvedUsing; } void Profile(llvm::FoldingSetNodeID &ID) { return Profile(ID, Decl); } static void Profile(llvm::FoldingSetNodeID &ID, UnresolvedUsingTypenameDecl *D) { ID.AddPointer(D); } }; class TypedefType : public Type { TypedefNameDecl *Decl; protected: friend class ASTContext; // ASTContext creates these. - TypedefType(TypeClass tc, const TypedefNameDecl *D, QualType can) - : Type(tc, can, can->getDependence() & ~TypeDependence::UnexpandedPack), - Decl(const_cast(D)) { - assert(!isa(can) && "Invalid canonical type"); - } + TypedefType(TypeClass tc, const TypedefNameDecl *D, QualType can); public: TypedefNameDecl *getDecl() const { return Decl; } bool isSugared() const { return true; } QualType desugar() const; static bool classof(const Type *T) { return T->getTypeClass() == Typedef; } }; /// Sugar type that represents a type that was qualified by a qualifier written /// as a macro invocation. class MacroQualifiedType : public Type { friend class ASTContext; // ASTContext creates these. 
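  // Hedged illustration: given '#define MY_NONNULL _Nonnull' (a hypothetical
  // macro), a declaration such as 'int * MY_NONNULL p;' can be modelled as a
  // MacroQualifiedType wrapping the nullability AttributedType, remembering
  // that the qualifier was spelled via the macro MY_NONNULL.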
QualType UnderlyingTy; const IdentifierInfo *MacroII; MacroQualifiedType(QualType UnderlyingTy, QualType CanonTy, const IdentifierInfo *MacroII) : Type(MacroQualified, CanonTy, UnderlyingTy->getDependence()), UnderlyingTy(UnderlyingTy), MacroII(MacroII) { assert(isa(UnderlyingTy) && "Expected a macro qualified type to only wrap attributed types."); } public: const IdentifierInfo *getMacroIdentifier() const { return MacroII; } QualType getUnderlyingType() const { return UnderlyingTy; } /// Return this attributed type's modified type with no qualifiers attached to /// it. QualType getModifiedType() const; bool isSugared() const { return true; } QualType desugar() const; static bool classof(const Type *T) { return T->getTypeClass() == MacroQualified; } }; /// Represents a `typeof` (or __typeof__) expression (a GCC extension). class TypeOfExprType : public Type { Expr *TOExpr; protected: friend class ASTContext; // ASTContext creates these. TypeOfExprType(Expr *E, QualType can = QualType()); public: Expr *getUnderlyingExpr() const { return TOExpr; } /// Remove a single level of sugar. QualType desugar() const; /// Returns whether this type directly provides sugar. bool isSugared() const; static bool classof(const Type *T) { return T->getTypeClass() == TypeOfExpr; } }; /// Internal representation of canonical, dependent /// `typeof(expr)` types. /// /// This class is used internally by the ASTContext to manage /// canonical, dependent types, only. Clients will only see instances /// of this class via TypeOfExprType nodes. class DependentTypeOfExprType : public TypeOfExprType, public llvm::FoldingSetNode { const ASTContext &Context; public: DependentTypeOfExprType(const ASTContext &Context, Expr *E) : TypeOfExprType(E), Context(Context) {} void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Context, getUnderlyingExpr()); } static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, Expr *E); }; /// Represents `typeof(type)`, a GCC extension. class TypeOfType : public Type { friend class ASTContext; // ASTContext creates these. QualType TOType; TypeOfType(QualType T, QualType can) : Type(TypeOf, can, T->getDependence()), TOType(T) { assert(!isa(can) && "Invalid canonical type"); } public: QualType getUnderlyingType() const { return TOType; } /// Remove a single level of sugar. QualType desugar() const { return getUnderlyingType(); } /// Returns whether this type directly provides sugar. bool isSugared() const { return true; } static bool classof(const Type *T) { return T->getTypeClass() == TypeOf; } }; /// Represents the type `decltype(expr)` (C++11). class DecltypeType : public Type { Expr *E; QualType UnderlyingType; protected: friend class ASTContext; // ASTContext creates these. DecltypeType(Expr *E, QualType underlyingType, QualType can = QualType()); public: Expr *getUnderlyingExpr() const { return E; } QualType getUnderlyingType() const { return UnderlyingType; } /// Remove a single level of sugar. QualType desugar() const; /// Returns whether this type directly provides sugar. bool isSugared() const; static bool classof(const Type *T) { return T->getTypeClass() == Decltype; } }; /// Internal representation of canonical, dependent /// decltype(expr) types. /// /// This class is used internally by the ASTContext to manage /// canonical, dependent types, only. Clients will only see instances /// of this class via DecltypeType nodes. 
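// For orientation, the sugar nodes above correspond to source forms such as
// (illustrative only):
//   int x = 0;
//   __typeof__(x + 1) a;   // TypeOfExprType
//   __typeof__(int) b;     // TypeOfType
//   decltype(x + 1) c;     // DecltypeType (C++11)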
class DependentDecltypeType : public DecltypeType, public llvm::FoldingSetNode { const ASTContext &Context; public: DependentDecltypeType(const ASTContext &Context, Expr *E); void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Context, getUnderlyingExpr()); } static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, Expr *E); }; /// A unary type transform, which is a type constructed from another. class UnaryTransformType : public Type { public: enum UTTKind { EnumUnderlyingType }; private: /// The untransformed type. QualType BaseType; /// The transformed type if not dependent, otherwise the same as BaseType. QualType UnderlyingType; UTTKind UKind; protected: friend class ASTContext; UnaryTransformType(QualType BaseTy, QualType UnderlyingTy, UTTKind UKind, QualType CanonicalTy); public: bool isSugared() const { return !isDependentType(); } QualType desugar() const { return UnderlyingType; } QualType getUnderlyingType() const { return UnderlyingType; } QualType getBaseType() const { return BaseType; } UTTKind getUTTKind() const { return UKind; } static bool classof(const Type *T) { return T->getTypeClass() == UnaryTransform; } }; /// Internal representation of canonical, dependent /// __underlying_type(type) types. /// /// This class is used internally by the ASTContext to manage /// canonical, dependent types, only. Clients will only see instances /// of this class via UnaryTransformType nodes. class DependentUnaryTransformType : public UnaryTransformType, public llvm::FoldingSetNode { public: DependentUnaryTransformType(const ASTContext &C, QualType BaseType, UTTKind UKind); void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getBaseType(), getUTTKind()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType BaseType, UTTKind UKind) { ID.AddPointer(BaseType.getAsOpaquePtr()); ID.AddInteger((unsigned)UKind); } }; class TagType : public Type { friend class ASTReader; template friend class serialization::AbstractTypeReader; /// Stores the TagDecl associated with this type. The decl may point to any /// TagDecl that declares the entity. TagDecl *decl; protected: TagType(TypeClass TC, const TagDecl *D, QualType can); public: TagDecl *getDecl() const; /// Determines whether this type is in the process of being defined. bool isBeingDefined() const; static bool classof(const Type *T) { return T->getTypeClass() == Enum || T->getTypeClass() == Record; } }; /// A helper class that allows the use of isa/cast/dyncast /// to detect TagType objects of structs/unions/classes. class RecordType : public TagType { protected: friend class ASTContext; // ASTContext creates these. explicit RecordType(const RecordDecl *D) : TagType(Record, reinterpret_cast(D), QualType()) {} explicit RecordType(TypeClass TC, RecordDecl *D) : TagType(TC, reinterpret_cast(D), QualType()) {} public: RecordDecl *getDecl() const { return reinterpret_cast(TagType::getDecl()); } /// Recursively check all fields in the record for const-ness. If any field /// is declared const, return true. Otherwise, return false. bool hasConstFields() const; bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == Record; } }; /// A helper class that allows the use of isa/cast/dyncast /// to detect TagType objects of enums. class EnumType : public TagType { friend class ASTContext; // ASTContext creates these. 
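  // A hedged sketch of the usual isa/cast pattern these helpers enable
  // (assuming a QualType QT is in scope):
  //   if (const auto *RT = QT->getAs<RecordType>())
  //     RecordDecl *RD = RT->getDecl();
  //   if (const auto *ET = QT->getAs<EnumType>())
  //     EnumDecl *ED = ET->getDecl();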
explicit EnumType(const EnumDecl *D) : TagType(Enum, reinterpret_cast(D), QualType()) {} public: EnumDecl *getDecl() const { return reinterpret_cast(TagType::getDecl()); } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == Enum; } }; /// An attributed type is a type to which a type attribute has been applied. /// /// The "modified type" is the fully-sugared type to which the attributed /// type was applied; generally it is not canonically equivalent to the /// attributed type. The "equivalent type" is the minimally-desugared type /// which the type is canonically equivalent to. /// /// For example, in the following attributed type: /// int32_t __attribute__((vector_size(16))) /// - the modified type is the TypedefType for int32_t /// - the equivalent type is VectorType(16, int32_t) /// - the canonical type is VectorType(16, int) class AttributedType : public Type, public llvm::FoldingSetNode { public: using Kind = attr::Kind; private: friend class ASTContext; // ASTContext creates these QualType ModifiedType; QualType EquivalentType; AttributedType(QualType canon, attr::Kind attrKind, QualType modified, QualType equivalent) : Type(Attributed, canon, equivalent->getDependence()), ModifiedType(modified), EquivalentType(equivalent) { AttributedTypeBits.AttrKind = attrKind; } public: Kind getAttrKind() const { return static_cast(AttributedTypeBits.AttrKind); } QualType getModifiedType() const { return ModifiedType; } QualType getEquivalentType() const { return EquivalentType; } bool isSugared() const { return true; } QualType desugar() const { return getEquivalentType(); } /// Does this attribute behave like a type qualifier? /// /// A type qualifier adjusts a type to provide specialized rules for /// a specific object, like the standard const and volatile qualifiers. /// This includes attributes controlling things like nullability, /// address spaces, and ARC ownership. The value of the object is still /// largely described by the modified type. /// /// In contrast, many type attributes "rewrite" their modified type to /// produce a fundamentally different type, not necessarily related in any /// formalizable way to the original type. For example, calling convention /// and vector attributes are not simple type qualifiers. /// /// Type qualifiers are often, but not always, reflected in the canonical /// type. bool isQualifier() const; bool isMSTypeSpec() const; bool isCallingConv() const; llvm::Optional getImmediateNullability() const; /// Retrieve the attribute kind corresponding to the given /// nullability kind. static Kind getNullabilityAttrKind(NullabilityKind kind) { switch (kind) { case NullabilityKind::NonNull: return attr::TypeNonNull; case NullabilityKind::Nullable: return attr::TypeNullable; case NullabilityKind::Unspecified: return attr::TypeNullUnspecified; } llvm_unreachable("Unknown nullability kind."); } /// Strip off the top-level nullability annotation on the given /// type, if it's there. /// /// \param T The type to strip. If the type is exactly an /// AttributedType specifying nullability (without looking through /// type sugar), the nullability is returned and this type changed /// to the underlying modified type. /// /// \returns the top-level nullability, if present. 
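  // A hedged sketch of the intended use of stripOuterNullability (declared
  // below), assuming 'T' is the QualType of an 'int * _Nonnull' variable:
  //   if (Optional<NullabilityKind> K = AttributedType::stripOuterNullability(T))
  //     ;  // *K == NullabilityKind::NonNull; T now names the modified type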
static Optional stripOuterNullability(QualType &T); void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getAttrKind(), ModifiedType, EquivalentType); } static void Profile(llvm::FoldingSetNodeID &ID, Kind attrKind, QualType modified, QualType equivalent) { ID.AddInteger(attrKind); ID.AddPointer(modified.getAsOpaquePtr()); ID.AddPointer(equivalent.getAsOpaquePtr()); } static bool classof(const Type *T) { return T->getTypeClass() == Attributed; } }; class TemplateTypeParmType : public Type, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these // Helper data collector for canonical types. struct CanonicalTTPTInfo { unsigned Depth : 15; unsigned ParameterPack : 1; unsigned Index : 16; }; union { // Info for the canonical type. CanonicalTTPTInfo CanTTPTInfo; // Info for the non-canonical type. TemplateTypeParmDecl *TTPDecl; }; /// Build a non-canonical type. TemplateTypeParmType(TemplateTypeParmDecl *TTPDecl, QualType Canon) : Type(TemplateTypeParm, Canon, TypeDependence::DependentInstantiation | (Canon->getDependence() & TypeDependence::UnexpandedPack)), TTPDecl(TTPDecl) {} /// Build the canonical type. TemplateTypeParmType(unsigned D, unsigned I, bool PP) : Type(TemplateTypeParm, QualType(this, 0), TypeDependence::DependentInstantiation | (PP ? TypeDependence::UnexpandedPack : TypeDependence::None)) { CanTTPTInfo.Depth = D; CanTTPTInfo.Index = I; CanTTPTInfo.ParameterPack = PP; } const CanonicalTTPTInfo& getCanTTPTInfo() const { QualType Can = getCanonicalTypeInternal(); return Can->castAs()->CanTTPTInfo; } public: unsigned getDepth() const { return getCanTTPTInfo().Depth; } unsigned getIndex() const { return getCanTTPTInfo().Index; } bool isParameterPack() const { return getCanTTPTInfo().ParameterPack; } TemplateTypeParmDecl *getDecl() const { return isCanonicalUnqualified() ? nullptr : TTPDecl; } IdentifierInfo *getIdentifier() const; bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getDepth(), getIndex(), isParameterPack(), getDecl()); } static void Profile(llvm::FoldingSetNodeID &ID, unsigned Depth, unsigned Index, bool ParameterPack, TemplateTypeParmDecl *TTPDecl) { ID.AddInteger(Depth); ID.AddInteger(Index); ID.AddBoolean(ParameterPack); ID.AddPointer(TTPDecl); } static bool classof(const Type *T) { return T->getTypeClass() == TemplateTypeParm; } }; /// Represents the result of substituting a type for a template /// type parameter. /// /// Within an instantiated template, all template type parameters have /// been replaced with these. They are used solely to record that a /// type was originally written as a template type parameter; /// therefore they are never canonical. class SubstTemplateTypeParmType : public Type, public llvm::FoldingSetNode { friend class ASTContext; // The original type parameter. const TemplateTypeParmType *Replaced; SubstTemplateTypeParmType(const TemplateTypeParmType *Param, QualType Canon) : Type(SubstTemplateTypeParm, Canon, Canon->getDependence()), Replaced(Param) {} public: /// Gets the template parameter that was substituted for. const TemplateTypeParmType *getReplacedParameter() const { return Replaced; } /// Gets the type that was substituted for the template /// parameter. 
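  // Illustrative example of where these nodes arise (non-normative):
  //   template <typename T> void f(T);   // uses of 'T' are TemplateTypeParmType
  //   template void f<int>(int);         // in the instantiation, each use of 'T'
  //                                      // becomes a SubstTemplateTypeParmType
  //                                      // whose replacement type is 'int'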
QualType getReplacementType() const { return getCanonicalTypeInternal(); } bool isSugared() const { return true; } QualType desugar() const { return getReplacementType(); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getReplacedParameter(), getReplacementType()); } static void Profile(llvm::FoldingSetNodeID &ID, const TemplateTypeParmType *Replaced, QualType Replacement) { ID.AddPointer(Replaced); ID.AddPointer(Replacement.getAsOpaquePtr()); } static bool classof(const Type *T) { return T->getTypeClass() == SubstTemplateTypeParm; } }; /// Represents the result of substituting a set of types for a template /// type parameter pack. /// /// When a pack expansion in the source code contains multiple parameter packs /// and those parameter packs correspond to different levels of template /// parameter lists, this type node is used to represent a template type /// parameter pack from an outer level, which has already had its argument pack /// substituted but that still lives within a pack expansion that itself /// could not be instantiated. When actually performing a substitution into /// that pack expansion (e.g., when all template parameters have corresponding /// arguments), this type will be replaced with the \c SubstTemplateTypeParmType /// at the current pack substitution index. class SubstTemplateTypeParmPackType : public Type, public llvm::FoldingSetNode { friend class ASTContext; /// The original type parameter. const TemplateTypeParmType *Replaced; /// A pointer to the set of template arguments that this /// parameter pack is instantiated with. const TemplateArgument *Arguments; SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param, QualType Canon, const TemplateArgument &ArgPack); public: IdentifierInfo *getIdentifier() const { return Replaced->getIdentifier(); } /// Gets the template parameter that was substituted for. const TemplateTypeParmType *getReplacedParameter() const { return Replaced; } unsigned getNumArgs() const { return SubstTemplateTypeParmPackTypeBits.NumArgs; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } TemplateArgument getArgumentPack() const; void Profile(llvm::FoldingSetNodeID &ID); static void Profile(llvm::FoldingSetNodeID &ID, const TemplateTypeParmType *Replaced, const TemplateArgument &ArgPack); static bool classof(const Type *T) { return T->getTypeClass() == SubstTemplateTypeParmPack; } }; /// Common base class for placeholders for types that get replaced by /// placeholder type deduction: C++11 auto, C++14 decltype(auto), C++17 deduced /// class template types, and constrained type names. /// /// These types are usually a placeholder for a deduced type. However, before /// the initializer is attached, or (usually) if the initializer is /// type-dependent, there is no deduced type and the type is canonical. In /// the latter case, it is also a dependent type. class DeducedType : public Type { protected: DeducedType(TypeClass TC, QualType DeducedAsType, TypeDependence ExtraDependence) : Type(TC, // FIXME: Retain the sugared deduced type? DeducedAsType.isNull() ? QualType(this, 0) : DeducedAsType.getCanonicalType(), ExtraDependence | (DeducedAsType.isNull() ? 
TypeDependence::None : DeducedAsType->getDependence() & ~TypeDependence::VariablyModified)) {} public: bool isSugared() const { return !isCanonicalUnqualified(); } QualType desugar() const { return getCanonicalTypeInternal(); } /// Get the type deduced for this placeholder type, or null if it's /// either not been deduced or was deduced to a dependent type. QualType getDeducedType() const { return !isCanonicalUnqualified() ? getCanonicalTypeInternal() : QualType(); } bool isDeduced() const { return !isCanonicalUnqualified() || isDependentType(); } static bool classof(const Type *T) { return T->getTypeClass() == Auto || T->getTypeClass() == DeducedTemplateSpecialization; } }; /// Represents a C++11 auto or C++14 decltype(auto) type, possibly constrained /// by a type-constraint. class alignas(8) AutoType : public DeducedType, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these ConceptDecl *TypeConstraintConcept; AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword, TypeDependence ExtraDependence, ConceptDecl *CD, ArrayRef TypeConstraintArgs); const TemplateArgument *getArgBuffer() const { return reinterpret_cast(this+1); } TemplateArgument *getArgBuffer() { return reinterpret_cast(this+1); } public: /// Retrieve the template arguments. const TemplateArgument *getArgs() const { return getArgBuffer(); } /// Retrieve the number of template arguments. unsigned getNumArgs() const { return AutoTypeBits.NumArgs; } const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h ArrayRef getTypeConstraintArguments() const { return {getArgs(), getNumArgs()}; } ConceptDecl *getTypeConstraintConcept() const { return TypeConstraintConcept; } bool isConstrained() const { return TypeConstraintConcept != nullptr; } bool isDecltypeAuto() const { return getKeyword() == AutoTypeKeyword::DecltypeAuto; } AutoTypeKeyword getKeyword() const { return (AutoTypeKeyword)AutoTypeBits.Keyword; } void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) { Profile(ID, Context, getDeducedType(), getKeyword(), isDependentType(), getTypeConstraintConcept(), getTypeConstraintArguments()); } static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType Deduced, AutoTypeKeyword Keyword, bool IsDependent, ConceptDecl *CD, ArrayRef Arguments); static bool classof(const Type *T) { return T->getTypeClass() == Auto; } }; /// Represents a C++17 deduced template specialization type. class DeducedTemplateSpecializationType : public DeducedType, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these /// The name of the template whose arguments will be deduced. TemplateName Template; DeducedTemplateSpecializationType(TemplateName Template, QualType DeducedAsType, bool IsDeducedAsDependent) : DeducedType(DeducedTemplateSpecialization, DeducedAsType, toTypeDependence(Template.getDependence()) | (IsDeducedAsDependent ? TypeDependence::DependentInstantiation : TypeDependence::None)), Template(Template) {} public: /// Retrieve the name of the template that we are deducing. 
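  // Source forms that produce these deduced placeholders (illustrative only;
  // the C++20 example assumes <concepts> and <utility> are available):
  //   auto x = 42;                  // AutoType, deduced to 'int'
  //   decltype(auto) y = x;         // AutoType with AutoTypeKeyword::DecltypeAuto
  //   std::integral auto z = 42;    // constrained AutoType (C++20)
  //   std::pair p(1, 2.0);          // DeducedTemplateSpecializationType (CTAD)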
TemplateName getTemplateName() const { return Template;} void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getTemplateName(), getDeducedType(), isDependentType()); } static void Profile(llvm::FoldingSetNodeID &ID, TemplateName Template, QualType Deduced, bool IsDependent) { Template.Profile(ID); ID.AddPointer(Deduced.getAsOpaquePtr()); ID.AddBoolean(IsDependent); } static bool classof(const Type *T) { return T->getTypeClass() == DeducedTemplateSpecialization; } }; /// Represents a type template specialization; the template /// must be a class template, a type alias template, or a template /// template parameter. A template which cannot be resolved to one of /// these, e.g. because it is written with a dependent scope /// specifier, is instead represented as a /// @c DependentTemplateSpecializationType. /// /// A non-dependent template specialization type is always "sugar", /// typically for a \c RecordType. For example, a class template /// specialization type of \c vector will refer to a tag type for /// the instantiation \c std::vector> /// /// Template specializations are dependent if either the template or /// any of the template arguments are dependent, in which case the /// type may also be canonical. /// /// Instances of this type are allocated with a trailing array of /// TemplateArguments, followed by a QualType representing the /// non-canonical aliased type when the template is a type alias /// template. class alignas(8) TemplateSpecializationType : public Type, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these /// The name of the template being specialized. This is /// either a TemplateName::Template (in which case it is a /// ClassTemplateDecl*, a TemplateTemplateParmDecl*, or a /// TypeAliasTemplateDecl*), a /// TemplateName::SubstTemplateTemplateParmPack, or a /// TemplateName::SubstTemplateTemplateParm (in which case the /// replacement must, recursively, be one of these). TemplateName Template; TemplateSpecializationType(TemplateName T, ArrayRef Args, QualType Canon, QualType Aliased); public: /// Determine whether any of the given template arguments are dependent. static bool anyDependentTemplateArguments(ArrayRef Args, bool &InstantiationDependent); static bool anyDependentTemplateArguments(const TemplateArgumentListInfo &, bool &InstantiationDependent); /// True if this template specialization type matches a current /// instantiation in the context in which it is found. bool isCurrentInstantiation() const { return isa(getCanonicalTypeInternal()); } /// Determine if this template specialization type is for a type alias /// template that has been substituted. /// /// Nearly every template specialization type whose template is an alias /// template will be substituted. However, this is not the case when /// the specialization contains a pack expansion but the template alias /// does not have a corresponding parameter pack, e.g., /// /// \code /// template struct S; /// template using A = S; /// template struct X { /// typedef A type; // not a type alias /// }; /// \endcode bool isTypeAlias() const { return TemplateSpecializationTypeBits.TypeAlias; } /// Get the aliased type, if this is a specialization of a type alias /// template. 
QualType getAliasedType() const { assert(isTypeAlias() && "not a type alias template specialization"); return *reinterpret_cast(end()); } using iterator = const TemplateArgument *; iterator begin() const { return getArgs(); } iterator end() const; // defined inline in TemplateBase.h /// Retrieve the name of the template that we are specializing. TemplateName getTemplateName() const { return Template; } /// Retrieve the template arguments. const TemplateArgument *getArgs() const { return reinterpret_cast(this + 1); } /// Retrieve the number of template arguments. unsigned getNumArgs() const { return TemplateSpecializationTypeBits.NumArgs; } /// Retrieve a specific template argument as a type. /// \pre \c isArgType(Arg) const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h ArrayRef template_arguments() const { return {getArgs(), getNumArgs()}; } bool isSugared() const { return !isDependentType() || isCurrentInstantiation() || isTypeAlias(); } QualType desugar() const { return isTypeAlias() ? getAliasedType() : getCanonicalTypeInternal(); } void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) { Profile(ID, Template, template_arguments(), Ctx); if (isTypeAlias()) getAliasedType().Profile(ID); } static void Profile(llvm::FoldingSetNodeID &ID, TemplateName T, ArrayRef Args, const ASTContext &Context); static bool classof(const Type *T) { return T->getTypeClass() == TemplateSpecialization; } }; /// Print a template argument list, including the '<' and '>' /// enclosing the template arguments. void printTemplateArgumentList(raw_ostream &OS, ArrayRef Args, const PrintingPolicy &Policy); void printTemplateArgumentList(raw_ostream &OS, ArrayRef Args, const PrintingPolicy &Policy); void printTemplateArgumentList(raw_ostream &OS, const TemplateArgumentListInfo &Args, const PrintingPolicy &Policy); /// The injected class name of a C++ class template or class /// template partial specialization. Used to record that a type was /// spelled with a bare identifier rather than as a template-id; the /// equivalent for non-templated classes is just RecordType. /// /// Injected class name types are always dependent. Template /// instantiation turns these into RecordTypes. /// /// Injected class name types are always canonical. This works /// because it is impossible to compare an injected class name type /// with the corresponding non-injected template type, for the same /// reason that it is impossible to directly compare template /// parameters from different dependent contexts: injected class name /// types can only occur within the scope of a particular templated /// declaration, and within that scope every template specialization /// will canonicalize to the injected class name (when appropriate /// according to the rules of the language). class InjectedClassNameType : public Type { friend class ASTContext; // ASTContext creates these. friend class ASTNodeImporter; friend class ASTReader; // FIXME: ASTContext::getInjectedClassNameType is not // currently suitable for AST reading, too much // interdependencies. template friend class serialization::AbstractTypeReader; CXXRecordDecl *Decl; /// The template specialization which this type represents. /// For example, in /// template class A { ... }; /// this is A, whereas in /// template class A > { ... }; /// this is A >. /// /// It is always unqualified, always a template specialization type, /// and always dependent. 
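  // Illustrative example (non-normative): inside the definition of
  //   template <class T> struct A { A *p; };
  // the bare name 'A' denotes the injected class name, an
  // InjectedClassNameType whose injected specialization type is 'A<T>'.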
QualType InjectedType; InjectedClassNameType(CXXRecordDecl *D, QualType TST) : Type(InjectedClassName, QualType(), TypeDependence::DependentInstantiation), Decl(D), InjectedType(TST) { assert(isa(TST)); assert(!TST.hasQualifiers()); assert(TST->isDependentType()); } public: QualType getInjectedSpecializationType() const { return InjectedType; } const TemplateSpecializationType *getInjectedTST() const { return cast(InjectedType.getTypePtr()); } TemplateName getTemplateName() const { return getInjectedTST()->getTemplateName(); } CXXRecordDecl *getDecl() const; bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == InjectedClassName; } }; /// The kind of a tag type. enum TagTypeKind { /// The "struct" keyword. TTK_Struct, /// The "__interface" keyword. TTK_Interface, /// The "union" keyword. TTK_Union, /// The "class" keyword. TTK_Class, /// The "enum" keyword. TTK_Enum }; /// The elaboration keyword that precedes a qualified type name or /// introduces an elaborated-type-specifier. enum ElaboratedTypeKeyword { /// The "struct" keyword introduces the elaborated-type-specifier. ETK_Struct, /// The "__interface" keyword introduces the elaborated-type-specifier. ETK_Interface, /// The "union" keyword introduces the elaborated-type-specifier. ETK_Union, /// The "class" keyword introduces the elaborated-type-specifier. ETK_Class, /// The "enum" keyword introduces the elaborated-type-specifier. ETK_Enum, /// The "typename" keyword precedes the qualified type name, e.g., /// \c typename T::type. ETK_Typename, /// No keyword precedes the qualified type name. ETK_None }; /// A helper class for Type nodes having an ElaboratedTypeKeyword. /// The keyword in stored in the free bits of the base class. /// Also provides a few static helpers for converting and printing /// elaborated type keyword and tag type kind enumerations. class TypeWithKeyword : public Type { protected: TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc, QualType Canonical, TypeDependence Dependence) : Type(tc, Canonical, Dependence) { TypeWithKeywordBits.Keyword = Keyword; } public: ElaboratedTypeKeyword getKeyword() const { return static_cast(TypeWithKeywordBits.Keyword); } /// Converts a type specifier (DeclSpec::TST) into an elaborated type keyword. static ElaboratedTypeKeyword getKeywordForTypeSpec(unsigned TypeSpec); /// Converts a type specifier (DeclSpec::TST) into a tag type kind. /// It is an error to provide a type specifier which *isn't* a tag kind here. static TagTypeKind getTagTypeKindForTypeSpec(unsigned TypeSpec); /// Converts a TagTypeKind into an elaborated type keyword. static ElaboratedTypeKeyword getKeywordForTagTypeKind(TagTypeKind Tag); /// Converts an elaborated type keyword into a TagTypeKind. /// It is an error to provide an elaborated type keyword /// which *isn't* a tag kind here. static TagTypeKind getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword); static bool KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword); static StringRef getKeywordName(ElaboratedTypeKeyword Keyword); static StringRef getTagTypeKindName(TagTypeKind Kind) { return getKeywordName(getKeywordForTagTypeKind(Kind)); } class CannotCastToThisType {}; static CannotCastToThisType classof(const Type *); }; /// Represents a type that was referred to using an elaborated type /// keyword, e.g., struct S, or via a qualified name, e.g., N::M::type, /// or both. 
/// /// This type is used to keep track of a type name as written in the /// source code, including tag keywords and any nested-name-specifiers. /// The type itself is always "sugar", used to express what was written /// in the source code but containing no additional semantic information. class ElaboratedType final : public TypeWithKeyword, public llvm::FoldingSetNode, private llvm::TrailingObjects { friend class ASTContext; // ASTContext creates these friend TrailingObjects; /// The nested name specifier containing the qualifier. NestedNameSpecifier *NNS; /// The type that this qualified name refers to. QualType NamedType; /// The (re)declaration of this tag type owned by this occurrence is stored /// as a trailing object if there is one. Use getOwnedTagDecl to obtain /// it, or obtain a null pointer if there is none. ElaboratedType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, QualType NamedType, QualType CanonType, TagDecl *OwnedTagDecl) : TypeWithKeyword(Keyword, Elaborated, CanonType, NamedType->getDependence()), NNS(NNS), NamedType(NamedType) { ElaboratedTypeBits.HasOwnedTagDecl = false; if (OwnedTagDecl) { ElaboratedTypeBits.HasOwnedTagDecl = true; *getTrailingObjects() = OwnedTagDecl; } assert(!(Keyword == ETK_None && NNS == nullptr) && "ElaboratedType cannot have elaborated type keyword " "and name qualifier both null."); } public: /// Retrieve the qualification on this type. NestedNameSpecifier *getQualifier() const { return NNS; } /// Retrieve the type named by the qualified-id. QualType getNamedType() const { return NamedType; } /// Remove a single level of sugar. QualType desugar() const { return getNamedType(); } /// Returns whether this type directly provides sugar. bool isSugared() const { return true; } /// Return the (re)declaration of this type owned by this occurrence of this /// type, or nullptr if there is none. TagDecl *getOwnedTagDecl() const { return ElaboratedTypeBits.HasOwnedTagDecl ? *getTrailingObjects() : nullptr; } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getKeyword(), NNS, NamedType, getOwnedTagDecl()); } static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, QualType NamedType, TagDecl *OwnedTagDecl) { ID.AddInteger(Keyword); ID.AddPointer(NNS); NamedType.Profile(ID); ID.AddPointer(OwnedTagDecl); } static bool classof(const Type *T) { return T->getTypeClass() == Elaborated; } }; /// Represents a qualified type name for which the type name is /// dependent. /// /// DependentNameType represents a class of dependent types that involve a /// possibly dependent nested-name-specifier (e.g., "T::") followed by a /// name of a type. The DependentNameType may start with a "typename" (for a /// typename-specifier), "class", "struct", "union", or "enum" (for a /// dependent elaborated-type-specifier), or nothing (in contexts where we /// know that we must be referring to a type, e.g., in a base class specifier). /// Typically the nested-name-specifier is dependent, but in MSVC compatibility /// mode, this type is used with non-dependent names to delay name lookup until /// instantiation. class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these /// The nested name specifier containing the qualifier. NestedNameSpecifier *NNS; /// The type that this typename specifier refers to. 
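  // Typical source forms producing this node (illustrative only):
  //   template <class T> struct X {
  //     typename T::type a;    // DependentNameType with ETK_Typename
  //     struct T::nested b;    // DependentNameType with ETK_Struct
  //   };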
const IdentifierInfo *Name; DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, const IdentifierInfo *Name, QualType CanonType) : TypeWithKeyword(Keyword, DependentName, CanonType, TypeDependence::DependentInstantiation | toTypeDependence(NNS->getDependence())), NNS(NNS), Name(Name) {} public: /// Retrieve the qualification on this type. NestedNameSpecifier *getQualifier() const { return NNS; } /// Retrieve the type named by the typename specifier as an identifier. /// /// This routine will return a non-NULL identifier pointer when the /// form of the original typename was terminated by an identifier, /// e.g., "typename T::type". const IdentifierInfo *getIdentifier() const { return Name; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getKeyword(), NNS, Name); } static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, const IdentifierInfo *Name) { ID.AddInteger(Keyword); ID.AddPointer(NNS); ID.AddPointer(Name); } static bool classof(const Type *T) { return T->getTypeClass() == DependentName; } }; /// Represents a template specialization type whose template cannot be /// resolved, e.g. /// A::template B class alignas(8) DependentTemplateSpecializationType : public TypeWithKeyword, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these /// The nested name specifier containing the qualifier. NestedNameSpecifier *NNS; /// The identifier of the template. const IdentifierInfo *Name; DependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, const IdentifierInfo *Name, ArrayRef Args, QualType Canon); const TemplateArgument *getArgBuffer() const { return reinterpret_cast(this+1); } TemplateArgument *getArgBuffer() { return reinterpret_cast(this+1); } public: NestedNameSpecifier *getQualifier() const { return NNS; } const IdentifierInfo *getIdentifier() const { return Name; } /// Retrieve the template arguments. const TemplateArgument *getArgs() const { return getArgBuffer(); } /// Retrieve the number of template arguments. unsigned getNumArgs() const { return DependentTemplateSpecializationTypeBits.NumArgs; } const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h ArrayRef template_arguments() const { return {getArgs(), getNumArgs()}; } using iterator = const TemplateArgument *; iterator begin() const { return getArgs(); } iterator end() const; // inline in TemplateBase.h bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) { Profile(ID, Context, getKeyword(), NNS, Name, {getArgs(), getNumArgs()}); } static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, ElaboratedTypeKeyword Keyword, NestedNameSpecifier *Qualifier, const IdentifierInfo *Name, ArrayRef Args); static bool classof(const Type *T) { return T->getTypeClass() == DependentTemplateSpecialization; } }; /// Represents a pack expansion of types. /// /// Pack expansions are part of C++11 variadic templates. A pack /// expansion contains a pattern, which itself contains one or more /// "unexpanded" parameter packs. When instantiated, a pack expansion /// produces a series of types, each instantiated from the pattern of /// the expansion, where the Ith instantiation of the pattern uses the /// Ith arguments bound to each of the unexpanded parameter packs. 
The /// pack expansion is considered to "expand" these unexpanded /// parameter packs. /// /// \code /// template struct tuple; /// /// template /// struct tuple_of_references { /// typedef tuple type; /// }; /// \endcode /// /// Here, the pack expansion \c Types&... is represented via a /// PackExpansionType whose pattern is Types&. class PackExpansionType : public Type, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these /// The pattern of the pack expansion. QualType Pattern; PackExpansionType(QualType Pattern, QualType Canon, Optional NumExpansions) : Type(PackExpansion, Canon, - (Pattern->getDependence() | TypeDependence::Instantiation) & + (Pattern->getDependence() | TypeDependence::Dependent | + TypeDependence::Instantiation) & ~TypeDependence::UnexpandedPack), Pattern(Pattern) { PackExpansionTypeBits.NumExpansions = NumExpansions ? *NumExpansions + 1 : 0; } public: /// Retrieve the pattern of this pack expansion, which is the /// type that will be repeatedly instantiated when instantiating the /// pack expansion itself. QualType getPattern() const { return Pattern; } /// Retrieve the number of expansions that this pack expansion will /// generate, if known. Optional getNumExpansions() const { if (PackExpansionTypeBits.NumExpansions) return PackExpansionTypeBits.NumExpansions - 1; return None; } - bool isSugared() const { return !Pattern->isDependentType(); } - QualType desugar() const { return isSugared() ? Pattern : QualType(this, 0); } + bool isSugared() const { return false; } + QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getPattern(), getNumExpansions()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType Pattern, Optional NumExpansions) { ID.AddPointer(Pattern.getAsOpaquePtr()); ID.AddBoolean(NumExpansions.hasValue()); if (NumExpansions) ID.AddInteger(*NumExpansions); } static bool classof(const Type *T) { return T->getTypeClass() == PackExpansion; } }; /// This class wraps the list of protocol qualifiers. For types that can /// take ObjC protocol qualifers, they can subclass this class. template class ObjCProtocolQualifiers { protected: ObjCProtocolQualifiers() = default; ObjCProtocolDecl * const *getProtocolStorage() const { return const_cast(this)->getProtocolStorage(); } ObjCProtocolDecl **getProtocolStorage() { return static_cast(this)->getProtocolStorageImpl(); } void setNumProtocols(unsigned N) { static_cast(this)->setNumProtocolsImpl(N); } void initialize(ArrayRef protocols) { setNumProtocols(protocols.size()); assert(getNumProtocols() == protocols.size() && "bitfield overflow in protocol count"); if (!protocols.empty()) memcpy(getProtocolStorage(), protocols.data(), protocols.size() * sizeof(ObjCProtocolDecl*)); } public: using qual_iterator = ObjCProtocolDecl * const *; using qual_range = llvm::iterator_range; qual_range quals() const { return qual_range(qual_begin(), qual_end()); } qual_iterator qual_begin() const { return getProtocolStorage(); } qual_iterator qual_end() const { return qual_begin() + getNumProtocols(); } bool qual_empty() const { return getNumProtocols() == 0; } /// Return the number of qualifying protocols in this type, or 0 if /// there are none. unsigned getNumProtocols() const { return static_cast(this)->getNumProtocolsImpl(); } /// Fetch a protocol by index. ObjCProtocolDecl *getProtocol(unsigned I) const { assert(I < getNumProtocols() && "Out-of-range protocol access"); return qual_begin()[I]; } /// Retrieve all of the protocol qualifiers. 
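// A minimal usage sketch of the PackExpansionType accessors declared above.
// The helper below is hypothetical (not part of Clang) and assumes a valid
// clang::QualType obtained from some client of this API.
#include "clang/AST/Type.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/raw_ostream.h"

static void describePackExpansion(clang::QualType T) {
  if (const auto *PET = T->getAs<clang::PackExpansionType>()) {
    llvm::errs() << "pattern: " << PET->getPattern().getAsString() << "\n";
    if (llvm::Optional<unsigned> N = PET->getNumExpansions())
      llvm::errs() << "expansions: " << *N << "\n";   // count fixed, e.g. after substitution
    else
      llvm::errs() << "expansion count not yet known\n"; // still dependent
  }
}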
ArrayRef getProtocols() const { return ArrayRef(qual_begin(), getNumProtocols()); } }; /// Represents a type parameter type in Objective C. It can take /// a list of protocols. class ObjCTypeParamType : public Type, public ObjCProtocolQualifiers, public llvm::FoldingSetNode { friend class ASTContext; friend class ObjCProtocolQualifiers; /// The number of protocols stored on this type. unsigned NumProtocols : 6; ObjCTypeParamDecl *OTPDecl; /// The protocols are stored after the ObjCTypeParamType node. In the /// canonical type, the list of protocols are sorted alphabetically /// and uniqued. ObjCProtocolDecl **getProtocolStorageImpl(); /// Return the number of qualifying protocols in this interface type, /// or 0 if there are none. unsigned getNumProtocolsImpl() const { return NumProtocols; } void setNumProtocolsImpl(unsigned N) { NumProtocols = N; } ObjCTypeParamType(const ObjCTypeParamDecl *D, QualType can, ArrayRef protocols); public: bool isSugared() const { return true; } QualType desugar() const { return getCanonicalTypeInternal(); } static bool classof(const Type *T) { return T->getTypeClass() == ObjCTypeParam; } void Profile(llvm::FoldingSetNodeID &ID); static void Profile(llvm::FoldingSetNodeID &ID, const ObjCTypeParamDecl *OTPDecl, QualType CanonicalType, ArrayRef protocols); ObjCTypeParamDecl *getDecl() const { return OTPDecl; } }; /// Represents a class type in Objective C. /// /// Every Objective C type is a combination of a base type, a set of /// type arguments (optional, for parameterized classes) and a list of /// protocols. /// /// Given the following declarations: /// \code /// \@class C; /// \@protocol P; /// \endcode /// /// 'C' is an ObjCInterfaceType C. It is sugar for an ObjCObjectType /// with base C and no protocols. /// /// 'C
<P>
' is an unspecialized ObjCObjectType with base C and protocol list [P]. /// 'C' is a specialized ObjCObjectType with type arguments 'C*' and no /// protocol list. /// 'C
<C*><P>
' is a specialized ObjCObjectType with base C, type arguments 'C*', /// and protocol list [P]. /// /// 'id' is a TypedefType which is sugar for an ObjCObjectPointerType whose /// pointee is an ObjCObjectType with base BuiltinType::ObjCIdType /// and no protocols. /// /// 'id
<P>
' is an ObjCObjectPointerType whose pointee is an ObjCObjectType /// with base BuiltinType::ObjCIdType and protocol list [P]. Eventually /// this should get its own sugar class to better represent the source. class ObjCObjectType : public Type, public ObjCProtocolQualifiers { friend class ObjCProtocolQualifiers; // ObjCObjectType.NumTypeArgs - the number of type arguments stored // after the ObjCObjectPointerType node. // ObjCObjectType.NumProtocols - the number of protocols stored // after the type arguments of ObjCObjectPointerType node. // // These protocols are those written directly on the type. If // protocol qualifiers ever become additive, the iterators will need // to get kindof complicated. // // In the canonical object type, these are sorted alphabetically // and uniqued. /// Either a BuiltinType or an InterfaceType or sugar for either. QualType BaseType; /// Cached superclass type. mutable llvm::PointerIntPair CachedSuperClassType; QualType *getTypeArgStorage(); const QualType *getTypeArgStorage() const { return const_cast(this)->getTypeArgStorage(); } ObjCProtocolDecl **getProtocolStorageImpl(); /// Return the number of qualifying protocols in this interface type, /// or 0 if there are none. unsigned getNumProtocolsImpl() const { return ObjCObjectTypeBits.NumProtocols; } void setNumProtocolsImpl(unsigned N) { ObjCObjectTypeBits.NumProtocols = N; } protected: enum Nonce_ObjCInterface { Nonce_ObjCInterface }; ObjCObjectType(QualType Canonical, QualType Base, ArrayRef typeArgs, ArrayRef protocols, bool isKindOf); ObjCObjectType(enum Nonce_ObjCInterface) : Type(ObjCInterface, QualType(), TypeDependence::None), BaseType(QualType(this_(), 0)) { ObjCObjectTypeBits.NumProtocols = 0; ObjCObjectTypeBits.NumTypeArgs = 0; ObjCObjectTypeBits.IsKindOf = 0; } void computeSuperClassTypeSlow() const; public: /// Gets the base type of this object type. This is always (possibly /// sugar for) one of: /// - the 'id' builtin type (as opposed to the 'id' type visible to the /// user, which is a typedef for an ObjCObjectPointerType) /// - the 'Class' builtin type (same caveat) /// - an ObjCObjectType (currently always an ObjCInterfaceType) QualType getBaseType() const { return BaseType; } bool isObjCId() const { return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCId); } bool isObjCClass() const { return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCClass); } bool isObjCUnqualifiedId() const { return qual_empty() && isObjCId(); } bool isObjCUnqualifiedClass() const { return qual_empty() && isObjCClass(); } bool isObjCUnqualifiedIdOrClass() const { if (!qual_empty()) return false; if (const BuiltinType *T = getBaseType()->getAs()) return T->getKind() == BuiltinType::ObjCId || T->getKind() == BuiltinType::ObjCClass; return false; } bool isObjCQualifiedId() const { return !qual_empty() && isObjCId(); } bool isObjCQualifiedClass() const { return !qual_empty() && isObjCClass(); } /// Gets the interface declaration for this object type, if the base type /// really is an interface. ObjCInterfaceDecl *getInterface() const; /// Determine whether this object type is "specialized", meaning /// that it has type arguments. bool isSpecialized() const; /// Determine whether this object type was written with type arguments. bool isSpecializedAsWritten() const { return ObjCObjectTypeBits.NumTypeArgs > 0; } /// Determine whether this object type is "unspecialized", meaning /// that it has no type arguments. 
bool isUnspecialized() const { return !isSpecialized(); } /// Determine whether this object type is "unspecialized" as /// written, meaning that it has no type arguments. bool isUnspecializedAsWritten() const { return !isSpecializedAsWritten(); } /// Retrieve the type arguments of this object type (semantically). ArrayRef getTypeArgs() const; /// Retrieve the type arguments of this object type as they were /// written. ArrayRef getTypeArgsAsWritten() const { return llvm::makeArrayRef(getTypeArgStorage(), ObjCObjectTypeBits.NumTypeArgs); } /// Whether this is a "__kindof" type as written. bool isKindOfTypeAsWritten() const { return ObjCObjectTypeBits.IsKindOf; } /// Whether this ia a "__kindof" type (semantically). bool isKindOfType() const; /// Retrieve the type of the superclass of this object type. /// /// This operation substitutes any type arguments into the /// superclass of the current class type, potentially producing a /// specialization of the superclass type. Produces a null type if /// there is no superclass. QualType getSuperClassType() const { if (!CachedSuperClassType.getInt()) computeSuperClassTypeSlow(); assert(CachedSuperClassType.getInt() && "Superclass not set?"); return QualType(CachedSuperClassType.getPointer(), 0); } /// Strip off the Objective-C "kindof" type and (with it) any /// protocol qualifiers. QualType stripObjCKindOfTypeAndQuals(const ASTContext &ctx) const; bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == ObjCObject || T->getTypeClass() == ObjCInterface; } }; /// A class providing a concrete implementation /// of ObjCObjectType, so as to not increase the footprint of /// ObjCInterfaceType. Code outside of ASTContext and the core type /// system should not reference this type. class ObjCObjectTypeImpl : public ObjCObjectType, public llvm::FoldingSetNode { friend class ASTContext; // If anyone adds fields here, ObjCObjectType::getProtocolStorage() // will need to be modified. ObjCObjectTypeImpl(QualType Canonical, QualType Base, ArrayRef typeArgs, ArrayRef protocols, bool isKindOf) : ObjCObjectType(Canonical, Base, typeArgs, protocols, isKindOf) {} public: void Profile(llvm::FoldingSetNodeID &ID); static void Profile(llvm::FoldingSetNodeID &ID, QualType Base, ArrayRef typeArgs, ArrayRef protocols, bool isKindOf); }; inline QualType *ObjCObjectType::getTypeArgStorage() { return reinterpret_cast(static_cast(this)+1); } inline ObjCProtocolDecl **ObjCObjectType::getProtocolStorageImpl() { return reinterpret_cast( getTypeArgStorage() + ObjCObjectTypeBits.NumTypeArgs); } inline ObjCProtocolDecl **ObjCTypeParamType::getProtocolStorageImpl() { return reinterpret_cast( static_cast(this)+1); } /// Interfaces are the core concept in Objective-C for object oriented design. /// They basically correspond to C++ classes. There are two kinds of interface /// types: normal interfaces like `NSString`, and qualified interfaces, which /// are qualified with a protocol list like `NSString`. /// /// ObjCInterfaceType guarantees the following properties when considered /// as a subtype of its superclass, ObjCObjectType: /// - There are no protocol qualifiers. To reinforce this, code which /// tries to invoke the protocol methods via an ObjCInterfaceType will /// fail to compile. /// - It is its own base type. That is, if T is an ObjCInterfaceType*, /// T->getBaseType() == QualType(T, 0). 
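// A minimal usage sketch of the ObjCObjectType accessors declared above.
// The helper is hypothetical (not part of Clang) and assumes a valid
// ObjCObjectType pointer obtained elsewhere.
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Type.h"
#include "llvm/Support/raw_ostream.h"

static void describeObjCObject(const clang::ObjCObjectType *OT) {
  if (const clang::ObjCInterfaceDecl *IFace = OT->getInterface())
    llvm::errs() << "interface: " << IFace->getName() << "\n";
  for (clang::QualType Arg : OT->getTypeArgsAsWritten())
    llvm::errs() << "type argument: " << Arg.getAsString() << "\n";
  for (clang::ObjCProtocolDecl *Proto : OT->quals())
    llvm::errs() << "protocol qualifier: " << Proto->getName() << "\n";
  if (OT->isKindOfTypeAsWritten())
    llvm::errs() << "written with __kindof\n";
}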
class ObjCInterfaceType : public ObjCObjectType { friend class ASTContext; // ASTContext creates these. friend class ASTReader; friend class ObjCInterfaceDecl; template friend class serialization::AbstractTypeReader; mutable ObjCInterfaceDecl *Decl; ObjCInterfaceType(const ObjCInterfaceDecl *D) : ObjCObjectType(Nonce_ObjCInterface), Decl(const_cast(D)) {} public: /// Get the declaration of this interface. ObjCInterfaceDecl *getDecl() const { return Decl; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } static bool classof(const Type *T) { return T->getTypeClass() == ObjCInterface; } // Nonsense to "hide" certain members of ObjCObjectType within this // class. People asking for protocols on an ObjCInterfaceType are // not going to get what they want: ObjCInterfaceTypes are // guaranteed to have no protocols. enum { qual_iterator, qual_begin, qual_end, getNumProtocols, getProtocol }; }; inline ObjCInterfaceDecl *ObjCObjectType::getInterface() const { QualType baseType = getBaseType(); while (const auto *ObjT = baseType->getAs()) { if (const auto *T = dyn_cast(ObjT)) return T->getDecl(); baseType = ObjT->getBaseType(); } return nullptr; } /// Represents a pointer to an Objective C object. /// /// These are constructed from pointer declarators when the pointee type is /// an ObjCObjectType (or sugar for one). In addition, the 'id' and 'Class' /// types are typedefs for these, and the protocol-qualified types 'id
<P>
' /// and 'Class
<P>
' are translated into these. /// /// Pointers to pointers to Objective C objects are still PointerTypes; /// only the first level of pointer gets it own type implementation. class ObjCObjectPointerType : public Type, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these. QualType PointeeType; ObjCObjectPointerType(QualType Canonical, QualType Pointee) : Type(ObjCObjectPointer, Canonical, Pointee->getDependence()), PointeeType(Pointee) {} public: /// Gets the type pointed to by this ObjC pointer. /// The result will always be an ObjCObjectType or sugar thereof. QualType getPointeeType() const { return PointeeType; } /// Gets the type pointed to by this ObjC pointer. Always returns non-null. /// /// This method is equivalent to getPointeeType() except that /// it discards any typedefs (or other sugar) between this /// type and the "outermost" object type. So for: /// \code /// \@class A; \@protocol P; \@protocol Q; /// typedef A
<P>
AP; /// typedef A A1; /// typedef A1
<P>
A1P; /// typedef A1P A1PQ; /// \endcode /// For 'A*', getObjectType() will return 'A'. /// For 'A
<P>
*', getObjectType() will return 'A
<P>
'. /// For 'AP*', getObjectType() will return 'A
<P>
'. /// For 'A1*', getObjectType() will return 'A'. /// For 'A1
<P>
*', getObjectType() will return 'A1
<P>
'. /// For 'A1P*', getObjectType() will return 'A1
<P>
'. /// For 'A1PQ*', getObjectType() will return 'A1', because /// adding protocols to a protocol-qualified base discards the /// old qualifiers (for now). But if it didn't, getObjectType() /// would return 'A1P' (and we'd have to make iterating over /// qualifiers more complicated). const ObjCObjectType *getObjectType() const { return PointeeType->castAs(); } /// If this pointer points to an Objective C /// \@interface type, gets the type for that interface. Any protocol /// qualifiers on the interface are ignored. /// /// \return null if the base type for this pointer is 'id' or 'Class' const ObjCInterfaceType *getInterfaceType() const; /// If this pointer points to an Objective \@interface /// type, gets the declaration for that interface. /// /// \return null if the base type for this pointer is 'id' or 'Class' ObjCInterfaceDecl *getInterfaceDecl() const { return getObjectType()->getInterface(); } /// True if this is equivalent to the 'id' type, i.e. if /// its object type is the primitive 'id' type with no protocols. bool isObjCIdType() const { return getObjectType()->isObjCUnqualifiedId(); } /// True if this is equivalent to the 'Class' type, /// i.e. if its object tive is the primitive 'Class' type with no protocols. bool isObjCClassType() const { return getObjectType()->isObjCUnqualifiedClass(); } /// True if this is equivalent to the 'id' or 'Class' type, bool isObjCIdOrClassType() const { return getObjectType()->isObjCUnqualifiedIdOrClass(); } /// True if this is equivalent to 'id
<P>
' for some non-empty set of /// protocols. bool isObjCQualifiedIdType() const { return getObjectType()->isObjCQualifiedId(); } /// True if this is equivalent to 'Class
<P>
' for some non-empty set of /// protocols. bool isObjCQualifiedClassType() const { return getObjectType()->isObjCQualifiedClass(); } /// Whether this is a "__kindof" type. bool isKindOfType() const { return getObjectType()->isKindOfType(); } /// Whether this type is specialized, meaning that it has type arguments. bool isSpecialized() const { return getObjectType()->isSpecialized(); } /// Whether this type is specialized, meaning that it has type arguments. bool isSpecializedAsWritten() const { return getObjectType()->isSpecializedAsWritten(); } /// Whether this type is unspecialized, meaning that is has no type arguments. bool isUnspecialized() const { return getObjectType()->isUnspecialized(); } /// Determine whether this object type is "unspecialized" as /// written, meaning that it has no type arguments. bool isUnspecializedAsWritten() const { return !isSpecializedAsWritten(); } /// Retrieve the type arguments for this type. ArrayRef getTypeArgs() const { return getObjectType()->getTypeArgs(); } /// Retrieve the type arguments for this type. ArrayRef getTypeArgsAsWritten() const { return getObjectType()->getTypeArgsAsWritten(); } /// An iterator over the qualifiers on the object type. Provided /// for convenience. This will always iterate over the full set of /// protocols on a type, not just those provided directly. using qual_iterator = ObjCObjectType::qual_iterator; using qual_range = llvm::iterator_range; qual_range quals() const { return qual_range(qual_begin(), qual_end()); } qual_iterator qual_begin() const { return getObjectType()->qual_begin(); } qual_iterator qual_end() const { return getObjectType()->qual_end(); } bool qual_empty() const { return getObjectType()->qual_empty(); } /// Return the number of qualifying protocols on the object type. unsigned getNumProtocols() const { return getObjectType()->getNumProtocols(); } /// Retrieve a qualifying protocol by index on the object type. ObjCProtocolDecl *getProtocol(unsigned I) const { return getObjectType()->getProtocol(I); } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } /// Retrieve the type of the superclass of this object pointer type. /// /// This operation substitutes any type arguments into the /// superclass of the current class type, potentially producing a /// pointer to a specialization of the superclass type. Produces a /// null type if there is no superclass. QualType getSuperClassType() const; /// Strip off the Objective-C "kindof" type and (with it) any /// protocol qualifiers. const ObjCObjectPointerType *stripObjCKindOfTypeAndQuals( const ASTContext &ctx) const; void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getPointeeType()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType T) { ID.AddPointer(T.getAsOpaquePtr()); } static bool classof(const Type *T) { return T->getTypeClass() == ObjCObjectPointer; } }; class AtomicType : public Type, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these. QualType ValueType; AtomicType(QualType ValTy, QualType Canonical) : Type(Atomic, Canonical, ValTy->getDependence()), ValueType(ValTy) {} public: /// Gets the type contained by this atomic type, i.e. /// the type returned by performing an atomic load of this atomic type. 
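// A minimal classification sketch using the ObjCObjectPointerType predicates
// declared above. The helper is hypothetical (not part of Clang) and assumes
// a valid clang::QualType.
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Type.h"
#include "llvm/Support/raw_ostream.h"

static void classifyObjCPointer(clang::QualType T) {
  const auto *OPT = T->getAs<clang::ObjCObjectPointerType>();
  if (!OPT)
    return;
  if (OPT->isObjCIdType())
    llvm::errs() << "plain 'id' (no protocol qualifiers)\n";
  else if (OPT->isObjCQualifiedIdType())
    llvm::errs() << "qualified 'id' with " << OPT->getNumProtocols()
                 << " protocols\n";
  else if (const clang::ObjCInterfaceDecl *IFace = OPT->getInterfaceDecl())
    llvm::errs() << "pointer to interface " << IFace->getName() << "\n";
}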
QualType getValueType() const { return ValueType; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getValueType()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType T) { ID.AddPointer(T.getAsOpaquePtr()); } static bool classof(const Type *T) { return T->getTypeClass() == Atomic; } }; /// PipeType - OpenCL20. class PipeType : public Type, public llvm::FoldingSetNode { friend class ASTContext; // ASTContext creates these. QualType ElementType; bool isRead; PipeType(QualType elemType, QualType CanonicalPtr, bool isRead) : Type(Pipe, CanonicalPtr, elemType->getDependence()), ElementType(elemType), isRead(isRead) {} public: QualType getElementType() const { return ElementType; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getElementType(), isReadOnly()); } static void Profile(llvm::FoldingSetNodeID &ID, QualType T, bool isRead) { ID.AddPointer(T.getAsOpaquePtr()); ID.AddBoolean(isRead); } static bool classof(const Type *T) { return T->getTypeClass() == Pipe; } bool isReadOnly() const { return isRead; } }; /// A fixed int type of a specified bitwidth. class ExtIntType final : public Type, public llvm::FoldingSetNode { friend class ASTContext; unsigned IsUnsigned : 1; unsigned NumBits : 24; protected: ExtIntType(bool isUnsigned, unsigned NumBits); public: bool isUnsigned() const { return IsUnsigned; } bool isSigned() const { return !IsUnsigned; } unsigned getNumBits() const { return NumBits; } bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, isUnsigned(), getNumBits()); } static void Profile(llvm::FoldingSetNodeID &ID, bool IsUnsigned, unsigned NumBits) { ID.AddBoolean(IsUnsigned); ID.AddInteger(NumBits); } static bool classof(const Type *T) { return T->getTypeClass() == ExtInt; } }; class DependentExtIntType final : public Type, public llvm::FoldingSetNode { friend class ASTContext; const ASTContext &Context; llvm::PointerIntPair ExprAndUnsigned; protected: DependentExtIntType(const ASTContext &Context, bool IsUnsigned, Expr *NumBits); public: bool isUnsigned() const; bool isSigned() const { return !isUnsigned(); } Expr *getNumBitsExpr() const; bool isSugared() const { return false; } QualType desugar() const { return QualType(this, 0); } void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Context, isUnsigned(), getNumBitsExpr()); } static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool IsUnsigned, Expr *NumBitsExpr); static bool classof(const Type *T) { return T->getTypeClass() == DependentExtInt; } }; /// A qualifier set is used to build a set of qualifiers. class QualifierCollector : public Qualifiers { public: QualifierCollector(Qualifiers Qs = Qualifiers()) : Qualifiers(Qs) {} /// Collect any qualifiers on the given type and return an /// unqualified type. The qualifiers are assumed to be consistent /// with those already in the type. const Type *strip(QualType type) { addFastQualifiers(type.getLocalFastQualifiers()); if (!type.hasLocalNonFastQualifiers()) return type.getTypePtrUnsafe(); const ExtQuals *extQuals = type.getExtQualsUnsafe(); addConsistentQualifiers(extQuals->getQualifiers()); return extQuals->getBaseType(); } /// Apply the collected qualifiers to the given type. 
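// A minimal sketch of the usual QualifierCollector strip/apply round trip:
// collect the qualifiers of one type, then re-apply them to another. The
// function and variable names are illustrative only; an ASTContext is assumed
// to be available.
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"

static clang::QualType transferQualifiers(clang::ASTContext &Ctx,
                                          clang::QualType Original,
                                          clang::QualType Replacement) {
  clang::QualifierCollector Qs;
  (void)Qs.strip(Original);          // record Original's qualifiers, drop them
  return Qs.apply(Ctx, Replacement); // re-apply the collected qualifiers
}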
QualType apply(const ASTContext &Context, QualType QT) const; /// Apply the collected qualifiers to the given type. QualType apply(const ASTContext &Context, const Type* T) const; }; /// A container of type source information. /// /// A client can read the relevant info using TypeLoc wrappers, e.g: /// @code /// TypeLoc TL = TypeSourceInfo->getTypeLoc(); /// TL.getBeginLoc().print(OS, SrcMgr); /// @endcode class alignas(8) TypeSourceInfo { // Contains a memory block after the class, used for type source information, // allocated by ASTContext. friend class ASTContext; QualType Ty; TypeSourceInfo(QualType ty) : Ty(ty) {} public: /// Return the type wrapped by this type source info. QualType getType() const { return Ty; } /// Return the TypeLoc wrapper for the type source info. TypeLoc getTypeLoc() const; // implemented in TypeLoc.h /// Override the type stored in this TypeSourceInfo. Use with caution! void overrideType(QualType T) { Ty = T; } }; // Inline function definitions. inline SplitQualType SplitQualType::getSingleStepDesugaredType() const { SplitQualType desugar = Ty->getLocallyUnqualifiedSingleStepDesugaredType().split(); desugar.Quals.addConsistentQualifiers(Quals); return desugar; } inline const Type *QualType::getTypePtr() const { return getCommonPtr()->BaseType; } inline const Type *QualType::getTypePtrOrNull() const { return (isNull() ? nullptr : getCommonPtr()->BaseType); } inline SplitQualType QualType::split() const { if (!hasLocalNonFastQualifiers()) return SplitQualType(getTypePtrUnsafe(), Qualifiers::fromFastMask(getLocalFastQualifiers())); const ExtQuals *eq = getExtQualsUnsafe(); Qualifiers qs = eq->getQualifiers(); qs.addFastQualifiers(getLocalFastQualifiers()); return SplitQualType(eq->getBaseType(), qs); } inline Qualifiers QualType::getLocalQualifiers() const { Qualifiers Quals; if (hasLocalNonFastQualifiers()) Quals = getExtQualsUnsafe()->getQualifiers(); Quals.addFastQualifiers(getLocalFastQualifiers()); return Quals; } inline Qualifiers QualType::getQualifiers() const { Qualifiers quals = getCommonPtr()->CanonicalType.getLocalQualifiers(); quals.addFastQualifiers(getLocalFastQualifiers()); return quals; } inline unsigned QualType::getCVRQualifiers() const { unsigned cvr = getCommonPtr()->CanonicalType.getLocalCVRQualifiers(); cvr |= getLocalCVRQualifiers(); return cvr; } inline QualType QualType::getCanonicalType() const { QualType canon = getCommonPtr()->CanonicalType; return canon.withFastQualifiers(getLocalFastQualifiers()); } inline bool QualType::isCanonical() const { return getTypePtr()->isCanonicalUnqualified(); } inline bool QualType::isCanonicalAsParam() const { if (!isCanonical()) return false; if (hasLocalQualifiers()) return false; const Type *T = getTypePtr(); if (T->isVariablyModifiedType() && T->hasSizedVLAType()) return false; return !isa(T) && !isa(T); } inline bool QualType::isConstQualified() const { return isLocalConstQualified() || getCommonPtr()->CanonicalType.isLocalConstQualified(); } inline bool QualType::isRestrictQualified() const { return isLocalRestrictQualified() || getCommonPtr()->CanonicalType.isLocalRestrictQualified(); } inline bool QualType::isVolatileQualified() const { return isLocalVolatileQualified() || getCommonPtr()->CanonicalType.isLocalVolatileQualified(); } inline bool QualType::hasQualifiers() const { return hasLocalQualifiers() || getCommonPtr()->CanonicalType.hasLocalQualifiers(); } inline QualType QualType::getUnqualifiedType() const { if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers()) return 
QualType(getTypePtr(), 0); return QualType(getSplitUnqualifiedTypeImpl(*this).Ty, 0); } inline SplitQualType QualType::getSplitUnqualifiedType() const { if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers()) return split(); return getSplitUnqualifiedTypeImpl(*this); } inline void QualType::removeLocalConst() { removeLocalFastQualifiers(Qualifiers::Const); } inline void QualType::removeLocalRestrict() { removeLocalFastQualifiers(Qualifiers::Restrict); } inline void QualType::removeLocalVolatile() { removeLocalFastQualifiers(Qualifiers::Volatile); } inline void QualType::removeLocalCVRQualifiers(unsigned Mask) { assert(!(Mask & ~Qualifiers::CVRMask) && "mask has non-CVR bits"); static_assert((int)Qualifiers::CVRMask == (int)Qualifiers::FastMask, "Fast bits differ from CVR bits!"); // Fast path: we don't need to touch the slow qualifiers. removeLocalFastQualifiers(Mask); } /// Check if this type has any address space qualifier. inline bool QualType::hasAddressSpace() const { return getQualifiers().hasAddressSpace(); } /// Return the address space of this type. inline LangAS QualType::getAddressSpace() const { return getQualifiers().getAddressSpace(); } /// Return the gc attribute of this type. inline Qualifiers::GC QualType::getObjCGCAttr() const { return getQualifiers().getObjCGCAttr(); } inline bool QualType::hasNonTrivialToPrimitiveDefaultInitializeCUnion() const { if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl()) return hasNonTrivialToPrimitiveDefaultInitializeCUnion(RD); return false; } inline bool QualType::hasNonTrivialToPrimitiveDestructCUnion() const { if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl()) return hasNonTrivialToPrimitiveDestructCUnion(RD); return false; } inline bool QualType::hasNonTrivialToPrimitiveCopyCUnion() const { if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl()) return hasNonTrivialToPrimitiveCopyCUnion(RD); return false; } inline FunctionType::ExtInfo getFunctionExtInfo(const Type &t) { if (const auto *PT = t.getAs()) { if (const auto *FT = PT->getPointeeType()->getAs()) return FT->getExtInfo(); } else if (const auto *FT = t.getAs()) return FT->getExtInfo(); return FunctionType::ExtInfo(); } inline FunctionType::ExtInfo getFunctionExtInfo(QualType t) { return getFunctionExtInfo(*t); } /// Determine whether this type is more /// qualified than the Other type. For example, "const volatile int" /// is more qualified than "const int", "volatile int", and /// "int". However, it is not more qualified than "const volatile /// int". inline bool QualType::isMoreQualifiedThan(QualType other) const { Qualifiers MyQuals = getQualifiers(); Qualifiers OtherQuals = other.getQualifiers(); return (MyQuals != OtherQuals && MyQuals.compatiblyIncludes(OtherQuals)); } /// Determine whether this type is at last /// as qualified as the Other type. For example, "const volatile /// int" is at least as qualified as "const int", "volatile int", /// "int", and "const volatile int". inline bool QualType::isAtLeastAsQualifiedAs(QualType other) const { Qualifiers OtherQuals = other.getQualifiers(); // Ignore __unaligned qualifier if this type is a void. if (getUnqualifiedType()->isVoidType()) OtherQuals.removeUnaligned(); return getQualifiers().compatiblyIncludes(OtherQuals); } /// If Type is a reference type (e.g., const /// int&), returns the type that the reference refers to ("const /// int"). Otherwise, returns the type itself. 
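// A small sketch of the qualifier-comparison helpers and the reference
// adjustment described here. It assumes QualType values for 'const volatile
// int', 'const int', and 'const int&' were obtained elsewhere; the helper
// itself is hypothetical.
#include "clang/AST/Type.h"
#include <cassert>

static void checkQualifierRelations(clang::QualType CVInt, clang::QualType CInt,
                                    clang::QualType ConstIntRef) {
  assert(CVInt.isMoreQualifiedThan(CInt));       // strictly more qualified
  assert(!CInt.isMoreQualifiedThan(CVInt));
  assert(CVInt.isAtLeastAsQualifiedAs(CVInt));   // reflexive, unlike the above
  // 'const int&' adjusts to 'const int', which is (a qualified) builtin type.
  assert(ConstIntRef.getNonReferenceType()->isBuiltinType());
}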
This routine is used /// throughout Sema to implement C++ 5p6: /// /// If an expression initially has the type "reference to T" (8.3.2, /// 8.5.3), the type is adjusted to "T" prior to any further /// analysis, the expression designates the object or function /// denoted by the reference, and the expression is an lvalue. inline QualType QualType::getNonReferenceType() const { if (const auto *RefType = (*this)->getAs()) return RefType->getPointeeType(); else return *this; } inline bool QualType::isCForbiddenLValueType() const { return ((getTypePtr()->isVoidType() && !hasQualifiers()) || getTypePtr()->isFunctionType()); } /// Tests whether the type is categorized as a fundamental type. /// /// \returns True for types specified in C++0x [basic.fundamental]. inline bool Type::isFundamentalType() const { return isVoidType() || isNullPtrType() || // FIXME: It's really annoying that we don't have an // 'isArithmeticType()' which agrees with the standard definition. (isArithmeticType() && !isEnumeralType()); } /// Tests whether the type is categorized as a compound type. /// /// \returns True for types specified in C++0x [basic.compound]. inline bool Type::isCompoundType() const { // C++0x [basic.compound]p1: // Compound types can be constructed in the following ways: // -- arrays of objects of a given type [...]; return isArrayType() || // -- functions, which have parameters of given types [...]; isFunctionType() || // -- pointers to void or objects or functions [...]; isPointerType() || // -- references to objects or functions of a given type. [...] isReferenceType() || // -- classes containing a sequence of objects of various types, [...]; isRecordType() || // -- unions, which are classes capable of containing objects of different // types at different times; isUnionType() || // -- enumerations, which comprise a set of named constant values. [...]; isEnumeralType() || // -- pointers to non-static class members, [...]. isMemberPointerType(); } inline bool Type::isFunctionType() const { return isa(CanonicalType); } inline bool Type::isPointerType() const { return isa(CanonicalType); } inline bool Type::isAnyPointerType() const { return isPointerType() || isObjCObjectPointerType(); } inline bool Type::isBlockPointerType() const { return isa(CanonicalType); } inline bool Type::isReferenceType() const { return isa(CanonicalType); } inline bool Type::isLValueReferenceType() const { return isa(CanonicalType); } inline bool Type::isRValueReferenceType() const { return isa(CanonicalType); } inline bool Type::isObjectPointerType() const { // Note: an "object pointer type" is not the same thing as a pointer to an // object type; rather, it is a pointer to an object type or a pointer to cv // void. 
if (const auto *T = getAs()) return !T->getPointeeType()->isFunctionType(); else return false; } inline bool Type::isFunctionPointerType() const { if (const auto *T = getAs()) return T->getPointeeType()->isFunctionType(); else return false; } inline bool Type::isFunctionReferenceType() const { if (const auto *T = getAs()) return T->getPointeeType()->isFunctionType(); else return false; } inline bool Type::isMemberPointerType() const { return isa(CanonicalType); } inline bool Type::isMemberFunctionPointerType() const { if (const auto *T = getAs()) return T->isMemberFunctionPointer(); else return false; } inline bool Type::isMemberDataPointerType() const { if (const auto *T = getAs()) return T->isMemberDataPointer(); else return false; } inline bool Type::isArrayType() const { return isa(CanonicalType); } inline bool Type::isConstantArrayType() const { return isa(CanonicalType); } inline bool Type::isIncompleteArrayType() const { return isa(CanonicalType); } inline bool Type::isVariableArrayType() const { return isa(CanonicalType); } inline bool Type::isDependentSizedArrayType() const { return isa(CanonicalType); } inline bool Type::isBuiltinType() const { return isa(CanonicalType); } inline bool Type::isRecordType() const { return isa(CanonicalType); } inline bool Type::isEnumeralType() const { return isa(CanonicalType); } inline bool Type::isAnyComplexType() const { return isa(CanonicalType); } inline bool Type::isVectorType() const { return isa(CanonicalType); } inline bool Type::isExtVectorType() const { return isa(CanonicalType); } inline bool Type::isMatrixType() const { return isa(CanonicalType); } inline bool Type::isConstantMatrixType() const { return isa(CanonicalType); } inline bool Type::isDependentAddressSpaceType() const { return isa(CanonicalType); } inline bool Type::isObjCObjectPointerType() const { return isa(CanonicalType); } inline bool Type::isObjCObjectType() const { return isa(CanonicalType); } inline bool Type::isObjCObjectOrInterfaceType() const { return isa(CanonicalType) || isa(CanonicalType); } inline bool Type::isAtomicType() const { return isa(CanonicalType); } inline bool Type::isUndeducedAutoType() const { return isa(CanonicalType); } inline bool Type::isObjCQualifiedIdType() const { if (const auto *OPT = getAs()) return OPT->isObjCQualifiedIdType(); return false; } inline bool Type::isObjCQualifiedClassType() const { if (const auto *OPT = getAs()) return OPT->isObjCQualifiedClassType(); return false; } inline bool Type::isObjCIdType() const { if (const auto *OPT = getAs()) return OPT->isObjCIdType(); return false; } inline bool Type::isObjCClassType() const { if (const auto *OPT = getAs()) return OPT->isObjCClassType(); return false; } inline bool Type::isObjCSelType() const { if (const auto *OPT = getAs()) return OPT->getPointeeType()->isSpecificBuiltinType(BuiltinType::ObjCSel); return false; } inline bool Type::isObjCBuiltinType() const { return isObjCIdType() || isObjCClassType() || isObjCSelType(); } inline bool Type::isDecltypeType() const { return isa(this); } #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ inline bool Type::is##Id##Type() const { \ return isSpecificBuiltinType(BuiltinType::Id); \ } #include "clang/Basic/OpenCLImageTypes.def" inline bool Type::isSamplerT() const { return isSpecificBuiltinType(BuiltinType::OCLSampler); } inline bool Type::isEventT() const { return isSpecificBuiltinType(BuiltinType::OCLEvent); } inline bool Type::isClkEventT() const { return isSpecificBuiltinType(BuiltinType::OCLClkEvent); } inline bool 
Type::isQueueT() const { return isSpecificBuiltinType(BuiltinType::OCLQueue); } inline bool Type::isReserveIDT() const { return isSpecificBuiltinType(BuiltinType::OCLReserveID); } inline bool Type::isImageType() const { #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) is##Id##Type() || return #include "clang/Basic/OpenCLImageTypes.def" false; // end boolean or operation } inline bool Type::isPipeType() const { return isa(CanonicalType); } inline bool Type::isExtIntType() const { return isa(CanonicalType); } #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ inline bool Type::is##Id##Type() const { \ return isSpecificBuiltinType(BuiltinType::Id); \ } #include "clang/Basic/OpenCLExtensionTypes.def" inline bool Type::isOCLIntelSubgroupAVCType() const { #define INTEL_SUBGROUP_AVC_TYPE(ExtType, Id) \ isOCLIntelSubgroupAVC##Id##Type() || return #include "clang/Basic/OpenCLExtensionTypes.def" false; // end of boolean or operation } inline bool Type::isOCLExtOpaqueType() const { #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) is##Id##Type() || return #include "clang/Basic/OpenCLExtensionTypes.def" false; // end of boolean or operation } inline bool Type::isOpenCLSpecificType() const { return isSamplerT() || isEventT() || isImageType() || isClkEventT() || isQueueT() || isReserveIDT() || isPipeType() || isOCLExtOpaqueType(); } inline bool Type::isTemplateTypeParmType() const { return isa(CanonicalType); } inline bool Type::isSpecificBuiltinType(unsigned K) const { if (const BuiltinType *BT = getAs()) { return BT->getKind() == static_cast(K); } return false; } inline bool Type::isPlaceholderType() const { if (const auto *BT = dyn_cast(this)) return BT->isPlaceholderType(); return false; } inline const BuiltinType *Type::getAsPlaceholderType() const { if (const auto *BT = dyn_cast(this)) if (BT->isPlaceholderType()) return BT; return nullptr; } inline bool Type::isSpecificPlaceholderType(unsigned K) const { assert(BuiltinType::isPlaceholderTypeKind((BuiltinType::Kind) K)); return isSpecificBuiltinType(K); } inline bool Type::isNonOverloadPlaceholderType() const { if (const auto *BT = dyn_cast(this)) return BT->isNonOverloadPlaceholderType(); return false; } inline bool Type::isVoidType() const { return isSpecificBuiltinType(BuiltinType::Void); } inline bool Type::isHalfType() const { // FIXME: Should we allow complex __fp16? Probably not. return isSpecificBuiltinType(BuiltinType::Half); } inline bool Type::isFloat16Type() const { return isSpecificBuiltinType(BuiltinType::Float16); } inline bool Type::isBFloat16Type() const { return isSpecificBuiltinType(BuiltinType::BFloat16); } inline bool Type::isFloat128Type() const { return isSpecificBuiltinType(BuiltinType::Float128); } inline bool Type::isNullPtrType() const { return isSpecificBuiltinType(BuiltinType::NullPtr); } bool IsEnumDeclComplete(EnumDecl *); bool IsEnumDeclScoped(EnumDecl *); inline bool Type::isIntegerType() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() >= BuiltinType::Bool && BT->getKind() <= BuiltinType::Int128; if (const EnumType *ET = dyn_cast(CanonicalType)) { // Incomplete enum types are not treated as integer types. // FIXME: In C++, enum types are never integer types. 
return IsEnumDeclComplete(ET->getDecl()) && !IsEnumDeclScoped(ET->getDecl()); } return isExtIntType(); } inline bool Type::isFixedPointType() const { if (const auto *BT = dyn_cast(CanonicalType)) { return BT->getKind() >= BuiltinType::ShortAccum && BT->getKind() <= BuiltinType::SatULongFract; } return false; } inline bool Type::isFixedPointOrIntegerType() const { return isFixedPointType() || isIntegerType(); } inline bool Type::isSaturatedFixedPointType() const { if (const auto *BT = dyn_cast(CanonicalType)) { return BT->getKind() >= BuiltinType::SatShortAccum && BT->getKind() <= BuiltinType::SatULongFract; } return false; } inline bool Type::isUnsaturatedFixedPointType() const { return isFixedPointType() && !isSaturatedFixedPointType(); } inline bool Type::isSignedFixedPointType() const { if (const auto *BT = dyn_cast(CanonicalType)) { return ((BT->getKind() >= BuiltinType::ShortAccum && BT->getKind() <= BuiltinType::LongAccum) || (BT->getKind() >= BuiltinType::ShortFract && BT->getKind() <= BuiltinType::LongFract) || (BT->getKind() >= BuiltinType::SatShortAccum && BT->getKind() <= BuiltinType::SatLongAccum) || (BT->getKind() >= BuiltinType::SatShortFract && BT->getKind() <= BuiltinType::SatLongFract)); } return false; } inline bool Type::isUnsignedFixedPointType() const { return isFixedPointType() && !isSignedFixedPointType(); } inline bool Type::isScalarType() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() > BuiltinType::Void && BT->getKind() <= BuiltinType::NullPtr; if (const EnumType *ET = dyn_cast(CanonicalType)) // Enums are scalar types, but only if they are defined. Incomplete enums // are not treated as scalar types. return IsEnumDeclComplete(ET->getDecl()); return isa(CanonicalType) || isa(CanonicalType) || isa(CanonicalType) || isa(CanonicalType) || isa(CanonicalType) || isExtIntType(); } inline bool Type::isIntegralOrEnumerationType() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() >= BuiltinType::Bool && BT->getKind() <= BuiltinType::Int128; // Check for a complete enum type; incomplete enum types are not properly an // enumeration type in the sense required here. if (const auto *ET = dyn_cast(CanonicalType)) return IsEnumDeclComplete(ET->getDecl()); return isExtIntType(); } inline bool Type::isBooleanType() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() == BuiltinType::Bool; return false; } inline bool Type::isUndeducedType() const { auto *DT = getContainedDeducedType(); return DT && !DT->isDeduced(); } /// Determines whether this is a type for which one can define /// an overloaded operator. inline bool Type::isOverloadableType() const { return isDependentType() || isRecordType() || isEnumeralType(); } /// Determines whether this type can decay to a pointer type. 
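// A minimal classification sketch over the predicates defined above
// (hypothetical helper; assumes a valid clang::QualType).
#include "clang/AST/Type.h"
#include "llvm/Support/raw_ostream.h"

static void classifyForConversion(clang::QualType T) {
  if (T->isBooleanType())
    llvm::errs() << "bool\n";
  else if (T->isIntegerType())
    llvm::errs() << "integer (includes complete unscoped enums and _ExtInt)\n";
  else if (T->isFixedPointType())
    llvm::errs() << "fixed-point\n";
  else if (T->isScalarType())
    llvm::errs() << "other scalar (pointer, floating, member pointer, ...)\n";
  if (T->canDecayToPointerType())
    llvm::errs() << "decays to a pointer (array or function type)\n";
}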
inline bool Type::canDecayToPointerType() const { return isFunctionType() || isArrayType(); } inline bool Type::hasPointerRepresentation() const { return (isPointerType() || isReferenceType() || isBlockPointerType() || isObjCObjectPointerType() || isNullPtrType()); } inline bool Type::hasObjCPointerRepresentation() const { return isObjCObjectPointerType(); } inline const Type *Type::getBaseElementTypeUnsafe() const { const Type *type = this; while (const ArrayType *arrayType = type->getAsArrayTypeUnsafe()) type = arrayType->getElementType().getTypePtr(); return type; } inline const Type *Type::getPointeeOrArrayElementType() const { const Type *type = this; if (type->isAnyPointerType()) return type->getPointeeType().getTypePtr(); else if (type->isArrayType()) return type->getBaseElementTypeUnsafe(); return type; } /// Insertion operator for diagnostics. This allows sending address spaces into /// a diagnostic with <<. inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB, LangAS AS) { DB.AddTaggedVal(static_cast>(AS), DiagnosticsEngine::ArgumentKind::ak_addrspace); return DB; } /// Insertion operator for partial diagnostics. This allows sending adress /// spaces into a diagnostic with <<. inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD, LangAS AS) { PD.AddTaggedVal(static_cast>(AS), DiagnosticsEngine::ArgumentKind::ak_addrspace); return PD; } /// Insertion operator for diagnostics. This allows sending Qualifiers into a /// diagnostic with <<. inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB, Qualifiers Q) { DB.AddTaggedVal(Q.getAsOpaqueValue(), DiagnosticsEngine::ArgumentKind::ak_qual); return DB; } /// Insertion operator for partial diagnostics. This allows sending Qualifiers /// into a diagnostic with <<. inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD, Qualifiers Q) { PD.AddTaggedVal(Q.getAsOpaqueValue(), DiagnosticsEngine::ArgumentKind::ak_qual); return PD; } /// Insertion operator for diagnostics. This allows sending QualType's into a /// diagnostic with <<. inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB, QualType T) { DB.AddTaggedVal(reinterpret_cast(T.getAsOpaquePtr()), DiagnosticsEngine::ak_qualtype); return DB; } /// Insertion operator for partial diagnostics. This allows sending QualType's /// into a diagnostic with <<. inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD, QualType T) { PD.AddTaggedVal(reinterpret_cast(T.getAsOpaquePtr()), DiagnosticsEngine::ak_qualtype); return PD; } // Helper class template that is used by Type::getAs to ensure that one does // not try to look through a qualified type to get to an array type. template using TypeIsArrayType = std::integral_constant::value || std::is_base_of::value>; // Member-template getAs'. template const T *Type::getAs() const { static_assert(!TypeIsArrayType::value, "ArrayType cannot be used with getAs!"); // If this is directly a T type, return it. if (const auto *Ty = dyn_cast(this)) return Ty; // If the canonical form of this type isn't the right kind, reject it. if (!isa(CanonicalType)) return nullptr; // If this is a typedef for the type, strip the typedef off without // losing all typedef information. return cast(getUnqualifiedDesugaredType()); } template const T *Type::getAsAdjusted() const { static_assert(!TypeIsArrayType::value, "ArrayType cannot be used with getAsAdjusted!"); // If this is directly a T type, return it. 
if (const auto *Ty = dyn_cast(this)) return Ty; // If the canonical form of this type isn't the right kind, reject it. if (!isa(CanonicalType)) return nullptr; // Strip off type adjustments that do not modify the underlying nature of the // type. const Type *Ty = this; while (Ty) { if (const auto *A = dyn_cast(Ty)) Ty = A->getModifiedType().getTypePtr(); else if (const auto *E = dyn_cast(Ty)) Ty = E->desugar().getTypePtr(); else if (const auto *P = dyn_cast(Ty)) Ty = P->desugar().getTypePtr(); else if (const auto *A = dyn_cast(Ty)) Ty = A->desugar().getTypePtr(); else if (const auto *M = dyn_cast(Ty)) Ty = M->desugar().getTypePtr(); else break; } // Just because the canonical type is correct does not mean we can use cast<>, // since we may not have stripped off all the sugar down to the base type. return dyn_cast(Ty); } inline const ArrayType *Type::getAsArrayTypeUnsafe() const { // If this is directly an array type, return it. if (const auto *arr = dyn_cast(this)) return arr; // If the canonical form of this type isn't the right kind, reject it. if (!isa(CanonicalType)) return nullptr; // If this is a typedef for the type, strip the typedef off without // losing all typedef information. return cast(getUnqualifiedDesugaredType()); } template const T *Type::castAs() const { static_assert(!TypeIsArrayType::value, "ArrayType cannot be used with castAs!"); if (const auto *ty = dyn_cast(this)) return ty; assert(isa(CanonicalType)); return cast(getUnqualifiedDesugaredType()); } inline const ArrayType *Type::castAsArrayTypeUnsafe() const { assert(isa(CanonicalType)); if (const auto *arr = dyn_cast(this)) return arr; return cast(getUnqualifiedDesugaredType()); } DecayedType::DecayedType(QualType OriginalType, QualType DecayedPtr, QualType CanonicalPtr) : AdjustedType(Decayed, OriginalType, DecayedPtr, CanonicalPtr) { #ifndef NDEBUG QualType Adjusted = getAdjustedType(); (void)AttributedType::stripOuterNullability(Adjusted); assert(isa(Adjusted)); #endif } QualType DecayedType::getPointeeType() const { QualType Decayed = getDecayedType(); (void)AttributedType::stripOuterNullability(Decayed); return cast(Decayed)->getPointeeType(); } // Get the decimal string representation of a fixed point type, represented // as a scaled integer. // TODO: At some point, we should change the arguments to instead just accept an // APFixedPoint instead of APSInt and scale. void FixedPointValueToString(SmallVectorImpl &Str, llvm::APSInt Val, unsigned Scale); } // namespace clang #endif // LLVM_CLANG_AST_TYPE_H diff --git a/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td b/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td index 4540ea0e1952..ed91670829b8 100644 --- a/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td +++ b/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td @@ -1,895 +1,896 @@ //==--- TypeProperties.td - Type property definitions ---------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// include "clang/AST/PropertiesBase.td" include "clang/Basic/TypeNodes.td" let Class = ComplexType in { def : Property<"elementType", QualType> { let Read = [{ node->getElementType() }]; } def : Creator<[{ return ctx.getComplexType(elementType); }]>; } let Class = PointerType in { def : Property<"pointeeType", QualType> { let Read = [{ node->getPointeeType() }]; } def : Creator<[{ return ctx.getPointerType(pointeeType); }]>; } let Class = AdjustedType in { def : Property<"originalType", QualType> { let Read = [{ node->getOriginalType() }]; } def : Property<"adjustedType", QualType> { let Read = [{ node->getAdjustedType() }]; } def : Creator<[{ return ctx.getAdjustedType(originalType, adjustedType); }]>; } let Class = DecayedType in { def : Override { // We don't need to serialize the adjusted type because we can always // derive it by decaying the original type. let IgnoredProperties = [ "adjustedType" ]; } def : Creator<[{ return ctx.getAdjustedParameterType(originalType); }]>; } let Class = BlockPointerType in { def : Property<"pointeeType", QualType> { let Read = [{ node->getPointeeType() }]; } def : Creator<[{ return ctx.getBlockPointerType(pointeeType); }]>; } let Class = ReferenceType in { def : Property<"pointeeTypeAsWritten", QualType> { let Read = [{ node->getPointeeTypeAsWritten() }]; } } let Class = LValueReferenceType in { def : Property<"isSpelledAsLValue", Bool> { let Read = [{ node->isSpelledAsLValue() }]; } def : Creator<[{ return ctx.getLValueReferenceType(pointeeTypeAsWritten, isSpelledAsLValue); }]>; } let Class = RValueReferenceType in { def : Creator<[{ return ctx.getRValueReferenceType(pointeeTypeAsWritten); }]>; } let Class = MemberPointerType in { def : Property<"pointeeType", QualType> { let Read = [{ node->getPointeeType() }]; } def : Property<"baseType", QualType> { let Read = [{ QualType(node->getClass(), 0) }]; } def : Creator<[{ return ctx.getMemberPointerType(pointeeType, baseType.getTypePtr()); }]>; } let Class = ArrayType in { def : Property<"elementType", QualType> { let Read = [{ node->getElementType() }]; } def : Property<"sizeModifier", ArraySizeModifier> { let Read = [{ node->getSizeModifier() }]; } def : Property<"indexQualifiers", Qualifiers> { let Read = [{ Qualifiers::fromCVRMask(node->getIndexTypeCVRQualifiers()) }]; } } let Class = ConstantArrayType in { def : Property<"sizeValue", APInt> { let Read = [{ node->getSize() }]; } def : Property<"size", ExprRef> { let Read = [{ node->getSizeExpr() }]; } def : Creator<[{ return ctx.getConstantArrayType(elementType, sizeValue, size, sizeModifier, indexQualifiers.getCVRQualifiers()); }]>; } let Class = IncompleteArrayType in { def : Creator<[{ return ctx.getIncompleteArrayType(elementType, sizeModifier, indexQualifiers.getCVRQualifiers()); }]>; } let Class = VariableArrayType in { def : Property<"leftBracketLoc", SourceLocation> { let Read = [{ node->getLBracketLoc() }]; } def : Property<"rightBracketLoc", SourceLocation> { let Read = [{ node->getRBracketLoc() }]; } def : Property<"size", ExprRef> { let Read = [{ node->getSizeExpr() }]; } def : Creator<[{ return ctx.getVariableArrayType(elementType, size, sizeModifier, indexQualifiers.getCVRQualifiers(), SourceRange(leftBracketLoc, rightBracketLoc)); }]>; } let Class = DependentSizedArrayType in { def : Property<"size", ExprRef> { let Read = [{ node->getSizeExpr() }]; } def : Property<"leftBracketLoc", 
SourceLocation> { let Read = [{ node->getLBracketLoc() }]; } def : Property<"rightBracketLoc", SourceLocation> { let Read = [{ node->getRBracketLoc() }]; } def : Creator<[{ return ctx.getDependentSizedArrayType(elementType, size, sizeModifier, indexQualifiers.getCVRQualifiers(), SourceRange(leftBracketLoc, rightBracketLoc)); }]>; } let Class = VectorType in { def : Property<"elementType", QualType> { let Read = [{ node->getElementType() }]; } def : Property<"numElements", UInt32> { let Read = [{ node->getNumElements() }]; } def : Property<"vectorKind", VectorKind> { let Read = [{ node->getVectorKind() }]; } def : Creator<[{ return ctx.getVectorType(elementType, numElements, vectorKind); }]>; } let Class = DependentVectorType in { def : Property<"elementType", QualType> { let Read = [{ node->getElementType() }]; } def : Property<"size", ExprRef> { let Read = [{ node->getSizeExpr() }]; } def : Property<"attributeLoc", SourceLocation> { let Read = [{ node->getAttributeLoc() }]; } def : Property<"vectorKind", VectorKind> { let Read = [{ node->getVectorKind() }]; } def : Creator<[{ return ctx.getDependentVectorType(elementType, size, attributeLoc, vectorKind); }]>; } let Class = ExtVectorType in { def : Override { let IgnoredProperties = [ "vectorKind" ]; } def : Creator<[{ return ctx.getExtVectorType(elementType, numElements); }]>; } let Class = DependentSizedExtVectorType in { def : Property<"elementType", QualType> { let Read = [{ node->getElementType() }]; } def : Property<"size", ExprRef> { let Read = [{ node->getSizeExpr() }]; } def : Property<"attributeLoc", SourceLocation> { let Read = [{ node->getAttributeLoc() }]; } def : Creator<[{ return ctx.getDependentSizedExtVectorType(elementType, size, attributeLoc); }]>; } let Class = MatrixType in { def : Property<"elementType", QualType> { let Read = [{ node->getElementType() }]; } } let Class = ConstantMatrixType in { def : Property<"numRows", UInt32> { let Read = [{ node->getNumRows() }]; } def : Property<"numColumns", UInt32> { let Read = [{ node->getNumColumns() }]; } def : Creator<[{ return ctx.getConstantMatrixType(elementType, numRows, numColumns); }]>; } let Class = DependentSizedMatrixType in { def : Property<"rows", ExprRef> { let Read = [{ node->getRowExpr() }]; } def : Property<"columns", ExprRef> { let Read = [{ node->getColumnExpr() }]; } def : Property<"attributeLoc", SourceLocation> { let Read = [{ node->getAttributeLoc() }]; } def : Creator<[{ return ctx.getDependentSizedMatrixType(elementType, rows, columns, attributeLoc); }]>; } let Class = FunctionType in { def : Property<"returnType", QualType> { let Read = [{ node->getReturnType() }]; } def : Property<"noReturn", Bool> { let Read = [{ node->getExtInfo().getNoReturn() }]; } def : Property<"hasRegParm", Bool> { let Read = [{ node->getExtInfo().getHasRegParm() }]; } def : Property<"regParm", UInt32> { let Read = [{ node->getExtInfo().getRegParm() }]; } def : Property<"callingConvention", CallingConv> { let Read = [{ node->getExtInfo().getCC() }]; } def : Property<"producesResult", Bool> { let Read = [{ node->getExtInfo().getProducesResult() }]; } def : Property<"noCallerSavedRegs", Bool> { let Read = [{ node->getExtInfo().getNoCallerSavedRegs() }]; } def : Property<"noCfCheck", Bool> { let Read = [{ node->getExtInfo().getNoCfCheck() }]; } def : Property<"cmseNSCall", Bool> { let Read = [{ node->getExtInfo().getCmseNSCall() }]; } } let Class = FunctionNoProtoType in { def : Creator<[{ auto extInfo = FunctionType::ExtInfo(noReturn, hasRegParm, regParm, callingConvention, 
producesResult, noCallerSavedRegs, noCfCheck, cmseNSCall); return ctx.getFunctionNoProtoType(returnType, extInfo); }]>; } let Class = FunctionProtoType in { def : Property<"variadic", Bool> { let Read = [{ node->isVariadic() }]; } def : Property<"trailingReturn", Bool> { let Read = [{ node->hasTrailingReturn() }]; } def : Property<"methodQualifiers", Qualifiers> { let Read = [{ node->getMethodQuals() }]; } def : Property<"refQualifier", RefQualifierKind> { let Read = [{ node->getRefQualifier() }]; } def : Property<"exceptionSpecifier", ExceptionSpecInfo> { let Read = [{ node->getExceptionSpecInfo() }]; } def : Property<"parameters", Array> { let Read = [{ node->getParamTypes() }]; } def : Property<"extParameterInfo", Array> { let Read = [{ node->hasExtParameterInfos() ? node->getExtParameterInfos() : llvm::ArrayRef() }]; } def : Creator<[{ auto extInfo = FunctionType::ExtInfo(noReturn, hasRegParm, regParm, callingConvention, producesResult, noCallerSavedRegs, noCfCheck, cmseNSCall); FunctionProtoType::ExtProtoInfo epi; epi.ExtInfo = extInfo; epi.Variadic = variadic; epi.HasTrailingReturn = trailingReturn; epi.TypeQuals = methodQualifiers; epi.RefQualifier = refQualifier; epi.ExceptionSpec = exceptionSpecifier; epi.ExtParameterInfos = extParameterInfo.empty() ? nullptr : extParameterInfo.data(); return ctx.getFunctionType(returnType, parameters, epi); }]>; } let Class = AtomicType in { def : Property<"valueType", QualType> { let Read = [{ node->getValueType() }]; } def : Creator<[{ return ctx.getAtomicType(valueType); }]>; } let Class = UnresolvedUsingType in { def : Property<"declaration", DeclRef> { let Read = [{ node->getDecl() }]; } def : Creator<[{ return ctx.getTypeDeclType(cast(declaration)); }]>; } let Class = TypedefType in { def : Property<"declaration", DeclRef> { let Read = [{ node->getDecl() }]; } def : Property<"canonicalType", Optional> { let Read = [{ makeOptionalFromNullable(node->getCanonicalTypeInternal()) }]; } def : Creator<[{ QualType finalCanonicalType = canonicalType ? 
ctx.getCanonicalType(*canonicalType) : QualType(); return ctx.getTypedefType(cast(declaration), finalCanonicalType); }]>; } let Class = TypeOfExprType in { def : Property<"expression", ExprRef> { let Read = [{ node->getUnderlyingExpr() }]; } def : Creator<[{ return ctx.getTypeOfExprType(expression); }]>; } let Class = TypeOfType in { def : Property<"underlyingType", QualType> { let Read = [{ node->getUnderlyingType() }]; } def : Creator<[{ return ctx.getTypeOfType(underlyingType); }]>; } let Class = DecltypeType in { def : Property<"underlyingType", QualType> { let Read = [{ node->getUnderlyingType() }]; } def : Property<"expression", ExprRef> { let Read = [{ node->getUnderlyingExpr() }]; } def : Creator<[{ return ctx.getDecltypeType(expression, underlyingType); }]>; } let Class = UnaryTransformType in { def : Property<"baseType", QualType> { let Read = [{ node->getBaseType() }]; } def : Property<"underlyingType", QualType> { let Read = [{ node->getUnderlyingType() }]; } def : Property<"transform", UnaryTypeTransformKind> { let Read = [{ node->getUTTKind() }]; } def : Creator<[{ return ctx.getUnaryTransformType(baseType, underlyingType, transform); }]>; } let Class = AutoType in { def : Property<"deducedType", Optional> { let Read = [{ makeOptionalFromNullable(node->getDeducedType()) }]; } def : Property<"keyword", AutoTypeKeyword> { let Read = [{ node->getKeyword() }]; } def : Property<"typeConstraintConcept", Optional> { let Read = [{ makeOptionalFromPointer( const_cast(node->getTypeConstraintConcept())) }]; } def : Property<"typeConstraintArguments", Array> { let Read = [{ node->getTypeConstraintArguments() }]; } // FIXME: better enumerated value // Only really required when the deduced type is null def : Property<"dependence", UInt32> { let Read = [{ !node->getDeducedType().isNull() ? 0 : node->containsUnexpandedParameterPack() ? 2 : node->isDependentType() ? 1 : 0 }]; } def : Creator<[{ return ctx.getAutoType(makeNullableFromOptional(deducedType), keyword, /*isDependentWithoutDeducedType*/ dependence > 0, /*isPackWithoutDeducedType*/ dependence > 1, makePointerFromOptional(typeConstraintConcept), typeConstraintArguments); }]>; } let Class = DeducedTemplateSpecializationType in { def : Property<"templateName", Optional> { let Read = [{ makeOptionalFromNullable(node->getTemplateName()) }]; } def : Property<"deducedType", QualType> { let Read = [{ node->getDeducedType() }]; } // Only really required when the deduced type is null def : Property<"dependent", Bool> { let Read = [{ !node->getDeducedType().isNull() ? false : node->isDependentType() }]; } def : Creator<[{ return ctx.getDeducedTemplateSpecializationType( makeNullableFromOptional(templateName), deducedType, dependent); }]>; } let Class = TagType in { def : Property<"dependent", Bool> { let Read = [{ node->isDependentType() }]; } def : Property<"declaration", DeclRef> { // Serializing a reference to the canonical declaration is apparently // necessary to make module-merging work. 
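// Editorial note (not part of TypeProperties.td): every property named in a
// block like this one is also the name of a local value visible inside the
// matching Creator body, which is why the creators below can refer to
// `declaration` and `dependent` directly. A hedged sketch of the generated
// deserialization shape (function and helper names here are hypothetical,
// not actual TableGen backend output):
//   QualType readTagLikeType(Reader &R) {
//     auto declaration = R.readDeclRef();   // from Property<"declaration">
//     auto dependent   = R.readBool();      // from Property<"dependent">
//     /* ...the Creator body from the .td file is spliced in here... */
//   }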
let Read = [{ node->getDecl()->getCanonicalDecl() }]; } } let Class = EnumType in { def : Creator<[{ QualType result = ctx.getEnumType(cast(declaration)); if (dependent) const_cast(result.getTypePtr()) ->addDependence(TypeDependence::DependentInstantiation); return result; }]>; } let Class = RecordType in { def : Creator<[{ auto record = cast(declaration); QualType result = ctx.getRecordType(record); if (dependent) const_cast(result.getTypePtr()) ->addDependence(TypeDependence::DependentInstantiation); return result; }]>; } let Class = ElaboratedType in { def : Property<"keyword", ElaboratedTypeKeyword> { let Read = [{ node->getKeyword() }]; } def : Property<"qualifier", NestedNameSpecifier> { let Read = [{ node->getQualifier() }]; } def : Property<"namedType", QualType> { let Read = [{ node->getNamedType() }]; } def : Property<"ownedTag", Optional> { let Read = [{ makeOptionalFromPointer( const_cast(node->getOwnedTagDecl())) }]; } def : Creator<[{ return ctx.getElaboratedType(keyword, qualifier, namedType, makePointerFromOptional(ownedTag)); }]>; } let Class = InjectedClassNameType in { def : Property<"declaration", DeclRef> { // FIXME: drilling down to the canonical declaration is what the // existing serialization code was doing, but it's not clear why. let Read = [{ node->getDecl()->getCanonicalDecl() }]; } def : Property<"injectedSpecializationType", QualType> { let Read = [{ node->getInjectedSpecializationType() }]; } def : Creator<[{ // FIXME: ASTContext::getInjectedClassNameType is not currently suitable // for AST reading, too much interdependencies. const Type *T = nullptr; auto typeDecl = cast(declaration); for (auto *DI = typeDecl; DI; DI = DI->getPreviousDecl()) { if (const Type *existing = DI->getTypeForDecl()) { T = existing; break; } } if (!T) { T = new (ctx, TypeAlignment) InjectedClassNameType(typeDecl, injectedSpecializationType); for (auto *DI = typeDecl; DI; DI = DI->getPreviousDecl()) DI->setTypeForDecl(T); } return QualType(T, 0); }]>; } let Class = ParenType in { def : Property<"innerType", QualType> { let Read = [{ node->getInnerType() }]; } def : Creator<[{ return ctx.getParenType(innerType); }]>; } let Class = MacroQualifiedType in { def : Property<"underlyingType", QualType> { let Read = [{ node->getUnderlyingType() }]; } def : Property<"macroIdentifier", Identifier> { let Read = [{ node->getMacroIdentifier() }]; } def : Creator<[{ return ctx.getMacroQualifiedType(underlyingType, macroIdentifier); }]>; } let Class = AttributedType in { def : Property<"modifiedType", QualType> { let Read = [{ node->getModifiedType() }]; } def : Property<"equivalentType", QualType> { let Read = [{ node->getEquivalentType() }]; } def : Property<"attribute", AttrKind> { let Read = [{ node->getAttrKind() }]; } def : Creator<[{ return ctx.getAttributedType(attribute, modifiedType, equivalentType); }]>; } let Class = DependentAddressSpaceType in { def : Property<"pointeeType", QualType> { let Read = [{ node->getPointeeType() }]; } def : Property<"addressSpace", ExprRef> { let Read = [{ node->getAddrSpaceExpr() }]; } def : Property<"attributeLoc", SourceLocation> { let Read = [{ node->getAttributeLoc() }]; } def : Creator<[{ return ctx.getDependentAddressSpaceType(pointeeType, addressSpace, attributeLoc); }]>; } let Class = TemplateSpecializationType in { def : Property<"dependent", Bool> { let Read = [{ node->isDependentType() }]; } def : Property<"templateName", TemplateName> { let Read = [{ node->getTemplateName() }]; } def : Property<"templateArguments", Array> { let Read = [{ 
node->template_arguments() }]; } def : Property<"underlyingType", Optional> { let Read = [{ node->isTypeAlias() ? llvm::Optional(node->getAliasedType()) : node->isCanonicalUnqualified() ? llvm::None : llvm::Optional(node->getCanonicalTypeInternal()) }]; } def : Creator<[{ QualType result; if (!underlyingType.hasValue()) { result = ctx.getCanonicalTemplateSpecializationType(templateName, templateArguments); } else { result = ctx.getTemplateSpecializationType(templateName, templateArguments, *underlyingType); } if (dependent) const_cast(result.getTypePtr()) ->addDependence(TypeDependence::DependentInstantiation); return result; }]>; } let Class = DependentTemplateSpecializationType in { def : Property<"keyword", ElaboratedTypeKeyword> { let Read = [{ node->getKeyword() }]; } def : Property<"qualifier", NestedNameSpecifier> { let Read = [{ node->getQualifier() }]; } def : Property<"name", Identifier> { let Read = [{ node->getIdentifier() }]; } def : Property<"templateArguments", Array> { let Read = [{ node->template_arguments() }]; } def : Creator<[{ return ctx.getDependentTemplateSpecializationType(keyword, qualifier, name, templateArguments); }]>; } let Class = TemplateTypeParmType in { def : Property<"depth", UInt32> { let Read = [{ node->getDepth() }]; } def : Property<"index", UInt32> { let Read = [{ node->getIndex() }]; } def : Property<"isParameterPack", Bool> { let Read = [{ node->isParameterPack() }]; } def : Property<"declaration", Optional> { let Read = [{ makeOptionalFromPointer( const_cast(node->getDecl())) }]; } def : Creator<[{ return ctx.getTemplateTypeParmType(depth, index, isParameterPack, makePointerFromOptional(declaration)); }]>; } let Class = SubstTemplateTypeParmType in { def : Property<"replacedParameter", QualType> { let Read = [{ QualType(node->getReplacedParameter(), 0) }]; } def : Property<"replacementType", QualType> { let Read = [{ node->getReplacementType() }]; } def : Creator<[{ // The call to getCanonicalType here existed in ASTReader.cpp, too. 
return ctx.getSubstTemplateTypeParmType( cast(replacedParameter), ctx.getCanonicalType(replacementType)); }]>; } let Class = PackExpansionType in { def : Property<"pattern", QualType> { let Read = [{ node->getPattern() }]; } def : Property<"numExpansions", Optional> { let Read = [{ node->getNumExpansions() }]; } def : Creator<[{ - return ctx.getPackExpansionType(pattern, numExpansions); + return ctx.getPackExpansionType(pattern, numExpansions, + /*ExpectPackInType*/false); }]>; } let Class = SubstTemplateTypeParmPackType in { def : Property<"replacedParameter", QualType> { let Read = [{ QualType(node->getReplacedParameter(), 0) }]; } def : Property<"replacementPack", TemplateArgument> { let Read = [{ node->getArgumentPack() }]; } def : Creator<[{ return ctx.getSubstTemplateTypeParmPackType( cast(replacedParameter), replacementPack); }]>; } let Class = BuiltinType in { def : Property<"kind", BuiltinTypeKind> { let Read = [{ node->getKind() }]; } def : Creator<[{ switch (kind) { #define IMAGE_TYPE(IMGTYPE, ID, SINGLETON_ID, ACCESS, SUFFIX) \ case BuiltinType::ID: return ctx.SINGLETON_ID; #include "clang/Basic/OpenCLImageTypes.def" #define EXT_OPAQUE_TYPE(EXTTYPE, ID, EXT) \ case BuiltinType::ID: return ctx.ID##Ty; #include "clang/Basic/OpenCLExtensionTypes.def" #define SVE_TYPE(NAME, ID, SINGLETON_ID) \ case BuiltinType::ID: return ctx.SINGLETON_ID; #include "clang/Basic/AArch64SVEACLETypes.def" #define BUILTIN_TYPE(ID, SINGLETON_ID) \ case BuiltinType::ID: return ctx.SINGLETON_ID; #include "clang/AST/BuiltinTypes.def" } llvm_unreachable("unreachable builtin case"); }]>; } let Class = DependentNameType in { def : Property<"keyword", ElaboratedTypeKeyword> { let Read = [{ node->getKeyword() }]; } def : Property<"qualifier", NestedNameSpecifier> { let Read = [{ node->getQualifier() }]; } def : Property<"name", Identifier> { let Read = [{ node->getIdentifier() }]; } def : Property<"underlyingType", Optional> { let Read = [{ node->isCanonicalUnqualified() ? llvm::None : llvm::Optional(node->getCanonicalTypeInternal()) }]; } def : Creator<[{ QualType canon = (underlyingType ? ctx.getCanonicalType(*underlyingType) : QualType()); return ctx.getDependentNameType(keyword, qualifier, name, canon); }]>; } let Class = ObjCObjectType in { def : Property<"baseType", QualType> { let Read = [{ node->getBaseType() }]; } def : Property<"typeArgsAsWritten", Array> { let Read = [{ node->getTypeArgsAsWritten() }]; } def : Property<"qualifiers", Array> { let Read = [{ node->getProtocols() }]; } def : Property<"isKindOfTypeAsWritten", Bool> { let Read = [{ node->isKindOfTypeAsWritten() }]; } def : Creator<[{ return ctx.getObjCObjectType(baseType, typeArgsAsWritten, qualifiers, isKindOfTypeAsWritten); }]>; } let Class = ObjCInterfaceType in { // We don't actually want any of the properties of the superclass. def : Override { let IgnoredProperties = [ "baseType", "typeArgsAsWritten", "qualifiers", "isKindOfTypeAsWritten" ]; } def : Property<"declaration", DeclRef> { // FIXME: drilling down to the canonical declaration is what the // existing serialization code was doing, but it's not clear why. 
let Read = [{ node->getDecl()->getCanonicalDecl() }]; } def : Creator<[{ return ctx.getObjCInterfaceType( cast(declaration->getCanonicalDecl())); }]>; } let Class = ObjCTypeParamType in { def : Property<"declaration", ObjCTypeParamDeclRef> { let Read = [{ node->getDecl() }]; } def : Property<"qualifiers", Array> { let Read = [{ node->getProtocols() }]; } def : Creator<[{ return ctx.getObjCTypeParamType(declaration, qualifiers); }]>; } let Class = ObjCObjectPointerType in { def : Property<"pointeeType", QualType> { let Read = [{ node->getPointeeType() }]; } def : Creator<[{ return ctx.getObjCObjectPointerType(pointeeType); }]>; } let Class = PipeType in { def : Property<"elementType", QualType> { let Read = [{ node->getElementType() }]; } def : Property<"isReadOnly", Bool> { let Read = [{ node->isReadOnly() }]; } def : Creator<[{ return ctx.getPipeType(elementType, isReadOnly); }]>; } let Class = ExtIntType in { def : Property<"isUnsigned", Bool> { let Read = [{ node->isUnsigned() }]; } def : Property <"numBits", UInt32> { let Read = [{ node->getNumBits() }]; } def : Creator<[{ return ctx.getExtIntType(isUnsigned, numBits); }]>; } let Class = DependentExtIntType in { def : Property<"isUnsigned", Bool> { let Read = [{ node->isUnsigned() }]; } def : Property <"numBitsExpr", ExprRef> { let Read = [{ node->getNumBitsExpr() }]; } def : Creator<[{ return ctx.getDependentExtIntType(isUnsigned, numBitsExpr); }]>; } diff --git a/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td b/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td index a4e3002b9075..011394c3ef45 100644 --- a/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td +++ b/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td @@ -1,111 +1,111 @@ include "clang/Basic/ASTNode.td" class TypeNode : ASTNode { TypeNode Base = base; bit Abstract = abstract; } /// A type node that is only used to represent dependent types in C++. For /// example, DependentTemplateSpecializationType is used to represent types /// where the base template-id is dependent (such as `T::foo`). Code /// that only works with non-dependent types can ignore these type nodes. class AlwaysDependent {} /// A type node that is never used to represent a canonical type, which is to /// say that it always represents some sort of type "sugar" which can /// (supposedly) be erased without affecting the formal behavior of the /// language. For example, in standard C/C++, typedefs do not introduce new /// types and do not affect the semantics of the program. Code that only /// works with canonical types can ignore these type nodes. /// /// Note that this simple story about non-canonical types is not the whole /// truth. Languages and extensions often have formation rules which differ /// based on how a type is spelled and which therefore are not consistent /// with immediately stipping away type sugar. More critically, attributes on /// typedefs can have semantic impacts in ways that are only reflected in our /// AST by preserving the typedef sugar; for example, we do not otherwise /// represent the alignment attribute on typedefs, and so it is necessary to /// preserve typedef structure into most parts of IR generation. class NeverCanonical {} /// A type node that only represents a canonical type in some dependent cases. 
/// For example, `std::vector` (a TemplateSpecializationType) is /// considered to be a non-canonical representation for the RecordType /// referencing the concrete ClassTemplateSpecializationDecl; but /// `std::vector` cannot be resolved to a concrete specialization /// and so remains canonical. Code which only works with non-dependent /// canonical types can ignore these nodes. class NeverCanonicalUnlessDependent {} /// A type node which never has component type structure. Some code may be /// able to operate on leaf types faster than they can on non-leaf types. /// /// For example, the function type `void (int)` is not a leaf type because it /// is structurally composed of component types (`void` and `int`). /// /// A struct type is a leaf type because its field types are not part of its /// type-expression. /// /// Nodes like `TypedefType` which are syntactically leaves but can desugar /// to types that may not be leaves should not declare this. class LeafType {} def Type : TypeNode; def BuiltinType : TypeNode, LeafType; def ComplexType : TypeNode; def PointerType : TypeNode; def BlockPointerType : TypeNode; def ReferenceType : TypeNode; def LValueReferenceType : TypeNode; def RValueReferenceType : TypeNode; def MemberPointerType : TypeNode; def ArrayType : TypeNode; def ConstantArrayType : TypeNode; def IncompleteArrayType : TypeNode; def VariableArrayType : TypeNode; def DependentSizedArrayType : TypeNode, AlwaysDependent; def DependentSizedExtVectorType : TypeNode, AlwaysDependent; def DependentAddressSpaceType : TypeNode, AlwaysDependent; def VectorType : TypeNode; def DependentVectorType : TypeNode, AlwaysDependent; def ExtVectorType : TypeNode; def MatrixType : TypeNode; def ConstantMatrixType : TypeNode; def DependentSizedMatrixType : TypeNode, AlwaysDependent; def FunctionType : TypeNode; def FunctionProtoType : TypeNode; def FunctionNoProtoType : TypeNode; def UnresolvedUsingType : TypeNode, AlwaysDependent; def ParenType : TypeNode, NeverCanonical; def TypedefType : TypeNode, NeverCanonical; def MacroQualifiedType : TypeNode, NeverCanonical; def AdjustedType : TypeNode, NeverCanonical; def DecayedType : TypeNode, NeverCanonical; def TypeOfExprType : TypeNode, NeverCanonicalUnlessDependent; def TypeOfType : TypeNode, NeverCanonicalUnlessDependent; def DecltypeType : TypeNode, NeverCanonicalUnlessDependent; def UnaryTransformType : TypeNode, NeverCanonicalUnlessDependent; def TagType : TypeNode; def RecordType : TypeNode, LeafType; def EnumType : TypeNode, LeafType; def ElaboratedType : TypeNode, NeverCanonical; def AttributedType : TypeNode, NeverCanonical; def TemplateTypeParmType : TypeNode, AlwaysDependent, LeafType; def SubstTemplateTypeParmType : TypeNode, NeverCanonical; def SubstTemplateTypeParmPackType : TypeNode, AlwaysDependent; def TemplateSpecializationType : TypeNode, NeverCanonicalUnlessDependent; def DeducedType : TypeNode; def AutoType : TypeNode; def DeducedTemplateSpecializationType : TypeNode; def InjectedClassNameType : TypeNode, AlwaysDependent, LeafType; def DependentNameType : TypeNode, AlwaysDependent; def DependentTemplateSpecializationType : TypeNode, AlwaysDependent; -def PackExpansionType : TypeNode, NeverCanonicalUnlessDependent; +def PackExpansionType : TypeNode, AlwaysDependent; def ObjCTypeParamType : TypeNode, NeverCanonical; def ObjCObjectType : TypeNode; def ObjCInterfaceType : TypeNode, LeafType; def ObjCObjectPointerType : TypeNode; def PipeType : TypeNode; def AtomicType : TypeNode; def ExtIntType : TypeNode; def DependentExtIntType : 
TypeNode, AlwaysDependent; diff --git a/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h b/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h index 8db03babfb1e..0a22b5af7c64 100644 --- a/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h +++ b/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h @@ -1,2701 +1,2710 @@ //===--- DeclSpec.h - Parsed declaration specifiers -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// /// \file /// This file defines the classes used to store parsed information about /// declaration-specifiers and declarators. /// /// \verbatim /// static const int volatile x, *y, *(*(*z)[10])(const void *x); /// ------------------------- - -- --------------------------- /// declaration-specifiers \ | / /// declarators /// \endverbatim /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_DECLSPEC_H #define LLVM_CLANG_SEMA_DECLSPEC_H #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjCCommon.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/Lambda.h" #include "clang/Basic/OperatorKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/Token.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/ParsedAttr.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" namespace clang { class ASTContext; class CXXRecordDecl; class TypeLoc; class LangOptions; class IdentifierInfo; class NamespaceAliasDecl; class NamespaceDecl; class ObjCDeclSpec; class Sema; class Declarator; struct TemplateIdAnnotation; /// Represents a C++ nested-name-specifier or a global scope specifier. /// /// These can be in 3 states: /// 1) Not present, identified by isEmpty() /// 2) Present, identified by isNotEmpty() /// 2.a) Valid, identified by isValid() /// 2.b) Invalid, identified by isInvalid(). /// /// isSet() is deprecated because it mostly corresponded to "valid" but was /// often used as if it meant "present". /// /// The actual scope is described by getScopeRep(). class CXXScopeSpec { SourceRange Range; NestedNameSpecifierLocBuilder Builder; public: SourceRange getRange() const { return Range; } void setRange(SourceRange R) { Range = R; } void setBeginLoc(SourceLocation Loc) { Range.setBegin(Loc); } void setEndLoc(SourceLocation Loc) { Range.setEnd(Loc); } SourceLocation getBeginLoc() const { return Range.getBegin(); } SourceLocation getEndLoc() const { return Range.getEnd(); } /// Retrieve the representation of the nested-name-specifier. NestedNameSpecifier *getScopeRep() const { return Builder.getRepresentation(); } /// Extend the current nested-name-specifier by another /// nested-name-specifier component of the form 'type::'. /// /// \param Context The AST context in which this nested-name-specifier /// resides. /// /// \param TemplateKWLoc The location of the 'template' keyword, if present. /// /// \param TL The TypeLoc that describes the type preceding the '::'. /// /// \param ColonColonLoc The location of the trailing '::'. 
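/// \code
///   // Hedged sketch (simplified from real parser call sites): building the
///   // specifier 'A::B::' one component at a time with the identifier form
///   // of Extend declared below. IdentA/IdentB and the source locations are
///   // hypothetical inputs, not names from this header.
///   CXXScopeSpec SS;
///   SS.Extend(Context, IdentA, IdentALoc, FirstColonColonLoc);
///   SS.Extend(Context, IdentB, IdentBLoc, SecondColonColonLoc);
/// \endcode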
void Extend(ASTContext &Context, SourceLocation TemplateKWLoc, TypeLoc TL, SourceLocation ColonColonLoc); /// Extend the current nested-name-specifier by another /// nested-name-specifier component of the form 'identifier::'. /// /// \param Context The AST context in which this nested-name-specifier /// resides. /// /// \param Identifier The identifier. /// /// \param IdentifierLoc The location of the identifier. /// /// \param ColonColonLoc The location of the trailing '::'. void Extend(ASTContext &Context, IdentifierInfo *Identifier, SourceLocation IdentifierLoc, SourceLocation ColonColonLoc); /// Extend the current nested-name-specifier by another /// nested-name-specifier component of the form 'namespace::'. /// /// \param Context The AST context in which this nested-name-specifier /// resides. /// /// \param Namespace The namespace. /// /// \param NamespaceLoc The location of the namespace name. /// /// \param ColonColonLoc The location of the trailing '::'. void Extend(ASTContext &Context, NamespaceDecl *Namespace, SourceLocation NamespaceLoc, SourceLocation ColonColonLoc); /// Extend the current nested-name-specifier by another /// nested-name-specifier component of the form 'namespace-alias::'. /// /// \param Context The AST context in which this nested-name-specifier /// resides. /// /// \param Alias The namespace alias. /// /// \param AliasLoc The location of the namespace alias /// name. /// /// \param ColonColonLoc The location of the trailing '::'. void Extend(ASTContext &Context, NamespaceAliasDecl *Alias, SourceLocation AliasLoc, SourceLocation ColonColonLoc); /// Turn this (empty) nested-name-specifier into the global /// nested-name-specifier '::'. void MakeGlobal(ASTContext &Context, SourceLocation ColonColonLoc); /// Turns this (empty) nested-name-specifier into '__super' /// nested-name-specifier. /// /// \param Context The AST context in which this nested-name-specifier /// resides. /// /// \param RD The declaration of the class in which nested-name-specifier /// appeared. /// /// \param SuperLoc The location of the '__super' keyword. /// name. /// /// \param ColonColonLoc The location of the trailing '::'. void MakeSuper(ASTContext &Context, CXXRecordDecl *RD, SourceLocation SuperLoc, SourceLocation ColonColonLoc); /// Make a new nested-name-specifier from incomplete source-location /// information. /// /// FIXME: This routine should be used very, very rarely, in cases where we /// need to synthesize a nested-name-specifier. Most code should instead use /// \c Adopt() with a proper \c NestedNameSpecifierLoc. void MakeTrivial(ASTContext &Context, NestedNameSpecifier *Qualifier, SourceRange R); /// Adopt an existing nested-name-specifier (with source-range /// information). void Adopt(NestedNameSpecifierLoc Other); /// Retrieve a nested-name-specifier with location information, copied /// into the given AST context. /// /// \param Context The context into which this nested-name-specifier will be /// copied. NestedNameSpecifierLoc getWithLocInContext(ASTContext &Context) const; /// Retrieve the location of the name in the last qualifier /// in this nested name specifier. /// /// For example, the location of \c bar /// in /// \verbatim /// \::foo::bar<0>:: /// ^~~ /// \endverbatim SourceLocation getLastQualifierNameLoc() const; /// No scope specifier. bool isEmpty() const { return Range.isInvalid() && getScopeRep() == nullptr; } /// A scope specifier is present, but may be valid or invalid. 
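/// \code
///   // Hedged sketch of the three states described above:
///   //   nothing parsed                  -> SS.isEmpty()
///   //   parsed but malformed            -> SS.isInvalid() (range set, no rep)
///   //   parsed and resolved to a scope  -> SS.isValid()
///   if (SS.isNotEmpty() && SS.isInvalid())
///     return true; // a hypothetical caller bailing out on a bad specifier
/// \endcode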
bool isNotEmpty() const { return !isEmpty(); } /// An error occurred during parsing of the scope specifier. bool isInvalid() const { return Range.isValid() && getScopeRep() == nullptr; } /// A scope specifier is present, and it refers to a real scope. bool isValid() const { return getScopeRep() != nullptr; } /// Indicate that this nested-name-specifier is invalid. void SetInvalid(SourceRange R) { assert(R.isValid() && "Must have a valid source range"); if (Range.getBegin().isInvalid()) Range.setBegin(R.getBegin()); Range.setEnd(R.getEnd()); Builder.Clear(); } /// Deprecated. Some call sites intend isNotEmpty() while others intend /// isValid(). bool isSet() const { return getScopeRep() != nullptr; } void clear() { Range = SourceRange(); Builder.Clear(); } /// Retrieve the data associated with the source-location information. char *location_data() const { return Builder.getBuffer().first; } /// Retrieve the size of the data associated with source-location /// information. unsigned location_size() const { return Builder.getBuffer().second; } }; /// Captures information about "declaration specifiers". /// /// "Declaration specifiers" encompasses storage-class-specifiers, /// type-specifiers, type-qualifiers, and function-specifiers. class DeclSpec { public: /// storage-class-specifier /// \note The order of these enumerators is important for diagnostics. enum SCS { SCS_unspecified = 0, SCS_typedef, SCS_extern, SCS_static, SCS_auto, SCS_register, SCS_private_extern, SCS_mutable }; // Import thread storage class specifier enumeration and constants. // These can be combined with SCS_extern and SCS_static. typedef ThreadStorageClassSpecifier TSCS; static const TSCS TSCS_unspecified = clang::TSCS_unspecified; static const TSCS TSCS___thread = clang::TSCS___thread; static const TSCS TSCS_thread_local = clang::TSCS_thread_local; static const TSCS TSCS__Thread_local = clang::TSCS__Thread_local; // Import type specifier width enumeration and constants. typedef TypeSpecifierWidth TSW; static const TSW TSW_unspecified = clang::TSW_unspecified; static const TSW TSW_short = clang::TSW_short; static const TSW TSW_long = clang::TSW_long; static const TSW TSW_longlong = clang::TSW_longlong; enum TSC { TSC_unspecified, TSC_imaginary, TSC_complex }; // Import type specifier sign enumeration and constants. typedef TypeSpecifierSign TSS; static const TSS TSS_unspecified = clang::TSS_unspecified; static const TSS TSS_signed = clang::TSS_signed; static const TSS TSS_unsigned = clang::TSS_unsigned; // Import type specifier type enumeration and constants. 
typedef TypeSpecifierType TST; static const TST TST_unspecified = clang::TST_unspecified; static const TST TST_void = clang::TST_void; static const TST TST_char = clang::TST_char; static const TST TST_wchar = clang::TST_wchar; static const TST TST_char8 = clang::TST_char8; static const TST TST_char16 = clang::TST_char16; static const TST TST_char32 = clang::TST_char32; static const TST TST_int = clang::TST_int; static const TST TST_int128 = clang::TST_int128; static const TST TST_extint = clang::TST_extint; static const TST TST_half = clang::TST_half; static const TST TST_BFloat16 = clang::TST_BFloat16; static const TST TST_float = clang::TST_float; static const TST TST_double = clang::TST_double; static const TST TST_float16 = clang::TST_Float16; static const TST TST_accum = clang::TST_Accum; static const TST TST_fract = clang::TST_Fract; static const TST TST_float128 = clang::TST_float128; static const TST TST_bool = clang::TST_bool; static const TST TST_decimal32 = clang::TST_decimal32; static const TST TST_decimal64 = clang::TST_decimal64; static const TST TST_decimal128 = clang::TST_decimal128; static const TST TST_enum = clang::TST_enum; static const TST TST_union = clang::TST_union; static const TST TST_struct = clang::TST_struct; static const TST TST_interface = clang::TST_interface; static const TST TST_class = clang::TST_class; static const TST TST_typename = clang::TST_typename; static const TST TST_typeofType = clang::TST_typeofType; static const TST TST_typeofExpr = clang::TST_typeofExpr; static const TST TST_decltype = clang::TST_decltype; static const TST TST_decltype_auto = clang::TST_decltype_auto; static const TST TST_underlyingType = clang::TST_underlyingType; static const TST TST_auto = clang::TST_auto; static const TST TST_auto_type = clang::TST_auto_type; static const TST TST_unknown_anytype = clang::TST_unknown_anytype; static const TST TST_atomic = clang::TST_atomic; #define GENERIC_IMAGE_TYPE(ImgType, Id) \ static const TST TST_##ImgType##_t = clang::TST_##ImgType##_t; #include "clang/Basic/OpenCLImageTypes.def" static const TST TST_error = clang::TST_error; // type-qualifiers enum TQ { // NOTE: These flags must be kept in sync with Qualifiers::TQ. TQ_unspecified = 0, TQ_const = 1, TQ_restrict = 2, TQ_volatile = 4, TQ_unaligned = 8, // This has no corresponding Qualifiers::TQ value, because it's not treated // as a qualifier in our type system. TQ_atomic = 16 }; /// ParsedSpecifiers - Flags to query which specifiers were applied. This is /// returned by getParsedSpecifiers. enum ParsedSpecifiers { PQ_None = 0, PQ_StorageClassSpecifier = 1, PQ_TypeSpecifier = 2, PQ_TypeQualifier = 4, PQ_FunctionSpecifier = 8 // FIXME: Attributes should be included here. }; private: // storage-class-specifier /*SCS*/unsigned StorageClassSpec : 3; /*TSCS*/unsigned ThreadStorageClassSpec : 2; unsigned SCS_extern_in_linkage_spec : 1; // type-specifier /*TSW*/unsigned TypeSpecWidth : 2; /*TSC*/unsigned TypeSpecComplex : 2; /*TSS*/unsigned TypeSpecSign : 2; /*TST*/unsigned TypeSpecType : 6; unsigned TypeAltiVecVector : 1; unsigned TypeAltiVecPixel : 1; unsigned TypeAltiVecBool : 1; unsigned TypeSpecOwned : 1; unsigned TypeSpecPipe : 1; unsigned TypeSpecSat : 1; unsigned ConstrainedAuto : 1; // type-qualifiers unsigned TypeQualifiers : 5; // Bitwise OR of TQ. 
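  // Editorial illustration (not in the original header): TypeQualifiers holds
  // a bitwise OR of the TQ enumerators above, so a declspec written as
  // 'const volatile' satisfies both of these tests:
  //   (getTypeQualifiers() & DeclSpec::TQ_const) != 0
  //   (getTypeQualifiers() & DeclSpec::TQ_volatile) != 0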
// function-specifier unsigned FS_inline_specified : 1; unsigned FS_forceinline_specified: 1; unsigned FS_virtual_specified : 1; unsigned FS_noreturn_specified : 1; // friend-specifier unsigned Friend_specified : 1; // constexpr-specifier unsigned ConstexprSpecifier : 2; union { UnionParsedType TypeRep; Decl *DeclRep; Expr *ExprRep; TemplateIdAnnotation *TemplateIdRep; }; /// ExplicitSpecifier - Store information about explicit spicifer. ExplicitSpecifier FS_explicit_specifier; // attributes. ParsedAttributes Attrs; // Scope specifier for the type spec, if applicable. CXXScopeSpec TypeScope; // SourceLocation info. These are null if the item wasn't specified or if // the setting was synthesized. SourceRange Range; SourceLocation StorageClassSpecLoc, ThreadStorageClassSpecLoc; SourceRange TSWRange; SourceLocation TSCLoc, TSSLoc, TSTLoc, AltiVecLoc, TSSatLoc; /// TSTNameLoc - If TypeSpecType is any of class, enum, struct, union, /// typename, then this is the location of the named type (if present); /// otherwise, it is the same as TSTLoc. Hence, the pair TSTLoc and /// TSTNameLoc provides source range info for tag types. SourceLocation TSTNameLoc; SourceRange TypeofParensRange; SourceLocation TQ_constLoc, TQ_restrictLoc, TQ_volatileLoc, TQ_atomicLoc, TQ_unalignedLoc; SourceLocation FS_inlineLoc, FS_virtualLoc, FS_explicitLoc, FS_noreturnLoc; SourceLocation FS_explicitCloseParenLoc; SourceLocation FS_forceinlineLoc; SourceLocation FriendLoc, ModulePrivateLoc, ConstexprLoc; SourceLocation TQ_pipeLoc; WrittenBuiltinSpecs writtenBS; void SaveWrittenBuiltinSpecs(); ObjCDeclSpec *ObjCQualifiers; static bool isTypeRep(TST T) { return (T == TST_typename || T == TST_typeofType || T == TST_underlyingType || T == TST_atomic); } static bool isExprRep(TST T) { return (T == TST_typeofExpr || T == TST_decltype || T == TST_extint); } static bool isTemplateIdRep(TST T) { return (T == TST_auto || T == TST_decltype_auto); } DeclSpec(const DeclSpec &) = delete; void operator=(const DeclSpec &) = delete; public: static bool isDeclRep(TST T) { return (T == TST_enum || T == TST_struct || T == TST_interface || T == TST_union || T == TST_class); } DeclSpec(AttributeFactory &attrFactory) : StorageClassSpec(SCS_unspecified), ThreadStorageClassSpec(TSCS_unspecified), SCS_extern_in_linkage_spec(false), TypeSpecWidth(TSW_unspecified), TypeSpecComplex(TSC_unspecified), TypeSpecSign(TSS_unspecified), TypeSpecType(TST_unspecified), TypeAltiVecVector(false), TypeAltiVecPixel(false), TypeAltiVecBool(false), TypeSpecOwned(false), TypeSpecPipe(false), TypeSpecSat(false), ConstrainedAuto(false), TypeQualifiers(TQ_unspecified), FS_inline_specified(false), FS_forceinline_specified(false), FS_virtual_specified(false), FS_noreturn_specified(false), Friend_specified(false), ConstexprSpecifier(CSK_unspecified), FS_explicit_specifier(), Attrs(attrFactory), writtenBS(), ObjCQualifiers(nullptr) {} // storage-class-specifier SCS getStorageClassSpec() const { return (SCS)StorageClassSpec; } TSCS getThreadStorageClassSpec() const { return (TSCS)ThreadStorageClassSpec; } bool isExternInLinkageSpec() const { return SCS_extern_in_linkage_spec; } void setExternInLinkageSpec(bool Value) { SCS_extern_in_linkage_spec = Value; } SourceLocation getStorageClassSpecLoc() const { return StorageClassSpecLoc; } SourceLocation getThreadStorageClassSpecLoc() const { return ThreadStorageClassSpecLoc; } void ClearStorageClassSpecs() { StorageClassSpec = DeclSpec::SCS_unspecified; ThreadStorageClassSpec = DeclSpec::TSCS_unspecified; 
SCS_extern_in_linkage_spec = false; StorageClassSpecLoc = SourceLocation(); ThreadStorageClassSpecLoc = SourceLocation(); } void ClearTypeSpecType() { TypeSpecType = DeclSpec::TST_unspecified; TypeSpecOwned = false; TSTLoc = SourceLocation(); } // type-specifier TSW getTypeSpecWidth() const { return (TSW)TypeSpecWidth; } TSC getTypeSpecComplex() const { return (TSC)TypeSpecComplex; } TSS getTypeSpecSign() const { return (TSS)TypeSpecSign; } TST getTypeSpecType() const { return (TST)TypeSpecType; } bool isTypeAltiVecVector() const { return TypeAltiVecVector; } bool isTypeAltiVecPixel() const { return TypeAltiVecPixel; } bool isTypeAltiVecBool() const { return TypeAltiVecBool; } bool isTypeSpecOwned() const { return TypeSpecOwned; } bool isTypeRep() const { return isTypeRep((TST) TypeSpecType); } bool isTypeSpecPipe() const { return TypeSpecPipe; } bool isTypeSpecSat() const { return TypeSpecSat; } bool isConstrainedAuto() const { return ConstrainedAuto; } ParsedType getRepAsType() const { assert(isTypeRep((TST) TypeSpecType) && "DeclSpec does not store a type"); return TypeRep; } Decl *getRepAsDecl() const { assert(isDeclRep((TST) TypeSpecType) && "DeclSpec does not store a decl"); return DeclRep; } Expr *getRepAsExpr() const { assert(isExprRep((TST) TypeSpecType) && "DeclSpec does not store an expr"); return ExprRep; } TemplateIdAnnotation *getRepAsTemplateId() const { assert(isTemplateIdRep((TST) TypeSpecType) && "DeclSpec does not store a template id"); return TemplateIdRep; } CXXScopeSpec &getTypeSpecScope() { return TypeScope; } const CXXScopeSpec &getTypeSpecScope() const { return TypeScope; } SourceRange getSourceRange() const LLVM_READONLY { return Range; } SourceLocation getBeginLoc() const LLVM_READONLY { return Range.getBegin(); } SourceLocation getEndLoc() const LLVM_READONLY { return Range.getEnd(); } SourceLocation getTypeSpecWidthLoc() const { return TSWRange.getBegin(); } SourceRange getTypeSpecWidthRange() const { return TSWRange; } SourceLocation getTypeSpecComplexLoc() const { return TSCLoc; } SourceLocation getTypeSpecSignLoc() const { return TSSLoc; } SourceLocation getTypeSpecTypeLoc() const { return TSTLoc; } SourceLocation getAltiVecLoc() const { return AltiVecLoc; } SourceLocation getTypeSpecSatLoc() const { return TSSatLoc; } SourceLocation getTypeSpecTypeNameLoc() const { assert(isDeclRep((TST) TypeSpecType) || TypeSpecType == TST_typename); return TSTNameLoc; } SourceRange getTypeofParensRange() const { return TypeofParensRange; } void setTypeofParensRange(SourceRange range) { TypeofParensRange = range; } bool hasAutoTypeSpec() const { return (TypeSpecType == TST_auto || TypeSpecType == TST_auto_type || TypeSpecType == TST_decltype_auto); } bool hasTagDefinition() const; /// Turn a type-specifier-type into a string like "_Bool" or "union". static const char *getSpecifierName(DeclSpec::TST T, const PrintingPolicy &Policy); static const char *getSpecifierName(DeclSpec::TQ Q); static const char *getSpecifierName(DeclSpec::TSS S); static const char *getSpecifierName(DeclSpec::TSC C); static const char *getSpecifierName(DeclSpec::TSW W); static const char *getSpecifierName(DeclSpec::SCS S); static const char *getSpecifierName(DeclSpec::TSCS S); static const char *getSpecifierName(ConstexprSpecKind C); // type-qualifiers /// getTypeQualifiers - Return a set of TQs. 
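  /// \code
  ///   // Hedged example (not from the original header): testing whether
  ///   // 'restrict' was written among the parsed qualifiers.
  ///   bool HasRestrict = DS.getTypeQualifiers() & DeclSpec::TQ_restrict;
  /// \endcode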
unsigned getTypeQualifiers() const { return TypeQualifiers; } SourceLocation getConstSpecLoc() const { return TQ_constLoc; } SourceLocation getRestrictSpecLoc() const { return TQ_restrictLoc; } SourceLocation getVolatileSpecLoc() const { return TQ_volatileLoc; } SourceLocation getAtomicSpecLoc() const { return TQ_atomicLoc; } SourceLocation getUnalignedSpecLoc() const { return TQ_unalignedLoc; } SourceLocation getPipeLoc() const { return TQ_pipeLoc; } /// Clear out all of the type qualifiers. void ClearTypeQualifiers() { TypeQualifiers = 0; TQ_constLoc = SourceLocation(); TQ_restrictLoc = SourceLocation(); TQ_volatileLoc = SourceLocation(); TQ_atomicLoc = SourceLocation(); TQ_unalignedLoc = SourceLocation(); TQ_pipeLoc = SourceLocation(); } // function-specifier bool isInlineSpecified() const { return FS_inline_specified | FS_forceinline_specified; } SourceLocation getInlineSpecLoc() const { return FS_inline_specified ? FS_inlineLoc : FS_forceinlineLoc; } ExplicitSpecifier getExplicitSpecifier() const { return FS_explicit_specifier; } bool isVirtualSpecified() const { return FS_virtual_specified; } SourceLocation getVirtualSpecLoc() const { return FS_virtualLoc; } bool hasExplicitSpecifier() const { return FS_explicit_specifier.isSpecified(); } SourceLocation getExplicitSpecLoc() const { return FS_explicitLoc; } SourceRange getExplicitSpecRange() const { return FS_explicit_specifier.getExpr() ? SourceRange(FS_explicitLoc, FS_explicitCloseParenLoc) : SourceRange(FS_explicitLoc); } bool isNoreturnSpecified() const { return FS_noreturn_specified; } SourceLocation getNoreturnSpecLoc() const { return FS_noreturnLoc; } void ClearFunctionSpecs() { FS_inline_specified = false; FS_inlineLoc = SourceLocation(); FS_forceinline_specified = false; FS_forceinlineLoc = SourceLocation(); FS_virtual_specified = false; FS_virtualLoc = SourceLocation(); FS_explicit_specifier = ExplicitSpecifier(); FS_explicitLoc = SourceLocation(); FS_explicitCloseParenLoc = SourceLocation(); FS_noreturn_specified = false; FS_noreturnLoc = SourceLocation(); } /// This method calls the passed in handler on each CVRU qual being /// set. /// Handle - a handler to be invoked. void forEachCVRUQualifier( llvm::function_ref Handle); /// This method calls the passed in handler on each qual being /// set. /// Handle - a handler to be invoked. void forEachQualifier( llvm::function_ref Handle); /// Return true if any type-specifier has been found. bool hasTypeSpecifier() const { return getTypeSpecType() != DeclSpec::TST_unspecified || getTypeSpecWidth() != DeclSpec::TSW_unspecified || getTypeSpecComplex() != DeclSpec::TSC_unspecified || getTypeSpecSign() != DeclSpec::TSS_unspecified; } /// Return a bitmask of which flavors of specifiers this /// DeclSpec includes. unsigned getParsedSpecifiers() const; /// isEmpty - Return true if this declaration specifier is completely empty: /// no tokens were parsed in the production of it. bool isEmpty() const { return getParsedSpecifiers() == DeclSpec::PQ_None; } void SetRangeStart(SourceLocation Loc) { Range.setBegin(Loc); } void SetRangeEnd(SourceLocation Loc) { Range.setEnd(Loc); } /// These methods set the specified attribute of the DeclSpec and /// return false if there was no error. If an error occurs (for /// example, if we tried to set "auto" on a spec with "extern" /// already set), they return true and set PrevSpec and DiagID /// such that /// Diag(Loc, DiagID) << PrevSpec; /// will yield a useful result. 
/// /// TODO: use a more general approach that still allows these /// diagnostics to be ignored when desired. bool SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy); bool SetStorageClassSpecThread(TSCS TSC, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID); bool SetTypeSpecWidth(TSW W, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy); bool SetTypeSpecComplex(TSC C, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID); bool SetTypeSpecSign(TSS S, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID); bool SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy); bool SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, ParsedType Rep, const PrintingPolicy &Policy); bool SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, TypeResult Rep, const PrintingPolicy &Policy) { if (Rep.isInvalid()) return SetTypeSpecError(); return SetTypeSpecType(T, Loc, PrevSpec, DiagID, Rep.get(), Policy); } bool SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, Decl *Rep, bool Owned, const PrintingPolicy &Policy); bool SetTypeSpecType(TST T, SourceLocation TagKwLoc, SourceLocation TagNameLoc, const char *&PrevSpec, unsigned &DiagID, ParsedType Rep, const PrintingPolicy &Policy); bool SetTypeSpecType(TST T, SourceLocation TagKwLoc, SourceLocation TagNameLoc, const char *&PrevSpec, unsigned &DiagID, Decl *Rep, bool Owned, const PrintingPolicy &Policy); bool SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, TemplateIdAnnotation *Rep, const PrintingPolicy &Policy); bool SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, Expr *Rep, const PrintingPolicy &policy); bool SetTypeAltiVecVector(bool isAltiVecVector, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy); bool SetTypeAltiVecPixel(bool isAltiVecPixel, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy); bool SetTypeAltiVecBool(bool isAltiVecBool, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy); bool SetTypePipe(bool isPipe, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy); bool SetExtIntType(SourceLocation KWLoc, Expr *BitWidth, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy); bool SetTypeSpecSat(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID); bool SetTypeSpecError(); void UpdateDeclRep(Decl *Rep) { assert(isDeclRep((TST) TypeSpecType)); DeclRep = Rep; } void UpdateTypeRep(ParsedType Rep) { assert(isTypeRep((TST) TypeSpecType)); TypeRep = Rep; } void UpdateExprRep(Expr *Rep) { assert(isExprRep((TST) TypeSpecType)); ExprRep = Rep; } bool SetTypeQual(TQ T, SourceLocation Loc); bool SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const LangOptions &Lang); bool setFunctionSpecInline(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID); bool setFunctionSpecForceInline(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID); bool setFunctionSpecVirtual(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID); bool setFunctionSpecExplicit(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, ExplicitSpecifier ExplicitSpec, SourceLocation CloseParenLoc); bool 
setFunctionSpecNoreturn(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID); bool SetFriendSpec(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID); bool setModulePrivateSpec(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID); bool SetConstexprSpec(ConstexprSpecKind ConstexprKind, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID); bool isFriendSpecified() const { return Friend_specified; } SourceLocation getFriendSpecLoc() const { return FriendLoc; } bool isModulePrivateSpecified() const { return ModulePrivateLoc.isValid(); } SourceLocation getModulePrivateSpecLoc() const { return ModulePrivateLoc; } ConstexprSpecKind getConstexprSpecifier() const { return ConstexprSpecKind(ConstexprSpecifier); } SourceLocation getConstexprSpecLoc() const { return ConstexprLoc; } bool hasConstexprSpecifier() const { return ConstexprSpecifier != CSK_unspecified; } void ClearConstexprSpec() { ConstexprSpecifier = CSK_unspecified; ConstexprLoc = SourceLocation(); } AttributePool &getAttributePool() const { return Attrs.getPool(); } /// Concatenates two attribute lists. /// /// The GCC attribute syntax allows for the following: /// /// \code /// short __attribute__(( unused, deprecated )) /// int __attribute__(( may_alias, aligned(16) )) var; /// \endcode /// /// This declares 4 attributes using 2 lists. The following syntax is /// also allowed and equivalent to the previous declaration. /// /// \code /// short __attribute__((unused)) __attribute__((deprecated)) /// int __attribute__((may_alias)) __attribute__((aligned(16))) var; /// \endcode /// void addAttributes(ParsedAttributesView &AL) { Attrs.addAll(AL.begin(), AL.end()); } bool hasAttributes() const { return !Attrs.empty(); } ParsedAttributes &getAttributes() { return Attrs; } const ParsedAttributes &getAttributes() const { return Attrs; } void takeAttributesFrom(ParsedAttributes &attrs) { Attrs.takeAllFrom(attrs); } /// Finish - This does final analysis of the declspec, issuing diagnostics for /// things like "_Imaginary" (lacking an FP type). After calling this method, /// DeclSpec is guaranteed self-consistent, even if an error occurred. void Finish(Sema &S, const PrintingPolicy &Policy); const WrittenBuiltinSpecs& getWrittenBuiltinSpecs() const { return writtenBS; } ObjCDeclSpec *getObjCQualifiers() const { return ObjCQualifiers; } void setObjCQualifiers(ObjCDeclSpec *quals) { ObjCQualifiers = quals; } /// Checks if this DeclSpec can stand alone, without a Declarator. /// /// Only tag declspecs can stand alone. bool isMissingDeclaratorOk(); }; /// Captures information about "declaration specifiers" specific to /// Objective-C. class ObjCDeclSpec { public: /// ObjCDeclQualifier - Qualifier used on types in method /// declarations. Not all combinations are sensible. Parameters /// can be one of { in, out, inout } with one of { bycopy, byref }. /// Returns can either be { oneway } or not. /// /// This should be kept in sync with Decl::ObjCDeclQualifier. 
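  /// \code
  ///   // Hedged example of a sensible combination described above ('inout'
  ///   // with 'byref'); setObjCDeclQualifier ORs each value into the set.
  ///   ObjCDeclSpec ODS;
  ///   ODS.setObjCDeclQualifier(ObjCDeclSpec::DQ_Inout);
  ///   ODS.setObjCDeclQualifier(ObjCDeclSpec::DQ_Byref);
  /// \endcode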
enum ObjCDeclQualifier { DQ_None = 0x0, DQ_In = 0x1, DQ_Inout = 0x2, DQ_Out = 0x4, DQ_Bycopy = 0x8, DQ_Byref = 0x10, DQ_Oneway = 0x20, DQ_CSNullability = 0x40 }; ObjCDeclSpec() : objcDeclQualifier(DQ_None), PropertyAttributes(ObjCPropertyAttribute::kind_noattr), Nullability(0), GetterName(nullptr), SetterName(nullptr) {} ObjCDeclQualifier getObjCDeclQualifier() const { return (ObjCDeclQualifier)objcDeclQualifier; } void setObjCDeclQualifier(ObjCDeclQualifier DQVal) { objcDeclQualifier = (ObjCDeclQualifier) (objcDeclQualifier | DQVal); } void clearObjCDeclQualifier(ObjCDeclQualifier DQVal) { objcDeclQualifier = (ObjCDeclQualifier) (objcDeclQualifier & ~DQVal); } ObjCPropertyAttribute::Kind getPropertyAttributes() const { return ObjCPropertyAttribute::Kind(PropertyAttributes); } void setPropertyAttributes(ObjCPropertyAttribute::Kind PRVal) { PropertyAttributes = (ObjCPropertyAttribute::Kind)(PropertyAttributes | PRVal); } NullabilityKind getNullability() const { assert( ((getObjCDeclQualifier() & DQ_CSNullability) || (getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)) && "Objective-C declspec doesn't have nullability"); return static_cast(Nullability); } SourceLocation getNullabilityLoc() const { assert( ((getObjCDeclQualifier() & DQ_CSNullability) || (getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)) && "Objective-C declspec doesn't have nullability"); return NullabilityLoc; } void setNullability(SourceLocation loc, NullabilityKind kind) { assert( ((getObjCDeclQualifier() & DQ_CSNullability) || (getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)) && "Set the nullability declspec or property attribute first"); Nullability = static_cast(kind); NullabilityLoc = loc; } const IdentifierInfo *getGetterName() const { return GetterName; } IdentifierInfo *getGetterName() { return GetterName; } SourceLocation getGetterNameLoc() const { return GetterNameLoc; } void setGetterName(IdentifierInfo *name, SourceLocation loc) { GetterName = name; GetterNameLoc = loc; } const IdentifierInfo *getSetterName() const { return SetterName; } IdentifierInfo *getSetterName() { return SetterName; } SourceLocation getSetterNameLoc() const { return SetterNameLoc; } void setSetterName(IdentifierInfo *name, SourceLocation loc) { SetterName = name; SetterNameLoc = loc; } private: // FIXME: These two are unrelated and mutually exclusive. So perhaps // we can put them in a union to reflect their mutual exclusivity // (space saving is negligible). unsigned objcDeclQualifier : 7; // NOTE: VC++ treats enums as signed, avoid using ObjCPropertyAttribute::Kind unsigned PropertyAttributes : NumObjCPropertyAttrsBits; unsigned Nullability : 2; SourceLocation NullabilityLoc; IdentifierInfo *GetterName; // getter name or NULL if no getter IdentifierInfo *SetterName; // setter name or NULL if no setter SourceLocation GetterNameLoc; // location of the getter attribute's value SourceLocation SetterNameLoc; // location of the setter attribute's value }; /// Describes the kind of unqualified-id parsed. enum class UnqualifiedIdKind { /// An identifier. IK_Identifier, /// An overloaded operator name, e.g., operator+. IK_OperatorFunctionId, /// A conversion function name, e.g., operator int. IK_ConversionFunctionId, /// A user-defined literal name, e.g., operator "" _i. IK_LiteralOperatorId, /// A constructor name. IK_ConstructorName, /// A constructor named via a template-id. IK_ConstructorTemplateId, /// A destructor name. IK_DestructorName, /// A template-id, e.g., f. 
IK_TemplateId, /// An implicit 'self' parameter IK_ImplicitSelfParam, /// A deduction-guide name (a template-name) IK_DeductionGuideName }; /// Represents a C++ unqualified-id that has been parsed. class UnqualifiedId { private: UnqualifiedId(const UnqualifiedId &Other) = delete; const UnqualifiedId &operator=(const UnqualifiedId &) = delete; public: /// Describes the kind of unqualified-id parsed. UnqualifiedIdKind Kind; struct OFI { /// The kind of overloaded operator. OverloadedOperatorKind Operator; /// The source locations of the individual tokens that name /// the operator, e.g., the "new", "[", and "]" tokens in /// operator new []. /// /// Different operators have different numbers of tokens in their name, /// up to three. Any remaining source locations in this array will be /// set to an invalid value for operators with fewer than three tokens. unsigned SymbolLocations[3]; }; /// Anonymous union that holds extra data associated with the /// parsed unqualified-id. union { /// When Kind == IK_Identifier, the parsed identifier, or when /// Kind == IK_UserLiteralId, the identifier suffix. IdentifierInfo *Identifier; /// When Kind == IK_OperatorFunctionId, the overloaded operator /// that we parsed. struct OFI OperatorFunctionId; /// When Kind == IK_ConversionFunctionId, the type that the /// conversion function names. UnionParsedType ConversionFunctionId; /// When Kind == IK_ConstructorName, the class-name of the type /// whose constructor is being referenced. UnionParsedType ConstructorName; /// When Kind == IK_DestructorName, the type referred to by the /// class-name. UnionParsedType DestructorName; /// When Kind == IK_DeductionGuideName, the parsed template-name. UnionParsedTemplateTy TemplateName; /// When Kind == IK_TemplateId or IK_ConstructorTemplateId, /// the template-id annotation that contains the template name and /// template arguments. TemplateIdAnnotation *TemplateId; }; /// The location of the first token that describes this unqualified-id, /// which will be the location of the identifier, "operator" keyword, /// tilde (for a destructor), or the template name of a template-id. SourceLocation StartLocation; /// The location of the last token that describes this unqualified-id. SourceLocation EndLocation; UnqualifiedId() : Kind(UnqualifiedIdKind::IK_Identifier), Identifier(nullptr) {} /// Clear out this unqualified-id, setting it to default (invalid) /// state. void clear() { Kind = UnqualifiedIdKind::IK_Identifier; Identifier = nullptr; StartLocation = SourceLocation(); EndLocation = SourceLocation(); } /// Determine whether this unqualified-id refers to a valid name. bool isValid() const { return StartLocation.isValid(); } /// Determine whether this unqualified-id refers to an invalid name. bool isInvalid() const { return !isValid(); } /// Determine what kind of name we have. UnqualifiedIdKind getKind() const { return Kind; } void setKind(UnqualifiedIdKind kind) { Kind = kind; } /// Specify that this unqualified-id was parsed as an identifier. /// /// \param Id the parsed identifier. /// \param IdLoc the location of the parsed identifier. void setIdentifier(const IdentifierInfo *Id, SourceLocation IdLoc) { Kind = UnqualifiedIdKind::IK_Identifier; Identifier = const_cast(Id); StartLocation = EndLocation = IdLoc; } /// Specify that this unqualified-id was parsed as an /// operator-function-id. /// /// \param OperatorLoc the location of the 'operator' keyword. /// /// \param Op the overloaded operator. 
/// /// \param SymbolLocations the locations of the individual operator symbols /// in the operator. void setOperatorFunctionId(SourceLocation OperatorLoc, OverloadedOperatorKind Op, SourceLocation SymbolLocations[3]); /// Specify that this unqualified-id was parsed as a /// conversion-function-id. /// /// \param OperatorLoc the location of the 'operator' keyword. /// /// \param Ty the type to which this conversion function is converting. /// /// \param EndLoc the location of the last token that makes up the type name. void setConversionFunctionId(SourceLocation OperatorLoc, ParsedType Ty, SourceLocation EndLoc) { Kind = UnqualifiedIdKind::IK_ConversionFunctionId; StartLocation = OperatorLoc; EndLocation = EndLoc; ConversionFunctionId = Ty; } /// Specific that this unqualified-id was parsed as a /// literal-operator-id. /// /// \param Id the parsed identifier. /// /// \param OpLoc the location of the 'operator' keyword. /// /// \param IdLoc the location of the identifier. void setLiteralOperatorId(const IdentifierInfo *Id, SourceLocation OpLoc, SourceLocation IdLoc) { Kind = UnqualifiedIdKind::IK_LiteralOperatorId; Identifier = const_cast(Id); StartLocation = OpLoc; EndLocation = IdLoc; } /// Specify that this unqualified-id was parsed as a constructor name. /// /// \param ClassType the class type referred to by the constructor name. /// /// \param ClassNameLoc the location of the class name. /// /// \param EndLoc the location of the last token that makes up the type name. void setConstructorName(ParsedType ClassType, SourceLocation ClassNameLoc, SourceLocation EndLoc) { Kind = UnqualifiedIdKind::IK_ConstructorName; StartLocation = ClassNameLoc; EndLocation = EndLoc; ConstructorName = ClassType; } /// Specify that this unqualified-id was parsed as a /// template-id that names a constructor. /// /// \param TemplateId the template-id annotation that describes the parsed /// template-id. This UnqualifiedId instance will take ownership of the /// \p TemplateId and will free it on destruction. void setConstructorTemplateId(TemplateIdAnnotation *TemplateId); /// Specify that this unqualified-id was parsed as a destructor name. /// /// \param TildeLoc the location of the '~' that introduces the destructor /// name. /// /// \param ClassType the name of the class referred to by the destructor name. void setDestructorName(SourceLocation TildeLoc, ParsedType ClassType, SourceLocation EndLoc) { Kind = UnqualifiedIdKind::IK_DestructorName; StartLocation = TildeLoc; EndLocation = EndLoc; DestructorName = ClassType; } /// Specify that this unqualified-id was parsed as a template-id. /// /// \param TemplateId the template-id annotation that describes the parsed /// template-id. This UnqualifiedId instance will take ownership of the /// \p TemplateId and will free it on destruction. void setTemplateId(TemplateIdAnnotation *TemplateId); /// Specify that this unqualified-id was parsed as a template-name for /// a deduction-guide. /// /// \param Template The parsed template-name. /// \param TemplateLoc The location of the parsed template-name. void setDeductionGuideName(ParsedTemplateTy Template, SourceLocation TemplateLoc) { Kind = UnqualifiedIdKind::IK_DeductionGuideName; TemplateName = Template; StartLocation = EndLocation = TemplateLoc; } /// Return the source range that covers this unqualified-id. 
SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(StartLocation, EndLocation); } SourceLocation getBeginLoc() const LLVM_READONLY { return StartLocation; } SourceLocation getEndLoc() const LLVM_READONLY { return EndLocation; } }; /// A set of tokens that has been cached for later parsing. typedef SmallVector CachedTokens; /// One instance of this struct is used for each type in a /// declarator that is parsed. /// /// This is intended to be a small value object. struct DeclaratorChunk { enum { Pointer, Reference, Array, Function, BlockPointer, MemberPointer, Paren, Pipe } Kind; /// Loc - The place where this type was defined. SourceLocation Loc; /// EndLoc - If valid, the place where this chunck ends. SourceLocation EndLoc; SourceRange getSourceRange() const { if (EndLoc.isInvalid()) return SourceRange(Loc, Loc); return SourceRange(Loc, EndLoc); } ParsedAttributesView AttrList; struct PointerTypeInfo { /// The type qualifiers: const/volatile/restrict/unaligned/atomic. unsigned TypeQuals : 5; /// The location of the const-qualifier, if any. unsigned ConstQualLoc; /// The location of the volatile-qualifier, if any. unsigned VolatileQualLoc; /// The location of the restrict-qualifier, if any. unsigned RestrictQualLoc; /// The location of the _Atomic-qualifier, if any. unsigned AtomicQualLoc; /// The location of the __unaligned-qualifier, if any. unsigned UnalignedQualLoc; void destroy() { } }; struct ReferenceTypeInfo { /// The type qualifier: restrict. [GNU] C++ extension bool HasRestrict : 1; /// True if this is an lvalue reference, false if it's an rvalue reference. bool LValueRef : 1; void destroy() { } }; struct ArrayTypeInfo { /// The type qualifiers for the array: /// const/volatile/restrict/__unaligned/_Atomic. unsigned TypeQuals : 5; /// True if this dimension included the 'static' keyword. unsigned hasStatic : 1; /// True if this dimension was [*]. In this case, NumElts is null. unsigned isStar : 1; /// This is the size of the array, or null if [] or [*] was specified. /// Since the parser is multi-purpose, and we don't want to impose a root /// expression class on all clients, NumElts is untyped. Expr *NumElts; void destroy() {} }; /// ParamInfo - An array of paraminfo objects is allocated whenever a function /// declarator is parsed. There are two interesting styles of parameters /// here: /// K&R-style identifier lists and parameter type lists. K&R-style identifier /// lists will have information about the identifier, but no type information. /// Parameter type lists will have type info (if the actions module provides /// it), but may have null identifier info: e.g. for 'void foo(int X, int)'. struct ParamInfo { IdentifierInfo *Ident; SourceLocation IdentLoc; Decl *Param; /// DefaultArgTokens - When the parameter's default argument /// cannot be parsed immediately (because it occurs within the /// declaration of a member function), it will be stored here as a /// sequence of tokens to be parsed once the class definition is /// complete. Non-NULL indicates that there is a default argument. std::unique_ptr DefaultArgTokens; ParamInfo() = default; ParamInfo(IdentifierInfo *ident, SourceLocation iloc, Decl *param, std::unique_ptr DefArgTokens = nullptr) : Ident(ident), IdentLoc(iloc), Param(param), DefaultArgTokens(std::move(DefArgTokens)) {} }; struct TypeAndRange { ParsedType Ty; SourceRange Range; }; struct FunctionTypeInfo { /// hasPrototype - This is true if the function had at least one typed /// parameter. 
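These payload structs store SourceLocations as plain unsigned values and convert at the accessor boundary with getRawEncoding()/getFromRawEncoding(); the round trip is lossless. A tiny sketch of that conversion, assuming only the Basic headers:

#include "clang/Basic/SourceLocation.h"
#include <cassert>

void demoRawLocEncoding(clang::SourceLocation Loc) {
  // Store: the location collapses to a plain integer field.
  unsigned Raw = Loc.getRawEncoding();
  // Load: the accessor reconstitutes an equivalent SourceLocation.
  clang::SourceLocation Round = clang::SourceLocation::getFromRawEncoding(Raw);
  assert(Round == Loc);
}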
If the function is () or (a,b,c), then it has no prototype, /// and is treated as a K&R-style function. unsigned hasPrototype : 1; /// isVariadic - If this function has a prototype, and if that /// proto ends with ',...)', this is true. When true, EllipsisLoc /// contains the location of the ellipsis. unsigned isVariadic : 1; /// Can this declaration be a constructor-style initializer? unsigned isAmbiguous : 1; /// Whether the ref-qualifier (if any) is an lvalue reference. /// Otherwise, it's an rvalue reference. unsigned RefQualifierIsLValueRef : 1; /// ExceptionSpecType - An ExceptionSpecificationType value. unsigned ExceptionSpecType : 4; /// DeleteParams - If this is true, we need to delete[] Params. unsigned DeleteParams : 1; /// HasTrailingReturnType - If this is true, a trailing return type was /// specified. unsigned HasTrailingReturnType : 1; /// The location of the left parenthesis in the source. unsigned LParenLoc; /// When isVariadic is true, the location of the ellipsis in the source. unsigned EllipsisLoc; /// The location of the right parenthesis in the source. unsigned RParenLoc; /// NumParams - This is the number of formal parameters specified by the /// declarator. unsigned NumParams; /// NumExceptionsOrDecls - This is the number of types in the /// dynamic-exception-decl, if the function has one. In C, this is the /// number of declarations in the function prototype. unsigned NumExceptionsOrDecls; /// The location of the ref-qualifier, if any. /// /// If this is an invalid location, there is no ref-qualifier. unsigned RefQualifierLoc; /// The location of the 'mutable' qualifer in a lambda-declarator, if /// any. unsigned MutableLoc; /// The beginning location of the exception specification, if any. unsigned ExceptionSpecLocBeg; /// The end location of the exception specification, if any. unsigned ExceptionSpecLocEnd; /// Params - This is a pointer to a new[]'d array of ParamInfo objects that /// describe the parameters specified by this function declarator. null if /// there are no parameters specified. ParamInfo *Params; /// DeclSpec for the function with the qualifier related info. DeclSpec *MethodQualifiers; /// AtttibuteFactory for the MethodQualifiers. AttributeFactory *QualAttrFactory; union { /// Pointer to a new[]'d array of TypeAndRange objects that /// contain the types in the function's dynamic exception specification /// and their locations, if there is one. TypeAndRange *Exceptions; /// Pointer to the expression in the noexcept-specifier of this /// function, if it has one. Expr *NoexceptExpr; /// Pointer to the cached tokens for an exception-specification /// that has not yet been parsed. CachedTokens *ExceptionSpecTokens; /// Pointer to a new[]'d array of declarations that need to be available /// for lookup inside the function body, if one exists. Does not exist in /// C++. NamedDecl **DeclsInPrototype; }; /// If HasTrailingReturnType is true, this is the trailing return /// type specified. UnionParsedType TrailingReturnType; /// Reset the parameter list to having zero parameters. /// /// This is used in various places for error recovery. 
void freeParams() { for (unsigned I = 0; I < NumParams; ++I) Params[I].DefaultArgTokens.reset(); if (DeleteParams) { delete[] Params; DeleteParams = false; } NumParams = 0; } void destroy() { freeParams(); delete QualAttrFactory; delete MethodQualifiers; switch (getExceptionSpecType()) { default: break; case EST_Dynamic: delete[] Exceptions; break; case EST_Unparsed: delete ExceptionSpecTokens; break; case EST_None: if (NumExceptionsOrDecls != 0) delete[] DeclsInPrototype; break; } } DeclSpec &getOrCreateMethodQualifiers() { if (!MethodQualifiers) { QualAttrFactory = new AttributeFactory(); MethodQualifiers = new DeclSpec(*QualAttrFactory); } return *MethodQualifiers; } /// isKNRPrototype - Return true if this is a K&R style identifier list, /// like "void foo(a,b,c)". In a function definition, this will be followed /// by the parameter type definitions. bool isKNRPrototype() const { return !hasPrototype && NumParams != 0; } SourceLocation getLParenLoc() const { return SourceLocation::getFromRawEncoding(LParenLoc); } SourceLocation getEllipsisLoc() const { return SourceLocation::getFromRawEncoding(EllipsisLoc); } SourceLocation getRParenLoc() const { return SourceLocation::getFromRawEncoding(RParenLoc); } SourceLocation getExceptionSpecLocBeg() const { return SourceLocation::getFromRawEncoding(ExceptionSpecLocBeg); } SourceLocation getExceptionSpecLocEnd() const { return SourceLocation::getFromRawEncoding(ExceptionSpecLocEnd); } SourceRange getExceptionSpecRange() const { return SourceRange(getExceptionSpecLocBeg(), getExceptionSpecLocEnd()); } /// Retrieve the location of the ref-qualifier, if any. SourceLocation getRefQualifierLoc() const { return SourceLocation::getFromRawEncoding(RefQualifierLoc); } /// Retrieve the location of the 'const' qualifier. SourceLocation getConstQualifierLoc() const { assert(MethodQualifiers); return MethodQualifiers->getConstSpecLoc(); } /// Retrieve the location of the 'volatile' qualifier. SourceLocation getVolatileQualifierLoc() const { assert(MethodQualifiers); return MethodQualifiers->getVolatileSpecLoc(); } /// Retrieve the location of the 'restrict' qualifier. SourceLocation getRestrictQualifierLoc() const { assert(MethodQualifiers); return MethodQualifiers->getRestrictSpecLoc(); } /// Retrieve the location of the 'mutable' qualifier, if any. SourceLocation getMutableLoc() const { return SourceLocation::getFromRawEncoding(MutableLoc); } /// Determine whether this function declaration contains a /// ref-qualifier. bool hasRefQualifier() const { return getRefQualifierLoc().isValid(); } /// Determine whether this lambda-declarator contains a 'mutable' /// qualifier. bool hasMutableQualifier() const { return getMutableLoc().isValid(); } /// Determine whether this method has qualifiers. bool hasMethodTypeQualifiers() const { return MethodQualifiers && (MethodQualifiers->getTypeQualifiers() || MethodQualifiers->getAttributes().size()); } /// Get the type of exception specification this function has. ExceptionSpecificationType getExceptionSpecType() const { return static_cast(ExceptionSpecType); } /// Get the number of dynamic exception specifications. unsigned getNumExceptions() const { assert(ExceptionSpecType != EST_None); return NumExceptionsOrDecls; } /// Get the non-parameter decls defined within this function /// prototype. Typically these are tag declarations. 
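destroy() above encodes the ownership rule for FunctionTypeInfo's anonymous union: ExceptionSpecType decides which member is live (Exceptions for EST_Dynamic, ExceptionSpecTokens for EST_Unparsed, DeclsInPrototype when there is no exception specification but prototype-scope declarations exist). Readers should dispatch the same way; a hedged sketch, assuming FTI is a fully parsed function chunk:

#include "clang/Sema/DeclSpec.h"
#include "clang/Basic/ExceptionSpecificationType.h"

// Sketch only: count entries behind the exception-spec union.
unsigned countExceptionSpecEntries(const clang::DeclaratorChunk::FunctionTypeInfo &FTI) {
  switch (FTI.getExceptionSpecType()) {
  case clang::EST_Dynamic:
    return FTI.getNumExceptions();            // Exceptions[] is the live union member
  case clang::EST_Unparsed:
    return FTI.ExceptionSpecTokens ? 1u : 0u; // cached tokens, not yet parsed
  case clang::EST_None:
    return FTI.getDeclsInPrototype().size();  // C prototype-scope declarations
  default:
    return 0;                                 // noexcept and friends carry no list
  }
}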
ArrayRef getDeclsInPrototype() const { assert(ExceptionSpecType == EST_None); return llvm::makeArrayRef(DeclsInPrototype, NumExceptionsOrDecls); } /// Determine whether this function declarator had a /// trailing-return-type. bool hasTrailingReturnType() const { return HasTrailingReturnType; } /// Get the trailing-return-type for this function declarator. ParsedType getTrailingReturnType() const { return TrailingReturnType; } }; struct BlockPointerTypeInfo { /// For now, sema will catch these as invalid. /// The type qualifiers: const/volatile/restrict/__unaligned/_Atomic. unsigned TypeQuals : 5; void destroy() { } }; struct MemberPointerTypeInfo { /// The type qualifiers: const/volatile/restrict/__unaligned/_Atomic. unsigned TypeQuals : 5; /// Location of the '*' token. unsigned StarLoc; // CXXScopeSpec has a constructor, so it can't be a direct member. // So we need some pointer-aligned storage and a bit of trickery. alignas(CXXScopeSpec) char ScopeMem[sizeof(CXXScopeSpec)]; CXXScopeSpec &Scope() { return *reinterpret_cast(ScopeMem); } const CXXScopeSpec &Scope() const { return *reinterpret_cast(ScopeMem); } void destroy() { Scope().~CXXScopeSpec(); } }; struct PipeTypeInfo { /// The access writes. unsigned AccessWrites : 3; void destroy() {} }; union { PointerTypeInfo Ptr; ReferenceTypeInfo Ref; ArrayTypeInfo Arr; FunctionTypeInfo Fun; BlockPointerTypeInfo Cls; MemberPointerTypeInfo Mem; PipeTypeInfo PipeInfo; }; void destroy() { switch (Kind) { case DeclaratorChunk::Function: return Fun.destroy(); case DeclaratorChunk::Pointer: return Ptr.destroy(); case DeclaratorChunk::BlockPointer: return Cls.destroy(); case DeclaratorChunk::Reference: return Ref.destroy(); case DeclaratorChunk::Array: return Arr.destroy(); case DeclaratorChunk::MemberPointer: return Mem.destroy(); case DeclaratorChunk::Paren: return; case DeclaratorChunk::Pipe: return PipeInfo.destroy(); } } /// If there are attributes applied to this declaratorchunk, return /// them. const ParsedAttributesView &getAttrs() const { return AttrList; } ParsedAttributesView &getAttrs() { return AttrList; } /// Return a DeclaratorChunk for a pointer. static DeclaratorChunk getPointer(unsigned TypeQuals, SourceLocation Loc, SourceLocation ConstQualLoc, SourceLocation VolatileQualLoc, SourceLocation RestrictQualLoc, SourceLocation AtomicQualLoc, SourceLocation UnalignedQualLoc) { DeclaratorChunk I; I.Kind = Pointer; I.Loc = Loc; I.Ptr.TypeQuals = TypeQuals; I.Ptr.ConstQualLoc = ConstQualLoc.getRawEncoding(); I.Ptr.VolatileQualLoc = VolatileQualLoc.getRawEncoding(); I.Ptr.RestrictQualLoc = RestrictQualLoc.getRawEncoding(); I.Ptr.AtomicQualLoc = AtomicQualLoc.getRawEncoding(); I.Ptr.UnalignedQualLoc = UnalignedQualLoc.getRawEncoding(); return I; } /// Return a DeclaratorChunk for a reference. static DeclaratorChunk getReference(unsigned TypeQuals, SourceLocation Loc, bool lvalue) { DeclaratorChunk I; I.Kind = Reference; I.Loc = Loc; I.Ref.HasRestrict = (TypeQuals & DeclSpec::TQ_restrict) != 0; I.Ref.LValueRef = lvalue; return I; } /// Return a DeclaratorChunk for an array. static DeclaratorChunk getArray(unsigned TypeQuals, bool isStatic, bool isStar, Expr *NumElts, SourceLocation LBLoc, SourceLocation RBLoc) { DeclaratorChunk I; I.Kind = Array; I.Loc = LBLoc; I.EndLoc = RBLoc; I.Arr.TypeQuals = TypeQuals; I.Arr.hasStatic = isStatic; I.Arr.isStar = isStar; I.Arr.NumElts = NumElts; return I; } /// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function. /// "TheDeclarator" is the declarator that this will be added to. 
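The static factories above are the sanctioned way to build chunks: each one fills in the Kind tag, the location fields, and the matching payload member in one step, so a field can never be read against the wrong union member later. For example (a sketch; NumElts and the bracket locations would come from the parser):

#include "clang/Sema/DeclSpec.h"

clang::DeclaratorChunk makeArrayChunk(clang::Expr *NumElts,
                                      clang::SourceLocation LBLoc,
                                      clang::SourceLocation RBLoc) {
  // Kind, Loc/EndLoc and the ArrayTypeInfo payload are populated together.
  return clang::DeclaratorChunk::getArray(/*TypeQuals=*/0,
                                          /*isStatic=*/false,
                                          /*isStar=*/false,
                                          NumElts, LBLoc, RBLoc);
}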
static DeclaratorChunk getFunction(bool HasProto, bool IsAmbiguous, SourceLocation LParenLoc, ParamInfo *Params, unsigned NumParams, SourceLocation EllipsisLoc, SourceLocation RParenLoc, bool RefQualifierIsLvalueRef, SourceLocation RefQualifierLoc, SourceLocation MutableLoc, ExceptionSpecificationType ESpecType, SourceRange ESpecRange, ParsedType *Exceptions, SourceRange *ExceptionRanges, unsigned NumExceptions, Expr *NoexceptExpr, CachedTokens *ExceptionSpecTokens, ArrayRef DeclsInPrototype, SourceLocation LocalRangeBegin, SourceLocation LocalRangeEnd, Declarator &TheDeclarator, TypeResult TrailingReturnType = TypeResult(), DeclSpec *MethodQualifiers = nullptr); /// Return a DeclaratorChunk for a block. static DeclaratorChunk getBlockPointer(unsigned TypeQuals, SourceLocation Loc) { DeclaratorChunk I; I.Kind = BlockPointer; I.Loc = Loc; I.Cls.TypeQuals = TypeQuals; return I; } /// Return a DeclaratorChunk for a block. static DeclaratorChunk getPipe(unsigned TypeQuals, SourceLocation Loc) { DeclaratorChunk I; I.Kind = Pipe; I.Loc = Loc; I.Cls.TypeQuals = TypeQuals; return I; } static DeclaratorChunk getMemberPointer(const CXXScopeSpec &SS, unsigned TypeQuals, SourceLocation StarLoc, SourceLocation EndLoc) { DeclaratorChunk I; I.Kind = MemberPointer; I.Loc = SS.getBeginLoc(); I.EndLoc = EndLoc; I.Mem.StarLoc = StarLoc.getRawEncoding(); I.Mem.TypeQuals = TypeQuals; new (I.Mem.ScopeMem) CXXScopeSpec(SS); return I; } /// Return a DeclaratorChunk for a paren. static DeclaratorChunk getParen(SourceLocation LParenLoc, SourceLocation RParenLoc) { DeclaratorChunk I; I.Kind = Paren; I.Loc = LParenLoc; I.EndLoc = RParenLoc; return I; } bool isParen() const { return Kind == Paren; } }; /// A parsed C++17 decomposition declarator of the form /// '[' identifier-list ']' class DecompositionDeclarator { public: struct Binding { IdentifierInfo *Name; SourceLocation NameLoc; }; private: /// The locations of the '[' and ']' tokens. SourceLocation LSquareLoc, RSquareLoc; /// The bindings. Binding *Bindings; unsigned NumBindings : 31; unsigned DeleteBindings : 1; friend class Declarator; public: DecompositionDeclarator() : Bindings(nullptr), NumBindings(0), DeleteBindings(false) {} DecompositionDeclarator(const DecompositionDeclarator &G) = delete; DecompositionDeclarator &operator=(const DecompositionDeclarator &G) = delete; ~DecompositionDeclarator() { if (DeleteBindings) delete[] Bindings; } void clear() { LSquareLoc = RSquareLoc = SourceLocation(); if (DeleteBindings) delete[] Bindings; Bindings = nullptr; NumBindings = 0; DeleteBindings = false; } ArrayRef bindings() const { return llvm::makeArrayRef(Bindings, NumBindings); } bool isSet() const { return LSquareLoc.isValid(); } SourceLocation getLSquareLoc() const { return LSquareLoc; } SourceLocation getRSquareLoc() const { return RSquareLoc; } SourceRange getSourceRange() const { return SourceRange(LSquareLoc, RSquareLoc); } }; /// Described the kind of function definition (if any) provided for /// a function. enum FunctionDefinitionKind { FDK_Declaration, FDK_Definition, FDK_Defaulted, FDK_Deleted }; enum class DeclaratorContext { FileContext, // File scope declaration. PrototypeContext, // Within a function prototype. ObjCResultContext, // An ObjC method result type. ObjCParameterContext,// An ObjC method parameter type. KNRTypeListContext, // K&R type definition list for formals. TypeNameContext, // Abstract declarator for types. FunctionalCastContext, // Type in a C++ functional cast expression. MemberContext, // Struct/Union field. 
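DecompositionDeclarator hands its bindings out as an ArrayRef, so consumers can walk a structured binding such as '[a, b, c]' without caring whether the storage is the Declarator's inline buffer or a new[]'d array (DeleteBindings tracks the latter). A small sketch of that traversal:

#include "clang/Sema/DeclSpec.h"

// Sketch: walk the bindings of a parsed '[a, b, c]' declarator.
void visitBindingNames(const clang::DecompositionDeclarator &DD) {
  if (!DD.isSet())                        // no '[' was seen for this declarator
    return;
  for (const clang::DecompositionDeclarator::Binding &B : DD.bindings()) {
    (void)B.Name;                         // the identifier, e.g. 'a'
    (void)B.NameLoc;                      // and where it was written
  }
}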
BlockContext, // Declaration within a block in a function. ForContext, // Declaration within first part of a for loop. InitStmtContext, // Declaration within optional init stmt of if/switch. ConditionContext, // Condition declaration in a C++ if/switch/while/for. TemplateParamContext,// Within a template parameter list. CXXNewContext, // C++ new-expression. CXXCatchContext, // C++ catch exception-declaration ObjCCatchContext, // Objective-C catch exception-declaration BlockLiteralContext, // Block literal declarator. LambdaExprContext, // Lambda-expression declarator. LambdaExprParameterContext, // Lambda-expression parameter declarator. ConversionIdContext, // C++ conversion-type-id. TrailingReturnContext, // C++11 trailing-type-specifier. TrailingReturnVarContext, // C++11 trailing-type-specifier for variable. TemplateArgContext, // Any template argument (in template argument list). TemplateTypeArgContext, // Template type argument (in default argument). AliasDeclContext, // C++11 alias-declaration. AliasTemplateContext, // C++11 alias-declaration template. RequiresExprContext // C++2a requires-expression. }; /// Information about one declarator, including the parsed type /// information and the identifier. /// /// When the declarator is fully formed, this is turned into the appropriate /// Decl object. /// /// Declarators come in two types: normal declarators and abstract declarators. /// Abstract declarators are used when parsing types, and don't have an /// identifier. Normal declarators do have ID's. /// /// Instances of this class should be a transient object that lives on the /// stack, not objects that are allocated in large quantities on the heap. class Declarator { private: const DeclSpec &DS; CXXScopeSpec SS; UnqualifiedId Name; SourceRange Range; /// Where we are parsing this declarator. DeclaratorContext Context; /// The C++17 structured binding, if any. This is an alternative to a Name. DecompositionDeclarator BindingGroup; /// DeclTypeInfo - This holds each type that the declarator includes as it is /// parsed. This is pushed from the identifier out, which means that element /// #0 will be the most closely bound to the identifier, and /// DeclTypeInfo.back() will be the least closely bound. SmallVector DeclTypeInfo; /// InvalidType - Set by Sema::GetTypeForDeclarator(). unsigned InvalidType : 1; /// GroupingParens - Set by Parser::ParseParenDeclarator(). unsigned GroupingParens : 1; /// FunctionDefinition - Is this Declarator for a function or member /// definition and, if so, what kind? /// /// Actually a FunctionDefinitionKind. unsigned FunctionDefinition : 2; /// Is this Declarator a redeclaration? unsigned Redeclaration : 1; /// true if the declaration is preceded by \c __extension__. unsigned Extension : 1; /// Indicates whether this is an Objective-C instance variable. unsigned ObjCIvar : 1; /// Indicates whether this is an Objective-C 'weak' property. unsigned ObjCWeakProperty : 1; /// Indicates whether the InlineParams / InlineBindings storage has been used. unsigned InlineStorageUsed : 1; /// Attrs - Attributes. ParsedAttributes Attrs; /// The asm label, if specified. Expr *AsmLabel; /// \brief The constraint-expression specified by the trailing /// requires-clause, or null if no such clause was specified. Expr *TrailingRequiresClause; /// If this declarator declares a template, its template parameter lists. 
ArrayRef TemplateParameterLists; /// If the declarator declares an abbreviated function template, the innermost /// template parameter list containing the invented and explicit template /// parameters (if any). TemplateParameterList *InventedTemplateParameterList; #ifndef _MSC_VER union { #endif /// InlineParams - This is a local array used for the first function decl /// chunk to avoid going to the heap for the common case when we have one /// function chunk in the declarator. DeclaratorChunk::ParamInfo InlineParams[16]; DecompositionDeclarator::Binding InlineBindings[16]; #ifndef _MSC_VER }; #endif /// If this is the second or subsequent declarator in this declaration, /// the location of the comma before this declarator. SourceLocation CommaLoc; /// If provided, the source location of the ellipsis used to describe /// this declarator as a parameter pack. SourceLocation EllipsisLoc; friend struct DeclaratorChunk; public: Declarator(const DeclSpec &ds, DeclaratorContext C) : DS(ds), Range(ds.getSourceRange()), Context(C), InvalidType(DS.getTypeSpecType() == DeclSpec::TST_error), GroupingParens(false), FunctionDefinition(FDK_Declaration), Redeclaration(false), Extension(false), ObjCIvar(false), ObjCWeakProperty(false), InlineStorageUsed(false), Attrs(ds.getAttributePool().getFactory()), AsmLabel(nullptr), TrailingRequiresClause(nullptr), InventedTemplateParameterList(nullptr) {} ~Declarator() { clear(); } /// getDeclSpec - Return the declaration-specifier that this declarator was /// declared with. const DeclSpec &getDeclSpec() const { return DS; } /// getMutableDeclSpec - Return a non-const version of the DeclSpec. This /// should be used with extreme care: declspecs can often be shared between /// multiple declarators, so mutating the DeclSpec affects all of the /// Declarators. This should only be done when the declspec is known to not /// be shared or when in error recovery etc. DeclSpec &getMutableDeclSpec() { return const_cast(DS); } AttributePool &getAttributePool() const { return Attrs.getPool(); } /// getCXXScopeSpec - Return the C++ scope specifier (global scope or /// nested-name-specifier) that is part of the declarator-id. const CXXScopeSpec &getCXXScopeSpec() const { return SS; } CXXScopeSpec &getCXXScopeSpec() { return SS; } /// Retrieve the name specified by this declarator. UnqualifiedId &getName() { return Name; } const DecompositionDeclarator &getDecompositionDeclarator() const { return BindingGroup; } DeclaratorContext getContext() const { return Context; } bool isPrototypeContext() const { return (Context == DeclaratorContext::PrototypeContext || Context == DeclaratorContext::ObjCParameterContext || Context == DeclaratorContext::ObjCResultContext || Context == DeclaratorContext::LambdaExprParameterContext); } /// Get the source range that spans this declarator. SourceRange getSourceRange() const LLVM_READONLY { return Range; } SourceLocation getBeginLoc() const LLVM_READONLY { return Range.getBegin(); } SourceLocation getEndLoc() const LLVM_READONLY { return Range.getEnd(); } void SetSourceRange(SourceRange R) { Range = R; } /// SetRangeBegin - Set the start of the source range to Loc, unless it's /// invalid. void SetRangeBegin(SourceLocation Loc) { if (!Loc.isInvalid()) Range.setBegin(Loc); } /// SetRangeEnd - Set the end of the source range to Loc, unless it's invalid. 
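A Declarator is meant to be a transient, stack-allocated object: it is constructed from the DeclSpec that was just parsed plus the syntactic context, and its source range starts out as the DeclSpec's own range. A minimal sketch, assuming a DeclSpec produced by the parser:

#include "clang/Sema/DeclSpec.h"

void demoDeclarator(const clang::DeclSpec &DS) {
  // Transient object; lives on the stack for the duration of one declarator.
  clang::Declarator D(DS, clang::DeclaratorContext::FileContext);
  (void)D.getDeclSpec();            // the decl-specifiers this declarator was parsed with
  (void)D.getContext();             // where we are parsing it
  (void)D.getSourceRange();         // initially the DeclSpec's own range
  (void)D.mayOmitIdentifier();      // context-dependent grammar query (false at file scope)
}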
void SetRangeEnd(SourceLocation Loc) { if (!Loc.isInvalid()) Range.setEnd(Loc); } /// ExtendWithDeclSpec - Extend the declarator source range to include the /// given declspec, unless its location is invalid. Adopts the range start if /// the current range start is invalid. void ExtendWithDeclSpec(const DeclSpec &DS) { SourceRange SR = DS.getSourceRange(); if (Range.getBegin().isInvalid()) Range.setBegin(SR.getBegin()); if (!SR.getEnd().isInvalid()) Range.setEnd(SR.getEnd()); } /// Reset the contents of this Declarator. void clear() { SS.clear(); Name.clear(); Range = DS.getSourceRange(); BindingGroup.clear(); for (unsigned i = 0, e = DeclTypeInfo.size(); i != e; ++i) DeclTypeInfo[i].destroy(); DeclTypeInfo.clear(); Attrs.clear(); AsmLabel = nullptr; InlineStorageUsed = false; ObjCIvar = false; ObjCWeakProperty = false; CommaLoc = SourceLocation(); EllipsisLoc = SourceLocation(); } /// mayOmitIdentifier - Return true if the identifier is either optional or /// not allowed. This is true for typenames, prototypes, and template /// parameter lists. bool mayOmitIdentifier() const { switch (Context) { case DeclaratorContext::FileContext: case DeclaratorContext::KNRTypeListContext: case DeclaratorContext::MemberContext: case DeclaratorContext::BlockContext: case DeclaratorContext::ForContext: case DeclaratorContext::InitStmtContext: case DeclaratorContext::ConditionContext: return false; case DeclaratorContext::TypeNameContext: case DeclaratorContext::FunctionalCastContext: case DeclaratorContext::AliasDeclContext: case DeclaratorContext::AliasTemplateContext: case DeclaratorContext::PrototypeContext: case DeclaratorContext::LambdaExprParameterContext: case DeclaratorContext::ObjCParameterContext: case DeclaratorContext::ObjCResultContext: case DeclaratorContext::TemplateParamContext: case DeclaratorContext::CXXNewContext: case DeclaratorContext::CXXCatchContext: case DeclaratorContext::ObjCCatchContext: case DeclaratorContext::BlockLiteralContext: case DeclaratorContext::LambdaExprContext: case DeclaratorContext::ConversionIdContext: case DeclaratorContext::TemplateArgContext: case DeclaratorContext::TemplateTypeArgContext: case DeclaratorContext::TrailingReturnContext: case DeclaratorContext::TrailingReturnVarContext: case DeclaratorContext::RequiresExprContext: return true; } llvm_unreachable("unknown context kind!"); } /// mayHaveIdentifier - Return true if the identifier is either optional or /// required. This is true for normal declarators and prototypes, but not /// typenames. 
bool mayHaveIdentifier() const { switch (Context) { case DeclaratorContext::FileContext: case DeclaratorContext::KNRTypeListContext: case DeclaratorContext::MemberContext: case DeclaratorContext::BlockContext: case DeclaratorContext::ForContext: case DeclaratorContext::InitStmtContext: case DeclaratorContext::ConditionContext: case DeclaratorContext::PrototypeContext: case DeclaratorContext::LambdaExprParameterContext: case DeclaratorContext::TemplateParamContext: case DeclaratorContext::CXXCatchContext: case DeclaratorContext::ObjCCatchContext: case DeclaratorContext::RequiresExprContext: return true; case DeclaratorContext::TypeNameContext: case DeclaratorContext::FunctionalCastContext: case DeclaratorContext::CXXNewContext: case DeclaratorContext::AliasDeclContext: case DeclaratorContext::AliasTemplateContext: case DeclaratorContext::ObjCParameterContext: case DeclaratorContext::ObjCResultContext: case DeclaratorContext::BlockLiteralContext: case DeclaratorContext::LambdaExprContext: case DeclaratorContext::ConversionIdContext: case DeclaratorContext::TemplateArgContext: case DeclaratorContext::TemplateTypeArgContext: case DeclaratorContext::TrailingReturnContext: case DeclaratorContext::TrailingReturnVarContext: return false; } llvm_unreachable("unknown context kind!"); } /// Return true if the context permits a C++17 decomposition declarator. bool mayHaveDecompositionDeclarator() const { switch (Context) { case DeclaratorContext::FileContext: // FIXME: It's not clear that the proposal meant to allow file-scope // structured bindings, but it does. case DeclaratorContext::BlockContext: case DeclaratorContext::ForContext: case DeclaratorContext::InitStmtContext: case DeclaratorContext::ConditionContext: return true; case DeclaratorContext::MemberContext: case DeclaratorContext::PrototypeContext: case DeclaratorContext::TemplateParamContext: case DeclaratorContext::RequiresExprContext: // Maybe one day... return false; // These contexts don't allow any kind of non-abstract declarator. case DeclaratorContext::KNRTypeListContext: case DeclaratorContext::TypeNameContext: case DeclaratorContext::FunctionalCastContext: case DeclaratorContext::AliasDeclContext: case DeclaratorContext::AliasTemplateContext: case DeclaratorContext::LambdaExprParameterContext: case DeclaratorContext::ObjCParameterContext: case DeclaratorContext::ObjCResultContext: case DeclaratorContext::CXXNewContext: case DeclaratorContext::CXXCatchContext: case DeclaratorContext::ObjCCatchContext: case DeclaratorContext::BlockLiteralContext: case DeclaratorContext::LambdaExprContext: case DeclaratorContext::ConversionIdContext: case DeclaratorContext::TemplateArgContext: case DeclaratorContext::TemplateTypeArgContext: case DeclaratorContext::TrailingReturnContext: case DeclaratorContext::TrailingReturnVarContext: return false; } llvm_unreachable("unknown context kind!"); } /// mayBeFollowedByCXXDirectInit - Return true if the declarator can be /// followed by a C++ direct initializer, e.g. "int x(1);". bool mayBeFollowedByCXXDirectInit() const { if (hasGroupingParens()) return false; if (getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) return false; if (getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_extern && Context != DeclaratorContext::FileContext) return false; // Special names can't have direct initializers. 
if (Name.getKind() != UnqualifiedIdKind::IK_Identifier) return false; switch (Context) { case DeclaratorContext::FileContext: case DeclaratorContext::BlockContext: case DeclaratorContext::ForContext: case DeclaratorContext::InitStmtContext: case DeclaratorContext::TrailingReturnVarContext: return true; case DeclaratorContext::ConditionContext: // This may not be followed by a direct initializer, but it can't be a // function declaration either, and we'd prefer to perform a tentative // parse in order to produce the right diagnostic. return true; case DeclaratorContext::KNRTypeListContext: case DeclaratorContext::MemberContext: case DeclaratorContext::PrototypeContext: case DeclaratorContext::LambdaExprParameterContext: case DeclaratorContext::ObjCParameterContext: case DeclaratorContext::ObjCResultContext: case DeclaratorContext::TemplateParamContext: case DeclaratorContext::CXXCatchContext: case DeclaratorContext::ObjCCatchContext: case DeclaratorContext::TypeNameContext: case DeclaratorContext::FunctionalCastContext: // FIXME case DeclaratorContext::CXXNewContext: case DeclaratorContext::AliasDeclContext: case DeclaratorContext::AliasTemplateContext: case DeclaratorContext::BlockLiteralContext: case DeclaratorContext::LambdaExprContext: case DeclaratorContext::ConversionIdContext: case DeclaratorContext::TemplateArgContext: case DeclaratorContext::TemplateTypeArgContext: case DeclaratorContext::TrailingReturnContext: case DeclaratorContext::RequiresExprContext: return false; } llvm_unreachable("unknown context kind!"); } /// isPastIdentifier - Return true if we have parsed beyond the point where /// the name would appear. (This may happen even if we haven't actually parsed /// a name, perhaps because this context doesn't require one.) bool isPastIdentifier() const { return Name.isValid(); } /// hasName - Whether this declarator has a name, which might be an /// identifier (accessible via getIdentifier()) or some kind of /// special C++ name (constructor, destructor, etc.), or a structured /// binding (which is not exactly a name, but occupies the same position). bool hasName() const { return Name.getKind() != UnqualifiedIdKind::IK_Identifier || Name.Identifier || isDecompositionDeclarator(); } /// Return whether this declarator is a decomposition declarator. bool isDecompositionDeclarator() const { return BindingGroup.isSet(); } IdentifierInfo *getIdentifier() const { if (Name.getKind() == UnqualifiedIdKind::IK_Identifier) return Name.Identifier; return nullptr; } SourceLocation getIdentifierLoc() const { return Name.StartLocation; } /// Set the name of this declarator to be the given identifier. void SetIdentifier(IdentifierInfo *Id, SourceLocation IdLoc) { Name.setIdentifier(Id, IdLoc); } /// Set the decomposition bindings for this declarator. void setDecompositionBindings(SourceLocation LSquareLoc, ArrayRef Bindings, SourceLocation RSquareLoc); /// AddTypeInfo - Add a chunk to this declarator. Also extend the range to /// EndLoc, which should be the last token of the chunk. /// This function takes attrs by R-Value reference because it takes ownership /// of those attributes from the parameter. void AddTypeInfo(const DeclaratorChunk &TI, ParsedAttributes &&attrs, SourceLocation EndLoc) { DeclTypeInfo.push_back(TI); DeclTypeInfo.back().getAttrs().addAll(attrs.begin(), attrs.end()); getAttributePool().takeAllFrom(attrs.getPool()); if (!EndLoc.isInvalid()) SetRangeEnd(EndLoc); } /// AddTypeInfo - Add a chunk to this declarator. 
Also extend the range to /// EndLoc, which should be the last token of the chunk. void AddTypeInfo(const DeclaratorChunk &TI, SourceLocation EndLoc) { DeclTypeInfo.push_back(TI); if (!EndLoc.isInvalid()) SetRangeEnd(EndLoc); } /// Add a new innermost chunk to this declarator. void AddInnermostTypeInfo(const DeclaratorChunk &TI) { DeclTypeInfo.insert(DeclTypeInfo.begin(), TI); } /// Return the number of types applied to this declarator. unsigned getNumTypeObjects() const { return DeclTypeInfo.size(); } /// Return the specified TypeInfo from this declarator. TypeInfo #0 is /// closest to the identifier. const DeclaratorChunk &getTypeObject(unsigned i) const { assert(i < DeclTypeInfo.size() && "Invalid type chunk"); return DeclTypeInfo[i]; } DeclaratorChunk &getTypeObject(unsigned i) { assert(i < DeclTypeInfo.size() && "Invalid type chunk"); return DeclTypeInfo[i]; } typedef SmallVectorImpl::const_iterator type_object_iterator; typedef llvm::iterator_range type_object_range; /// Returns the range of type objects, from the identifier outwards. type_object_range type_objects() const { return type_object_range(DeclTypeInfo.begin(), DeclTypeInfo.end()); } void DropFirstTypeObject() { assert(!DeclTypeInfo.empty() && "No type chunks to drop."); DeclTypeInfo.front().destroy(); DeclTypeInfo.erase(DeclTypeInfo.begin()); } /// Return the innermost (closest to the declarator) chunk of this /// declarator that is not a parens chunk, or null if there are no /// non-parens chunks. const DeclaratorChunk *getInnermostNonParenChunk() const { for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) { if (!DeclTypeInfo[i].isParen()) return &DeclTypeInfo[i]; } return nullptr; } /// Return the outermost (furthest from the declarator) chunk of /// this declarator that is not a parens chunk, or null if there are /// no non-parens chunks. const DeclaratorChunk *getOutermostNonParenChunk() const { for (unsigned i = DeclTypeInfo.size(), i_end = 0; i != i_end; --i) { if (!DeclTypeInfo[i-1].isParen()) return &DeclTypeInfo[i-1]; } return nullptr; } /// isArrayOfUnknownBound - This method returns true if the declarator /// is a declarator for an array of unknown bound (looking through /// parentheses). bool isArrayOfUnknownBound() const { const DeclaratorChunk *chunk = getInnermostNonParenChunk(); return (chunk && chunk->Kind == DeclaratorChunk::Array && !chunk->Arr.NumElts); } /// isFunctionDeclarator - This method returns true if the declarator /// is a function declarator (looking through parentheses). /// If true is returned, then the reference type parameter idx is /// assigned with the index of the declaration chunk. bool isFunctionDeclarator(unsigned& idx) const { for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) { switch (DeclTypeInfo[i].Kind) { case DeclaratorChunk::Function: idx = i; return true; case DeclaratorChunk::Paren: continue; case DeclaratorChunk::Pointer: case DeclaratorChunk::Reference: case DeclaratorChunk::Array: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::MemberPointer: case DeclaratorChunk::Pipe: return false; } llvm_unreachable("Invalid type chunk"); } return false; } /// isFunctionDeclarator - Once this declarator is fully parsed and formed, /// this method returns true if the identifier is a function declarator /// (looking through parentheses). bool isFunctionDeclarator() const { unsigned index; return isFunctionDeclarator(index); } /// getFunctionTypeInfo - Retrieves the function type info object /// (looking through parentheses). 
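Chunks are ordered from the identifier outward (element #0 binds most tightly), and queries such as isFunctionDeclarator() and getInnermostNonParenChunk() scan in that order while treating grouping parens as transparent. A sketch of the same traversal, under the usual header assumptions:

#include "clang/Sema/DeclSpec.h"

// Sketch: does this declarator ultimately declare an array, looking through
// any grouping parentheses (the same idea as isArrayOfUnknownBound)?
bool declaresArray(const clang::Declarator &D) {
  if (const clang::DeclaratorChunk *Chunk = D.getInnermostNonParenChunk())
    return Chunk->Kind == clang::DeclaratorChunk::Array;
  return false;
}

// Equivalent manual walk over type_objects(), identifier outward.
bool declaresArrayManually(const clang::Declarator &D) {
  for (const clang::DeclaratorChunk &Chunk : D.type_objects()) {
    if (Chunk.Kind == clang::DeclaratorChunk::Paren)
      continue;                           // grouping parens are transparent
    return Chunk.Kind == clang::DeclaratorChunk::Array;
  }
  return false;
}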
DeclaratorChunk::FunctionTypeInfo &getFunctionTypeInfo() { assert(isFunctionDeclarator() && "Not a function declarator!"); unsigned index = 0; isFunctionDeclarator(index); return DeclTypeInfo[index].Fun; } /// getFunctionTypeInfo - Retrieves the function type info object /// (looking through parentheses). const DeclaratorChunk::FunctionTypeInfo &getFunctionTypeInfo() const { return const_cast(this)->getFunctionTypeInfo(); } /// Determine whether the declaration that will be produced from /// this declaration will be a function. /// /// A declaration can declare a function even if the declarator itself /// isn't a function declarator, if the type specifier refers to a function /// type. This routine checks for both cases. bool isDeclarationOfFunction() const; /// Return true if this declaration appears in a context where a /// function declarator would be a function declaration. bool isFunctionDeclarationContext() const { if (getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) return false; switch (Context) { case DeclaratorContext::FileContext: case DeclaratorContext::MemberContext: case DeclaratorContext::BlockContext: case DeclaratorContext::ForContext: case DeclaratorContext::InitStmtContext: return true; case DeclaratorContext::ConditionContext: case DeclaratorContext::KNRTypeListContext: case DeclaratorContext::TypeNameContext: case DeclaratorContext::FunctionalCastContext: case DeclaratorContext::AliasDeclContext: case DeclaratorContext::AliasTemplateContext: case DeclaratorContext::PrototypeContext: case DeclaratorContext::LambdaExprParameterContext: case DeclaratorContext::ObjCParameterContext: case DeclaratorContext::ObjCResultContext: case DeclaratorContext::TemplateParamContext: case DeclaratorContext::CXXNewContext: case DeclaratorContext::CXXCatchContext: case DeclaratorContext::ObjCCatchContext: case DeclaratorContext::BlockLiteralContext: case DeclaratorContext::LambdaExprContext: case DeclaratorContext::ConversionIdContext: case DeclaratorContext::TemplateArgContext: case DeclaratorContext::TemplateTypeArgContext: case DeclaratorContext::TrailingReturnContext: case DeclaratorContext::TrailingReturnVarContext: case DeclaratorContext::RequiresExprContext: return false; } llvm_unreachable("unknown context kind!"); } /// Determine whether this declaration appears in a context where an /// expression could appear. bool isExpressionContext() const { switch (Context) { case DeclaratorContext::FileContext: case DeclaratorContext::KNRTypeListContext: case DeclaratorContext::MemberContext: // FIXME: sizeof(...) permits an expression. 
case DeclaratorContext::TypeNameContext: case DeclaratorContext::FunctionalCastContext: case DeclaratorContext::AliasDeclContext: case DeclaratorContext::AliasTemplateContext: case DeclaratorContext::PrototypeContext: case DeclaratorContext::LambdaExprParameterContext: case DeclaratorContext::ObjCParameterContext: case DeclaratorContext::ObjCResultContext: case DeclaratorContext::TemplateParamContext: case DeclaratorContext::CXXNewContext: case DeclaratorContext::CXXCatchContext: case DeclaratorContext::ObjCCatchContext: case DeclaratorContext::BlockLiteralContext: case DeclaratorContext::LambdaExprContext: case DeclaratorContext::ConversionIdContext: case DeclaratorContext::TrailingReturnContext: case DeclaratorContext::TrailingReturnVarContext: case DeclaratorContext::TemplateTypeArgContext: case DeclaratorContext::RequiresExprContext: return false; case DeclaratorContext::BlockContext: case DeclaratorContext::ForContext: case DeclaratorContext::InitStmtContext: case DeclaratorContext::ConditionContext: case DeclaratorContext::TemplateArgContext: return true; } llvm_unreachable("unknown context kind!"); } /// Return true if a function declarator at this position would be a /// function declaration. bool isFunctionDeclaratorAFunctionDeclaration() const { if (!isFunctionDeclarationContext()) return false; for (unsigned I = 0, N = getNumTypeObjects(); I != N; ++I) if (getTypeObject(I).Kind != DeclaratorChunk::Paren) return false; return true; } /// Determine whether a trailing return type was written (at any /// level) within this declarator. bool hasTrailingReturnType() const { for (const auto &Chunk : type_objects()) if (Chunk.Kind == DeclaratorChunk::Function && Chunk.Fun.hasTrailingReturnType()) return true; return false; } + /// Get the trailing return type appearing (at any level) within this + /// declarator. + ParsedType getTrailingReturnType() const { + for (const auto &Chunk : type_objects()) + if (Chunk.Kind == DeclaratorChunk::Function && + Chunk.Fun.hasTrailingReturnType()) + return Chunk.Fun.getTrailingReturnType(); + return ParsedType(); + } /// \brief Sets a trailing requires clause for this declarator. void setTrailingRequiresClause(Expr *TRC) { TrailingRequiresClause = TRC; } /// \brief Sets a trailing requires clause for this declarator. Expr *getTrailingRequiresClause() { return TrailingRequiresClause; } /// \brief Determine whether a trailing requires clause was written in this /// declarator. bool hasTrailingRequiresClause() const { return TrailingRequiresClause != nullptr; } /// Sets the template parameter lists that preceded the declarator. void setTemplateParameterLists(ArrayRef TPLs) { TemplateParameterLists = TPLs; } /// The template parameter lists that preceded the declarator. ArrayRef getTemplateParameterLists() const { return TemplateParameterLists; } /// Sets the template parameter list generated from the explicit template /// parameters along with any invented template parameters from /// placeholder-typed parameters. void setInventedTemplateParameterList(TemplateParameterList *Invented) { InventedTemplateParameterList = Invented; } /// The template parameter list generated from the explicit template /// parameters along with any invented template parameters from /// placeholder-typed parameters, if there were any such parameters. TemplateParameterList * getInventedTemplateParameterList() const { return InventedTemplateParameterList; } /// takeAttributes - Takes attributes from the given parsed-attributes /// set and add them to this declarator. 
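The Declarator::getTrailingReturnType() accessor added by this patch mirrors hasTrailingReturnType(): it walks the chunks and returns the first function chunk's trailing return type, or a null ParsedType when none was written. A hedged usage sketch:

#include "clang/Sema/DeclSpec.h"

// Sketch: prefer the explicitly written trailing return type when present.
clang::ParsedType trailingReturnTypeOrNull(const clang::Declarator &D) {
  if (D.hasTrailingReturnType())
    return D.getTrailingReturnType(); // e.g. the 'int' in 'auto f() -> int'
  return clang::ParsedType();         // null ParsedType: nothing was written
}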
/// /// These examples both add 3 attributes to "var": /// short int var __attribute__((aligned(16),common,deprecated)); /// short int x, __attribute__((aligned(16)) var /// __attribute__((common,deprecated)); /// /// Also extends the range of the declarator. void takeAttributes(ParsedAttributes &attrs, SourceLocation lastLoc) { Attrs.takeAllFrom(attrs); if (!lastLoc.isInvalid()) SetRangeEnd(lastLoc); } const ParsedAttributes &getAttributes() const { return Attrs; } ParsedAttributes &getAttributes() { return Attrs; } /// hasAttributes - do we contain any attributes? bool hasAttributes() const { if (!getAttributes().empty() || getDeclSpec().hasAttributes()) return true; for (unsigned i = 0, e = getNumTypeObjects(); i != e; ++i) if (!getTypeObject(i).getAttrs().empty()) return true; return false; } /// Return a source range list of C++11 attributes associated /// with the declarator. void getCXX11AttributeRanges(SmallVectorImpl &Ranges) { for (const ParsedAttr &AL : Attrs) if (AL.isCXX11Attribute()) Ranges.push_back(AL.getRange()); } void setAsmLabel(Expr *E) { AsmLabel = E; } Expr *getAsmLabel() const { return AsmLabel; } void setExtension(bool Val = true) { Extension = Val; } bool getExtension() const { return Extension; } void setObjCIvar(bool Val = true) { ObjCIvar = Val; } bool isObjCIvar() const { return ObjCIvar; } void setObjCWeakProperty(bool Val = true) { ObjCWeakProperty = Val; } bool isObjCWeakProperty() const { return ObjCWeakProperty; } void setInvalidType(bool Val = true) { InvalidType = Val; } bool isInvalidType() const { return InvalidType || DS.getTypeSpecType() == DeclSpec::TST_error; } void setGroupingParens(bool flag) { GroupingParens = flag; } bool hasGroupingParens() const { return GroupingParens; } bool isFirstDeclarator() const { return !CommaLoc.isValid(); } SourceLocation getCommaLoc() const { return CommaLoc; } void setCommaLoc(SourceLocation CL) { CommaLoc = CL; } bool hasEllipsis() const { return EllipsisLoc.isValid(); } SourceLocation getEllipsisLoc() const { return EllipsisLoc; } void setEllipsisLoc(SourceLocation EL) { EllipsisLoc = EL; } void setFunctionDefinitionKind(FunctionDefinitionKind Val) { FunctionDefinition = Val; } bool isFunctionDefinition() const { return getFunctionDefinitionKind() != FDK_Declaration; } FunctionDefinitionKind getFunctionDefinitionKind() const { return (FunctionDefinitionKind)FunctionDefinition; } /// Returns true if this declares a real member and not a friend. bool isFirstDeclarationOfMember() { return getContext() == DeclaratorContext::MemberContext && !getDeclSpec().isFriendSpecified(); } /// Returns true if this declares a static member. This cannot be called on a /// declarator outside of a MemberContext because we won't know until /// redeclaration time if the decl is static. bool isStaticMember(); /// Returns true if this declares a constructor or a destructor. bool isCtorOrDtor(); void setRedeclaration(bool Val) { Redeclaration = Val; } bool isRedeclaration() const { return Redeclaration; } }; /// This little struct is used to capture information about /// structure field declarators, which is basically just a bitfield size. struct FieldDeclarator { Declarator D; Expr *BitfieldSize; explicit FieldDeclarator(const DeclSpec &DS) : D(DS, DeclaratorContext::MemberContext), BitfieldSize(nullptr) {} }; /// Represents a C++11 virt-specifier-seq. class VirtSpecifiers { public: enum Specifier { VS_None = 0, VS_Override = 1, VS_Final = 2, VS_Sealed = 4, // Represents the __final keyword, which is legal for gcc in pre-C++11 mode. 
VS_GNU_Final = 8 }; VirtSpecifiers() : Specifiers(0), LastSpecifier(VS_None) { } bool SetSpecifier(Specifier VS, SourceLocation Loc, const char *&PrevSpec); bool isUnset() const { return Specifiers == 0; } bool isOverrideSpecified() const { return Specifiers & VS_Override; } SourceLocation getOverrideLoc() const { return VS_overrideLoc; } bool isFinalSpecified() const { return Specifiers & (VS_Final | VS_Sealed | VS_GNU_Final); } bool isFinalSpelledSealed() const { return Specifiers & VS_Sealed; } SourceLocation getFinalLoc() const { return VS_finalLoc; } void clear() { Specifiers = 0; } static const char *getSpecifierName(Specifier VS); SourceLocation getFirstLocation() const { return FirstLocation; } SourceLocation getLastLocation() const { return LastLocation; } Specifier getLastSpecifier() const { return LastSpecifier; } private: unsigned Specifiers; Specifier LastSpecifier; SourceLocation VS_overrideLoc, VS_finalLoc; SourceLocation FirstLocation; SourceLocation LastLocation; }; enum class LambdaCaptureInitKind { NoInit, //!< [a] CopyInit, //!< [a = b], [a = {b}] DirectInit, //!< [a(b)] ListInit //!< [a{b}] }; /// Represents a complete lambda introducer. struct LambdaIntroducer { /// An individual capture in a lambda introducer. struct LambdaCapture { LambdaCaptureKind Kind; SourceLocation Loc; IdentifierInfo *Id; SourceLocation EllipsisLoc; LambdaCaptureInitKind InitKind; ExprResult Init; ParsedType InitCaptureType; SourceRange ExplicitRange; LambdaCapture(LambdaCaptureKind Kind, SourceLocation Loc, IdentifierInfo *Id, SourceLocation EllipsisLoc, LambdaCaptureInitKind InitKind, ExprResult Init, ParsedType InitCaptureType, SourceRange ExplicitRange) : Kind(Kind), Loc(Loc), Id(Id), EllipsisLoc(EllipsisLoc), InitKind(InitKind), Init(Init), InitCaptureType(InitCaptureType), ExplicitRange(ExplicitRange) {} }; SourceRange Range; SourceLocation DefaultLoc; LambdaCaptureDefault Default; SmallVector Captures; LambdaIntroducer() : Default(LCD_None) {} /// Append a capture in a lambda introducer. void addCapture(LambdaCaptureKind Kind, SourceLocation Loc, IdentifierInfo* Id, SourceLocation EllipsisLoc, LambdaCaptureInitKind InitKind, ExprResult Init, ParsedType InitCaptureType, SourceRange ExplicitRange) { Captures.push_back(LambdaCapture(Kind, Loc, Id, EllipsisLoc, InitKind, Init, InitCaptureType, ExplicitRange)); } }; struct InventedTemplateParameterInfo { /// The number of parameters in the template parameter list that were /// explicitly specified by the user, as opposed to being invented by use /// of an auto parameter. unsigned NumExplicitTemplateParams = 0; /// If this is a generic lambda or abbreviated function template, use this /// as the depth of each 'auto' parameter, during initial AST construction. unsigned AutoTemplateParameterDepth = 0; /// Store the list of the template parameters for a generic lambda or an /// abbreviated function template. /// If this is a generic lambda or abbreviated function template, this holds /// the explicit template parameters followed by the auto parameters /// converted into TemplateTypeParmDecls. /// It can be used to construct the generic lambda or abbreviated template's /// template parameter list during initial AST construction. 
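LambdaIntroducer simply accumulates parsed captures; addCapture records the capture kind, the init style, and both source ranges so Sema can later rebuild an introducer like '[x, &y = expr]' faithfully. A sketch (Id, Loc and Range would come from the parser):

#include "clang/Sema/DeclSpec.h"
#include "clang/Basic/Lambda.h"

void demoLambdaIntroducer(clang::IdentifierInfo *Id, clang::SourceLocation Loc,
                          clang::SourceRange Range) {
  clang::LambdaIntroducer Intro;          // default capture: LCD_None
  Intro.Range = Range;                    // the '[' ... ']' of the introducer
  // A simple by-copy capture '[x]' with no initializer.
  Intro.addCapture(clang::LCK_ByCopy, Loc, Id,
                   /*EllipsisLoc=*/clang::SourceLocation(),
                   clang::LambdaCaptureInitKind::NoInit,
                   /*Init=*/clang::ExprResult(),
                   /*InitCaptureType=*/clang::ParsedType(),
                   /*ExplicitRange=*/clang::SourceRange(Loc, Loc));
  (void)Intro.Captures.size();            // 1
}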
SmallVector TemplateParams; }; } // end namespace clang #endif // LLVM_CLANG_SEMA_DECLSPEC_H diff --git a/contrib/llvm-project/clang/include/clang/Sema/Sema.h b/contrib/llvm-project/clang/include/clang/Sema/Sema.h index 16a7084f6b08..1b5e389501c3 100644 --- a/contrib/llvm-project/clang/include/clang/Sema/Sema.h +++ b/contrib/llvm-project/clang/include/clang/Sema/Sema.h @@ -1,12647 +1,12649 @@ //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include #include #include #include #include namespace llvm { class APSInt; template struct DenseMapInfo; template class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class 
ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. 
SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token; all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// us to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while the function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// A key method to reduce duplicate debug info from Sema. virtual void anchor(); ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it.
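// Editor's note: illustrative sketch only, not part of the original header. The
// PreferredTypeBuilder above only reports a type when queried with the same token
// location it was primed with; the Sema reference and token locations below are
// placeholders.
#if 0
  PreferredTypeBuilder PT;
  PT.enterReturn(S, ReturnTokLoc);      // record the expected type at ReturnTokLoc
  QualType T1 = PT.get(ReturnTokLoc);   // same token -> returns the recorded type
  QualType T2 = PT.get(OtherTokLoc);    // different token -> null QualType
#endif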
bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we would otherwise have to do since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 29; static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent; typedef OpaquePtr DeclGroupPtrTy; typedef OpaquePtr TemplateTy; typedef OpaquePtr TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails.
llvm::SmallVector TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label, pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label; just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma (push, InternalPragmaSlot, ) // void Method {} // #pragma (pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector Stack; ValueType DefaultValue; // Value used for PSK_Reset action.
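// Editor's note: illustrative sketch only, not part of the original header. The
// Act() member above composes push/pop/set; assuming placeholder SourceLocations
// Loc1..Loc3, a "#pragma pack" sequence maps roughly onto it as follows.
#if 0
  PragmaStack<unsigned> Pack(/*Default=*/8);
  Pack.Act(Loc1, PSK_Push_Set, "r1", 4); // #pragma pack(push, r1, 4): save 8 under "r1", current becomes 4
  Pack.Act(Loc2, PSK_Reset, "", 0);      // #pragma pack():            current resets to the default (8)
  Pack.Act(Loc3, PSK_Pop, "r1", 0);      // #pragma pack(pop, r1):     restore the value saved under "r1" (8)
#endif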
ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector PackIncludeStack; // Segment #pragmas. PragmaStack DataSegStack; PragmaStack BSSSegStack; PragmaStack ConstSegStack; PragmaStack CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FPOptionsOverride(FpPragmaStack.CurrentValue); } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This is an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector Entries; }; SmallVector PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression.
SmallVector ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SetVector, llvm::SmallPtrSet>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr FieldCollector; typedef llvm::SmallSetVector NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of the translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with the new-expression /// used in the initializer of the field. typedef std::pair DeleteExprLoc; typedef llvm::SmallVector DeleteLocs; llvm::MapVector DeleteExprs; typedef llvm::SmallPtrSet RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encountered and used in the TU.
SmallVector ExternalDeclarations; typedef LazyVector UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. 
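// Editor's note: illustrative sketch only, not part of the original header. The
// DelayedDiagnostics member above is used in a paired push/pop pattern; "S" and
// "Pool" are placeholders for a Sema instance and a sema::DelayedDiagnosticPool.
#if 0
  Sema::DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
  // ... parse a declaration; access/deprecation diagnostics collect in Pool ...
  S.DelayedDiagnostics.popWithoutEmitting(State); // restore; emit Pool's contents separately
#endif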
class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast(DC)) FD->setWillHaveBody(true); else assert(isa(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. 
/// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \ ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. 
Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector DelayedDecltypeBinds; llvm::SmallPtrSet PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. 
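// Editor's note: illustrative sketch only, not part of the original header. A
// record like the one above is pushed and popped around regions that change how
// expressions are evaluated; "S" is a placeholder Sema instance.
#if 0
  S.PushExpressionEvaluationContext(Sema::ExpressionEvaluationContext::Unevaluated);
  // ... build the operand of sizeof/decltype; nothing in here is odr-used ...
  S.PopExpressionEvaluationContext();
#endif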
llvm::SmallPtrSet ReferenceToConsteval; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector & getMismatchingDeleteExpressions() const; typedef std::pair GlobalMethods; typedef llvm::DenseMap GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. 
bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the CurFPFeatures state on entry/exit of compound /// statements. class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) { OldOverrides = S.FpPragmaStack.CurrentValue; } ~FPFeaturesStateRAII() { S.CurFPFeatures = OldFPFeaturesState; S.FpPragmaStack.CurrentValue = OldOverrides; } unsigned getOverrides() { return OldOverrides; } private: Sema& S; FPOptions OldFPFeaturesState; unsigned OldOverrides; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; /// Invent a new identifier for parameters of abbreviated templates. IdentifierInfo * InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, unsigned Index); void emitAndClearUnusedLocalTypedefWarnings(); private: /// Function or variable declarations to be checked for whether the deferred /// diagnostics should be emitted. SmallVector DeclsToCheckForDeferredDiags; public: // Emit all deferred diagnostics. void emitDeferredDiags(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. 
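// Editor's note: illustrative sketch only, not part of the original header. The
// Diag() entry point above is used with streamed arguments; the diagnostic ID,
// declaration, and locations below are placeholders.
#if 0
  S.Diag(NameLoc, diag::err_placeholder_example) << SomeNamedDecl << SourceRange(BeginLoc, EndLoc);
#endif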
void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); /// Determine whether the callee of a particular function call can throw. /// E, D and Loc are all optional. static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple Args; template void emit(const SemaDiagnosticBuilder &DB, std::index_sequence) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
{ assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for()); DB << T; } }; /// A derivative of BoundTypeDiagnoser for which the diagnostic's type /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless. /// For example, a diagnostic with no other parameters would generally have /// the form "...%select{incomplete|sizeless}0 type %1...". template class SizelessTypeDiagnoser : public BoundTypeDiagnoser { public: SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args) : BoundTypeDiagnoser(DiagID, Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID); this->emit(DB, std::index_sequence_for()); DB << T->isSizelessType() << T; } }; enum class CompleteTypeKind { /// Apply the normal rules for complete types. In particular, /// treat all sizeless types as incomplete. Normal, /// Relax the normal rules for complete types so that they include /// sizeless built-in types. AcceptSizeless, // FIXME: Eventually we should flip the default to Normal and opt in // to AcceptSizeless rather than opt out of it. Default = AcceptSizeless }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return D->isUnconditionallyVisible() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. 
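// Editor's note: illustrative sketch only, not part of the original header. The
// RequireCompleteType helpers declared below are typically used as a
// check-and-diagnose guard; the diagnostic ID and names are placeholders.
#if 0
  if (S.RequireCompleteType(UseLoc, Ty, diag::err_placeholder_incomplete))
    return ExprError(); // the type was incomplete and a diagnostic was emitted
#endif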
bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... 
Args) { SizelessTypeDiagnoser Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. 
NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as a non-type, and an expression representing /// that name has been formed. NC_ContextIndependentExpr, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification ContextIndependentExpr(ExprResult E) { NameClassification Result(NC_ContextIndependentExpr); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_ContextIndependentExpr); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return 
TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
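// For example (illustrative only; 'func' is a hypothetical declaration), the
// checks above can fire for source such as:
//
//   void func(int);            // not a template
//   void g() { func<int>(0); } // 'func' written as if it named a template
//
// If this routine returns true, the parser can report the stray '<' and '>'
// locations via diagnoseExprIntendedAsTemplateName, declared below.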
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions).
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
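/// For example (illustrative only), under ARC a union member with retainable
/// object pointer type makes the union non-trivial to default-initialize,
/// copy, and destroy:
/// \code
///   union U { __strong id Obj; int I; }; // non-trivial C union
/// \endcode
/// Uses of such a union in the contexts listed in NonTrivialCUnionContext are
/// then diagnosed.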
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. 
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem.
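/// For example (illustrative only; 'Widget' and module 'A' are hypothetical),
/// this is the path behind diagnostics of the form:
/// \code
///   Widget W; // error: declaration of 'Widget' must be imported from
///             // module 'A' before it is required
/// \endcode
/// MissingImportKind selects which kind of entity the diagnostic's %select
/// names.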
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
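/// For example (illustrative only), this feeds the %select in
/// err_tag_reference_non_tag:
/// \code
///   using X = int;
///   struct X *P; // 'X' is a type alias, not a struct: NTK_TypeAlias
/// \endcode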
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
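/// For example (illustrative only), a defaulted function is either a special
/// member or a comparison:
/// \code
///   struct S {
///     S(const S &) = default;                     // CXXCopyConstructor
///     bool operator==(const S &) const = default; // DefaultedComparisonKind::Equal
///   };
/// \endcode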
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added.
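/// As an illustrative reading of the priorities above: an attribute written
/// directly on the declaration (AP_Explicit == 0) wins over one injected by
/// '#pragma clang attribute' (AP_PragmaClangAttribute == 1), which in turn
/// wins over one inferred from another platform
/// (AP_InferredFromOtherPlatform == 2); the lower-priority attribute removes
/// an existing higher-priority one for the same platform, and a
/// higher-priority attribute is not applied when a lower-priority one is
/// already present.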
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. 
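/// For example (illustrative only):
/// \code
///   void f(int);
///   void f(double); // Ovl_Overload: different signature
///   void f(int);    // Ovl_Match: redeclares the first 'f'
/// \endcode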
Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. 
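/// For example (illustrative only; 'A' is a hypothetical aggregate with an
/// int member 'X'):
/// \code
///   const int &R1 = 42;             // OK: temporary lifetime-extended to R1
///   const int &R2 = A{42}.X;        // OK: extension through a member access
///   const int &R3 = std::min(1, 2); // dangling: extension cannot pass
///                                   // through the function call
/// \endcode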
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
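/// Callers supply a ContextualImplicitConverter (typically an
/// ICEConvertDiagnoser subclass) whose match() accepts the destination types
/// and whose diagnose* hooks provide the context-specific diagnostics. An
/// illustrative use (hypothetical code) is converting a class-type switch
/// condition to an integral type through a unique, non-explicit conversion
/// function:
/// \code
///   struct Size { operator int() const; };
///   void f(Size S) { switch (S) { default: break; } } // uses operator int()
/// \endcode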
ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallSetVector AssociatedNamespaceSet; typedef llvm::SmallSetVector AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef ParamTypes, ArrayRef Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, 
bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call 
expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. 
This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function TypoDiagnosticGenerator; typedef std::function TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector DelayedTypos; /// Creates a new TypoExpr AST node. 
TypoExpr *createDelayedTypo(std::unique_ptr TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, SourceLocation TypoLoc); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. 
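/// A minimal illustrative sketch of querying the emission status declared
/// below, assuming a Sema &SemaRef and a FunctionDecl *FD are in scope:
/// \code
///   if (SemaRef.getEmissionStatus(FD) ==
///       Sema::FunctionEmissionStatus::Emitted)
///     ; // FD is known to be code-generated for this translation unit
/// \endcode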
enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? 
ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID, SourceLocation Loc); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. 
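/// Illustrative source-level scenario for the adjustment described above,
/// assuming a target (e.g. 32-bit Microsoft C++ ABI) whose default member
/// calling convention differs from the free-function default:
/// \code
///   typedef void FnTy();
///   struct S {
///     FnTy f;   // method type formed from a typedef; its calling
///               // convention is adjusted to the member default here
///   };
/// \endcode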
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. 
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); /// Returns default addr space for method qualifiers. LangAS getDefaultCXXMethodAddrSpace() const; private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See descriptoin of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in global method pool for /// given selector. It checks the desired kind first, if none is found, and /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, function returns false; otherwise, it /// returns true. 
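/// A minimal illustrative sketch, assuming a Sema &SemaRef and a Selector
/// Sel are in scope (the element type of Methods is assumed here to be
/// ObjCMethodDecl *):
/// \code
///   SmallVector<ObjCMethodDecl *, 4> Methods;
///   bool Multiple = SemaRef.CollectMultipleMethodsInGlobalPool(
///       Sel, Methods, /*InstanceFirst=*/true, /*CheckTheOther=*/true);
/// \endcode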
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaraation for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? 
Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef Elts, bool isStmtExpr); /// A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc, ConditionResult Cond, SourceLocation RParenLoc, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. 
Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef Constraints, ArrayRef Clobbers, ArrayRef Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); 
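/// Illustrative source-level example of the copy-elision candidate logic
/// used when building return statements (see getCopyElisionCandidate and
/// CopyElisionSemanticsKind above): a local variable returned by name is a
/// candidate for NRVO / implicit move.
/// \code
///   struct Widget { /* ... */ };
///   Widget make() {
///     Widget W;
///     return W;   // 'W' is the copy-elision (NRVO / implicit-move) candidate
///   }
/// \endcode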
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
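/// Illustrative source-level example of the conversion this diagnoses
/// (typically under -Wzero-as-null-pointer-constant in C++):
/// \code
///   void take(int *);
///   void test() {
///     int *p = 0;   // literal 0 implicitly converted to a null pointer
///     take(0);      // likewise diagnosed here
///   }
/// \endcode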
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
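/// Illustrative source-level example of the odr-use distinction described
/// above:
/// \code
///   struct X { static const int N = 3; };
///   int  A[X::N];          // not an odr-use: N is only read as a constant
///   const int *P = &X::N;  // odr-use: a definition of X::N is required
/// \endcode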
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. 
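/// A minimal illustrative sketch of the recovery call declared below,
/// assuming a Sema &SemaRef, an ExprResult E and a diagnostic ID DiagID are
/// in scope:
/// \code
///   if (SemaRef.tryToRecoverWithCall(E, SemaRef.PDiag(DiagID)))
///     return E;   // E now refers to a call, or is invalid after an error
/// \endcode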
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Try to convert an expression \p E to type \p Ty. Returns the result of the /// conversion. ExprResult tryConvertExprToType(Expr *E, QualType Ty); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
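/// A minimal illustrative sketch, assuming a Sema &SemaRef and a VarDecl *VD
/// are in scope:
/// \code
///   if (SemaRef.getNonOdrUseReasonInCurrentContext(VD) != NOUR_None)
///     ; // a reference to VD here would not constitute an odr-use
/// \endcode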
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand); ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E); ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, ParsedType Ty); ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, Expr *E); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef ArgTypes, ArrayRef ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef Types, ArrayRef Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef Dims, ArrayRef Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
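/// Illustrative source-level example of the '.' to '->' recovery described
/// above:
/// \code
///   struct Pointee { int x; };
///   struct SmartPtr { Pointee *operator->(); };
///   void test(SmartPtr SP) {
///     int n = SP.x;   // no member 'x' in SmartPtr; Sema retries as SP->x
///   }
/// \endcode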
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
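/// A minimal illustrative sketch of building a call, assuming a Sema
/// &SemaRef, a callee Expr *Fn, arguments Args (MultiExprArg) and the paren
/// locations LParenLoc/RParenLoc are in scope:
/// \code
///   ExprResult Call =
///       SemaRef.BuildCallExpr(/*Scope=*/nullptr, Fn, LParenLoc, Args,
///                             RParenLoc);
///   if (Call.isInvalid())
///     return ExprError();
/// \endcode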
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc, unsigned TemplateDepth); // Handle the final expression in a statement expression. 
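/// Illustrative source-level example of a GNU statement expression; the
/// final expression is the one handled by ActOnStmtExprResult below:
/// \code
///   int f();
///   int x = ({ int t = f(); t + 1; });   // 't + 1' is the result expression
/// \endcode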
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
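/// (Editorial illustration, not part of the original header.) Element-wise
/// vector conversion, assuming Clang's ext_vector_type extension:
/// \code
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   typedef int   int4   __attribute__((ext_vector_type(4)));
///   int4 truncate(float4 v) { return __builtin_convertvector(v, int4); }
/// \endcode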
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: enum class ComparisonCategoryUsage { /// The '<=>' operator was used in an expression and a builtin operator /// was selected. OperatorInExpression, /// A defaulted 'operator<=>' needed the comparison category. This /// typically only applies to 'std::strong_ordering', due to the implicit /// fallback return value. DefaultedOperator, }; /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
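/// (Editorial illustration, not part of the original header.) An
/// initializer-list constructor in the sense of [dcl.init.list]p2; the type
/// below is hypothetical:
/// \code
///   #include <initializer_list>
///   struct IntSet {
///     IntSet(std::initializer_list<int> xs);   // initializer-list constructor
///   };
///   IntSet s{1, 2, 3};                         // selected for list-initialization
/// \endcode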
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// correspnding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
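// (Editorial note, not part of the original header.) "Elidable" refers to
// copy/move constructions the implementation may omit, e.g.:
//
//   struct S { S(); S(const S &); };
//   S make() { S s; return s; }   // copying 's' into the return value may be elided (NRVO)
//   S x = make();                 // in C++17 the temporary is guaranteed to be elided into 'x'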
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet ExceptionsSeen; SmallVector Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have. 
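/// (Editorial illustration, not part of the original header.) The computed
/// specification reflects whether the members' and bases' corresponding
/// special members may throw, e.g. for implicitly-declared default
/// constructors:
/// \code
///   struct MayThrow { MayThrow() noexcept(false); };
///   struct A { int i = 0; };
///   struct B { MayThrow m; };
///   static_assert(noexcept(A()), "");    // nothing potentially-throwing is called
///   static_assert(!noexcept(B()), "");   // member's ctor may throw -> noexcept(false)
/// \endcode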
ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// default constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef DynamicExceptions, ArrayRef DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef DynamicExceptions, ArrayRef DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. 
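/// (Editorial illustration, not part of the original header.) For example:
/// \code
///   struct S { int i = 0; };   // no user-declared constructors
///   S s;                       // uses the implicitly-declared default constructor
/// \endcode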
CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. 
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). 
ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. 
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
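/// (Editorial illustration, not part of the original header.) A class-scope
/// allocation function, when present, is found before the global one; the
/// 'Pool' type below is hypothetical:
/// \code
///   #include <cstddef>
///   #include <new>
///   struct Pool {
///     static void *operator new(std::size_t n) { return ::operator new(n); }
///     static void operator delete(void *p) noexcept { ::operator delete(p); }
///   };
///   Pool *p = new Pool;   // resolves to Pool::operator new
/// \endcode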
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
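/// (Editorial illustration, not part of the original header.) A case where the
/// full-expression needs a cleanup; the types below are hypothetical:
/// \code
///   struct Guard { Guard(); ~Guard(); int value() const; };
///   int n = Guard().value();   // the temporary 'Guard' is destroyed at the end
///                              // of the full-expression, so a cleanup is attached
/// \endcode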
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. 
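/// (Editorial illustration, not part of the original header.) A lambda
/// init-capture whose initializer is analyzed by the surrounding init-capture
/// routines:
/// \code
///   #include <memory>
///   auto make_counter() {
///     auto p = std::make_unique<int>(0);
///     return [ptr = std::move(p)]() mutable { return ++*ptr; };   // init-capture by move
///   }
/// \endcode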
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
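/// (Editorial illustration, not part of the original header.) Assuming blocks
/// are enabled (-fblocks, e.g. Objective-C++), a lambda can be converted to a
/// block pointer with a matching signature:
/// \code
///   auto lam = [](int x) { return x + 1; };
///   int (^blk)(int) = lam;   // lambda-to-block-pointer conversion
/// \endcode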
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. llvm::DenseMap NormalizationCache; llvm::ContextualFoldingSet SatisfactionCache; public: const NormalizedConstraint * getNormalizedAssociatedConstraints( NamedDecl *ConstrainedDecl, ArrayRef AssociatedConstraints); /// \brief Check whether the given declaration's associated constraints are /// at least as constrained as another declaration's according to the /// partial ordering of constraints. /// /// \param Result If no error occurred, receives the result of true if D1 is /// at least as constrained as D2, and false otherwise. /// /// \returns true if an error occurred, false otherwise. bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef AC1, NamedDecl *D2, ArrayRef AC2, bool &Result); /// If D1 was not at least as constrained as D2, but would've been if a pair /// of atomic constraints involved had been declared in a concept and not /// repeated in two separate places in code, emit a diagnostic noting this. /// \returns true if such a diagnostic was emitted, false otherwise. bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef AC1, NamedDecl *D2, ArrayRef AC2); /// \brief Check whether the given list of constraint expressions are /// satisfied (as if in a 'conjunction') given template arguments. /// \param Template the template-like entity that triggered the constraints /// check (either a concept or a constrained entity). /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if true is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. /// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise. bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef ConstraintExprs, ArrayRef TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occurred and satisfaction could not be determined.
/// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occured and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constrains are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied because it was ill-formed. void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation, StringRef Diagnostic); void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old, SourceLocation New); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl 
*Constructor, bool AnyErrors, ArrayRef Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. 
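/// (Editorial illustration, not part of the original header.) The inferred
/// [[gsl::Pointer]] on container iterators enables lifetime diagnostics such
/// as -Wdangling-gsl:
/// \code
///   #include <vector>
///   auto it = std::vector<int>{1, 2, 3}.begin();   // warning: iterator refers to a
///                                                  // temporary destroyed at the end of
///                                                  // the full-expression
/// \endcode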
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl *RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange
SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
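/// For illustration only (not from this header), the kind of code this
/// check rejects:
/// \code
/// struct B { virtual void f() final; };
/// struct D : B {
///   void f() override; // error: D::f overrides B::f, which is marked 'final'
/// };
/// \endcode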
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. 
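/// For background, an illustrative, hand-written case in which an access
/// violation participates in overload resolution as a substitution failure
/// rather than a hard error (assuming C++11 deduction rules):
/// \code
/// class S { using type = int; };                      // 'type' is private
/// template <typename T> typename T::type probe(int);  // substitution must
///                                                     // see the private member
/// template <typename T> void probe(...);
/// // decltype(probe<S>(0)) is 'void': the inaccessible member makes the first
/// // candidate a deduction failure (SFINAE) instead of an error.
/// \endcode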
bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. 
/// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
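/// For instance (illustrative only), the same kind of parameter list is
/// checked in several of the contexts named by the enumerators below:
/// \code
/// template <typename T> struct X;                      // class template
/// template <typename T> using Alias = X<T>;            // type alias template
/// struct S { template <typename T> friend struct X; }; // friend class template
/// \endcode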
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply. 
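/// An illustrative (hand-written) occurrence of such a specifier:
/// \code
/// template <typename T> void f() {
///   class T::template apply<int> *p; // elaborated-type-specifier naming a
///                                    // dependent template-id
/// }
/// \endcode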
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, 
SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \param ConstraintsNotSatisfied If provided, and an error occurred, will /// receive true if the cause for the error is the associated constraints of /// the template not being satisfied by the template arguments. /// /// \returns true if an error occurred, false otherwise.
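/// As a small illustration (not from this header) of argument conversion and
/// defaulting:
/// \code
/// template <typename T, typename U = T *> struct Pair;
/// Pair<int> p; // the written argument list {int} is converted and extended
///              // to the canonical list {int, int *}
/// \endcode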
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template struct X; /// template struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template class TT> struct X; /// template class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template class Metafun> struct X; /// template struct integer_c; /// X xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
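/// An illustrative (hand-written) use of this form:
/// \code
/// template <typename MetaFun, typename T>
/// using apply_t = typename MetaFun::template apply<T>::type;
/// \endcode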
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef LocalParameters, ArrayRef Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. 
Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression, UPPC_Block, /// A type constraint, UPPC_TypeConstraint }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. 
/// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. 
/// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. 
/// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments.
TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl &Deduced, SmallVectorImpl &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// brief A function argument from which we performed template argument // deduction for a call. struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. 
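/// Illustrative example (not from this header), assuming a replacement type
/// of 'int':
/// \code
/// // TypeWithAuto:  const auto &
/// // Replacement:   int
/// // Result:        const int &
/// const auto &r = 42; // 'r' ends up with type 'const int &'
/// \endcode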
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); + TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, + QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool 
RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, // We are normalizing a constraint expression. ConstraintNormalization, // We are substituting into the parameter mapping of an atomic constraint // during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. 
MarkingClassDllexported, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace or track template /// instantiations as they are being constructed. std::vector> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates to true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id.
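// ArgumentPackSubstitutionIndexRAII above is a plain save/overwrite/restore
// guard. The same pattern in a standalone, runnable form (PackState and
// ScopedPackIndex are hypothetical stand-ins, not Sema types):
#include <cassert>

struct PackState {
  // -1 means "substitute parameter packs as themselves".
  int ArgumentPackSubstitutionIndex = -1;
};

class ScopedPackIndex {
  PackState &State;
  int Saved;
public:
  ScopedPackIndex(PackState &S, int NewIndex)
      : State(S), Saved(S.ArgumentPackSubstitutionIndex) {
    State.ArgumentPackSubstitutionIndex = NewIndex;
  }
  ~ScopedPackIndex() { State.ArgumentPackSubstitutionIndex = Saved; }
};

int main() {
  PackState S;
  {
    ScopedPackIndex Guard(S, 2); // substitute the third element of each pack
    assert(S.ArgumentPackSubstitutionIndex == 2);
  }
  assert(S.ArgumentPackSubstitutionIndex == -1); // restored on scope exit
}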
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintNormalization, NamedDecl *Template, SourceRange InstantiationRange); struct ParameterMappingSubstitution {}; /// \brief Note that we are substituting into the parameter mapping of an /// atomic constraint during constraint normalization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParameterMappingSubstitution, NamedDecl *Template, SourceRange InstantiationRange); /// \brief Note that we are substituting template arguments into a part of /// a requirement of a requires expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::Requirement *Req, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are checking the satisfaction of the constraint /// expression inside of a nested requirement. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::NestedRequirement *Req, ConstraintsCheck, SourceRange InstantiationRange = SourceRange()); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed.
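// InstantiatingTemplate's contract is: push a record on construction, refuse
// (and report) once the depth limit is hit, and pop on destruction. A runnable
// sketch of that contract with hypothetical types, not the real Sema logic:
#include <iostream>
#include <string>
#include <vector>

namespace sketch {

struct InstantiationState {
  std::vector<std::string> ActiveInstantiations; // stand-in for CodeSynthesisContexts
  unsigned MaxDepth = 4;                         // deliberately tiny for the demo
};

// Pushes an instantiation record on construction and pops it on destruction.
// If the depth limit is exceeded, the object is marked invalid and the caller
// is expected to bail out.
class InstantiatingGuard {
  InstantiationState &State;
  bool Invalid;
public:
  InstantiatingGuard(InstantiationState &S, std::string Entity)
      : State(S), Invalid(S.ActiveInstantiations.size() >= S.MaxDepth) {
    if (!Invalid)
      State.ActiveInstantiations.push_back(std::move(Entity));
  }
  ~InstantiatingGuard() {
    if (!Invalid)
      State.ActiveInstantiations.pop_back();
  }
  bool isInvalid() const { return Invalid; }
};

int instantiate(InstantiationState &S, unsigned Depth) {
  InstantiatingGuard Guard(S, "entity at depth " + std::to_string(Depth));
  if (Guard.isInvalid())
    return -1; // "recursive template instantiation exceeded maximum depth"
  return instantiate(S, Depth + 1);
}

} // namespace sketch

int main() {
  sketch::InstantiationState S;
  std::cout << sketch::instantiate(S, 0) << "\n"; // prints -1 once the limit is hit
}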
Optional isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet SrcLocSet; typedef llvm::DenseMap IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. 
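// What the SFINAE bookkeeping above models at the language level: a
// substitution failure during deduction silently removes a candidate instead
// of producing an error. A standard C++ illustration, independent of Sema:
#include <iostream>

// Chosen only when T has a nested `value_type`; otherwise substitution of
// `typename T::value_type` fails silently and overload resolution moves on.
template <typename T, typename = typename T::value_type>
constexpr bool has_value_type(int) { return true; }

template <typename T>
constexpr bool has_value_type(long) { return false; }

struct WithVT { using value_type = int; };
struct WithoutVT {};

int main() {
  std::cout << has_value_type<WithVT>(0) << "\n";    // 1: first overload chosen
  std::cout << has_value_type<WithoutVT>(0) << "\n"; // 0: failure trapped, not an error
}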
typedef std::pair PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) { assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } else { // Template instantiations in the PCH may be delayed until the TU. S.PendingInstantiations.swap(SavedPendingInstantiations); S.PendingInstantiations.insert(S.PendingInstantiations.end(), SavedPendingInstantiations.begin(), SavedPendingInstantiations.end()); } } private: Sema &S; SmallVector SavedVTableUses; std::deque SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. 
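// GlobalEagerInstantiationScope and LocalEagerInstantiationScope above both
// rely on swapping the pending queue out, draining what accumulates, and
// swapping the saved queue back in on destruction. A self-contained sketch of
// that pattern with hypothetical names (std::function stands in for a pending
// instantiation):
#include <deque>
#include <functional>
#include <utility>

namespace sketch {

struct PendingWork {
  std::deque<std::function<void()>> Pending; // stand-in for PendingInstantiations
};

class EagerScope {
  PendingWork &W;
  std::deque<std::function<void()>> Saved;
public:
  explicit EagerScope(PendingWork &W) : W(W) { Saved.swap(W.Pending); }

  // Drain everything queued while this scope was active.
  void perform() {
    while (!W.Pending.empty()) {
      auto Task = std::move(W.Pending.front());
      W.Pending.pop_front();
      Task(); // a task may queue further work; the loop picks it up
    }
  }

  ~EagerScope() { W.Pending.swap(Saved); } // restore the outer queue
};

} // namespace sketch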
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl &ParamTypes, SmallVectorImpl *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. 
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( 
VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl &ProtocolRefs, SmallVectorImpl &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList &PList); Decl 
*ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef ProtocolId, SmallVectorImpl &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef identifiers, ArrayRef identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef protocols, ArrayRef protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef Protocols, ArrayRef ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef Protocols, ArrayRef ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef Protocols, ArrayRef ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. 
/// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef allMethods = None, ArrayRef allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
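// ActOnPragmaPack above maintains the pack(...) stack that ultimately changes
// record layout. A plain C++ illustration of the effect (exact sizes are
// ABI-dependent; the values in the comment are the typical ones):
#include <cstdint>
#include <iostream>

struct Padded {
  std::uint8_t a;
  std::uint32_t b; // 3 bytes of padding usually inserted before b
};

#pragma pack(push, 1)
struct Packed {
  std::uint8_t a;
  std::uint32_t b; // no padding while the pack(1) state is active
};
#pragma pack(pop)

int main() {
  std::cout << sizeof(Padded) << " " << sizeof(Packed) << "\n"; // typically "8 5"
}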
bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called to set rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. 
void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. 
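// Several of the Add*Attr helpers above correspond to attributes that can be
// written directly in source. Sample spellings (alignas is standard C++;
// assume_aligned and alloc_align are GCC/Clang extensions; the functions are
// illustrative only):
#include <cstdlib>

// Standard C++ alignment on a variable (cf. AddAlignedAttr).
alignas(64) static int Counters[16];

// Callers may assume the result is 64-byte aligned (cf. AddAssumeAlignedAttr).
__attribute__((assume_aligned(64)))
void *make_aligned_block(std::size_t bytes) {
  return std::aligned_alloc(64, bytes);
}

// The first argument names the returned alignment (cf. AddAllocAlignAttr).
__attribute__((alloc_align(1)))
void *make_block(std::size_t alignment, std::size_t bytes);

int main() {
  void *p = make_aligned_block(256);
  std::free(p);
  return Counters[0];
}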
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = std::string(Ext); } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. 
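// The coroutine entry points above revolve around the promise type that Sema
// looks up and the awaiters it builds for each co_await. A minimal complete
// C++20 coroutine showing those pieces (Task is an arbitrary example type);
// note final_suspend is noexcept, which is what checkFinalSuspendNoThrow
// verifies:
#include <coroutine>
#include <iostream>

struct Task {
  struct promise_type {
    Task get_return_object() { return {}; }
    std::suspend_never initial_suspend() noexcept { return {}; }
    std::suspend_never final_suspend() noexcept { return {}; }
    void return_void() {}
    void unhandled_exception() {}
  };
};

Task greet() {
  std::cout << "before suspend\n";
  co_await std::suspend_never{}; // handled via ActOnCoawaitExpr / BuildResolvedCoawaitExpr
  std::cout << "after suspend\n";
}

int main() { greet(); }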
void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// The current `omp begin/end declare variant` scopes. SmallVector OMPDeclareVariantScopes; /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. FunctionDecl * ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S, Declarator &D); /// Register \p FD as specialization of \p BaseFD in the current `omp /// begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( FunctionDecl *FD, FunctionDecl *BaseFD); public: /// Can we exit a scope at the moment. 
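// The OpenCL checks above reduce to: look the entity up in an extension map
// and reject the use if any required extension is not currently enabled. A
// self-contained sketch of that gating idea (ExtensionGate and its containers
// are hypothetical, not the Sema data structures):
#include <set>
#include <string>
#include <unordered_map>
#include <vector>

namespace sketch {

class ExtensionGate {
  std::set<std::string> Enabled;                                       // enabled extensions
  std::unordered_map<std::string, std::vector<std::string>> Required;  // entity -> extensions

public:
  void enable(const std::string &Ext) { Enabled.insert(Ext); }
  void require(const std::string &Entity, const std::string &Ext) {
    Required[Entity].push_back(Ext);
  }

  // Returns the first missing extension, or an empty string if the use is OK.
  std::string firstMissing(const std::string &Entity) const {
    auto It = Required.find(Entity);
    if (It == Required.end())
      return {};
    for (const std::string &Ext : It->second)
      if (!Enabled.count(Ext))
        return Ext; // caller would diagnose "requires Ext to be enabled"
    return {};
  }
};

} // namespace sketch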
bool isInOpenMPDeclareVariantScope() { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle a `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle a `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for that /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. 
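// ActOnOpenMPCall and the declare-variant handling above act on OpenMP 5.0
// source such as the following (compile with -fopenmp; saxpy and saxpy_cpu
// are arbitrary example names):
#include <cstdio>

// Variant the compiler may call instead of the base function whenever the
// OpenMP context matches the selector in the 'match' clause.
void saxpy_cpu(int n, float a, const float *x, float *y) {
  for (int i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}

#pragma omp declare variant(saxpy_cpu) match(device = {kind(cpu)})
void saxpy(int n, float a, const float *x, float *y) {
  for (int i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}

int main() {
  float x[4] = {1, 2, 3, 4}, y[4] = {0, 0, 0, 0};
  saxpy(4, 2.0f, x, y); // on a CPU host this call resolves to saxpy_cpu
  std::printf("%g %g %g %g\n", y[0], y[1], y[2], y[3]);
}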
void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef VarList, ArrayRef Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. 
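// Illustrative sketch only (not part of the original header): a declare
// mapper directive of the form handled by the Start/VarDecl/End callbacks
// around this point; the type, mapper name, and members are placeholders.
//
//   struct vec { int len; double *data; };
//   #pragma omp declare mapper(myvec : struct vec v) map(v, v.data[0:v.len])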
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef ClauseList); /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred function calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement.
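// Illustrative sketch only (not part of the original header): a sections
// region as written in user code; the enclosing directive and each nested
// '#pragma omp section' are handled by the two callbacks below. work_a and
// work_b are placeholder calls.
//
//   #pragma omp sections
//   {
//   #pragma omp section
//     work_a();
//   #pragma omp section
//     work_b();
//   }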
StmtResult ActOnOpenMPSectionsDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPOrderedDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. 
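// Illustrative sketch only (not part of the original header): a combined
// construct of the form handled by the callback below; the loop bounds and
// arrays are placeholders.
//
//   #pragma omp teams distribute parallel for
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];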
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef Uniforms, ArrayRef Aligneds, ArrayRef Alignments, ArrayRef Linears, ArrayRef LinModifiers, ArrayRef Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The context traits associated with the function variant. 
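// Illustrative sketch only (not part of the original header): the source
// form this callback processes; 'base' and 'base_parallel' are placeholder
// names.
//
//   int base_parallel(int x);
//   #pragma omp declare variant(base_parallel) match(construct = {parallel})
//   int base(int x);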
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'detach' clause. OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. 
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause.
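// Illustrative sketch only (not part of the original header): this clause
// appears on the 'requires' directive, e.g.
//
//   #pragma omp requires atomic_default_mem_order(seq_cst)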
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef MapTypeModifiers, ArrayRef MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc); /// Called on well-formed 'inclusive' clause. OMPClause *ActOnOpenMPInclusiveClause(ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. 
OMPClause *ActOnOpenMPCopyinClause(ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef MapTypeModifiers, ArrayRef MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef VarList, const OMPVarListLocTy &Locs, ArrayRef UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. 
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef Data); /// Called on well-formed 'affinity' clause. OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef Locators); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. 
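// Illustrative sketch only (not part of the original header): one situation
// where this conversion applies is binding a reference to a prvalue, e.g.
//
//   const int &r = 1 + 2;   // the prvalue '3' is materialized as a
//                           // temporary object for 'r' to bind to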
ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collect argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef Args, SmallVectorImpl &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointers types that are not compatible, but we accept them as an /// extension.
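// Illustrative sketch only (not part of the original header): a C case that
// falls into this category; 'f' and 'fp' are placeholder names.
//
//   void f(long);
//   void (*fp)(int) = f;   // incompatible function pointer types, accepted
//                          // as an extension with a warning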
IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id " = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. 
AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
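// Illustrative sketch only (not part of the original header): these routines
// compute the result type of a builtin binary operator after the usual
// conversions, e.g.
//
//   int i = 1;
//   double d = 2.5;
//   auto x = i + d;   // 'i' converts to double; the addition has type
//                     // 'double' (C99 6.3.1.8, C++ [expr.arith.conv])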
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
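// Illustrative sketch only (not part of the original header): element-wise
// arithmetic on extension vector types, e.g. using the GCC/Clang
// vector_size attribute.
//
//   typedef int v4si __attribute__((vector_size(16)));
//   v4si a, b;
//   v4si c = a + b;   // both operands have type 'v4si'; the result does too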
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType ¶mType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. 
// We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair get() const { return std::make_pair(cast_or_null(ConditionVar), Condition.get()); } llvm::Optional getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. 
bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap, std::vector> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. 
/// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional ImmediateDiag; llvm::Optional PartialDiagId; }; /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. 
/// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); /// Check if the declaration is allowed to be used in expressions for the /// offloading devices. void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas are host device functions by default unless they have an /// explicit host or device attribute. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure the kernel call, with the /// parameters specified via <<<>>>.
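// [Editorial note, not part of the original header] A hedged illustration of
// what the "launch configuration function" named by the declaration below is
// used for: given a kernel launch such as
//
//   kernel<<<numBlocks, threadsPerBlock, sharedMemBytes, stream>>>(args...);
//
// the frontend first emits a call to that configuration function (historically
// cudaConfigureCall; newer CUDA runtimes use __cudaPushCallConfiguration)
// before calling the host-side kernel stub.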
std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. 
\p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl &Results); //@} 
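// [Editorial note, not part of the original header] A hedged sketch of how the
// CodeComplete* entry points above are normally driven: a client installs a
// CodeCompleteConsumer and requests completion at a position, for example
//
//   clang -cc1 -fsyntax-only -code-completion-at=input.cpp:12:8 input.cpp
//
// When the parser reaches that position it calls the matching hook, e.g.
// Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Statement) inside
// a statement, and the consumer then formats the produced results.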
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef ArgNums); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, int ArgNum); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef ArgNums); bool 
CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); // Matrix builtin handling. 
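// [Editorial illustration, not part of the original header] The
// SemaBuiltinMatrix* helpers below check calls to Clang's matrix-type
// builtins, e.g. (assuming -fenable-matrix, with m an m4x4_t value and ptr a
// float*):
//
//   using m4x4_t = float __attribute__((matrix_type(4, 4)));
//   m4x4_t t = __builtin_matrix_transpose(m);
//   m4x4_t l = __builtin_matrix_column_major_load(ptr, 4, 4, /*Stride=*/4);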
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. 
unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing. Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. 
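// [Editorial note] A reading of the check below: when PartialOverloading is
// set (e.g. during code completion, where the caret sits after a comma), the
// argument currently being typed has not been parsed yet, so one extra
// argument is assumed before comparing against NumParams; with NumParams == 2
// and two arguments already parsed, a trailing comma therefore already counts
// as too many.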
if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector DelayedDllExportClasses; SmallVector DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. 
/// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for devive yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. 
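// [Editorial note] The specialization below follows the usual DenseMapInfo
// contract: two reserved keys (an "empty" key and a "tombstone" key that can
// never be real entries), a hash function, and an equality predicate. Here the
// reserved keys reuse the CanonicalDeclPtr info's reserved pointers paired
// with an invalid SourceLocation, and the hash combines the declaration's hash
// with the raw source-location encoding.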
template <> struct DenseMapInfo { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif diff --git a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp index bf51d35d9693..1497cccff175 100644 --- a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp +++ b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp @@ -1,11278 +1,11268 @@ //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements the ASTContext interface. // //===----------------------------------------------------------------------===// #include "clang/AST/ASTContext.h" #include "CXXABI.h" #include "Interp/Context.h" #include "clang/AST/APValue.h" #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTMutationListener.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/AttrIterator.h" #include "clang/AST/CharUnits.h" #include "clang/AST/Comment.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclBase.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclContextInternals.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclOpenMP.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/DependenceFlags.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/Mangle.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/ParentMapContext.h" #include "clang/AST/RawCommentList.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/Stmt.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/UnresolvedSet.h" #include "clang/AST/VTableBuilder.h" #include "clang/Basic/AddressSpaces.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/CommentOptions.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FixedPoint.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Linkage.h" #include "clang/Basic/Module.h" #include "clang/Basic/ObjCRuntime.h" #include "clang/Basic/SanitizerBlacklist.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TargetCXXABI.h" #include "clang/Basic/TargetInfo.h" #include "clang/Basic/XRayLists.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" 
#include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/None.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/PointerUnion.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Triple.h" #include "llvm/Support/Capacity.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include #include #include #include #include #include #include #include #include #include using namespace clang; enum FloatingRank { BFloat16Rank, Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank }; /// \returns location that is relevant when searching for Doc comments related /// to \p D. static SourceLocation getDeclLocForCommentSearch(const Decl *D, SourceManager &SourceMgr) { assert(D); // User can not attach documentation to implicit declarations. if (D->isImplicit()) return {}; // User can not attach documentation to implicit instantiations. if (const auto *FD = dyn_cast(D)) { if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) return {}; } if (const auto *VD = dyn_cast(D)) { if (VD->isStaticDataMember() && VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) return {}; } if (const auto *CRD = dyn_cast(D)) { if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) return {}; } if (const auto *CTSD = dyn_cast(D)) { TemplateSpecializationKind TSK = CTSD->getSpecializationKind(); if (TSK == TSK_ImplicitInstantiation || TSK == TSK_Undeclared) return {}; } if (const auto *ED = dyn_cast(D)) { if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) return {}; } if (const auto *TD = dyn_cast(D)) { // When tag declaration (but not definition!) is part of the // decl-specifier-seq of some other declaration, it doesn't get comment if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition()) return {}; } // TODO: handle comments for function parameters properly. if (isa(D)) return {}; // TODO: we could look up template parameter documentation in the template // documentation. if (isa(D) || isa(D) || isa(D)) return {}; // Find declaration location. // For Objective-C declarations we generally don't expect to have multiple // declarators, thus use declaration starting location as the "declaration // location". // For all other declarations multiple declarators are used quite frequently, // so we use the location of the identifier as the "declaration location". if (isa(D) || isa(D) || isa(D) || isa(D) || isa(D) || // Allow association with Y across {} in `typedef struct X {} Y`. isa(D)) return D->getBeginLoc(); else { const SourceLocation DeclLoc = D->getLocation(); if (DeclLoc.isMacroID()) { if (isa(D)) { // If location of the typedef name is in a macro, it is because being // declared via a macro. Try using declaration's starting location as // the "declaration location". return D->getBeginLoc(); } else if (const auto *TD = dyn_cast(D)) { // If location of the tag decl is inside a macro, but the spelling of // the tag name comes from a macro argument, it looks like a special // macro like NS_ENUM is being used to define the tag decl. In that // case, adjust the source location to the expansion loc so that we can // attach the comment to the tag decl. 
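// [Editorial illustration] The case handled here looks roughly like:
//
//   /// Doc comment for the enum.
//   typedef NS_ENUM(NSUInteger, MyEnum) { MyEnumA, MyEnumB };
//
// The tag decl's own location is inside the NS_ENUM expansion, but its name is
// spelled in a macro argument, so the search location is moved to the
// expansion point, where the doc comment really does precede the macro.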
if (SourceMgr.isMacroArgExpansion(DeclLoc) && TD->isCompleteDefinition()) return SourceMgr.getExpansionLoc(DeclLoc); } } return DeclLoc; } return {}; } RawComment *ASTContext::getRawCommentForDeclNoCacheImpl( const Decl *D, const SourceLocation RepresentativeLocForDecl, const std::map &CommentsInTheFile) const { // If the declaration doesn't map directly to a location in a file, we // can't find the comment. if (RepresentativeLocForDecl.isInvalid() || !RepresentativeLocForDecl.isFileID()) return nullptr; // If there are no comments anywhere, we won't find anything. if (CommentsInTheFile.empty()) return nullptr; // Decompose the location for the declaration and find the beginning of the // file buffer. const std::pair DeclLocDecomp = SourceMgr.getDecomposedLoc(RepresentativeLocForDecl); // Slow path. auto OffsetCommentBehindDecl = CommentsInTheFile.lower_bound(DeclLocDecomp.second); // First check whether we have a trailing comment. if (OffsetCommentBehindDecl != CommentsInTheFile.end()) { RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second; if ((CommentBehindDecl->isDocumentation() || LangOpts.CommentOpts.ParseAllComments) && CommentBehindDecl->isTrailingComment() && (isa(D) || isa(D) || isa(D) || isa(D) || isa(D))) { // Check that Doxygen trailing comment comes after the declaration, starts // on the same line and in the same file as the declaration. if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) == Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first, OffsetCommentBehindDecl->first)) { return CommentBehindDecl; } } } // The comment just after the declaration was not a trailing comment. // Let's look at the previous comment. if (OffsetCommentBehindDecl == CommentsInTheFile.begin()) return nullptr; auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl; RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second; // Check that we actually have a non-member Doxygen comment. if (!(CommentBeforeDecl->isDocumentation() || LangOpts.CommentOpts.ParseAllComments) || CommentBeforeDecl->isTrailingComment()) return nullptr; // Decompose the end of the comment. const unsigned CommentEndOffset = Comments.getCommentEndOffset(CommentBeforeDecl); // Get the corresponding buffer. bool Invalid = false; const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first, &Invalid).data(); if (Invalid) return nullptr; // Extract text between the comment and declaration. StringRef Text(Buffer + CommentEndOffset, DeclLocDecomp.second - CommentEndOffset); // There should be no other declarations or preprocessor directives between // comment and declaration. if (Text.find_first_of(";{}#@") != StringRef::npos) return nullptr; return CommentBeforeDecl; } RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const { const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr); // If the declaration doesn't map directly to a location in a file, we // can't find the comment. 
if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) return nullptr; if (ExternalSource && !CommentsLoaded) { ExternalSource->ReadComments(); CommentsLoaded = true; } if (Comments.empty()) return nullptr; const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first; const auto CommentsInThisFile = Comments.getCommentsInFile(File); if (!CommentsInThisFile || CommentsInThisFile->empty()) return nullptr; return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile); } void ASTContext::addComment(const RawComment &RC) { assert(LangOpts.RetainCommentsFromSystemHeaders || !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin())); Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc); } /// If we have a 'templated' declaration for a template, adjust 'D' to /// refer to the actual template. /// If we have an implicit instantiation, adjust 'D' to refer to template. static const Decl &adjustDeclToTemplate(const Decl &D) { if (const auto *FD = dyn_cast(&D)) { // Is this function declaration part of a function template? if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate()) return *FTD; // Nothing to do if function is not an implicit instantiation. if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) return D; // Function is an implicit instantiation of a function template? if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate()) return *FTD; // Function is instantiated from a member definition of a class template? if (const FunctionDecl *MemberDecl = FD->getInstantiatedFromMemberFunction()) return *MemberDecl; return D; } if (const auto *VD = dyn_cast(&D)) { // Static data member is instantiated from a member definition of a class // template? if (VD->isStaticDataMember()) if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember()) return *MemberDecl; return D; } if (const auto *CRD = dyn_cast(&D)) { // Is this class declaration part of a class template? if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate()) return *CTD; // Class is an implicit instantiation of a class template or partial // specialization? if (const auto *CTSD = dyn_cast(CRD)) { if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation) return D; llvm::PointerUnion PU = CTSD->getSpecializedTemplateOrPartial(); return PU.is() ? *static_cast(PU.get()) : *static_cast( PU.get()); } // Class is instantiated from a member definition of a class template? if (const MemberSpecializationInfo *Info = CRD->getMemberSpecializationInfo()) return *Info->getInstantiatedFrom(); return D; } if (const auto *ED = dyn_cast(&D)) { // Enum is instantiated from a member definition of a class template? if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum()) return *MemberDecl; return D; } // FIXME: Adjust alias templates? return D; } const RawComment *ASTContext::getRawCommentForAnyRedecl( const Decl *D, const Decl **OriginalDecl) const { if (!D) { if (OriginalDecl) OriginalDecl = nullptr; return nullptr; } D = &adjustDeclToTemplate(*D); // Any comment directly attached to D? { auto DeclComment = DeclRawComments.find(D); if (DeclComment != DeclRawComments.end()) { if (OriginalDecl) *OriginalDecl = D; return DeclComment->second; } } // Any comment attached to any redeclaration of D? 
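// [Editorial illustration, names hypothetical] This redeclaration-chain lookup
// is what lets a definition pick up a doc comment written on an earlier
// declaration:
//
//   /// Computes the frobnication factor.
//   int frob(int x);           // header: the comment is attached here
//   int frob(int x) { ... }    // source file: no comment of its own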
const Decl *CanonicalD = D->getCanonicalDecl(); if (!CanonicalD) return nullptr; { auto RedeclComment = RedeclChainComments.find(CanonicalD); if (RedeclComment != RedeclChainComments.end()) { if (OriginalDecl) *OriginalDecl = RedeclComment->second; auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second); assert(CommentAtRedecl != DeclRawComments.end() && "This decl is supposed to have comment attached."); return CommentAtRedecl->second; } } // Any redeclarations of D that we haven't checked for comments yet? // We can't use DenseMap::iterator directly since it'd get invalid. auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * { auto LookupRes = CommentlessRedeclChains.find(CanonicalD); if (LookupRes != CommentlessRedeclChains.end()) return LookupRes->second; return nullptr; }(); for (const auto Redecl : D->redecls()) { assert(Redecl); // Skip all redeclarations that have been checked previously. if (LastCheckedRedecl) { if (LastCheckedRedecl == Redecl) { LastCheckedRedecl = nullptr; } continue; } const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl); if (RedeclComment) { cacheRawCommentForDecl(*Redecl, *RedeclComment); if (OriginalDecl) *OriginalDecl = Redecl; return RedeclComment; } CommentlessRedeclChains[CanonicalD] = Redecl; } if (OriginalDecl) *OriginalDecl = nullptr; return nullptr; } void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD, const RawComment &Comment) const { assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments); DeclRawComments.try_emplace(&OriginalD, &Comment); const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl(); RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD); CommentlessRedeclChains.erase(CanonicalDecl); } static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod, SmallVectorImpl &Redeclared) { const DeclContext *DC = ObjCMethod->getDeclContext(); if (const auto *IMD = dyn_cast(DC)) { const ObjCInterfaceDecl *ID = IMD->getClassInterface(); if (!ID) return; // Add redeclared method here. for (const auto *Ext : ID->known_extensions()) { if (ObjCMethodDecl *RedeclaredMethod = Ext->getMethod(ObjCMethod->getSelector(), ObjCMethod->isInstanceMethod())) Redeclared.push_back(RedeclaredMethod); } } } void ASTContext::attachCommentsToJustParsedDecls(ArrayRef Decls, const Preprocessor *PP) { if (Comments.empty() || Decls.empty()) return; FileID File; for (Decl *D : Decls) { SourceLocation Loc = D->getLocation(); if (Loc.isValid()) { // See if there are any new comments that are not attached to a decl. // The location doesn't have to be precise - we care only about the file. File = SourceMgr.getDecomposedLoc(Loc).first; break; } } if (File.isInvalid()) return; auto CommentsInThisFile = Comments.getCommentsInFile(File); if (!CommentsInThisFile || CommentsInThisFile->empty() || CommentsInThisFile->rbegin()->second->isAttached()) return; // There is at least one comment not attached to a decl. // Maybe it should be attached to one of Decls? // // Note that this way we pick up not only comments that precede the // declaration, but also comments that *follow* the declaration -- thanks to // the lookahead in the lexer: we've consumed the semicolon and looked // ahead through comments. 
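  // (The loop below re-runs the same file-local search used by
  //  getRawCommentForDeclNoCache for each declaration and, on a hit, caches
  //  both the raw comment and its parsed FullComment under the canonical
  //  declaration, so later getCommentForDecl calls can return immediately.)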
for (const Decl *D : Decls) { assert(D); if (D->isInvalidDecl()) continue; D = &adjustDeclToTemplate(*D); const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr); if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) continue; if (DeclRawComments.count(D) > 0) continue; if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) { cacheRawCommentForDecl(*D, *DocComment); comments::FullComment *FC = DocComment->parse(*this, PP, D); ParsedComments[D->getCanonicalDecl()] = FC; } } } comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC, const Decl *D) const { auto *ThisDeclInfo = new (*this) comments::DeclInfo; ThisDeclInfo->CommentDecl = D; ThisDeclInfo->IsFilled = false; ThisDeclInfo->fill(); ThisDeclInfo->CommentDecl = FC->getDecl(); if (!ThisDeclInfo->TemplateParameters) ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters; comments::FullComment *CFC = new (*this) comments::FullComment(FC->getBlocks(), ThisDeclInfo); return CFC; } comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const { const RawComment *RC = getRawCommentForDeclNoCache(D); return RC ? RC->parse(*this, nullptr, D) : nullptr; } comments::FullComment *ASTContext::getCommentForDecl( const Decl *D, const Preprocessor *PP) const { if (!D || D->isInvalidDecl()) return nullptr; D = &adjustDeclToTemplate(*D); const Decl *Canonical = D->getCanonicalDecl(); llvm::DenseMap::iterator Pos = ParsedComments.find(Canonical); if (Pos != ParsedComments.end()) { if (Canonical != D) { comments::FullComment *FC = Pos->second; comments::FullComment *CFC = cloneFullComment(FC, D); return CFC; } return Pos->second; } const Decl *OriginalDecl = nullptr; const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl); if (!RC) { if (isa(D) || isa(D)) { SmallVector Overridden; const auto *OMD = dyn_cast(D); if (OMD && OMD->isPropertyAccessor()) if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl()) if (comments::FullComment *FC = getCommentForDecl(PDecl, PP)) return cloneFullComment(FC, D); if (OMD) addRedeclaredMethods(OMD, Overridden); getOverriddenMethods(dyn_cast(D), Overridden); for (unsigned i = 0, e = Overridden.size(); i < e; i++) if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP)) return cloneFullComment(FC, D); } else if (const auto *TD = dyn_cast(D)) { // Attach any tag type's documentation to its typedef if latter // does not have one of its own. QualType QT = TD->getUnderlyingType(); if (const auto *TT = QT->getAs()) if (const Decl *TD = TT->getDecl()) if (comments::FullComment *FC = getCommentForDecl(TD, PP)) return cloneFullComment(FC, D); } else if (const auto *IC = dyn_cast(D)) { while (IC->getSuperClass()) { IC = IC->getSuperClass(); if (comments::FullComment *FC = getCommentForDecl(IC, PP)) return cloneFullComment(FC, D); } } else if (const auto *CD = dyn_cast(D)) { if (const ObjCInterfaceDecl *IC = CD->getClassInterface()) if (comments::FullComment *FC = getCommentForDecl(IC, PP)) return cloneFullComment(FC, D); } else if (const auto *RD = dyn_cast(D)) { if (!(RD = RD->getDefinition())) return nullptr; // Check non-virtual bases. 
for (const auto &I : RD->bases()) { if (I.isVirtual() || (I.getAccessSpecifier() != AS_public)) continue; QualType Ty = I.getType(); if (Ty.isNull()) continue; if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) { if (!(NonVirtualBase= NonVirtualBase->getDefinition())) continue; if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP)) return cloneFullComment(FC, D); } } // Check virtual bases. for (const auto &I : RD->vbases()) { if (I.getAccessSpecifier() != AS_public) continue; QualType Ty = I.getType(); if (Ty.isNull()) continue; if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) { if (!(VirtualBase= VirtualBase->getDefinition())) continue; if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP)) return cloneFullComment(FC, D); } } } return nullptr; } // If the RawComment was attached to other redeclaration of this Decl, we // should parse the comment in context of that other Decl. This is important // because comments can contain references to parameter names which can be // different across redeclarations. if (D != OriginalDecl && OriginalDecl) return getCommentForDecl(OriginalDecl, PP); comments::FullComment *FC = RC->parse(*this, PP, D); ParsedComments[Canonical] = FC; return FC; } void ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &C, TemplateTemplateParmDecl *Parm) { ID.AddInteger(Parm->getDepth()); ID.AddInteger(Parm->getPosition()); ID.AddBoolean(Parm->isParameterPack()); TemplateParameterList *Params = Parm->getTemplateParameters(); ID.AddInteger(Params->size()); for (TemplateParameterList::const_iterator P = Params->begin(), PEnd = Params->end(); P != PEnd; ++P) { if (const auto *TTP = dyn_cast(*P)) { ID.AddInteger(0); ID.AddBoolean(TTP->isParameterPack()); const TypeConstraint *TC = TTP->getTypeConstraint(); ID.AddBoolean(TC != nullptr); if (TC) TC->getImmediatelyDeclaredConstraint()->Profile(ID, C, /*Canonical=*/true); if (TTP->isExpandedParameterPack()) { ID.AddBoolean(true); ID.AddInteger(TTP->getNumExpansionParameters()); } else ID.AddBoolean(false); continue; } if (const auto *NTTP = dyn_cast(*P)) { ID.AddInteger(1); ID.AddBoolean(NTTP->isParameterPack()); ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr()); if (NTTP->isExpandedParameterPack()) { ID.AddBoolean(true); ID.AddInteger(NTTP->getNumExpansionTypes()); for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) { QualType T = NTTP->getExpansionType(I); ID.AddPointer(T.getCanonicalType().getAsOpaquePtr()); } } else ID.AddBoolean(false); continue; } auto *TTP = cast(*P); ID.AddInteger(2); Profile(ID, C, TTP); } Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause(); ID.AddBoolean(RequiresClause != nullptr); if (RequiresClause) RequiresClause->Profile(ID, C, /*Canonical=*/true); } static Expr * canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC, QualType ConstrainedType) { // This is a bit ugly - we need to form a new immediately-declared // constraint that references the new parameter; this would ideally // require semantic analysis (e.g. template struct S {}; - the // converted arguments of C could be an argument pack if C is // declared as template concept C = ...). // We don't have semantic analysis here so we dig deep into the // ready-made constraint expr and change the thing manually. 
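  // (For example: given "template <typename T> concept C = ...;" and a
  //  parameter declared as "template <C U> ...", the immediately-declared
  //  constraint is the expression C<U>; the code below rebuilds that
  //  specialization so its first argument refers to the newly built canonical
  //  parameter instead of U.)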
ConceptSpecializationExpr *CSE; if (const auto *Fold = dyn_cast(IDC)) CSE = cast(Fold->getLHS()); else CSE = cast(IDC); ArrayRef OldConverted = CSE->getTemplateArguments(); SmallVector NewConverted; NewConverted.reserve(OldConverted.size()); if (OldConverted.front().getKind() == TemplateArgument::Pack) { // The case: // template concept C = true; // template T> struct S; -> constraint is C<{T, int}> NewConverted.push_back(ConstrainedType); for (auto &Arg : OldConverted.front().pack_elements().drop_front(1)) NewConverted.push_back(Arg); TemplateArgument NewPack(NewConverted); NewConverted.clear(); NewConverted.push_back(NewPack); assert(OldConverted.size() == 1 && "Template parameter pack should be the last parameter"); } else { assert(OldConverted.front().getKind() == TemplateArgument::Type && "Unexpected first argument kind for immediately-declared " "constraint"); NewConverted.push_back(ConstrainedType); for (auto &Arg : OldConverted.drop_front(1)) NewConverted.push_back(Arg); } Expr *NewIDC = ConceptSpecializationExpr::Create( C, CSE->getNamedConcept(), NewConverted, nullptr, CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack()); if (auto *OrigFold = dyn_cast(IDC)) NewIDC = new (C) CXXFoldExpr(OrigFold->getType(), SourceLocation(), NewIDC, BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr, SourceLocation(), /*NumExpansions=*/None); return NewIDC; } TemplateTemplateParmDecl * ASTContext::getCanonicalTemplateTemplateParmDecl( TemplateTemplateParmDecl *TTP) const { // Check if we already have a canonical template template parameter. llvm::FoldingSetNodeID ID; CanonicalTemplateTemplateParm::Profile(ID, *this, TTP); void *InsertPos = nullptr; CanonicalTemplateTemplateParm *Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); if (Canonical) return Canonical->getParam(); // Build a canonical template parameter list. TemplateParameterList *Params = TTP->getTemplateParameters(); SmallVector CanonParams; CanonParams.reserve(Params->size()); for (TemplateParameterList::const_iterator P = Params->begin(), PEnd = Params->end(); P != PEnd; ++P) { if (const auto *TTP = dyn_cast(*P)) { TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(), TTP->getDepth(), TTP->getIndex(), nullptr, false, TTP->isParameterPack(), TTP->hasTypeConstraint(), TTP->isExpandedParameterPack() ? 
llvm::Optional(TTP->getNumExpansionParameters()) : None); if (const auto *TC = TTP->getTypeConstraint()) { QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0); Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint( *this, TC->getImmediatelyDeclaredConstraint(), ParamAsArgument); TemplateArgumentListInfo CanonArgsAsWritten; if (auto *Args = TC->getTemplateArgsAsWritten()) for (const auto &ArgLoc : Args->arguments()) CanonArgsAsWritten.addArgument( TemplateArgumentLoc(ArgLoc.getArgument(), TemplateArgumentLocInfo())); NewTTP->setTypeConstraint( NestedNameSpecifierLoc(), DeclarationNameInfo(TC->getNamedConcept()->getDeclName(), SourceLocation()), /*FoundDecl=*/nullptr, // Actually canonicalizing a TemplateArgumentLoc is difficult so we // simply omit the ArgsAsWritten TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC); } CanonParams.push_back(NewTTP); } else if (const auto *NTTP = dyn_cast(*P)) { QualType T = getCanonicalType(NTTP->getType()); TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); NonTypeTemplateParmDecl *Param; if (NTTP->isExpandedParameterPack()) { SmallVector ExpandedTypes; SmallVector ExpandedTInfos; for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) { ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I))); ExpandedTInfos.push_back( getTrivialTypeSourceInfo(ExpandedTypes.back())); } Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(), NTTP->getDepth(), NTTP->getPosition(), nullptr, T, TInfo, ExpandedTypes, ExpandedTInfos); } else { Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(), NTTP->getDepth(), NTTP->getPosition(), nullptr, T, NTTP->isParameterPack(), TInfo); } if (AutoType *AT = T->getContainedAutoType()) { if (AT->isConstrained()) { Param->setPlaceholderTypeConstraint( canonicalizeImmediatelyDeclaredConstraint( *this, NTTP->getPlaceholderTypeConstraint(), T)); } } CanonParams.push_back(Param); } else CanonParams.push_back(getCanonicalTemplateTemplateParmDecl( cast(*P))); } Expr *CanonRequiresClause = nullptr; if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause()) CanonRequiresClause = RequiresClause; TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(), TTP->getPosition(), TTP->isParameterPack(), nullptr, TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(), CanonParams, SourceLocation(), CanonRequiresClause)); // Get the new insert position for the node we care about. Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); assert(!Canonical && "Shouldn't be in the map!"); (void)Canonical; // Create the canonical template template parameter entry. 
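  // (The node created below is inserted into CanonTemplateTemplateParms, so
  //  any later template template parameter that profiles identically will be
  //  mapped to this same canonical declaration rather than to a new one.)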
Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP); CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos); return CanonTTP; } CXXABI *ASTContext::createCXXABI(const TargetInfo &T) { if (!LangOpts.CPlusPlus) return nullptr; switch (T.getCXXABI().getKind()) { case TargetCXXABI::Fuchsia: case TargetCXXABI::GenericARM: // Same as Itanium at this level case TargetCXXABI::iOS: case TargetCXXABI::iOS64: case TargetCXXABI::WatchOS: case TargetCXXABI::GenericAArch64: case TargetCXXABI::GenericMIPS: case TargetCXXABI::GenericItanium: case TargetCXXABI::WebAssembly: case TargetCXXABI::XL: return CreateItaniumCXXABI(*this); case TargetCXXABI::Microsoft: return CreateMicrosoftCXXABI(*this); } llvm_unreachable("Invalid CXXABI type!"); } interp::Context &ASTContext::getInterpContext() { if (!InterpContext) { InterpContext.reset(new interp::Context(*this)); } return *InterpContext.get(); } ParentMapContext &ASTContext::getParentMapContext() { if (!ParentMapCtx) ParentMapCtx.reset(new ParentMapContext(*this)); return *ParentMapCtx.get(); } static const LangASMap *getAddressSpaceMap(const TargetInfo &T, const LangOptions &LOpts) { if (LOpts.FakeAddressSpaceMap) { // The fake address space map must have a distinct entry for each // language-specific address space. static const unsigned FakeAddrSpaceMap[] = { 0, // Default 1, // opencl_global 3, // opencl_local 2, // opencl_constant 0, // opencl_private 4, // opencl_generic 5, // cuda_device 6, // cuda_constant 7, // cuda_shared 8, // ptr32_sptr 9, // ptr32_uptr 10 // ptr64 }; return &FakeAddrSpaceMap; } else { return &T.getAddressSpaceMap(); } } static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI, const LangOptions &LangOpts) { switch (LangOpts.getAddressSpaceMapMangling()) { case LangOptions::ASMM_Target: return TI.useAddressSpaceMapMangling(); case LangOptions::ASMM_On: return true; case LangOptions::ASMM_Off: return false; } llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything."); } ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM, IdentifierTable &idents, SelectorTable &sels, Builtin::Context &builtins) : ConstantArrayTypes(this_()), FunctionProtoTypes(this_()), TemplateSpecializationTypes(this_()), DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()), SubstTemplateTemplateParmPacks(this_()), CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts), SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)), XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles, LangOpts.XRayNeverInstrumentFiles, LangOpts.XRayAttrListFiles, SM)), PrintingPolicy(LOpts), Idents(idents), Selectors(sels), BuiltinInfo(builtins), DeclarationNames(*this), Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), CompCategories(this_()), LastSDM(nullptr, 0) { TUDecl = TranslationUnitDecl::Create(*this); TraversalScope = {TUDecl}; } ASTContext::~ASTContext() { // Release the DenseMaps associated with DeclContext objects. // FIXME: Is this the ideal solution? ReleaseDeclContextMaps(); // Call all of the deallocation functions on all of their targets. for (auto &Pair : Deallocations) (Pair.first)(Pair.second); // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed // because they can contain DenseMaps. for (llvm::DenseMap::iterator I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; ) // Increment in loop to prevent using deallocated memory. 
if (auto *R = const_cast((I++)->second)) R->Destroy(*this); for (llvm::DenseMap::iterator I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) { // Increment in loop to prevent using deallocated memory. if (auto *R = const_cast((I++)->second)) R->Destroy(*this); } for (llvm::DenseMap::iterator A = DeclAttrs.begin(), AEnd = DeclAttrs.end(); A != AEnd; ++A) A->second->~AttrVec(); for (const auto &Value : ModuleInitializers) Value.second->~PerModuleInitializers(); for (APValue *Value : APValueCleanups) Value->~APValue(); } void ASTContext::setTraversalScope(const std::vector &TopLevelDecls) { TraversalScope = TopLevelDecls; getParentMapContext().clear(); } void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const { Deallocations.push_back({Callback, Data}); } void ASTContext::setExternalSource(IntrusiveRefCntPtr Source) { ExternalSource = std::move(Source); } void ASTContext::PrintStats() const { llvm::errs() << "\n*** AST Context Stats:\n"; llvm::errs() << " " << Types.size() << " types total.\n"; unsigned counts[] = { #define TYPE(Name, Parent) 0, #define ABSTRACT_TYPE(Name, Parent) #include "clang/AST/TypeNodes.inc" 0 // Extra }; for (unsigned i = 0, e = Types.size(); i != e; ++i) { Type *T = Types[i]; counts[(unsigned)T->getTypeClass()]++; } unsigned Idx = 0; unsigned TotalBytes = 0; #define TYPE(Name, Parent) \ if (counts[Idx]) \ llvm::errs() << " " << counts[Idx] << " " << #Name \ << " types, " << sizeof(Name##Type) << " each " \ << "(" << counts[Idx] * sizeof(Name##Type) \ << " bytes)\n"; \ TotalBytes += counts[Idx] * sizeof(Name##Type); \ ++Idx; #define ABSTRACT_TYPE(Name, Parent) #include "clang/AST/TypeNodes.inc" llvm::errs() << "Total bytes = " << TotalBytes << "\n"; // Implicit special member functions. llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/" << NumImplicitDefaultConstructors << " implicit default constructors created\n"; llvm::errs() << NumImplicitCopyConstructorsDeclared << "/" << NumImplicitCopyConstructors << " implicit copy constructors created\n"; if (getLangOpts().CPlusPlus) llvm::errs() << NumImplicitMoveConstructorsDeclared << "/" << NumImplicitMoveConstructors << " implicit move constructors created\n"; llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/" << NumImplicitCopyAssignmentOperators << " implicit copy assignment operators created\n"; if (getLangOpts().CPlusPlus) llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/" << NumImplicitMoveAssignmentOperators << " implicit move assignment operators created\n"; llvm::errs() << NumImplicitDestructorsDeclared << "/" << NumImplicitDestructors << " implicit destructors created\n"; if (ExternalSource) { llvm::errs() << "\n"; ExternalSource->PrintStats(); } BumpAlloc.PrintStats(); } void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M, bool NotifyListeners) { if (NotifyListeners) if (auto *Listener = getASTMutationListener()) Listener->RedefinedHiddenDefinition(ND, M); MergedDefModules[cast(ND->getCanonicalDecl())].push_back(M); } void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) { auto It = MergedDefModules.find(cast(ND->getCanonicalDecl())); if (It == MergedDefModules.end()) return; auto &Merged = It->second; llvm::DenseSet Found; for (Module *&M : Merged) if (!Found.insert(M).second) M = nullptr; Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end()); } ArrayRef ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) { auto MergedIt = 
MergedDefModules.find(cast(Def->getCanonicalDecl())); if (MergedIt == MergedDefModules.end()) return None; return MergedIt->second; } void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) { if (LazyInitializers.empty()) return; auto *Source = Ctx.getExternalSource(); assert(Source && "lazy initializers but no external source"); auto LazyInits = std::move(LazyInitializers); LazyInitializers.clear(); for (auto ID : LazyInits) Initializers.push_back(Source->GetExternalDecl(ID)); assert(LazyInitializers.empty() && "GetExternalDecl for lazy module initializer added more inits"); } void ASTContext::addModuleInitializer(Module *M, Decl *D) { // One special case: if we add a module initializer that imports another // module, and that module's only initializer is an ImportDecl, simplify. if (const auto *ID = dyn_cast(D)) { auto It = ModuleInitializers.find(ID->getImportedModule()); // Maybe the ImportDecl does nothing at all. (Common case.) if (It == ModuleInitializers.end()) return; // Maybe the ImportDecl only imports another ImportDecl. auto &Imported = *It->second; if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) { Imported.resolve(*this); auto *OnlyDecl = Imported.Initializers.front(); if (isa(OnlyDecl)) D = OnlyDecl; } } auto *&Inits = ModuleInitializers[M]; if (!Inits) Inits = new (*this) PerModuleInitializers; Inits->Initializers.push_back(D); } void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef IDs) { auto *&Inits = ModuleInitializers[M]; if (!Inits) Inits = new (*this) PerModuleInitializers; Inits->LazyInitializers.insert(Inits->LazyInitializers.end(), IDs.begin(), IDs.end()); } ArrayRef ASTContext::getModuleInitializers(Module *M) { auto It = ModuleInitializers.find(M); if (It == ModuleInitializers.end()) return None; auto *Inits = It->second; Inits->resolve(*this); return Inits->Initializers; } ExternCContextDecl *ASTContext::getExternCContextDecl() const { if (!ExternCContext) ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl()); return ExternCContext; } BuiltinTemplateDecl * ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK, const IdentifierInfo *II) const { auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK); BuiltinTemplate->setImplicit(); TUDecl->addDecl(BuiltinTemplate); return BuiltinTemplate; } BuiltinTemplateDecl * ASTContext::getMakeIntegerSeqDecl() const { if (!MakeIntegerSeqDecl) MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq, getMakeIntegerSeqName()); return MakeIntegerSeqDecl; } BuiltinTemplateDecl * ASTContext::getTypePackElementDecl() const { if (!TypePackElementDecl) TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element, getTypePackElementName()); return TypePackElementDecl; } RecordDecl *ASTContext::buildImplicitRecord(StringRef Name, RecordDecl::TagKind TK) const { SourceLocation Loc; RecordDecl *NewDecl; if (getLangOpts().CPlusPlus) NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc, &Idents.get(Name)); else NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc, &Idents.get(Name)); NewDecl->setImplicit(); NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit( const_cast(*this), TypeVisibilityAttr::Default)); return NewDecl; } TypedefDecl *ASTContext::buildImplicitTypedef(QualType T, StringRef Name) const { TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); TypedefDecl *NewDecl = TypedefDecl::Create( const_cast(*this), getTranslationUnitDecl(), SourceLocation(), SourceLocation(), 
      &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}

void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
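  // (Paragraph 10 names the three real floating types, float, double and
  //  long double, which are initialized next.)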
InitBuiltinType(FloatTy, BuiltinType::Float); InitBuiltinType(DoubleTy, BuiltinType::Double); InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble); // GNU extension, __float128 for IEEE quadruple precision InitBuiltinType(Float128Ty, BuiltinType::Float128); // C11 extension ISO/IEC TS 18661-3 InitBuiltinType(Float16Ty, BuiltinType::Float16); // ISO/IEC JTC1 SC22 WG14 N1169 Extension InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum); InitBuiltinType(AccumTy, BuiltinType::Accum); InitBuiltinType(LongAccumTy, BuiltinType::LongAccum); InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum); InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum); InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum); InitBuiltinType(ShortFractTy, BuiltinType::ShortFract); InitBuiltinType(FractTy, BuiltinType::Fract); InitBuiltinType(LongFractTy, BuiltinType::LongFract); InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract); InitBuiltinType(UnsignedFractTy, BuiltinType::UFract); InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract); InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum); InitBuiltinType(SatAccumTy, BuiltinType::SatAccum); InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum); InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum); InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum); InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum); InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract); InitBuiltinType(SatFractTy, BuiltinType::SatFract); InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract); InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract); InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract); InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract); // GNU extension, 128-bit integers. InitBuiltinType(Int128Ty, BuiltinType::Int128); InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128); // C++ 3.9.1p5 if (TargetInfo::isTypeSigned(Target.getWCharType())) InitBuiltinType(WCharTy, BuiltinType::WChar_S); else // -fshort-wchar makes wchar_t be unsigned. InitBuiltinType(WCharTy, BuiltinType::WChar_U); if (LangOpts.CPlusPlus && LangOpts.WChar) WideCharTy = WCharTy; else { // C99 (or C++ using -fno-wchar). WideCharTy = getFromTargetType(Target.getWCharType()); } WIntTy = getFromTargetType(Target.getWIntType()); // C++20 (proposed) InitBuiltinType(Char8Ty, BuiltinType::Char8); if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ InitBuiltinType(Char16Ty, BuiltinType::Char16); else // C99 Char16Ty = getFromTargetType(Target.getChar16Type()); if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ InitBuiltinType(Char32Ty, BuiltinType::Char32); else // C99 Char32Ty = getFromTargetType(Target.getChar32Type()); // Placeholder type for type-dependent expressions whose type is // completely unknown. No code should ever check a type against // DependentTy and users should never see it; however, it is here to // help diagnose failures to properly check for type-dependent // expressions. InitBuiltinType(DependentTy, BuiltinType::Dependent); // Placeholder type for functions. InitBuiltinType(OverloadTy, BuiltinType::Overload); // Placeholder type for bound members. InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember); // Placeholder type for pseudo-objects. InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject); // "any" type; useful for debugger-like clients. 
InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny); // Placeholder type for unbridged ARC casts. InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast); // Placeholder type for builtin functions. InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn); // Placeholder type for OMP array sections. if (LangOpts.OpenMP) { InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection); InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping); InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator); } if (LangOpts.MatrixTypes) InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx); // C99 6.2.5p11. FloatComplexTy = getComplexType(FloatTy); DoubleComplexTy = getComplexType(DoubleTy); LongDoubleComplexTy = getComplexType(LongDoubleTy); Float128ComplexTy = getComplexType(Float128Ty); // Builtin types for 'id', 'Class', and 'SEL'. InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId); InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass); InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel); if (LangOpts.OpenCL) { #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ InitBuiltinType(SingletonId, BuiltinType::Id); #include "clang/Basic/OpenCLImageTypes.def" InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler); InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent); InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent); InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue); InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID); #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ InitBuiltinType(Id##Ty, BuiltinType::Id); #include "clang/Basic/OpenCLExtensionTypes.def" } if (Target.hasAArch64SVETypes()) { #define SVE_TYPE(Name, Id, SingletonId) \ InitBuiltinType(SingletonId, BuiltinType::Id); #include "clang/Basic/AArch64SVEACLETypes.def" } // Builtin type for __objc_yes and __objc_no ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ? SignedCharTy : BoolTy); ObjCConstantStringType = QualType(); ObjCSuperType = QualType(); // void * type if (LangOpts.OpenCLVersion >= 200) { auto Q = VoidTy.getQualifiers(); Q.setAddressSpace(LangAS::opencl_generic); VoidPtrTy = getPointerType(getCanonicalType( getQualifiedType(VoidTy.getUnqualifiedType(), Q))); } else { VoidPtrTy = getPointerType(VoidTy); } // nullptr type (C++0x 2.14.7) InitBuiltinType(NullPtrTy, BuiltinType::NullPtr); // half type (OpenCL 6.1.1.1) / ARM NEON __fp16 InitBuiltinType(HalfTy, BuiltinType::Half); InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16); // Builtin type used to help define __builtin_va_list. VaListTagDecl = nullptr; // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls. if (LangOpts.MicrosoftExt || LangOpts.Borland) { MSGuidTagDecl = buildImplicitRecord("_GUID"); TUDecl->addDecl(MSGuidTagDecl); } } DiagnosticsEngine &ASTContext::getDiagnostics() const { return SourceMgr.getDiagnostics(); } AttrVec& ASTContext::getDeclAttrs(const Decl *D) { AttrVec *&Result = DeclAttrs[D]; if (!Result) { void *Mem = Allocate(sizeof(AttrVec)); Result = new (Mem) AttrVec; } return *Result; } /// Erase the attributes corresponding to the given declaration. void ASTContext::eraseDeclAttrs(const Decl *D) { llvm::DenseMap::iterator Pos = DeclAttrs.find(D); if (Pos != DeclAttrs.end()) { Pos->second->~AttrVec(); DeclAttrs.erase(Pos); } } // FIXME: Remove ? 
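// (Returns the MemberSpecializationInfo recorded via
//  setInstantiatedFromStaticDataMember below: the member of the class
//  template pattern this static data member was instantiated from, together
//  with the specialization kind and point of instantiation.)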
MemberSpecializationInfo * ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) { assert(Var->isStaticDataMember() && "Not a static data member"); return getTemplateOrSpecializationInfo(Var) .dyn_cast(); } ASTContext::TemplateOrSpecializationInfo ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) { llvm::DenseMap::iterator Pos = TemplateOrInstantiation.find(Var); if (Pos == TemplateOrInstantiation.end()) return {}; return Pos->second; } void ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl, TemplateSpecializationKind TSK, SourceLocation PointOfInstantiation) { assert(Inst->isStaticDataMember() && "Not a static data member"); assert(Tmpl->isStaticDataMember() && "Not a static data member"); setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo( Tmpl, TSK, PointOfInstantiation)); } void ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst, TemplateOrSpecializationInfo TSI) { assert(!TemplateOrInstantiation[Inst] && "Already noted what the variable was instantiated from"); TemplateOrInstantiation[Inst] = TSI; } NamedDecl * ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) { auto Pos = InstantiatedFromUsingDecl.find(UUD); if (Pos == InstantiatedFromUsingDecl.end()) return nullptr; return Pos->second; } void ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) { assert((isa(Pattern) || isa(Pattern) || isa(Pattern)) && "pattern decl is not a using decl"); assert((isa(Inst) || isa(Inst) || isa(Inst)) && "instantiation did not produce a using decl"); assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists"); InstantiatedFromUsingDecl[Inst] = Pattern; } UsingShadowDecl * ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) { llvm::DenseMap::const_iterator Pos = InstantiatedFromUsingShadowDecl.find(Inst); if (Pos == InstantiatedFromUsingShadowDecl.end()) return nullptr; return Pos->second; } void ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst, UsingShadowDecl *Pattern) { assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists"); InstantiatedFromUsingShadowDecl[Inst] = Pattern; } FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) { llvm::DenseMap::iterator Pos = InstantiatedFromUnnamedFieldDecl.find(Field); if (Pos == InstantiatedFromUnnamedFieldDecl.end()) return nullptr; return Pos->second; } void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, FieldDecl *Tmpl) { assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed"); assert(!Tmpl->getDeclName() && "Template field decl is not unnamed"); assert(!InstantiatedFromUnnamedFieldDecl[Inst] && "Already noted what unnamed field was instantiated from"); InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; } ASTContext::overridden_cxx_method_iterator ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const { return overridden_methods(Method).begin(); } ASTContext::overridden_cxx_method_iterator ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { return overridden_methods(Method).end(); } unsigned ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { auto Range = overridden_methods(Method); return Range.end() - Range.begin(); } ASTContext::overridden_method_range ASTContext::overridden_methods(const CXXMethodDecl *Method) const { llvm::DenseMap::const_iterator Pos = OverriddenMethods.find(Method->getCanonicalDecl()); if (Pos == OverriddenMethods.end()) return 
overridden_method_range(nullptr, nullptr); return overridden_method_range(Pos->second.begin(), Pos->second.end()); } void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method, const CXXMethodDecl *Overridden) { assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl()); OverriddenMethods[Method].push_back(Overridden); } void ASTContext::getOverriddenMethods( const NamedDecl *D, SmallVectorImpl &Overridden) const { assert(D); if (const auto *CXXMethod = dyn_cast(D)) { Overridden.append(overridden_methods_begin(CXXMethod), overridden_methods_end(CXXMethod)); return; } const auto *Method = dyn_cast(D); if (!Method) return; SmallVector OverDecls; Method->getOverriddenMethods(OverDecls); Overridden.append(OverDecls.begin(), OverDecls.end()); } void ASTContext::addedLocalImportDecl(ImportDecl *Import) { assert(!Import->getNextLocalImport() && "Import declaration already in the chain"); assert(!Import->isFromASTFile() && "Non-local import declaration"); if (!FirstLocalImport) { FirstLocalImport = Import; LastLocalImport = Import; return; } LastLocalImport->setNextLocalImport(Import); LastLocalImport = Import; } //===----------------------------------------------------------------------===// // Type Sizing and Analysis //===----------------------------------------------------------------------===// /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified /// scalar floating point type. const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { switch (T->castAs()->getKind()) { default: llvm_unreachable("Not a floating point type!"); case BuiltinType::BFloat16: return Target->getBFloat16Format(); case BuiltinType::Float16: case BuiltinType::Half: return Target->getHalfFormat(); case BuiltinType::Float: return Target->getFloatFormat(); case BuiltinType::Double: return Target->getDoubleFormat(); case BuiltinType::LongDouble: if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) return AuxTarget->getLongDoubleFormat(); return Target->getLongDoubleFormat(); case BuiltinType::Float128: if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) return AuxTarget->getFloat128Format(); return Target->getFloat128Format(); } } CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { unsigned Align = Target->getCharWidth(); bool UseAlignAttrOnly = false; if (unsigned AlignFromAttr = D->getMaxAlignment()) { Align = AlignFromAttr; // __attribute__((aligned)) can increase or decrease alignment // *except* on a struct or struct member, where it only increases // alignment unless 'packed' is also specified. // // It is an error for alignas to decrease alignment, so we can // ignore that possibility; Sema should diagnose it. if (isa(D)) { UseAlignAttrOnly = D->hasAttr() || cast(D)->getParent()->hasAttr(); } else { UseAlignAttrOnly = true; } } else if (isa(D)) UseAlignAttrOnly = D->hasAttr() || cast(D)->getParent()->hasAttr(); // If we're using the align attribute only, just ignore everything // else about the declaration and its type. if (UseAlignAttrOnly) { // do nothing } else if (const auto *VD = dyn_cast(D)) { QualType T = VD->getType(); if (const auto *RT = T->getAs()) { if (ForAlignof) T = RT->getPointeeType(); else T = getPointerType(RT->getPointeeType()); } QualType BaseT = getBaseElementType(T); if (T->isFunctionType()) Align = getTypeInfoImpl(T.getTypePtr()).Align; else if (!BaseT->isIncompleteType()) { // Adjust alignments of declarations with array type by the // large-array alignment on the target. 
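      // (getLargeArrayMinWidth()/getLargeArrayAlign() describe a target ABI
      //  rule that over-aligns arrays at or above a size threshold;
      //  variable-length arrays are always treated as large here because
      //  their size is not known at this point.)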
if (const ArrayType *arrayType = getAsArrayType(T)) { unsigned MinWidth = Target->getLargeArrayMinWidth(); if (!ForAlignof && MinWidth) { if (isa(arrayType)) Align = std::max(Align, Target->getLargeArrayAlign()); else if (isa(arrayType) && MinWidth <= getTypeSize(cast(arrayType))) Align = std::max(Align, Target->getLargeArrayAlign()); } } Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr())); if (BaseT.getQualifiers().hasUnaligned()) Align = Target->getCharWidth(); if (const auto *VD = dyn_cast(D)) { if (VD->hasGlobalStorage() && !ForAlignof) { uint64_t TypeSize = getTypeSize(T.getTypePtr()); Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize)); } } } // Fields can be subject to extra alignment constraints, like if // the field is packed, the struct is packed, or the struct has a // a max-field-alignment constraint (#pragma pack). So calculate // the actual alignment of the field within the struct, and then // (as we're expected to) constrain that by the alignment of the type. if (const auto *Field = dyn_cast(VD)) { const RecordDecl *Parent = Field->getParent(); // We can only produce a sensible answer if the record is valid. if (!Parent->isInvalidDecl()) { const ASTRecordLayout &Layout = getASTRecordLayout(Parent); // Start with the record's overall alignment. unsigned FieldAlign = toBits(Layout.getAlignment()); // Use the GCD of that and the offset within the record. uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex()); if (Offset > 0) { // Alignment is always a power of 2, so the GCD will be a power of 2, // which means we get to do this crazy thing instead of Euclid's. uint64_t LowBitOfOffset = Offset & (~Offset + 1); if (LowBitOfOffset < FieldAlign) FieldAlign = static_cast(LowBitOfOffset); } Align = std::min(Align, FieldAlign); } } } return toCharUnitsFromBits(Align); } CharUnits ASTContext::getExnObjectAlignment() const { return toCharUnitsFromBits(Target->getExnObjectAlignment()); } // getTypeInfoDataSizeInChars - Return the size of a type, in // chars. If the type is a record, its data size is returned. This is // the size of the memcpy that's performed when assigning this type // using a trivial copy/move assignment operator. std::pair ASTContext::getTypeInfoDataSizeInChars(QualType T) const { std::pair sizeAndAlign = getTypeInfoInChars(T); // In C++, objects can sometimes be allocated into the tail padding // of a base-class subobject. We decide whether that's possible // during class layout, so here we can just trust the layout results. if (getLangOpts().CPlusPlus) { if (const auto *RT = T->getAs()) { const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); sizeAndAlign.first = layout.getDataSize(); } } return sizeAndAlign; } /// getConstantArrayInfoInChars - Performing the computation in CharUnits /// instead of in bits prevents overflowing the uint64_t for some large arrays. 
std::pair static getConstantArrayInfoInChars(const ASTContext &Context, const ConstantArrayType *CAT) { std::pair EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); uint64_t Size = CAT->getSize().getZExtValue(); assert((Size == 0 || static_cast(EltInfo.first.getQuantity()) <= (uint64_t)(-1)/Size) && "Overflow in array type char size evaluation"); uint64_t Width = EltInfo.first.getQuantity() * Size; unsigned Align = EltInfo.second.getQuantity(); if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || Context.getTargetInfo().getPointerWidth(0) == 64) Width = llvm::alignTo(Width, Align); return std::make_pair(CharUnits::fromQuantity(Width), CharUnits::fromQuantity(Align)); } std::pair ASTContext::getTypeInfoInChars(const Type *T) const { if (const auto *CAT = dyn_cast(T)) return getConstantArrayInfoInChars(*this, CAT); TypeInfo Info = getTypeInfo(T); return std::make_pair(toCharUnitsFromBits(Info.Width), toCharUnitsFromBits(Info.Align)); } std::pair ASTContext::getTypeInfoInChars(QualType T) const { return getTypeInfoInChars(T.getTypePtr()); } bool ASTContext::isAlignmentRequired(const Type *T) const { return getTypeInfo(T).AlignIsRequired; } bool ASTContext::isAlignmentRequired(QualType T) const { return isAlignmentRequired(T.getTypePtr()); } unsigned ASTContext::getTypeAlignIfKnown(QualType T) const { // An alignment on a typedef overrides anything else. if (const auto *TT = T->getAs()) if (unsigned Align = TT->getDecl()->getMaxAlignment()) return Align; // If we have an (array of) complete type, we're done. T = getBaseElementType(T); if (!T->isIncompleteType()) return getTypeAlign(T); // If we had an array type, its element type might be a typedef // type with an alignment attribute. if (const auto *TT = T->getAs()) if (unsigned Align = TT->getDecl()->getMaxAlignment()) return Align; // Otherwise, see if the declaration of the type had an attribute. if (const auto *TT = T->getAs()) return TT->getDecl()->getMaxAlignment(); return 0; } TypeInfo ASTContext::getTypeInfo(const Type *T) const { TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); if (I != MemoizedTypeInfo.end()) return I->second; // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. TypeInfo TI = getTypeInfoImpl(T); MemoizedTypeInfo[T] = TI; return TI; } /// getTypeInfoImpl - Return the size of the specified type, in bits. This /// method does not work on incomplete types. /// /// FIXME: Pointers into different addr spaces could have different sizes and /// alignment requirements: getPointerInfo should take an AddrSpace, this /// should take a QualType, &c. TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { uint64_t Width = 0; unsigned Align = 8; bool AlignIsRequired = false; unsigned AS = 0; switch (T->getTypeClass()) { #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_TYPE(Class, Base) #define DEPENDENT_TYPE(Class, Base) case Type::Class: #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ case Type::Class: \ assert(!T->isDependentType() && "should not see dependent types here"); \ return getTypeInfo(cast(T)->desugar().getTypePtr()); #include "clang/AST/TypeNodes.inc" llvm_unreachable("Should not see dependent types"); case Type::FunctionNoProto: case Type::FunctionProto: // GCC extension: alignof(function) = 32 bits Width = 0; Align = 32; break; case Type::IncompleteArray: case Type::VariableArray: case Type::ConstantArray: { // Model non-constant sized arrays as size zero, but track the alignment. 
uint64_t Size = 0; if (const auto *CAT = dyn_cast(T)) Size = CAT->getSize().getZExtValue(); TypeInfo EltInfo = getTypeInfo(cast(T)->getElementType()); assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && "Overflow in array type bit size evaluation"); Width = EltInfo.Width * Size; Align = EltInfo.Align; AlignIsRequired = EltInfo.AlignIsRequired; if (!getTargetInfo().getCXXABI().isMicrosoft() || getTargetInfo().getPointerWidth(0) == 64) Width = llvm::alignTo(Width, Align); break; } case Type::ExtVector: case Type::Vector: { const auto *VT = cast(T); TypeInfo EltInfo = getTypeInfo(VT->getElementType()); Width = EltInfo.Width * VT->getNumElements(); Align = Width; // If the alignment is not a power of 2, round up to the next power of 2. // This happens for non-power-of-2 length vectors. if (Align & (Align-1)) { Align = llvm::NextPowerOf2(Align); Width = llvm::alignTo(Width, Align); } // Adjust the alignment based on the target max. uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); if (TargetVectorAlign && TargetVectorAlign < Align) Align = TargetVectorAlign; break; } case Type::ConstantMatrix: { const auto *MT = cast(T); TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); // The internal layout of a matrix value is implementation defined. // Initially be ABI compatible with arrays with respect to alignment and // size. Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); Align = ElementInfo.Align; break; } case Type::Builtin: switch (cast(T)->getKind()) { default: llvm_unreachable("Unknown builtin type!"); case BuiltinType::Void: // GCC extension: alignof(void) = 8 bits. Width = 0; Align = 8; break; case BuiltinType::Bool: Width = Target->getBoolWidth(); Align = Target->getBoolAlign(); break; case BuiltinType::Char_S: case BuiltinType::Char_U: case BuiltinType::UChar: case BuiltinType::SChar: case BuiltinType::Char8: Width = Target->getCharWidth(); Align = Target->getCharAlign(); break; case BuiltinType::WChar_S: case BuiltinType::WChar_U: Width = Target->getWCharWidth(); Align = Target->getWCharAlign(); break; case BuiltinType::Char16: Width = Target->getChar16Width(); Align = Target->getChar16Align(); break; case BuiltinType::Char32: Width = Target->getChar32Width(); Align = Target->getChar32Align(); break; case BuiltinType::UShort: case BuiltinType::Short: Width = Target->getShortWidth(); Align = Target->getShortAlign(); break; case BuiltinType::UInt: case BuiltinType::Int: Width = Target->getIntWidth(); Align = Target->getIntAlign(); break; case BuiltinType::ULong: case BuiltinType::Long: Width = Target->getLongWidth(); Align = Target->getLongAlign(); break; case BuiltinType::ULongLong: case BuiltinType::LongLong: Width = Target->getLongLongWidth(); Align = Target->getLongLongAlign(); break; case BuiltinType::Int128: case BuiltinType::UInt128: Width = 128; Align = 128; // int128_t is 128-bit aligned on all targets. 
break; case BuiltinType::ShortAccum: case BuiltinType::UShortAccum: case BuiltinType::SatShortAccum: case BuiltinType::SatUShortAccum: Width = Target->getShortAccumWidth(); Align = Target->getShortAccumAlign(); break; case BuiltinType::Accum: case BuiltinType::UAccum: case BuiltinType::SatAccum: case BuiltinType::SatUAccum: Width = Target->getAccumWidth(); Align = Target->getAccumAlign(); break; case BuiltinType::LongAccum: case BuiltinType::ULongAccum: case BuiltinType::SatLongAccum: case BuiltinType::SatULongAccum: Width = Target->getLongAccumWidth(); Align = Target->getLongAccumAlign(); break; case BuiltinType::ShortFract: case BuiltinType::UShortFract: case BuiltinType::SatShortFract: case BuiltinType::SatUShortFract: Width = Target->getShortFractWidth(); Align = Target->getShortFractAlign(); break; case BuiltinType::Fract: case BuiltinType::UFract: case BuiltinType::SatFract: case BuiltinType::SatUFract: Width = Target->getFractWidth(); Align = Target->getFractAlign(); break; case BuiltinType::LongFract: case BuiltinType::ULongFract: case BuiltinType::SatLongFract: case BuiltinType::SatULongFract: Width = Target->getLongFractWidth(); Align = Target->getLongFractAlign(); break; case BuiltinType::BFloat16: Width = Target->getBFloat16Width(); Align = Target->getBFloat16Align(); break; case BuiltinType::Float16: case BuiltinType::Half: if (Target->hasFloat16Type() || !getLangOpts().OpenMP || !getLangOpts().OpenMPIsDevice) { Width = Target->getHalfWidth(); Align = Target->getHalfAlign(); } else { assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && "Expected OpenMP device compilation."); Width = AuxTarget->getHalfWidth(); Align = AuxTarget->getHalfAlign(); } break; case BuiltinType::Float: Width = Target->getFloatWidth(); Align = Target->getFloatAlign(); break; case BuiltinType::Double: Width = Target->getDoubleWidth(); Align = Target->getDoubleAlign(); break; case BuiltinType::LongDouble: if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { Width = AuxTarget->getLongDoubleWidth(); Align = AuxTarget->getLongDoubleAlign(); } else { Width = Target->getLongDoubleWidth(); Align = Target->getLongDoubleAlign(); } break; case BuiltinType::Float128: if (Target->hasFloat128Type() || !getLangOpts().OpenMP || !getLangOpts().OpenMPIsDevice) { Width = Target->getFloat128Width(); Align = Target->getFloat128Align(); } else { assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && "Expected OpenMP device compilation."); Width = AuxTarget->getFloat128Width(); Align = AuxTarget->getFloat128Align(); } break; case BuiltinType::NullPtr: Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t) Align = Target->getPointerAlign(0); // == sizeof(void*) break; case BuiltinType::ObjCId: case BuiltinType::ObjCClass: case BuiltinType::ObjCSel: Width = Target->getPointerWidth(0); Align = Target->getPointerAlign(0); break; case BuiltinType::OCLSampler: case BuiltinType::OCLEvent: case BuiltinType::OCLClkEvent: case BuiltinType::OCLQueue: case BuiltinType::OCLReserveID: #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ case BuiltinType::Id: #include "clang/Basic/OpenCLImageTypes.def" #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ case BuiltinType::Id: #include "clang/Basic/OpenCLExtensionTypes.def" AS = getTargetAddressSpace( Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T))); Width = Target->getPointerWidth(AS); Align = 
Target->getPointerAlign(AS); break; // The SVE types are effectively target-specific. The length of an // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple // of 128 bits. There is one predicate bit for each vector byte, so the // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. // // Because the length is only known at runtime, we use a dummy value // of 0 for the static length. The alignment values are those defined // by the Procedure Call Standard for the Arm Architecture. #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ IsSigned, IsFP, IsBF) \ case BuiltinType::Id: \ Width = 0; \ Align = 128; \ break; #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ case BuiltinType::Id: \ Width = 0; \ Align = 16; \ break; #include "clang/Basic/AArch64SVEACLETypes.def" } break; case Type::ObjCObjectPointer: Width = Target->getPointerWidth(0); Align = Target->getPointerAlign(0); break; case Type::BlockPointer: AS = getTargetAddressSpace(cast(T)->getPointeeType()); Width = Target->getPointerWidth(AS); Align = Target->getPointerAlign(AS); break; case Type::LValueReference: case Type::RValueReference: // alignof and sizeof should never enter this code path here, so we go // the pointer route. AS = getTargetAddressSpace(cast(T)->getPointeeType()); Width = Target->getPointerWidth(AS); Align = Target->getPointerAlign(AS); break; case Type::Pointer: AS = getTargetAddressSpace(cast(T)->getPointeeType()); Width = Target->getPointerWidth(AS); Align = Target->getPointerAlign(AS); break; case Type::MemberPointer: { const auto *MPT = cast(T); CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); Width = MPI.Width; Align = MPI.Align; break; } case Type::Complex: { // Complex types have the same alignment as their elements, but twice the // size. 
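    // (For example, on a typical 64-bit target _Complex double is 128 bits
    //  wide but keeps the 64-bit alignment of double.)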
TypeInfo EltInfo = getTypeInfo(cast(T)->getElementType()); Width = EltInfo.Width * 2; Align = EltInfo.Align; break; } case Type::ObjCObject: return getTypeInfo(cast(T)->getBaseType().getTypePtr()); case Type::Adjusted: case Type::Decayed: return getTypeInfo(cast(T)->getAdjustedType().getTypePtr()); case Type::ObjCInterface: { const auto *ObjCI = cast(T); if (ObjCI->getDecl()->isInvalidDecl()) { Width = 8; Align = 8; break; } const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); Width = toBits(Layout.getSize()); Align = toBits(Layout.getAlignment()); break; } case Type::ExtInt: { const auto *EIT = cast(T); Align = std::min(static_cast(std::max( getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))), Target->getLongLongAlign()); Width = llvm::alignTo(EIT->getNumBits(), Align); break; } case Type::Record: case Type::Enum: { const auto *TT = cast(T); if (TT->getDecl()->isInvalidDecl()) { Width = 8; Align = 8; break; } if (const auto *ET = dyn_cast(TT)) { const EnumDecl *ED = ET->getDecl(); TypeInfo Info = getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); if (unsigned AttrAlign = ED->getMaxAlignment()) { Info.Align = AttrAlign; Info.AlignIsRequired = true; } return Info; } const auto *RT = cast(TT); const RecordDecl *RD = RT->getDecl(); const ASTRecordLayout &Layout = getASTRecordLayout(RD); Width = toBits(Layout.getSize()); Align = toBits(Layout.getAlignment()); AlignIsRequired = RD->hasAttr(); break; } case Type::SubstTemplateTypeParm: return getTypeInfo(cast(T)-> getReplacementType().getTypePtr()); case Type::Auto: case Type::DeducedTemplateSpecialization: { const auto *A = cast(T); assert(!A->getDeducedType().isNull() && "cannot request the size of an undeduced or dependent auto type"); return getTypeInfo(A->getDeducedType().getTypePtr()); } case Type::Paren: return getTypeInfo(cast(T)->getInnerType().getTypePtr()); case Type::MacroQualified: return getTypeInfo( cast(T)->getUnderlyingType().getTypePtr()); case Type::ObjCTypeParam: return getTypeInfo(cast(T)->desugar().getTypePtr()); case Type::Typedef: { const TypedefNameDecl *Typedef = cast(T)->getDecl(); TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr()); // If the typedef has an aligned attribute on it, it overrides any computed // alignment we have. This violates the GCC documentation (which says that // attribute(aligned) can only round up) but matches its implementation. if (unsigned AttrAlign = Typedef->getMaxAlignment()) { Align = AttrAlign; AlignIsRequired = true; } else { Align = Info.Align; AlignIsRequired = Info.AlignIsRequired; } Width = Info.Width; break; } case Type::Elaborated: return getTypeInfo(cast(T)->getNamedType().getTypePtr()); case Type::Attributed: return getTypeInfo( cast(T)->getEquivalentType().getTypePtr()); case Type::Atomic: { // Start with the base type information. TypeInfo Info = getTypeInfo(cast(T)->getValueType()); Width = Info.Width; Align = Info.Align; if (!Width) { // An otherwise zero-sized type should still generate an // atomic operation. Width = Target->getCharWidth(); assert(Align); } else if (Width <= Target->getMaxAtomicPromoteWidth()) { // If the size of the type doesn't exceed the platform's max // atomic promotion width, make the size and alignment more // favorable to atomic operations: // Round the size up to a power of 2. if (!llvm::isPowerOf2_64(Width)) Width = llvm::NextPowerOf2(Width); // Set the alignment equal to the size. 
      Align = static_cast<unsigned>(Width);
    }
  }
  break;
  case Type::Pipe:
    Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
    Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
    break;
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignIsRequired);
}

unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
  if (I != MemoizedUnadjustedAlign.end())
    return I->second;

  unsigned UnadjustedAlign;
  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else {
    UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
  }

  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
  return UnadjustedAlign;
}

unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
  // Target ppc64 with QPX: simd default alignment for pointer to double is 32.
  if ((getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64 ||
       getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64le) &&
      getTargetInfo().getABI() == "elfv1-qpx" &&
      T->isSpecificBuiltinType(BuiltinType::Double))
    SimdAlign = 256;
  return SimdAlign;
}

/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}

/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}

/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
  return getTypeInfoInChars(T).first;
}

CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
  return getTypeInfoInChars(T).first;
}

/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}

CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}

/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
/// type, in characters, before alignment adjustments. This method does
/// not work on incomplete types.
CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}

CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}

/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance to overalign
/// a data type.
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
if (T->isMemberPointerType()) return getPreferredTypeAlign(getPointerDiffType().getTypePtr()); if (!Target->allowsLargerPreferedTypeAlignment()) return ABIAlign; // Double and long long should be naturally aligned if possible. if (const auto *CT = T->getAs()) T = CT->getElementType().getTypePtr(); if (const auto *ET = T->getAs()) T = ET->getDecl()->getIntegerType().getTypePtr(); if (T->isSpecificBuiltinType(BuiltinType::Double) || T->isSpecificBuiltinType(BuiltinType::LongLong) || T->isSpecificBuiltinType(BuiltinType::ULongLong)) // Don't increase the alignment if an alignment attribute was specified on a // typedef declaration. if (!TI.AlignIsRequired) return std::max(ABIAlign, (unsigned)getTypeSize(T)); return ABIAlign; } /// getTargetDefaultAlignForAttributeAligned - Return the default alignment /// for __attribute__((aligned)) on this target, to be used if no alignment /// value is specified. unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { return getTargetInfo().getDefaultAlignForAttributeAligned(); } /// getAlignOfGlobalVar - Return the alignment in bits that should be given /// to a global variable of the specified type. unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { uint64_t TypeSize = getTypeSize(T.getTypePtr()); return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign(TypeSize)); } /// getAlignOfGlobalVarInChars - Return the alignment in characters that /// should be given to a global variable of the specified type. CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { return toCharUnitsFromBits(getAlignOfGlobalVar(T)); } CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { CharUnits Offset = CharUnits::Zero(); const ASTRecordLayout *Layout = &getASTRecordLayout(RD); while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { Offset += Layout->getBaseClassOffset(Base); Layout = &getASTRecordLayout(Base); } return Offset; } /// DeepCollectObjCIvars - /// This routine first collects all declared, but not synthesized, ivars in /// super class and then collects all ivars, including those synthesized for /// current class. This routine is used for implementation of current class /// when all ivars, declared and synthesized are known. void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, bool leafClass, SmallVectorImpl &Ivars) const { if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) DeepCollectObjCIvars(SuperClass, false, Ivars); if (!leafClass) { for (const auto *I : OI->ivars()) Ivars.push_back(I); } else { auto *IDecl = const_cast(OI); for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; Iv= Iv->getNextIvar()) Ivars.push_back(Iv); } } /// CollectInheritedProtocols - Collect all protocols in current class and /// those inherited by it. void ASTContext::CollectInheritedProtocols(const Decl *CDecl, llvm::SmallPtrSet &Protocols) { if (const auto *OI = dyn_cast(CDecl)) { // We can use protocol_iterator here instead of // all_referenced_protocol_iterator since we are walking all categories. for (auto *Proto : OI->all_referenced_protocols()) { CollectInheritedProtocols(Proto, Protocols); } // Categories of this Interface. 
for (const auto *Cat : OI->visible_categories()) CollectInheritedProtocols(Cat, Protocols); if (ObjCInterfaceDecl *SD = OI->getSuperClass()) while (SD) { CollectInheritedProtocols(SD, Protocols); SD = SD->getSuperClass(); } } else if (const auto *OC = dyn_cast(CDecl)) { for (auto *Proto : OC->protocols()) { CollectInheritedProtocols(Proto, Protocols); } } else if (const auto *OP = dyn_cast(CDecl)) { // Insert the protocol. if (!Protocols.insert( const_cast(OP->getCanonicalDecl())).second) return; for (auto *Proto : OP->protocols()) CollectInheritedProtocols(Proto, Protocols); } } static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, const RecordDecl *RD) { assert(RD->isUnion() && "Must be union type"); CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); for (const auto *Field : RD->fields()) { if (!Context.hasUniqueObjectRepresentations(Field->getType())) return false; CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); if (FieldSize != UnionSize) return false; } return !RD->field_empty(); } static bool isStructEmpty(QualType Ty) { const RecordDecl *RD = Ty->castAs()->getDecl(); if (!RD->field_empty()) return false; if (const auto *ClassDecl = dyn_cast(RD)) return ClassDecl->isEmpty(); return true; } static llvm::Optional structHasUniqueObjectRepresentations(const ASTContext &Context, const RecordDecl *RD) { assert(!RD->isUnion() && "Must be struct/class type"); const auto &Layout = Context.getASTRecordLayout(RD); int64_t CurOffsetInBits = 0; if (const auto *ClassDecl = dyn_cast(RD)) { if (ClassDecl->isDynamicClass()) return llvm::None; SmallVector, 4> Bases; for (const auto &Base : ClassDecl->bases()) { // Empty types can be inherited from, and non-empty types can potentially // have tail padding, so just make sure there isn't an error. 
if (!isStructEmpty(Base.getType())) { llvm::Optional Size = structHasUniqueObjectRepresentations( Context, Base.getType()->castAs()->getDecl()); if (!Size) return llvm::None; Bases.emplace_back(Base.getType(), Size.getValue()); } } llvm::sort(Bases, [&](const std::pair &L, const std::pair &R) { return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) < Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl()); }); for (const auto &Base : Bases) { int64_t BaseOffset = Context.toBits( Layout.getBaseClassOffset(Base.first->getAsCXXRecordDecl())); int64_t BaseSize = Base.second; if (BaseOffset != CurOffsetInBits) return llvm::None; CurOffsetInBits = BaseOffset + BaseSize; } } for (const auto *Field : RD->fields()) { if (!Field->getType()->isReferenceType() && !Context.hasUniqueObjectRepresentations(Field->getType())) return llvm::None; int64_t FieldSizeInBits = Context.toBits(Context.getTypeSizeInChars(Field->getType())); if (Field->isBitField()) { int64_t BitfieldSize = Field->getBitWidthValue(Context); if (BitfieldSize > FieldSizeInBits) return llvm::None; FieldSizeInBits = BitfieldSize; } int64_t FieldOffsetInBits = Context.getFieldOffset(Field); if (FieldOffsetInBits != CurOffsetInBits) return llvm::None; CurOffsetInBits = FieldSizeInBits + FieldOffsetInBits; } return CurOffsetInBits; } bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { // C++17 [meta.unary.prop]: // The predicate condition for a template specialization // has_unique_object_representations shall be // satisfied if and only if: // (9.1) - T is trivially copyable, and // (9.2) - any two objects of type T with the same value have the same // object representation, where two objects // of array or non-union class type are considered to have the same value // if their respective sequences of // direct subobjects have the same values, and two objects of union type // are considered to have the same // value if they have the same active member and the corresponding members // have the same value. // The set of scalar types for which this condition holds is // implementation-defined. [ Note: If a type has padding // bits, the condition does not hold; otherwise, the condition holds true // for unsigned integral types. -- end note ] assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); // Arrays are unique only if their element type is unique. if (Ty->isArrayType()) return hasUniqueObjectRepresentations(getBaseElementType(Ty)); // (9.1) - T is trivially copyable... if (!Ty.isTriviallyCopyableType(*this)) return false; // All integrals and enums are unique. if (Ty->isIntegralOrEnumerationType()) return true; // All other pointers are unique. 
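  // Illustrative example (editorial addition, not part of the original
  // source; assumes a typical target with no padding bits in 'int'):
  //
  //   Ctx.hasUniqueObjectRepresentations(Ctx.IntTy);                     // true
  //   Ctx.hasUniqueObjectRepresentations(Ctx.getPointerType(Ctx.IntTy)); // true
  //
  // whereas a struct such as { char c; int i; } is rejected further down,
  // because the padding bytes after 'c' let two distinct object
  // representations denote the same value.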
if (Ty->isPointerType()) return true; if (Ty->isMemberPointerType()) { const auto *MPT = Ty->getAs(); return !ABI->getMemberPointerInfo(MPT).HasPadding; } if (Ty->isRecordType()) { const RecordDecl *Record = Ty->castAs()->getDecl(); if (Record->isInvalidDecl()) return false; if (Record->isUnion()) return unionHasUniqueObjectRepresentations(*this, Record); Optional StructSize = structHasUniqueObjectRepresentations(*this, Record); return StructSize && StructSize.getValue() == static_cast(getTypeSize(Ty)); } // FIXME: More cases to handle here (list by rsmith): // vectors (careful about, eg, vector of 3 foo) // _Complex int and friends // _Atomic T // Obj-C block pointers // Obj-C object pointers // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, // clk_event_t, queue_t, reserve_id_t) // There're also Obj-C class types and the Obj-C selector type, but I think it // makes sense for those to return false here. return false; } unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { unsigned count = 0; // Count ivars declared in class extension. for (const auto *Ext : OI->known_extensions()) count += Ext->ivar_size(); // Count ivar defined in this class's implementation. This // includes synthesized ivars. if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) count += ImplDecl->ivar_size(); return count; } bool ASTContext::isSentinelNullExpr(const Expr *E) { if (!E) return false; // nullptr_t is always treated as null. if (E->getType()->isNullPtrType()) return true; if (E->getType()->isAnyPointerType() && E->IgnoreParenCasts()->isNullPointerConstant(*this, Expr::NPC_ValueDependentIsNull)) return true; // Unfortunately, __null has type 'int'. if (isa(E)) return true; return false; } /// Get the implementation of ObjCInterfaceDecl, or nullptr if none /// exists. ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { llvm::DenseMap::iterator I = ObjCImpls.find(D); if (I != ObjCImpls.end()) return cast(I->second); return nullptr; } /// Get the implementation of ObjCCategoryDecl, or nullptr if none /// exists. ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { llvm::DenseMap::iterator I = ObjCImpls.find(D); if (I != ObjCImpls.end()) return cast(I->second); return nullptr; } /// Set the implementation of ObjCInterfaceDecl. void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, ObjCImplementationDecl *ImplD) { assert(IFaceD && ImplD && "Passed null params"); ObjCImpls[IFaceD] = ImplD; } /// Set the implementation of ObjCCategoryDecl. void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, ObjCCategoryImplDecl *ImplD) { assert(CatD && ImplD && "Passed null params"); ObjCImpls[CatD] = ImplD; } const ObjCMethodDecl * ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { return ObjCMethodRedecls.lookup(MD); } void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, const ObjCMethodDecl *Redecl) { assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); ObjCMethodRedecls[MD] = Redecl; } const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( const NamedDecl *ND) const { if (const auto *ID = dyn_cast(ND->getDeclContext())) return ID; if (const auto *CD = dyn_cast(ND->getDeclContext())) return CD->getClassInterface(); if (const auto *IMD = dyn_cast(ND->getDeclContext())) return IMD->getClassInterface(); return nullptr; } /// Get the copy initialization expression of VarDecl, or nullptr if /// none exists. 
BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { assert(VD && "Passed null params"); assert(VD->hasAttr() && "getBlockVarCopyInits - not __block var"); auto I = BlockVarCopyInits.find(VD); if (I != BlockVarCopyInits.end()) return I->second; return {nullptr, false}; } /// Set the copy initialization expression of a block var decl. void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, bool CanThrow) { assert(VD && CopyExpr && "Passed null params"); assert(VD->hasAttr() && "setBlockVarCopyInits - not __block var"); BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); } TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, unsigned DataSize) const { if (!DataSize) DataSize = TypeLoc::getFullDataSizeForType(T); else assert(DataSize == TypeLoc::getFullDataSizeForType(T) && "incorrect data size provided to CreateTypeSourceInfo!"); auto *TInfo = (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); new (TInfo) TypeSourceInfo(T); return TInfo; } TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, SourceLocation L) const { TypeSourceInfo *DI = CreateTypeSourceInfo(T); DI->getTypeLoc().initialize(const_cast(*this), L); return DI; } const ASTRecordLayout & ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { return getObjCLayout(D, nullptr); } const ASTRecordLayout & ASTContext::getASTObjCImplementationLayout( const ObjCImplementationDecl *D) const { return getObjCLayout(D->getClassInterface(), D); } //===----------------------------------------------------------------------===// // Type creation/memoization methods //===----------------------------------------------------------------------===// QualType ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { unsigned fastQuals = quals.getFastQualifiers(); quals.removeFastQualifiers(); // Check if we've already instantiated this type. llvm::FoldingSetNodeID ID; ExtQuals::Profile(ID, baseType, quals); void *insertPos = nullptr; if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { assert(eq->getQualifiers() == quals); return QualType(eq, fastQuals); } // If the base type is not canonical, make the appropriate canonical type. QualType canon; if (!baseType->isCanonicalUnqualified()) { SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); canonSplit.Quals.addConsistentQualifiers(quals); canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); // Re-find the insert position. (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); } auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); ExtQualNodes.InsertNode(eq, insertPos); return QualType(eq, fastQuals); } QualType ASTContext::getAddrSpaceQualType(QualType T, LangAS AddressSpace) const { QualType CanT = getCanonicalType(T); if (CanT.getAddressSpace() == AddressSpace) return T; // If we are composing extended qualifiers together, merge together // into one ExtQuals node. QualifierCollector Quals; const Type *TypeNode = Quals.strip(T); // If this type already has an address space specified, it cannot get // another one. assert(!Quals.hasAddressSpace() && "Type cannot be in multiple addr spaces!"); Quals.addAddressSpace(AddressSpace); return getExtQualType(TypeNode, Quals); } QualType ASTContext::removeAddrSpaceQualType(QualType T) const { // If we are composing extended qualifiers together, merge together // into one ExtQuals node. 
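  // Illustrative example (editorial addition, not part of the original
  // source): this is the inverse of getAddrSpaceQualType() above, so, modulo
  // type sugar,
  //
  //   QualType Q = Ctx.getAddrSpaceQualType(Ctx.IntTy, LangAS::opencl_global);
  //   Ctx.hasSameType(Ctx.removeAddrSpaceQualType(Q), Ctx.IntTy);  // true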
QualifierCollector Quals; const Type *TypeNode = Quals.strip(T); // If the qualifier doesn't have an address space just return it. if (!Quals.hasAddressSpace()) return T; Quals.removeAddressSpace(); // Removal of the address space can mean there are no longer any // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) // or required. if (Quals.hasNonFastQualifiers()) return getExtQualType(TypeNode, Quals); else return QualType(TypeNode, Quals.getFastQualifiers()); } QualType ASTContext::getObjCGCQualType(QualType T, Qualifiers::GC GCAttr) const { QualType CanT = getCanonicalType(T); if (CanT.getObjCGCAttr() == GCAttr) return T; if (const auto *ptr = T->getAs()) { QualType Pointee = ptr->getPointeeType(); if (Pointee->isAnyPointerType()) { QualType ResultType = getObjCGCQualType(Pointee, GCAttr); return getPointerType(ResultType); } } // If we are composing extended qualifiers together, merge together // into one ExtQuals node. QualifierCollector Quals; const Type *TypeNode = Quals.strip(T); // If this type already has an ObjCGC specified, it cannot get // another one. assert(!Quals.hasObjCGCAttr() && "Type cannot have multiple ObjCGCs!"); Quals.addObjCGCAttr(GCAttr); return getExtQualType(TypeNode, Quals); } QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { if (const PointerType *Ptr = T->getAs()) { QualType Pointee = Ptr->getPointeeType(); if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { return getPointerType(removeAddrSpaceQualType(Pointee)); } } return T; } const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, FunctionType::ExtInfo Info) { if (T->getExtInfo() == Info) return T; QualType Result; if (const auto *FNPT = dyn_cast(T)) { Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); } else { const auto *FPT = cast(T); FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); EPI.ExtInfo = Info; Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); } return cast(Result.getTypePtr()); } void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, QualType ResultType) { FD = FD->getMostRecentDecl(); while (true) { const auto *FPT = FD->getType()->castAs(); FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); if (FunctionDecl *Next = FD->getPreviousDecl()) FD = Next; else break; } if (ASTMutationListener *L = getASTMutationListener()) L->DeducedReturnType(FD, ResultType); } /// Get a function type and produce the equivalent function type with the /// specified exception specification. Type sugar that can be present on a /// declaration of a function with an exception specification is permitted /// and preserved. Other type sugar (for instance, typedefs) is not. QualType ASTContext::getFunctionTypeWithExceptionSpec( QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) { // Might have some parens. if (const auto *PT = dyn_cast(Orig)) return getParenType( getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); // Might be wrapped in a macro qualified type. if (const auto *MQT = dyn_cast(Orig)) return getMacroQualifiedType( getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), MQT->getMacroIdentifier()); // Might have a calling-convention attribute. 
if (const auto *AT = dyn_cast(Orig)) return getAttributedType( AT->getAttrKind(), getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); // Anything else must be a function type. Rebuild it with the new exception // specification. const auto *Proto = Orig->castAs(); return getFunctionType( Proto->getReturnType(), Proto->getParamTypes(), Proto->getExtProtoInfo().withExceptionSpec(ESI)); } bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, QualType U) { return hasSameType(T, U) || (getLangOpts().CPlusPlus17 && hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), getFunctionTypeWithExceptionSpec(U, EST_None))); } QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { if (const auto *Proto = T->getAs()) { QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); SmallVector Args(Proto->param_types()); for (unsigned i = 0, n = Args.size(); i != n; ++i) Args[i] = removePtrSizeAddrSpace(Args[i]); return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); } if (const FunctionNoProtoType *Proto = T->getAs()) { QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); } return T; } bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { return hasSameType(T, U) || hasSameType(getFunctionTypeWithoutPtrSizes(T), getFunctionTypeWithoutPtrSizes(U)); } void ASTContext::adjustExceptionSpec( FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, bool AsWritten) { // Update the type. QualType Updated = getFunctionTypeWithExceptionSpec(FD->getType(), ESI); FD->setType(Updated); if (!AsWritten) return; // Update the type in the type source information too. if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { // If the type and the type-as-written differ, we may need to update // the type-as-written too. if (TSInfo->getType() != FD->getType()) Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); // FIXME: When we get proper type location information for exceptions, // we'll also have to rebuild the TypeSourceInfo. For now, we just patch // up the TypeSourceInfo; assert(TypeLoc::getFullDataSizeForType(Updated) == TypeLoc::getFullDataSizeForType(TSInfo->getType()) && "TypeLoc size mismatch from updating exception specification"); TSInfo->overrideType(Updated); } } /// getComplexType - Return the uniqued reference to the type for a complex /// number with the specified element type. QualType ASTContext::getComplexType(QualType T) const { // Unique pointers, to guarantee there is only one pointer of a particular // structure. llvm::FoldingSetNodeID ID; ComplexType::Profile(ID, T); void *InsertPos = nullptr; if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(CT, 0); // If the pointee type isn't canonical, this won't be a canonical type either, // so fill in the canonical type field. QualType Canonical; if (!T.isCanonical()) { Canonical = getComplexType(getCanonicalType(T)); // Get the new insert position for the node we care about. ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical); Types.push_back(New); ComplexTypes.InsertNode(New, InsertPos); return QualType(New, 0); } /// getPointerType - Return the uniqued reference to the type for a pointer to /// the specified type. 
QualType ASTContext::getPointerType(QualType T) const { // Unique pointers, to guarantee there is only one pointer of a particular // structure. llvm::FoldingSetNodeID ID; PointerType::Profile(ID, T); void *InsertPos = nullptr; if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(PT, 0); // If the pointee type isn't canonical, this won't be a canonical type either, // so fill in the canonical type field. QualType Canonical; if (!T.isCanonical()) { Canonical = getPointerType(getCanonicalType(T)); // Get the new insert position for the node we care about. PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } auto *New = new (*this, TypeAlignment) PointerType(T, Canonical); Types.push_back(New); PointerTypes.InsertNode(New, InsertPos); return QualType(New, 0); } QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { llvm::FoldingSetNodeID ID; AdjustedType::Profile(ID, Orig, New); void *InsertPos = nullptr; AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); if (AT) return QualType(AT, 0); QualType Canonical = getCanonicalType(New); // Get the new insert position for the node we care about. AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!AT && "Shouldn't be in the map!"); AT = new (*this, TypeAlignment) AdjustedType(Type::Adjusted, Orig, New, Canonical); Types.push_back(AT); AdjustedTypes.InsertNode(AT, InsertPos); return QualType(AT, 0); } QualType ASTContext::getDecayedType(QualType T) const { assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); QualType Decayed; // C99 6.7.5.3p7: // A declaration of a parameter as "array of type" shall be // adjusted to "qualified pointer to type", where the type // qualifiers (if any) are those specified within the [ and ] of // the array type derivation. if (T->isArrayType()) Decayed = getArrayDecayedType(T); // C99 6.7.5.3p8: // A declaration of a parameter as "function returning type" // shall be adjusted to "pointer to function returning type", as // in 6.3.2.1. if (T->isFunctionType()) Decayed = getPointerType(T); llvm::FoldingSetNodeID ID; AdjustedType::Profile(ID, T, Decayed); void *InsertPos = nullptr; AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); if (AT) return QualType(AT, 0); QualType Canonical = getCanonicalType(Decayed); // Get the new insert position for the node we care about. AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!AT && "Shouldn't be in the map!"); AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical); Types.push_back(AT); AdjustedTypes.InsertNode(AT, InsertPos); return QualType(AT, 0); } /// getBlockPointerType - Return the uniqued reference to the type for /// a pointer to the specified block. QualType ASTContext::getBlockPointerType(QualType T) const { assert(T->isFunctionType() && "block of function types only"); // Unique pointers, to guarantee there is only one block of a particular // structure. llvm::FoldingSetNodeID ID; BlockPointerType::Profile(ID, T); void *InsertPos = nullptr; if (BlockPointerType *PT = BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(PT, 0); // If the block pointee type isn't canonical, this won't be a canonical // type either so fill in the canonical type field. QualType Canonical; if (!T.isCanonical()) { Canonical = getBlockPointerType(getCanonicalType(T)); // Get the new insert position for the node we care about. 
BlockPointerType *NewIP = BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical); Types.push_back(New); BlockPointerTypes.InsertNode(New, InsertPos); return QualType(New, 0); } /// getLValueReferenceType - Return the uniqued reference to the type for an /// lvalue reference to the specified type. QualType ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { assert(getCanonicalType(T) != OverloadTy && "Unresolved overloaded function type"); // Unique pointers, to guarantee there is only one pointer of a particular // structure. llvm::FoldingSetNodeID ID; ReferenceType::Profile(ID, T, SpelledAsLValue); void *InsertPos = nullptr; if (LValueReferenceType *RT = LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(RT, 0); const auto *InnerRef = T->getAs(); // If the referencee type isn't canonical, this won't be a canonical type // either, so fill in the canonical type field. QualType Canonical; if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); // Get the new insert position for the node we care about. LValueReferenceType *NewIP = LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical, SpelledAsLValue); Types.push_back(New); LValueReferenceTypes.InsertNode(New, InsertPos); return QualType(New, 0); } /// getRValueReferenceType - Return the uniqued reference to the type for an /// rvalue reference to the specified type. QualType ASTContext::getRValueReferenceType(QualType T) const { // Unique pointers, to guarantee there is only one pointer of a particular // structure. llvm::FoldingSetNodeID ID; ReferenceType::Profile(ID, T, false); void *InsertPos = nullptr; if (RValueReferenceType *RT = RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(RT, 0); const auto *InnerRef = T->getAs(); // If the referencee type isn't canonical, this won't be a canonical type // either, so fill in the canonical type field. QualType Canonical; if (InnerRef || !T.isCanonical()) { QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); // Get the new insert position for the node we care about. RValueReferenceType *NewIP = RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical); Types.push_back(New); RValueReferenceTypes.InsertNode(New, InsertPos); return QualType(New, 0); } /// getMemberPointerType - Return the uniqued reference to the type for a /// member pointer to the specified type, in the specified class. QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { // Unique pointers, to guarantee there is only one pointer of a particular // structure. llvm::FoldingSetNodeID ID; MemberPointerType::Profile(ID, T, Cls); void *InsertPos = nullptr; if (MemberPointerType *PT = MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(PT, 0); // If the pointee or class type isn't canonical, this won't be a canonical // type either, so fill in the canonical type field. 
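  // Illustrative example (editorial addition, not part of the original
  // source; 'MyIntTy' and 'ClsTy' are hypothetical stand-ins for a typedef
  // of 'int' and a class type 'S'):
  //
  //   QualType MPT = Ctx.getMemberPointerType(MyIntTy, ClsTy);
  //   // MPT is sugar printed as 'MyInt S::*'; its canonical type is the
  //   // member pointer built from the canonical pointee, i.e. 'int S::*'.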
QualType Canonical; if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); // Get the new insert position for the node we care about. MemberPointerType *NewIP = MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical); Types.push_back(New); MemberPointerTypes.InsertNode(New, InsertPos); return QualType(New, 0); } /// getConstantArrayType - Return the unique reference to the type for an /// array of the specified element type. QualType ASTContext::getConstantArrayType(QualType EltTy, const llvm::APInt &ArySizeIn, const Expr *SizeExpr, ArrayType::ArraySizeModifier ASM, unsigned IndexTypeQuals) const { assert((EltTy->isDependentType() || EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && "Constant array of VLAs is illegal!"); // We only need the size as part of the type if it's instantiation-dependent. if (SizeExpr && !SizeExpr->isInstantiationDependent()) SizeExpr = nullptr; // Convert the array size into a canonical width matching the pointer size for // the target. llvm::APInt ArySize(ArySizeIn); ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); llvm::FoldingSetNodeID ID; ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, IndexTypeQuals); void *InsertPos = nullptr; if (ConstantArrayType *ATP = ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(ATP, 0); // If the element type isn't canonical or has qualifiers, or the array bound // is instantiation-dependent, this won't be a canonical type either, so fill // in the canonical type field. QualType Canon; if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { SplitQualType canonSplit = getCanonicalType(EltTy).split(); Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, ASM, IndexTypeQuals); Canon = getQualifiedType(Canon, canonSplit.Quals); // Get the new insert position for the node we care about. ConstantArrayType *NewIP = ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } void *Mem = Allocate( ConstantArrayType::totalSizeToAlloc(SizeExpr ? 1 : 0), TypeAlignment); auto *New = new (Mem) ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); ConstantArrayTypes.InsertNode(New, InsertPos); Types.push_back(New); return QualType(New, 0); } /// getVariableArrayDecayedType - Turns the given type, which may be /// variably-modified, into the corresponding type with all the known /// sizes replaced with [*]. QualType ASTContext::getVariableArrayDecayedType(QualType type) const { // Vastly most common case. if (!type->isVariablyModifiedType()) return type; QualType result; SplitQualType split = type.getSplitDesugaredType(); const Type *ty = split.Ty; switch (ty->getTypeClass()) { #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: #include "clang/AST/TypeNodes.inc" llvm_unreachable("didn't desugar past all non-canonical types?"); // These types should never be variably-modified. 
case Type::Builtin: case Type::Complex: case Type::Vector: case Type::DependentVector: case Type::ExtVector: case Type::DependentSizedExtVector: case Type::ConstantMatrix: case Type::DependentSizedMatrix: case Type::DependentAddressSpace: case Type::ObjCObject: case Type::ObjCInterface: case Type::ObjCObjectPointer: case Type::Record: case Type::Enum: case Type::UnresolvedUsing: case Type::TypeOfExpr: case Type::TypeOf: case Type::Decltype: case Type::UnaryTransform: case Type::DependentName: case Type::InjectedClassName: case Type::TemplateSpecialization: case Type::DependentTemplateSpecialization: case Type::TemplateTypeParm: case Type::SubstTemplateTypeParmPack: case Type::Auto: case Type::DeducedTemplateSpecialization: case Type::PackExpansion: case Type::ExtInt: case Type::DependentExtInt: llvm_unreachable("type should never be variably-modified"); // These types can be variably-modified but should never need to // further decay. case Type::FunctionNoProto: case Type::FunctionProto: case Type::BlockPointer: case Type::MemberPointer: case Type::Pipe: return type; // These types can be variably-modified. All these modifications // preserve structure except as noted by comments. // TODO: if we ever care about optimizing VLAs, there are no-op // optimizations available here. case Type::Pointer: result = getPointerType(getVariableArrayDecayedType( cast(ty)->getPointeeType())); break; case Type::LValueReference: { const auto *lv = cast(ty); result = getLValueReferenceType( getVariableArrayDecayedType(lv->getPointeeType()), lv->isSpelledAsLValue()); break; } case Type::RValueReference: { const auto *lv = cast(ty); result = getRValueReferenceType( getVariableArrayDecayedType(lv->getPointeeType())); break; } case Type::Atomic: { const auto *at = cast(ty); result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); break; } case Type::ConstantArray: { const auto *cat = cast(ty); result = getConstantArrayType( getVariableArrayDecayedType(cat->getElementType()), cat->getSize(), cat->getSizeExpr(), cat->getSizeModifier(), cat->getIndexTypeCVRQualifiers()); break; } case Type::DependentSizedArray: { const auto *dat = cast(ty); result = getDependentSizedArrayType( getVariableArrayDecayedType(dat->getElementType()), dat->getSizeExpr(), dat->getSizeModifier(), dat->getIndexTypeCVRQualifiers(), dat->getBracketsRange()); break; } // Turn incomplete types into [*] types. case Type::IncompleteArray: { const auto *iat = cast(ty); result = getVariableArrayType( getVariableArrayDecayedType(iat->getElementType()), /*size*/ nullptr, ArrayType::Normal, iat->getIndexTypeCVRQualifiers(), SourceRange()); break; } // Turn VLA types into [*] types. case Type::VariableArray: { const auto *vat = cast(ty); result = getVariableArrayType( getVariableArrayDecayedType(vat->getElementType()), /*size*/ nullptr, ArrayType::Star, vat->getIndexTypeCVRQualifiers(), vat->getBracketsRange()); break; } } // Apply the top-level qualifiers from the original. return getQualifiedType(result, split.Quals); } /// getVariableArrayType - Returns a non-unique reference to the type for a /// variable array of the specified element type. QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts, ArrayType::ArraySizeModifier ASM, unsigned IndexTypeQuals, SourceRange Brackets) const { // Since we don't unique expressions, it isn't possible to unique VLA's // that have an expression provided for their size. QualType Canon; // Be sure to pull qualifiers off the element type. 
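  // Editorial note (not part of the original source): each call builds a
  // fresh, non-uniqued VariableArrayType node, so e.g.
  //
  //   QualType A = Ctx.getVariableArrayType(EltTy, NumElts, ASM,
  //                                         IndexTypeQuals, Brackets);
  //   QualType B = Ctx.getVariableArrayType(EltTy, NumElts, ASM,
  //                                         IndexTypeQuals, Brackets);
  //   // A.getTypePtr() != B.getTypePtr(), even with identical arguments.
  //
  // For a qualified element type such as 'const int', the qualifiers are
  // split off below and re-applied on top of the canonical array type,
  // mirroring getConstantArrayType() above.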
if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { SplitQualType canonSplit = getCanonicalType(EltTy).split(); Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, IndexTypeQuals, Brackets); Canon = getQualifiedType(Canon, canonSplit.Quals); } auto *New = new (*this, TypeAlignment) VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); VariableArrayTypes.push_back(New); Types.push_back(New); return QualType(New, 0); } /// getDependentSizedArrayType - Returns a non-unique reference to /// the type for a dependently-sized array of the specified element /// type. QualType ASTContext::getDependentSizedArrayType(QualType elementType, Expr *numElements, ArrayType::ArraySizeModifier ASM, unsigned elementTypeQuals, SourceRange brackets) const { assert((!numElements || numElements->isTypeDependent() || numElements->isValueDependent()) && "Size must be type- or value-dependent!"); // Dependently-sized array types that do not have a specified number // of elements will have their sizes deduced from a dependent // initializer. We do no canonicalization here at all, which is okay // because they can't be used in most locations. if (!numElements) { auto *newType = new (*this, TypeAlignment) DependentSizedArrayType(*this, elementType, QualType(), numElements, ASM, elementTypeQuals, brackets); Types.push_back(newType); return QualType(newType, 0); } // Otherwise, we actually build a new type every time, but we // also build a canonical type. SplitQualType canonElementType = getCanonicalType(elementType).split(); void *insertPos = nullptr; llvm::FoldingSetNodeID ID; DependentSizedArrayType::Profile(ID, *this, QualType(canonElementType.Ty, 0), ASM, elementTypeQuals, numElements); // Look for an existing type with these properties. DependentSizedArrayType *canonTy = DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); // If we don't have one, build one. if (!canonTy) { canonTy = new (*this, TypeAlignment) DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0), QualType(), numElements, ASM, elementTypeQuals, brackets); DependentSizedArrayTypes.InsertNode(canonTy, insertPos); Types.push_back(canonTy); } // Apply qualifiers from the element type to the array. QualType canon = getQualifiedType(QualType(canonTy,0), canonElementType.Quals); // If we didn't need extra canonicalization for the element type or the size // expression, then just use that as our result. if (QualType(canonElementType.Ty, 0) == elementType && canonTy->getSizeExpr() == numElements) return canon; // Otherwise, we need to build a type which follows the spelling // of the element type. auto *sugaredType = new (*this, TypeAlignment) DependentSizedArrayType(*this, elementType, canon, numElements, ASM, elementTypeQuals, brackets); Types.push_back(sugaredType); return QualType(sugaredType, 0); } QualType ASTContext::getIncompleteArrayType(QualType elementType, ArrayType::ArraySizeModifier ASM, unsigned elementTypeQuals) const { llvm::FoldingSetNodeID ID; IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); void *insertPos = nullptr; if (IncompleteArrayType *iat = IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) return QualType(iat, 0); // If the element type isn't canonical, this won't be a canonical type // either, so fill in the canonical type field. We also have to pull // qualifiers off the element type. 
QualType canon; if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { SplitQualType canonSplit = getCanonicalType(elementType).split(); canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), ASM, elementTypeQuals); canon = getQualifiedType(canon, canonSplit.Quals); // Get the new insert position for the node we care about. IncompleteArrayType *existing = IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); assert(!existing && "Shouldn't be in the map!"); (void) existing; } auto *newType = new (*this, TypeAlignment) IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); IncompleteArrayTypes.InsertNode(newType, insertPos); Types.push_back(newType); return QualType(newType, 0); } ASTContext::BuiltinVectorTypeInfo ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \ {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount(ELTS, true), \ NUMVECTORS}; #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \ {ELTTY, llvm::ElementCount(ELTS, true), NUMVECTORS}; switch (Ty->getKind()) { default: llvm_unreachable("Unsupported builtin vector type"); case BuiltinType::SveInt8: return SVE_INT_ELTTY(8, 16, true, 1); case BuiltinType::SveUint8: return SVE_INT_ELTTY(8, 16, false, 1); case BuiltinType::SveInt8x2: return SVE_INT_ELTTY(8, 16, true, 2); case BuiltinType::SveUint8x2: return SVE_INT_ELTTY(8, 16, false, 2); case BuiltinType::SveInt8x3: return SVE_INT_ELTTY(8, 16, true, 3); case BuiltinType::SveUint8x3: return SVE_INT_ELTTY(8, 16, false, 3); case BuiltinType::SveInt8x4: return SVE_INT_ELTTY(8, 16, true, 4); case BuiltinType::SveUint8x4: return SVE_INT_ELTTY(8, 16, false, 4); case BuiltinType::SveInt16: return SVE_INT_ELTTY(16, 8, true, 1); case BuiltinType::SveUint16: return SVE_INT_ELTTY(16, 8, false, 1); case BuiltinType::SveInt16x2: return SVE_INT_ELTTY(16, 8, true, 2); case BuiltinType::SveUint16x2: return SVE_INT_ELTTY(16, 8, false, 2); case BuiltinType::SveInt16x3: return SVE_INT_ELTTY(16, 8, true, 3); case BuiltinType::SveUint16x3: return SVE_INT_ELTTY(16, 8, false, 3); case BuiltinType::SveInt16x4: return SVE_INT_ELTTY(16, 8, true, 4); case BuiltinType::SveUint16x4: return SVE_INT_ELTTY(16, 8, false, 4); case BuiltinType::SveInt32: return SVE_INT_ELTTY(32, 4, true, 1); case BuiltinType::SveUint32: return SVE_INT_ELTTY(32, 4, false, 1); case BuiltinType::SveInt32x2: return SVE_INT_ELTTY(32, 4, true, 2); case BuiltinType::SveUint32x2: return SVE_INT_ELTTY(32, 4, false, 2); case BuiltinType::SveInt32x3: return SVE_INT_ELTTY(32, 4, true, 3); case BuiltinType::SveUint32x3: return SVE_INT_ELTTY(32, 4, false, 3); case BuiltinType::SveInt32x4: return SVE_INT_ELTTY(32, 4, true, 4); case BuiltinType::SveUint32x4: return SVE_INT_ELTTY(32, 4, false, 4); case BuiltinType::SveInt64: return SVE_INT_ELTTY(64, 2, true, 1); case BuiltinType::SveUint64: return SVE_INT_ELTTY(64, 2, false, 1); case BuiltinType::SveInt64x2: return SVE_INT_ELTTY(64, 2, true, 2); case BuiltinType::SveUint64x2: return SVE_INT_ELTTY(64, 2, false, 2); case BuiltinType::SveInt64x3: return SVE_INT_ELTTY(64, 2, true, 3); case BuiltinType::SveUint64x3: return SVE_INT_ELTTY(64, 2, false, 3); case BuiltinType::SveInt64x4: return SVE_INT_ELTTY(64, 2, true, 4); case BuiltinType::SveUint64x4: return SVE_INT_ELTTY(64, 2, false, 4); case BuiltinType::SveBool: return SVE_ELTTY(BoolTy, 16, 1); case BuiltinType::SveFloat16: return SVE_ELTTY(HalfTy, 8, 1); case BuiltinType::SveFloat16x2: return SVE_ELTTY(HalfTy, 8, 2); case BuiltinType::SveFloat16x3: 
return SVE_ELTTY(HalfTy, 8, 3); case BuiltinType::SveFloat16x4: return SVE_ELTTY(HalfTy, 8, 4); case BuiltinType::SveFloat32: return SVE_ELTTY(FloatTy, 4, 1); case BuiltinType::SveFloat32x2: return SVE_ELTTY(FloatTy, 4, 2); case BuiltinType::SveFloat32x3: return SVE_ELTTY(FloatTy, 4, 3); case BuiltinType::SveFloat32x4: return SVE_ELTTY(FloatTy, 4, 4); case BuiltinType::SveFloat64: return SVE_ELTTY(DoubleTy, 2, 1); case BuiltinType::SveFloat64x2: return SVE_ELTTY(DoubleTy, 2, 2); case BuiltinType::SveFloat64x3: return SVE_ELTTY(DoubleTy, 2, 3); case BuiltinType::SveFloat64x4: return SVE_ELTTY(DoubleTy, 2, 4); case BuiltinType::SveBFloat16: return SVE_ELTTY(BFloat16Ty, 8, 1); case BuiltinType::SveBFloat16x2: return SVE_ELTTY(BFloat16Ty, 8, 2); case BuiltinType::SveBFloat16x3: return SVE_ELTTY(BFloat16Ty, 8, 3); case BuiltinType::SveBFloat16x4: return SVE_ELTTY(BFloat16Ty, 8, 4); } } /// getScalableVectorType - Return the unique reference to a scalable vector /// type of the specified element type and size. VectorType must be a built-in /// type. QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts) const { if (Target->hasAArch64SVETypes()) { uint64_t EltTySize = getTypeSize(EltTy); #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ IsSigned, IsFP, IsBF) \ if (!EltTy->isBooleanType() && \ ((EltTy->hasIntegerRepresentation() && \ EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ IsFP && !IsBF) || \ (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ IsBF && !IsFP)) && \ EltTySize == ElBits && NumElts == NumEls) { \ return SingletonId; \ } #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ if (EltTy->isBooleanType() && NumElts == NumEls) \ return SingletonId; #include "clang/Basic/AArch64SVEACLETypes.def" } return QualType(); } /// getVectorType - Return the unique reference to a vector type of /// the specified element type and size. VectorType must be a built-in type. QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, VectorType::VectorKind VecKind) const { assert(vecType->isBuiltinType()); // Check if we've already instantiated a vector of this type. llvm::FoldingSetNodeID ID; VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); void *InsertPos = nullptr; if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(VTP, 0); // If the element type isn't canonical, this won't be a canonical type either, // so fill in the canonical type field. QualType Canonical; if (!vecType.isCanonical()) { Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); // Get the new insert position for the node we care about. 
VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } auto *New = new (*this, TypeAlignment) VectorType(vecType, NumElts, Canonical, VecKind); VectorTypes.InsertNode(New, InsertPos); Types.push_back(New); return QualType(New, 0); } QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, SourceLocation AttrLoc, VectorType::VectorKind VecKind) const { llvm::FoldingSetNodeID ID; DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, VecKind); void *InsertPos = nullptr; DependentVectorType *Canon = DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); DependentVectorType *New; if (Canon) { New = new (*this, TypeAlignment) DependentVectorType( *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); } else { QualType CanonVecTy = getCanonicalType(VecType); if (CanonVecTy == VecType) { New = new (*this, TypeAlignment) DependentVectorType( *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind); DependentVectorType *CanonCheck = DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!CanonCheck && "Dependent-sized vector_size canonical type broken"); (void)CanonCheck; DependentVectorTypes.InsertNode(New, InsertPos); } else { QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, SourceLocation(), VecKind); New = new (*this, TypeAlignment) DependentVectorType( *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind); } } Types.push_back(New); return QualType(New, 0); } /// getExtVectorType - Return the unique reference to an extended vector type of /// the specified element type and size. VectorType must be a built-in type. QualType ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const { assert(vecType->isBuiltinType() || vecType->isDependentType()); // Check if we've already instantiated a vector of this type. llvm::FoldingSetNodeID ID; VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, VectorType::GenericVector); void *InsertPos = nullptr; if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(VTP, 0); // If the element type isn't canonical, this won't be a canonical type either, // so fill in the canonical type field. QualType Canonical; if (!vecType.isCanonical()) { Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); // Get the new insert position for the node we care about. VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } auto *New = new (*this, TypeAlignment) ExtVectorType(vecType, NumElts, Canonical); VectorTypes.InsertNode(New, InsertPos); Types.push_back(New); return QualType(New, 0); } QualType ASTContext::getDependentSizedExtVectorType(QualType vecType, Expr *SizeExpr, SourceLocation AttrLoc) const { llvm::FoldingSetNodeID ID; DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), SizeExpr); void *InsertPos = nullptr; DependentSizedExtVectorType *Canon = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); DependentSizedExtVectorType *New; if (Canon) { // We already have a canonical version of this array type; use it as // the canonical type for a newly-built type. 
    New = new (*this, TypeAlignment) DependentSizedExtVectorType(
        *this, vecType, QualType(Canon, 0), SizeExpr, AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(vecType);
    if (CanonVecTy == vecType) {
      New = new (*this, TypeAlignment)
          DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
                                      AttrLoc);

      DependentSizedExtVectorType *CanonCheck =
          DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
    } else {
      QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
                                                           SourceLocation());
      New = new (*this, TypeAlignment) DependentSizedExtVectorType(
          *this, vecType, CanonExtTy, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
                                           unsigned NumColumns) const {
  llvm::FoldingSetNodeID ID;
  ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
                              Type::ConstantMatrix);

  assert(MatrixType::isValidElementType(ElementTy) &&
         "need a valid element type");
  assert(ConstantMatrixType::isDimensionValid(NumRows) &&
         ConstantMatrixType::isDimensionValid(NumColumns) &&
         "need valid matrix dimensions");

  void *InsertPos = nullptr;
  if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(MTP, 0);

  QualType Canonical;
  if (!ElementTy.isCanonical()) {
    Canonical =
        getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);

    ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Matrix type shouldn't already exist in the map");
    (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment)
      ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
  MatrixTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
                                                 Expr *RowExpr,
                                                 Expr *ColumnExpr,
                                                 SourceLocation AttrLoc) const {
  QualType CanonElementTy = getCanonicalType(ElementTy);
  llvm::FoldingSetNodeID ID;
  DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
                                    ColumnExpr);

  void *InsertPos = nullptr;
  DependentSizedMatrixType *Canon =
      DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Canon) {
    Canon = new (*this, TypeAlignment) DependentSizedMatrixType(
        *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc);
#ifndef NDEBUG
    DependentSizedMatrixType *CanonCheck =
        DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
#endif
    DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
    Types.push_back(Canon);
  }

  // Already have a canonical version of the matrix type
  //
  // If it exactly matches the requested type, use it directly.
  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
      Canon->getColumnExpr() == ColumnExpr)
    return QualType(Canon, 0);

  // Use Canon as the canonical type for newly-built type.
DependentSizedMatrixType *New = new (*this, TypeAlignment) DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr, ColumnExpr, AttrLoc); Types.push_back(New); return QualType(New, 0); } QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType, Expr *AddrSpaceExpr, SourceLocation AttrLoc) const { assert(AddrSpaceExpr->isInstantiationDependent()); QualType canonPointeeType = getCanonicalType(PointeeType); void *insertPos = nullptr; llvm::FoldingSetNodeID ID; DependentAddressSpaceType::Profile(ID, *this, canonPointeeType, AddrSpaceExpr); DependentAddressSpaceType *canonTy = DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos); if (!canonTy) { canonTy = new (*this, TypeAlignment) DependentAddressSpaceType(*this, canonPointeeType, QualType(), AddrSpaceExpr, AttrLoc); DependentAddressSpaceTypes.InsertNode(canonTy, insertPos); Types.push_back(canonTy); } if (canonPointeeType == PointeeType && canonTy->getAddrSpaceExpr() == AddrSpaceExpr) return QualType(canonTy, 0); auto *sugaredType = new (*this, TypeAlignment) DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0), AddrSpaceExpr, AttrLoc); Types.push_back(sugaredType); return QualType(sugaredType, 0); } /// Determine whether \p T is canonical as the result type of a function. static bool isCanonicalResultType(QualType T) { return T.isCanonical() && (T.getObjCLifetime() == Qualifiers::OCL_None || T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone); } /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'. QualType ASTContext::getFunctionNoProtoType(QualType ResultTy, const FunctionType::ExtInfo &Info) const { // Unique functions, to guarantee there is only one function of a particular // structure. llvm::FoldingSetNodeID ID; FunctionNoProtoType::Profile(ID, ResultTy, Info); void *InsertPos = nullptr; if (FunctionNoProtoType *FT = FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(FT, 0); QualType Canonical; if (!isCanonicalResultType(ResultTy)) { Canonical = getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info); // Get the new insert position for the node we care about. FunctionNoProtoType *NewIP = FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } auto *New = new (*this, TypeAlignment) FunctionNoProtoType(ResultTy, Canonical, Info); Types.push_back(New); FunctionNoProtoTypes.InsertNode(New, InsertPos); return QualType(New, 0); } CanQualType ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { CanQualType CanResultType = getCanonicalType(ResultType); // Canonical result types do not have ARC lifetime qualifiers. if (CanResultType.getQualifiers().hasObjCLifetime()) { Qualifiers Qs = CanResultType.getQualifiers(); Qs.removeObjCLifetime(); return CanQualType::CreateUnsafe( getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); } return CanResultType; } static bool isCanonicalExceptionSpecification( const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { if (ESI.Type == EST_None) return true; if (!NoexceptInType) return false; // C++17 onwards: exception specification is part of the type, as a simple // boolean "can this function type throw". if (ESI.Type == EST_BasicNoexcept) return true; // A noexcept(expr) specification is (possibly) canonical if expr is // value-dependent. 
if (ESI.Type == EST_DependentNoexcept) return true; // A dynamic exception specification is canonical if it only contains pack // expansions (so we can't tell whether it's non-throwing) and all its // contained types are canonical. if (ESI.Type == EST_Dynamic) { bool AnyPackExpansions = false; for (QualType ET : ESI.Exceptions) { if (!ET.isCanonical()) return false; if (ET->getAs()) AnyPackExpansions = true; } return AnyPackExpansions; } return false; } QualType ASTContext::getFunctionTypeInternal( QualType ResultTy, ArrayRef ArgArray, const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { size_t NumArgs = ArgArray.size(); // Unique functions, to guarantee there is only one function of a particular // structure. llvm::FoldingSetNodeID ID; FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, *this, true); QualType Canonical; bool Unique = false; void *InsertPos = nullptr; if (FunctionProtoType *FPT = FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { QualType Existing = QualType(FPT, 0); // If we find a pre-existing equivalent FunctionProtoType, we can just reuse // it so long as our exception specification doesn't contain a dependent // noexcept expression, or we're just looking for a canonical type. // Otherwise, we're going to need to create a type // sugar node to hold the concrete expression. if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) return Existing; // We need a new type sugar node for this one, to hold the new noexcept // expression. We do no canonicalization here, but that's OK since we don't // expect to see the same noexcept expression much more than once. Canonical = getCanonicalType(Existing); Unique = true; } bool NoexceptInType = getLangOpts().CPlusPlus17; bool IsCanonicalExceptionSpec = isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); // Determine whether the type being created is already canonical or not. bool isCanonical = !Unique && IsCanonicalExceptionSpec && isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; for (unsigned i = 0; i != NumArgs && isCanonical; ++i) if (!ArgArray[i].isCanonicalAsParam()) isCanonical = false; if (OnlyWantCanonical) assert(isCanonical && "given non-canonical parameters constructing canonical type"); // If this type isn't canonical, get the canonical version of it if we don't // already have it. The exception spec is only partially part of the // canonical type, and only in C++17 onwards. if (!isCanonical && Canonical.isNull()) { SmallVector CanonicalArgs; CanonicalArgs.reserve(NumArgs); for (unsigned i = 0; i != NumArgs; ++i) CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); llvm::SmallVector ExceptionTypeStorage; FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; CanonicalEPI.HasTrailingReturn = false; if (IsCanonicalExceptionSpec) { // Exception spec is already OK. } else if (NoexceptInType) { switch (EPI.ExceptionSpec.Type) { case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: // We don't know yet. It shouldn't matter what we pick here; no-one // should ever look at this. LLVM_FALLTHROUGH; case EST_None: case EST_MSAny: case EST_NoexceptFalse: CanonicalEPI.ExceptionSpec.Type = EST_None; break; // A dynamic exception specification is almost always "not noexcept", // with the exception that a pack expansion might expand to no types. 
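// Illustrative example (not from the source), using pre-C++17 dynamic
// exception specifications:
//
//   template <class... Ts> void f() throw(Ts...);  // may expand to throw()
//   void g() throw(int);                           // known to allow throwing
//
// f's specification has to stay EST_Dynamic in the canonical type because
// Ts... might expand to nothing, whereas g's collapses to EST_None when the
// exception specification is part of the type.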
case EST_Dynamic: { bool AnyPacks = false; for (QualType ET : EPI.ExceptionSpec.Exceptions) { if (ET->getAs()) AnyPacks = true; ExceptionTypeStorage.push_back(getCanonicalType(ET)); } if (!AnyPacks) CanonicalEPI.ExceptionSpec.Type = EST_None; else { CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; } break; } case EST_DynamicNone: case EST_BasicNoexcept: case EST_NoexceptTrue: case EST_NoThrow: CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; break; case EST_DependentNoexcept: llvm_unreachable("dependent noexcept is already canonical"); } } else { CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); } // Adjust the canonical function result type. CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); Canonical = getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); // Get the new insert position for the node we care about. FunctionProtoType *NewIP = FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } // Compute the needed size to hold this FunctionProtoType and the // various trailing objects. auto ESH = FunctionProtoType::getExceptionSpecSize( EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); size_t Size = FunctionProtoType::totalSizeToAlloc< QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, FunctionType::ExceptionType, Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers>( NumArgs, EPI.Variadic, FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type), ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, EPI.ExtParameterInfos ? NumArgs : 0, EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment); FunctionProtoType::ExtProtoInfo newEPI = EPI; new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); Types.push_back(FTP); if (!Unique) FunctionProtoTypes.InsertNode(FTP, InsertPos); return QualType(FTP, 0); } QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { llvm::FoldingSetNodeID ID; PipeType::Profile(ID, T, ReadOnly); void *InsertPos = nullptr; if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(PT, 0); // If the pipe element type isn't canonical, this won't be a canonical type // either, so fill in the canonical type field. QualType Canonical; if (!T.isCanonical()) { Canonical = getPipeType(getCanonicalType(T), ReadOnly); // Get the new insert position for the node we care about. PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly); Types.push_back(New); PipeTypes.InsertNode(New, InsertPos); return QualType(New, 0); } QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) : Ty; } QualType ASTContext::getReadPipeType(QualType T) const { return getPipeType(T, true); } QualType ASTContext::getWritePipeType(QualType T) const { return getPipeType(T, false); } QualType ASTContext::getExtIntType(bool IsUnsigned, unsigned NumBits) const { llvm::FoldingSetNodeID ID; ExtIntType::Profile(ID, IsUnsigned, NumBits); void *InsertPos = nullptr; if (ExtIntType *EIT = ExtIntTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(EIT, 0); auto *New = new (*this, TypeAlignment) ExtIntType(IsUnsigned, NumBits); ExtIntTypes.InsertNode(New, InsertPos); Types.push_back(New); return QualType(New, 0); } QualType ASTContext::getDependentExtIntType(bool IsUnsigned, Expr *NumBitsExpr) const { assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); llvm::FoldingSetNodeID ID; DependentExtIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); void *InsertPos = nullptr; if (DependentExtIntType *Existing = DependentExtIntTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(Existing, 0); auto *New = new (*this, TypeAlignment) DependentExtIntType(*this, IsUnsigned, NumBitsExpr); DependentExtIntTypes.InsertNode(New, InsertPos); Types.push_back(New); return QualType(New, 0); } #ifndef NDEBUG static bool NeedsInjectedClassNameType(const RecordDecl *D) { if (!isa(D)) return false; const auto *RD = cast(D); if (isa(RD)) return true; if (RD->getDescribedClassTemplate() && !isa(RD)) return true; return false; } #endif /// getInjectedClassNameType - Return the unique reference to the /// injected class name type for the specified templated declaration. QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, QualType TST) const { assert(NeedsInjectedClassNameType(Decl)); if (Decl->TypeForDecl) { assert(isa(Decl->TypeForDecl)); } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { assert(PrevDecl->TypeForDecl && "previous declaration has no type"); Decl->TypeForDecl = PrevDecl->TypeForDecl; assert(isa(Decl->TypeForDecl)); } else { Type *newType = new (*this, TypeAlignment) InjectedClassNameType(Decl, TST); Decl->TypeForDecl = newType; Types.push_back(newType); } return QualType(Decl->TypeForDecl, 0); } /// getTypeDeclType - Return the unique reference to the type for the /// specified type declaration. QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { assert(Decl && "Passed null for Decl param"); assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); if (const auto *Typedef = dyn_cast(Decl)) return getTypedefType(Typedef); assert(!isa(Decl) && "Template type parameter types are always available."); if (const auto *Record = dyn_cast(Decl)) { assert(Record->isFirstDecl() && "struct/union has previous declaration"); assert(!NeedsInjectedClassNameType(Record)); return getRecordType(Record); } else if (const auto *Enum = dyn_cast(Decl)) { assert(Enum->isFirstDecl() && "enum has previous declaration"); return getEnumType(Enum); } else if (const auto *Using = dyn_cast(Decl)) { Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using); Decl->TypeForDecl = newType; Types.push_back(newType); } else llvm_unreachable("TypeDecl without a type?"); return QualType(Decl->TypeForDecl, 0); } /// getTypedefType - Return the unique reference to the type for the /// specified typedef name decl. 
QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
                                    QualType Canonical) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (Canonical.isNull())
    Canonical = getCanonicalType(Decl->getUnderlyingType());
  auto *newType = new (*this, TypeAlignment)
      TypedefType(Type::Typedef, Decl, Canonical);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  auto *newType = new (*this, TypeAlignment) RecordType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  auto *newType = new (*this, TypeAlignment) EnumType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getAttributedType(attr::Kind attrKind,
                                       QualType modifiedType,
                                       QualType equivalentType) {
  llvm::FoldingSetNodeID id;
  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);

  void *insertPos = nullptr;
  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
  if (type) return QualType(type, 0);

  QualType canon = getCanonicalType(equivalentType);
  type = new (*this, TypeAlignment)
      AttributedType(canon, attrKind, modifiedType, equivalentType);

  Types.push_back(type);
  AttributedTypes.InsertNode(type, insertPos);

  return QualType(type, 0);
}

/// Retrieve a substitution-result type.
QualType ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm, QualType Replacement) const { assert(Replacement.isCanonical() && "replacement types must always be canonical"); llvm::FoldingSetNodeID ID; SubstTemplateTypeParmType::Profile(ID, Parm, Replacement); void *InsertPos = nullptr; SubstTemplateTypeParmType *SubstParm = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); if (!SubstParm) { SubstParm = new (*this, TypeAlignment) SubstTemplateTypeParmType(Parm, Replacement); Types.push_back(SubstParm); SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); } return QualType(SubstParm, 0); } /// Retrieve a QualType ASTContext::getSubstTemplateTypeParmPackType( const TemplateTypeParmType *Parm, const TemplateArgument &ArgPack) { #ifndef NDEBUG for (const auto &P : ArgPack.pack_elements()) { assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type"); assert(P.getAsType().isCanonical() && "Pack contains non-canonical type"); } #endif llvm::FoldingSetNodeID ID; SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack); void *InsertPos = nullptr; if (SubstTemplateTypeParmPackType *SubstParm = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(SubstParm, 0); QualType Canon; if (!Parm->isCanonicalUnqualified()) { Canon = getCanonicalType(QualType(Parm, 0)); Canon = getSubstTemplateTypeParmPackType(cast(Canon), ArgPack); SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); } auto *SubstParm = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon, ArgPack); Types.push_back(SubstParm); SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos); return QualType(SubstParm, 0); } /// Retrieve the template type parameter type for a template /// parameter or parameter pack with the given depth, index, and (optionally) /// name. 
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index, bool ParameterPack, TemplateTypeParmDecl *TTPDecl) const { llvm::FoldingSetNodeID ID; TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl); void *InsertPos = nullptr; TemplateTypeParmType *TypeParm = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); if (TypeParm) return QualType(TypeParm, 0); if (TTPDecl) { QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack); TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon); TemplateTypeParmType *TypeCheck = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!TypeCheck && "Template type parameter canonical type broken"); (void)TypeCheck; } else TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(Depth, Index, ParameterPack); Types.push_back(TypeParm); TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos); return QualType(TypeParm, 0); } TypeSourceInfo * ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name, SourceLocation NameLoc, const TemplateArgumentListInfo &Args, QualType Underlying) const { assert(!Name.getAsDependentTemplateName() && "No dependent template names here!"); QualType TST = getTemplateSpecializationType(Name, Args, Underlying); TypeSourceInfo *DI = CreateTypeSourceInfo(TST); TemplateSpecializationTypeLoc TL = DI->getTypeLoc().castAs(); TL.setTemplateKeywordLoc(SourceLocation()); TL.setTemplateNameLoc(NameLoc); TL.setLAngleLoc(Args.getLAngleLoc()); TL.setRAngleLoc(Args.getRAngleLoc()); for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) TL.setArgLocInfo(i, Args[i].getLocInfo()); return DI; } QualType ASTContext::getTemplateSpecializationType(TemplateName Template, const TemplateArgumentListInfo &Args, QualType Underlying) const { assert(!Template.getAsDependentTemplateName() && "No dependent template names here!"); SmallVector ArgVec; ArgVec.reserve(Args.size()); for (const TemplateArgumentLoc &Arg : Args.arguments()) ArgVec.push_back(Arg.getArgument()); return getTemplateSpecializationType(Template, ArgVec, Underlying); } #ifndef NDEBUG static bool hasAnyPackExpansions(ArrayRef Args) { for (const TemplateArgument &Arg : Args) if (Arg.isPackExpansion()) return true; return true; } #endif QualType ASTContext::getTemplateSpecializationType(TemplateName Template, ArrayRef Args, QualType Underlying) const { assert(!Template.getAsDependentTemplateName() && "No dependent template names here!"); // Look through qualified template names. if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) Template = TemplateName(QTN->getTemplateDecl()); bool IsTypeAlias = Template.getAsTemplateDecl() && isa(Template.getAsTemplateDecl()); QualType CanonType; if (!Underlying.isNull()) CanonType = getCanonicalType(Underlying); else { // We can get here with an alias template when the specialization contains // a pack expansion that does not match up with a parameter pack. assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && "Caller must compute aliased type"); IsTypeAlias = false; CanonType = getCanonicalTemplateSpecializationType(Template, Args); } // Allocate the (non-canonical) template specialization type, but don't // try to unique it: these types typically have location information that // we don't unique and don't want to lose. void *Mem = Allocate(sizeof(TemplateSpecializationType) + sizeof(TemplateArgument) * Args.size() + (IsTypeAlias? 
sizeof(QualType) : 0), TypeAlignment); auto *Spec = new (Mem) TemplateSpecializationType(Template, Args, CanonType, IsTypeAlias ? Underlying : QualType()); Types.push_back(Spec); return QualType(Spec, 0); } QualType ASTContext::getCanonicalTemplateSpecializationType( TemplateName Template, ArrayRef Args) const { assert(!Template.getAsDependentTemplateName() && "No dependent template names here!"); // Look through qualified template names. if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) Template = TemplateName(QTN->getTemplateDecl()); // Build the canonical template specialization type. TemplateName CanonTemplate = getCanonicalTemplateName(Template); SmallVector CanonArgs; unsigned NumArgs = Args.size(); CanonArgs.reserve(NumArgs); for (const TemplateArgument &Arg : Args) CanonArgs.push_back(getCanonicalTemplateArgument(Arg)); // Determine whether this canonical template specialization type already // exists. llvm::FoldingSetNodeID ID; TemplateSpecializationType::Profile(ID, CanonTemplate, CanonArgs, *this); void *InsertPos = nullptr; TemplateSpecializationType *Spec = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); if (!Spec) { // Allocate a new canonical template specialization type. void *Mem = Allocate((sizeof(TemplateSpecializationType) + sizeof(TemplateArgument) * NumArgs), TypeAlignment); Spec = new (Mem) TemplateSpecializationType(CanonTemplate, CanonArgs, QualType(), QualType()); Types.push_back(Spec); TemplateSpecializationTypes.InsertNode(Spec, InsertPos); } assert(Spec->isDependentType() && "Non-dependent template-id type must have a canonical type"); return QualType(Spec, 0); } QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, QualType NamedType, TagDecl *OwnedTagDecl) const { llvm::FoldingSetNodeID ID; ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); void *InsertPos = nullptr; ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); if (T) return QualType(T, 0); QualType Canon = NamedType; if (!Canon.isCanonical()) { Canon = getCanonicalType(NamedType); ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!CheckT && "Elaborated canonical type broken"); (void)CheckT; } void *Mem = Allocate(ElaboratedType::totalSizeToAlloc(!!OwnedTagDecl), TypeAlignment); T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); Types.push_back(T); ElaboratedTypes.InsertNode(T, InsertPos); return QualType(T, 0); } QualType ASTContext::getParenType(QualType InnerType) const { llvm::FoldingSetNodeID ID; ParenType::Profile(ID, InnerType); void *InsertPos = nullptr; ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); if (T) return QualType(T, 0); QualType Canon = InnerType; if (!Canon.isCanonical()) { Canon = getCanonicalType(InnerType); ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!CheckT && "Paren canonical type broken"); (void)CheckT; } T = new (*this, TypeAlignment) ParenType(InnerType, Canon); Types.push_back(T); ParenTypes.InsertNode(T, InsertPos); return QualType(T, 0); } QualType ASTContext::getMacroQualifiedType(QualType UnderlyingTy, const IdentifierInfo *MacroII) const { QualType Canon = UnderlyingTy; if (!Canon.isCanonical()) Canon = getCanonicalType(UnderlyingTy); auto *newType = new (*this, TypeAlignment) MacroQualifiedType(UnderlyingTy, Canon, MacroII); Types.push_back(newType); return QualType(newType, 0); } QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 
NestedNameSpecifier *NNS, const IdentifierInfo *Name, QualType Canon) const { if (Canon.isNull()) { NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); if (CanonNNS != NNS) Canon = getDependentNameType(Keyword, CanonNNS, Name); } llvm::FoldingSetNodeID ID; DependentNameType::Profile(ID, Keyword, NNS, Name); void *InsertPos = nullptr; DependentNameType *T = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); if (T) return QualType(T, 0); T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon); Types.push_back(T); DependentNameTypes.InsertNode(T, InsertPos); return QualType(T, 0); } QualType ASTContext::getDependentTemplateSpecializationType( ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, const IdentifierInfo *Name, const TemplateArgumentListInfo &Args) const { // TODO: avoid this copy SmallVector ArgCopy; for (unsigned I = 0, E = Args.size(); I != E; ++I) ArgCopy.push_back(Args[I].getArgument()); return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); } QualType ASTContext::getDependentTemplateSpecializationType( ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, const IdentifierInfo *Name, ArrayRef Args) const { assert((!NNS || NNS->isDependent()) && "nested-name-specifier must be dependent"); llvm::FoldingSetNodeID ID; DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, Name, Args); void *InsertPos = nullptr; DependentTemplateSpecializationType *T = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); if (T) return QualType(T, 0); NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); ElaboratedTypeKeyword CanonKeyword = Keyword; if (Keyword == ETK_None) CanonKeyword = ETK_Typename; bool AnyNonCanonArgs = false; unsigned NumArgs = Args.size(); SmallVector CanonArgs(NumArgs); for (unsigned I = 0; I != NumArgs; ++I) { CanonArgs[I] = getCanonicalTemplateArgument(Args[I]); if (!CanonArgs[I].structurallyEquals(Args[I])) AnyNonCanonArgs = true; } QualType Canon; if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, Name, CanonArgs); // Find the insert position again. 
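// (Illustrative note, not from the source.) The recursive call above may have
// inserted the canonical node into DependentTemplateSpecializationTypes and
// invalidated the InsertPos computed earlier, so it is recomputed before the
// written (non-canonical) node is added. This happens, for example, for a
// type spelled
//
//   T::template apply<Us...>
//
// without the 'typename' keyword: its canonical form uses ETK_Typename, a
// canonical nested-name-specifier, and canonical template arguments.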
DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); } void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + sizeof(TemplateArgument) * NumArgs), TypeAlignment); T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, Name, Args, Canon); Types.push_back(T); DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); return QualType(T, 0); } TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { TemplateArgument Arg; if (const auto *TTP = dyn_cast(Param)) { QualType ArgType = getTypeDeclType(TTP); if (TTP->isParameterPack()) ArgType = getPackExpansionType(ArgType, None); Arg = TemplateArgument(ArgType); } else if (auto *NTTP = dyn_cast(Param)) { Expr *E = new (*this) DeclRefExpr( *this, NTTP, /*enclosing*/ false, NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this), Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); if (NTTP->isParameterPack()) E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(), None); Arg = TemplateArgument(E); } else { auto *TTP = cast(Param); if (TTP->isParameterPack()) Arg = TemplateArgument(TemplateName(TTP), Optional()); else Arg = TemplateArgument(TemplateName(TTP)); } if (Param->isTemplateParameterPack()) Arg = TemplateArgument::CreatePackCopy(*this, Arg); return Arg; } void ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, SmallVectorImpl &Args) { Args.reserve(Args.size() + Params->size()); for (NamedDecl *Param : *Params) Args.push_back(getInjectedTemplateArg(Param)); } QualType ASTContext::getPackExpansionType(QualType Pattern, - Optional NumExpansions) { + Optional NumExpansions, + bool ExpectPackInType) { + assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && + "Pack expansions must expand one or more parameter packs"); + llvm::FoldingSetNodeID ID; PackExpansionType::Profile(ID, Pattern, NumExpansions); - // A deduced type can deduce to a pack, eg - // auto ...x = some_pack; - // That declaration isn't (yet) valid, but is created as part of building an - // init-capture pack: - // [...x = some_pack] {} - assert((Pattern->containsUnexpandedParameterPack() || - Pattern->getContainedDeducedType()) && - "Pack expansions must expand one or more parameter packs"); void *InsertPos = nullptr; - PackExpansionType *T - = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); + PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); if (T) return QualType(T, 0); QualType Canon; if (!Pattern.isCanonical()) { - Canon = getCanonicalType(Pattern); - // The canonical type might not contain an unexpanded parameter pack, if it - // contains an alias template specialization which ignores one of its - // parameters. - if (Canon->containsUnexpandedParameterPack()) { - Canon = getPackExpansionType(Canon, NumExpansions); - - // Find the insert position again, in case we inserted an element into - // PackExpansionTypes and invalidated our insert position. - PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); - } + Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, + /*ExpectPackInType=*/false); + + // Find the insert position again, in case we inserted an element into + // PackExpansionTypes and invalidated our insert position. 
+ PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); } T = new (*this, TypeAlignment) PackExpansionType(Pattern, Canon, NumExpansions); Types.push_back(T); PackExpansionTypes.InsertNode(T, InsertPos); return QualType(T, 0); } /// CmpProtocolNames - Comparison predicate for sorting protocols /// alphabetically. static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, ObjCProtocolDecl *const *RHS) { return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); } static bool areSortedAndUniqued(ArrayRef Protocols) { if (Protocols.empty()) return true; if (Protocols[0]->getCanonicalDecl() != Protocols[0]) return false; for (unsigned i = 1; i != Protocols.size(); ++i) if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || Protocols[i]->getCanonicalDecl() != Protocols[i]) return false; return true; } static void SortAndUniqueProtocols(SmallVectorImpl &Protocols) { // Sort protocols, keyed by name. llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); // Canonicalize. for (ObjCProtocolDecl *&P : Protocols) P = P->getCanonicalDecl(); // Remove duplicates. auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); Protocols.erase(ProtocolsEnd, Protocols.end()); } QualType ASTContext::getObjCObjectType(QualType BaseType, ObjCProtocolDecl * const *Protocols, unsigned NumProtocols) const { return getObjCObjectType(BaseType, {}, llvm::makeArrayRef(Protocols, NumProtocols), /*isKindOf=*/false); } QualType ASTContext::getObjCObjectType( QualType baseType, ArrayRef typeArgs, ArrayRef protocols, bool isKindOf) const { // If the base type is an interface and there aren't any protocols or // type arguments to add, then the interface type will do just fine. if (typeArgs.empty() && protocols.empty() && !isKindOf && isa(baseType)) return baseType; // Look in the folding set for an existing type. llvm::FoldingSetNodeID ID; ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); void *InsertPos = nullptr; if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(QT, 0); // Determine the type arguments to be used for canonicalization, // which may be explicitly specified here or written on the base // type. ArrayRef effectiveTypeArgs = typeArgs; if (effectiveTypeArgs.empty()) { if (const auto *baseObject = baseType->getAs()) effectiveTypeArgs = baseObject->getTypeArgs(); } // Build the canonical type, which has the canonical base type and a // sorted-and-uniqued list of protocols and the type arguments // canonicalized. QualType canonical; bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(), effectiveTypeArgs.end(), [&](QualType type) { return type.isCanonical(); }); bool protocolsSorted = areSortedAndUniqued(protocols); if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { // Determine the canonical type arguments. 
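// Illustrative example (not from the source): for an Objective-C type written
// as NSArray<MyString> * (where MyString is a typedef of NSString *), the
// object type is canonicalized by canonicalizing the base type and each type
// argument and by sorting and uniquing any protocol qualifiers, so different
// sugared spellings end up sharing one canonical ObjCObjectType.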
ArrayRef canonTypeArgs; SmallVector canonTypeArgsVec; if (!typeArgsAreCanonical) { canonTypeArgsVec.reserve(effectiveTypeArgs.size()); for (auto typeArg : effectiveTypeArgs) canonTypeArgsVec.push_back(getCanonicalType(typeArg)); canonTypeArgs = canonTypeArgsVec; } else { canonTypeArgs = effectiveTypeArgs; } ArrayRef canonProtocols; SmallVector canonProtocolsVec; if (!protocolsSorted) { canonProtocolsVec.append(protocols.begin(), protocols.end()); SortAndUniqueProtocols(canonProtocolsVec); canonProtocols = canonProtocolsVec; } else { canonProtocols = protocols; } canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, canonProtocols, isKindOf); // Regenerate InsertPos. ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); } unsigned size = sizeof(ObjCObjectTypeImpl); size += typeArgs.size() * sizeof(QualType); size += protocols.size() * sizeof(ObjCProtocolDecl *); void *mem = Allocate(size, TypeAlignment); auto *T = new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, isKindOf); Types.push_back(T); ObjCObjectTypes.InsertNode(T, InsertPos); return QualType(T, 0); } /// Apply Objective-C protocol qualifiers to the given type. /// If this is for the canonical type of a type parameter, we can apply /// protocol qualifiers on the ObjCObjectPointerType. QualType ASTContext::applyObjCProtocolQualifiers(QualType type, ArrayRef protocols, bool &hasError, bool allowOnPointerType) const { hasError = false; if (const auto *objT = dyn_cast(type.getTypePtr())) { return getObjCTypeParamType(objT->getDecl(), protocols); } // Apply protocol qualifiers to ObjCObjectPointerType. if (allowOnPointerType) { if (const auto *objPtr = dyn_cast(type.getTypePtr())) { const ObjCObjectType *objT = objPtr->getObjectType(); // Merge protocol lists and construct ObjCObjectType. SmallVector protocolsVec; protocolsVec.append(objT->qual_begin(), objT->qual_end()); protocolsVec.append(protocols.begin(), protocols.end()); ArrayRef protocols = protocolsVec; type = getObjCObjectType( objT->getBaseType(), objT->getTypeArgsAsWritten(), protocols, objT->isKindOfTypeAsWritten()); return getObjCObjectPointerType(type); } } // Apply protocol qualifiers to ObjCObjectType. if (const auto *objT = dyn_cast(type.getTypePtr())){ // FIXME: Check for protocols to which the class type is already // known to conform. return getObjCObjectType(objT->getBaseType(), objT->getTypeArgsAsWritten(), protocols, objT->isKindOfTypeAsWritten()); } // If the canonical type is ObjCObjectType, ... if (type->isObjCObjectType()) { // Silently overwrite any existing protocol qualifiers. // TODO: determine whether that's the right thing to do. // FIXME: Check for protocols to which the class type is already // known to conform. return getObjCObjectType(type, {}, protocols, false); } // id if (type->isObjCIdType()) { const auto *objPtr = type->castAs(); type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, objPtr->isKindOfType()); return getObjCObjectPointerType(type); } // Class if (type->isObjCClassType()) { const auto *objPtr = type->castAs(); type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, objPtr->isKindOfType()); return getObjCObjectPointerType(type); } hasError = true; return type; } QualType ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, ArrayRef protocols) const { // Look in the folding set for an existing type. 
llvm::FoldingSetNodeID ID; ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); void *InsertPos = nullptr; if (ObjCTypeParamType *TypeParam = ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(TypeParam, 0); // We canonicalize to the underlying type. QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); if (!protocols.empty()) { // Apply the protocol qualifers. bool hasError; Canonical = getCanonicalType(applyObjCProtocolQualifiers( Canonical, protocols, hasError, true /*allowOnPointerType*/)); assert(!hasError && "Error when apply protocol qualifier to bound type"); } unsigned size = sizeof(ObjCTypeParamType); size += protocols.size() * sizeof(ObjCProtocolDecl *); void *mem = Allocate(size, TypeAlignment); auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); Types.push_back(newType); ObjCTypeParamTypes.InsertNode(newType, InsertPos); return QualType(newType, 0); } void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, ObjCTypeParamDecl *New) const { New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); // Update TypeForDecl after updating TypeSourceInfo. auto NewTypeParamTy = cast(New->getTypeForDecl()); SmallVector protocols; protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); QualType UpdatedTy = getObjCTypeParamType(New, protocols); New->setTypeForDecl(UpdatedTy.getTypePtr()); } /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's /// protocol list adopt all protocols in QT's qualified-id protocol /// list. bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, ObjCInterfaceDecl *IC) { if (!QT->isObjCQualifiedIdType()) return false; if (const auto *OPT = QT->getAs()) { // If both the right and left sides have qualifiers. for (auto *Proto : OPT->quals()) { if (!IC->ClassImplementsProtocol(Proto, false)) return false; } return true; } return false; } /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in /// QT's qualified-id protocol list adopt all protocols in IDecl's list /// of protocols. bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, ObjCInterfaceDecl *IDecl) { if (!QT->isObjCQualifiedIdType()) return false; const auto *OPT = QT->getAs(); if (!OPT) return false; if (!IDecl->hasDefinition()) return false; llvm::SmallPtrSet InheritedProtocols; CollectInheritedProtocols(IDecl, InheritedProtocols); if (InheritedProtocols.empty()) return false; // Check that if every protocol in list of id conforms to a protocol // of IDecl's, then bridge casting is ok. bool Conforms = false; for (auto *Proto : OPT->quals()) { Conforms = false; for (auto *PI : InheritedProtocols) { if (ProtocolCompatibleWithProtocol(Proto, PI)) { Conforms = true; break; } } if (!Conforms) break; } if (Conforms) return true; for (auto *PI : InheritedProtocols) { // If both the right and left sides have qualifiers. bool Adopts = false; for (auto *Proto : OPT->quals()) { // return 'true' if 'PI' is in the inheritance hierarchy of Proto if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) break; } if (!Adopts) return false; } return true; } /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for /// the given object type. 
QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { llvm::FoldingSetNodeID ID; ObjCObjectPointerType::Profile(ID, ObjectT); void *InsertPos = nullptr; if (ObjCObjectPointerType *QT = ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(QT, 0); // Find the canonical object type. QualType Canonical; if (!ObjectT.isCanonical()) { Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); // Regenerate InsertPos. ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); } // No match. void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment); auto *QType = new (Mem) ObjCObjectPointerType(Canonical, ObjectT); Types.push_back(QType); ObjCObjectPointerTypes.InsertNode(QType, InsertPos); return QualType(QType, 0); } /// getObjCInterfaceType - Return the unique reference to the type for the /// specified ObjC interface decl. The list of protocols is optional. QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, ObjCInterfaceDecl *PrevDecl) const { if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); if (PrevDecl) { assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); Decl->TypeForDecl = PrevDecl->TypeForDecl; return QualType(PrevDecl->TypeForDecl, 0); } // Prefer the definition, if there is one. if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) Decl = Def; void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment); auto *T = new (Mem) ObjCInterfaceType(Decl); Decl->TypeForDecl = T; Types.push_back(T); return QualType(T, 0); } /// getTypeOfExprType - Unlike many "get" functions, we can't unique /// TypeOfExprType AST's (since expression's are never shared). For example, /// multiple declarations that refer to "typeof(x)" all contain different /// DeclRefExpr's. This doesn't effect the type checker, since it operates /// on canonical type's (which are always unique). QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const { TypeOfExprType *toe; if (tofExpr->isTypeDependent()) { llvm::FoldingSetNodeID ID; DependentTypeOfExprType::Profile(ID, *this, tofExpr); void *InsertPos = nullptr; DependentTypeOfExprType *Canon = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); if (Canon) { // We already have a "canonical" version of an identical, dependent // typeof(expr) type. Use that as our canonical type. toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, QualType((TypeOfExprType*)Canon, 0)); } else { // Build a new, canonical typeof(expr) type. Canon = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr); DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); toe = Canon; } } else { QualType Canonical = getCanonicalType(tofExpr->getType()); toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical); } Types.push_back(toe); return QualType(toe, 0); } /// getTypeOfType - Unlike many "get" functions, we don't unique /// TypeOfType nodes. The only motivation to unique these nodes would be /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be /// an issue. This doesn't affect the type checker, since it operates /// on canonical types (which are always unique). QualType ASTContext::getTypeOfType(QualType tofType) const { QualType Canonical = getCanonicalType(tofType); auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical); Types.push_back(tot); return QualType(tot, 0); } /// Unlike many "get" functions, we don't unique DecltypeType /// nodes. 
This would never be helpful, since each such type has its own /// expression, and would not give a significant memory saving, since there /// is an Expr tree under each such type. QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { DecltypeType *dt; // C++11 [temp.type]p2: // If an expression e involves a template parameter, decltype(e) denotes a // unique dependent type. Two such decltype-specifiers refer to the same // type only if their expressions are equivalent (14.5.6.1). if (e->isInstantiationDependent()) { llvm::FoldingSetNodeID ID; DependentDecltypeType::Profile(ID, *this, e); void *InsertPos = nullptr; DependentDecltypeType *Canon = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); if (!Canon) { // Build a new, canonical decltype(expr) type. Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e); DependentDecltypeTypes.InsertNode(Canon, InsertPos); } dt = new (*this, TypeAlignment) DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); } else { dt = new (*this, TypeAlignment) DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); } Types.push_back(dt); return QualType(dt, 0); } /// getUnaryTransformationType - We don't unique these, since the memory /// savings are minimal and these are rare. QualType ASTContext::getUnaryTransformType(QualType BaseType, QualType UnderlyingType, UnaryTransformType::UTTKind Kind) const { UnaryTransformType *ut = nullptr; if (BaseType->isDependentType()) { // Look in the folding set for an existing type. llvm::FoldingSetNodeID ID; DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); void *InsertPos = nullptr; DependentUnaryTransformType *Canon = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); if (!Canon) { // Build a new, canonical __underlying_type(type) type. Canon = new (*this, TypeAlignment) DependentUnaryTransformType(*this, getCanonicalType(BaseType), Kind); DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); } ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, QualType(), Kind, QualType(Canon, 0)); } else { QualType CanonType = getCanonicalType(UnderlyingType); ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, UnderlyingType, Kind, CanonType); } Types.push_back(ut); return QualType(ut, 0); } /// getAutoType - Return the uniqued reference to the 'auto' type which has been /// deduced to the given type, or to the canonical undeduced 'auto' type, or the /// canonical deduced-but-dependent 'auto' type. QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, bool IsPack, ConceptDecl *TypeConstraintConcept, ArrayRef TypeConstraintArgs) const { assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !TypeConstraintConcept && !IsDependent) return getAutoDeductType(); // Look in the folding set for an existing type. void *InsertPos = nullptr; llvm::FoldingSetNodeID ID; AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, TypeConstraintConcept, TypeConstraintArgs); if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(AT, 0); void *Mem = Allocate(sizeof(AutoType) + sizeof(TemplateArgument) * TypeConstraintArgs.size(), TypeAlignment); auto *AT = new (Mem) AutoType( DeducedType, Keyword, (IsDependent ? TypeDependence::DependentInstantiation : TypeDependence::None) | (IsPack ? 
TypeDependence::UnexpandedPack : TypeDependence::None), TypeConstraintConcept, TypeConstraintArgs); Types.push_back(AT); if (InsertPos) AutoTypes.InsertNode(AT, InsertPos); return QualType(AT, 0); } /// Return the uniqued reference to the deduced template specialization type /// which has been deduced to the given type, or to the canonical undeduced /// such type, or the canonical deduced-but-dependent such type. QualType ASTContext::getDeducedTemplateSpecializationType( TemplateName Template, QualType DeducedType, bool IsDependent) const { // Look in the folding set for an existing type. void *InsertPos = nullptr; llvm::FoldingSetNodeID ID; DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, IsDependent); if (DeducedTemplateSpecializationType *DTST = DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(DTST, 0); auto *DTST = new (*this, TypeAlignment) DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); Types.push_back(DTST); if (InsertPos) DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); return QualType(DTST, 0); } /// getAtomicType - Return the uniqued reference to the atomic type for /// the given value type. QualType ASTContext::getAtomicType(QualType T) const { // Unique pointers, to guarantee there is only one pointer of a particular // structure. llvm::FoldingSetNodeID ID; AtomicType::Profile(ID, T); void *InsertPos = nullptr; if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(AT, 0); // If the atomic value type isn't canonical, this won't be a canonical type // either, so fill in the canonical type field. QualType Canonical; if (!T.isCanonical()) { Canonical = getAtomicType(getCanonicalType(T)); // Get the new insert position for the node we care about. AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical); Types.push_back(New); AtomicTypes.InsertNode(New, InsertPos); return QualType(New, 0); } /// getAutoDeductType - Get type pattern for deducing against 'auto'. QualType ASTContext::getAutoDeductType() const { if (AutoDeductTy.isNull()) AutoDeductTy = QualType(new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto, TypeDependence::None, /*concept*/ nullptr, /*args*/ {}), 0); return AutoDeductTy; } /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. QualType ASTContext::getAutoRRefDeductType() const { if (AutoRRefDeductTy.isNull()) AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); return AutoRRefDeductTy; } /// getTagDeclType - Return the unique reference to the type for the /// specified TagDecl (struct/union/class/enum) decl. QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { assert(Decl); // FIXME: What is the design on getTagDeclType when it requires casting // away const? mutable? return getTypeDeclType(const_cast(Decl)); } /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and /// needs to agree with the definition in . CanQualType ASTContext::getSizeType() const { return getFromTargetType(Target->getSizeType()); } /// Return the unique signed counterpart of the integer type /// corresponding to size_t. 
CanQualType ASTContext::getSignedSizeType() const { return getFromTargetType(Target->getSignedSizeType()); } /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). CanQualType ASTContext::getIntMaxType() const { return getFromTargetType(Target->getIntMaxType()); } /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). CanQualType ASTContext::getUIntMaxType() const { return getFromTargetType(Target->getUIntMaxType()); } /// getSignedWCharType - Return the type of "signed wchar_t". /// Used when in C++, as a GCC extension. QualType ASTContext::getSignedWCharType() const { // FIXME: derive from "Target" ? return WCharTy; } /// getUnsignedWCharType - Return the type of "unsigned wchar_t". /// Used when in C++, as a GCC extension. QualType ASTContext::getUnsignedWCharType() const { // FIXME: derive from "Target" ? return UnsignedIntTy; } QualType ASTContext::getIntPtrType() const { return getFromTargetType(Target->getIntPtrType()); } QualType ASTContext::getUIntPtrType() const { return getCorrespondingUnsignedType(getIntPtrType()); } /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) /// defined in . Pointer - pointer requires this (C99 6.5.6p9). QualType ASTContext::getPointerDiffType() const { return getFromTargetType(Target->getPtrDiffType(0)); } /// Return the unique unsigned counterpart of "ptrdiff_t" /// integer type. The standard (C11 7.21.6.1p7) refers to this type /// in the definition of %tu format specifier. QualType ASTContext::getUnsignedPointerDiffType() const { return getFromTargetType(Target->getUnsignedPtrDiffType(0)); } /// Return the unique type for "pid_t" defined in /// . We need this to compute the correct type for vfork(). QualType ASTContext::getProcessIDType() const { return getFromTargetType(Target->getProcessIDType()); } //===----------------------------------------------------------------------===// // Type Operators //===----------------------------------------------------------------------===// CanQualType ASTContext::getCanonicalParamType(QualType T) const { // Push qualifiers into arrays, and then discard any remaining // qualifiers. T = getCanonicalType(T); T = getVariableArrayDecayedType(T); const Type *Ty = T.getTypePtr(); QualType Result; if (isa(Ty)) { Result = getArrayDecayedType(QualType(Ty,0)); } else if (isa(Ty)) { Result = getPointerType(QualType(Ty, 0)); } else { Result = QualType(Ty, 0); } return CanQualType::CreateUnsafe(Result); } QualType ASTContext::getUnqualifiedArrayType(QualType type, Qualifiers &quals) { SplitQualType splitType = type.getSplitUnqualifiedType(); // FIXME: getSplitUnqualifiedType() actually walks all the way to // the unqualified desugared type and then drops it on the floor. // We then have to strip that sugar back off with // getUnqualifiedDesugaredType(), which is silly. const auto *AT = dyn_cast(splitType.Ty->getUnqualifiedDesugaredType()); // If we don't have an array, just use the results in splitType. if (!AT) { quals = splitType.Quals; return QualType(splitType.Ty, 0); } // Otherwise, recurse on the array's element type. QualType elementType = AT->getElementType(); QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); // If that didn't change the element type, AT has no qualifiers, so we // can just use the results in splitType. 
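// Concrete example (illustrative, not from the source):
//
//   typedef const int CI;
//   typedef CI A[5];   // element type as written is 'const int'
//
// For 'A' the recursive call strips 'const' from the element type, so
// unqualElementType ('int') differs from elementType ('const int') and the
// array is rebuilt below with the qualifier reported through 'quals'; for a
// plain 'int[5]' nothing changes and the split type is returned as-is.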
if (elementType == unqualElementType) { assert(quals.empty()); // from the recursive call quals = splitType.Quals; return QualType(splitType.Ty, 0); } // Otherwise, add in the qualifiers from the outermost type, then // build the type back up. quals.addConsistentQualifiers(splitType.Quals); if (const auto *CAT = dyn_cast(AT)) { return getConstantArrayType(unqualElementType, CAT->getSize(), CAT->getSizeExpr(), CAT->getSizeModifier(), 0); } if (const auto *IAT = dyn_cast(AT)) { return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); } if (const auto *VAT = dyn_cast(AT)) { return getVariableArrayType(unqualElementType, VAT->getSizeExpr(), VAT->getSizeModifier(), VAT->getIndexTypeCVRQualifiers(), VAT->getBracketsRange()); } const auto *DSAT = cast(AT); return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), DSAT->getSizeModifier(), 0, SourceRange()); } /// Attempt to unwrap two types that may both be array types with the same bound /// (or both be array types of unknown bound) for the purpose of comparing the /// cv-decomposition of two types per C++ [conv.qual]. bool ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) { bool UnwrappedAny = false; while (true) { auto *AT1 = getAsArrayType(T1); if (!AT1) return UnwrappedAny; auto *AT2 = getAsArrayType(T2); if (!AT2) return UnwrappedAny; // If we don't have two array types with the same constant bound nor two // incomplete array types, we've unwrapped everything we can. if (auto *CAT1 = dyn_cast(AT1)) { auto *CAT2 = dyn_cast(AT2); if (!CAT2 || CAT1->getSize() != CAT2->getSize()) return UnwrappedAny; } else if (!isa(AT1) || !isa(AT2)) { return UnwrappedAny; } T1 = AT1->getElementType(); T2 = AT2->getElementType(); UnwrappedAny = true; } } /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). /// /// If T1 and T2 are both pointer types of the same kind, or both array types /// with the same bound, unwraps layers from T1 and T2 until a pointer type is /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. /// /// This function will typically be called in a loop that successively /// "unwraps" pointer and pointer-to-member types to compare them at each /// level. /// /// \return \c true if a pointer type was unwrapped, \c false if we reached a /// pair of types that can't be unwrapped further. bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) { UnwrapSimilarArrayTypes(T1, T2); const auto *T1PtrType = T1->getAs(); const auto *T2PtrType = T2->getAs(); if (T1PtrType && T2PtrType) { T1 = T1PtrType->getPointeeType(); T2 = T2PtrType->getPointeeType(); return true; } const auto *T1MPType = T1->getAs(); const auto *T2MPType = T2->getAs(); if (T1MPType && T2MPType && hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), QualType(T2MPType->getClass(), 0))) { T1 = T1MPType->getPointeeType(); T2 = T2MPType->getPointeeType(); return true; } if (getLangOpts().ObjC) { const auto *T1OPType = T1->getAs(); const auto *T2OPType = T2->getAs(); if (T1OPType && T2OPType) { T1 = T1OPType->getPointeeType(); T2 = T2OPType->getPointeeType(); return true; } } // FIXME: Block pointers, too? 
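// Usage sketch (illustrative, not from the source): callers such as
// hasSimilarType() invoke this in a loop, stripping one level per iteration.
// For T1 = 'int **' and T2 = 'const int *const *', two calls unwrap the two
// pointer levels and a third returns false once 'int' and 'const int' are
// all that remain, letting the caller compare the final types directly.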
return false; } bool ASTContext::hasSimilarType(QualType T1, QualType T2) { while (true) { Qualifiers Quals; T1 = getUnqualifiedArrayType(T1, Quals); T2 = getUnqualifiedArrayType(T2, Quals); if (hasSameType(T1, T2)) return true; if (!UnwrapSimilarTypes(T1, T2)) return false; } } bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { while (true) { Qualifiers Quals1, Quals2; T1 = getUnqualifiedArrayType(T1, Quals1); T2 = getUnqualifiedArrayType(T2, Quals2); Quals1.removeCVRQualifiers(); Quals2.removeCVRQualifiers(); if (Quals1 != Quals2) return false; if (hasSameType(T1, T2)) return true; if (!UnwrapSimilarTypes(T1, T2)) return false; } } DeclarationNameInfo ASTContext::getNameForTemplate(TemplateName Name, SourceLocation NameLoc) const { switch (Name.getKind()) { case TemplateName::QualifiedTemplate: case TemplateName::Template: // DNInfo work in progress: CHECKME: what about DNLoc? return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), NameLoc); case TemplateName::OverloadedTemplate: { OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); // DNInfo work in progress: CHECKME: what about DNLoc? return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); } case TemplateName::AssumedTemplate: { AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); return DeclarationNameInfo(Storage->getDeclName(), NameLoc); } case TemplateName::DependentTemplate: { DependentTemplateName *DTN = Name.getAsDependentTemplateName(); DeclarationName DName; if (DTN->isIdentifier()) { DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); return DeclarationNameInfo(DName, NameLoc); } else { DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); // DNInfo work in progress: FIXME: source locations? DeclarationNameLoc DNLoc; DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding(); DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding(); return DeclarationNameInfo(DName, NameLoc, DNLoc); } } case TemplateName::SubstTemplateTemplateParm: { SubstTemplateTemplateParmStorage *subst = Name.getAsSubstTemplateTemplateParm(); return DeclarationNameInfo(subst->getParameter()->getDeclName(), NameLoc); } case TemplateName::SubstTemplateTemplateParmPack: { SubstTemplateTemplateParmPackStorage *subst = Name.getAsSubstTemplateTemplateParmPack(); return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), NameLoc); } } llvm_unreachable("bad template name kind!"); } TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const { switch (Name.getKind()) { case TemplateName::QualifiedTemplate: case TemplateName::Template: { TemplateDecl *Template = Name.getAsTemplateDecl(); if (auto *TTP = dyn_cast(Template)) Template = getCanonicalTemplateTemplateParmDecl(TTP); // The canonical template name is the canonical template declaration. 
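// For example (illustrative only): a template named through a qualified name
// such as 'ns::vec' and the same template named via a using-declaration both
// canonicalize to the TemplateName of the canonical TemplateDecl, which is
// what lets hasSameTemplateName() below compare names by pointer identity.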
return TemplateName(cast(Template->getCanonicalDecl())); } case TemplateName::OverloadedTemplate: case TemplateName::AssumedTemplate: llvm_unreachable("cannot canonicalize unresolved template"); case TemplateName::DependentTemplate: { DependentTemplateName *DTN = Name.getAsDependentTemplateName(); assert(DTN && "Non-dependent template names must refer to template decls."); return DTN->CanonicalTemplateName; } case TemplateName::SubstTemplateTemplateParm: { SubstTemplateTemplateParmStorage *subst = Name.getAsSubstTemplateTemplateParm(); return getCanonicalTemplateName(subst->getReplacement()); } case TemplateName::SubstTemplateTemplateParmPack: { SubstTemplateTemplateParmPackStorage *subst = Name.getAsSubstTemplateTemplateParmPack(); TemplateTemplateParmDecl *canonParameter = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack()); TemplateArgument canonArgPack = getCanonicalTemplateArgument(subst->getArgumentPack()); return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack); } } llvm_unreachable("bad template name!"); } bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) { X = getCanonicalTemplateName(X); Y = getCanonicalTemplateName(Y); return X.getAsVoidPointer() == Y.getAsVoidPointer(); } TemplateArgument ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { switch (Arg.getKind()) { case TemplateArgument::Null: return Arg; case TemplateArgument::Expression: return Arg; case TemplateArgument::Declaration: { auto *D = cast(Arg.getAsDecl()->getCanonicalDecl()); return TemplateArgument(D, Arg.getParamTypeForDecl()); } case TemplateArgument::NullPtr: return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), /*isNullPtr*/true); case TemplateArgument::Template: return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate())); case TemplateArgument::TemplateExpansion: return TemplateArgument(getCanonicalTemplateName( Arg.getAsTemplateOrTemplatePattern()), Arg.getNumTemplateExpansions()); case TemplateArgument::Integral: return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); case TemplateArgument::Type: return TemplateArgument(getCanonicalType(Arg.getAsType())); case TemplateArgument::Pack: { if (Arg.pack_size() == 0) return Arg; auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()]; unsigned Idx = 0; for (TemplateArgument::pack_iterator A = Arg.pack_begin(), AEnd = Arg.pack_end(); A != AEnd; (void)++A, ++Idx) CanonArgs[Idx] = getCanonicalTemplateArgument(*A); return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size())); } } // Silence GCC warning llvm_unreachable("Unhandled template argument kind"); } NestedNameSpecifier * ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { if (!NNS) return nullptr; switch (NNS->getKind()) { case NestedNameSpecifier::Identifier: // Canonicalize the prefix but keep the identifier the same. return NestedNameSpecifier::Create(*this, getCanonicalNestedNameSpecifier(NNS->getPrefix()), NNS->getAsIdentifier()); case NestedNameSpecifier::Namespace: // A namespace is canonical; build a nested-name-specifier with // this namespace and no prefix. return NestedNameSpecifier::Create(*this, nullptr, NNS->getAsNamespace()->getOriginalNamespace()); case NestedNameSpecifier::NamespaceAlias: // A namespace is canonical; build a nested-name-specifier with // this namespace and no prefix. 
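// Illustrative example (not from the source):
//
//   namespace very_long_name { struct S; }
//   namespace vl = very_long_name;
//
// 'vl::S' and 'very_long_name::S' produce the same canonical specifier: the
// alias is resolved to the original namespace and any prefix is dropped.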
return NestedNameSpecifier::Create(*this, nullptr, NNS->getAsNamespaceAlias()->getNamespace() ->getOriginalNamespace()); case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: { QualType T = getCanonicalType(QualType(NNS->getAsType(), 0)); // If we have some kind of dependent-named type (e.g., "typename T::type"), // break it apart into its prefix and identifier, then reconsititute those // as the canonical nested-name-specifier. This is required to canonicalize // a dependent nested-name-specifier involving typedefs of dependent-name // types, e.g., // typedef typename T::type T1; // typedef typename T1::type T2; if (const auto *DNT = T->getAs()) return NestedNameSpecifier::Create(*this, DNT->getQualifier(), const_cast(DNT->getIdentifier())); // Otherwise, just canonicalize the type, and force it to be a TypeSpec. // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the // first place? return NestedNameSpecifier::Create(*this, nullptr, false, const_cast(T.getTypePtr())); } case NestedNameSpecifier::Global: case NestedNameSpecifier::Super: // The global specifier and __super specifer are canonical and unique. return NNS; } llvm_unreachable("Invalid NestedNameSpecifier::Kind!"); } const ArrayType *ASTContext::getAsArrayType(QualType T) const { // Handle the non-qualified case efficiently. if (!T.hasLocalQualifiers()) { // Handle the common positive case fast. if (const auto *AT = dyn_cast(T)) return AT; } // Handle the common negative case fast. if (!isa(T.getCanonicalType())) return nullptr; // Apply any qualifiers from the array type to the element type. This // implements C99 6.7.3p8: "If the specification of an array type includes // any type qualifiers, the element type is so qualified, not the array type." // If we get here, we either have type qualifiers on the type, or we have // sugar such as a typedef in the way. If we have type qualifiers on the type // we must propagate them down into the element type. SplitQualType split = T.getSplitDesugaredType(); Qualifiers qs = split.Quals; // If we have a simple case, just return now. const auto *ATy = dyn_cast(split.Ty); if (!ATy || qs.empty()) return ATy; // Otherwise, we have an array and we have qualifiers on it. Push the // qualifiers into the array element type and return a new array type. 
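// Illustrative case of the qualifier propagation performed below, per
// C99 6.7.3p8 (a sketch, assuming a plain typedef'd array):
//   typedef int A[5];
//   const A ca;            // qualifies the elements, not the array itself
// getAsArrayType() on ca's type yields 'const int [5]', i.e. the top-level
// 'const' has been pushed down into the element type.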
QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); if (const auto *CAT = dyn_cast(ATy)) return cast(getConstantArrayType(NewEltTy, CAT->getSize(), CAT->getSizeExpr(), CAT->getSizeModifier(), CAT->getIndexTypeCVRQualifiers())); if (const auto *IAT = dyn_cast(ATy)) return cast(getIncompleteArrayType(NewEltTy, IAT->getSizeModifier(), IAT->getIndexTypeCVRQualifiers())); if (const auto *DSAT = dyn_cast(ATy)) return cast( getDependentSizedArrayType(NewEltTy, DSAT->getSizeExpr(), DSAT->getSizeModifier(), DSAT->getIndexTypeCVRQualifiers(), DSAT->getBracketsRange())); const auto *VAT = cast(ATy); return cast(getVariableArrayType(NewEltTy, VAT->getSizeExpr(), VAT->getSizeModifier(), VAT->getIndexTypeCVRQualifiers(), VAT->getBracketsRange())); } QualType ASTContext::getAdjustedParameterType(QualType T) const { if (T->isArrayType() || T->isFunctionType()) return getDecayedType(T); return T; } QualType ASTContext::getSignatureParameterType(QualType T) const { T = getVariableArrayDecayedType(T); T = getAdjustedParameterType(T); return T.getUnqualifiedType(); } QualType ASTContext::getExceptionObjectType(QualType T) const { // C++ [except.throw]p3: // A throw-expression initializes a temporary object, called the exception // object, the type of which is determined by removing any top-level // cv-qualifiers from the static type of the operand of throw and adjusting // the type from "array of T" or "function returning T" to "pointer to T" // or "pointer to function returning T", [...] T = getVariableArrayDecayedType(T); if (T->isArrayType() || T->isFunctionType()) T = getDecayedType(T); return T.getUnqualifiedType(); } /// getArrayDecayedType - Return the properly qualified result of decaying the /// specified array type to a pointer. This operation is non-trivial when /// handling typedefs etc. The canonical type of "T" must be an array type, /// this returns a pointer to a properly qualified element of the array. /// /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. QualType ASTContext::getArrayDecayedType(QualType Ty) const { // Get the element type with 'getAsArrayType' so that we don't lose any // typedefs in the element type of the array. This also handles propagation // of type qualifiers from the array type into the element type if present // (C99 6.7.3p8). const ArrayType *PrettyArrayType = getAsArrayType(Ty); assert(PrettyArrayType && "Not an array type!"); QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); // int x[restrict 4] -> int *restrict QualType Result = getQualifiedType(PtrTy, PrettyArrayType->getIndexTypeQualifiers()); // int x[_Nullable] -> int * _Nullable if (auto Nullability = Ty->getNullability(*this)) { Result = const_cast(this)->getAttributedType( AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); } return Result; } QualType ASTContext::getBaseElementType(const ArrayType *array) const { return getBaseElementType(array->getElementType()); } QualType ASTContext::getBaseElementType(QualType type) const { Qualifiers qs; while (true) { SplitQualType split = type.getSplitDesugaredType(); const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); if (!array) break; type = array->getElementType(); qs.addConsistentQualifiers(split.Quals); } return getQualifiedType(type, qs); } /// getConstantArrayElementCount - Returns number of constant array elements. 
uint64_t ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { uint64_t ElementCount = 1; do { ElementCount *= CA->getSize().getZExtValue(); CA = dyn_cast_or_null( CA->getElementType()->getAsArrayTypeUnsafe()); } while (CA); return ElementCount; } /// getFloatingRank - Return a relative rank for floating point types. /// This routine will assert if passed a built-in type that isn't a float. static FloatingRank getFloatingRank(QualType T) { if (const auto *CT = T->getAs()) return getFloatingRank(CT->getElementType()); switch (T->castAs()->getKind()) { default: llvm_unreachable("getFloatingRank(): not a floating type"); case BuiltinType::Float16: return Float16Rank; case BuiltinType::Half: return HalfRank; case BuiltinType::Float: return FloatRank; case BuiltinType::Double: return DoubleRank; case BuiltinType::LongDouble: return LongDoubleRank; case BuiltinType::Float128: return Float128Rank; case BuiltinType::BFloat16: return BFloat16Rank; } } /// getFloatingTypeOfSizeWithinDomain - Returns a real floating /// point or a complex type (based on typeDomain/typeSize). /// 'typeDomain' is a real floating point or complex type. /// 'typeSize' is a real floating point or complex type. QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size, QualType Domain) const { FloatingRank EltRank = getFloatingRank(Size); if (Domain->isComplexType()) { switch (EltRank) { case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported"); case Float16Rank: case HalfRank: llvm_unreachable("Complex half is not supported"); case FloatRank: return FloatComplexTy; case DoubleRank: return DoubleComplexTy; case LongDoubleRank: return LongDoubleComplexTy; case Float128Rank: return Float128ComplexTy; } } assert(Domain->isRealFloatingType() && "Unknown domain!"); switch (EltRank) { case Float16Rank: return HalfTy; case BFloat16Rank: return BFloat16Ty; case HalfRank: return HalfTy; case FloatRank: return FloatTy; case DoubleRank: return DoubleTy; case LongDoubleRank: return LongDoubleTy; case Float128Rank: return Float128Ty; } llvm_unreachable("getFloatingRank(): illegal value for rank"); } /// getFloatingTypeOrder - Compare the rank of the two specified floating /// point types, ignoring the domain of the type (i.e. 'double' == /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If /// LHS < RHS, return -1. int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { FloatingRank LHSR = getFloatingRank(LHS); FloatingRank RHSR = getFloatingRank(RHS); if (LHSR == RHSR) return 0; if (LHSR > RHSR) return 1; return -1; } int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) return 0; return getFloatingTypeOrder(LHS, RHS); } /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This /// routine will assert if passed a built-in type that isn't an integer or enum, /// or if it is not canonicalized. unsigned ASTContext::getIntegerRank(const Type *T) const { assert(T->isCanonicalUnqualified() && "T should be canonicalized"); // Results in this 'losing' to any type of the same size, but winning if // larger. 
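// Worked example of the rank encoding below (illustrative, assuming an LP64
// target with 32-bit 'int' and 64-bit 'long'): the result is
// BaseRank + (BitWidth << 3), so
//   _ExtInt(32) -> 0 + (32 << 3) = 256   (loses to 'int' of the same width)
//   char        -> 2 + ( 8 << 3) =  66
//   int         -> 4 + (32 << 3) = 260
//   long        -> 5 + (64 << 3) = 517   (a wider type always outranks a
//                                          narrower one)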
if (const auto *EIT = dyn_cast(T)) return 0 + (EIT->getNumBits() << 3); switch (cast(T)->getKind()) { default: llvm_unreachable("getIntegerRank(): not a built-in integer"); case BuiltinType::Bool: return 1 + (getIntWidth(BoolTy) << 3); case BuiltinType::Char_S: case BuiltinType::Char_U: case BuiltinType::SChar: case BuiltinType::UChar: return 2 + (getIntWidth(CharTy) << 3); case BuiltinType::Short: case BuiltinType::UShort: return 3 + (getIntWidth(ShortTy) << 3); case BuiltinType::Int: case BuiltinType::UInt: return 4 + (getIntWidth(IntTy) << 3); case BuiltinType::Long: case BuiltinType::ULong: return 5 + (getIntWidth(LongTy) << 3); case BuiltinType::LongLong: case BuiltinType::ULongLong: return 6 + (getIntWidth(LongLongTy) << 3); case BuiltinType::Int128: case BuiltinType::UInt128: return 7 + (getIntWidth(Int128Ty) << 3); } } /// Whether this is a promotable bitfield reference according /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). /// /// \returns the type this bit-field will promote to, or NULL if no /// promotion occurs. QualType ASTContext::isPromotableBitField(Expr *E) const { if (E->isTypeDependent() || E->isValueDependent()) return {}; // C++ [conv.prom]p5: // If the bit-field has an enumerated type, it is treated as any other // value of that type for promotion purposes. if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) return {}; // FIXME: We should not do this unless E->refersToBitField() is true. This // matters in C where getSourceBitField() will find bit-fields for various // cases where the source expression is not a bit-field designator. FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? if (!Field) return {}; QualType FT = Field->getType(); uint64_t BitWidth = Field->getBitWidthValue(*this); uint64_t IntSize = getTypeSize(IntTy); // C++ [conv.prom]p5: // A prvalue for an integral bit-field can be converted to a prvalue of type // int if int can represent all the values of the bit-field; otherwise, it // can be converted to unsigned int if unsigned int can represent all the // values of the bit-field. If the bit-field is larger yet, no integral // promotion applies to it. // C11 6.3.1.1/2: // [For a bit-field of type _Bool, int, signed int, or unsigned int:] // If an int can represent all values of the original type (as restricted by // the width, for a bit-field), the value is converted to an int; otherwise, // it is converted to an unsigned int. // // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. // We perform that promotion here to match GCC and C++. // FIXME: C does not permit promotion of an enum bit-field whose rank is // greater than that of 'int'. We perform that promotion to match GCC. if (BitWidth < IntSize) return IntTy; if (BitWidth == IntSize) return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; // Bit-fields wider than int are not subject to promotions, and therefore act // like the base type. GCC has some weird bugs in this area that we // deliberately do not follow (GCC follows a pre-standard resolution to // C's DR315 which treats bit-width as being part of the type, and this leaks // into their semantics in some cases). return {}; } /// getPromotedIntegerType - Returns the type that Promotable will /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable /// integer type. 
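// Illustrative promotion results for the logic above and below (assuming a
// 32-bit 'int'):
//   struct S { unsigned u : 15; unsigned v : 32; long l : 40; };
//   s.u -> int           (fits in 32 bits)
//   s.v -> unsigned int  (exactly 32 bits, unsigned field type)
//   s.l -> no integral promotion (wider than 'int')
//   char, short            -> int
//   32-bit signed wchar_t  -> int (first type in the list that can hold it)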
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { assert(!Promotable.isNull()); assert(Promotable->isPromotableIntegerType()); if (const auto *ET = Promotable->getAs()) return ET->getDecl()->getPromotionType(); if (const auto *BT = Promotable->getAs()) { // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t // (3.9.1) can be converted to a prvalue of the first of the following // types that can represent all the values of its underlying type: // int, unsigned int, long int, unsigned long int, long long int, or // unsigned long long int [...] // FIXME: Is there some better way to compute this? if (BT->getKind() == BuiltinType::WChar_S || BT->getKind() == BuiltinType::WChar_U || BT->getKind() == BuiltinType::Char8 || BT->getKind() == BuiltinType::Char16 || BT->getKind() == BuiltinType::Char32) { bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; uint64_t FromSize = getTypeSize(BT); QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, LongLongTy, UnsignedLongLongTy }; for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) { uint64_t ToSize = getTypeSize(PromoteTypes[Idx]); if (FromSize < ToSize || (FromSize == ToSize && FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) return PromoteTypes[Idx]; } llvm_unreachable("char type should fit into long long"); } } // At this point, we should have a signed or unsigned integer type. if (Promotable->isSignedIntegerType()) return IntTy; uint64_t PromotableSize = getIntWidth(Promotable); uint64_t IntSize = getIntWidth(IntTy); assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; } /// Recurses in pointer/array types until it finds an objc retainable /// type and returns its ownership. Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { while (!T.isNull()) { if (T.getObjCLifetime() != Qualifiers::OCL_None) return T.getObjCLifetime(); if (T->isArrayType()) T = getBaseElementType(T); else if (const auto *PT = T->getAs()) T = PT->getPointeeType(); else if (const auto *RT = T->getAs()) T = RT->getPointeeType(); else break; } return Qualifiers::OCL_None; } static const Type *getIntegerTypeForEnum(const EnumType *ET) { // Incomplete enum types are not treated as integer types. // FIXME: In C++, enum types are never integer types. if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) return ET->getDecl()->getIntegerType().getTypePtr(); return nullptr; } /// getIntegerTypeOrder - Returns the highest ranked integer type: /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If /// LHS < RHS, return -1. int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const { const Type *LHSC = getCanonicalType(LHS).getTypePtr(); const Type *RHSC = getCanonicalType(RHS).getTypePtr(); // Unwrap enums to their underlying type. if (const auto *ET = dyn_cast(LHSC)) LHSC = getIntegerTypeForEnum(ET); if (const auto *ET = dyn_cast(RHSC)) RHSC = getIntegerTypeForEnum(ET); if (LHSC == RHSC) return 0; bool LHSUnsigned = LHSC->isUnsignedIntegerType(); bool RHSUnsigned = RHSC->isUnsignedIntegerType(); unsigned LHSRank = getIntegerRank(LHSC); unsigned RHSRank = getIntegerRank(RHSC); if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned. if (LHSRank == RHSRank) return 0; return LHSRank > RHSRank ? 1 : -1; } // Otherwise, the LHS is signed and the RHS is unsigned or visa versa. if (LHSUnsigned) { // If the unsigned [LHS] type is larger, return it. 
if (LHSRank >= RHSRank) return 1; // If the signed type can represent all values of the unsigned type, it // wins. Because we are dealing with 2's complement and types that are // powers of two larger than each other, this is always safe. return -1; } // If the unsigned [RHS] type is larger, return it. if (RHSRank >= LHSRank) return -1; // If the signed type can represent all values of the unsigned type, it // wins. Because we are dealing with 2's complement and types that are // powers of two larger than each other, this is always safe. return 1; } TypedefDecl *ASTContext::getCFConstantStringDecl() const { if (CFConstantStringTypeDecl) return CFConstantStringTypeDecl; assert(!CFConstantStringTagDecl && "tag and typedef should be initialized together"); CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag"); CFConstantStringTagDecl->startDefinition(); struct { QualType Type; const char *Name; } Fields[5]; unsigned Count = 0; /// Objective-C ABI /// /// typedef struct __NSConstantString_tag { /// const int *isa; /// int flags; /// const char *str; /// long length; /// } __NSConstantString; /// /// Swift ABI (4.1, 4.2) /// /// typedef struct __NSConstantString_tag { /// uintptr_t _cfisa; /// uintptr_t _swift_rc; /// _Atomic(uint64_t) _cfinfoa; /// const char *_ptr; /// uint32_t _length; /// } __NSConstantString; /// /// Swift ABI (5.0) /// /// typedef struct __NSConstantString_tag { /// uintptr_t _cfisa; /// uintptr_t _swift_rc; /// _Atomic(uint64_t) _cfinfoa; /// const char *_ptr; /// uintptr_t _length; /// } __NSConstantString; const auto CFRuntime = getLangOpts().CFRuntime; if (static_cast(CFRuntime) < static_cast(LangOptions::CoreFoundationABI::Swift)) { Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" }; Fields[Count++] = { IntTy, "flags" }; Fields[Count++] = { getPointerType(CharTy.withConst()), "str" }; Fields[Count++] = { LongTy, "length" }; } else { Fields[Count++] = { getUIntPtrType(), "_cfisa" }; Fields[Count++] = { getUIntPtrType(), "_swift_rc" }; Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_rc" }; Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" }; if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 || CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) Fields[Count++] = { IntTy, "_ptr" }; else Fields[Count++] = { getUIntPtrType(), "_ptr" }; } // Create fields for (unsigned i = 0; i < Count; ++i) { FieldDecl *Field = FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(), SourceLocation(), &Idents.get(Fields[i].Name), Fields[i].Type, /*TInfo=*/nullptr, /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); Field->setAccess(AS_public); CFConstantStringTagDecl->addDecl(Field); } CFConstantStringTagDecl->completeDefinition(); // This type is designed to be compatible with NSConstantString, but cannot // use the same name, since NSConstantString is an interface. auto tagType = getTagDeclType(CFConstantStringTagDecl); CFConstantStringTypeDecl = buildImplicitTypedef(tagType, "__NSConstantString"); return CFConstantStringTypeDecl; } RecordDecl *ASTContext::getCFConstantStringTagDecl() const { if (!CFConstantStringTagDecl) getCFConstantStringDecl(); // Build the tag and the typedef. return CFConstantStringTagDecl; } // getCFConstantStringType - Return the type used for constant CFStrings. 
QualType ASTContext::getCFConstantStringType() const { return getTypedefType(getCFConstantStringDecl()); } QualType ASTContext::getObjCSuperType() const { if (ObjCSuperType.isNull()) { RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); TUDecl->addDecl(ObjCSuperTypeDecl); ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); } return ObjCSuperType; } void ASTContext::setCFConstantStringType(QualType T) { const auto *TD = T->castAs(); CFConstantStringTypeDecl = cast(TD->getDecl()); const auto *TagType = CFConstantStringTypeDecl->getUnderlyingType()->castAs(); CFConstantStringTagDecl = TagType->getDecl(); } QualType ASTContext::getBlockDescriptorType() const { if (BlockDescriptorType) return getTagDeclType(BlockDescriptorType); RecordDecl *RD; // FIXME: Needs the FlagAppleBlock bit. RD = buildImplicitRecord("__block_descriptor"); RD->startDefinition(); QualType FieldTypes[] = { UnsignedLongTy, UnsignedLongTy, }; static const char *const FieldNames[] = { "reserved", "Size" }; for (size_t i = 0; i < 2; ++i) { FieldDecl *Field = FieldDecl::Create( *this, RD, SourceLocation(), SourceLocation(), &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); Field->setAccess(AS_public); RD->addDecl(Field); } RD->completeDefinition(); BlockDescriptorType = RD; return getTagDeclType(BlockDescriptorType); } QualType ASTContext::getBlockDescriptorExtendedType() const { if (BlockDescriptorExtendedType) return getTagDeclType(BlockDescriptorExtendedType); RecordDecl *RD; // FIXME: Needs the FlagAppleBlock bit. RD = buildImplicitRecord("__block_descriptor_withcopydispose"); RD->startDefinition(); QualType FieldTypes[] = { UnsignedLongTy, UnsignedLongTy, getPointerType(VoidPtrTy), getPointerType(VoidPtrTy) }; static const char *const FieldNames[] = { "reserved", "Size", "CopyFuncPtr", "DestroyFuncPtr" }; for (size_t i = 0; i < 4; ++i) { FieldDecl *Field = FieldDecl::Create( *this, RD, SourceLocation(), SourceLocation(), &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); Field->setAccess(AS_public); RD->addDecl(Field); } RD->completeDefinition(); BlockDescriptorExtendedType = RD; return getTagDeclType(BlockDescriptorExtendedType); } OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { const auto *BT = dyn_cast(T); if (!BT) { if (isa(T)) return OCLTK_Pipe; return OCLTK_Default; } switch (BT->getKind()) { #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ case BuiltinType::Id: \ return OCLTK_Image; #include "clang/Basic/OpenCLImageTypes.def" case BuiltinType::OCLClkEvent: return OCLTK_ClkEvent; case BuiltinType::OCLEvent: return OCLTK_Event; case BuiltinType::OCLQueue: return OCLTK_Queue; case BuiltinType::OCLReserveID: return OCLTK_ReserveID; case BuiltinType::OCLSampler: return OCLTK_Sampler; default: return OCLTK_Default; } } LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); } /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" /// requires copy/dispose. Note that this must match the logic /// in buildByrefHelpers. 
bool ASTContext::BlockRequiresCopying(QualType Ty, const VarDecl *D) { if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); if (!copyExpr && record->hasTrivialDestructor()) return false; return true; } // The block needs copy/destroy helpers if Ty is non-trivial to destructively // move or destroy. if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) return true; if (!Ty->isObjCRetainableType()) return false; Qualifiers qs = Ty.getQualifiers(); // If we have lifetime, that dominates. if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { switch (lifetime) { case Qualifiers::OCL_None: llvm_unreachable("impossible"); // These are just bits as far as the runtime is concerned. case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Autoreleasing: return false; // These cases should have been taken care of when checking the type's // non-triviality. case Qualifiers::OCL_Weak: case Qualifiers::OCL_Strong: llvm_unreachable("impossible"); } llvm_unreachable("fell out of lifetime switch!"); } return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || Ty->isObjCObjectPointerType()); } bool ASTContext::getByrefLifetime(QualType Ty, Qualifiers::ObjCLifetime &LifeTime, bool &HasByrefExtendedLayout) const { if (!getLangOpts().ObjC || getLangOpts().getGC() != LangOptions::NonGC) return false; HasByrefExtendedLayout = false; if (Ty->isRecordType()) { HasByrefExtendedLayout = true; LifeTime = Qualifiers::OCL_None; } else if ((LifeTime = Ty.getObjCLifetime())) { // Honor the ARC qualifiers. } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { // The MRR rule. LifeTime = Qualifiers::OCL_ExplicitNone; } else { LifeTime = Qualifiers::OCL_None; } return true; } CanQualType ASTContext::getNSUIntegerType() const { assert(Target && "Expected target to be initialized"); const llvm::Triple &T = Target->getTriple(); // Windows is LLP64 rather than LP64 if (T.isOSWindows() && T.isArch64Bit()) return UnsignedLongLongTy; return UnsignedLongTy; } CanQualType ASTContext::getNSIntegerType() const { assert(Target && "Expected target to be initialized"); const llvm::Triple &T = Target->getTriple(); // Windows is LLP64 rather than LP64 if (T.isOSWindows() && T.isArch64Bit()) return LongLongTy; return LongTy; } TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { if (!ObjCInstanceTypeDecl) ObjCInstanceTypeDecl = buildImplicitTypedef(getObjCIdType(), "instancetype"); return ObjCInstanceTypeDecl; } // This returns true if a type has been typedefed to BOOL: // typedef BOOL; static bool isTypeTypedefedAsBOOL(QualType T) { if (const auto *TT = dyn_cast(T)) if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) return II->isStr("BOOL"); return false; } /// getObjCEncodingTypeSize returns size of type for objective-c encoding /// purpose. CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { if (!type->isIncompleteArrayType() && type->isIncompleteType()) return CharUnits::Zero(); CharUnits sz = getTypeSizeInChars(type); // Make all integer and enum types at least as large as an int if (sz.isPositive() && type->isIntegralOrEnumerationType()) sz = std::max(sz, getTypeSizeInChars(IntTy)); // Treat arrays as pointers, since that's how they're passed in. 
else if (type->isArrayType()) sz = getTypeSizeInChars(VoidPtrTy); return sz; } bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { return getTargetInfo().getCXXABI().isMicrosoft() && VD->isStaticDataMember() && VD->getType()->isIntegralOrEnumerationType() && !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); } ASTContext::InlineVariableDefinitionKind ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { if (!VD->isInline()) return InlineVariableDefinitionKind::None; // In almost all cases, it's a weak definition. auto *First = VD->getFirstDecl(); if (First->isInlineSpecified() || !First->isStaticDataMember()) return InlineVariableDefinitionKind::Weak; // If there's a file-context declaration in this translation unit, it's a // non-discardable definition. for (auto *D : VD->redecls()) if (D->getLexicalDeclContext()->isFileContext() && !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) return InlineVariableDefinitionKind::Strong; // If we've not seen one yet, we don't know. return InlineVariableDefinitionKind::WeakUnknown; } static std::string charUnitsToString(const CharUnits &CU) { return llvm::itostr(CU.getQuantity()); } /// getObjCEncodingForBlock - Return the encoded type for this block /// declaration. std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { std::string S; const BlockDecl *Decl = Expr->getBlockDecl(); QualType BlockTy = Expr->getType()->castAs()->getPointeeType(); QualType BlockReturnTy = BlockTy->castAs()->getReturnType(); // Encode result type. if (getLangOpts().EncodeExtendedBlockSig) getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, true /*Extended*/); else getObjCEncodingForType(BlockReturnTy, S); // Compute size of all parameters. // Start with computing size of a pointer in number of bytes. // FIXME: There might(should) be a better way of doing this computation! CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); CharUnits ParmOffset = PtrSize; for (auto PI : Decl->parameters()) { QualType PType = PI->getType(); CharUnits sz = getObjCEncodingTypeSize(PType); if (sz.isZero()) continue; assert(sz.isPositive() && "BlockExpr - Incomplete param type"); ParmOffset += sz; } // Size of the argument frame S += charUnitsToString(ParmOffset); // Block pointer and offset. S += "@?0"; // Argument types. ParmOffset = PtrSize; for (auto PVDecl : Decl->parameters()) { QualType PType = PVDecl->getOriginalType(); if (const auto *AT = dyn_cast(PType->getCanonicalTypeInternal())) { // Use array's original type only if it has known number of // elements. if (!isa(AT)) PType = PVDecl->getType(); } else if (PType->isFunctionType()) PType = PVDecl->getType(); if (getLangOpts().EncodeExtendedBlockSig) getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, S, true /*Extended*/); else getObjCEncodingForType(PType, S); S += charUnitsToString(ParmOffset); ParmOffset += getObjCEncodingTypeSize(PType); } return S; } std::string ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { std::string S; // Encode result type. getObjCEncodingForType(Decl->getReturnType(), S); CharUnits ParmOffset; // Compute size of all parameters. 
for (auto PI : Decl->parameters()) { QualType PType = PI->getType(); CharUnits sz = getObjCEncodingTypeSize(PType); if (sz.isZero()) continue; assert(sz.isPositive() && "getObjCEncodingForFunctionDecl - Incomplete param type"); ParmOffset += sz; } S += charUnitsToString(ParmOffset); ParmOffset = CharUnits::Zero(); // Argument types. for (auto PVDecl : Decl->parameters()) { QualType PType = PVDecl->getOriginalType(); if (const auto *AT = dyn_cast(PType->getCanonicalTypeInternal())) { // Use array's original type only if it has known number of // elements. if (!isa(AT)) PType = PVDecl->getType(); } else if (PType->isFunctionType()) PType = PVDecl->getType(); getObjCEncodingForType(PType, S); S += charUnitsToString(ParmOffset); ParmOffset += getObjCEncodingTypeSize(PType); } return S; } /// getObjCEncodingForMethodParameter - Return the encoded type for a single /// method parameter or return type. If Extended, include class names and /// block object types. void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, QualType T, std::string& S, bool Extended) const { // Encode type qualifer, 'in', 'inout', etc. for the parameter. getObjCEncodingForTypeQualifier(QT, S); // Encode parameter type. ObjCEncOptions Options = ObjCEncOptions() .setExpandPointedToStructures() .setExpandStructures() .setIsOutermostType(); if (Extended) Options.setEncodeBlockParameters().setEncodeClassNames(); getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); } /// getObjCEncodingForMethodDecl - Return the encoded type for this method /// declaration. std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, bool Extended) const { // FIXME: This is not very efficient. // Encode return type. std::string S; getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), Decl->getReturnType(), S, Extended); // Compute size of all parameters. // Start with computing size of a pointer in number of bytes. // FIXME: There might(should) be a better way of doing this computation! CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); // The first two arguments (self and _cmd) are pointers; account for // their size. CharUnits ParmOffset = 2 * PtrSize; for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), E = Decl->sel_param_end(); PI != E; ++PI) { QualType PType = (*PI)->getType(); CharUnits sz = getObjCEncodingTypeSize(PType); if (sz.isZero()) continue; assert(sz.isPositive() && "getObjCEncodingForMethodDecl - Incomplete param type"); ParmOffset += sz; } S += charUnitsToString(ParmOffset); S += "@0:"; S += charUnitsToString(PtrSize); // Argument types. ParmOffset = 2 * PtrSize; for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), E = Decl->sel_param_end(); PI != E; ++PI) { const ParmVarDecl *PVDecl = *PI; QualType PType = PVDecl->getOriginalType(); if (const auto *AT = dyn_cast(PType->getCanonicalTypeInternal())) { // Use array's original type only if it has known number of // elements. 
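// Illustrative result (assuming a 64-bit target where pointers are 8 bytes):
// a method such as
//   - (void)setX:(int)x;
// is encoded here roughly as "v20@0:8i16" -- return type 'v', a 20-byte
// argument frame, the implicit self '@' at offset 0, _cmd ':' at offset 8,
// and the int parameter 'i' at offset 16.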
if (!isa(AT)) PType = PVDecl->getType(); } else if (PType->isFunctionType()) PType = PVDecl->getType(); getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), PType, S, Extended); S += charUnitsToString(ParmOffset); ParmOffset += getObjCEncodingTypeSize(PType); } return S; } ObjCPropertyImplDecl * ASTContext::getObjCPropertyImplDeclForPropertyDecl( const ObjCPropertyDecl *PD, const Decl *Container) const { if (!Container) return nullptr; if (const auto *CID = dyn_cast(Container)) { for (auto *PID : CID->property_impls()) if (PID->getPropertyDecl() == PD) return PID; } else { const auto *OID = cast(Container); for (auto *PID : OID->property_impls()) if (PID->getPropertyDecl() == PD) return PID; } return nullptr; } /// getObjCEncodingForPropertyDecl - Return the encoded type for this /// property declaration. If non-NULL, Container must be either an /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be /// NULL when getting encodings for protocol properties. /// Property attributes are stored as a comma-delimited C string. The simple /// attributes readonly and bycopy are encoded as single characters. The /// parametrized attributes, getter=name, setter=name, and ivar=name, are /// encoded as single characters, followed by an identifier. Property types /// are also encoded as a parametrized attribute. The characters used to encode /// these attributes are defined by the following enumeration: /// @code /// enum PropertyAttributes { /// kPropertyReadOnly = 'R', // property is read-only. /// kPropertyBycopy = 'C', // property is a copy of the value last assigned /// kPropertyByref = '&', // property is a reference to the value last assigned /// kPropertyDynamic = 'D', // property is dynamic /// kPropertyGetter = 'G', // followed by getter selector name /// kPropertySetter = 'S', // followed by setter selector name /// kPropertyInstanceVariable = 'V' // followed by instance variable name /// kPropertyType = 'T' // followed by old-style type encoding. /// kPropertyWeak = 'W' // 'weak' property /// kPropertyStrong = 'P' // property GC'able /// kPropertyNonAtomic = 'N' // property non-atomic /// }; /// @endcode std::string ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, const Decl *Container) const { // Collect information from the property implementation decl(s). bool Dynamic = false; ObjCPropertyImplDecl *SynthesizePID = nullptr; if (ObjCPropertyImplDecl *PropertyImpDecl = getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) Dynamic = true; else SynthesizePID = PropertyImpDecl; } // FIXME: This is not very efficient. std::string S = "T"; // Encode result type. // GCC has some special rules regarding encoding of properties which // closely resembles encoding of ivars. getObjCEncodingForPropertyType(PD->getType(), S); if (PD->isReadOnly()) { S += ",R"; if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) S += ",C"; if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) S += ",&"; if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) S += ",W"; } else { switch (PD->getSetterKind()) { case ObjCPropertyDecl::Assign: break; case ObjCPropertyDecl::Copy: S += ",C"; break; case ObjCPropertyDecl::Retain: S += ",&"; break; case ObjCPropertyDecl::Weak: S += ",W"; break; } } // It really isn't clear at all what this means, since properties // are "dynamic by default". 
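// Illustrative property encoding (assuming the property is synthesized with
// an ivar named '_name'):
//   @property (nonatomic, copy) NSString *name;
// yields roughly
//   T@"NSString",C,N,V_name
// i.e. the type, then 'C' (copy), 'N' (nonatomic), and 'V' + ivar name.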
if (Dynamic) S += ",D"; if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) S += ",N"; if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { S += ",G"; S += PD->getGetterName().getAsString(); } if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { S += ",S"; S += PD->getSetterName().getAsString(); } if (SynthesizePID) { const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); S += ",V"; S += OID->getNameAsString(); } // FIXME: OBJCGC: weak & strong return S; } /// getLegacyIntegralTypeEncoding - /// Another legacy compatibility encoding: 32-bit longs are encoded as /// 'l' or 'L' , but not always. For typedefs, we need to use /// 'i' or 'I' instead if encoding a struct field, or a pointer! void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { if (isa(PointeeTy.getTypePtr())) { if (const auto *BT = PointeeTy->getAs()) { if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) PointeeTy = UnsignedIntTy; else if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) PointeeTy = IntTy; } } } void ASTContext::getObjCEncodingForType(QualType T, std::string& S, const FieldDecl *Field, QualType *NotEncodedT) const { // We follow the behavior of gcc, expanding structures which are // directly pointed to, and expanding embedded structures. Note that // these rules are sufficient to prevent recursive encoding of the // same type. getObjCEncodingForTypeImpl(T, S, ObjCEncOptions() .setExpandPointedToStructures() .setExpandStructures() .setIsOutermostType(), Field, NotEncodedT); } void ASTContext::getObjCEncodingForPropertyType(QualType T, std::string& S) const { // Encode result type. // GCC has some special rules regarding encoding of properties which // closely resembles encoding of ivars. getObjCEncodingForTypeImpl(T, S, ObjCEncOptions() .setExpandPointedToStructures() .setExpandStructures() .setIsOutermostType() .setEncodingProperty(), /*Field=*/nullptr); } static char getObjCEncodingForPrimitiveType(const ASTContext *C, const BuiltinType *BT) { BuiltinType::Kind kind = BT->getKind(); switch (kind) { case BuiltinType::Void: return 'v'; case BuiltinType::Bool: return 'B'; case BuiltinType::Char8: case BuiltinType::Char_U: case BuiltinType::UChar: return 'C'; case BuiltinType::Char16: case BuiltinType::UShort: return 'S'; case BuiltinType::Char32: case BuiltinType::UInt: return 'I'; case BuiltinType::ULong: return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; case BuiltinType::UInt128: return 'T'; case BuiltinType::ULongLong: return 'Q'; case BuiltinType::Char_S: case BuiltinType::SChar: return 'c'; case BuiltinType::Short: return 's'; case BuiltinType::WChar_S: case BuiltinType::WChar_U: case BuiltinType::Int: return 'i'; case BuiltinType::Long: return C->getTargetInfo().getLongWidth() == 32 ? 
'l' : 'q'; case BuiltinType::LongLong: return 'q'; case BuiltinType::Int128: return 't'; case BuiltinType::Float: return 'f'; case BuiltinType::Double: return 'd'; case BuiltinType::LongDouble: return 'D'; case BuiltinType::NullPtr: return '*'; // like char* case BuiltinType::BFloat16: case BuiltinType::Float16: case BuiltinType::Float128: case BuiltinType::Half: case BuiltinType::ShortAccum: case BuiltinType::Accum: case BuiltinType::LongAccum: case BuiltinType::UShortAccum: case BuiltinType::UAccum: case BuiltinType::ULongAccum: case BuiltinType::ShortFract: case BuiltinType::Fract: case BuiltinType::LongFract: case BuiltinType::UShortFract: case BuiltinType::UFract: case BuiltinType::ULongFract: case BuiltinType::SatShortAccum: case BuiltinType::SatAccum: case BuiltinType::SatLongAccum: case BuiltinType::SatUShortAccum: case BuiltinType::SatUAccum: case BuiltinType::SatULongAccum: case BuiltinType::SatShortFract: case BuiltinType::SatFract: case BuiltinType::SatLongFract: case BuiltinType::SatUShortFract: case BuiltinType::SatUFract: case BuiltinType::SatULongFract: // FIXME: potentially need @encodes for these! return ' '; #define SVE_TYPE(Name, Id, SingletonId) \ case BuiltinType::Id: #include "clang/Basic/AArch64SVEACLETypes.def" { DiagnosticsEngine &Diags = C->getDiagnostics(); unsigned DiagID = Diags.getCustomDiagID( DiagnosticsEngine::Error, "cannot yet @encode type %0"); Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); return ' '; } case BuiltinType::ObjCId: case BuiltinType::ObjCClass: case BuiltinType::ObjCSel: llvm_unreachable("@encoding ObjC primitive type"); // OpenCL and placeholder types don't need @encodings. #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ case BuiltinType::Id: #include "clang/Basic/OpenCLImageTypes.def" #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ case BuiltinType::Id: #include "clang/Basic/OpenCLExtensionTypes.def" case BuiltinType::OCLEvent: case BuiltinType::OCLClkEvent: case BuiltinType::OCLQueue: case BuiltinType::OCLReserveID: case BuiltinType::OCLSampler: case BuiltinType::Dependent: #define BUILTIN_TYPE(KIND, ID) #define PLACEHOLDER_TYPE(KIND, ID) \ case BuiltinType::KIND: #include "clang/AST/BuiltinTypes.def" llvm_unreachable("invalid builtin type for @encode"); } llvm_unreachable("invalid BuiltinType::Kind value"); } static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { EnumDecl *Enum = ET->getDecl(); // The encoding of an non-fixed enum type is always 'i', regardless of size. if (!Enum->isFixed()) return 'i'; // The encoding of a fixed enum type matches its fixed underlying type. const auto *BT = Enum->getIntegerType()->castAs(); return getObjCEncodingForPrimitiveType(C, BT); } static void EncodeBitField(const ASTContext *Ctx, std::string& S, QualType T, const FieldDecl *FD) { assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); S += 'b'; // The NeXT runtime encodes bit fields as b followed by the number of bits. // The GNU runtime requires more information; bitfields are encoded as b, // then the offset (in bits) of the first element, then the type of the // bitfield, then the size in bits. For example, in this structure: // // struct // { // int integer; // int flags:2; // }; // On a 32-bit system, the encoding for flags would be b2 for the NeXT // runtime, but b32i2 for the GNU runtime. 
The reason for this extra // information is not especially sensible, but we're stuck with it for // compatibility with GCC, although providing it breaks anything that // actually uses runtime introspection and wants to work on both runtimes... if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { uint64_t Offset; if (const auto *IVD = dyn_cast(FD)) { Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, IVD); } else { const RecordDecl *RD = FD->getParent(); const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); Offset = RL.getFieldOffset(FD->getFieldIndex()); } S += llvm::utostr(Offset); if (const auto *ET = T->getAs()) S += ObjCEncodingForEnumType(Ctx, ET); else { const auto *BT = T->castAs(); S += getObjCEncodingForPrimitiveType(Ctx, BT); } } S += llvm::utostr(FD->getBitWidthValue(*Ctx)); } // FIXME: Use SmallString for accumulating string. void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, const ObjCEncOptions Options, const FieldDecl *FD, QualType *NotEncodedT) const { CanQualType CT = getCanonicalType(T); switch (CT->getTypeClass()) { case Type::Builtin: case Type::Enum: if (FD && FD->isBitField()) return EncodeBitField(this, S, T, FD); if (const auto *BT = dyn_cast(CT)) S += getObjCEncodingForPrimitiveType(this, BT); else S += ObjCEncodingForEnumType(this, cast(CT)); return; case Type::Complex: S += 'j'; getObjCEncodingForTypeImpl(T->castAs()->getElementType(), S, ObjCEncOptions(), /*Field=*/nullptr); return; case Type::Atomic: S += 'A'; getObjCEncodingForTypeImpl(T->castAs()->getValueType(), S, ObjCEncOptions(), /*Field=*/nullptr); return; // encoding for pointer or reference types. case Type::Pointer: case Type::LValueReference: case Type::RValueReference: { QualType PointeeTy; if (isa(CT)) { const auto *PT = T->castAs(); if (PT->isObjCSelType()) { S += ':'; return; } PointeeTy = PT->getPointeeType(); } else { PointeeTy = T->castAs()->getPointeeType(); } bool isReadOnly = false; // For historical/compatibility reasons, the read-only qualifier of the // pointee gets emitted _before_ the '^'. The read-only qualifier of // the pointer itself gets ignored, _unless_ we are looking at a typedef! // Also, do not emit the 'r' for anything but the outermost type! if (isa(T.getTypePtr())) { if (Options.IsOutermostType() && T.isConstQualified()) { isReadOnly = true; S += 'r'; } } else if (Options.IsOutermostType()) { QualType P = PointeeTy; while (auto PT = P->getAs()) P = PT->getPointeeType(); if (P.isConstQualified()) { isReadOnly = true; S += 'r'; } } if (isReadOnly) { // Another legacy compatibility encoding. Some ObjC qualifier and type // combinations need to be rearranged. // Rewrite "in const" from "nr" to "rn" if (StringRef(S).endswith("nr")) S.replace(S.end()-2, S.end(), "rn"); } if (PointeeTy->isCharType()) { // char pointer types should be encoded as '*' unless it is a // type that has been typedef'd to 'BOOL'. if (!isTypeTypedefedAsBOOL(PointeeTy)) { S += '*'; return; } } else if (const auto *RTy = PointeeTy->getAs()) { // GCC binary compat: Need to convert "struct objc_class *" to "#". if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { S += '#'; return; } // GCC binary compat: Need to convert "struct objc_object *" to "@". if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { S += '@'; return; } // fall through... 
} S += '^'; getLegacyIntegralTypeEncoding(PointeeTy); ObjCEncOptions NewOptions; if (Options.ExpandPointedToStructures()) NewOptions.setExpandStructures(); getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, /*Field=*/nullptr, NotEncodedT); return; } case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: { const auto *AT = cast(CT); if (isa(AT) && !Options.IsStructField()) { // Incomplete arrays are encoded as a pointer to the array element. S += '^'; getObjCEncodingForTypeImpl( AT->getElementType(), S, Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); } else { S += '['; if (const auto *CAT = dyn_cast(AT)) S += llvm::utostr(CAT->getSize().getZExtValue()); else { //Variable length arrays are encoded as a regular array with 0 elements. assert((isa(AT) || isa(AT)) && "Unknown array type!"); S += '0'; } getObjCEncodingForTypeImpl( AT->getElementType(), S, Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, NotEncodedT); S += ']'; } return; } case Type::FunctionNoProto: case Type::FunctionProto: S += '?'; return; case Type::Record: { RecordDecl *RDecl = cast(CT)->getDecl(); S += RDecl->isUnion() ? '(' : '{'; // Anonymous structures print as '?' if (const IdentifierInfo *II = RDecl->getIdentifier()) { S += II->getName(); if (const auto *Spec = dyn_cast(RDecl)) { const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); llvm::raw_string_ostream OS(S); printTemplateArgumentList(OS, TemplateArgs.asArray(), getPrintingPolicy()); } } else { S += '?'; } if (Options.ExpandStructures()) { S += '='; if (!RDecl->isUnion()) { getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); } else { for (const auto *Field : RDecl->fields()) { if (FD) { S += '"'; S += Field->getNameAsString(); S += '"'; } // Special case bit-fields. if (Field->isBitField()) { getObjCEncodingForTypeImpl(Field->getType(), S, ObjCEncOptions().setExpandStructures(), Field); } else { QualType qt = Field->getType(); getLegacyIntegralTypeEncoding(qt); getObjCEncodingForTypeImpl( qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), FD, NotEncodedT); } } } } S += RDecl->isUnion() ? ')' : '}'; return; } case Type::BlockPointer: { const auto *BT = T->castAs(); S += "@?"; // Unlike a pointer-to-function, which is "^?". if (Options.EncodeBlockParameters()) { const auto *FT = BT->getPointeeType()->castAs(); S += '<'; // Block return type getObjCEncodingForTypeImpl(FT->getReturnType(), S, Options.forComponentType(), FD, NotEncodedT); // Block self S += "@?"; // Block parameters if (const auto *FPT = dyn_cast(FT)) { for (const auto &I : FPT->param_types()) getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, NotEncodedT); } S += '>'; } return; } case Type::ObjCObject: { // hack to match legacy encoding of *id and *Class QualType Ty = getObjCObjectPointerType(CT); if (Ty->isObjCIdType()) { S += "{objc_object=}"; return; } else if (Ty->isObjCClassType()) { S += "{objc_class=}"; return; } // TODO: Double check to make sure this intentionally falls through. LLVM_FALLTHROUGH; } case Type::ObjCInterface: { // Ignore protocol qualifiers when mangling at this level. 
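// A few illustrative @encode results for the aggregate cases handled above
// (sketches; the exact output depends on the expansion options in effect):
//   int[4]                          -> "[4i]"
//   variable-length array of int    -> "[0i]"  (encoded as a 0-element array)
//   struct Point { double x, y; }   -> "{Point=dd}" (when structures are expanded)
//   union U { int i; float f; }     -> "(U=if)"
//   block pointer                   -> "@?"   (vs. "^?" for a function pointer)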
// @encode(class_name) ObjCInterfaceDecl *OI = T->castAs()->getInterface(); S += '{'; S += OI->getObjCRuntimeNameAsString(); if (Options.ExpandStructures()) { S += '='; SmallVector Ivars; DeepCollectObjCIvars(OI, true, Ivars); for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { const FieldDecl *Field = Ivars[i]; if (Field->isBitField()) getObjCEncodingForTypeImpl(Field->getType(), S, ObjCEncOptions().setExpandStructures(), Field); else getObjCEncodingForTypeImpl(Field->getType(), S, ObjCEncOptions().setExpandStructures(), FD, NotEncodedT); } } S += '}'; return; } case Type::ObjCObjectPointer: { const auto *OPT = T->castAs(); if (OPT->isObjCIdType()) { S += '@'; return; } if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { // FIXME: Consider if we need to output qualifiers for 'Class
<p>
'. // Since this is a binary compatibility issue, need to consult with // runtime folks. Fortunately, this is a *very* obscure construct. S += '#'; return; } if (OPT->isObjCQualifiedIdType()) { getObjCEncodingForTypeImpl( getObjCIdType(), S, Options.keepingOnly(ObjCEncOptions() .setExpandPointedToStructures() .setExpandStructures()), FD); if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { // Note that we do extended encoding of protocol qualifer list // Only when doing ivar or property encoding. S += '"'; for (const auto *I : OPT->quals()) { S += '<'; S += I->getObjCRuntimeNameAsString(); S += '>'; } S += '"'; } return; } S += '@'; if (OPT->getInterfaceDecl() && (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { S += '"'; S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); for (const auto *I : OPT->quals()) { S += '<'; S += I->getObjCRuntimeNameAsString(); S += '>'; } S += '"'; } return; } // gcc just blithely ignores member pointers. // FIXME: we should do better than that. 'M' is available. case Type::MemberPointer: // This matches gcc's encoding, even though technically it is insufficient. //FIXME. We should do a better job than gcc. case Type::Vector: case Type::ExtVector: // Until we have a coherent encoding of these three types, issue warning. if (NotEncodedT) *NotEncodedT = T; return; case Type::ConstantMatrix: if (NotEncodedT) *NotEncodedT = T; return; // We could see an undeduced auto type here during error recovery. // Just ignore it. case Type::Auto: case Type::DeducedTemplateSpecialization: return; case Type::Pipe: case Type::ExtInt: #define ABSTRACT_TYPE(KIND, BASE) #define TYPE(KIND, BASE) #define DEPENDENT_TYPE(KIND, BASE) \ case Type::KIND: #define NON_CANONICAL_TYPE(KIND, BASE) \ case Type::KIND: #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ case Type::KIND: #include "clang/AST/TypeNodes.inc" llvm_unreachable("@encode for dependent type!"); } llvm_unreachable("bad type kind!"); } void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, std::string &S, const FieldDecl *FD, bool includeVBases, QualType *NotEncodedT) const { assert(RDecl && "Expected non-null RecordDecl"); assert(!RDecl->isUnion() && "Should not be called for unions"); if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) return; const auto *CXXRec = dyn_cast(RDecl); std::multimap FieldOrBaseOffsets; const ASTRecordLayout &layout = getASTRecordLayout(RDecl); if (CXXRec) { for (const auto &BI : CXXRec->bases()) { if (!BI.isVirtual()) { CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); if (base->isEmpty()) continue; uint64_t offs = toBits(layout.getBaseClassOffset(base)); FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), std::make_pair(offs, base)); } } } unsigned i = 0; for (auto *Field : RDecl->fields()) { uint64_t offs = layout.getFieldOffset(i); FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), std::make_pair(offs, Field)); ++i; } if (CXXRec && includeVBases) { for (const auto &BI : CXXRec->vbases()) { CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); if (base->isEmpty()) continue; uint64_t offs = toBits(layout.getVBaseClassOffset(base)); if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), std::make_pair(offs, base)); } } CharUnits size; if (CXXRec) { size = includeVBases ? 
layout.getSize() : layout.getNonVirtualSize(); } else { size = layout.getSize(); } #ifndef NDEBUG uint64_t CurOffs = 0; #endif std::multimap::iterator CurLayObj = FieldOrBaseOffsets.begin(); if (CXXRec && CXXRec->isDynamicClass() && (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { if (FD) { S += "\"_vptr$"; std::string recname = CXXRec->getNameAsString(); if (recname.empty()) recname = "?"; S += recname; S += '"'; } S += "^^?"; #ifndef NDEBUG CurOffs += getTypeSize(VoidPtrTy); #endif } if (!RDecl->hasFlexibleArrayMember()) { // Mark the end of the structure. uint64_t offs = toBits(size); FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), std::make_pair(offs, nullptr)); } for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { #ifndef NDEBUG assert(CurOffs <= CurLayObj->first); if (CurOffs < CurLayObj->first) { uint64_t padding = CurLayObj->first - CurOffs; // FIXME: There doesn't seem to be a way to indicate in the encoding that // packing/alignment of members is different that normal, in which case // the encoding will be out-of-sync with the real layout. // If the runtime switches to just consider the size of types without // taking into account alignment, we could make padding explicit in the // encoding (e.g. using arrays of chars). The encoding strings would be // longer then though. CurOffs += padding; } #endif NamedDecl *dcl = CurLayObj->second; if (!dcl) break; // reached end of structure. if (auto *base = dyn_cast(dcl)) { // We expand the bases without their virtual bases since those are going // in the initial structure. Note that this differs from gcc which // expands virtual bases each time one is encountered in the hierarchy, // making the encoding type bigger than it really is. getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, NotEncodedT); assert(!base->isEmpty()); #ifndef NDEBUG CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); #endif } else { const auto *field = cast(dcl); if (FD) { S += '"'; S += field->getNameAsString(); S += '"'; } if (field->isBitField()) { EncodeBitField(this, S, field->getType(), field); #ifndef NDEBUG CurOffs += field->getBitWidthValue(*this); #endif } else { QualType qt = field->getType(); getLegacyIntegralTypeEncoding(qt); getObjCEncodingForTypeImpl( qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), FD, NotEncodedT); #ifndef NDEBUG CurOffs += getTypeSize(field->getType()); #endif } } } } void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, std::string& S) const { if (QT & Decl::OBJC_TQ_In) S += 'n'; if (QT & Decl::OBJC_TQ_Inout) S += 'N'; if (QT & Decl::OBJC_TQ_Out) S += 'o'; if (QT & Decl::OBJC_TQ_Bycopy) S += 'O'; if (QT & Decl::OBJC_TQ_Byref) S += 'R'; if (QT & Decl::OBJC_TQ_Oneway) S += 'V'; } TypedefDecl *ASTContext::getObjCIdDecl() const { if (!ObjCIdDecl) { QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); T = getObjCObjectPointerType(T); ObjCIdDecl = buildImplicitTypedef(T, "id"); } return ObjCIdDecl; } TypedefDecl *ASTContext::getObjCSelDecl() const { if (!ObjCSelDecl) { QualType T = getPointerType(ObjCBuiltinSelTy); ObjCSelDecl = buildImplicitTypedef(T, "SEL"); } return ObjCSelDecl; } TypedefDecl *ASTContext::getObjCClassDecl() const { if (!ObjCClassDecl) { QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); T = getObjCObjectPointerType(T); ObjCClassDecl = buildImplicitTypedef(T, "Class"); } return ObjCClassDecl; } ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { if (!ObjCProtocolClassDecl) { 
ObjCProtocolClassDecl = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), SourceLocation(), &Idents.get("Protocol"), /*typeParamList=*/nullptr, /*PrevDecl=*/nullptr, SourceLocation(), true); } return ObjCProtocolClassDecl; } //===----------------------------------------------------------------------===// // __builtin_va_list Construction Functions //===----------------------------------------------------------------------===// static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, StringRef Name) { // typedef char* __builtin[_ms]_va_list; QualType T = Context->getPointerType(Context->CharTy); return Context->buildImplicitTypedef(T, Name); } static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); } static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); } static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { // typedef void* __builtin_va_list; QualType T = Context->getPointerType(Context->VoidTy); return Context->buildImplicitTypedef(T, "__builtin_va_list"); } static TypedefDecl * CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { // struct __va_list RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); if (Context->getLangOpts().CPlusPlus) { // namespace std { struct __va_list { NamespaceDecl *NS; NS = NamespaceDecl::Create(const_cast(*Context), Context->getTranslationUnitDecl(), /*Inline*/ false, SourceLocation(), SourceLocation(), &Context->Idents.get("std"), /*PrevDecl*/ nullptr); NS->setImplicit(); VaListTagDecl->setDeclContext(NS); } VaListTagDecl->startDefinition(); const size_t NumFields = 5; QualType FieldTypes[NumFields]; const char *FieldNames[NumFields]; // void *__stack; FieldTypes[0] = Context->getPointerType(Context->VoidTy); FieldNames[0] = "__stack"; // void *__gr_top; FieldTypes[1] = Context->getPointerType(Context->VoidTy); FieldNames[1] = "__gr_top"; // void *__vr_top; FieldTypes[2] = Context->getPointerType(Context->VoidTy); FieldNames[2] = "__vr_top"; // int __gr_offs; FieldTypes[3] = Context->IntTy; FieldNames[3] = "__gr_offs"; // int __vr_offs; FieldTypes[4] = Context->IntTy; FieldNames[4] = "__vr_offs"; // Create fields for (unsigned i = 0; i < NumFields; ++i) { FieldDecl *Field = FieldDecl::Create(const_cast(*Context), VaListTagDecl, SourceLocation(), SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); Field->setAccess(AS_public); VaListTagDecl->addDecl(Field); } VaListTagDecl->completeDefinition(); Context->VaListTagDecl = VaListTagDecl; QualType VaListTagType = Context->getRecordType(VaListTagDecl); // } __builtin_va_list; return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); } static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { // typedef struct __va_list_tag { RecordDecl *VaListTagDecl; VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); VaListTagDecl->startDefinition(); const size_t NumFields = 5; QualType FieldTypes[NumFields]; const char *FieldNames[NumFields]; // unsigned char gpr; FieldTypes[0] = Context->UnsignedCharTy; FieldNames[0] = "gpr"; // unsigned char fpr; FieldTypes[1] = Context->UnsignedCharTy; FieldNames[1] = "fpr"; // unsigned short reserved; FieldTypes[2] = Context->UnsignedShortTy; FieldNames[2] = "reserved"; // void* overflow_arg_area; 
FieldTypes[3] = Context->getPointerType(Context->VoidTy); FieldNames[3] = "overflow_arg_area"; // void* reg_save_area; FieldTypes[4] = Context->getPointerType(Context->VoidTy); FieldNames[4] = "reg_save_area"; // Create fields for (unsigned i = 0; i < NumFields; ++i) { FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, SourceLocation(), SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); Field->setAccess(AS_public); VaListTagDecl->addDecl(Field); } VaListTagDecl->completeDefinition(); Context->VaListTagDecl = VaListTagDecl; QualType VaListTagType = Context->getRecordType(VaListTagDecl); // } __va_list_tag; TypedefDecl *VaListTagTypedefDecl = Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); // typedef __va_list_tag __builtin_va_list[1]; llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); QualType VaListTagArrayType = Context->getConstantArrayType(VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); } static TypedefDecl * CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { // struct __va_list_tag { RecordDecl *VaListTagDecl; VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); VaListTagDecl->startDefinition(); const size_t NumFields = 4; QualType FieldTypes[NumFields]; const char *FieldNames[NumFields]; // unsigned gp_offset; FieldTypes[0] = Context->UnsignedIntTy; FieldNames[0] = "gp_offset"; // unsigned fp_offset; FieldTypes[1] = Context->UnsignedIntTy; FieldNames[1] = "fp_offset"; // void* overflow_arg_area; FieldTypes[2] = Context->getPointerType(Context->VoidTy); FieldNames[2] = "overflow_arg_area"; // void* reg_save_area; FieldTypes[3] = Context->getPointerType(Context->VoidTy); FieldNames[3] = "reg_save_area"; // Create fields for (unsigned i = 0; i < NumFields; ++i) { FieldDecl *Field = FieldDecl::Create(const_cast(*Context), VaListTagDecl, SourceLocation(), SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); Field->setAccess(AS_public); VaListTagDecl->addDecl(Field); } VaListTagDecl->completeDefinition(); Context->VaListTagDecl = VaListTagDecl; QualType VaListTagType = Context->getRecordType(VaListTagDecl); // }; // typedef struct __va_list_tag __builtin_va_list[1]; llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); QualType VaListTagArrayType = Context->getConstantArrayType( VaListTagType, Size, nullptr, ArrayType::Normal, 0); return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); } static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { // typedef int __builtin_va_list[4]; llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); QualType IntArrayType = Context->getConstantArrayType( Context->IntTy, Size, nullptr, ArrayType::Normal, 0); return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); } static TypedefDecl * CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { // struct __va_list RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); if (Context->getLangOpts().CPlusPlus) { // namespace std { struct __va_list { NamespaceDecl *NS; NS = NamespaceDecl::Create(const_cast(*Context), Context->getTranslationUnitDecl(), /*Inline*/false, SourceLocation(), SourceLocation(), 
&Context->Idents.get("std"), /*PrevDecl*/ nullptr); NS->setImplicit(); VaListDecl->setDeclContext(NS); } VaListDecl->startDefinition(); // void * __ap; FieldDecl *Field = FieldDecl::Create(const_cast(*Context), VaListDecl, SourceLocation(), SourceLocation(), &Context->Idents.get("__ap"), Context->getPointerType(Context->VoidTy), /*TInfo=*/nullptr, /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); Field->setAccess(AS_public); VaListDecl->addDecl(Field); // }; VaListDecl->completeDefinition(); Context->VaListTagDecl = VaListDecl; // typedef struct __va_list __builtin_va_list; QualType T = Context->getRecordType(VaListDecl); return Context->buildImplicitTypedef(T, "__builtin_va_list"); } static TypedefDecl * CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { // struct __va_list_tag { RecordDecl *VaListTagDecl; VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); VaListTagDecl->startDefinition(); const size_t NumFields = 4; QualType FieldTypes[NumFields]; const char *FieldNames[NumFields]; // long __gpr; FieldTypes[0] = Context->LongTy; FieldNames[0] = "__gpr"; // long __fpr; FieldTypes[1] = Context->LongTy; FieldNames[1] = "__fpr"; // void *__overflow_arg_area; FieldTypes[2] = Context->getPointerType(Context->VoidTy); FieldNames[2] = "__overflow_arg_area"; // void *__reg_save_area; FieldTypes[3] = Context->getPointerType(Context->VoidTy); FieldNames[3] = "__reg_save_area"; // Create fields for (unsigned i = 0; i < NumFields; ++i) { FieldDecl *Field = FieldDecl::Create(const_cast(*Context), VaListTagDecl, SourceLocation(), SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); Field->setAccess(AS_public); VaListTagDecl->addDecl(Field); } VaListTagDecl->completeDefinition(); Context->VaListTagDecl = VaListTagDecl; QualType VaListTagType = Context->getRecordType(VaListTagDecl); // }; // typedef __va_list_tag __builtin_va_list[1]; llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); QualType VaListTagArrayType = Context->getConstantArrayType( VaListTagType, Size, nullptr, ArrayType::Normal, 0); return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); } static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { // typedef struct __va_list_tag { RecordDecl *VaListTagDecl; VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); VaListTagDecl->startDefinition(); const size_t NumFields = 3; QualType FieldTypes[NumFields]; const char *FieldNames[NumFields]; // void *CurrentSavedRegisterArea; FieldTypes[0] = Context->getPointerType(Context->VoidTy); FieldNames[0] = "__current_saved_reg_area_pointer"; // void *SavedRegAreaEnd; FieldTypes[1] = Context->getPointerType(Context->VoidTy); FieldNames[1] = "__saved_reg_area_end_pointer"; // void *OverflowArea; FieldTypes[2] = Context->getPointerType(Context->VoidTy); FieldNames[2] = "__overflow_area_pointer"; // Create fields for (unsigned i = 0; i < NumFields; ++i) { FieldDecl *Field = FieldDecl::Create( const_cast(*Context), VaListTagDecl, SourceLocation(), SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/0, /*BitWidth=*/0, /*Mutable=*/false, ICIS_NoInit); Field->setAccess(AS_public); VaListTagDecl->addDecl(Field); } VaListTagDecl->completeDefinition(); Context->VaListTagDecl = VaListTagDecl; QualType VaListTagType = Context->getRecordType(VaListTagDecl); // } __va_list_tag; TypedefDecl *VaListTagTypedefDecl = Context->buildImplicitTypedef(VaListTagType, 
"__va_list_tag"); QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); // typedef __va_list_tag __builtin_va_list[1]; llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); QualType VaListTagArrayType = Context->getConstantArrayType( VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); } static TypedefDecl *CreateVaListDecl(const ASTContext *Context, TargetInfo::BuiltinVaListKind Kind) { switch (Kind) { case TargetInfo::CharPtrBuiltinVaList: return CreateCharPtrBuiltinVaListDecl(Context); case TargetInfo::VoidPtrBuiltinVaList: return CreateVoidPtrBuiltinVaListDecl(Context); case TargetInfo::AArch64ABIBuiltinVaList: return CreateAArch64ABIBuiltinVaListDecl(Context); case TargetInfo::PowerABIBuiltinVaList: return CreatePowerABIBuiltinVaListDecl(Context); case TargetInfo::X86_64ABIBuiltinVaList: return CreateX86_64ABIBuiltinVaListDecl(Context); case TargetInfo::PNaClABIBuiltinVaList: return CreatePNaClABIBuiltinVaListDecl(Context); case TargetInfo::AAPCSABIBuiltinVaList: return CreateAAPCSABIBuiltinVaListDecl(Context); case TargetInfo::SystemZBuiltinVaList: return CreateSystemZBuiltinVaListDecl(Context); case TargetInfo::HexagonBuiltinVaList: return CreateHexagonBuiltinVaListDecl(Context); } llvm_unreachable("Unhandled __builtin_va_list type kind"); } TypedefDecl *ASTContext::getBuiltinVaListDecl() const { if (!BuiltinVaListDecl) { BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); assert(BuiltinVaListDecl->isImplicit()); } return BuiltinVaListDecl; } Decl *ASTContext::getVaListTagDecl() const { // Force the creation of VaListTagDecl by building the __builtin_va_list // declaration. if (!VaListTagDecl) (void)getBuiltinVaListDecl(); return VaListTagDecl; } TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { if (!BuiltinMSVaListDecl) BuiltinMSVaListDecl = CreateMSVaListDecl(this); return BuiltinMSVaListDecl; } bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); } void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { assert(ObjCConstantStringType.isNull() && "'NSConstantString' type already set!"); ObjCConstantStringType = getObjCInterfaceType(Decl); } /// Retrieve the template name that corresponds to a non-empty /// lookup. TemplateName ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, UnresolvedSetIterator End) const { unsigned size = End - Begin; assert(size > 1 && "set is not overloaded!"); void *memory = Allocate(sizeof(OverloadedTemplateStorage) + size * sizeof(FunctionTemplateDecl*)); auto *OT = new (memory) OverloadedTemplateStorage(size); NamedDecl **Storage = OT->getStorage(); for (UnresolvedSetIterator I = Begin; I != End; ++I) { NamedDecl *D = *I; assert(isa(D) || isa(D) || (isa(D) && isa(D->getUnderlyingDecl()))); *Storage++ = D; } return TemplateName(OT); } /// Retrieve a template name representing an unqualified-id that has been /// assumed to name a template for ADL purposes. TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { auto *OT = new (*this) AssumedTemplateStorage(Name); return TemplateName(OT); } /// Retrieve the template name that represents a qualified /// template name such as \c std::vector. 
TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, bool TemplateKeyword, TemplateDecl *Template) const { assert(NNS && "Missing nested-name-specifier in qualified template name"); // FIXME: Canonicalization? llvm::FoldingSetNodeID ID; QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); void *InsertPos = nullptr; QualifiedTemplateName *QTN = QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); if (!QTN) { QTN = new (*this, alignof(QualifiedTemplateName)) QualifiedTemplateName(NNS, TemplateKeyword, Template); QualifiedTemplateNames.InsertNode(QTN, InsertPos); } return TemplateName(QTN); } /// Retrieve the template name that represents a dependent /// template name such as \c MetaFun::template apply. TemplateName ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, const IdentifierInfo *Name) const { assert((!NNS || NNS->isDependent()) && "Nested name specifier must be dependent"); llvm::FoldingSetNodeID ID; DependentTemplateName::Profile(ID, NNS, Name); void *InsertPos = nullptr; DependentTemplateName *QTN = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); if (QTN) return TemplateName(QTN); NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); if (CanonNNS == NNS) { QTN = new (*this, alignof(DependentTemplateName)) DependentTemplateName(NNS, Name); } else { TemplateName Canon = getDependentTemplateName(CanonNNS, Name); QTN = new (*this, alignof(DependentTemplateName)) DependentTemplateName(NNS, Name, Canon); DependentTemplateName *CheckQTN = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); assert(!CheckQTN && "Dependent type name canonicalization broken"); (void)CheckQTN; } DependentTemplateNames.InsertNode(QTN, InsertPos); return TemplateName(QTN); } /// Retrieve the template name that represents a dependent /// template name such as \c MetaFun::template operator+. 
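/// Illustrative note (documentation only): the overload below is keyed on the
/// (NNS, Operator) pair rather than an identifier, so a dependent use such as
/// \c MetaFun::template operator+ maps to one of these nodes; when the
/// nested-name-specifier is not already canonical, the node also records the
/// canonical name built recursively inside the function.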
TemplateName ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, OverloadedOperatorKind Operator) const { assert((!NNS || NNS->isDependent()) && "Nested name specifier must be dependent"); llvm::FoldingSetNodeID ID; DependentTemplateName::Profile(ID, NNS, Operator); void *InsertPos = nullptr; DependentTemplateName *QTN = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); if (QTN) return TemplateName(QTN); NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); if (CanonNNS == NNS) { QTN = new (*this, alignof(DependentTemplateName)) DependentTemplateName(NNS, Operator); } else { TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); QTN = new (*this, alignof(DependentTemplateName)) DependentTemplateName(NNS, Operator, Canon); DependentTemplateName *CheckQTN = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); assert(!CheckQTN && "Dependent template name canonicalization broken"); (void)CheckQTN; } DependentTemplateNames.InsertNode(QTN, InsertPos); return TemplateName(QTN); } TemplateName ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, TemplateName replacement) const { llvm::FoldingSetNodeID ID; SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); void *insertPos = nullptr; SubstTemplateTemplateParmStorage *subst = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); if (!subst) { subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); SubstTemplateTemplateParms.InsertNode(subst, insertPos); } return TemplateName(subst); } TemplateName ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, const TemplateArgument &ArgPack) const { auto &Self = const_cast(*this); llvm::FoldingSetNodeID ID; SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); void *InsertPos = nullptr; SubstTemplateTemplateParmPackStorage *Subst = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); if (!Subst) { Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, ArgPack.pack_size(), ArgPack.pack_begin()); SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); } return TemplateName(Subst); } /// getFromTargetType - Given one of the integer types provided by /// TargetInfo, produce the corresponding type. The unsigned @p Type /// is actually a value of type @c TargetInfo::IntType. CanQualType ASTContext::getFromTargetType(unsigned Type) const { switch (Type) { case TargetInfo::NoInt: return {}; case TargetInfo::SignedChar: return SignedCharTy; case TargetInfo::UnsignedChar: return UnsignedCharTy; case TargetInfo::SignedShort: return ShortTy; case TargetInfo::UnsignedShort: return UnsignedShortTy; case TargetInfo::SignedInt: return IntTy; case TargetInfo::UnsignedInt: return UnsignedIntTy; case TargetInfo::SignedLong: return LongTy; case TargetInfo::UnsignedLong: return UnsignedLongTy; case TargetInfo::SignedLongLong: return LongLongTy; case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; } llvm_unreachable("Unhandled TargetInfo::IntType value"); } //===----------------------------------------------------------------------===// // Type Predicates. //===----------------------------------------------------------------------===// /// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's /// garbage collection attribute. 
/// Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { if (getLangOpts().getGC() == LangOptions::NonGC) return Qualifiers::GCNone; assert(getLangOpts().ObjC); Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); // Default behaviour under objective-C's gc is for ObjC pointers // (or pointers to them) be treated as though they were declared // as __strong. if (GCAttrs == Qualifiers::GCNone) { if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) return Qualifiers::Strong; else if (Ty->isPointerType()) return getObjCGCAttrKind(Ty->castAs()->getPointeeType()); } else { // It's not valid to set GC attributes on anything that isn't a // pointer. #ifndef NDEBUG QualType CT = Ty->getCanonicalTypeInternal(); while (const auto *AT = dyn_cast(CT)) CT = AT->getElementType(); assert(CT->isAnyPointerType() || CT->isBlockPointerType()); #endif } return GCAttrs; } //===----------------------------------------------------------------------===// // Type Compatibility Testing //===----------------------------------------------------------------------===// /// areCompatVectorTypes - Return true if the two specified vector types are /// compatible. static bool areCompatVectorTypes(const VectorType *LHS, const VectorType *RHS) { assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); return LHS->getElementType() == RHS->getElementType() && LHS->getNumElements() == RHS->getNumElements(); } /// areCompatMatrixTypes - Return true if the two specified matrix types are /// compatible. static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, const ConstantMatrixType *RHS) { assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); return LHS->getElementType() == RHS->getElementType() && LHS->getNumRows() == RHS->getNumRows() && LHS->getNumColumns() == RHS->getNumColumns(); } bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, QualType SecondVec) { assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); if (hasSameUnqualifiedType(FirstVec, SecondVec)) return true; // Treat Neon vector types and most AltiVec vector types as if they are the // equivalent GCC vector types. const auto *First = FirstVec->castAs(); const auto *Second = SecondVec->castAs(); if (First->getNumElements() == Second->getNumElements() && hasSameType(First->getElementType(), Second->getElementType()) && First->getVectorKind() != VectorType::AltiVecPixel && First->getVectorKind() != VectorType::AltiVecBool && Second->getVectorKind() != VectorType::AltiVecPixel && Second->getVectorKind() != VectorType::AltiVecBool) return true; return false; } bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { while (true) { // __strong id if (const AttributedType *Attr = dyn_cast(Ty)) { if (Attr->getAttrKind() == attr::ObjCOwnership) return true; Ty = Attr->getModifiedType(); // X *__strong (...) } else if (const ParenType *Paren = dyn_cast(Ty)) { Ty = Paren->getInnerType(); // We do not want to look through typedefs, typeof(expr), // typeof(type), or any other way that the type is somehow // abstracted. } else { return false; } } } //===----------------------------------------------------------------------===// // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. //===----------------------------------------------------------------------===// /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the /// inheritance hierarchy of 'rProto'. 
bool ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, ObjCProtocolDecl *rProto) const { if (declaresSameEntity(lProto, rProto)) return true; for (auto *PI : rProto->protocols()) if (ProtocolCompatibleWithProtocol(lProto, PI)) return true; return false; } /// ObjCQualifiedClassTypesAreCompatible - compare Class and /// Class. bool ASTContext::ObjCQualifiedClassTypesAreCompatible( const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { for (auto *lhsProto : lhs->quals()) { bool match = false; for (auto *rhsProto : rhs->quals()) { if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { match = true; break; } } if (!match) return false; } return true; } /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an /// ObjCQualifiedIDType. bool ASTContext::ObjCQualifiedIdTypesAreCompatible( const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, bool compare) { // Allow id and an 'id' in all cases. if (lhs->isObjCIdType() || rhs->isObjCIdType()) return true; // Don't allow id to convert to Class or Class in either direction. if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) return false; if (lhs->isObjCQualifiedIdType()) { if (rhs->qual_empty()) { // If the RHS is a unqualified interface pointer "NSString*", // make sure we check the class hierarchy. if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { for (auto *I : lhs->quals()) { // when comparing an id
<P>
on lhs with a static type on rhs, // see if static class implements all of id's protocols, directly or // through its super class and categories. if (!rhsID->ClassImplementsProtocol(I, true)) return false; } } // If there are no qualifiers and no interface, we have an 'id'. return true; } // Both the right and left sides have qualifiers. for (auto *lhsProto : lhs->quals()) { bool match = false; // when comparing an id
<P>
on lhs with a static type on rhs, // see if static class implements all of id's protocols, directly or // through its super class and categories. for (auto *rhsProto : rhs->quals()) { if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { match = true; break; } } // If the RHS is a qualified interface pointer "NSString
<P>
*", // make sure we check the class hierarchy. if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { for (auto *I : lhs->quals()) { // when comparing an id
<P>
on lhs with a static type on rhs, // see if static class implements all of id's protocols, directly or // through its super class and categories. if (rhsID->ClassImplementsProtocol(I, true)) { match = true; break; } } } if (!match) return false; } return true; } assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id"); if (lhs->getInterfaceType()) { // If both the right and left sides have qualifiers. for (auto *lhsProto : lhs->quals()) { bool match = false; // when comparing an id
<P>
on rhs with a static type on lhs, // see if static class implements all of id's protocols, directly or // through its super class and categories. // First, lhs protocols in the qualifier list must be found, direct // or indirect in rhs's qualifier list or it is a mismatch. for (auto *rhsProto : rhs->quals()) { if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { match = true; break; } } if (!match) return false; } // Static class's protocols, or its super class or category protocols // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { llvm::SmallPtrSet LHSInheritedProtocols; CollectInheritedProtocols(lhsID, LHSInheritedProtocols); // This is rather dubious but matches gcc's behavior. If lhs has // no type qualifier and its class has no static protocol(s) // assume that it is mismatch. if (LHSInheritedProtocols.empty() && lhs->qual_empty()) return false; for (auto *lhsProto : LHSInheritedProtocols) { bool match = false; for (auto *rhsProto : rhs->quals()) { if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { match = true; break; } } if (!match) return false; } } return true; } return false; } /// canAssignObjCInterfaces - Return true if the two interface types are /// compatible for assignment from RHS to LHS. This handles validation of any /// protocol qualifiers on the LHS or RHS. bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, const ObjCObjectPointerType *RHSOPT) { const ObjCObjectType* LHS = LHSOPT->getObjectType(); const ObjCObjectType* RHS = RHSOPT->getObjectType(); // If either type represents the built-in 'id' type, return true. if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) return true; // Function object that propagates a successful result or handles // __kindof types. auto finish = [&](bool succeeded) -> bool { if (succeeded) return true; if (!RHS->isKindOfType()) return false; // Strip off __kindof and protocol qualifiers, then check whether // we can assign the other way. return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), LHSOPT->stripObjCKindOfTypeAndQuals(*this)); }; // Casts from or to id
<P>
are allowed when the other side has compatible // protocols. if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); } // Verify protocol compatibility for casts from Class to Class. if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); } // Casts from Class to Class, or vice-versa, are allowed. if (LHS->isObjCClass() && RHS->isObjCClass()) { return true; } // If we have 2 user-defined types, fall into that path. if (LHS->getInterface() && RHS->getInterface()) { return finish(canAssignObjCInterfaces(LHS, RHS)); } return false; } /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written /// for providing type-safety for objective-c pointers used to pass/return /// arguments in block literals. When passed as arguments, passing 'A*' where /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is /// not OK. For the return type, the opposite is not OK. bool ASTContext::canAssignObjCInterfacesInBlockPointer( const ObjCObjectPointerType *LHSOPT, const ObjCObjectPointerType *RHSOPT, bool BlockReturnType) { // Function object that propagates a successful result or handles // __kindof types. auto finish = [&](bool succeeded) -> bool { if (succeeded) return true; const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT; if (!Expected->isKindOfType()) return false; // Strip off __kindof and protocol qualifiers, then check whether // we can assign the other way. return canAssignObjCInterfacesInBlockPointer( RHSOPT->stripObjCKindOfTypeAndQuals(*this), LHSOPT->stripObjCKindOfTypeAndQuals(*this), BlockReturnType); }; if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) return true; if (LHSOPT->isObjCBuiltinType()) { return finish(RHSOPT->isObjCBuiltinType() || RHSOPT->isObjCQualifiedIdType()); } if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) { if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking) // Use for block parameters previous type checking for compatibility. return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) || // Or corrected type checking as in non-compat mode. (!BlockReturnType && ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false))); else return finish(ObjCQualifiedIdTypesAreCompatible( (BlockReturnType ? LHSOPT : RHSOPT), (BlockReturnType ? RHSOPT : LHSOPT), false)); } const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); if (LHS && RHS) { // We have 2 user-defined types. if (LHS != RHS) { if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) return finish(BlockReturnType); if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) return finish(!BlockReturnType); } else return true; } return false; } /// Comparison routine for Objective-C protocols to be used with /// llvm::array_pod_sort. static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, ObjCProtocolDecl * const *rhs) { return (*lhs)->getName().compare((*rhs)->getName()); } /// getIntersectionOfProtocols - This routine finds the intersection of set /// of protocols inherited from two distinct objective-c pointer objects with /// the given common base. /// It is used to build composite qualifier list of the composite type of /// the conditional expression involving two objective-c pointer objects. 
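/// Sketch (assuming P1, P2 and P3 are unrelated protocols that the common base
/// does not already adopt): intersecting the qualifiers of `Base<P1, P2> *`
/// and `Base<P2, P3> *` leaves {P2}, so the composite type of a conditional
/// expression over the two operands would be spelled roughly `Base<P2> *`.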
static void getIntersectionOfProtocols(ASTContext &Context, const ObjCInterfaceDecl *CommonBase, const ObjCObjectPointerType *LHSOPT, const ObjCObjectPointerType *RHSOPT, SmallVectorImpl &IntersectionSet) { const ObjCObjectType* LHS = LHSOPT->getObjectType(); const ObjCObjectType* RHS = RHSOPT->getObjectType(); assert(LHS->getInterface() && "LHS must have an interface base"); assert(RHS->getInterface() && "RHS must have an interface base"); // Add all of the protocols for the LHS. llvm::SmallPtrSet LHSProtocolSet; // Start with the protocol qualifiers. for (auto proto : LHS->quals()) { Context.CollectInheritedProtocols(proto, LHSProtocolSet); } // Also add the protocols associated with the LHS interface. Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); // Add all of the protocols for the RHS. llvm::SmallPtrSet RHSProtocolSet; // Start with the protocol qualifiers. for (auto proto : RHS->quals()) { Context.CollectInheritedProtocols(proto, RHSProtocolSet); } // Also add the protocols associated with the RHS interface. Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); // Compute the intersection of the collected protocol sets. for (auto proto : LHSProtocolSet) { if (RHSProtocolSet.count(proto)) IntersectionSet.push_back(proto); } // Compute the set of protocols that is implied by either the common type or // the protocols within the intersection. llvm::SmallPtrSet ImpliedProtocols; Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); // Remove any implied protocols from the list of inherited protocols. if (!ImpliedProtocols.empty()) { IntersectionSet.erase( std::remove_if(IntersectionSet.begin(), IntersectionSet.end(), [&](ObjCProtocolDecl *proto) -> bool { return ImpliedProtocols.count(proto) > 0; }), IntersectionSet.end()); } // Sort the remaining protocols by name. llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), compareObjCProtocolsByName); } /// Determine whether the first type is a subtype of the second. static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, QualType rhs) { // Common case: two object pointers. const auto *lhsOPT = lhs->getAs(); const auto *rhsOPT = rhs->getAs(); if (lhsOPT && rhsOPT) return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); // Two block pointers. const auto *lhsBlock = lhs->getAs(); const auto *rhsBlock = rhs->getAs(); if (lhsBlock && rhsBlock) return ctx.typesAreBlockPointerCompatible(lhs, rhs); // If either is an unqualified 'id' and the other is a block, it's // acceptable. if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) return true; return false; } // Check that the given Objective-C type argument lists are equivalent. 
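// Descriptive sketch of the variance handling below: an invariant parameter
// requires identical arguments (optionally comparing with __kindof stripped);
// a covariant parameter accepts an RHS argument assignable to the LHS argument
// (e.g. LHS `NSObject *`, RHS `NSString *`); a contravariant parameter
// reverses the direction of that check.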
static bool sameObjCTypeArgs(ASTContext &ctx, const ObjCInterfaceDecl *iface, ArrayRef lhsArgs, ArrayRef rhsArgs, bool stripKindOf) { if (lhsArgs.size() != rhsArgs.size()) return false; ObjCTypeParamList *typeParams = iface->getTypeParamList(); for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) continue; switch (typeParams->begin()[i]->getVariance()) { case ObjCTypeParamVariance::Invariant: if (!stripKindOf || !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), rhsArgs[i].stripObjCKindOfType(ctx))) { return false; } break; case ObjCTypeParamVariance::Covariant: if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) return false; break; case ObjCTypeParamVariance::Contravariant: if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) return false; break; } } return true; } QualType ASTContext::areCommonBaseCompatible( const ObjCObjectPointerType *Lptr, const ObjCObjectPointerType *Rptr) { const ObjCObjectType *LHS = Lptr->getObjectType(); const ObjCObjectType *RHS = Rptr->getObjectType(); const ObjCInterfaceDecl* LDecl = LHS->getInterface(); const ObjCInterfaceDecl* RDecl = RHS->getInterface(); if (!LDecl || !RDecl) return {}; // When either LHS or RHS is a kindof type, we should return a kindof type. // For example, for common base of kindof(ASub1) and kindof(ASub2), we return // kindof(A). bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); // Follow the left-hand side up the class hierarchy until we either hit a // root or find the RHS. Record the ancestors in case we don't find it. llvm::SmallDenseMap LHSAncestors; while (true) { // Record this ancestor. We'll need this if the common type isn't in the // path from the LHS to the root. LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; if (declaresSameEntity(LHS->getInterface(), RDecl)) { // Get the type arguments. ArrayRef LHSTypeArgs = LHS->getTypeArgsAsWritten(); bool anyChanges = false; if (LHS->isSpecialized() && RHS->isSpecialized()) { // Both have type arguments, compare them. if (!sameObjCTypeArgs(*this, LHS->getInterface(), LHS->getTypeArgs(), RHS->getTypeArgs(), /*stripKindOf=*/true)) return {}; } else if (LHS->isSpecialized() != RHS->isSpecialized()) { // If only one has type arguments, the result will not have type // arguments. LHSTypeArgs = {}; anyChanges = true; } // Compute the intersection of protocols. SmallVector Protocols; getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, Protocols); if (!Protocols.empty()) anyChanges = true; // If anything in the LHS will have changed, build a new result type. // If we need to return a kindof type but LHS is not a kindof type, we // build a new result type. if (anyChanges || LHS->isKindOfType() != anyKindOf) { QualType Result = getObjCInterfaceType(LHS->getInterface()); Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, anyKindOf || LHS->isKindOfType()); return getObjCObjectPointerType(Result); } return getObjCObjectPointerType(QualType(LHS, 0)); } // Find the superclass. QualType LHSSuperType = LHS->getSuperClassType(); if (LHSSuperType.isNull()) break; LHS = LHSSuperType->castAs(); } // We didn't find anything by following the LHS to its root; now check // the RHS against the cached set of ancestors. while (true) { auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); if (KnownLHS != LHSAncestors.end()) { LHS = KnownLHS->second; // Get the type arguments. 
ArrayRef RHSTypeArgs = RHS->getTypeArgsAsWritten(); bool anyChanges = false; if (LHS->isSpecialized() && RHS->isSpecialized()) { // Both have type arguments, compare them. if (!sameObjCTypeArgs(*this, LHS->getInterface(), LHS->getTypeArgs(), RHS->getTypeArgs(), /*stripKindOf=*/true)) return {}; } else if (LHS->isSpecialized() != RHS->isSpecialized()) { // If only one has type arguments, the result will not have type // arguments. RHSTypeArgs = {}; anyChanges = true; } // Compute the intersection of protocols. SmallVector Protocols; getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, Protocols); if (!Protocols.empty()) anyChanges = true; // If we need to return a kindof type but RHS is not a kindof type, we // build a new result type. if (anyChanges || RHS->isKindOfType() != anyKindOf) { QualType Result = getObjCInterfaceType(RHS->getInterface()); Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, anyKindOf || RHS->isKindOfType()); return getObjCObjectPointerType(Result); } return getObjCObjectPointerType(QualType(RHS, 0)); } // Find the superclass of the RHS. QualType RHSSuperType = RHS->getSuperClassType(); if (RHSSuperType.isNull()) break; RHS = RHSSuperType->castAs(); } return {}; } bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, const ObjCObjectType *RHS) { assert(LHS->getInterface() && "LHS is not an interface type"); assert(RHS->getInterface() && "RHS is not an interface type"); // Verify that the base decls are compatible: the RHS must be a subclass of // the LHS. ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); if (!IsSuperClass) return false; // If the LHS has protocol qualifiers, determine whether all of them are // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the // LHS). if (LHS->getNumProtocols() > 0) { // OK if conversion of LHS to SuperClass results in narrowing of types // ; i.e., SuperClass may implement at least one of the protocols // in LHS's protocol list. Example, SuperObj = lhs is ok. // But not SuperObj = lhs. llvm::SmallPtrSet SuperClassInheritedProtocols; CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols); // Also, if RHS has explicit quelifiers, include them for comparing with LHS's // qualifiers. for (auto *RHSPI : RHS->quals()) CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols); // If there is no protocols associated with RHS, it is not a match. if (SuperClassInheritedProtocols.empty()) return false; for (const auto *LHSProto : LHS->quals()) { bool SuperImplementsProtocol = false; for (auto *SuperClassProto : SuperClassInheritedProtocols) if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) { SuperImplementsProtocol = true; break; } if (!SuperImplementsProtocol) return false; } } // If the LHS is specialized, we may need to check type arguments. if (LHS->isSpecialized()) { // Follow the superclass chain until we've matched the LHS class in the // hierarchy. This substitutes type arguments through. const ObjCObjectType *RHSSuper = RHS; while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface)) RHSSuper = RHSSuper->getSuperClassType()->castAs(); // If the RHS is specializd, compare type arguments. 
if (RHSSuper->isSpecialized() && !sameObjCTypeArgs(*this, LHS->getInterface(), LHS->getTypeArgs(), RHSSuper->getTypeArgs(), /*stripKindOf=*/true)) { return false; } } return true; } bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) { // get the "pointed to" types const auto *LHSOPT = LHS->getAs(); const auto *RHSOPT = RHS->getAs(); if (!LHSOPT || !RHSOPT) return false; return canAssignObjCInterfaces(LHSOPT, RHSOPT) || canAssignObjCInterfaces(RHSOPT, LHSOPT); } bool ASTContext::canBindObjCObjectType(QualType To, QualType From) { return canAssignObjCInterfaces( getObjCObjectPointerType(To)->castAs(), getObjCObjectPointerType(From)->castAs()); } /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible, /// both shall have the identically qualified version of a compatible type. /// C99 6.2.7p1: Two types have compatible types if their types are the /// same. See 6.7.[2,3,5] for additional rules. bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, bool CompareUnqualified) { if (getLangOpts().CPlusPlus) return hasSameType(LHS, RHS); return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); } bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { return typesAreCompatible(LHS, RHS); } bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { return !mergeTypes(LHS, RHS, true).isNull(); } /// mergeTransparentUnionType - if T is a transparent union type and a member /// of T is compatible with SubType, return the merged type, else return /// QualType() QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, bool OfBlockPointer, bool Unqualified) { if (const RecordType *UT = T->getAsUnionType()) { RecordDecl *UD = UT->getDecl(); if (UD->hasAttr()) { for (const auto *I : UD->fields()) { QualType ET = I->getType().getUnqualifiedType(); QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); if (!MT.isNull()) return MT; } } } return {}; } /// mergeFunctionParameterTypes - merge two types which appear as function /// parameter types QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, bool OfBlockPointer, bool Unqualified) { // GNU extension: two types are compatible if they appear as a function // argument, one of the types is a transparent union type and the other // type is compatible with a union member QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, Unqualified); if (!lmerge.isNull()) return lmerge; QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, Unqualified); if (!rmerge.isNull()) return rmerge; return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); } QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, bool OfBlockPointer, bool Unqualified, bool AllowCXX) { const auto *lbase = lhs->castAs(); const auto *rbase = rhs->castAs(); const auto *lproto = dyn_cast(lbase); const auto *rproto = dyn_cast(rbase); bool allLTypes = true; bool allRTypes = true; // Check return type QualType retType; if (OfBlockPointer) { QualType RHS = rbase->getReturnType(); QualType LHS = lbase->getReturnType(); bool UnqualifiedResult = Unqualified; if (!UnqualifiedResult) UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); } else retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, Unqualified); if (retType.isNull()) return {}; if (Unqualified) retType = retType.getUnqualifiedType(); CanQualType LRetType = 
getCanonicalType(lbase->getReturnType()); CanQualType RRetType = getCanonicalType(rbase->getReturnType()); if (Unqualified) { LRetType = LRetType.getUnqualifiedType(); RRetType = RRetType.getUnqualifiedType(); } if (getCanonicalType(retType) != LRetType) allLTypes = false; if (getCanonicalType(retType) != RRetType) allRTypes = false; // FIXME: double check this // FIXME: should we error if lbase->getRegParmAttr() != 0 && // rbase->getRegParmAttr() != 0 && // lbase->getRegParmAttr() != rbase->getRegParmAttr()? FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); // Compatible functions must have compatible calling conventions if (lbaseInfo.getCC() != rbaseInfo.getCC()) return {}; // Regparm is part of the calling convention. if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) return {}; if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) return {}; if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) return {}; if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) return {}; if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) return {}; // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'. bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); if (lbaseInfo.getNoReturn() != NoReturn) allLTypes = false; if (rbaseInfo.getNoReturn() != NoReturn) allRTypes = false; FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); if (lproto && rproto) { // two C99 style function prototypes assert((AllowCXX || (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && "C++ shouldn't be here"); // Compatible functions must have the same number of parameters if (lproto->getNumParams() != rproto->getNumParams()) return {}; // Variadic and non-variadic functions aren't compatible if (lproto->isVariadic() != rproto->isVariadic()) return {}; if (lproto->getMethodQuals() != rproto->getMethodQuals()) return {}; SmallVector newParamInfos; bool canUseLeft, canUseRight; if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, newParamInfos)) return {}; if (!canUseLeft) allLTypes = false; if (!canUseRight) allRTypes = false; // Check parameter type compatibility SmallVector types; for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); QualType paramType = mergeFunctionParameterTypes( lParamType, rParamType, OfBlockPointer, Unqualified); if (paramType.isNull()) return {}; if (Unqualified) paramType = paramType.getUnqualifiedType(); types.push_back(paramType); if (Unqualified) { lParamType = lParamType.getUnqualifiedType(); rParamType = rParamType.getUnqualifiedType(); } if (getCanonicalType(paramType) != getCanonicalType(lParamType)) allLTypes = false; if (getCanonicalType(paramType) != getCanonicalType(rParamType)) allRTypes = false; } if (allLTypes) return lhs; if (allRTypes) return rhs; FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); EPI.ExtInfo = einfo; EPI.ExtParameterInfos = newParamInfos.empty() ? nullptr : newParamInfos.data(); return getFunctionType(retType, types, EPI); } if (lproto) allRTypes = false; if (rproto) allLTypes = false; const FunctionProtoType *proto = lproto ? 
lproto : rproto; if (proto) { assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); if (proto->isVariadic()) return {}; // Check that the types are compatible with the types that // would result from default argument promotions (C99 6.7.5.3p15). // The only types actually affected are promotable integer // types and floats, which would be passed as a different // type depending on whether the prototype is visible. for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { QualType paramTy = proto->getParamType(i); // Look at the converted type of enum types, since that is the type used // to pass enum values. if (const auto *Enum = paramTy->getAs()) { paramTy = Enum->getDecl()->getIntegerType(); if (paramTy.isNull()) return {}; } if (paramTy->isPromotableIntegerType() || getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) return {}; } if (allLTypes) return lhs; if (allRTypes) return rhs; FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); EPI.ExtInfo = einfo; return getFunctionType(retType, proto->getParamTypes(), EPI); } if (allLTypes) return lhs; if (allRTypes) return rhs; return getFunctionNoProtoType(retType, einfo); } /// Given that we have an enum type and a non-enum type, try to merge them. static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, QualType other, bool isBlockReturnType) { // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, // a signed integer type, or an unsigned integer type. // Compatibility is based on the underlying type, not the promotion // type. QualType underlyingType = ET->getDecl()->getIntegerType(); if (underlyingType.isNull()) return {}; if (Context.hasSameType(underlyingType, other)) return other; // In block return types, we're more permissive and accept any // integral type of the same size. if (isBlockReturnType && other->isIntegerType() && Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) return other; return {}; } QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer, bool Unqualified, bool BlockReturnType) { // C++ [expr]: If an expression initially has the type "reference to T", the // type is adjusted to "T" prior to any further analysis, the expression // designates the object or function denoted by the reference, and the // expression is an lvalue unless the reference is an rvalue reference and // the expression is a function call (possibly inside parentheses). assert(!LHS->getAs() && "LHS is a reference type?"); assert(!RHS->getAs() && "RHS is a reference type?"); if (Unqualified) { LHS = LHS.getUnqualifiedType(); RHS = RHS.getUnqualifiedType(); } QualType LHSCan = getCanonicalType(LHS), RHSCan = getCanonicalType(RHS); // If two types are identical, they are compatible. if (LHSCan == RHSCan) return LHS; // If the qualifiers are different, the types aren't compatible... mostly. Qualifiers LQuals = LHSCan.getLocalQualifiers(); Qualifiers RQuals = RHSCan.getLocalQualifiers(); if (LQuals != RQuals) { // If any of these qualifiers are different, we have a type // mismatch. if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || LQuals.getAddressSpace() != RQuals.getAddressSpace() || LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || LQuals.hasUnaligned() != RQuals.hasUnaligned()) return {}; // Exactly one GC qualifier difference is allowed: __strong is // okay if the other type has no GC qualifier but is an Objective // C object pointer (i.e. implicitly strong by default). 
We fix // this by pretending that the unqualified type was actually // qualified __strong. Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) return {}; if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); } if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); } return {}; } // Okay, qualifiers are equal. Type::TypeClass LHSClass = LHSCan->getTypeClass(); Type::TypeClass RHSClass = RHSCan->getTypeClass(); // We want to consider the two function types to be the same for these // comparisons, just force one to the other. if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; // Same as above for arrays if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) LHSClass = Type::ConstantArray; if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) RHSClass = Type::ConstantArray; // ObjCInterfaces are just specialized ObjCObjects. if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; // Canonicalize ExtVector -> Vector. if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; // If the canonical type classes don't match. if (LHSClass != RHSClass) { // Note that we only have special rules for turning block enum // returns into block int returns, not vice-versa. if (const auto *ETy = LHS->getAs()) { return mergeEnumWithInteger(*this, ETy, RHS, false); } if (const EnumType* ETy = RHS->getAs()) { return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); } // allow block pointer type to match an 'id' type. if (OfBlockPointer && !BlockReturnType) { if (LHS->isObjCIdType() && RHS->isBlockPointerType()) return LHS; if (RHS->isObjCIdType() && LHS->isBlockPointerType()) return RHS; } return {}; } // The canonical type classes match. 
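// Worked example for the Pointer case below (illustrative only): merging
// `int *` with a typedef'd spelling of `int *` first merges the pointee
// types, then returns whichever original side already matches the merged
// pointee canonically, so typedef sugar is preserved; a fresh pointer type is
// built only when neither side matches.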
switch (LHSClass) { #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: #define DEPENDENT_TYPE(Class, Base) case Type::Class: #include "clang/AST/TypeNodes.inc" llvm_unreachable("Non-canonical and dependent types shouldn't get here"); case Type::Auto: case Type::DeducedTemplateSpecialization: case Type::LValueReference: case Type::RValueReference: case Type::MemberPointer: llvm_unreachable("C++ should never be in mergeTypes"); case Type::ObjCInterface: case Type::IncompleteArray: case Type::VariableArray: case Type::FunctionProto: case Type::ExtVector: llvm_unreachable("Types are eliminated above"); case Type::Pointer: { // Merge two pointer types, while trying to preserve typedef info QualType LHSPointee = LHS->castAs()->getPointeeType(); QualType RHSPointee = RHS->castAs()->getPointeeType(); if (Unqualified) { LHSPointee = LHSPointee.getUnqualifiedType(); RHSPointee = RHSPointee.getUnqualifiedType(); } QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, Unqualified); if (ResultType.isNull()) return {}; if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) return LHS; if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) return RHS; return getPointerType(ResultType); } case Type::BlockPointer: { // Merge two block pointer types, while trying to preserve typedef info QualType LHSPointee = LHS->castAs()->getPointeeType(); QualType RHSPointee = RHS->castAs()->getPointeeType(); if (Unqualified) { LHSPointee = LHSPointee.getUnqualifiedType(); RHSPointee = RHSPointee.getUnqualifiedType(); } if (getLangOpts().OpenCL) { Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); // Blocks can't be an expression in a ternary operator (OpenCL v2.0 // 6.12.5) thus the following check is asymmetric. 
if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) return {}; LHSPteeQual.removeAddressSpace(); RHSPteeQual.removeAddressSpace(); LHSPointee = QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); RHSPointee = QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); } QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, Unqualified); if (ResultType.isNull()) return {}; if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) return LHS; if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) return RHS; return getBlockPointerType(ResultType); } case Type::Atomic: { // Merge two pointer types, while trying to preserve typedef info QualType LHSValue = LHS->castAs()->getValueType(); QualType RHSValue = RHS->castAs()->getValueType(); if (Unqualified) { LHSValue = LHSValue.getUnqualifiedType(); RHSValue = RHSValue.getUnqualifiedType(); } QualType ResultType = mergeTypes(LHSValue, RHSValue, false, Unqualified); if (ResultType.isNull()) return {}; if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) return LHS; if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) return RHS; return getAtomicType(ResultType); } case Type::ConstantArray: { const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) return {}; QualType LHSElem = getAsArrayType(LHS)->getElementType(); QualType RHSElem = getAsArrayType(RHS)->getElementType(); if (Unqualified) { LHSElem = LHSElem.getUnqualifiedType(); RHSElem = RHSElem.getUnqualifiedType(); } QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); if (ResultType.isNull()) return {}; const VariableArrayType* LVAT = getAsVariableArrayType(LHS); const VariableArrayType* RVAT = getAsVariableArrayType(RHS); // If either side is a variable array, and both are complete, check whether // the current dimension is definite. if (LVAT || RVAT) { auto SizeFetch = [this](const VariableArrayType* VAT, const ConstantArrayType* CAT) -> std::pair { if (VAT) { llvm::APSInt TheInt; Expr *E = VAT->getSizeExpr(); if (E && E->isIntegerConstantExpr(TheInt, *this)) return std::make_pair(true, TheInt); else return std::make_pair(false, TheInt); } else if (CAT) { return std::make_pair(true, CAT->getSize()); } else { return std::make_pair(false, llvm::APInt()); } }; bool HaveLSize, HaveRSize; llvm::APInt LSize, RSize; std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) return {}; // Definite, but unequal, array dimension } if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; if (LCAT) return getConstantArrayType(ResultType, LCAT->getSize(), LCAT->getSizeExpr(), ArrayType::ArraySizeModifier(), 0); if (RCAT) return getConstantArrayType(ResultType, RCAT->getSize(), RCAT->getSizeExpr(), ArrayType::ArraySizeModifier(), 0); if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; if (LVAT) { // FIXME: This isn't correct! But tricky to implement because // the array's size has to be the size of LHS, but the type // has to be different. return LHS; } if (RVAT) { // FIXME: This isn't correct! 
But tricky to implement because // the array's size has to be the size of RHS, but the type // has to be different. return RHS; } if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; return getIncompleteArrayType(ResultType, ArrayType::ArraySizeModifier(), 0); } case Type::FunctionNoProto: return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); case Type::Record: case Type::Enum: return {}; case Type::Builtin: // Only exactly equal builtin types are compatible, which is tested above. return {}; case Type::Complex: // Distinct complex types are incompatible. return {}; case Type::Vector: // FIXME: The merged type should be an ExtVector! if (areCompatVectorTypes(LHSCan->castAs(), RHSCan->castAs())) return LHS; return {}; case Type::ConstantMatrix: if (areCompatMatrixTypes(LHSCan->castAs(), RHSCan->castAs())) return LHS; return {}; case Type::ObjCObject: { // Check if the types are assignment compatible. // FIXME: This should be type compatibility, e.g. whether // "LHS x; RHS x;" at global scope is legal. if (canAssignObjCInterfaces(LHS->castAs(), RHS->castAs())) return LHS; return {}; } case Type::ObjCObjectPointer: if (OfBlockPointer) { if (canAssignObjCInterfacesInBlockPointer( LHS->castAs(), RHS->castAs(), BlockReturnType)) return LHS; return {}; } if (canAssignObjCInterfaces(LHS->castAs(), RHS->castAs())) return LHS; return {}; case Type::Pipe: assert(LHS != RHS && "Equivalent pipe types should have already been handled!"); return {}; case Type::ExtInt: { // Merge two ext-int types, while trying to preserve typedef info. bool LHSUnsigned = LHS->castAs()->isUnsigned(); bool RHSUnsigned = RHS->castAs()->isUnsigned(); unsigned LHSBits = LHS->castAs()->getNumBits(); unsigned RHSBits = RHS->castAs()->getNumBits(); // Like unsigned/int, shouldn't have a type if they dont match. if (LHSUnsigned != RHSUnsigned) return {}; if (LHSBits != RHSBits) return {}; return LHS; } } llvm_unreachable("Invalid Type::Class!"); } bool ASTContext::mergeExtParameterInfo( const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, bool &CanUseFirst, bool &CanUseSecond, SmallVectorImpl &NewParamInfos) { assert(NewParamInfos.empty() && "param info list not empty"); CanUseFirst = CanUseSecond = true; bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); // Fast path: if the first type doesn't have ext parameter infos, // we match if and only if the second type also doesn't have them. if (!FirstHasInfo && !SecondHasInfo) return true; bool NeedParamInfo = false; size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() : SecondFnType->getExtParameterInfos().size(); for (size_t I = 0; I < E; ++I) { FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; if (FirstHasInfo) FirstParam = FirstFnType->getExtParameterInfo(I); if (SecondHasInfo) SecondParam = SecondFnType->getExtParameterInfo(I); // Cannot merge unless everything except the noescape flag matches. 
if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) return false; bool FirstNoEscape = FirstParam.isNoEscape(); bool SecondNoEscape = SecondParam.isNoEscape(); bool IsNoEscape = FirstNoEscape && SecondNoEscape; NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); if (NewParamInfos.back().getOpaqueValue()) NeedParamInfo = true; if (FirstNoEscape != IsNoEscape) CanUseFirst = false; if (SecondNoEscape != IsNoEscape) CanUseSecond = false; } if (!NeedParamInfo) NewParamInfos.clear(); return true; } void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { ObjCLayouts[CD] = nullptr; } /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and /// 'RHS' attributes and returns the merged version; including for function /// return types. QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { QualType LHSCan = getCanonicalType(LHS), RHSCan = getCanonicalType(RHS); // If two types are identical, they are compatible. if (LHSCan == RHSCan) return LHS; if (RHSCan->isFunctionType()) { if (!LHSCan->isFunctionType()) return {}; QualType OldReturnType = cast(RHSCan.getTypePtr())->getReturnType(); QualType NewReturnType = cast(LHSCan.getTypePtr())->getReturnType(); QualType ResReturnType = mergeObjCGCQualifiers(NewReturnType, OldReturnType); if (ResReturnType.isNull()) return {}; if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); // In either case, use OldReturnType to build the new function type. const auto *F = LHS->castAs(); if (const auto *FPT = cast(F)) { FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); EPI.ExtInfo = getFunctionExtInfo(LHS); QualType ResultType = getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); return ResultType; } } return {}; } // If the qualifiers are different, the types can still be merged. Qualifiers LQuals = LHSCan.getLocalQualifiers(); Qualifiers RQuals = RHSCan.getLocalQualifiers(); if (LQuals != RQuals) { // If any of these qualifiers are different, we have a type mismatch. if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || LQuals.getAddressSpace() != RQuals.getAddressSpace()) return {}; // Exactly one GC qualifier difference is allowed: __strong is // okay if the other type has no GC qualifier but is an Objective // C object pointer (i.e. implicitly strong by default). We fix // this by pretending that the unqualified type was actually // qualified __strong. 
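    // Editorial example (not part of the upstream comment): merging 'id x;'
    // with '__strong id x;' succeeds and keeps the __strong-qualified side,
    // whereas any __weak mismatch is rejected outright below.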
Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) return {}; if (GC_L == Qualifiers::Strong) return LHS; if (GC_R == Qualifiers::Strong) return RHS; return {}; } if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { QualType LHSBaseQT = LHS->castAs()->getPointeeType(); QualType RHSBaseQT = RHS->castAs()->getPointeeType(); QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); if (ResQT == LHSBaseQT) return LHS; if (ResQT == RHSBaseQT) return RHS; } return {}; } //===----------------------------------------------------------------------===// // Integer Predicates //===----------------------------------------------------------------------===// unsigned ASTContext::getIntWidth(QualType T) const { if (const auto *ET = T->getAs()) T = ET->getDecl()->getIntegerType(); if (T->isBooleanType()) return 1; if(const auto *EIT = T->getAs()) return EIT->getNumBits(); // For builtin types, just use the standard type sizing method return (unsigned)getTypeSize(T); } QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && "Unexpected type"); // Turn <4 x signed int> -> <4 x unsigned int> if (const auto *VTy = T->getAs()) return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), VTy->getNumElements(), VTy->getVectorKind()); // For enums, we return the unsigned version of the base type. if (const auto *ETy = T->getAs()) T = ETy->getDecl()->getIntegerType(); switch (T->castAs()->getKind()) { case BuiltinType::Char_S: case BuiltinType::SChar: return UnsignedCharTy; case BuiltinType::Short: return UnsignedShortTy; case BuiltinType::Int: return UnsignedIntTy; case BuiltinType::Long: return UnsignedLongTy; case BuiltinType::LongLong: return UnsignedLongLongTy; case BuiltinType::Int128: return UnsignedInt128Ty; case BuiltinType::ShortAccum: return UnsignedShortAccumTy; case BuiltinType::Accum: return UnsignedAccumTy; case BuiltinType::LongAccum: return UnsignedLongAccumTy; case BuiltinType::SatShortAccum: return SatUnsignedShortAccumTy; case BuiltinType::SatAccum: return SatUnsignedAccumTy; case BuiltinType::SatLongAccum: return SatUnsignedLongAccumTy; case BuiltinType::ShortFract: return UnsignedShortFractTy; case BuiltinType::Fract: return UnsignedFractTy; case BuiltinType::LongFract: return UnsignedLongFractTy; case BuiltinType::SatShortFract: return SatUnsignedShortFractTy; case BuiltinType::SatFract: return SatUnsignedFractTy; case BuiltinType::SatLongFract: return SatUnsignedLongFractTy; default: llvm_unreachable("Unexpected signed integer or fixed point type"); } } ASTMutationListener::~ASTMutationListener() = default; void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, QualType ReturnType) {} //===----------------------------------------------------------------------===// // Builtin Type Computation //===----------------------------------------------------------------------===// /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the /// pointer over the consumed characters. This returns the resultant type. If /// AllowTypeModifiers is false then modifier like * are not parsed, just basic /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of /// a vector of "i*". 
/// /// RequiresICE is filled in on return to indicate whether the value is required /// to be an Integer Constant Expression. static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, ASTContext::GetBuiltinTypeError &Error, bool &RequiresICE, bool AllowTypeModifiers) { // Modifiers. int HowLong = 0; bool Signed = false, Unsigned = false; RequiresICE = false; // Read the prefixed modifiers first. bool Done = false; #ifndef NDEBUG bool IsSpecial = false; #endif while (!Done) { switch (*Str++) { default: Done = true; --Str; break; case 'I': RequiresICE = true; break; case 'S': assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); assert(!Signed && "Can't use 'S' modifier multiple times!"); Signed = true; break; case 'U': assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); assert(!Unsigned && "Can't use 'U' modifier multiple times!"); Unsigned = true; break; case 'L': assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); assert(HowLong <= 2 && "Can't have LLLL modifier"); ++HowLong; break; case 'N': // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); #ifndef NDEBUG IsSpecial = true; #endif if (Context.getTargetInfo().getLongWidth() == 32) ++HowLong; break; case 'W': // This modifier represents int64 type. assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); #ifndef NDEBUG IsSpecial = true; #endif switch (Context.getTargetInfo().getInt64Type()) { default: llvm_unreachable("Unexpected integer type"); case TargetInfo::SignedLong: HowLong = 1; break; case TargetInfo::SignedLongLong: HowLong = 2; break; } break; case 'Z': // This modifier represents int32 type. assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); #ifndef NDEBUG IsSpecial = true; #endif switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { default: llvm_unreachable("Unexpected integer type"); case TargetInfo::SignedInt: HowLong = 0; break; case TargetInfo::SignedLong: HowLong = 1; break; case TargetInfo::SignedLongLong: HowLong = 2; break; } break; case 'O': assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); #ifndef NDEBUG IsSpecial = true; #endif if (Context.getLangOpts().OpenCL) HowLong = 1; else HowLong = 2; break; } } QualType Type; // Read the base type. 
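  // Editorial examples of the prefix modifiers handled above (not part of the
  // upstream comment): "Ui" decodes as 'unsigned int', "LLi" as 'long long',
  // "Wi" as whatever the target uses for int64_t, and a leading 'I'
  // additionally marks the argument as requiring an integer constant
  // expression.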
switch (*Str++) { default: llvm_unreachable("Unknown builtin type letter!"); case 'y': assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers used with 'y'!"); Type = Context.BFloat16Ty; break; case 'v': assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers used with 'v'!"); Type = Context.VoidTy; break; case 'h': assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers used with 'h'!"); Type = Context.HalfTy; break; case 'f': assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers used with 'f'!"); Type = Context.FloatTy; break; case 'd': assert(HowLong < 3 && !Signed && !Unsigned && "Bad modifiers used with 'd'!"); if (HowLong == 1) Type = Context.LongDoubleTy; else if (HowLong == 2) Type = Context.Float128Ty; else Type = Context.DoubleTy; break; case 's': assert(HowLong == 0 && "Bad modifiers used with 's'!"); if (Unsigned) Type = Context.UnsignedShortTy; else Type = Context.ShortTy; break; case 'i': if (HowLong == 3) Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; else if (HowLong == 2) Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; else if (HowLong == 1) Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; else Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy; break; case 'c': assert(HowLong == 0 && "Bad modifiers used with 'c'!"); if (Signed) Type = Context.SignedCharTy; else if (Unsigned) Type = Context.UnsignedCharTy; else Type = Context.CharTy; break; case 'b': // boolean assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); Type = Context.BoolTy; break; case 'z': // size_t. assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); Type = Context.getSizeType(); break; case 'w': // wchar_t. assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); Type = Context.getWideCharType(); break; case 'F': Type = Context.getCFConstantStringType(); break; case 'G': Type = Context.getObjCIdType(); break; case 'H': Type = Context.getObjCSelType(); break; case 'M': Type = Context.getObjCSuperType(); break; case 'a': Type = Context.getBuiltinVaListType(); assert(!Type.isNull() && "builtin va list type not initialized!"); break; case 'A': // This is a "reference" to a va_list; however, what exactly // this means depends on how va_list is defined. There are two // different kinds of va_list: ones passed by value, and ones // passed by reference. An example of a by-value va_list is // x86, where va_list is a char*. An example of by-ref va_list // is x86-64, where va_list is a __va_list_tag[1]. For x86, // we want this argument to be a char*&; for x86-64, we want // it to be a __va_list_tag*. Type = Context.getBuiltinVaListType(); assert(!Type.isNull() && "builtin va list type not initialized!"); if (Type->isArrayType()) Type = Context.getArrayDecayedType(Type); else Type = Context.getLValueReferenceType(Type); break; case 'q': { char *End; unsigned NumElements = strtoul(Str, &End, 10); assert(End != Str && "Missing vector size"); Str = End; QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, false); assert(!RequiresICE && "Can't require vector ICE"); Type = Context.getScalableVectorType(ElementType, NumElements); break; } case 'V': { char *End; unsigned NumElements = strtoul(Str, &End, 10); assert(End != Str && "Missing vector size"); Str = End; QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, false); assert(!RequiresICE && "Can't require vector ICE"); // TODO: No way to make AltiVec vectors in builtins yet. 
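    // Editorial example (not from the upstream sources): a descriptor like
    // "V4f" has been parsed at this point into NumElements == 4 and
    // ElementType == float, producing a generic vector of 4 floats below.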
Type = Context.getVectorType(ElementType, NumElements, VectorType::GenericVector); break; } case 'E': { char *End; unsigned NumElements = strtoul(Str, &End, 10); assert(End != Str && "Missing vector size"); Str = End; QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, false); Type = Context.getExtVectorType(ElementType, NumElements); break; } case 'X': { QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, false); assert(!RequiresICE && "Can't require complex ICE"); Type = Context.getComplexType(ElementType); break; } case 'Y': Type = Context.getPointerDiffType(); break; case 'P': Type = Context.getFILEType(); if (Type.isNull()) { Error = ASTContext::GE_Missing_stdio; return {}; } break; case 'J': if (Signed) Type = Context.getsigjmp_bufType(); else Type = Context.getjmp_bufType(); if (Type.isNull()) { Error = ASTContext::GE_Missing_setjmp; return {}; } break; case 'K': assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); Type = Context.getucontext_tType(); if (Type.isNull()) { Error = ASTContext::GE_Missing_ucontext; return {}; } break; case 'p': Type = Context.getProcessIDType(); break; } // If there are modifiers and if we're allowed to parse them, go for it. Done = !AllowTypeModifiers; while (!Done) { switch (char c = *Str++) { default: Done = true; --Str; break; case '*': case '&': { // Both pointers and references can have their pointee types // qualified with an address space. char *End; unsigned AddrSpace = strtoul(Str, &End, 10); if (End != Str) { // Note AddrSpace == 0 is not the same as an unspecified address space. Type = Context.getAddrSpaceQualType( Type, Context.getLangASForBuiltinAddressSpace(AddrSpace)); Str = End; } if (c == '*') Type = Context.getPointerType(Type); else Type = Context.getLValueReferenceType(Type); break; } // FIXME: There's no way to have a built-in with an rvalue ref arg. case 'C': Type = Type.withConst(); break; case 'D': Type = Context.getVolatileType(Type); break; case 'R': Type = Type.withRestrict(); break; } } assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && "Integer constant 'I' type must be an integer"); return Type; } /// GetBuiltinType - Return the type for the specified builtin. QualType ASTContext::GetBuiltinType(unsigned Id, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs) const { const char *TypeStr = BuiltinInfo.getTypeString(Id); if (TypeStr[0] == '\0') { Error = GE_Missing_type; return {}; } SmallVector ArgTypes; bool RequiresICE = false; Error = GE_None; QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); if (Error != GE_None) return {}; assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); while (TypeStr[0] && TypeStr[0] != '.') { QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); if (Error != GE_None) return {}; // If this argument is required to be an IntegerConstantExpression and the // caller cares, fill in the bitmask we return. if (RequiresICE && IntegerConstantArgs) *IntegerConstantArgs |= 1 << ArgTypes.size(); // Do array -> pointer decay. The builtin should use the decayed type. if (Ty->isArrayType()) Ty = getArrayDecayedType(Ty); ArgTypes.push_back(Ty); } if (Id == Builtin::BI__GetExceptionInfo) return {}; assert((TypeStr[0] != '.' || TypeStr[1] == 0) && "'.' 
should only occur at end of builtin type list!"); bool Variadic = (TypeStr[0] == '.'); FunctionType::ExtInfo EI(getDefaultCallingConvention( Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); // We really shouldn't be making a no-proto type here. if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus) return getFunctionNoProtoType(ResType, EI); FunctionProtoType::ExtProtoInfo EPI; EPI.ExtInfo = EI; EPI.Variadic = Variadic; if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) EPI.ExceptionSpec.Type = getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; return getFunctionType(ResType, ArgTypes, EPI); } static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, const FunctionDecl *FD) { if (!FD->isExternallyVisible()) return GVA_Internal; // Non-user-provided functions get emitted as weak definitions with every // use, no matter whether they've been explicitly instantiated etc. if (const auto *MD = dyn_cast(FD)) if (!MD->isUserProvided()) return GVA_DiscardableODR; GVALinkage External; switch (FD->getTemplateSpecializationKind()) { case TSK_Undeclared: case TSK_ExplicitSpecialization: External = GVA_StrongExternal; break; case TSK_ExplicitInstantiationDefinition: return GVA_StrongODR; // C++11 [temp.explicit]p10: // [ Note: The intent is that an inline function that is the subject of // an explicit instantiation declaration will still be implicitly // instantiated when used so that the body can be considered for // inlining, but that no out-of-line copy of the inline function would be // generated in the translation unit. -- end note ] case TSK_ExplicitInstantiationDeclaration: return GVA_AvailableExternally; case TSK_ImplicitInstantiation: External = GVA_DiscardableODR; break; } if (!FD->isInlined()) return External; if ((!Context.getLangOpts().CPlusPlus && !Context.getTargetInfo().getCXXABI().isMicrosoft() && !FD->hasAttr()) || FD->hasAttr()) { // FIXME: This doesn't match gcc's behavior for dllexport inline functions. // GNU or C99 inline semantics. Determine whether this symbol should be // externally visible. if (FD->isInlineDefinitionExternallyVisible()) return External; // C99 inline semantics, where the symbol is not externally visible. return GVA_AvailableExternally; } // Functions specified with extern and inline in -fms-compatibility mode // forcibly get emitted. While the body of the function cannot be later // replaced, the function definition cannot be discarded. if (FD->isMSExternInline()) return GVA_StrongODR; return GVA_DiscardableODR; } static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, const Decl *D, GVALinkage L) { // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx // dllexport/dllimport on inline functions. if (D->hasAttr()) { if (L == GVA_DiscardableODR || L == GVA_StrongODR) return GVA_AvailableExternally; } else if (D->hasAttr()) { if (L == GVA_DiscardableODR) return GVA_StrongODR; } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice && D->hasAttr()) { // Device-side functions with __global__ attribute must always be // visible externally so they can be launched from host. if (L == GVA_DiscardableODR || L == GVA_Internal) return GVA_StrongODR; } return L; } /// Adjust the GVALinkage for a declaration based on what an external AST source /// knows about whether there can be other definitions of this declaration. 
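// For instance (editorial example, not part of the upstream comment): if the
// external AST source guarantees that no other translation unit can provide a
// definition (EK_Never below), an inline function that would otherwise be
// discardable (GVA_DiscardableODR) has to be emitted with GVA_StrongODR
// linkage, because other translation units rely on this one for the
// definition.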
static GVALinkage adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, GVALinkage L) { ExternalASTSource *Source = Ctx.getExternalSource(); if (!Source) return L; switch (Source->hasExternalDefinitions(D)) { case ExternalASTSource::EK_Never: // Other translation units rely on us to provide the definition. if (L == GVA_DiscardableODR) return GVA_StrongODR; break; case ExternalASTSource::EK_Always: return GVA_AvailableExternally; case ExternalASTSource::EK_ReplyHazy: break; } return L; } GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { return adjustGVALinkageForExternalDefinitionKind(*this, FD, adjustGVALinkageForAttributes(*this, FD, basicGVALinkageForFunction(*this, FD))); } static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, const VarDecl *VD) { if (!VD->isExternallyVisible()) return GVA_Internal; if (VD->isStaticLocal()) { const DeclContext *LexicalContext = VD->getParentFunctionOrMethod(); while (LexicalContext && !isa(LexicalContext)) LexicalContext = LexicalContext->getLexicalParent(); // ObjC Blocks can create local variables that don't have a FunctionDecl // LexicalContext. if (!LexicalContext) return GVA_DiscardableODR; // Otherwise, let the static local variable inherit its linkage from the // nearest enclosing function. auto StaticLocalLinkage = Context.GetGVALinkageForFunction(cast(LexicalContext)); // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must // be emitted in any object with references to the symbol for the object it // contains, whether inline or out-of-line." // Similar behavior is observed with MSVC. An alternative ABI could use // StrongODR/AvailableExternally to match the function, but none are // known/supported currently. if (StaticLocalLinkage == GVA_StrongODR || StaticLocalLinkage == GVA_AvailableExternally) return GVA_DiscardableODR; return StaticLocalLinkage; } // MSVC treats in-class initialized static data members as definitions. // By giving them non-strong linkage, out-of-line definitions won't // cause link errors. if (Context.isMSStaticDataMemberInlineDefinition(VD)) return GVA_DiscardableODR; // Most non-template variables have strong linkage; inline variables are // linkonce_odr or (occasionally, for compatibility) weak_odr. GVALinkage StrongLinkage; switch (Context.getInlineVariableDefinitionKind(VD)) { case ASTContext::InlineVariableDefinitionKind::None: StrongLinkage = GVA_StrongExternal; break; case ASTContext::InlineVariableDefinitionKind::Weak: case ASTContext::InlineVariableDefinitionKind::WeakUnknown: StrongLinkage = GVA_DiscardableODR; break; case ASTContext::InlineVariableDefinitionKind::Strong: StrongLinkage = GVA_StrongODR; break; } switch (VD->getTemplateSpecializationKind()) { case TSK_Undeclared: return StrongLinkage; case TSK_ExplicitSpecialization: return Context.getTargetInfo().getCXXABI().isMicrosoft() && VD->isStaticDataMember() ? 
GVA_StrongODR : StrongLinkage; case TSK_ExplicitInstantiationDefinition: return GVA_StrongODR; case TSK_ExplicitInstantiationDeclaration: return GVA_AvailableExternally; case TSK_ImplicitInstantiation: return GVA_DiscardableODR; } llvm_unreachable("Invalid Linkage!"); } GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) { return adjustGVALinkageForExternalDefinitionKind(*this, VD, adjustGVALinkageForAttributes(*this, VD, basicGVALinkageForVariable(*this, VD))); } bool ASTContext::DeclMustBeEmitted(const Decl *D) { if (const auto *VD = dyn_cast(D)) { if (!VD->isFileVarDecl()) return false; // Global named register variables (GNU extension) are never emitted. if (VD->getStorageClass() == SC_Register) return false; if (VD->getDescribedVarTemplate() || isa(VD)) return false; } else if (const auto *FD = dyn_cast(D)) { // We never need to emit an uninstantiated function template. if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) return false; } else if (isa(D)) return true; else if (isa(D)) return true; else if (isa(D)) return true; else if (isa(D)) return !D->getDeclContext()->isDependentContext(); else if (isa(D)) return !D->getDeclContext()->isDependentContext(); else if (isa(D) || isa(D)) return !D->getDeclContext()->isDependentContext(); else if (isa(D)) return true; else return false; if (D->isFromASTFile() && !LangOpts.BuildingPCHWithObjectFile) { assert(getExternalSource() && "It's from an AST file; must have a source."); // On Windows, PCH files are built together with an object file. If this // declaration comes from such a PCH and DeclMustBeEmitted would return // true, it would have returned true and the decl would have been emitted // into that object file, so it doesn't need to be emitted here. // Note that decls are still emitted if they're referenced, as usual; // DeclMustBeEmitted is used to decide whether a decl must be emitted even // if it's not referenced. // // Explicit template instantiation definitions are tricky. If there was an // explicit template instantiation decl in the PCH before, it will look like // the definition comes from there, even if that was just the declaration. // (Explicit instantiation defs of variable templates always get emitted.) bool IsExpInstDef = isa(D) && cast(D)->getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition; // Implicit member function definitions, such as operator= might not be // marked as template specializations, since they're not coming from a // template but synthesized directly on the class. IsExpInstDef |= isa(D) && cast(D)->getParent()->getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition; if (getExternalSource()->DeclIsFromPCHWithObjectFile(D) && !IsExpInstDef) return false; } // If this is a member of a class template, we do not need to emit it. if (D->getDeclContext()->isDependentContext()) return false; // Weak references don't produce any output by themselves. if (D->hasAttr()) return false; // Aliases and used decls are required. if (D->hasAttr() || D->hasAttr()) return true; if (const auto *FD = dyn_cast(D)) { // Forward declarations aren't required. if (!FD->doesThisDeclarationHaveABody()) return FD->doesDeclarationForceExternallyVisibleDefinition(); // Constructors and destructors are required. if (FD->hasAttr() || FD->hasAttr()) return true; // The key function for a class is required. This rule only comes // into play when inline functions can be key functions, though. 
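    // Editorial illustration (not from the upstream sources): given
    //   struct S { virtual void f(); };
    //   void S::f() {}   // first out-of-line virtual member function
    // S::f is S's key function, so its definition is where the vtable is
    // emitted, and this check forces the method to be emitted even if it is
    // never referenced.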
if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { if (const auto *MD = dyn_cast(FD)) { const CXXRecordDecl *RD = MD->getParent(); if (MD->isOutOfLine() && RD->isDynamicClass()) { const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) return true; } } } GVALinkage Linkage = GetGVALinkageForFunction(FD); // static, static inline, always_inline, and extern inline functions can // always be deferred. Normal inline functions can be deferred in C99/C++. // Implicit template instantiations can also be deferred in C++. return !isDiscardableGVALinkage(Linkage); } const auto *VD = cast(D); assert(VD->isFileVarDecl() && "Expected file scoped var"); // If the decl is marked as `declare target to`, it should be emitted for the // host and for the device. if (LangOpts.OpenMP && OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) return true; if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly && !isMSStaticDataMemberInlineDefinition(VD)) return false; // Variables that can be needed in other TUs are required. auto Linkage = GetGVALinkageForVariable(VD); if (!isDiscardableGVALinkage(Linkage)) return true; // We never need to emit a variable that is available in another TU. if (Linkage == GVA_AvailableExternally) return false; // Variables that have destruction with side-effects are required. if (VD->needsDestruction(*this)) return true; // Variables that have initialization with side-effects are required. if (VD->getInit() && VD->getInit()->HasSideEffects(*this) && // We can get a value-dependent initializer during error recovery. (VD->getInit()->isValueDependent() || !VD->evaluateValue())) return true; // Likewise, variables with tuple-like bindings are required if their // bindings have side-effects. if (const auto *DD = dyn_cast(VD)) for (const auto *BD : DD->bindings()) if (const auto *BindingVD = BD->getHoldingVar()) if (DeclMustBeEmitted(BindingVD)) return true; return false; } void ASTContext::forEachMultiversionedFunctionVersion( const FunctionDecl *FD, llvm::function_ref Pred) const { assert(FD->isMultiVersion() && "Only valid for multiversioned functions"); llvm::SmallDenseSet SeenDecls; FD = FD->getMostRecentDecl(); for (auto *CurDecl : FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) { SeenDecls.insert(CurFD); Pred(CurFD); } } } CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod, bool IsBuiltin) const { // Pass through to the C++ ABI object if (IsCXXMethod) return ABI->getDefaultMethodCallConv(IsVariadic); // Builtins ignore user-specified default calling convention and remain the // Target's default calling convention. if (!IsBuiltin) { switch (LangOpts.getDefaultCallingConv()) { case LangOptions::DCC_None: break; case LangOptions::DCC_CDecl: return CC_C; case LangOptions::DCC_FastCall: if (getTargetInfo().hasFeature("sse2") && !IsVariadic) return CC_X86FastCall; break; case LangOptions::DCC_StdCall: if (!IsVariadic) return CC_X86StdCall; break; case LangOptions::DCC_VectorCall: // __vectorcall cannot be applied to variadic functions. if (!IsVariadic) return CC_X86VectorCall; break; case LangOptions::DCC_RegCall: // __regcall cannot be applied to variadic functions. 
if (!IsVariadic) return CC_X86RegCall; break; } } return Target->getDefaultCallingConv(); } bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const { // Pass through to the C++ ABI object return ABI->isNearlyEmpty(RD); } VTableContextBase *ASTContext::getVTableContext() { if (!VTContext.get()) { auto ABI = Target->getCXXABI(); if (ABI.isMicrosoft()) VTContext.reset(new MicrosoftVTableContext(*this)); else { auto ComponentLayout = getLangOpts().RelativeCXXABIVTables ? ItaniumVTableContext::Relative : ItaniumVTableContext::Pointer; VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); } } return VTContext.get(); } MangleContext *ASTContext::createMangleContext(const TargetInfo *T) { if (!T) T = Target; switch (T->getCXXABI().getKind()) { case TargetCXXABI::Fuchsia: case TargetCXXABI::GenericAArch64: case TargetCXXABI::GenericItanium: case TargetCXXABI::GenericARM: case TargetCXXABI::GenericMIPS: case TargetCXXABI::iOS: case TargetCXXABI::iOS64: case TargetCXXABI::WebAssembly: case TargetCXXABI::WatchOS: case TargetCXXABI::XL: return ItaniumMangleContext::create(*this, getDiagnostics()); case TargetCXXABI::Microsoft: return MicrosoftMangleContext::create(*this, getDiagnostics()); } llvm_unreachable("Unsupported ABI"); } CXXABI::~CXXABI() = default; size_t ASTContext::getSideTableAllocatedMemory() const { return ASTRecordLayouts.getMemorySize() + llvm::capacity_in_bytes(ObjCLayouts) + llvm::capacity_in_bytes(KeyFunctions) + llvm::capacity_in_bytes(ObjCImpls) + llvm::capacity_in_bytes(BlockVarCopyInits) + llvm::capacity_in_bytes(DeclAttrs) + llvm::capacity_in_bytes(TemplateOrInstantiation) + llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + llvm::capacity_in_bytes(OverriddenMethods) + llvm::capacity_in_bytes(Types) + llvm::capacity_in_bytes(VariableArrayTypes); } /// getIntTypeForBitwidth - /// sets integer QualTy according to specified details: /// bitwidth, signed/unsigned. /// Returns empty type if there is no appropriate target types. QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const { TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed); CanQualType QualTy = getFromTargetType(Ty); if (!QualTy && DestWidth == 128) return Signed ? Int128Ty : UnsignedInt128Ty; return QualTy; } /// getRealTypeForBitwidth - /// sets floating point QualTy according to specified bitwidth. /// Returns empty type if there is no appropriate target types. QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, bool ExplicitIEEE) const { TargetInfo::RealType Ty = getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitIEEE); switch (Ty) { case TargetInfo::Float: return FloatTy; case TargetInfo::Double: return DoubleTy; case TargetInfo::LongDouble: return LongDoubleTy; case TargetInfo::Float128: return Float128Ty; case TargetInfo::NoFloat: return {}; } llvm_unreachable("Unhandled TargetInfo::RealType value"); } void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { if (Number > 1) MangleNumbers[ND] = Number; } unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const { auto I = MangleNumbers.find(ND); return I != MangleNumbers.end() ? 
                                               I->second : 1;
}

void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
  if (Number > 1)
    StaticLocalNumbers[VD] = Number;
}

unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
  auto I = StaticLocalNumbers.find(VD);
  return I != StaticLocalNumbers.end() ? I->second : 1;
}

MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

MangleNumberingContext &
ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx =
      ExtraMangleNumberingContexts[D];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}

const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()));
}

void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                      CXXConstructorDecl *CD) {
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()),
      cast<CXXConstructorDecl>(CD->getFirstDecl()));
}

void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}

TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}

void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}

DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}

void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}

unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
  ParameterIndexTable::const_iterator I = ParamIndices.find(D);
  assert(I != ParamIndices.end() &&
         "ParmIndices lacks entry set by ParmVarDecl");
  return I->second;
}

QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
                                               unsigned Length) const {
  // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
  if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
    EltTy = EltTy.withConst();

  EltTy = adjustStringLiteralBaseType(EltTy);

  // Get an array type for the string, according to C99 6.4.5. This includes
  // the null terminator character.
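  // Editorial example (not from the upstream sources): in C++ (or with
  // -fconst-strings) the literal "foo" gets the type 'const char[4]' --
  // three characters plus the implicit null terminator accounted for by
  // the Length + 1 below.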
return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr, ArrayType::Normal, /*IndexTypeQuals*/ 0); } StringLiteral * ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const { StringLiteral *&Result = StringLiteralCache[Key]; if (!Result) Result = StringLiteral::Create( *this, Key, StringLiteral::Ascii, /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()), SourceLocation()); return Result; } MSGuidDecl * ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const { assert(MSGuidTagDecl && "building MS GUID without MS extensions?"); llvm::FoldingSetNodeID ID; MSGuidDecl::Profile(ID, Parts); void *InsertPos; if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos)) return Existing; QualType GUIDType = getMSGuidType().withConst(); MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts); MSGuidDecls.InsertNode(New, InsertPos); return New; } bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const { const llvm::Triple &T = getTargetInfo().getTriple(); if (!T.isOSDarwin()) return false; if (!(T.isiOS() && T.isOSVersionLT(7)) && !(T.isMacOSX() && T.isOSVersionLT(10, 9))) return false; QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); CharUnits sizeChars = getTypeSizeInChars(AtomicTy); uint64_t Size = sizeChars.getQuantity(); CharUnits alignChars = getTypeAlignInChars(AtomicTy); unsigned Align = alignChars.getQuantity(); unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth(); return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits); } bool ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl, const ObjCMethodDecl *MethodImpl) { // No point trying to match an unavailable/deprecated mothod. if (MethodDecl->hasAttr() || MethodDecl->hasAttr()) return false; if (MethodDecl->getObjCDeclQualifier() != MethodImpl->getObjCDeclQualifier()) return false; if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) return false; if (MethodDecl->param_size() != MethodImpl->param_size()) return false; for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), EF = MethodDecl->param_end(); IM != EM && IF != EF; ++IM, ++IF) { const ParmVarDecl *DeclVar = (*IF); const ParmVarDecl *ImplVar = (*IM); if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) return false; if (!hasSameType(DeclVar->getType(), ImplVar->getType())) return false; } return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); } uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { LangAS AS; if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) AS = LangAS::Default; else AS = QT->getPointeeType().getAddressSpace(); return getTargetInfo().getNullPointerValue(AS); } unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { if (isTargetAddressSpace(AS)) return toTargetAddressSpace(AS); else return (*AddrSpaceMap)[(unsigned)AS]; } QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { assert(Ty->isFixedPointType()); if (Ty->isSaturatedFixedPointType()) return Ty; switch (Ty->castAs()->getKind()) { default: llvm_unreachable("Not a fixed point type!"); case BuiltinType::ShortAccum: return SatShortAccumTy; case BuiltinType::Accum: return SatAccumTy; case BuiltinType::LongAccum: return SatLongAccumTy; case BuiltinType::UShortAccum: return SatUnsignedShortAccumTy; case BuiltinType::UAccum: return SatUnsignedAccumTy; case BuiltinType::ULongAccum: return SatUnsignedLongAccumTy; case 
BuiltinType::ShortFract: return SatShortFractTy; case BuiltinType::Fract: return SatFractTy; case BuiltinType::LongFract: return SatLongFractTy; case BuiltinType::UShortFract: return SatUnsignedShortFractTy; case BuiltinType::UFract: return SatUnsignedFractTy; case BuiltinType::ULongFract: return SatUnsignedLongFractTy; } } LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const { if (LangOpts.OpenCL) return getTargetInfo().getOpenCLBuiltinAddressSpace(AS); if (LangOpts.CUDA) return getTargetInfo().getCUDABuiltinAddressSpace(AS); return getLangASFromTargetAS(AS); } // Explicitly instantiate this in case a Redeclarable is used from a TU that // doesn't include ASTContext.h template clang::LazyGenerationalUpdatePtr< const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType clang::LazyGenerationalUpdatePtr< const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue( const clang::ASTContext &Ctx, Decl *Value); unsigned char ASTContext::getFixedPointScale(QualType Ty) const { assert(Ty->isFixedPointType()); const TargetInfo &Target = getTargetInfo(); switch (Ty->castAs()->getKind()) { default: llvm_unreachable("Not a fixed point type!"); case BuiltinType::ShortAccum: case BuiltinType::SatShortAccum: return Target.getShortAccumScale(); case BuiltinType::Accum: case BuiltinType::SatAccum: return Target.getAccumScale(); case BuiltinType::LongAccum: case BuiltinType::SatLongAccum: return Target.getLongAccumScale(); case BuiltinType::UShortAccum: case BuiltinType::SatUShortAccum: return Target.getUnsignedShortAccumScale(); case BuiltinType::UAccum: case BuiltinType::SatUAccum: return Target.getUnsignedAccumScale(); case BuiltinType::ULongAccum: case BuiltinType::SatULongAccum: return Target.getUnsignedLongAccumScale(); case BuiltinType::ShortFract: case BuiltinType::SatShortFract: return Target.getShortFractScale(); case BuiltinType::Fract: case BuiltinType::SatFract: return Target.getFractScale(); case BuiltinType::LongFract: case BuiltinType::SatLongFract: return Target.getLongFractScale(); case BuiltinType::UShortFract: case BuiltinType::SatUShortFract: return Target.getUnsignedShortFractScale(); case BuiltinType::UFract: case BuiltinType::SatUFract: return Target.getUnsignedFractScale(); case BuiltinType::ULongFract: case BuiltinType::SatULongFract: return Target.getUnsignedLongFractScale(); } } unsigned char ASTContext::getFixedPointIBits(QualType Ty) const { assert(Ty->isFixedPointType()); const TargetInfo &Target = getTargetInfo(); switch (Ty->castAs()->getKind()) { default: llvm_unreachable("Not a fixed point type!"); case BuiltinType::ShortAccum: case BuiltinType::SatShortAccum: return Target.getShortAccumIBits(); case BuiltinType::Accum: case BuiltinType::SatAccum: return Target.getAccumIBits(); case BuiltinType::LongAccum: case BuiltinType::SatLongAccum: return Target.getLongAccumIBits(); case BuiltinType::UShortAccum: case BuiltinType::SatUShortAccum: return Target.getUnsignedShortAccumIBits(); case BuiltinType::UAccum: case BuiltinType::SatUAccum: return Target.getUnsignedAccumIBits(); case BuiltinType::ULongAccum: case BuiltinType::SatULongAccum: return Target.getUnsignedLongAccumIBits(); case BuiltinType::ShortFract: case BuiltinType::SatShortFract: case BuiltinType::Fract: case BuiltinType::SatFract: case BuiltinType::LongFract: case BuiltinType::SatLongFract: case BuiltinType::UShortFract: case BuiltinType::SatUShortFract: case BuiltinType::UFract: case BuiltinType::SatUFract: case BuiltinType::ULongFract: case BuiltinType::SatULongFract: 
return 0; } } FixedPointSemantics ASTContext::getFixedPointSemantics(QualType Ty) const { assert((Ty->isFixedPointType() || Ty->isIntegerType()) && "Can only get the fixed point semantics for a " "fixed point or integer type."); if (Ty->isIntegerType()) return FixedPointSemantics::GetIntegerSemantics(getIntWidth(Ty), Ty->isSignedIntegerType()); bool isSigned = Ty->isSignedFixedPointType(); return FixedPointSemantics( static_cast(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned, Ty->isSaturatedFixedPointType(), !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding()); } APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const { assert(Ty->isFixedPointType()); return APFixedPoint::getMax(getFixedPointSemantics(Ty)); } APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const { assert(Ty->isFixedPointType()); return APFixedPoint::getMin(getFixedPointSemantics(Ty)); } QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { assert(Ty->isUnsignedFixedPointType() && "Expected unsigned fixed point type"); switch (Ty->castAs()->getKind()) { case BuiltinType::UShortAccum: return ShortAccumTy; case BuiltinType::UAccum: return AccumTy; case BuiltinType::ULongAccum: return LongAccumTy; case BuiltinType::SatUShortAccum: return SatShortAccumTy; case BuiltinType::SatUAccum: return SatAccumTy; case BuiltinType::SatULongAccum: return SatLongAccumTy; case BuiltinType::UShortFract: return ShortFractTy; case BuiltinType::UFract: return FractTy; case BuiltinType::ULongFract: return LongFractTy; case BuiltinType::SatUShortFract: return SatShortFractTy; case BuiltinType::SatUFract: return SatFractTy; case BuiltinType::SatULongFract: return SatLongFractTy; default: llvm_unreachable("Unexpected unsigned fixed point type"); } } ParsedTargetAttr ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { assert(TD != nullptr); ParsedTargetAttr ParsedAttr = TD->parse(); ParsedAttr.Features.erase( llvm::remove_if(ParsedAttr.Features, [&](const std::string &Feat) { return !Target->isValidFeatureName( StringRef{Feat}.substr(1)); }), ParsedAttr.Features.end()); return ParsedAttr; } void ASTContext::getFunctionFeatureMap(llvm::StringMap &FeatureMap, const FunctionDecl *FD) const { if (FD) getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD)); else Target->initFeatureMap(FeatureMap, getDiagnostics(), Target->getTargetOpts().CPU, Target->getTargetOpts().Features); } // Fills in the supplied string map with the set of target features for the // passed in function. void ASTContext::getFunctionFeatureMap(llvm::StringMap &FeatureMap, GlobalDecl GD) const { StringRef TargetCPU = Target->getTargetOpts().CPU; const FunctionDecl *FD = GD.getDecl()->getAsFunction(); if (const auto *TD = FD->getAttr()) { ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD); // Make a copy of the features as passed on the command line into the // beginning of the additional features from the function to override. ParsedAttr.Features.insert( ParsedAttr.Features.begin(), Target->getTargetOpts().FeaturesAsWritten.begin(), Target->getTargetOpts().FeaturesAsWritten.end()); if (ParsedAttr.Architecture != "" && Target->isValidCPUName(ParsedAttr.Architecture)) TargetCPU = ParsedAttr.Architecture; // Now populate the feature map, first with the TargetCPU which is either // the default or a new one from the target attribute string. Then we'll use // the passed in features (FeaturesAsWritten) along with the new ones from // the attribute. 
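    // Editorial example (feature spellings are illustrative, not from the
    // upstream sources): with '-mavx2' on the command line and
    // __attribute__((target("fma"))) on the function, FeaturesAsWritten
    // ("+avx2") was inserted in front of the attribute's "+fma" above, so the
    // per-function attribute extends -- and on conflicts overrides -- the
    // command-line feature set for this function only.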
Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, ParsedAttr.Features); } else if (const auto *SD = FD->getAttr()) { llvm::SmallVector FeaturesTmp; Target->getCPUSpecificCPUDispatchFeatures( SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp); std::vector Features(FeaturesTmp.begin(), FeaturesTmp.end()); Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); } else { FeatureMap = Target->getTargetOpts().FeatureMap; } } OMPTraitInfo &ASTContext::getNewOMPTraitInfo() { OMPTraitInfoVector.emplace_back(new OMPTraitInfo()); return *OMPTraitInfoVector.back(); } const DiagnosticBuilder & clang::operator<<(const DiagnosticBuilder &DB, const ASTContext::SectionInfo &Section) { if (Section.Decl) return DB << Section.Decl; return DB << "a prior #pragma section"; } diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp index 3779e0cb872b..e0a186307e93 100644 --- a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp +++ b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp @@ -1,9002 +1,9003 @@ //===- ASTImporter.cpp - Importing ASTs from other Contexts ---------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the ASTImporter class which imports AST nodes from one // context into another context. // //===----------------------------------------------------------------------===// #include "clang/AST/ASTImporter.h" #include "clang/AST/ASTImporterSharedState.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTDiagnostic.h" #include "clang/AST/ASTStructuralEquivalence.h" #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclAccessPair.h" #include "clang/AST/DeclBase.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclGroup.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclVisitor.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LambdaCapture.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtVisitor.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeVisitor.h" #include "clang/AST/UnresolvedSet.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/None.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/ScopeExit.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MemoryBuffer.h" #include #include #include 
#include #include #include namespace clang { using llvm::make_error; using llvm::Error; using llvm::Expected; using ExpectedType = llvm::Expected; using ExpectedStmt = llvm::Expected; using ExpectedExpr = llvm::Expected; using ExpectedDecl = llvm::Expected; using ExpectedSLoc = llvm::Expected; using ExpectedName = llvm::Expected; std::string ImportError::toString() const { // FIXME: Improve error texts. switch (Error) { case NameConflict: return "NameConflict"; case UnsupportedConstruct: return "UnsupportedConstruct"; case Unknown: return "Unknown error"; } llvm_unreachable("Invalid error code."); return "Invalid error code."; } void ImportError::log(raw_ostream &OS) const { OS << toString(); } std::error_code ImportError::convertToErrorCode() const { llvm_unreachable("Function not implemented."); } char ImportError::ID; template SmallVector getCanonicalForwardRedeclChain(Redeclarable* D) { SmallVector Redecls; for (auto *R : D->getFirstDecl()->redecls()) { if (R != D->getFirstDecl()) Redecls.push_back(R); } Redecls.push_back(D->getFirstDecl()); std::reverse(Redecls.begin(), Redecls.end()); return Redecls; } SmallVector getCanonicalForwardRedeclChain(Decl* D) { if (auto *FD = dyn_cast(D)) return getCanonicalForwardRedeclChain(FD); if (auto *VD = dyn_cast(D)) return getCanonicalForwardRedeclChain(VD); if (auto *TD = dyn_cast(D)) return getCanonicalForwardRedeclChain(TD); llvm_unreachable("Bad declaration kind"); } void updateFlags(const Decl *From, Decl *To) { // Check if some flags or attrs are new in 'From' and copy into 'To'. // FIXME: Other flags or attrs? if (From->isUsed(false) && !To->isUsed(false)) To->setIsUsed(); } class ASTNodeImporter : public TypeVisitor, public DeclVisitor, public StmtVisitor { ASTImporter &Importer; // Use this instead of Importer.importInto . template LLVM_NODISCARD Error importInto(ImportT &To, const ImportT &From) { return Importer.importInto(To, From); } // Use this to import pointers of specific type. template LLVM_NODISCARD Error importInto(ImportT *&To, ImportT *From) { auto ToOrErr = Importer.Import(From); if (ToOrErr) To = cast_or_null(*ToOrErr); return ToOrErr.takeError(); } // Call the import function of ASTImporter for a baseclass of type `T` and // cast the return value to `T`. template Expected import(T *From) { auto ToOrErr = Importer.Import(From); if (!ToOrErr) return ToOrErr.takeError(); return cast_or_null(*ToOrErr); } template Expected import(const T *From) { return import(const_cast(From)); } // Call the import function of ASTImporter for type `T`. template Expected import(const T &From) { return Importer.Import(From); } // Import an Optional by importing the contained T, if any. template Expected> import(Optional From) { if (!From) return Optional(); return import(*From); } // Helper for chaining together multiple imports. If an error is detected, // subsequent imports will return default constructed nodes, so that failure // can be detected with a single conditional branch after a sequence of // imports. template T importChecked(Error &Err, const T &From) { // Don't attempt to import nodes if we hit an error earlier. if (Err) return T{}; Expected MaybeVal = import(From); if (!MaybeVal) { Err = MaybeVal.takeError(); return T{}; } return *MaybeVal; } // Wrapper for an overload set. template struct CallOverloadedCreateFun { template decltype(auto) operator()(Args &&... args) { return ToDeclT::Create(std::forward(args)...); } }; // Always use these functions to create a Decl during import. 
There are // certain tasks which must be done after the Decl was created, e.g. we // must immediately register that as an imported Decl. The parameter `ToD` // will be set to the newly created Decl or if had been imported before // then to the already imported Decl. Returns a bool value set to true if // the `FromD` had been imported before. template LLVM_NODISCARD bool GetImportedOrCreateDecl(ToDeclT *&ToD, FromDeclT *FromD, Args &&... args) { // There may be several overloads of ToDeclT::Create. We must make sure // to call the one which would be chosen by the arguments, thus we use a // wrapper for the overload set. CallOverloadedCreateFun OC; return GetImportedOrCreateSpecialDecl(ToD, OC, FromD, std::forward(args)...); } // Use this overload if a special Type is needed to be created. E.g if we // want to create a `TypeAliasDecl` and assign that to a `TypedefNameDecl` // then: // TypedefNameDecl *ToTypedef; // GetImportedOrCreateDecl(ToTypedef, FromD, ...); template LLVM_NODISCARD bool GetImportedOrCreateDecl(ToDeclT *&ToD, FromDeclT *FromD, Args &&... args) { CallOverloadedCreateFun OC; return GetImportedOrCreateSpecialDecl(ToD, OC, FromD, std::forward(args)...); } // Use this version if a special create function must be // used, e.g. CXXRecordDecl::CreateLambda . template LLVM_NODISCARD bool GetImportedOrCreateSpecialDecl(ToDeclT *&ToD, CreateFunT CreateFun, FromDeclT *FromD, Args &&... args) { if (Importer.getImportDeclErrorIfAny(FromD)) { ToD = nullptr; return true; // Already imported but with error. } ToD = cast_or_null(Importer.GetAlreadyImportedOrNull(FromD)); if (ToD) return true; // Already imported. ToD = CreateFun(std::forward(args)...); // Keep track of imported Decls. Importer.RegisterImportedDecl(FromD, ToD); InitializeImportedDecl(FromD, ToD); return false; // A new Decl is created. } void InitializeImportedDecl(Decl *FromD, Decl *ToD) { ToD->IdentifierNamespace = FromD->IdentifierNamespace; if (FromD->hasAttrs()) for (const Attr *FromAttr : FromD->getAttrs()) { // FIXME: Return of the error here is not possible until store of // import errors is implemented. auto ToAttrOrErr = import(FromAttr); if (ToAttrOrErr) ToD->addAttr(*ToAttrOrErr); else llvm::consumeError(ToAttrOrErr.takeError()); } if (FromD->isUsed()) ToD->setIsUsed(); if (FromD->isImplicit()) ToD->setImplicit(); } // Check if we have found an existing definition. Returns with that // definition if yes, otherwise returns null. Decl *FindAndMapDefinition(FunctionDecl *D, FunctionDecl *FoundFunction) { const FunctionDecl *Definition = nullptr; if (D->doesThisDeclarationHaveABody() && FoundFunction->hasBody(Definition)) return Importer.MapImported(D, const_cast(Definition)); return nullptr; } void addDeclToContexts(Decl *FromD, Decl *ToD) { if (Importer.isMinimalImport()) { // In minimal import case the decl must be added even if it is not // contained in original context, for LLDB compatibility. // FIXME: Check if a better solution is possible. 
if (!FromD->getDescribedTemplate() && FromD->getFriendObjectKind() == Decl::FOK_None) ToD->getLexicalDeclContext()->addDeclInternal(ToD); return; } DeclContext *FromDC = FromD->getDeclContext(); DeclContext *FromLexicalDC = FromD->getLexicalDeclContext(); DeclContext *ToDC = ToD->getDeclContext(); DeclContext *ToLexicalDC = ToD->getLexicalDeclContext(); bool Visible = false; if (FromDC->containsDeclAndLoad(FromD)) { ToDC->addDeclInternal(ToD); Visible = true; } if (ToDC != ToLexicalDC && FromLexicalDC->containsDeclAndLoad(FromD)) { ToLexicalDC->addDeclInternal(ToD); Visible = true; } // If the Decl was added to any context, it was made already visible. // Otherwise it is still possible that it should be visible. if (!Visible) { if (auto *FromNamed = dyn_cast(FromD)) { auto *ToNamed = cast(ToD); DeclContextLookupResult FromLookup = FromDC->lookup(FromNamed->getDeclName()); for (NamedDecl *ND : FromLookup) if (ND == FromNamed) { ToDC->makeDeclVisibleInContext(ToNamed); break; } } } } public: explicit ASTNodeImporter(ASTImporter &Importer) : Importer(Importer) {} using TypeVisitor::Visit; using DeclVisitor::Visit; using StmtVisitor::Visit; // Importing types ExpectedType VisitType(const Type *T); ExpectedType VisitAtomicType(const AtomicType *T); ExpectedType VisitBuiltinType(const BuiltinType *T); ExpectedType VisitDecayedType(const DecayedType *T); ExpectedType VisitComplexType(const ComplexType *T); ExpectedType VisitPointerType(const PointerType *T); ExpectedType VisitBlockPointerType(const BlockPointerType *T); ExpectedType VisitLValueReferenceType(const LValueReferenceType *T); ExpectedType VisitRValueReferenceType(const RValueReferenceType *T); ExpectedType VisitMemberPointerType(const MemberPointerType *T); ExpectedType VisitConstantArrayType(const ConstantArrayType *T); ExpectedType VisitIncompleteArrayType(const IncompleteArrayType *T); ExpectedType VisitVariableArrayType(const VariableArrayType *T); ExpectedType VisitDependentSizedArrayType(const DependentSizedArrayType *T); // FIXME: DependentSizedExtVectorType ExpectedType VisitVectorType(const VectorType *T); ExpectedType VisitExtVectorType(const ExtVectorType *T); ExpectedType VisitFunctionNoProtoType(const FunctionNoProtoType *T); ExpectedType VisitFunctionProtoType(const FunctionProtoType *T); ExpectedType VisitUnresolvedUsingType(const UnresolvedUsingType *T); ExpectedType VisitParenType(const ParenType *T); ExpectedType VisitTypedefType(const TypedefType *T); ExpectedType VisitTypeOfExprType(const TypeOfExprType *T); // FIXME: DependentTypeOfExprType ExpectedType VisitTypeOfType(const TypeOfType *T); ExpectedType VisitDecltypeType(const DecltypeType *T); ExpectedType VisitUnaryTransformType(const UnaryTransformType *T); ExpectedType VisitAutoType(const AutoType *T); ExpectedType VisitInjectedClassNameType(const InjectedClassNameType *T); // FIXME: DependentDecltypeType ExpectedType VisitRecordType(const RecordType *T); ExpectedType VisitEnumType(const EnumType *T); ExpectedType VisitAttributedType(const AttributedType *T); ExpectedType VisitTemplateTypeParmType(const TemplateTypeParmType *T); ExpectedType VisitSubstTemplateTypeParmType( const SubstTemplateTypeParmType *T); ExpectedType VisitTemplateSpecializationType( const TemplateSpecializationType *T); ExpectedType VisitElaboratedType(const ElaboratedType *T); ExpectedType VisitDependentNameType(const DependentNameType *T); ExpectedType VisitPackExpansionType(const PackExpansionType *T); ExpectedType VisitDependentTemplateSpecializationType( const 
DependentTemplateSpecializationType *T); ExpectedType VisitObjCInterfaceType(const ObjCInterfaceType *T); ExpectedType VisitObjCObjectType(const ObjCObjectType *T); ExpectedType VisitObjCObjectPointerType(const ObjCObjectPointerType *T); // Importing declarations Error ImportDeclParts( NamedDecl *D, DeclContext *&DC, DeclContext *&LexicalDC, DeclarationName &Name, NamedDecl *&ToD, SourceLocation &Loc); Error ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD = nullptr); Error ImportDeclarationNameLoc( const DeclarationNameInfo &From, DeclarationNameInfo &To); Error ImportDeclContext(DeclContext *FromDC, bool ForceImport = false); Error ImportDeclContext( Decl *From, DeclContext *&ToDC, DeclContext *&ToLexicalDC); Error ImportImplicitMethods(const CXXRecordDecl *From, CXXRecordDecl *To); Expected ImportCastPath(CastExpr *E); using Designator = DesignatedInitExpr::Designator; /// What we should import from the definition. enum ImportDefinitionKind { /// Import the default subset of the definition, which might be /// nothing (if minimal import is set) or might be everything (if minimal /// import is not set). IDK_Default, /// Import everything. IDK_Everything, /// Import only the bare bones needed to establish a valid /// DeclContext. IDK_Basic }; bool shouldForceImportDeclContext(ImportDefinitionKind IDK) { return IDK == IDK_Everything || (IDK == IDK_Default && !Importer.isMinimalImport()); } Error ImportInitializer(VarDecl *From, VarDecl *To); Error ImportDefinition( RecordDecl *From, RecordDecl *To, ImportDefinitionKind Kind = IDK_Default); Error ImportDefinition( EnumDecl *From, EnumDecl *To, ImportDefinitionKind Kind = IDK_Default); Error ImportDefinition( ObjCInterfaceDecl *From, ObjCInterfaceDecl *To, ImportDefinitionKind Kind = IDK_Default); Error ImportDefinition( ObjCProtocolDecl *From, ObjCProtocolDecl *To, ImportDefinitionKind Kind = IDK_Default); Error ImportTemplateArguments( const TemplateArgument *FromArgs, unsigned NumFromArgs, SmallVectorImpl &ToArgs); Expected ImportTemplateArgument(const TemplateArgument &From); template Error ImportTemplateArgumentListInfo( const InContainerTy &Container, TemplateArgumentListInfo &ToTAInfo); template Error ImportTemplateArgumentListInfo( SourceLocation FromLAngleLoc, SourceLocation FromRAngleLoc, const InContainerTy &Container, TemplateArgumentListInfo &Result); using TemplateArgsTy = SmallVector; using FunctionTemplateAndArgsTy = std::tuple; Expected ImportFunctionTemplateWithTemplateArgsFromSpecialization( FunctionDecl *FromFD); Error ImportTemplateParameterLists(const DeclaratorDecl *FromD, DeclaratorDecl *ToD); Error ImportTemplateInformation(FunctionDecl *FromFD, FunctionDecl *ToFD); Error ImportFunctionDeclBody(FunctionDecl *FromFD, FunctionDecl *ToFD); Error ImportDefaultArgOfParmVarDecl(const ParmVarDecl *FromParam, ParmVarDecl *ToParam); template bool hasSameVisibilityContextAndLinkage(T *Found, T *From); bool IsStructuralMatch(Decl *From, Decl *To, bool Complain); bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord, bool Complain = true); bool IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar, bool Complain = true); bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToRecord); bool IsStructuralMatch(EnumConstantDecl *FromEC, EnumConstantDecl *ToEC); bool IsStructuralMatch(FunctionTemplateDecl *From, FunctionTemplateDecl *To); bool IsStructuralMatch(FunctionDecl *From, FunctionDecl *To); bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To); bool IsStructuralMatch(VarTemplateDecl *From, 
VarTemplateDecl *To); ExpectedDecl VisitDecl(Decl *D); ExpectedDecl VisitImportDecl(ImportDecl *D); ExpectedDecl VisitEmptyDecl(EmptyDecl *D); ExpectedDecl VisitAccessSpecDecl(AccessSpecDecl *D); ExpectedDecl VisitStaticAssertDecl(StaticAssertDecl *D); ExpectedDecl VisitTranslationUnitDecl(TranslationUnitDecl *D); ExpectedDecl VisitNamespaceDecl(NamespaceDecl *D); ExpectedDecl VisitNamespaceAliasDecl(NamespaceAliasDecl *D); ExpectedDecl VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias); ExpectedDecl VisitTypedefDecl(TypedefDecl *D); ExpectedDecl VisitTypeAliasDecl(TypeAliasDecl *D); ExpectedDecl VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D); ExpectedDecl VisitLabelDecl(LabelDecl *D); ExpectedDecl VisitEnumDecl(EnumDecl *D); ExpectedDecl VisitRecordDecl(RecordDecl *D); ExpectedDecl VisitEnumConstantDecl(EnumConstantDecl *D); ExpectedDecl VisitFunctionDecl(FunctionDecl *D); ExpectedDecl VisitCXXMethodDecl(CXXMethodDecl *D); ExpectedDecl VisitCXXConstructorDecl(CXXConstructorDecl *D); ExpectedDecl VisitCXXDestructorDecl(CXXDestructorDecl *D); ExpectedDecl VisitCXXConversionDecl(CXXConversionDecl *D); ExpectedDecl VisitFieldDecl(FieldDecl *D); ExpectedDecl VisitIndirectFieldDecl(IndirectFieldDecl *D); ExpectedDecl VisitFriendDecl(FriendDecl *D); ExpectedDecl VisitObjCIvarDecl(ObjCIvarDecl *D); ExpectedDecl VisitVarDecl(VarDecl *D); ExpectedDecl VisitImplicitParamDecl(ImplicitParamDecl *D); ExpectedDecl VisitParmVarDecl(ParmVarDecl *D); ExpectedDecl VisitObjCMethodDecl(ObjCMethodDecl *D); ExpectedDecl VisitObjCTypeParamDecl(ObjCTypeParamDecl *D); ExpectedDecl VisitObjCCategoryDecl(ObjCCategoryDecl *D); ExpectedDecl VisitObjCProtocolDecl(ObjCProtocolDecl *D); ExpectedDecl VisitLinkageSpecDecl(LinkageSpecDecl *D); ExpectedDecl VisitUsingDecl(UsingDecl *D); ExpectedDecl VisitUsingShadowDecl(UsingShadowDecl *D); ExpectedDecl VisitUsingDirectiveDecl(UsingDirectiveDecl *D); ExpectedDecl VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D); ExpectedDecl VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D); ExpectedDecl VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D); ExpectedDecl VisitLifetimeExtendedTemporaryDecl(LifetimeExtendedTemporaryDecl *D); Expected ImportObjCTypeParamList(ObjCTypeParamList *list); ExpectedDecl VisitObjCInterfaceDecl(ObjCInterfaceDecl *D); ExpectedDecl VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D); ExpectedDecl VisitObjCImplementationDecl(ObjCImplementationDecl *D); ExpectedDecl VisitObjCPropertyDecl(ObjCPropertyDecl *D); ExpectedDecl VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D); ExpectedDecl VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D); ExpectedDecl VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D); ExpectedDecl VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D); ExpectedDecl VisitClassTemplateDecl(ClassTemplateDecl *D); ExpectedDecl VisitClassTemplateSpecializationDecl( ClassTemplateSpecializationDecl *D); ExpectedDecl VisitVarTemplateDecl(VarTemplateDecl *D); ExpectedDecl VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D); ExpectedDecl VisitFunctionTemplateDecl(FunctionTemplateDecl *D); // Importing statements ExpectedStmt VisitStmt(Stmt *S); ExpectedStmt VisitGCCAsmStmt(GCCAsmStmt *S); ExpectedStmt VisitDeclStmt(DeclStmt *S); ExpectedStmt VisitNullStmt(NullStmt *S); ExpectedStmt VisitCompoundStmt(CompoundStmt *S); ExpectedStmt VisitCaseStmt(CaseStmt *S); ExpectedStmt VisitDefaultStmt(DefaultStmt *S); ExpectedStmt VisitLabelStmt(LabelStmt *S); ExpectedStmt VisitAttributedStmt(AttributedStmt 
*S); ExpectedStmt VisitIfStmt(IfStmt *S); ExpectedStmt VisitSwitchStmt(SwitchStmt *S); ExpectedStmt VisitWhileStmt(WhileStmt *S); ExpectedStmt VisitDoStmt(DoStmt *S); ExpectedStmt VisitForStmt(ForStmt *S); ExpectedStmt VisitGotoStmt(GotoStmt *S); ExpectedStmt VisitIndirectGotoStmt(IndirectGotoStmt *S); ExpectedStmt VisitContinueStmt(ContinueStmt *S); ExpectedStmt VisitBreakStmt(BreakStmt *S); ExpectedStmt VisitReturnStmt(ReturnStmt *S); // FIXME: MSAsmStmt // FIXME: SEHExceptStmt // FIXME: SEHFinallyStmt // FIXME: SEHTryStmt // FIXME: SEHLeaveStmt // FIXME: CapturedStmt ExpectedStmt VisitCXXCatchStmt(CXXCatchStmt *S); ExpectedStmt VisitCXXTryStmt(CXXTryStmt *S); ExpectedStmt VisitCXXForRangeStmt(CXXForRangeStmt *S); // FIXME: MSDependentExistsStmt ExpectedStmt VisitObjCForCollectionStmt(ObjCForCollectionStmt *S); ExpectedStmt VisitObjCAtCatchStmt(ObjCAtCatchStmt *S); ExpectedStmt VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S); ExpectedStmt VisitObjCAtTryStmt(ObjCAtTryStmt *S); ExpectedStmt VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S); ExpectedStmt VisitObjCAtThrowStmt(ObjCAtThrowStmt *S); ExpectedStmt VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S); // Importing expressions ExpectedStmt VisitExpr(Expr *E); ExpectedStmt VisitVAArgExpr(VAArgExpr *E); ExpectedStmt VisitChooseExpr(ChooseExpr *E); ExpectedStmt VisitGNUNullExpr(GNUNullExpr *E); ExpectedStmt VisitPredefinedExpr(PredefinedExpr *E); ExpectedStmt VisitDeclRefExpr(DeclRefExpr *E); ExpectedStmt VisitImplicitValueInitExpr(ImplicitValueInitExpr *E); ExpectedStmt VisitDesignatedInitExpr(DesignatedInitExpr *E); ExpectedStmt VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E); ExpectedStmt VisitIntegerLiteral(IntegerLiteral *E); ExpectedStmt VisitFloatingLiteral(FloatingLiteral *E); ExpectedStmt VisitImaginaryLiteral(ImaginaryLiteral *E); ExpectedStmt VisitFixedPointLiteral(FixedPointLiteral *E); ExpectedStmt VisitCharacterLiteral(CharacterLiteral *E); ExpectedStmt VisitStringLiteral(StringLiteral *E); ExpectedStmt VisitCompoundLiteralExpr(CompoundLiteralExpr *E); ExpectedStmt VisitAtomicExpr(AtomicExpr *E); ExpectedStmt VisitAddrLabelExpr(AddrLabelExpr *E); ExpectedStmt VisitConstantExpr(ConstantExpr *E); ExpectedStmt VisitParenExpr(ParenExpr *E); ExpectedStmt VisitParenListExpr(ParenListExpr *E); ExpectedStmt VisitStmtExpr(StmtExpr *E); ExpectedStmt VisitUnaryOperator(UnaryOperator *E); ExpectedStmt VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E); ExpectedStmt VisitBinaryOperator(BinaryOperator *E); ExpectedStmt VisitConditionalOperator(ConditionalOperator *E); ExpectedStmt VisitBinaryConditionalOperator(BinaryConditionalOperator *E); ExpectedStmt VisitOpaqueValueExpr(OpaqueValueExpr *E); ExpectedStmt VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E); ExpectedStmt VisitExpressionTraitExpr(ExpressionTraitExpr *E); ExpectedStmt VisitArraySubscriptExpr(ArraySubscriptExpr *E); ExpectedStmt VisitCompoundAssignOperator(CompoundAssignOperator *E); ExpectedStmt VisitImplicitCastExpr(ImplicitCastExpr *E); ExpectedStmt VisitExplicitCastExpr(ExplicitCastExpr *E); ExpectedStmt VisitOffsetOfExpr(OffsetOfExpr *OE); ExpectedStmt VisitCXXThrowExpr(CXXThrowExpr *E); ExpectedStmt VisitCXXNoexceptExpr(CXXNoexceptExpr *E); ExpectedStmt VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E); ExpectedStmt VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E); ExpectedStmt VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E); ExpectedStmt VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E); ExpectedStmt 
VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E); ExpectedStmt VisitPackExpansionExpr(PackExpansionExpr *E); ExpectedStmt VisitSizeOfPackExpr(SizeOfPackExpr *E); ExpectedStmt VisitCXXNewExpr(CXXNewExpr *E); ExpectedStmt VisitCXXDeleteExpr(CXXDeleteExpr *E); ExpectedStmt VisitCXXConstructExpr(CXXConstructExpr *E); ExpectedStmt VisitCXXMemberCallExpr(CXXMemberCallExpr *E); ExpectedStmt VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E); ExpectedStmt VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E); ExpectedStmt VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E); ExpectedStmt VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E); ExpectedStmt VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E); ExpectedStmt VisitExprWithCleanups(ExprWithCleanups *E); ExpectedStmt VisitCXXThisExpr(CXXThisExpr *E); ExpectedStmt VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E); ExpectedStmt VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E); ExpectedStmt VisitMemberExpr(MemberExpr *E); ExpectedStmt VisitCallExpr(CallExpr *E); ExpectedStmt VisitLambdaExpr(LambdaExpr *LE); ExpectedStmt VisitInitListExpr(InitListExpr *E); ExpectedStmt VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E); ExpectedStmt VisitCXXInheritedCtorInitExpr(CXXInheritedCtorInitExpr *E); ExpectedStmt VisitArrayInitLoopExpr(ArrayInitLoopExpr *E); ExpectedStmt VisitArrayInitIndexExpr(ArrayInitIndexExpr *E); ExpectedStmt VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E); ExpectedStmt VisitCXXNamedCastExpr(CXXNamedCastExpr *E); ExpectedStmt VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E); ExpectedStmt VisitTypeTraitExpr(TypeTraitExpr *E); ExpectedStmt VisitCXXTypeidExpr(CXXTypeidExpr *E); template Error ImportArrayChecked(IIter Ibegin, IIter Iend, OIter Obegin) { using ItemT = std::remove_reference_t; for (; Ibegin != Iend; ++Ibegin, ++Obegin) { Expected ToOrErr = import(*Ibegin); if (!ToOrErr) return ToOrErr.takeError(); *Obegin = *ToOrErr; } return Error::success(); } // Import every item from a container structure into an output container. // If error occurs, stops at first error and returns the error. // The output container should have space for all needed elements (it is not // expanded, new items are put into from the beginning). template Error ImportContainerChecked( const InContainerTy &InContainer, OutContainerTy &OutContainer) { return ImportArrayChecked( InContainer.begin(), InContainer.end(), OutContainer.begin()); } template Error ImportArrayChecked(const InContainerTy &InContainer, OIter Obegin) { return ImportArrayChecked(InContainer.begin(), InContainer.end(), Obegin); } Error ImportOverriddenMethods(CXXMethodDecl *ToMethod, CXXMethodDecl *FromMethod); Expected FindFunctionTemplateSpecialization( FunctionDecl *FromFD); // Returns true if the given function has a placeholder return type and // that type is declared inside the body of the function. // E.g. 
auto f() { struct X{}; return X(); } bool hasAutoReturnTypeDeclaredInside(FunctionDecl *D); }; template Error ASTNodeImporter::ImportTemplateArgumentListInfo( SourceLocation FromLAngleLoc, SourceLocation FromRAngleLoc, const InContainerTy &Container, TemplateArgumentListInfo &Result) { auto ToLAngleLocOrErr = import(FromLAngleLoc); if (!ToLAngleLocOrErr) return ToLAngleLocOrErr.takeError(); auto ToRAngleLocOrErr = import(FromRAngleLoc); if (!ToRAngleLocOrErr) return ToRAngleLocOrErr.takeError(); TemplateArgumentListInfo ToTAInfo(*ToLAngleLocOrErr, *ToRAngleLocOrErr); if (auto Err = ImportTemplateArgumentListInfo(Container, ToTAInfo)) return Err; Result = ToTAInfo; return Error::success(); } template <> Error ASTNodeImporter::ImportTemplateArgumentListInfo( const TemplateArgumentListInfo &From, TemplateArgumentListInfo &Result) { return ImportTemplateArgumentListInfo( From.getLAngleLoc(), From.getRAngleLoc(), From.arguments(), Result); } template <> Error ASTNodeImporter::ImportTemplateArgumentListInfo< ASTTemplateArgumentListInfo>( const ASTTemplateArgumentListInfo &From, TemplateArgumentListInfo &Result) { return ImportTemplateArgumentListInfo( From.LAngleLoc, From.RAngleLoc, From.arguments(), Result); } Expected ASTNodeImporter::ImportFunctionTemplateWithTemplateArgsFromSpecialization( FunctionDecl *FromFD) { assert(FromFD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplateSpecialization); FunctionTemplateAndArgsTy Result; auto *FTSInfo = FromFD->getTemplateSpecializationInfo(); if (Error Err = importInto(std::get<0>(Result), FTSInfo->getTemplate())) return std::move(Err); // Import template arguments. auto TemplArgs = FTSInfo->TemplateArguments->asArray(); if (Error Err = ImportTemplateArguments(TemplArgs.data(), TemplArgs.size(), std::get<1>(Result))) return std::move(Err); return Result; } template <> Expected ASTNodeImporter::import(TemplateParameterList *From) { SmallVector To(From->size()); if (Error Err = ImportContainerChecked(*From, To)) return std::move(Err); ExpectedExpr ToRequiresClause = import(From->getRequiresClause()); if (!ToRequiresClause) return ToRequiresClause.takeError(); auto ToTemplateLocOrErr = import(From->getTemplateLoc()); if (!ToTemplateLocOrErr) return ToTemplateLocOrErr.takeError(); auto ToLAngleLocOrErr = import(From->getLAngleLoc()); if (!ToLAngleLocOrErr) return ToLAngleLocOrErr.takeError(); auto ToRAngleLocOrErr = import(From->getRAngleLoc()); if (!ToRAngleLocOrErr) return ToRAngleLocOrErr.takeError(); return TemplateParameterList::Create( Importer.getToContext(), *ToTemplateLocOrErr, *ToLAngleLocOrErr, To, *ToRAngleLocOrErr, *ToRequiresClause); } template <> Expected ASTNodeImporter::import(const TemplateArgument &From) { switch (From.getKind()) { case TemplateArgument::Null: return TemplateArgument(); case TemplateArgument::Type: { ExpectedType ToTypeOrErr = import(From.getAsType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); return TemplateArgument(*ToTypeOrErr); } case TemplateArgument::Integral: { ExpectedType ToTypeOrErr = import(From.getIntegralType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); return TemplateArgument(From, *ToTypeOrErr); } case TemplateArgument::Declaration: { Expected ToOrErr = import(From.getAsDecl()); if (!ToOrErr) return ToOrErr.takeError(); ExpectedType ToTypeOrErr = import(From.getParamTypeForDecl()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); return TemplateArgument(*ToOrErr, *ToTypeOrErr); } case TemplateArgument::NullPtr: { ExpectedType ToTypeOrErr = import(From.getNullPtrType()); if 
(!ToTypeOrErr) return ToTypeOrErr.takeError(); return TemplateArgument(*ToTypeOrErr, /*isNullPtr*/true); } case TemplateArgument::Template: { Expected ToTemplateOrErr = import(From.getAsTemplate()); if (!ToTemplateOrErr) return ToTemplateOrErr.takeError(); return TemplateArgument(*ToTemplateOrErr); } case TemplateArgument::TemplateExpansion: { Expected ToTemplateOrErr = import(From.getAsTemplateOrTemplatePattern()); if (!ToTemplateOrErr) return ToTemplateOrErr.takeError(); return TemplateArgument( *ToTemplateOrErr, From.getNumTemplateExpansions()); } case TemplateArgument::Expression: if (ExpectedExpr ToExpr = import(From.getAsExpr())) return TemplateArgument(*ToExpr); else return ToExpr.takeError(); case TemplateArgument::Pack: { SmallVector ToPack; ToPack.reserve(From.pack_size()); if (Error Err = ImportTemplateArguments( From.pack_begin(), From.pack_size(), ToPack)) return std::move(Err); return TemplateArgument( llvm::makeArrayRef(ToPack).copy(Importer.getToContext())); } } llvm_unreachable("Invalid template argument kind"); } template <> Expected ASTNodeImporter::import(const TemplateArgumentLoc &TALoc) { Expected ArgOrErr = import(TALoc.getArgument()); if (!ArgOrErr) return ArgOrErr.takeError(); TemplateArgument Arg = *ArgOrErr; TemplateArgumentLocInfo FromInfo = TALoc.getLocInfo(); TemplateArgumentLocInfo ToInfo; if (Arg.getKind() == TemplateArgument::Expression) { ExpectedExpr E = import(FromInfo.getAsExpr()); if (!E) return E.takeError(); ToInfo = TemplateArgumentLocInfo(*E); } else if (Arg.getKind() == TemplateArgument::Type) { if (auto TSIOrErr = import(FromInfo.getAsTypeSourceInfo())) ToInfo = TemplateArgumentLocInfo(*TSIOrErr); else return TSIOrErr.takeError(); } else { auto ToTemplateQualifierLocOrErr = import(FromInfo.getTemplateQualifierLoc()); if (!ToTemplateQualifierLocOrErr) return ToTemplateQualifierLocOrErr.takeError(); auto ToTemplateNameLocOrErr = import(FromInfo.getTemplateNameLoc()); if (!ToTemplateNameLocOrErr) return ToTemplateNameLocOrErr.takeError(); auto ToTemplateEllipsisLocOrErr = import(FromInfo.getTemplateEllipsisLoc()); if (!ToTemplateEllipsisLocOrErr) return ToTemplateEllipsisLocOrErr.takeError(); ToInfo = TemplateArgumentLocInfo( *ToTemplateQualifierLocOrErr, *ToTemplateNameLocOrErr, *ToTemplateEllipsisLocOrErr); } return TemplateArgumentLoc(Arg, ToInfo); } template <> Expected ASTNodeImporter::import(const DeclGroupRef &DG) { if (DG.isNull()) return DeclGroupRef::Create(Importer.getToContext(), nullptr, 0); size_t NumDecls = DG.end() - DG.begin(); SmallVector ToDecls; ToDecls.reserve(NumDecls); for (Decl *FromD : DG) { if (auto ToDOrErr = import(FromD)) ToDecls.push_back(*ToDOrErr); else return ToDOrErr.takeError(); } return DeclGroupRef::Create(Importer.getToContext(), ToDecls.begin(), NumDecls); } template <> Expected ASTNodeImporter::import(const Designator &D) { if (D.isFieldDesignator()) { IdentifierInfo *ToFieldName = Importer.Import(D.getFieldName()); ExpectedSLoc ToDotLocOrErr = import(D.getDotLoc()); if (!ToDotLocOrErr) return ToDotLocOrErr.takeError(); ExpectedSLoc ToFieldLocOrErr = import(D.getFieldLoc()); if (!ToFieldLocOrErr) return ToFieldLocOrErr.takeError(); return Designator(ToFieldName, *ToDotLocOrErr, *ToFieldLocOrErr); } ExpectedSLoc ToLBracketLocOrErr = import(D.getLBracketLoc()); if (!ToLBracketLocOrErr) return ToLBracketLocOrErr.takeError(); ExpectedSLoc ToRBracketLocOrErr = import(D.getRBracketLoc()); if (!ToRBracketLocOrErr) return ToRBracketLocOrErr.takeError(); if (D.isArrayDesignator()) return 
Designator(D.getFirstExprIndex(), *ToLBracketLocOrErr, *ToRBracketLocOrErr); ExpectedSLoc ToEllipsisLocOrErr = import(D.getEllipsisLoc()); if (!ToEllipsisLocOrErr) return ToEllipsisLocOrErr.takeError(); assert(D.isArrayRangeDesignator()); return Designator( D.getFirstExprIndex(), *ToLBracketLocOrErr, *ToEllipsisLocOrErr, *ToRBracketLocOrErr); } template <> Expected ASTNodeImporter::import(const LambdaCapture &From) { VarDecl *Var = nullptr; if (From.capturesVariable()) { if (auto VarOrErr = import(From.getCapturedVar())) Var = *VarOrErr; else return VarOrErr.takeError(); } auto LocationOrErr = import(From.getLocation()); if (!LocationOrErr) return LocationOrErr.takeError(); SourceLocation EllipsisLoc; if (From.isPackExpansion()) if (Error Err = importInto(EllipsisLoc, From.getEllipsisLoc())) return std::move(Err); return LambdaCapture( *LocationOrErr, From.isImplicit(), From.getCaptureKind(), Var, EllipsisLoc); } template bool ASTNodeImporter::hasSameVisibilityContextAndLinkage(T *Found, T *From) { if (Found->getLinkageInternal() != From->getLinkageInternal()) return false; if (From->hasExternalFormalLinkage()) return Found->hasExternalFormalLinkage(); if (Importer.GetFromTU(Found) != From->getTranslationUnitDecl()) return false; if (From->isInAnonymousNamespace()) return Found->isInAnonymousNamespace(); else return !Found->isInAnonymousNamespace() && !Found->hasExternalFormalLinkage(); } template <> bool ASTNodeImporter::hasSameVisibilityContextAndLinkage(TypedefNameDecl *Found, TypedefNameDecl *From) { if (Found->getLinkageInternal() != From->getLinkageInternal()) return false; if (From->isInAnonymousNamespace() && Found->isInAnonymousNamespace()) return Importer.GetFromTU(Found) == From->getTranslationUnitDecl(); return From->isInAnonymousNamespace() == Found->isInAnonymousNamespace(); } } // namespace clang //---------------------------------------------------------------------------- // Import Types //---------------------------------------------------------------------------- using namespace clang; ExpectedType ASTNodeImporter::VisitType(const Type *T) { Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node) << T->getTypeClassName(); return make_error(ImportError::UnsupportedConstruct); } ExpectedType ASTNodeImporter::VisitAtomicType(const AtomicType *T){ ExpectedType UnderlyingTypeOrErr = import(T->getValueType()); if (!UnderlyingTypeOrErr) return UnderlyingTypeOrErr.takeError(); return Importer.getToContext().getAtomicType(*UnderlyingTypeOrErr); } ExpectedType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) { switch (T->getKind()) { #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ case BuiltinType::Id: \ return Importer.getToContext().SingletonId; #include "clang/Basic/OpenCLImageTypes.def" #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ case BuiltinType::Id: \ return Importer.getToContext().Id##Ty; #include "clang/Basic/OpenCLExtensionTypes.def" #define SVE_TYPE(Name, Id, SingletonId) \ case BuiltinType::Id: \ return Importer.getToContext().SingletonId; #include "clang/Basic/AArch64SVEACLETypes.def" #define SHARED_SINGLETON_TYPE(Expansion) #define BUILTIN_TYPE(Id, SingletonId) \ case BuiltinType::Id: return Importer.getToContext().SingletonId; #include "clang/AST/BuiltinTypes.def" // FIXME: for Char16, Char32, and NullPtr, make sure that the "to" // context supports C++. // FIXME: for ObjCId, ObjCClass, and ObjCSel, make sure that the "to" // context supports ObjC. case BuiltinType::Char_U: // The context we're importing from has an unsigned 'char'. 
If we're // importing into a context with a signed 'char', translate to // 'unsigned char' instead. if (Importer.getToContext().getLangOpts().CharIsSigned) return Importer.getToContext().UnsignedCharTy; return Importer.getToContext().CharTy; case BuiltinType::Char_S: // The context we're importing from has an unsigned 'char'. If we're // importing into a context with a signed 'char', translate to // 'unsigned char' instead. if (!Importer.getToContext().getLangOpts().CharIsSigned) return Importer.getToContext().SignedCharTy; return Importer.getToContext().CharTy; case BuiltinType::WChar_S: case BuiltinType::WChar_U: // FIXME: If not in C++, shall we translate to the C equivalent of // wchar_t? return Importer.getToContext().WCharTy; } llvm_unreachable("Invalid BuiltinType Kind!"); } ExpectedType ASTNodeImporter::VisitDecayedType(const DecayedType *T) { ExpectedType ToOriginalTypeOrErr = import(T->getOriginalType()); if (!ToOriginalTypeOrErr) return ToOriginalTypeOrErr.takeError(); return Importer.getToContext().getDecayedType(*ToOriginalTypeOrErr); } ExpectedType ASTNodeImporter::VisitComplexType(const ComplexType *T) { ExpectedType ToElementTypeOrErr = import(T->getElementType()); if (!ToElementTypeOrErr) return ToElementTypeOrErr.takeError(); return Importer.getToContext().getComplexType(*ToElementTypeOrErr); } ExpectedType ASTNodeImporter::VisitPointerType(const PointerType *T) { ExpectedType ToPointeeTypeOrErr = import(T->getPointeeType()); if (!ToPointeeTypeOrErr) return ToPointeeTypeOrErr.takeError(); return Importer.getToContext().getPointerType(*ToPointeeTypeOrErr); } ExpectedType ASTNodeImporter::VisitBlockPointerType(const BlockPointerType *T) { // FIXME: Check for blocks support in "to" context. ExpectedType ToPointeeTypeOrErr = import(T->getPointeeType()); if (!ToPointeeTypeOrErr) return ToPointeeTypeOrErr.takeError(); return Importer.getToContext().getBlockPointerType(*ToPointeeTypeOrErr); } ExpectedType ASTNodeImporter::VisitLValueReferenceType(const LValueReferenceType *T) { // FIXME: Check for C++ support in "to" context. ExpectedType ToPointeeTypeOrErr = import(T->getPointeeTypeAsWritten()); if (!ToPointeeTypeOrErr) return ToPointeeTypeOrErr.takeError(); return Importer.getToContext().getLValueReferenceType(*ToPointeeTypeOrErr); } ExpectedType ASTNodeImporter::VisitRValueReferenceType(const RValueReferenceType *T) { // FIXME: Check for C++0x support in "to" context. ExpectedType ToPointeeTypeOrErr = import(T->getPointeeTypeAsWritten()); if (!ToPointeeTypeOrErr) return ToPointeeTypeOrErr.takeError(); return Importer.getToContext().getRValueReferenceType(*ToPointeeTypeOrErr); } ExpectedType ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) { // FIXME: Check for C++ support in "to" context. 
ExpectedType ToPointeeTypeOrErr = import(T->getPointeeType()); if (!ToPointeeTypeOrErr) return ToPointeeTypeOrErr.takeError(); ExpectedType ClassTypeOrErr = import(QualType(T->getClass(), 0)); if (!ClassTypeOrErr) return ClassTypeOrErr.takeError(); return Importer.getToContext().getMemberPointerType( *ToPointeeTypeOrErr, (*ClassTypeOrErr).getTypePtr()); } ExpectedType ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) { Error Err = Error::success(); auto ToElementType = importChecked(Err, T->getElementType()); auto ToSizeExpr = importChecked(Err, T->getSizeExpr()); if (Err) return std::move(Err); return Importer.getToContext().getConstantArrayType( ToElementType, T->getSize(), ToSizeExpr, T->getSizeModifier(), T->getIndexTypeCVRQualifiers()); } ExpectedType ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) { ExpectedType ToElementTypeOrErr = import(T->getElementType()); if (!ToElementTypeOrErr) return ToElementTypeOrErr.takeError(); return Importer.getToContext().getIncompleteArrayType(*ToElementTypeOrErr, T->getSizeModifier(), T->getIndexTypeCVRQualifiers()); } ExpectedType ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) { Error Err = Error::success(); QualType ToElementType = importChecked(Err, T->getElementType()); Expr *ToSizeExpr = importChecked(Err, T->getSizeExpr()); SourceRange ToBracketsRange = importChecked(Err, T->getBracketsRange()); if (Err) return std::move(Err); return Importer.getToContext().getVariableArrayType( ToElementType, ToSizeExpr, T->getSizeModifier(), T->getIndexTypeCVRQualifiers(), ToBracketsRange); } ExpectedType ASTNodeImporter::VisitDependentSizedArrayType( const DependentSizedArrayType *T) { Error Err = Error::success(); QualType ToElementType = importChecked(Err, T->getElementType()); Expr *ToSizeExpr = importChecked(Err, T->getSizeExpr()); SourceRange ToBracketsRange = importChecked(Err, T->getBracketsRange()); if (Err) return std::move(Err); // SizeExpr may be null if size is not specified directly. // For example, 'int a[]'. return Importer.getToContext().getDependentSizedArrayType( ToElementType, ToSizeExpr, T->getSizeModifier(), T->getIndexTypeCVRQualifiers(), ToBracketsRange); } ExpectedType ASTNodeImporter::VisitVectorType(const VectorType *T) { ExpectedType ToElementTypeOrErr = import(T->getElementType()); if (!ToElementTypeOrErr) return ToElementTypeOrErr.takeError(); return Importer.getToContext().getVectorType(*ToElementTypeOrErr, T->getNumElements(), T->getVectorKind()); } ExpectedType ASTNodeImporter::VisitExtVectorType(const ExtVectorType *T) { ExpectedType ToElementTypeOrErr = import(T->getElementType()); if (!ToElementTypeOrErr) return ToElementTypeOrErr.takeError(); return Importer.getToContext().getExtVectorType(*ToElementTypeOrErr, T->getNumElements()); } ExpectedType ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) { // FIXME: What happens if we're importing a function without a prototype // into C++? Should we make it variadic? 
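  // (Illustrative example, not from the original source: in C a declaration
  // such as
  //   int f();
  // has FunctionNoProtoType, i.e. its parameters are unspecified, whereas in
  // C++ the same spelling means `int f(void)` and every function type carries
  // a prototype, so the "no proto" type has no direct C++ counterpart.)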
ExpectedType ToReturnTypeOrErr = import(T->getReturnType()); if (!ToReturnTypeOrErr) return ToReturnTypeOrErr.takeError(); return Importer.getToContext().getFunctionNoProtoType(*ToReturnTypeOrErr, T->getExtInfo()); } ExpectedType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) { ExpectedType ToReturnTypeOrErr = import(T->getReturnType()); if (!ToReturnTypeOrErr) return ToReturnTypeOrErr.takeError(); // Import argument types SmallVector ArgTypes; for (const auto &A : T->param_types()) { ExpectedType TyOrErr = import(A); if (!TyOrErr) return TyOrErr.takeError(); ArgTypes.push_back(*TyOrErr); } // Import exception types SmallVector ExceptionTypes; for (const auto &E : T->exceptions()) { ExpectedType TyOrErr = import(E); if (!TyOrErr) return TyOrErr.takeError(); ExceptionTypes.push_back(*TyOrErr); } FunctionProtoType::ExtProtoInfo FromEPI = T->getExtProtoInfo(); Error Err = Error::success(); FunctionProtoType::ExtProtoInfo ToEPI; ToEPI.ExtInfo = FromEPI.ExtInfo; ToEPI.Variadic = FromEPI.Variadic; ToEPI.HasTrailingReturn = FromEPI.HasTrailingReturn; ToEPI.TypeQuals = FromEPI.TypeQuals; ToEPI.RefQualifier = FromEPI.RefQualifier; ToEPI.ExceptionSpec.Type = FromEPI.ExceptionSpec.Type; ToEPI.ExceptionSpec.NoexceptExpr = importChecked(Err, FromEPI.ExceptionSpec.NoexceptExpr); ToEPI.ExceptionSpec.SourceDecl = importChecked(Err, FromEPI.ExceptionSpec.SourceDecl); ToEPI.ExceptionSpec.SourceTemplate = importChecked(Err, FromEPI.ExceptionSpec.SourceTemplate); ToEPI.ExceptionSpec.Exceptions = ExceptionTypes; if (Err) return std::move(Err); return Importer.getToContext().getFunctionType( *ToReturnTypeOrErr, ArgTypes, ToEPI); } ExpectedType ASTNodeImporter::VisitUnresolvedUsingType( const UnresolvedUsingType *T) { Error Err = Error::success(); auto ToD = importChecked(Err, T->getDecl()); auto ToPrevD = importChecked(Err, T->getDecl()->getPreviousDecl()); if (Err) return std::move(Err); return Importer.getToContext().getTypeDeclType( ToD, cast_or_null(ToPrevD)); } ExpectedType ASTNodeImporter::VisitParenType(const ParenType *T) { ExpectedType ToInnerTypeOrErr = import(T->getInnerType()); if (!ToInnerTypeOrErr) return ToInnerTypeOrErr.takeError(); return Importer.getToContext().getParenType(*ToInnerTypeOrErr); } ExpectedType ASTNodeImporter::VisitTypedefType(const TypedefType *T) { Expected ToDeclOrErr = import(T->getDecl()); if (!ToDeclOrErr) return ToDeclOrErr.takeError(); return Importer.getToContext().getTypeDeclType(*ToDeclOrErr); } ExpectedType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) { ExpectedExpr ToExprOrErr = import(T->getUnderlyingExpr()); if (!ToExprOrErr) return ToExprOrErr.takeError(); return Importer.getToContext().getTypeOfExprType(*ToExprOrErr); } ExpectedType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) { ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType()); if (!ToUnderlyingTypeOrErr) return ToUnderlyingTypeOrErr.takeError(); return Importer.getToContext().getTypeOfType(*ToUnderlyingTypeOrErr); } ExpectedType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) { // FIXME: Make sure that the "to" context supports C++0x! 
ExpectedExpr ToExprOrErr = import(T->getUnderlyingExpr()); if (!ToExprOrErr) return ToExprOrErr.takeError(); ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType()); if (!ToUnderlyingTypeOrErr) return ToUnderlyingTypeOrErr.takeError(); return Importer.getToContext().getDecltypeType( *ToExprOrErr, *ToUnderlyingTypeOrErr); } ExpectedType ASTNodeImporter::VisitUnaryTransformType(const UnaryTransformType *T) { ExpectedType ToBaseTypeOrErr = import(T->getBaseType()); if (!ToBaseTypeOrErr) return ToBaseTypeOrErr.takeError(); ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType()); if (!ToUnderlyingTypeOrErr) return ToUnderlyingTypeOrErr.takeError(); return Importer.getToContext().getUnaryTransformType( *ToBaseTypeOrErr, *ToUnderlyingTypeOrErr, T->getUTTKind()); } ExpectedType ASTNodeImporter::VisitAutoType(const AutoType *T) { // FIXME: Make sure that the "to" context supports C++11! ExpectedType ToDeducedTypeOrErr = import(T->getDeducedType()); if (!ToDeducedTypeOrErr) return ToDeducedTypeOrErr.takeError(); ExpectedDecl ToTypeConstraintConcept = import(T->getTypeConstraintConcept()); if (!ToTypeConstraintConcept) return ToTypeConstraintConcept.takeError(); SmallVector ToTemplateArgs; ArrayRef FromTemplateArgs = T->getTypeConstraintArguments(); if (Error Err = ImportTemplateArguments(FromTemplateArgs.data(), FromTemplateArgs.size(), ToTemplateArgs)) return std::move(Err); return Importer.getToContext().getAutoType( *ToDeducedTypeOrErr, T->getKeyword(), /*IsDependent*/false, /*IsPack=*/false, cast_or_null(*ToTypeConstraintConcept), ToTemplateArgs); } ExpectedType ASTNodeImporter::VisitInjectedClassNameType( const InjectedClassNameType *T) { Expected ToDeclOrErr = import(T->getDecl()); if (!ToDeclOrErr) return ToDeclOrErr.takeError(); ExpectedType ToInjTypeOrErr = import(T->getInjectedSpecializationType()); if (!ToInjTypeOrErr) return ToInjTypeOrErr.takeError(); // FIXME: ASTContext::getInjectedClassNameType is not suitable for AST reading // See comments in InjectedClassNameType definition for details // return Importer.getToContext().getInjectedClassNameType(D, InjType); enum { TypeAlignmentInBits = 4, TypeAlignment = 1 << TypeAlignmentInBits }; return QualType(new (Importer.getToContext(), TypeAlignment) InjectedClassNameType(*ToDeclOrErr, *ToInjTypeOrErr), 0); } ExpectedType ASTNodeImporter::VisitRecordType(const RecordType *T) { Expected ToDeclOrErr = import(T->getDecl()); if (!ToDeclOrErr) return ToDeclOrErr.takeError(); return Importer.getToContext().getTagDeclType(*ToDeclOrErr); } ExpectedType ASTNodeImporter::VisitEnumType(const EnumType *T) { Expected ToDeclOrErr = import(T->getDecl()); if (!ToDeclOrErr) return ToDeclOrErr.takeError(); return Importer.getToContext().getTagDeclType(*ToDeclOrErr); } ExpectedType ASTNodeImporter::VisitAttributedType(const AttributedType *T) { ExpectedType ToModifiedTypeOrErr = import(T->getModifiedType()); if (!ToModifiedTypeOrErr) return ToModifiedTypeOrErr.takeError(); ExpectedType ToEquivalentTypeOrErr = import(T->getEquivalentType()); if (!ToEquivalentTypeOrErr) return ToEquivalentTypeOrErr.takeError(); return Importer.getToContext().getAttributedType(T->getAttrKind(), *ToModifiedTypeOrErr, *ToEquivalentTypeOrErr); } ExpectedType ASTNodeImporter::VisitTemplateTypeParmType( const TemplateTypeParmType *T) { Expected ToDeclOrErr = import(T->getDecl()); if (!ToDeclOrErr) return ToDeclOrErr.takeError(); return Importer.getToContext().getTemplateTypeParmType( T->getDepth(), T->getIndex(), T->isParameterPack(), *ToDeclOrErr); } 
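// Illustrative sketch (not part of the original file): the Visit*Type
// importers above and below all follow the same Expected-based pattern; a
// hypothetical importer for a type with a single component would look like:
//
//   ExpectedType ASTNodeImporter::VisitFooType(const FooType *T) {
//     ExpectedType ToInnerOrErr = import(T->getInnerType());
//     if (!ToInnerOrErr)
//       return ToInnerOrErr.takeError(); // Propagate the failure to the caller.
//     return Importer.getToContext().getFooType(*ToInnerOrErr);
//   }
//
// FooType, getInnerType and getFooType are made-up names used only to show
// the error-propagation idiom.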
ExpectedType ASTNodeImporter::VisitSubstTemplateTypeParmType( const SubstTemplateTypeParmType *T) { ExpectedType ReplacedOrErr = import(QualType(T->getReplacedParameter(), 0)); if (!ReplacedOrErr) return ReplacedOrErr.takeError(); const TemplateTypeParmType *Replaced = cast((*ReplacedOrErr).getTypePtr()); ExpectedType ToReplacementTypeOrErr = import(T->getReplacementType()); if (!ToReplacementTypeOrErr) return ToReplacementTypeOrErr.takeError(); return Importer.getToContext().getSubstTemplateTypeParmType( Replaced, (*ToReplacementTypeOrErr).getCanonicalType()); } ExpectedType ASTNodeImporter::VisitTemplateSpecializationType( const TemplateSpecializationType *T) { auto ToTemplateOrErr = import(T->getTemplateName()); if (!ToTemplateOrErr) return ToTemplateOrErr.takeError(); SmallVector ToTemplateArgs; if (Error Err = ImportTemplateArguments( T->getArgs(), T->getNumArgs(), ToTemplateArgs)) return std::move(Err); QualType ToCanonType; if (!QualType(T, 0).isCanonical()) { QualType FromCanonType = Importer.getFromContext().getCanonicalType(QualType(T, 0)); if (ExpectedType TyOrErr = import(FromCanonType)) ToCanonType = *TyOrErr; else return TyOrErr.takeError(); } return Importer.getToContext().getTemplateSpecializationType(*ToTemplateOrErr, ToTemplateArgs, ToCanonType); } ExpectedType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) { // Note: the qualifier in an ElaboratedType is optional. auto ToQualifierOrErr = import(T->getQualifier()); if (!ToQualifierOrErr) return ToQualifierOrErr.takeError(); ExpectedType ToNamedTypeOrErr = import(T->getNamedType()); if (!ToNamedTypeOrErr) return ToNamedTypeOrErr.takeError(); Expected ToOwnedTagDeclOrErr = import(T->getOwnedTagDecl()); if (!ToOwnedTagDeclOrErr) return ToOwnedTagDeclOrErr.takeError(); return Importer.getToContext().getElaboratedType(T->getKeyword(), *ToQualifierOrErr, *ToNamedTypeOrErr, *ToOwnedTagDeclOrErr); } ExpectedType ASTNodeImporter::VisitPackExpansionType(const PackExpansionType *T) { ExpectedType ToPatternOrErr = import(T->getPattern()); if (!ToPatternOrErr) return ToPatternOrErr.takeError(); return Importer.getToContext().getPackExpansionType(*ToPatternOrErr, - T->getNumExpansions()); + T->getNumExpansions(), + /*ExpactPack=*/false); } ExpectedType ASTNodeImporter::VisitDependentTemplateSpecializationType( const DependentTemplateSpecializationType *T) { auto ToQualifierOrErr = import(T->getQualifier()); if (!ToQualifierOrErr) return ToQualifierOrErr.takeError(); IdentifierInfo *ToName = Importer.Import(T->getIdentifier()); SmallVector ToPack; ToPack.reserve(T->getNumArgs()); if (Error Err = ImportTemplateArguments( T->getArgs(), T->getNumArgs(), ToPack)) return std::move(Err); return Importer.getToContext().getDependentTemplateSpecializationType( T->getKeyword(), *ToQualifierOrErr, ToName, ToPack); } ExpectedType ASTNodeImporter::VisitDependentNameType(const DependentNameType *T) { auto ToQualifierOrErr = import(T->getQualifier()); if (!ToQualifierOrErr) return ToQualifierOrErr.takeError(); IdentifierInfo *Name = Importer.Import(T->getIdentifier()); QualType Canon; if (T != T->getCanonicalTypeInternal().getTypePtr()) { if (ExpectedType TyOrErr = import(T->getCanonicalTypeInternal())) Canon = (*TyOrErr).getCanonicalType(); else return TyOrErr.takeError(); } return Importer.getToContext().getDependentNameType(T->getKeyword(), *ToQualifierOrErr, Name, Canon); } ExpectedType ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) { Expected ToDeclOrErr = import(T->getDecl()); if (!ToDeclOrErr) return 
ToDeclOrErr.takeError(); return Importer.getToContext().getObjCInterfaceType(*ToDeclOrErr); } ExpectedType ASTNodeImporter::VisitObjCObjectType(const ObjCObjectType *T) { ExpectedType ToBaseTypeOrErr = import(T->getBaseType()); if (!ToBaseTypeOrErr) return ToBaseTypeOrErr.takeError(); SmallVector TypeArgs; for (auto TypeArg : T->getTypeArgsAsWritten()) { if (ExpectedType TyOrErr = import(TypeArg)) TypeArgs.push_back(*TyOrErr); else return TyOrErr.takeError(); } SmallVector Protocols; for (auto *P : T->quals()) { if (Expected ProtocolOrErr = import(P)) Protocols.push_back(*ProtocolOrErr); else return ProtocolOrErr.takeError(); } return Importer.getToContext().getObjCObjectType(*ToBaseTypeOrErr, TypeArgs, Protocols, T->isKindOfTypeAsWritten()); } ExpectedType ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) { ExpectedType ToPointeeTypeOrErr = import(T->getPointeeType()); if (!ToPointeeTypeOrErr) return ToPointeeTypeOrErr.takeError(); return Importer.getToContext().getObjCObjectPointerType(*ToPointeeTypeOrErr); } //---------------------------------------------------------------------------- // Import Declarations //---------------------------------------------------------------------------- Error ASTNodeImporter::ImportDeclParts( NamedDecl *D, DeclContext *&DC, DeclContext *&LexicalDC, DeclarationName &Name, NamedDecl *&ToD, SourceLocation &Loc) { // Check if RecordDecl is in FunctionDecl parameters to avoid infinite loop. // example: int struct_in_proto(struct data_t{int a;int b;} *d); // FIXME: We could support these constructs by importing a different type of // this parameter and by importing the original type of the parameter only // after the FunctionDecl is created. See // VisitFunctionDecl::UsedDifferentProtoType. DeclContext *OrigDC = D->getDeclContext(); FunctionDecl *FunDecl; if (isa(D) && (FunDecl = dyn_cast(OrigDC)) && FunDecl->hasBody()) { auto getLeafPointeeType = [](const Type *T) { while (T->isPointerType() || T->isArrayType()) { T = T->getPointeeOrArrayElementType(); } return T; }; for (const ParmVarDecl *P : FunDecl->parameters()) { const Type *LeafT = getLeafPointeeType(P->getType().getCanonicalType().getTypePtr()); auto *RT = dyn_cast(LeafT); if (RT && RT->getDecl() == D) { Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node) << D->getDeclKindName(); return make_error(ImportError::UnsupportedConstruct); } } } // Import the context of this declaration. if (Error Err = ImportDeclContext(D, DC, LexicalDC)) return Err; // Import the name of this declaration. if (Error Err = importInto(Name, D->getDeclName())) return Err; // Import the location of this declaration. 
if (Error Err = importInto(Loc, D->getLocation())) return Err; ToD = cast_or_null(Importer.GetAlreadyImportedOrNull(D)); if (ToD) if (Error Err = ASTNodeImporter(*this).ImportDefinitionIfNeeded(D, ToD)) return Err; return Error::success(); } Error ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) { if (!FromD) return Error::success(); if (!ToD) if (Error Err = importInto(ToD, FromD)) return Err; if (RecordDecl *FromRecord = dyn_cast(FromD)) { if (RecordDecl *ToRecord = cast(ToD)) { if (FromRecord->getDefinition() && FromRecord->isCompleteDefinition() && !ToRecord->getDefinition()) { if (Error Err = ImportDefinition(FromRecord, ToRecord)) return Err; } } return Error::success(); } if (EnumDecl *FromEnum = dyn_cast(FromD)) { if (EnumDecl *ToEnum = cast(ToD)) { if (FromEnum->getDefinition() && !ToEnum->getDefinition()) { if (Error Err = ImportDefinition(FromEnum, ToEnum)) return Err; } } return Error::success(); } return Error::success(); } Error ASTNodeImporter::ImportDeclarationNameLoc( const DeclarationNameInfo &From, DeclarationNameInfo& To) { // NOTE: To.Name and To.Loc are already imported. // We only have to import To.LocInfo. switch (To.getName().getNameKind()) { case DeclarationName::Identifier: case DeclarationName::ObjCZeroArgSelector: case DeclarationName::ObjCOneArgSelector: case DeclarationName::ObjCMultiArgSelector: case DeclarationName::CXXUsingDirective: case DeclarationName::CXXDeductionGuideName: return Error::success(); case DeclarationName::CXXOperatorName: { if (auto ToRangeOrErr = import(From.getCXXOperatorNameRange())) To.setCXXOperatorNameRange(*ToRangeOrErr); else return ToRangeOrErr.takeError(); return Error::success(); } case DeclarationName::CXXLiteralOperatorName: { if (ExpectedSLoc LocOrErr = import(From.getCXXLiteralOperatorNameLoc())) To.setCXXLiteralOperatorNameLoc(*LocOrErr); else return LocOrErr.takeError(); return Error::success(); } case DeclarationName::CXXConstructorName: case DeclarationName::CXXDestructorName: case DeclarationName::CXXConversionFunctionName: { if (auto ToTInfoOrErr = import(From.getNamedTypeInfo())) To.setNamedTypeInfo(*ToTInfoOrErr); else return ToTInfoOrErr.takeError(); return Error::success(); } } llvm_unreachable("Unknown name kind."); } Error ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) { if (Importer.isMinimalImport() && !ForceImport) { auto ToDCOrErr = Importer.ImportContext(FromDC); return ToDCOrErr.takeError(); } // We use strict error handling in case of records and enums, but not // with e.g. namespaces. // // FIXME Clients of the ASTImporter should be able to choose an // appropriate error handling strategy for their needs. For instance, // they may not want to mark an entire namespace as erroneous merely // because there is an ODR error with two typedefs. As another example, // the client may allow EnumConstantDecls with same names but with // different values in two distinct translation units. bool AccumulateChildErrors = isa(FromDC); Error ChildErrors = Error::success(); for (auto *From : FromDC->decls()) { ExpectedDecl ImportedOrErr = import(From); // If we are in the process of ImportDefinition(...) for a RecordDecl we // want to make sure that we are also completing each FieldDecl. There // are currently cases where this does not happen and this is correctness // fix since operations such as code generation will expect this to be so. 
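    // Illustrative example (assumed, not from the original source): given
    //   struct Inner { int i; };
    //   struct Outer { struct Inner member; };
    // importing the field `member` must also complete the imported definition
    // of `Inner`; otherwise the "to" context could not compute the layout of
    // `Outer` during code generation.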
    if (ImportedOrErr) {
      FieldDecl *FieldFrom = dyn_cast_or_null<FieldDecl>(From);
      Decl *ImportedDecl = *ImportedOrErr;
      FieldDecl *FieldTo = dyn_cast_or_null<FieldDecl>(ImportedDecl);
      if (FieldFrom && FieldTo) {
        const RecordType *RecordFrom = FieldFrom->getType()->getAs<RecordType>();
        const RecordType *RecordTo = FieldTo->getType()->getAs<RecordType>();
        if (RecordFrom && RecordTo) {
          RecordDecl *FromRecordDecl = RecordFrom->getDecl();
          RecordDecl *ToRecordDecl = RecordTo->getDecl();
          if (FromRecordDecl->isCompleteDefinition() &&
              !ToRecordDecl->isCompleteDefinition()) {
            Error Err = ImportDefinition(FromRecordDecl, ToRecordDecl);
            if (Err && AccumulateChildErrors)
              ChildErrors = joinErrors(std::move(ChildErrors), std::move(Err));
            else
              consumeError(std::move(Err));
          }
        }
      }
    } else {
      if (AccumulateChildErrors)
        ChildErrors =
            joinErrors(std::move(ChildErrors), ImportedOrErr.takeError());
      else
        consumeError(ImportedOrErr.takeError());
    }
  }

  // We reorder declarations in RecordDecls because they may have another order
  // in the "to" context than they have in the "from" context. This may happen
  // e.g. when we import a class like this:
  //   struct declToImport {
  //     int a = c + b;
  //     int b = 1;
  //     int c = 2;
  //   };
  // During the import of `a` we first import its dependencies in sequence,
  // thus the order would be `c`, `b`, `a`. We will get the normal order by
  // first removing the already imported members and then adding them in the
  // order as they appear in the "from" context.
  //
  // Keeping field order is vital because it determines structure layout.
  //
  // Here and below, we cannot call the field_begin() method and its callers
  // on ToDC if it has external storage. Calling field_begin() will
  // automatically load all the fields by calling
  // LoadFieldsFromExternalStorage(). LoadFieldsFromExternalStorage() would
  // call ASTImporter::Import(). This is because the ExternalASTSource
  // interface in LLDB is implemented by means of the ASTImporter. However,
  // calling an import at this point would result in an uncontrolled import;
  // we must avoid that.
  const auto *FromRD = dyn_cast<RecordDecl>(FromDC);
  if (!FromRD)
    return ChildErrors;

  auto ToDCOrErr = Importer.ImportContext(FromDC);
  if (!ToDCOrErr) {
    consumeError(std::move(ChildErrors));
    return ToDCOrErr.takeError();
  }

  DeclContext *ToDC = *ToDCOrErr;
  // Remove all declarations, which may be in wrong order in the
  // lexical DeclContext and then add them in the proper order.
  for (auto *D : FromRD->decls()) {
    if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<FriendDecl>(D)) {
      assert(D && "DC contains a null decl");
      Decl *ToD = Importer.GetAlreadyImportedOrNull(D);
      // Remove only the decls which we successfully imported.
      if (ToD) {
        assert(ToDC == ToD->getLexicalDeclContext() && ToDC->containsDecl(ToD));
        // Remove the decl from its wrong place in the linked list.
        ToDC->removeDecl(ToD);
        // Add the decl to the end of the linked list.
        // This time it will be at the proper place because the enclosing for
        // loop iterates in the original (good) order of the decls.
ToDC->addDeclInternal(ToD); } } } return ChildErrors; } Error ASTNodeImporter::ImportDeclContext( Decl *FromD, DeclContext *&ToDC, DeclContext *&ToLexicalDC) { auto ToDCOrErr = Importer.ImportContext(FromD->getDeclContext()); if (!ToDCOrErr) return ToDCOrErr.takeError(); ToDC = *ToDCOrErr; if (FromD->getDeclContext() != FromD->getLexicalDeclContext()) { auto ToLexicalDCOrErr = Importer.ImportContext( FromD->getLexicalDeclContext()); if (!ToLexicalDCOrErr) return ToLexicalDCOrErr.takeError(); ToLexicalDC = *ToLexicalDCOrErr; } else ToLexicalDC = ToDC; return Error::success(); } Error ASTNodeImporter::ImportImplicitMethods( const CXXRecordDecl *From, CXXRecordDecl *To) { assert(From->isCompleteDefinition() && To->getDefinition() == To && "Import implicit methods to or from non-definition"); for (CXXMethodDecl *FromM : From->methods()) if (FromM->isImplicit()) { Expected ToMOrErr = import(FromM); if (!ToMOrErr) return ToMOrErr.takeError(); } return Error::success(); } static Error setTypedefNameForAnonDecl(TagDecl *From, TagDecl *To, ASTImporter &Importer) { if (TypedefNameDecl *FromTypedef = From->getTypedefNameForAnonDecl()) { if (ExpectedDecl ToTypedefOrErr = Importer.Import(FromTypedef)) To->setTypedefNameForAnonDecl(cast(*ToTypedefOrErr)); else return ToTypedefOrErr.takeError(); } return Error::success(); } Error ASTNodeImporter::ImportDefinition( RecordDecl *From, RecordDecl *To, ImportDefinitionKind Kind) { auto DefinitionCompleter = [To]() { // There are cases in LLDB when we first import a class without its // members. The class will have DefinitionData, but no members. Then, // importDefinition is called from LLDB, which tries to get the members, so // when we get here, the class already has the DefinitionData set, so we // must unset the CompleteDefinition here to be able to complete again the // definition. To->setCompleteDefinition(false); To->completeDefinition(); }; if (To->getDefinition() || To->isBeingDefined()) { if (Kind == IDK_Everything || // In case of lambdas, the class already has a definition ptr set, but // the contained decls are not imported yet. Also, isBeingDefined was // set in CXXRecordDecl::CreateLambda. We must import the contained // decls here and finish the definition. (To->isLambda() && shouldForceImportDeclContext(Kind))) { if (To->isLambda()) { auto *FromCXXRD = cast(From); SmallVector ToCaptures; ToCaptures.reserve(FromCXXRD->capture_size()); for (const auto &FromCapture : FromCXXRD->captures()) { if (auto ToCaptureOrErr = import(FromCapture)) ToCaptures.push_back(*ToCaptureOrErr); else return ToCaptureOrErr.takeError(); } cast(To)->setCaptures(ToCaptures); } Error Result = ImportDeclContext(From, /*ForceImport=*/true); // Finish the definition of the lambda, set isBeingDefined to false. if (To->isLambda()) DefinitionCompleter(); return Result; } return Error::success(); } To->startDefinition(); // Complete the definition even if error is returned. // The RecordDecl may be already part of the AST so it is better to // have it in complete state even if something is wrong with it. auto DefinitionCompleterScopeExit = llvm::make_scope_exit(DefinitionCompleter); if (Error Err = setTypedefNameForAnonDecl(From, To, Importer)) return Err; // Add base classes. 
auto *ToCXX = dyn_cast(To); auto *FromCXX = dyn_cast(From); if (ToCXX && FromCXX && ToCXX->dataPtr() && FromCXX->dataPtr()) { struct CXXRecordDecl::DefinitionData &ToData = ToCXX->data(); struct CXXRecordDecl::DefinitionData &FromData = FromCXX->data(); #define FIELD(Name, Width, Merge) \ ToData.Name = FromData.Name; #include "clang/AST/CXXRecordDeclDefinitionBits.def" // Copy over the data stored in RecordDeclBits ToCXX->setArgPassingRestrictions(FromCXX->getArgPassingRestrictions()); SmallVector Bases; for (const auto &Base1 : FromCXX->bases()) { ExpectedType TyOrErr = import(Base1.getType()); if (!TyOrErr) return TyOrErr.takeError(); SourceLocation EllipsisLoc; if (Base1.isPackExpansion()) { if (ExpectedSLoc LocOrErr = import(Base1.getEllipsisLoc())) EllipsisLoc = *LocOrErr; else return LocOrErr.takeError(); } // Ensure that we have a definition for the base. if (Error Err = ImportDefinitionIfNeeded(Base1.getType()->getAsCXXRecordDecl())) return Err; auto RangeOrErr = import(Base1.getSourceRange()); if (!RangeOrErr) return RangeOrErr.takeError(); auto TSIOrErr = import(Base1.getTypeSourceInfo()); if (!TSIOrErr) return TSIOrErr.takeError(); Bases.push_back( new (Importer.getToContext()) CXXBaseSpecifier( *RangeOrErr, Base1.isVirtual(), Base1.isBaseOfClass(), Base1.getAccessSpecifierAsWritten(), *TSIOrErr, EllipsisLoc)); } if (!Bases.empty()) ToCXX->setBases(Bases.data(), Bases.size()); } if (shouldForceImportDeclContext(Kind)) if (Error Err = ImportDeclContext(From, /*ForceImport=*/true)) return Err; return Error::success(); } Error ASTNodeImporter::ImportInitializer(VarDecl *From, VarDecl *To) { if (To->getAnyInitializer()) return Error::success(); Expr *FromInit = From->getInit(); if (!FromInit) return Error::success(); ExpectedExpr ToInitOrErr = import(FromInit); if (!ToInitOrErr) return ToInitOrErr.takeError(); To->setInit(*ToInitOrErr); if (From->isInitKnownICE()) { EvaluatedStmt *Eval = To->ensureEvaluatedStmt(); Eval->CheckedICE = true; Eval->IsICE = From->isInitICE(); } // FIXME: Other bits to merge? return Error::success(); } Error ASTNodeImporter::ImportDefinition( EnumDecl *From, EnumDecl *To, ImportDefinitionKind Kind) { if (To->getDefinition() || To->isBeingDefined()) { if (Kind == IDK_Everything) return ImportDeclContext(From, /*ForceImport=*/true); return Error::success(); } To->startDefinition(); if (Error Err = setTypedefNameForAnonDecl(From, To, Importer)) return Err; ExpectedType ToTypeOrErr = import(Importer.getFromContext().getTypeDeclType(From)); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); ExpectedType ToPromotionTypeOrErr = import(From->getPromotionType()); if (!ToPromotionTypeOrErr) return ToPromotionTypeOrErr.takeError(); if (shouldForceImportDeclContext(Kind)) if (Error Err = ImportDeclContext(From, /*ForceImport=*/true)) return Err; // FIXME: we might need to merge the number of positive or negative bits // if the enumerator lists don't match. To->completeDefinition(*ToTypeOrErr, *ToPromotionTypeOrErr, From->getNumPositiveBits(), From->getNumNegativeBits()); return Error::success(); } Error ASTNodeImporter::ImportTemplateArguments( const TemplateArgument *FromArgs, unsigned NumFromArgs, SmallVectorImpl &ToArgs) { for (unsigned I = 0; I != NumFromArgs; ++I) { if (auto ToOrErr = import(FromArgs[I])) ToArgs.push_back(*ToOrErr); else return ToOrErr.takeError(); } return Error::success(); } // FIXME: Do not forget to remove this and use only 'import'. 
Expected ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) { return import(From); } template Error ASTNodeImporter::ImportTemplateArgumentListInfo( const InContainerTy &Container, TemplateArgumentListInfo &ToTAInfo) { for (const auto &FromLoc : Container) { if (auto ToLocOrErr = import(FromLoc)) ToTAInfo.addArgument(*ToLocOrErr); else return ToLocOrErr.takeError(); } return Error::success(); } static StructuralEquivalenceKind getStructuralEquivalenceKind(const ASTImporter &Importer) { return Importer.isMinimalImport() ? StructuralEquivalenceKind::Minimal : StructuralEquivalenceKind::Default; } bool ASTNodeImporter::IsStructuralMatch(Decl *From, Decl *To, bool Complain) { StructuralEquivalenceContext Ctx( Importer.getFromContext(), Importer.getToContext(), Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer), false, Complain); return Ctx.IsEquivalent(From, To); } bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord, bool Complain) { // Eliminate a potential failure point where we attempt to re-import // something we're trying to import while completing ToRecord. Decl *ToOrigin = Importer.GetOriginalDecl(ToRecord); if (ToOrigin) { auto *ToOriginRecord = dyn_cast(ToOrigin); if (ToOriginRecord) ToRecord = ToOriginRecord; } StructuralEquivalenceContext Ctx(Importer.getFromContext(), ToRecord->getASTContext(), Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer), false, Complain); return Ctx.IsEquivalent(FromRecord, ToRecord); } bool ASTNodeImporter::IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar, bool Complain) { StructuralEquivalenceContext Ctx( Importer.getFromContext(), Importer.getToContext(), Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer), false, Complain); return Ctx.IsEquivalent(FromVar, ToVar); } bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) { // Eliminate a potential failure point where we attempt to re-import // something we're trying to import while completing ToEnum. 
if (Decl *ToOrigin = Importer.GetOriginalDecl(ToEnum)) if (auto *ToOriginEnum = dyn_cast(ToOrigin)) ToEnum = ToOriginEnum; StructuralEquivalenceContext Ctx( Importer.getFromContext(), Importer.getToContext(), Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer)); return Ctx.IsEquivalent(FromEnum, ToEnum); } bool ASTNodeImporter::IsStructuralMatch(FunctionTemplateDecl *From, FunctionTemplateDecl *To) { StructuralEquivalenceContext Ctx( Importer.getFromContext(), Importer.getToContext(), Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer), false, false); return Ctx.IsEquivalent(From, To); } bool ASTNodeImporter::IsStructuralMatch(FunctionDecl *From, FunctionDecl *To) { StructuralEquivalenceContext Ctx( Importer.getFromContext(), Importer.getToContext(), Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer), false, false); return Ctx.IsEquivalent(From, To); } bool ASTNodeImporter::IsStructuralMatch(EnumConstantDecl *FromEC, EnumConstantDecl *ToEC) { const llvm::APSInt &FromVal = FromEC->getInitVal(); const llvm::APSInt &ToVal = ToEC->getInitVal(); return FromVal.isSigned() == ToVal.isSigned() && FromVal.getBitWidth() == ToVal.getBitWidth() && FromVal == ToVal; } bool ASTNodeImporter::IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To) { StructuralEquivalenceContext Ctx(Importer.getFromContext(), Importer.getToContext(), Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer)); return Ctx.IsEquivalent(From, To); } bool ASTNodeImporter::IsStructuralMatch(VarTemplateDecl *From, VarTemplateDecl *To) { StructuralEquivalenceContext Ctx(Importer.getFromContext(), Importer.getToContext(), Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer)); return Ctx.IsEquivalent(From, To); } ExpectedDecl ASTNodeImporter::VisitDecl(Decl *D) { Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node) << D->getDeclKindName(); return make_error(ImportError::UnsupportedConstruct); } ExpectedDecl ASTNodeImporter::VisitImportDecl(ImportDecl *D) { Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node) << D->getDeclKindName(); return make_error(ImportError::UnsupportedConstruct); } ExpectedDecl ASTNodeImporter::VisitEmptyDecl(EmptyDecl *D) { // Import the context of this declaration. DeclContext *DC, *LexicalDC; if (Error Err = ImportDeclContext(D, DC, LexicalDC)) return std::move(Err); // Import the location of this declaration. ExpectedSLoc LocOrErr = import(D->getLocation()); if (!LocOrErr) return LocOrErr.takeError(); EmptyDecl *ToD; if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), DC, *LocOrErr)) return ToD; ToD->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToD); return ToD; } ExpectedDecl ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) { TranslationUnitDecl *ToD = Importer.getToContext().getTranslationUnitDecl(); Importer.MapImported(D, ToD); return ToD; } ExpectedDecl ASTNodeImporter::VisitAccessSpecDecl(AccessSpecDecl *D) { ExpectedSLoc LocOrErr = import(D->getLocation()); if (!LocOrErr) return LocOrErr.takeError(); auto ColonLocOrErr = import(D->getColonLoc()); if (!ColonLocOrErr) return ColonLocOrErr.takeError(); // Import the context of this declaration. 
auto DCOrErr = Importer.ImportContext(D->getDeclContext()); if (!DCOrErr) return DCOrErr.takeError(); DeclContext *DC = *DCOrErr; AccessSpecDecl *ToD; if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), D->getAccess(), DC, *LocOrErr, *ColonLocOrErr)) return ToD; // Lexical DeclContext and Semantic DeclContext // is always the same for the accessSpec. ToD->setLexicalDeclContext(DC); DC->addDeclInternal(ToD); return ToD; } ExpectedDecl ASTNodeImporter::VisitStaticAssertDecl(StaticAssertDecl *D) { auto DCOrErr = Importer.ImportContext(D->getDeclContext()); if (!DCOrErr) return DCOrErr.takeError(); DeclContext *DC = *DCOrErr; DeclContext *LexicalDC = DC; Error Err = Error::success(); auto ToLocation = importChecked(Err, D->getLocation()); auto ToRParenLoc = importChecked(Err, D->getRParenLoc()); auto ToAssertExpr = importChecked(Err, D->getAssertExpr()); auto ToMessage = importChecked(Err, D->getMessage()); if (Err) return std::move(Err); StaticAssertDecl *ToD; if (GetImportedOrCreateDecl( ToD, D, Importer.getToContext(), DC, ToLocation, ToAssertExpr, ToMessage, ToRParenLoc, D->isFailed())) return ToD; ToD->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToD); return ToD; } ExpectedDecl ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) { // Import the major distinguishing characteristics of this namespace. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; NamespaceDecl *MergeWithNamespace = nullptr; if (!Name) { // This is an anonymous namespace. Adopt an existing anonymous // namespace if we can. // FIXME: Not testable. if (auto *TU = dyn_cast(DC)) MergeWithNamespace = TU->getAnonymousNamespace(); else MergeWithNamespace = cast(DC)->getAnonymousNamespace(); } else { SmallVector ConflictingDecls; auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Namespace)) continue; if (auto *FoundNS = dyn_cast(FoundDecl)) { MergeWithNamespace = FoundNS; ConflictingDecls.clear(); break; } ConflictingDecls.push_back(FoundDecl); } if (!ConflictingDecls.empty()) { ExpectedName NameOrErr = Importer.HandleNameConflict( Name, DC, Decl::IDNS_Namespace, ConflictingDecls.data(), ConflictingDecls.size()); if (NameOrErr) Name = NameOrErr.get(); else return NameOrErr.takeError(); } } ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc()); if (!BeginLocOrErr) return BeginLocOrErr.takeError(); ExpectedSLoc RBraceLocOrErr = import(D->getRBraceLoc()); if (!RBraceLocOrErr) return RBraceLocOrErr.takeError(); // Create the "to" namespace, if needed. NamespaceDecl *ToNamespace = MergeWithNamespace; if (!ToNamespace) { if (GetImportedOrCreateDecl( ToNamespace, D, Importer.getToContext(), DC, D->isInline(), *BeginLocOrErr, Loc, Name.getAsIdentifierInfo(), /*PrevDecl=*/nullptr)) return ToNamespace; ToNamespace->setRBraceLoc(*RBraceLocOrErr); ToNamespace->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToNamespace); // If this is an anonymous namespace, register it as the anonymous // namespace within its context. 
if (!Name) { if (auto *TU = dyn_cast(DC)) TU->setAnonymousNamespace(ToNamespace); else cast(DC)->setAnonymousNamespace(ToNamespace); } } Importer.MapImported(D, ToNamespace); if (Error Err = ImportDeclContext(D)) return std::move(Err); return ToNamespace; } ExpectedDecl ASTNodeImporter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) { // Import the major distinguishing characteristics of this namespace. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *LookupD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, LookupD, Loc)) return std::move(Err); if (LookupD) return LookupD; // NOTE: No conflict resolution is done for namespace aliases now. Error Err = Error::success(); auto ToNamespaceLoc = importChecked(Err, D->getNamespaceLoc()); auto ToAliasLoc = importChecked(Err, D->getAliasLoc()); auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc()); auto ToTargetNameLoc = importChecked(Err, D->getTargetNameLoc()); auto ToNamespace = importChecked(Err, D->getNamespace()); if (Err) return std::move(Err); IdentifierInfo *ToIdentifier = Importer.Import(D->getIdentifier()); NamespaceAliasDecl *ToD; if (GetImportedOrCreateDecl( ToD, D, Importer.getToContext(), DC, ToNamespaceLoc, ToAliasLoc, ToIdentifier, ToQualifierLoc, ToTargetNameLoc, ToNamespace)) return ToD; ToD->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToD); return ToD; } ExpectedDecl ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) { // Import the major distinguishing characteristics of this typedef. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; // If this typedef is not in block scope, determine whether we've // seen a typedef with the same name (that we can merge with) or any // other entity by that name (which name lookup could conflict with). // Note: Repeated typedefs are not valid in C99: // 'typedef int T; typedef int T;' is invalid // We do not care about this now. if (!DC->isFunctionOrMethod()) { SmallVector ConflictingDecls; unsigned IDNS = Decl::IDNS_Ordinary; auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(IDNS)) continue; if (auto *FoundTypedef = dyn_cast(FoundDecl)) { if (!hasSameVisibilityContextAndLinkage(FoundTypedef, D)) continue; QualType FromUT = D->getUnderlyingType(); QualType FoundUT = FoundTypedef->getUnderlyingType(); if (Importer.IsStructurallyEquivalent(FromUT, FoundUT)) { // If the "From" context has a complete underlying type but we // already have a complete underlying type then return with that. if (!FromUT->isIncompleteType() && !FoundUT->isIncompleteType()) return Importer.MapImported(D, FoundTypedef); // FIXME Handle redecl chain. When you do that make consistent changes // in ASTImporterLookupTable too. } else { ConflictingDecls.push_back(FoundDecl); } } } if (!ConflictingDecls.empty()) { ExpectedName NameOrErr = Importer.HandleNameConflict( Name, DC, IDNS, ConflictingDecls.data(), ConflictingDecls.size()); if (NameOrErr) Name = NameOrErr.get(); else return NameOrErr.takeError(); } } Error Err = Error::success(); auto ToUnderlyingType = importChecked(Err, D->getUnderlyingType()); auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo()); auto ToBeginLoc = importChecked(Err, D->getBeginLoc()); if (Err) return std::move(Err); // Create the new typedef node. 
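// Note (illustrative): the lookup above merges structurally equivalent
// typedefs instead of creating duplicates. E.g. if both translation units
// contain
//   typedef int size_type;
// the "from" declaration is simply mapped to the already existing "to"
// declaration, while a conflicting underlying type such as
//   typedef long size_type;
// is routed through HandleNameConflict.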
// FIXME: ToUnderlyingType is not used. (void)ToUnderlyingType; TypedefNameDecl *ToTypedef; if (IsAlias) { if (GetImportedOrCreateDecl( ToTypedef, D, Importer.getToContext(), DC, ToBeginLoc, Loc, Name.getAsIdentifierInfo(), ToTypeSourceInfo)) return ToTypedef; } else if (GetImportedOrCreateDecl( ToTypedef, D, Importer.getToContext(), DC, ToBeginLoc, Loc, Name.getAsIdentifierInfo(), ToTypeSourceInfo)) return ToTypedef; ToTypedef->setAccess(D->getAccess()); ToTypedef->setLexicalDeclContext(LexicalDC); // Templated declarations should not appear in DeclContext. TypeAliasDecl *FromAlias = IsAlias ? cast(D) : nullptr; if (!FromAlias || !FromAlias->getDescribedAliasTemplate()) LexicalDC->addDeclInternal(ToTypedef); return ToTypedef; } ExpectedDecl ASTNodeImporter::VisitTypedefDecl(TypedefDecl *D) { return VisitTypedefNameDecl(D, /*IsAlias=*/false); } ExpectedDecl ASTNodeImporter::VisitTypeAliasDecl(TypeAliasDecl *D) { return VisitTypedefNameDecl(D, /*IsAlias=*/true); } ExpectedDecl ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) { // Import the major distinguishing characteristics of this typedef. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *FoundD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, FoundD, Loc)) return std::move(Err); if (FoundD) return FoundD; // If this typedef is not in block scope, determine whether we've // seen a typedef with the same name (that we can merge with) or any // other entity by that name (which name lookup could conflict with). if (!DC->isFunctionOrMethod()) { SmallVector ConflictingDecls; unsigned IDNS = Decl::IDNS_Ordinary; auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(IDNS)) continue; if (auto *FoundAlias = dyn_cast(FoundDecl)) return Importer.MapImported(D, FoundAlias); ConflictingDecls.push_back(FoundDecl); } if (!ConflictingDecls.empty()) { ExpectedName NameOrErr = Importer.HandleNameConflict( Name, DC, IDNS, ConflictingDecls.data(), ConflictingDecls.size()); if (NameOrErr) Name = NameOrErr.get(); else return NameOrErr.takeError(); } } Error Err = Error::success(); auto ToTemplateParameters = importChecked(Err, D->getTemplateParameters()); auto ToTemplatedDecl = importChecked(Err, D->getTemplatedDecl()); if (Err) return std::move(Err); TypeAliasTemplateDecl *ToAlias; if (GetImportedOrCreateDecl(ToAlias, D, Importer.getToContext(), DC, Loc, Name, ToTemplateParameters, ToTemplatedDecl)) return ToAlias; ToTemplatedDecl->setDescribedAliasTemplate(ToAlias); ToAlias->setAccess(D->getAccess()); ToAlias->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToAlias); return ToAlias; } ExpectedDecl ASTNodeImporter::VisitLabelDecl(LabelDecl *D) { // Import the major distinguishing characteristics of this label. 
DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; assert(LexicalDC->isFunctionOrMethod()); LabelDecl *ToLabel; if (D->isGnuLocal()) { ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc()); if (!BeginLocOrErr) return BeginLocOrErr.takeError(); if (GetImportedOrCreateDecl(ToLabel, D, Importer.getToContext(), DC, Loc, Name.getAsIdentifierInfo(), *BeginLocOrErr)) return ToLabel; } else { if (GetImportedOrCreateDecl(ToLabel, D, Importer.getToContext(), DC, Loc, Name.getAsIdentifierInfo())) return ToLabel; } Expected ToStmtOrErr = import(D->getStmt()); if (!ToStmtOrErr) return ToStmtOrErr.takeError(); ToLabel->setStmt(*ToStmtOrErr); ToLabel->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToLabel); return ToLabel; } ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) { // Import the major distinguishing characteristics of this enum. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; // Figure out what enum name we're looking for. unsigned IDNS = Decl::IDNS_Tag; DeclarationName SearchName = Name; if (!SearchName && D->getTypedefNameForAnonDecl()) { if (Error Err = importInto( SearchName, D->getTypedefNameForAnonDecl()->getDeclName())) return std::move(Err); IDNS = Decl::IDNS_Ordinary; } else if (Importer.getToContext().getLangOpts().CPlusPlus) IDNS |= Decl::IDNS_Ordinary; // We may already have an enum of the same name; try to find and match it. EnumDecl *PrevDecl = nullptr; if (!DC->isFunctionOrMethod() && SearchName) { SmallVector ConflictingDecls; auto FoundDecls = Importer.findDeclsInToCtx(DC, SearchName); for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(IDNS)) continue; if (auto *Typedef = dyn_cast(FoundDecl)) { if (const auto *Tag = Typedef->getUnderlyingType()->getAs()) FoundDecl = Tag->getDecl(); } if (auto *FoundEnum = dyn_cast(FoundDecl)) { if (!hasSameVisibilityContextAndLinkage(FoundEnum, D)) continue; if (IsStructuralMatch(D, FoundEnum)) { EnumDecl *FoundDef = FoundEnum->getDefinition(); if (D->isThisDeclarationADefinition() && FoundDef) return Importer.MapImported(D, FoundDef); PrevDecl = FoundEnum->getMostRecentDecl(); break; } ConflictingDecls.push_back(FoundDecl); } } if (!ConflictingDecls.empty()) { ExpectedName NameOrErr = Importer.HandleNameConflict( SearchName, DC, IDNS, ConflictingDecls.data(), ConflictingDecls.size()); if (NameOrErr) Name = NameOrErr.get(); else return NameOrErr.takeError(); } } Error Err = Error::success(); auto ToBeginLoc = importChecked(Err, D->getBeginLoc()); auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc()); auto ToIntegerType = importChecked(Err, D->getIntegerType()); auto ToBraceRange = importChecked(Err, D->getBraceRange()); if (Err) return std::move(Err); // Create the enum declaration. 
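// Note (illustrative): an anonymous enum that is only visible through a
// typedef, e.g.
//   typedef enum { Red, Green } Color;
// has no tag name of its own, so the lookup above searches for the typedef
// name ('Color') and uses the ordinary identifier namespace (IDNS_Ordinary)
// instead of the tag namespace.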
EnumDecl *D2; if (GetImportedOrCreateDecl( D2, D, Importer.getToContext(), DC, ToBeginLoc, Loc, Name.getAsIdentifierInfo(), PrevDecl, D->isScoped(), D->isScopedUsingClassTag(), D->isFixed())) return D2; D2->setQualifierInfo(ToQualifierLoc); D2->setIntegerType(ToIntegerType); D2->setBraceRange(ToBraceRange); D2->setAccess(D->getAccess()); D2->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(D2); // Import the definition if (D->isCompleteDefinition()) if (Error Err = ImportDefinition(D, D2)) return std::move(Err); return D2; } ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) { bool IsFriendTemplate = false; if (auto *DCXX = dyn_cast(D)) { IsFriendTemplate = DCXX->getDescribedClassTemplate() && DCXX->getDescribedClassTemplate()->getFriendObjectKind() != Decl::FOK_None; } // Import the major distinguishing characteristics of this record. DeclContext *DC = nullptr, *LexicalDC = nullptr; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD = nullptr; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; // Figure out what structure name we're looking for. unsigned IDNS = Decl::IDNS_Tag; DeclarationName SearchName = Name; if (!SearchName && D->getTypedefNameForAnonDecl()) { if (Error Err = importInto( SearchName, D->getTypedefNameForAnonDecl()->getDeclName())) return std::move(Err); IDNS = Decl::IDNS_Ordinary; } else if (Importer.getToContext().getLangOpts().CPlusPlus) IDNS |= Decl::IDNS_Ordinary | Decl::IDNS_TagFriend; // We may already have a record of the same name; try to find and match it. RecordDecl *PrevDecl = nullptr; if (!DC->isFunctionOrMethod() && !D->isLambda()) { SmallVector ConflictingDecls; auto FoundDecls = Importer.findDeclsInToCtx(DC, SearchName); if (!FoundDecls.empty()) { // We're going to have to compare D against potentially conflicting Decls, // so complete it. if (D->hasExternalLexicalStorage() && !D->isCompleteDefinition()) D->getASTContext().getExternalSource()->CompleteType(D); } for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(IDNS)) continue; Decl *Found = FoundDecl; if (auto *Typedef = dyn_cast(Found)) { if (const auto *Tag = Typedef->getUnderlyingType()->getAs()) Found = Tag->getDecl(); } if (auto *FoundRecord = dyn_cast(Found)) { // Do not emit false positive diagnostic in case of unnamed // struct/union and in case of anonymous structs. Would be false // because there may be several anonymous/unnamed structs in a class. // E.g. these are both valid: // struct A { // unnamed structs // struct { struct A *next; } entry0; // struct { struct A *next; } entry1; // }; // struct X { struct { int a; }; struct { int b; }; }; // anon structs if (!SearchName) if (!IsStructuralMatch(D, FoundRecord, false)) continue; if (!hasSameVisibilityContextAndLinkage(FoundRecord, D)) continue; if (IsStructuralMatch(D, FoundRecord)) { RecordDecl *FoundDef = FoundRecord->getDefinition(); if (D->isThisDeclarationADefinition() && FoundDef) { // FIXME: Structural equivalence check should check for same // user-defined methods. Importer.MapImported(D, FoundDef); if (const auto *DCXX = dyn_cast(D)) { auto *FoundCXX = dyn_cast(FoundDef); assert(FoundCXX && "Record type mismatch"); if (!Importer.isMinimalImport()) // FoundDef may not have every implicit method that D has // because implicit methods are created only if they are used. 
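// E.g. (illustrative): given
//   struct S { int x; };
//   void copy(S &a, const S &b) { a = b; }
// only the translation unit containing 'copy' makes Sema declare the
// implicit 'S &S::operator=(const S &)'; the other definition of S lacks
// that member, so the missing implicit methods are imported here.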
if (Error Err = ImportImplicitMethods(DCXX, FoundCXX)) return std::move(Err); } } PrevDecl = FoundRecord->getMostRecentDecl(); break; } ConflictingDecls.push_back(FoundDecl); } // kind is RecordDecl } // for if (!ConflictingDecls.empty() && SearchName) { ExpectedName NameOrErr = Importer.HandleNameConflict( SearchName, DC, IDNS, ConflictingDecls.data(), ConflictingDecls.size()); if (NameOrErr) Name = NameOrErr.get(); else return NameOrErr.takeError(); } } ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc()); if (!BeginLocOrErr) return BeginLocOrErr.takeError(); // Create the record declaration. RecordDecl *D2 = nullptr; CXXRecordDecl *D2CXX = nullptr; if (auto *DCXX = dyn_cast(D)) { if (DCXX->isLambda()) { auto TInfoOrErr = import(DCXX->getLambdaTypeInfo()); if (!TInfoOrErr) return TInfoOrErr.takeError(); if (GetImportedOrCreateSpecialDecl( D2CXX, CXXRecordDecl::CreateLambda, D, Importer.getToContext(), DC, *TInfoOrErr, Loc, DCXX->isDependentLambda(), DCXX->isGenericLambda(), DCXX->getLambdaCaptureDefault())) return D2CXX; ExpectedDecl CDeclOrErr = import(DCXX->getLambdaContextDecl()); if (!CDeclOrErr) return CDeclOrErr.takeError(); D2CXX->setLambdaMangling(DCXX->getLambdaManglingNumber(), *CDeclOrErr, DCXX->hasKnownLambdaInternalLinkage()); } else if (DCXX->isInjectedClassName()) { // We have to be careful to do a similar dance to the one in // Sema::ActOnStartCXXMemberDeclarations const bool DelayTypeCreation = true; if (GetImportedOrCreateDecl( D2CXX, D, Importer.getToContext(), D->getTagKind(), DC, *BeginLocOrErr, Loc, Name.getAsIdentifierInfo(), cast_or_null(PrevDecl), DelayTypeCreation)) return D2CXX; Importer.getToContext().getTypeDeclType( D2CXX, dyn_cast(DC)); } else { if (GetImportedOrCreateDecl(D2CXX, D, Importer.getToContext(), D->getTagKind(), DC, *BeginLocOrErr, Loc, Name.getAsIdentifierInfo(), cast_or_null(PrevDecl))) return D2CXX; } D2 = D2CXX; D2->setAccess(D->getAccess()); D2->setLexicalDeclContext(LexicalDC); addDeclToContexts(D, D2); if (ClassTemplateDecl *FromDescribed = DCXX->getDescribedClassTemplate()) { ClassTemplateDecl *ToDescribed; if (Error Err = importInto(ToDescribed, FromDescribed)) return std::move(Err); D2CXX->setDescribedClassTemplate(ToDescribed); if (!DCXX->isInjectedClassName() && !IsFriendTemplate) { // In a record describing a template the type should be an // InjectedClassNameType (see Sema::CheckClassTemplate). Update the // previously set type to the correct value here (ToDescribed is not // available at record create). // FIXME: The previous type is cleared but not removed from // ASTContext's internal storage. CXXRecordDecl *Injected = nullptr; for (NamedDecl *Found : D2CXX->noload_lookup(Name)) { auto *Record = dyn_cast(Found); if (Record && Record->isInjectedClassName()) { Injected = Record; break; } } // Create an injected type for the whole redecl chain. SmallVector Redecls = getCanonicalForwardRedeclChain(D2CXX); for (auto *R : Redecls) { auto *RI = cast(R); RI->setTypeForDecl(nullptr); // Below we create a new injected type and assign that to the // canonical decl, subsequent declarations in the chain will reuse // that type. Importer.getToContext().getInjectedClassNameType( RI, ToDescribed->getInjectedClassNameSpecialization()); } // Set the new type for the previous injected decl too. 
if (Injected) { Injected->setTypeForDecl(nullptr); Importer.getToContext().getTypeDeclType(Injected, D2CXX); } } } else if (MemberSpecializationInfo *MemberInfo = DCXX->getMemberSpecializationInfo()) { TemplateSpecializationKind SK = MemberInfo->getTemplateSpecializationKind(); CXXRecordDecl *FromInst = DCXX->getInstantiatedFromMemberClass(); if (Expected ToInstOrErr = import(FromInst)) D2CXX->setInstantiationOfMemberClass(*ToInstOrErr, SK); else return ToInstOrErr.takeError(); if (ExpectedSLoc POIOrErr = import(MemberInfo->getPointOfInstantiation())) D2CXX->getMemberSpecializationInfo()->setPointOfInstantiation( *POIOrErr); else return POIOrErr.takeError(); } } else { if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), D->getTagKind(), DC, *BeginLocOrErr, Loc, Name.getAsIdentifierInfo(), PrevDecl)) return D2; D2->setLexicalDeclContext(LexicalDC); addDeclToContexts(D, D2); } if (auto BraceRangeOrErr = import(D->getBraceRange())) D2->setBraceRange(*BraceRangeOrErr); else return BraceRangeOrErr.takeError(); if (auto QualifierLocOrErr = import(D->getQualifierLoc())) D2->setQualifierInfo(*QualifierLocOrErr); else return QualifierLocOrErr.takeError(); if (D->isAnonymousStructOrUnion()) D2->setAnonymousStructOrUnion(true); if (D->isCompleteDefinition()) if (Error Err = ImportDefinition(D, D2, IDK_Default)) return std::move(Err); return D2; } ExpectedDecl ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) { // Import the major distinguishing characteristics of this enumerator. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; // Determine whether there are any other declarations with the same name and // in the same context. 
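// Note (illustrative): enumerators are matched by their value (see
// IsStructuralMatch for EnumConstantDecl above), so if both translation
// units contain
//   enum E { A = 1 };
// the imported 'A' is mapped to the existing enumerator, while an 'A' with
// a different value is treated as a conflicting declaration.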
if (!LexicalDC->isFunctionOrMethod()) { SmallVector ConflictingDecls; unsigned IDNS = Decl::IDNS_Ordinary; auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(IDNS)) continue; if (auto *FoundEnumConstant = dyn_cast(FoundDecl)) { if (IsStructuralMatch(D, FoundEnumConstant)) return Importer.MapImported(D, FoundEnumConstant); ConflictingDecls.push_back(FoundDecl); } } if (!ConflictingDecls.empty()) { ExpectedName NameOrErr = Importer.HandleNameConflict( Name, DC, IDNS, ConflictingDecls.data(), ConflictingDecls.size()); if (NameOrErr) Name = NameOrErr.get(); else return NameOrErr.takeError(); } } ExpectedType TypeOrErr = import(D->getType()); if (!TypeOrErr) return TypeOrErr.takeError(); ExpectedExpr InitOrErr = import(D->getInitExpr()); if (!InitOrErr) return InitOrErr.takeError(); EnumConstantDecl *ToEnumerator; if (GetImportedOrCreateDecl( ToEnumerator, D, Importer.getToContext(), cast(DC), Loc, Name.getAsIdentifierInfo(), *TypeOrErr, *InitOrErr, D->getInitVal())) return ToEnumerator; ToEnumerator->setAccess(D->getAccess()); ToEnumerator->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToEnumerator); return ToEnumerator; } Error ASTNodeImporter::ImportTemplateParameterLists(const DeclaratorDecl *FromD, DeclaratorDecl *ToD) { unsigned int Num = FromD->getNumTemplateParameterLists(); if (Num == 0) return Error::success(); SmallVector ToTPLists(Num); for (unsigned int I = 0; I < Num; ++I) if (Expected ToTPListOrErr = import(FromD->getTemplateParameterList(I))) ToTPLists[I] = *ToTPListOrErr; else return ToTPListOrErr.takeError(); ToD->setTemplateParameterListsInfo(Importer.ToContext, ToTPLists); return Error::success(); } Error ASTNodeImporter::ImportTemplateInformation( FunctionDecl *FromFD, FunctionDecl *ToFD) { switch (FromFD->getTemplatedKind()) { case FunctionDecl::TK_NonTemplate: case FunctionDecl::TK_FunctionTemplate: return Error::success(); case FunctionDecl::TK_MemberSpecialization: { TemplateSpecializationKind TSK = FromFD->getTemplateSpecializationKind(); if (Expected InstFDOrErr = import(FromFD->getInstantiatedFromMemberFunction())) ToFD->setInstantiationOfMemberFunction(*InstFDOrErr, TSK); else return InstFDOrErr.takeError(); if (ExpectedSLoc POIOrErr = import( FromFD->getMemberSpecializationInfo()->getPointOfInstantiation())) ToFD->getMemberSpecializationInfo()->setPointOfInstantiation(*POIOrErr); else return POIOrErr.takeError(); return Error::success(); } case FunctionDecl::TK_FunctionTemplateSpecialization: { auto FunctionAndArgsOrErr = ImportFunctionTemplateWithTemplateArgsFromSpecialization(FromFD); if (!FunctionAndArgsOrErr) return FunctionAndArgsOrErr.takeError(); TemplateArgumentList *ToTAList = TemplateArgumentList::CreateCopy( Importer.getToContext(), std::get<1>(*FunctionAndArgsOrErr)); auto *FTSInfo = FromFD->getTemplateSpecializationInfo(); TemplateArgumentListInfo ToTAInfo; const auto *FromTAArgsAsWritten = FTSInfo->TemplateArgumentsAsWritten; if (FromTAArgsAsWritten) if (Error Err = ImportTemplateArgumentListInfo( *FromTAArgsAsWritten, ToTAInfo)) return Err; ExpectedSLoc POIOrErr = import(FTSInfo->getPointOfInstantiation()); if (!POIOrErr) return POIOrErr.takeError(); if (Error Err = ImportTemplateParameterLists(FromFD, ToFD)) return Err; TemplateSpecializationKind TSK = FTSInfo->getTemplateSpecializationKind(); ToFD->setFunctionTemplateSpecialization( std::get<0>(*FunctionAndArgsOrErr), ToTAList, /* InsertPos= */ nullptr, TSK, FromTAArgsAsWritten ? 
&ToTAInfo : nullptr, *POIOrErr); return Error::success(); } case FunctionDecl::TK_DependentFunctionTemplateSpecialization: { auto *FromInfo = FromFD->getDependentSpecializationInfo(); UnresolvedSet<8> TemplDecls; unsigned NumTemplates = FromInfo->getNumTemplates(); for (unsigned I = 0; I < NumTemplates; I++) { if (Expected ToFTDOrErr = import(FromInfo->getTemplate(I))) TemplDecls.addDecl(*ToFTDOrErr); else return ToFTDOrErr.takeError(); } // Import TemplateArgumentListInfo. TemplateArgumentListInfo ToTAInfo; if (Error Err = ImportTemplateArgumentListInfo( FromInfo->getLAngleLoc(), FromInfo->getRAngleLoc(), llvm::makeArrayRef( FromInfo->getTemplateArgs(), FromInfo->getNumTemplateArgs()), ToTAInfo)) return Err; ToFD->setDependentTemplateSpecialization(Importer.getToContext(), TemplDecls, ToTAInfo); return Error::success(); } } llvm_unreachable("All cases should be covered!"); } Expected ASTNodeImporter::FindFunctionTemplateSpecialization(FunctionDecl *FromFD) { auto FunctionAndArgsOrErr = ImportFunctionTemplateWithTemplateArgsFromSpecialization(FromFD); if (!FunctionAndArgsOrErr) return FunctionAndArgsOrErr.takeError(); FunctionTemplateDecl *Template; TemplateArgsTy ToTemplArgs; std::tie(Template, ToTemplArgs) = *FunctionAndArgsOrErr; void *InsertPos = nullptr; auto *FoundSpec = Template->findSpecialization(ToTemplArgs, InsertPos); return FoundSpec; } Error ASTNodeImporter::ImportFunctionDeclBody(FunctionDecl *FromFD, FunctionDecl *ToFD) { if (Stmt *FromBody = FromFD->getBody()) { if (ExpectedStmt ToBodyOrErr = import(FromBody)) ToFD->setBody(*ToBodyOrErr); else return ToBodyOrErr.takeError(); } return Error::success(); } // Returns true if the given D has a DeclContext up to the TranslationUnitDecl // which is equal to the given DC. static bool isAncestorDeclContextOf(const DeclContext *DC, const Decl *D) { const DeclContext *DCi = D->getDeclContext(); while (DCi != D->getTranslationUnitDecl()) { if (DCi == DC) return true; DCi = DCi->getParent(); } return false; } bool ASTNodeImporter::hasAutoReturnTypeDeclaredInside(FunctionDecl *D) { QualType FromTy = D->getType(); const FunctionProtoType *FromFPT = FromTy->getAs(); assert(FromFPT && "Must be called on FunctionProtoType"); if (AutoType *AutoT = FromFPT->getReturnType()->getContainedAutoType()) { QualType DeducedT = AutoT->getDeducedType(); if (const RecordType *RecordT = DeducedT.isNull() ? nullptr : dyn_cast(DeducedT)) { RecordDecl *RD = RecordT->getDecl(); assert(RD); if (isAncestorDeclContextOf(D, RD)) { assert(RD->getLexicalDeclContext() == RD->getDeclContext()); return true; } } } if (const TypedefType *TypedefT = dyn_cast(FromFPT->getReturnType())) { TypedefNameDecl *TD = TypedefT->getDecl(); assert(TD); if (isAncestorDeclContextOf(D, TD)) { assert(TD->getLexicalDeclContext() == TD->getDeclContext()); return true; } } return false; } ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { SmallVector Redecls = getCanonicalForwardRedeclChain(D); auto RedeclIt = Redecls.begin(); // Import the first part of the decl chain. I.e. import all previous // declarations starting from the canonical decl. for (; RedeclIt != Redecls.end() && *RedeclIt != D; ++RedeclIt) { ExpectedDecl ToRedeclOrErr = import(*RedeclIt); if (!ToRedeclOrErr) return ToRedeclOrErr.takeError(); } assert(*RedeclIt == D); // Import the major distinguishing characteristics of this function. 
DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; FunctionDecl *FoundByLookup = nullptr; FunctionTemplateDecl *FromFT = D->getDescribedFunctionTemplate(); // If this is a function template specialization, then try to find the same // existing specialization in the "to" context. The lookup below will not // find any specialization, but would find the primary template; thus, we // have to skip normal lookup in case of specializations. // FIXME handle member function templates (TK_MemberSpecialization) similarly? if (D->getTemplatedKind() == FunctionDecl::TK_FunctionTemplateSpecialization) { auto FoundFunctionOrErr = FindFunctionTemplateSpecialization(D); if (!FoundFunctionOrErr) return FoundFunctionOrErr.takeError(); if (FunctionDecl *FoundFunction = *FoundFunctionOrErr) { if (Decl *Def = FindAndMapDefinition(D, FoundFunction)) return Def; FoundByLookup = FoundFunction; } } // Try to find a function in our own ("to") context with the same name, same // type, and in the same context as the function we're importing. else if (!LexicalDC->isFunctionOrMethod()) { SmallVector ConflictingDecls; unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_OrdinaryFriend; auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(IDNS)) continue; if (auto *FoundFunction = dyn_cast(FoundDecl)) { if (!hasSameVisibilityContextAndLinkage(FoundFunction, D)) continue; if (IsStructuralMatch(D, FoundFunction)) { if (Decl *Def = FindAndMapDefinition(D, FoundFunction)) return Def; FoundByLookup = FoundFunction; break; } // FIXME: Check for overloading more carefully, e.g., by boosting // Sema::IsOverload out to the AST library. // Function overloading is okay in C++. if (Importer.getToContext().getLangOpts().CPlusPlus) continue; // Complain about inconsistent function types. Importer.ToDiag(Loc, diag::warn_odr_function_type_inconsistent) << Name << D->getType() << FoundFunction->getType(); Importer.ToDiag(FoundFunction->getLocation(), diag::note_odr_value_here) << FoundFunction->getType(); ConflictingDecls.push_back(FoundDecl); } } if (!ConflictingDecls.empty()) { ExpectedName NameOrErr = Importer.HandleNameConflict( Name, DC, IDNS, ConflictingDecls.data(), ConflictingDecls.size()); if (NameOrErr) Name = NameOrErr.get(); else return NameOrErr.takeError(); } } // We do not allow more than one in-class declaration of a function. This is // because AST clients like VTableBuilder asserts on this. VTableBuilder // assumes there is only one in-class declaration. Building a redecl // chain would result in more than one in-class declaration for // overrides (even if they are part of the same redecl chain inside the // derived class.) if (FoundByLookup) { if (isa(FoundByLookup)) { if (D->getLexicalDeclContext() == D->getDeclContext()) { if (!D->doesThisDeclarationHaveABody()) { if (FunctionTemplateDecl *DescribedD = D->getDescribedFunctionTemplate()) { // Handle a "templated" function together with its described // template. This avoids need for a similar check at import of the // described template. assert(FoundByLookup->getDescribedFunctionTemplate() && "Templated function mapped to non-templated?"); Importer.MapImported(DescribedD, FoundByLookup->getDescribedFunctionTemplate()); } return Importer.MapImported(D, FoundByLookup); } else { // Let's continue and build up the redecl chain in this case. 
// FIXME Merge the functions into one decl. } } } } DeclarationNameInfo NameInfo(Name, Loc); // Import additional name location/type info. if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo)) return std::move(Err); QualType FromTy = D->getType(); // Set to true if we do not import the type of the function as is. There are // cases when the original type would result in an infinite recursion during // the import. To avoid an infinite recursion when importing, we create the // FunctionDecl with a simplified function type and update it only after the // relevant AST nodes are already imported. bool UsedDifferentProtoType = false; if (const auto *FromFPT = FromTy->getAs()) { QualType FromReturnTy = FromFPT->getReturnType(); // Functions with auto return type may define a struct inside their body // and the return type could refer to that struct. // E.g.: auto foo() { struct X{}; return X(); } // To avoid an infinite recursion when importing, create the FunctionDecl // with a simplified return type. if (hasAutoReturnTypeDeclaredInside(D)) { FromReturnTy = Importer.getFromContext().VoidTy; UsedDifferentProtoType = true; } FunctionProtoType::ExtProtoInfo FromEPI = FromFPT->getExtProtoInfo(); // FunctionProtoType::ExtProtoInfo's ExceptionSpecDecl can point to the // FunctionDecl that we are importing the FunctionProtoType for. // To avoid an infinite recursion when importing, create the FunctionDecl // with a simplified function type. if (FromEPI.ExceptionSpec.SourceDecl || FromEPI.ExceptionSpec.SourceTemplate || FromEPI.ExceptionSpec.NoexceptExpr) { FunctionProtoType::ExtProtoInfo DefaultEPI; FromEPI = DefaultEPI; UsedDifferentProtoType = true; } FromTy = Importer.getFromContext().getFunctionType( FromReturnTy, FromFPT->getParamTypes(), FromEPI); } Error Err = Error::success(); auto T = importChecked(Err, FromTy); auto TInfo = importChecked(Err, D->getTypeSourceInfo()); auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart()); auto ToEndLoc = importChecked(Err, D->getEndLoc()); auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc()); auto TrailingRequiresClause = importChecked(Err, D->getTrailingRequiresClause()); if (Err) return std::move(Err); // Import the function parameters. SmallVector Parameters; for (auto P : D->parameters()) { if (Expected ToPOrErr = import(P)) Parameters.push_back(*ToPOrErr); else return ToPOrErr.takeError(); } // Create the imported function. 
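// Note (illustrative): the simplified type built above breaks
// self-referential cycles. E.g. a special member function whose exception
// specification is still unevaluated (EST_Unevaluated) keeps the member
// itself in ExceptionSpec.SourceDecl, so importing the complete type first
// would require the very FunctionDecl we are about to create. The original
// type is re-imported and assigned after creation (see
// UsedDifferentProtoType below).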
FunctionDecl *ToFunction = nullptr; if (auto *FromConstructor = dyn_cast(D)) { Expr *ExplicitExpr = nullptr; if (FromConstructor->getExplicitSpecifier().getExpr()) { auto Imp = import(FromConstructor->getExplicitSpecifier().getExpr()); if (!Imp) return Imp.takeError(); ExplicitExpr = *Imp; } if (GetImportedOrCreateDecl( ToFunction, D, Importer.getToContext(), cast(DC), ToInnerLocStart, NameInfo, T, TInfo, ExplicitSpecifier( ExplicitExpr, FromConstructor->getExplicitSpecifier().getKind()), D->isInlineSpecified(), D->isImplicit(), D->getConstexprKind(), InheritedConstructor(), // FIXME: Properly import inherited // constructor info TrailingRequiresClause)) return ToFunction; } else if (CXXDestructorDecl *FromDtor = dyn_cast(D)) { Error Err = Error::success(); auto ToOperatorDelete = importChecked( Err, const_cast(FromDtor->getOperatorDelete())); auto ToThisArg = importChecked(Err, FromDtor->getOperatorDeleteThisArg()); if (Err) return std::move(Err); if (GetImportedOrCreateDecl( ToFunction, D, Importer.getToContext(), cast(DC), ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(), D->isImplicit(), D->getConstexprKind(), TrailingRequiresClause)) return ToFunction; CXXDestructorDecl *ToDtor = cast(ToFunction); ToDtor->setOperatorDelete(ToOperatorDelete, ToThisArg); } else if (CXXConversionDecl *FromConversion = dyn_cast(D)) { Expr *ExplicitExpr = nullptr; if (FromConversion->getExplicitSpecifier().getExpr()) { auto Imp = import(FromConversion->getExplicitSpecifier().getExpr()); if (!Imp) return Imp.takeError(); ExplicitExpr = *Imp; } if (GetImportedOrCreateDecl( ToFunction, D, Importer.getToContext(), cast(DC), ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(), ExplicitSpecifier(ExplicitExpr, FromConversion->getExplicitSpecifier().getKind()), D->getConstexprKind(), SourceLocation(), TrailingRequiresClause)) return ToFunction; } else if (auto *Method = dyn_cast(D)) { if (GetImportedOrCreateDecl( ToFunction, D, Importer.getToContext(), cast(DC), ToInnerLocStart, NameInfo, T, TInfo, Method->getStorageClass(), Method->isInlineSpecified(), D->getConstexprKind(), SourceLocation(), TrailingRequiresClause)) return ToFunction; } else { if (GetImportedOrCreateDecl( ToFunction, D, Importer.getToContext(), DC, ToInnerLocStart, NameInfo, T, TInfo, D->getStorageClass(), D->isInlineSpecified(), D->hasWrittenPrototype(), D->getConstexprKind(), TrailingRequiresClause)) return ToFunction; } // Connect the redecl chain. if (FoundByLookup) { auto *Recent = const_cast( FoundByLookup->getMostRecentDecl()); ToFunction->setPreviousDecl(Recent); // FIXME Probably we should merge exception specifications. E.g. In the // "To" context the existing function may have exception specification with // noexcept-unevaluated, while the newly imported function may have an // evaluated noexcept. A call to adjustExceptionSpec() on the imported // decl and its redeclarations may be required. } ToFunction->setQualifierInfo(ToQualifierLoc); ToFunction->setAccess(D->getAccess()); ToFunction->setLexicalDeclContext(LexicalDC); ToFunction->setVirtualAsWritten(D->isVirtualAsWritten()); ToFunction->setTrivial(D->isTrivial()); ToFunction->setPure(D->isPure()); ToFunction->setDefaulted(D->isDefaulted()); ToFunction->setExplicitlyDefaulted(D->isExplicitlyDefaulted()); ToFunction->setDeletedAsWritten(D->isDeletedAsWritten()); ToFunction->setRangeEnd(ToEndLoc); // Set the parameters. 
for (auto *Param : Parameters) { Param->setOwningFunction(ToFunction); ToFunction->addDeclInternal(Param); } ToFunction->setParams(Parameters); // We need to complete creation of FunctionProtoTypeLoc manually with setting // params it refers to. if (TInfo) { if (auto ProtoLoc = TInfo->getTypeLoc().IgnoreParens().getAs()) { for (unsigned I = 0, N = Parameters.size(); I != N; ++I) ProtoLoc.setParam(I, Parameters[I]); } } // Import the describing template function, if any. if (FromFT) { auto ToFTOrErr = import(FromFT); if (!ToFTOrErr) return ToFTOrErr.takeError(); } // Import Ctor initializers. if (auto *FromConstructor = dyn_cast(D)) { if (unsigned NumInitializers = FromConstructor->getNumCtorInitializers()) { SmallVector CtorInitializers(NumInitializers); // Import first, then allocate memory and copy if there was no error. if (Error Err = ImportContainerChecked( FromConstructor->inits(), CtorInitializers)) return std::move(Err); auto **Memory = new (Importer.getToContext()) CXXCtorInitializer *[NumInitializers]; std::copy(CtorInitializers.begin(), CtorInitializers.end(), Memory); auto *ToCtor = cast(ToFunction); ToCtor->setCtorInitializers(Memory); ToCtor->setNumCtorInitializers(NumInitializers); } } if (D->doesThisDeclarationHaveABody()) { Error Err = ImportFunctionDeclBody(D, ToFunction); if (Err) return std::move(Err); } // Import and set the original type in case we used another type. if (UsedDifferentProtoType) { if (ExpectedType TyOrErr = import(D->getType())) ToFunction->setType(*TyOrErr); else return TyOrErr.takeError(); } // FIXME: Other bits to merge? // If it is a template, import all related things. if (Error Err = ImportTemplateInformation(D, ToFunction)) return std::move(Err); addDeclToContexts(D, ToFunction); if (auto *FromCXXMethod = dyn_cast(D)) if (Error Err = ImportOverriddenMethods(cast(ToFunction), FromCXXMethod)) return std::move(Err); // Import the rest of the chain. I.e. import all subsequent declarations. for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt) { ExpectedDecl ToRedeclOrErr = import(*RedeclIt); if (!ToRedeclOrErr) return ToRedeclOrErr.takeError(); } return ToFunction; } ExpectedDecl ASTNodeImporter::VisitCXXMethodDecl(CXXMethodDecl *D) { return VisitFunctionDecl(D); } ExpectedDecl ASTNodeImporter::VisitCXXConstructorDecl(CXXConstructorDecl *D) { return VisitCXXMethodDecl(D); } ExpectedDecl ASTNodeImporter::VisitCXXDestructorDecl(CXXDestructorDecl *D) { return VisitCXXMethodDecl(D); } ExpectedDecl ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) { return VisitCXXMethodDecl(D); } ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) { // Import the major distinguishing characteristics of a variable. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; // Determine whether we've already imported this field. auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (FieldDecl *FoundField = dyn_cast(FoundDecl)) { // For anonymous fields, match up by index. if (!Name && ASTImporter::getFieldIndex(D) != ASTImporter::getFieldIndex(FoundField)) continue; if (Importer.IsStructurallyEquivalent(D->getType(), FoundField->getType())) { Importer.MapImported(D, FoundField); // In case of a FieldDecl of a ClassTemplateSpecializationDecl, the // initializer of a FieldDecl might not had been instantiated in the // "To" context. 
However, the "From" context might instantiated that, // thus we have to merge that. if (Expr *FromInitializer = D->getInClassInitializer()) { // We don't have yet the initializer set. if (FoundField->hasInClassInitializer() && !FoundField->getInClassInitializer()) { if (ExpectedExpr ToInitializerOrErr = import(FromInitializer)) FoundField->setInClassInitializer(*ToInitializerOrErr); else { // We can't return error here, // since we already mapped D as imported. // FIXME: warning message? consumeError(ToInitializerOrErr.takeError()); return FoundField; } } } return FoundField; } // FIXME: Why is this case not handled with calling HandleNameConflict? Importer.ToDiag(Loc, diag::warn_odr_field_type_inconsistent) << Name << D->getType() << FoundField->getType(); Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here) << FoundField->getType(); return make_error(ImportError::NameConflict); } } Error Err = Error::success(); auto ToType = importChecked(Err, D->getType()); auto ToTInfo = importChecked(Err, D->getTypeSourceInfo()); auto ToBitWidth = importChecked(Err, D->getBitWidth()); auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart()); auto ToInitializer = importChecked(Err, D->getInClassInitializer()); if (Err) return std::move(Err); FieldDecl *ToField; if (GetImportedOrCreateDecl(ToField, D, Importer.getToContext(), DC, ToInnerLocStart, Loc, Name.getAsIdentifierInfo(), ToType, ToTInfo, ToBitWidth, D->isMutable(), D->getInClassInitStyle())) return ToField; ToField->setAccess(D->getAccess()); ToField->setLexicalDeclContext(LexicalDC); if (ToInitializer) ToField->setInClassInitializer(ToInitializer); ToField->setImplicit(D->isImplicit()); LexicalDC->addDeclInternal(ToField); return ToField; } ExpectedDecl ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) { // Import the major distinguishing characteristics of a variable. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; // Determine whether we've already imported this field. auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) { if (auto *FoundField = dyn_cast(FoundDecls[I])) { // For anonymous indirect fields, match up by index. if (!Name && ASTImporter::getFieldIndex(D) != ASTImporter::getFieldIndex(FoundField)) continue; if (Importer.IsStructurallyEquivalent(D->getType(), FoundField->getType(), !Name.isEmpty())) { Importer.MapImported(D, FoundField); return FoundField; } // If there are more anonymous fields to check, continue. if (!Name && I < N-1) continue; // FIXME: Why is this case not handled with calling HandleNameConflict? Importer.ToDiag(Loc, diag::warn_odr_field_type_inconsistent) << Name << D->getType() << FoundField->getType(); Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here) << FoundField->getType(); return make_error(ImportError::NameConflict); } } // Import the type. 
auto TypeOrErr = import(D->getType()); if (!TypeOrErr) return TypeOrErr.takeError(); auto **NamedChain = new (Importer.getToContext()) NamedDecl*[D->getChainingSize()]; unsigned i = 0; for (auto *PI : D->chain()) if (Expected ToD = import(PI)) NamedChain[i++] = *ToD; else return ToD.takeError(); llvm::MutableArrayRef CH = {NamedChain, D->getChainingSize()}; IndirectFieldDecl *ToIndirectField; if (GetImportedOrCreateDecl(ToIndirectField, D, Importer.getToContext(), DC, Loc, Name.getAsIdentifierInfo(), *TypeOrErr, CH)) // FIXME here we leak `NamedChain` which is allocated before return ToIndirectField; ToIndirectField->setAccess(D->getAccess()); ToIndirectField->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToIndirectField); return ToIndirectField; } /// Used as return type of getFriendCountAndPosition. struct FriendCountAndPosition { /// Number of similar looking friends. unsigned int TotalCount; /// Index of the specific FriendDecl. unsigned int IndexOfDecl; }; template static FriendCountAndPosition getFriendCountAndPosition( const FriendDecl *FD, llvm::function_ref GetCanTypeOrDecl) { unsigned int FriendCount = 0; llvm::Optional FriendPosition; const auto *RD = cast(FD->getLexicalDeclContext()); T TypeOrDecl = GetCanTypeOrDecl(FD); for (const FriendDecl *FoundFriend : RD->friends()) { if (FoundFriend == FD) { FriendPosition = FriendCount; ++FriendCount; } else if (!FoundFriend->getFriendDecl() == !FD->getFriendDecl() && GetCanTypeOrDecl(FoundFriend) == TypeOrDecl) { ++FriendCount; } } assert(FriendPosition && "Friend decl not found in own parent."); return {FriendCount, *FriendPosition}; } static FriendCountAndPosition getFriendCountAndPosition(const FriendDecl *FD) { if (FD->getFriendType()) return getFriendCountAndPosition(FD, [](const FriendDecl *F) { if (TypeSourceInfo *TSI = F->getFriendType()) return TSI->getType().getCanonicalType(); llvm_unreachable("Wrong friend object type."); }); else return getFriendCountAndPosition(FD, [](const FriendDecl *F) { if (Decl *D = F->getFriendDecl()) return D->getCanonicalDecl(); llvm_unreachable("Wrong friend object type."); }); } ExpectedDecl ASTNodeImporter::VisitFriendDecl(FriendDecl *D) { // Import the major distinguishing characteristics of a declaration. DeclContext *DC, *LexicalDC; if (Error Err = ImportDeclContext(D, DC, LexicalDC)) return std::move(Err); // Determine whether we've already imported this decl. // FriendDecl is not a NamedDecl so we cannot use lookup. // We try to maintain order and count of redundant friend declarations. 
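// E.g. (illustrative): a class may contain several equivalent friend
// declarations,
//   struct X {
//     friend class F;
//     friend class F;
//   };
// getFriendCountAndPosition() records how many equivalent FriendDecls exist
// and which one 'D' is, so the i-th "from" friend is mapped onto the i-th
// equivalent "to" friend instead of collapsing them into one.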
const auto *RD = cast(DC); FriendDecl *ImportedFriend = RD->getFirstFriend(); SmallVector ImportedEquivalentFriends; while (ImportedFriend) { bool Match = false; if (D->getFriendDecl() && ImportedFriend->getFriendDecl()) { Match = IsStructuralMatch(D->getFriendDecl(), ImportedFriend->getFriendDecl(), /*Complain=*/false); } else if (D->getFriendType() && ImportedFriend->getFriendType()) { Match = Importer.IsStructurallyEquivalent( D->getFriendType()->getType(), ImportedFriend->getFriendType()->getType(), /*Complain=*/false); } if (Match) ImportedEquivalentFriends.push_back(ImportedFriend); ImportedFriend = ImportedFriend->getNextFriend(); } FriendCountAndPosition CountAndPosition = getFriendCountAndPosition(D); assert(ImportedEquivalentFriends.size() <= CountAndPosition.TotalCount && "Class with non-matching friends is imported, ODR check wrong?"); if (ImportedEquivalentFriends.size() == CountAndPosition.TotalCount) return Importer.MapImported( D, ImportedEquivalentFriends[CountAndPosition.IndexOfDecl]); // Not found. Create it. // The declarations will be put into order later by ImportDeclContext. FriendDecl::FriendUnion ToFU; if (NamedDecl *FriendD = D->getFriendDecl()) { NamedDecl *ToFriendD; if (Error Err = importInto(ToFriendD, FriendD)) return std::move(Err); if (FriendD->getFriendObjectKind() != Decl::FOK_None && !(FriendD->isInIdentifierNamespace(Decl::IDNS_NonMemberOperator))) ToFriendD->setObjectOfFriendDecl(false); ToFU = ToFriendD; } else { // The friend is a type, not a decl. if (auto TSIOrErr = import(D->getFriendType())) ToFU = *TSIOrErr; else return TSIOrErr.takeError(); } SmallVector ToTPLists(D->NumTPLists); auto **FromTPLists = D->getTrailingObjects(); for (unsigned I = 0; I < D->NumTPLists; I++) { if (auto ListOrErr = import(FromTPLists[I])) ToTPLists[I] = *ListOrErr; else return ListOrErr.takeError(); } auto LocationOrErr = import(D->getLocation()); if (!LocationOrErr) return LocationOrErr.takeError(); auto FriendLocOrErr = import(D->getFriendLoc()); if (!FriendLocOrErr) return FriendLocOrErr.takeError(); FriendDecl *FrD; if (GetImportedOrCreateDecl(FrD, D, Importer.getToContext(), DC, *LocationOrErr, ToFU, *FriendLocOrErr, ToTPLists)) return FrD; FrD->setAccess(D->getAccess()); FrD->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(FrD); return FrD; } ExpectedDecl ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) { // Import the major distinguishing characteristics of an ivar. 
DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; // Determine whether we've already imported this ivar auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (ObjCIvarDecl *FoundIvar = dyn_cast(FoundDecl)) { if (Importer.IsStructurallyEquivalent(D->getType(), FoundIvar->getType())) { Importer.MapImported(D, FoundIvar); return FoundIvar; } Importer.ToDiag(Loc, diag::warn_odr_ivar_type_inconsistent) << Name << D->getType() << FoundIvar->getType(); Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here) << FoundIvar->getType(); return make_error(ImportError::NameConflict); } } Error Err = Error::success(); auto ToType = importChecked(Err, D->getType()); auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo()); auto ToBitWidth = importChecked(Err, D->getBitWidth()); auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart()); if (Err) return std::move(Err); ObjCIvarDecl *ToIvar; if (GetImportedOrCreateDecl( ToIvar, D, Importer.getToContext(), cast(DC), ToInnerLocStart, Loc, Name.getAsIdentifierInfo(), ToType, ToTypeSourceInfo, D->getAccessControl(),ToBitWidth, D->getSynthesize())) return ToIvar; ToIvar->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToIvar); return ToIvar; } ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) { SmallVector Redecls = getCanonicalForwardRedeclChain(D); auto RedeclIt = Redecls.begin(); // Import the first part of the decl chain. I.e. import all previous // declarations starting from the canonical decl. for (; RedeclIt != Redecls.end() && *RedeclIt != D; ++RedeclIt) { ExpectedDecl RedeclOrErr = import(*RedeclIt); if (!RedeclOrErr) return RedeclOrErr.takeError(); } assert(*RedeclIt == D); // Import the major distinguishing characteristics of a variable. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; // Try to find a variable in our own ("to") context with the same name and // in the same context as the variable we're importing. VarDecl *FoundByLookup = nullptr; if (D->isFileVarDecl()) { SmallVector ConflictingDecls; unsigned IDNS = Decl::IDNS_Ordinary; auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(IDNS)) continue; if (auto *FoundVar = dyn_cast(FoundDecl)) { if (!hasSameVisibilityContextAndLinkage(FoundVar, D)) continue; if (Importer.IsStructurallyEquivalent(D->getType(), FoundVar->getType())) { // The VarDecl in the "From" context has a definition, but in the // "To" context we already have a definition. VarDecl *FoundDef = FoundVar->getDefinition(); if (D->isThisDeclarationADefinition() && FoundDef) // FIXME Check for ODR error if the two definitions have // different initializers? return Importer.MapImported(D, FoundDef); // The VarDecl in the "From" context has an initializer, but in the // "To" context we already have an initializer. const VarDecl *FoundDInit = nullptr; if (D->getInit() && FoundVar->getAnyInitializer(FoundDInit)) // FIXME Diagnose ODR error if the two initializers are different? 
return Importer.MapImported(D, const_cast(FoundDInit)); FoundByLookup = FoundVar; break; } const ArrayType *FoundArray = Importer.getToContext().getAsArrayType(FoundVar->getType()); const ArrayType *TArray = Importer.getToContext().getAsArrayType(D->getType()); if (FoundArray && TArray) { if (isa(FoundArray) && isa(TArray)) { // Import the type. if (auto TyOrErr = import(D->getType())) FoundVar->setType(*TyOrErr); else return TyOrErr.takeError(); FoundByLookup = FoundVar; break; } else if (isa(TArray) && isa(FoundArray)) { FoundByLookup = FoundVar; break; } } Importer.ToDiag(Loc, diag::warn_odr_variable_type_inconsistent) << Name << D->getType() << FoundVar->getType(); Importer.ToDiag(FoundVar->getLocation(), diag::note_odr_value_here) << FoundVar->getType(); ConflictingDecls.push_back(FoundDecl); } } if (!ConflictingDecls.empty()) { ExpectedName NameOrErr = Importer.HandleNameConflict( Name, DC, IDNS, ConflictingDecls.data(), ConflictingDecls.size()); if (NameOrErr) Name = NameOrErr.get(); else return NameOrErr.takeError(); } } Error Err = Error::success(); auto ToType = importChecked(Err, D->getType()); auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo()); auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart()); auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc()); if (Err) return std::move(Err); // Create the imported variable. VarDecl *ToVar; if (GetImportedOrCreateDecl(ToVar, D, Importer.getToContext(), DC, ToInnerLocStart, Loc, Name.getAsIdentifierInfo(), ToType, ToTypeSourceInfo, D->getStorageClass())) return ToVar; ToVar->setQualifierInfo(ToQualifierLoc); ToVar->setAccess(D->getAccess()); ToVar->setLexicalDeclContext(LexicalDC); if (FoundByLookup) { auto *Recent = const_cast(FoundByLookup->getMostRecentDecl()); ToVar->setPreviousDecl(Recent); } // Import the described template, if any. if (D->getDescribedVarTemplate()) { auto ToVTOrErr = import(D->getDescribedVarTemplate()); if (!ToVTOrErr) return ToVTOrErr.takeError(); } if (Error Err = ImportInitializer(D, ToVar)) return std::move(Err); if (D->isConstexpr()) ToVar->setConstexpr(true); addDeclToContexts(D, ToVar); // Import the rest of the chain. I.e. import all subsequent declarations. for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt) { ExpectedDecl RedeclOrErr = import(*RedeclIt); if (!RedeclOrErr) return RedeclOrErr.takeError(); } return ToVar; } ExpectedDecl ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) { // Parameters are created in the translation unit's context, then moved // into the function declaration's context afterward. DeclContext *DC = Importer.getToContext().getTranslationUnitDecl(); Error Err = Error::success(); auto ToDeclName = importChecked(Err, D->getDeclName()); auto ToLocation = importChecked(Err, D->getLocation()); auto ToType = importChecked(Err, D->getType()); if (Err) return std::move(Err); // Create the imported parameter. 
ImplicitParamDecl *ToParm = nullptr; if (GetImportedOrCreateDecl(ToParm, D, Importer.getToContext(), DC, ToLocation, ToDeclName.getAsIdentifierInfo(), ToType, D->getParameterKind())) return ToParm; return ToParm; } Error ASTNodeImporter::ImportDefaultArgOfParmVarDecl( const ParmVarDecl *FromParam, ParmVarDecl *ToParam) { ToParam->setHasInheritedDefaultArg(FromParam->hasInheritedDefaultArg()); ToParam->setKNRPromoted(FromParam->isKNRPromoted()); if (FromParam->hasUninstantiatedDefaultArg()) { if (auto ToDefArgOrErr = import(FromParam->getUninstantiatedDefaultArg())) ToParam->setUninstantiatedDefaultArg(*ToDefArgOrErr); else return ToDefArgOrErr.takeError(); } else if (FromParam->hasUnparsedDefaultArg()) { ToParam->setUnparsedDefaultArg(); } else if (FromParam->hasDefaultArg()) { if (auto ToDefArgOrErr = import(FromParam->getDefaultArg())) ToParam->setDefaultArg(*ToDefArgOrErr); else return ToDefArgOrErr.takeError(); } return Error::success(); } ExpectedDecl ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) { // Parameters are created in the translation unit's context, then moved // into the function declaration's context afterward. DeclContext *DC = Importer.getToContext().getTranslationUnitDecl(); Error Err = Error::success(); auto ToDeclName = importChecked(Err, D->getDeclName()); auto ToLocation = importChecked(Err, D->getLocation()); auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart()); auto ToType = importChecked(Err, D->getType()); auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo()); if (Err) return std::move(Err); ParmVarDecl *ToParm; if (GetImportedOrCreateDecl(ToParm, D, Importer.getToContext(), DC, ToInnerLocStart, ToLocation, ToDeclName.getAsIdentifierInfo(), ToType, ToTypeSourceInfo, D->getStorageClass(), /*DefaultArg*/ nullptr)) return ToParm; // Set the default argument. It should be no problem if it was already done. // Do not import the default expression before GetImportedOrCreateDecl call // to avoid possible infinite import loop because circular dependency. if (Error Err = ImportDefaultArgOfParmVarDecl(D, ToParm)) return std::move(Err); if (D->isObjCMethodParameter()) { ToParm->setObjCMethodScopeInfo(D->getFunctionScopeIndex()); ToParm->setObjCDeclQualifier(D->getObjCDeclQualifier()); } else { ToParm->setScopeInfo(D->getFunctionScopeDepth(), D->getFunctionScopeIndex()); } return ToParm; } ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) { // Import the major distinguishing characteristics of a method. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (auto *FoundMethod = dyn_cast(FoundDecl)) { if (FoundMethod->isInstanceMethod() != D->isInstanceMethod()) continue; // Check return types. if (!Importer.IsStructurallyEquivalent(D->getReturnType(), FoundMethod->getReturnType())) { Importer.ToDiag(Loc, diag::warn_odr_objc_method_result_type_inconsistent) << D->isInstanceMethod() << Name << D->getReturnType() << FoundMethod->getReturnType(); Importer.ToDiag(FoundMethod->getLocation(), diag::note_odr_objc_method_here) << D->isInstanceMethod() << Name; return make_error(ImportError::NameConflict); } // Check the number of parameters. 
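// --------------------------------------------------------------------------
// The ordering spelled out below for ParmVarDecl (create the decl first,
// import its default argument only afterwards) is what keeps mutually
// referencing nodes from recursing forever: the new node is registered before
// its children are imported, so a cycle resolves to the already registered
// node.  A standalone sketch of that two-phase import with a hypothetical
// self-referential Node type:
#include <map>
#include <memory>
#include <vector>

namespace cycle_sketch {
struct Node {
  const Node *Child = nullptr;  // may point back into a cycle
};

class Importer {
  std::map<const Node *, Node *> Imported;
  std::vector<std::unique_ptr<Node>> Storage;

public:
  Node *importNode(const Node *From) {
    auto It = Imported.find(From);
    if (It != Imported.end())
      return It->second;
    // Register the new node *before* importing its children; a cycle that
    // leads back to From now terminates in the lookup above.
    Storage.push_back(std::make_unique<Node>());
    Node *To = Storage.back().get();
    Imported[From] = To;
    if (From->Child)
      To->Child = importNode(From->Child);
    return To;
  }
};
} // namespace cycle_sketch
// --------------------------------------------------------------------------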
if (D->param_size() != FoundMethod->param_size()) { Importer.ToDiag(Loc, diag::warn_odr_objc_method_num_params_inconsistent) << D->isInstanceMethod() << Name << D->param_size() << FoundMethod->param_size(); Importer.ToDiag(FoundMethod->getLocation(), diag::note_odr_objc_method_here) << D->isInstanceMethod() << Name; return make_error(ImportError::NameConflict); } // Check parameter types. for (ObjCMethodDecl::param_iterator P = D->param_begin(), PEnd = D->param_end(), FoundP = FoundMethod->param_begin(); P != PEnd; ++P, ++FoundP) { if (!Importer.IsStructurallyEquivalent((*P)->getType(), (*FoundP)->getType())) { Importer.FromDiag((*P)->getLocation(), diag::warn_odr_objc_method_param_type_inconsistent) << D->isInstanceMethod() << Name << (*P)->getType() << (*FoundP)->getType(); Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here) << (*FoundP)->getType(); return make_error(ImportError::NameConflict); } } // Check variadic/non-variadic. // Check the number of parameters. if (D->isVariadic() != FoundMethod->isVariadic()) { Importer.ToDiag(Loc, diag::warn_odr_objc_method_variadic_inconsistent) << D->isInstanceMethod() << Name; Importer.ToDiag(FoundMethod->getLocation(), diag::note_odr_objc_method_here) << D->isInstanceMethod() << Name; return make_error(ImportError::NameConflict); } // FIXME: Any other bits we need to merge? return Importer.MapImported(D, FoundMethod); } } Error Err = Error::success(); auto ToEndLoc = importChecked(Err, D->getEndLoc()); auto ToReturnType = importChecked(Err, D->getReturnType()); auto ToReturnTypeSourceInfo = importChecked(Err, D->getReturnTypeSourceInfo()); if (Err) return std::move(Err); ObjCMethodDecl *ToMethod; if (GetImportedOrCreateDecl( ToMethod, D, Importer.getToContext(), Loc, ToEndLoc, Name.getObjCSelector(), ToReturnType, ToReturnTypeSourceInfo, DC, D->isInstanceMethod(), D->isVariadic(), D->isPropertyAccessor(), D->isSynthesizedAccessorStub(), D->isImplicit(), D->isDefined(), D->getImplementationControl(), D->hasRelatedResultType())) return ToMethod; // FIXME: When we decide to merge method definitions, we'll need to // deal with implicit parameters. // Import the parameters SmallVector ToParams; for (auto *FromP : D->parameters()) { if (Expected ToPOrErr = import(FromP)) ToParams.push_back(*ToPOrErr); else return ToPOrErr.takeError(); } // Set the parameters. for (auto *ToParam : ToParams) { ToParam->setOwningFunction(ToMethod); ToMethod->addDeclInternal(ToParam); } SmallVector FromSelLocs; D->getSelectorLocs(FromSelLocs); SmallVector ToSelLocs(FromSelLocs.size()); if (Error Err = ImportContainerChecked(FromSelLocs, ToSelLocs)) return std::move(Err); ToMethod->setMethodParams(Importer.getToContext(), ToParams, ToSelLocs); ToMethod->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToMethod); // Implicit params are declared when Sema encounters the definition but this // never happens when the method is imported. Manually declare the implicit // params now that the MethodDecl knows its class interface. if (D->getSelfDecl()) ToMethod->createImplicitParams(Importer.getToContext(), ToMethod->getClassInterface()); return ToMethod; } ExpectedDecl ASTNodeImporter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) { // Import the major distinguishing characteristics of a category. 
DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; Error Err = Error::success(); auto ToVarianceLoc = importChecked(Err, D->getVarianceLoc()); auto ToLocation = importChecked(Err, D->getLocation()); auto ToColonLoc = importChecked(Err, D->getColonLoc()); auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo()); if (Err) return std::move(Err); ObjCTypeParamDecl *Result; if (GetImportedOrCreateDecl( Result, D, Importer.getToContext(), DC, D->getVariance(), ToVarianceLoc, D->getIndex(), ToLocation, Name.getAsIdentifierInfo(), ToColonLoc, ToTypeSourceInfo)) return Result; Result->setLexicalDeclContext(LexicalDC); return Result; } ExpectedDecl ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) { // Import the major distinguishing characteristics of a category. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; ObjCInterfaceDecl *ToInterface; if (Error Err = importInto(ToInterface, D->getClassInterface())) return std::move(Err); // Determine if we've already encountered this category. ObjCCategoryDecl *MergeWithCategory = ToInterface->FindCategoryDeclaration(Name.getAsIdentifierInfo()); ObjCCategoryDecl *ToCategory = MergeWithCategory; if (!ToCategory) { Error Err = Error::success(); auto ToAtStartLoc = importChecked(Err, D->getAtStartLoc()); auto ToCategoryNameLoc = importChecked(Err, D->getCategoryNameLoc()); auto ToIvarLBraceLoc = importChecked(Err, D->getIvarLBraceLoc()); auto ToIvarRBraceLoc = importChecked(Err, D->getIvarRBraceLoc()); if (Err) return std::move(Err); if (GetImportedOrCreateDecl(ToCategory, D, Importer.getToContext(), DC, ToAtStartLoc, Loc, ToCategoryNameLoc, Name.getAsIdentifierInfo(), ToInterface, /*TypeParamList=*/nullptr, ToIvarLBraceLoc, ToIvarRBraceLoc)) return ToCategory; ToCategory->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToCategory); // Import the type parameter list after MapImported, to avoid // loops when bringing in their DeclContext. if (auto PListOrErr = ImportObjCTypeParamList(D->getTypeParamList())) ToCategory->setTypeParamList(*PListOrErr); else return PListOrErr.takeError(); // Import protocols SmallVector Protocols; SmallVector ProtocolLocs; ObjCCategoryDecl::protocol_loc_iterator FromProtoLoc = D->protocol_loc_begin(); for (ObjCCategoryDecl::protocol_iterator FromProto = D->protocol_begin(), FromProtoEnd = D->protocol_end(); FromProto != FromProtoEnd; ++FromProto, ++FromProtoLoc) { if (Expected ToProtoOrErr = import(*FromProto)) Protocols.push_back(*ToProtoOrErr); else return ToProtoOrErr.takeError(); if (ExpectedSLoc ToProtoLocOrErr = import(*FromProtoLoc)) ProtocolLocs.push_back(*ToProtoLocOrErr); else return ToProtoLocOrErr.takeError(); } // FIXME: If we're merging, make sure that the protocol list is the same. ToCategory->setProtocolList(Protocols.data(), Protocols.size(), ProtocolLocs.data(), Importer.getToContext()); } else { Importer.MapImported(D, ToCategory); } // Import all of the members of this category. if (Error Err = ImportDeclContext(D)) return std::move(Err); // If we have an implementation, import it as well. 
if (D->getImplementation()) { if (Expected ToImplOrErr = import(D->getImplementation())) ToCategory->setImplementation(*ToImplOrErr); else return ToImplOrErr.takeError(); } return ToCategory; } Error ASTNodeImporter::ImportDefinition( ObjCProtocolDecl *From, ObjCProtocolDecl *To, ImportDefinitionKind Kind) { if (To->getDefinition()) { if (shouldForceImportDeclContext(Kind)) if (Error Err = ImportDeclContext(From)) return Err; return Error::success(); } // Start the protocol definition To->startDefinition(); // Import protocols SmallVector Protocols; SmallVector ProtocolLocs; ObjCProtocolDecl::protocol_loc_iterator FromProtoLoc = From->protocol_loc_begin(); for (ObjCProtocolDecl::protocol_iterator FromProto = From->protocol_begin(), FromProtoEnd = From->protocol_end(); FromProto != FromProtoEnd; ++FromProto, ++FromProtoLoc) { if (Expected ToProtoOrErr = import(*FromProto)) Protocols.push_back(*ToProtoOrErr); else return ToProtoOrErr.takeError(); if (ExpectedSLoc ToProtoLocOrErr = import(*FromProtoLoc)) ProtocolLocs.push_back(*ToProtoLocOrErr); else return ToProtoLocOrErr.takeError(); } // FIXME: If we're merging, make sure that the protocol list is the same. To->setProtocolList(Protocols.data(), Protocols.size(), ProtocolLocs.data(), Importer.getToContext()); if (shouldForceImportDeclContext(Kind)) { // Import all of the members of this protocol. if (Error Err = ImportDeclContext(From, /*ForceImport=*/true)) return Err; } return Error::success(); } ExpectedDecl ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) { // If this protocol has a definition in the translation unit we're coming // from, but this particular declaration is not that definition, import the // definition and map to that. ObjCProtocolDecl *Definition = D->getDefinition(); if (Definition && Definition != D) { if (ExpectedDecl ImportedDefOrErr = import(Definition)) return Importer.MapImported(D, *ImportedDefOrErr); else return ImportedDefOrErr.takeError(); } // Import the major distinguishing characteristics of a protocol. 
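// --------------------------------------------------------------------------
// VisitObjCProtocolDecl above (like VisitObjCInterfaceDecl and the variable
// template specialization visitor later on) starts with a redirect: when the
// source TU has a definition and D is only one of its forward declarations,
// the definition is imported first and D is mapped to that result.  A
// standalone sketch of the redirect, with hypothetical Decl/Importer types:
#include <map>
#include <memory>
#include <vector>

namespace redirect_sketch {
struct Decl {
  Decl *Definition = nullptr;   // null when only forward-declared
};

class Importer {
  std::map<const Decl *, Decl *> Imported;
  std::vector<std::unique_ptr<Decl>> Storage;

public:
  Decl *mapImported(const Decl *From, Decl *To) {
    Imported[From] = To;
    return To;
  }

  Decl *importDecl(Decl *D) {
    // Redirect: if D is not the defining declaration, import the definition
    // and let D share the imported counterpart.
    if (D->Definition && D->Definition != D)
      return mapImported(D, importDecl(D->Definition));
    auto It = Imported.find(D);
    if (It != Imported.end())
      return It->second;
    Storage.push_back(std::make_unique<Decl>());
    return mapImported(D, Storage.back().get());
  }
};
} // namespace redirect_sketch
// --------------------------------------------------------------------------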
DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; ObjCProtocolDecl *MergeWithProtocol = nullptr; auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_ObjCProtocol)) continue; if ((MergeWithProtocol = dyn_cast(FoundDecl))) break; } ObjCProtocolDecl *ToProto = MergeWithProtocol; if (!ToProto) { auto ToAtBeginLocOrErr = import(D->getAtStartLoc()); if (!ToAtBeginLocOrErr) return ToAtBeginLocOrErr.takeError(); if (GetImportedOrCreateDecl(ToProto, D, Importer.getToContext(), DC, Name.getAsIdentifierInfo(), Loc, *ToAtBeginLocOrErr, /*PrevDecl=*/nullptr)) return ToProto; ToProto->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToProto); } Importer.MapImported(D, ToProto); if (D->isThisDeclarationADefinition()) if (Error Err = ImportDefinition(D, ToProto)) return std::move(Err); return ToProto; } ExpectedDecl ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) { DeclContext *DC, *LexicalDC; if (Error Err = ImportDeclContext(D, DC, LexicalDC)) return std::move(Err); ExpectedSLoc ExternLocOrErr = import(D->getExternLoc()); if (!ExternLocOrErr) return ExternLocOrErr.takeError(); ExpectedSLoc LangLocOrErr = import(D->getLocation()); if (!LangLocOrErr) return LangLocOrErr.takeError(); bool HasBraces = D->hasBraces(); LinkageSpecDecl *ToLinkageSpec; if (GetImportedOrCreateDecl(ToLinkageSpec, D, Importer.getToContext(), DC, *ExternLocOrErr, *LangLocOrErr, D->getLanguage(), HasBraces)) return ToLinkageSpec; if (HasBraces) { ExpectedSLoc RBraceLocOrErr = import(D->getRBraceLoc()); if (!RBraceLocOrErr) return RBraceLocOrErr.takeError(); ToLinkageSpec->setRBraceLoc(*RBraceLocOrErr); } ToLinkageSpec->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToLinkageSpec); return ToLinkageSpec; } ExpectedDecl ASTNodeImporter::VisitUsingDecl(UsingDecl *D) { DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD = nullptr; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; Error Err = Error::success(); auto ToLoc = importChecked(Err, D->getNameInfo().getLoc()); auto ToUsingLoc = importChecked(Err, D->getUsingLoc()); auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc()); if (Err) return std::move(Err); DeclarationNameInfo NameInfo(Name, ToLoc); if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo)) return std::move(Err); UsingDecl *ToUsing; if (GetImportedOrCreateDecl(ToUsing, D, Importer.getToContext(), DC, ToUsingLoc, ToQualifierLoc, NameInfo, D->hasTypename())) return ToUsing; ToUsing->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToUsing); if (NamedDecl *FromPattern = Importer.getFromContext().getInstantiatedFromUsingDecl(D)) { if (Expected ToPatternOrErr = import(FromPattern)) Importer.getToContext().setInstantiatedFromUsingDecl( ToUsing, *ToPatternOrErr); else return ToPatternOrErr.takeError(); } for (UsingShadowDecl *FromShadow : D->shadows()) { if (Expected ToShadowOrErr = import(FromShadow)) ToUsing->addShadowDecl(*ToShadowOrErr); else // FIXME: We return error here but the definition is already created // and available with lookups. How to fix this?.. 
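// --------------------------------------------------------------------------
// The lookups in these visitors skip candidates that are not in the wanted
// identifier namespace (Decl::IDNS_ObjCProtocol, IDNS_Ordinary, ...), so a
// declaration that merely shares the name is never merged with the wrong kind
// of entity.  A standalone sketch of that bitmask filter, with hypothetical
// namespace flags rather than the real Decl::IdentifierNamespace values:
#include <cstdint>
#include <string>
#include <vector>

namespace lookup_sketch {
enum IdentifierNamespace : uint32_t {
  IDNS_Ordinary = 1u << 0,
  IDNS_Tag      = 1u << 1,
  IDNS_Protocol = 1u << 2,
};

struct FoundDecl {
  std::string Name;
  uint32_t IDNS = 0;   // bitmask of namespaces the decl is visible in
};

// Returns the first candidate with the requested name that is visible in at
// least one of the requested namespaces.
inline const FoundDecl *findInNamespace(const std::vector<FoundDecl> &Decls,
                                        const std::string &Name,
                                        uint32_t WantedIDNS) {
  for (const FoundDecl &D : Decls) {
    if (D.Name != Name)
      continue;
    if ((D.IDNS & WantedIDNS) == 0)
      continue;                  // right name, wrong kind of declaration
    return &D;
  }
  return nullptr;
}
} // namespace lookup_sketch
// --------------------------------------------------------------------------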
return ToShadowOrErr.takeError(); } return ToUsing; } ExpectedDecl ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) { DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD = nullptr; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; Expected ToUsingOrErr = import(D->getUsingDecl()); if (!ToUsingOrErr) return ToUsingOrErr.takeError(); Expected ToTargetOrErr = import(D->getTargetDecl()); if (!ToTargetOrErr) return ToTargetOrErr.takeError(); UsingShadowDecl *ToShadow; if (GetImportedOrCreateDecl(ToShadow, D, Importer.getToContext(), DC, Loc, *ToUsingOrErr, *ToTargetOrErr)) return ToShadow; ToShadow->setLexicalDeclContext(LexicalDC); ToShadow->setAccess(D->getAccess()); if (UsingShadowDecl *FromPattern = Importer.getFromContext().getInstantiatedFromUsingShadowDecl(D)) { if (Expected ToPatternOrErr = import(FromPattern)) Importer.getToContext().setInstantiatedFromUsingShadowDecl( ToShadow, *ToPatternOrErr); else // FIXME: We return error here but the definition is already created // and available with lookups. How to fix this?.. return ToPatternOrErr.takeError(); } LexicalDC->addDeclInternal(ToShadow); return ToShadow; } ExpectedDecl ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) { DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD = nullptr; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; auto ToComAncestorOrErr = Importer.ImportContext(D->getCommonAncestor()); if (!ToComAncestorOrErr) return ToComAncestorOrErr.takeError(); Error Err = Error::success(); auto ToNominatedNamespace = importChecked(Err, D->getNominatedNamespace()); auto ToUsingLoc = importChecked(Err, D->getUsingLoc()); auto ToNamespaceKeyLocation = importChecked(Err, D->getNamespaceKeyLocation()); auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc()); auto ToIdentLocation = importChecked(Err, D->getIdentLocation()); if (Err) return std::move(Err); UsingDirectiveDecl *ToUsingDir; if (GetImportedOrCreateDecl(ToUsingDir, D, Importer.getToContext(), DC, ToUsingLoc, ToNamespaceKeyLocation, ToQualifierLoc, ToIdentLocation, ToNominatedNamespace, *ToComAncestorOrErr)) return ToUsingDir; ToUsingDir->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToUsingDir); return ToUsingDir; } ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingValueDecl( UnresolvedUsingValueDecl *D) { DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD = nullptr; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; Error Err = Error::success(); auto ToLoc = importChecked(Err, D->getNameInfo().getLoc()); auto ToUsingLoc = importChecked(Err, D->getUsingLoc()); auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc()); auto ToEllipsisLoc = importChecked(Err, D->getEllipsisLoc()); if (Err) return std::move(Err); DeclarationNameInfo NameInfo(Name, ToLoc); if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo)) return std::move(Err); UnresolvedUsingValueDecl *ToUsingValue; if (GetImportedOrCreateDecl(ToUsingValue, D, Importer.getToContext(), DC, ToUsingLoc, ToQualifierLoc, NameInfo, ToEllipsisLoc)) return ToUsingValue; ToUsingValue->setAccess(D->getAccess()); ToUsingValue->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToUsingValue); return ToUsingValue; } ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingTypenameDecl( 
UnresolvedUsingTypenameDecl *D) { DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD = nullptr; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; Error Err = Error::success(); auto ToUsingLoc = importChecked(Err, D->getUsingLoc()); auto ToTypenameLoc = importChecked(Err, D->getTypenameLoc()); auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc()); auto ToEllipsisLoc = importChecked(Err, D->getEllipsisLoc()); if (Err) return std::move(Err); UnresolvedUsingTypenameDecl *ToUsing; if (GetImportedOrCreateDecl(ToUsing, D, Importer.getToContext(), DC, ToUsingLoc, ToTypenameLoc, ToQualifierLoc, Loc, Name, ToEllipsisLoc)) return ToUsing; ToUsing->setAccess(D->getAccess()); ToUsing->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToUsing); return ToUsing; } ExpectedDecl ASTNodeImporter::VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D) { Decl* ToD = nullptr; switch (D->getBuiltinTemplateKind()) { case BuiltinTemplateKind::BTK__make_integer_seq: ToD = Importer.getToContext().getMakeIntegerSeqDecl(); break; case BuiltinTemplateKind::BTK__type_pack_element: ToD = Importer.getToContext().getTypePackElementDecl(); break; } assert(ToD && "BuiltinTemplateDecl of unsupported kind!"); Importer.MapImported(D, ToD); return ToD; } Error ASTNodeImporter::ImportDefinition( ObjCInterfaceDecl *From, ObjCInterfaceDecl *To, ImportDefinitionKind Kind) { if (To->getDefinition()) { // Check consistency of superclass. ObjCInterfaceDecl *FromSuper = From->getSuperClass(); if (FromSuper) { if (auto FromSuperOrErr = import(FromSuper)) FromSuper = *FromSuperOrErr; else return FromSuperOrErr.takeError(); } ObjCInterfaceDecl *ToSuper = To->getSuperClass(); if ((bool)FromSuper != (bool)ToSuper || (FromSuper && !declaresSameEntity(FromSuper, ToSuper))) { Importer.ToDiag(To->getLocation(), diag::warn_odr_objc_superclass_inconsistent) << To->getDeclName(); if (ToSuper) Importer.ToDiag(To->getSuperClassLoc(), diag::note_odr_objc_superclass) << To->getSuperClass()->getDeclName(); else Importer.ToDiag(To->getLocation(), diag::note_odr_objc_missing_superclass); if (From->getSuperClass()) Importer.FromDiag(From->getSuperClassLoc(), diag::note_odr_objc_superclass) << From->getSuperClass()->getDeclName(); else Importer.FromDiag(From->getLocation(), diag::note_odr_objc_missing_superclass); } if (shouldForceImportDeclContext(Kind)) if (Error Err = ImportDeclContext(From)) return Err; return Error::success(); } // Start the definition. To->startDefinition(); // If this class has a superclass, import it. if (From->getSuperClass()) { if (auto SuperTInfoOrErr = import(From->getSuperClassTInfo())) To->setSuperClass(*SuperTInfoOrErr); else return SuperTInfoOrErr.takeError(); } // Import protocols SmallVector Protocols; SmallVector ProtocolLocs; ObjCInterfaceDecl::protocol_loc_iterator FromProtoLoc = From->protocol_loc_begin(); for (ObjCInterfaceDecl::protocol_iterator FromProto = From->protocol_begin(), FromProtoEnd = From->protocol_end(); FromProto != FromProtoEnd; ++FromProto, ++FromProtoLoc) { if (Expected ToProtoOrErr = import(*FromProto)) Protocols.push_back(*ToProtoOrErr); else return ToProtoOrErr.takeError(); if (ExpectedSLoc ToProtoLocOrErr = import(*FromProtoLoc)) ProtocolLocs.push_back(*ToProtoLocOrErr); else return ToProtoLocOrErr.takeError(); } // FIXME: If we're merging, make sure that the protocol list is the same. 
To->setProtocolList(Protocols.data(), Protocols.size(), ProtocolLocs.data(), Importer.getToContext()); // Import categories. When the categories themselves are imported, they'll // hook themselves into this interface. for (auto *Cat : From->known_categories()) { auto ToCatOrErr = import(Cat); if (!ToCatOrErr) return ToCatOrErr.takeError(); } // If we have an @implementation, import it as well. if (From->getImplementation()) { if (Expected ToImplOrErr = import(From->getImplementation())) To->setImplementation(*ToImplOrErr); else return ToImplOrErr.takeError(); } if (shouldForceImportDeclContext(Kind)) { // Import all of the members of this class. if (Error Err = ImportDeclContext(From, /*ForceImport=*/true)) return Err; } return Error::success(); } Expected ASTNodeImporter::ImportObjCTypeParamList(ObjCTypeParamList *list) { if (!list) return nullptr; SmallVector toTypeParams; for (auto *fromTypeParam : *list) { if (auto toTypeParamOrErr = import(fromTypeParam)) toTypeParams.push_back(*toTypeParamOrErr); else return toTypeParamOrErr.takeError(); } auto LAngleLocOrErr = import(list->getLAngleLoc()); if (!LAngleLocOrErr) return LAngleLocOrErr.takeError(); auto RAngleLocOrErr = import(list->getRAngleLoc()); if (!RAngleLocOrErr) return RAngleLocOrErr.takeError(); return ObjCTypeParamList::create(Importer.getToContext(), *LAngleLocOrErr, toTypeParams, *RAngleLocOrErr); } ExpectedDecl ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) { // If this class has a definition in the translation unit we're coming from, // but this particular declaration is not that definition, import the // definition and map to that. ObjCInterfaceDecl *Definition = D->getDefinition(); if (Definition && Definition != D) { if (ExpectedDecl ImportedDefOrErr = import(Definition)) return Importer.MapImported(D, *ImportedDefOrErr); else return ImportedDefOrErr.takeError(); } // Import the major distinguishing characteristics of an @interface. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; // Look for an existing interface with the same name. ObjCInterfaceDecl *MergeWithIface = nullptr; auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary)) continue; if ((MergeWithIface = dyn_cast(FoundDecl))) break; } // Create an interface declaration, if one does not already exist. ObjCInterfaceDecl *ToIface = MergeWithIface; if (!ToIface) { ExpectedSLoc AtBeginLocOrErr = import(D->getAtStartLoc()); if (!AtBeginLocOrErr) return AtBeginLocOrErr.takeError(); if (GetImportedOrCreateDecl( ToIface, D, Importer.getToContext(), DC, *AtBeginLocOrErr, Name.getAsIdentifierInfo(), /*TypeParamList=*/nullptr, /*PrevDecl=*/nullptr, Loc, D->isImplicitInterfaceDecl())) return ToIface; ToIface->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToIface); } Importer.MapImported(D, ToIface); // Import the type parameter list after MapImported, to avoid // loops when bringing in their DeclContext. 
if (auto ToPListOrErr = ImportObjCTypeParamList(D->getTypeParamListAsWritten())) ToIface->setTypeParamList(*ToPListOrErr); else return ToPListOrErr.takeError(); if (D->isThisDeclarationADefinition()) if (Error Err = ImportDefinition(D, ToIface)) return std::move(Err); return ToIface; } ExpectedDecl ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) { ObjCCategoryDecl *Category; if (Error Err = importInto(Category, D->getCategoryDecl())) return std::move(Err); ObjCCategoryImplDecl *ToImpl = Category->getImplementation(); if (!ToImpl) { DeclContext *DC, *LexicalDC; if (Error Err = ImportDeclContext(D, DC, LexicalDC)) return std::move(Err); Error Err = Error::success(); auto ToLocation = importChecked(Err, D->getLocation()); auto ToAtStartLoc = importChecked(Err, D->getAtStartLoc()); auto ToCategoryNameLoc = importChecked(Err, D->getCategoryNameLoc()); if (Err) return std::move(Err); if (GetImportedOrCreateDecl( ToImpl, D, Importer.getToContext(), DC, Importer.Import(D->getIdentifier()), Category->getClassInterface(), ToLocation, ToAtStartLoc, ToCategoryNameLoc)) return ToImpl; ToImpl->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToImpl); Category->setImplementation(ToImpl); } Importer.MapImported(D, ToImpl); if (Error Err = ImportDeclContext(D)) return std::move(Err); return ToImpl; } ExpectedDecl ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) { // Find the corresponding interface. ObjCInterfaceDecl *Iface; if (Error Err = importInto(Iface, D->getClassInterface())) return std::move(Err); // Import the superclass, if any. ObjCInterfaceDecl *Super; if (Error Err = importInto(Super, D->getSuperClass())) return std::move(Err); ObjCImplementationDecl *Impl = Iface->getImplementation(); if (!Impl) { // We haven't imported an implementation yet. Create a new @implementation // now. DeclContext *DC, *LexicalDC; if (Error Err = ImportDeclContext(D, DC, LexicalDC)) return std::move(Err); Error Err = Error::success(); auto ToLocation = importChecked(Err, D->getLocation()); auto ToAtStartLoc = importChecked(Err, D->getAtStartLoc()); auto ToSuperClassLoc = importChecked(Err, D->getSuperClassLoc()); auto ToIvarLBraceLoc = importChecked(Err, D->getIvarLBraceLoc()); auto ToIvarRBraceLoc = importChecked(Err, D->getIvarRBraceLoc()); if (Err) return std::move(Err); if (GetImportedOrCreateDecl(Impl, D, Importer.getToContext(), DC, Iface, Super, ToLocation, ToAtStartLoc, ToSuperClassLoc, ToIvarLBraceLoc, ToIvarRBraceLoc)) return Impl; Impl->setLexicalDeclContext(LexicalDC); // Associate the implementation with the class it implements. Iface->setImplementation(Impl); Importer.MapImported(D, Iface->getImplementation()); } else { Importer.MapImported(D, Iface->getImplementation()); // Verify that the existing @implementation has the same superclass. if ((Super && !Impl->getSuperClass()) || (!Super && Impl->getSuperClass()) || (Super && Impl->getSuperClass() && !declaresSameEntity(Super->getCanonicalDecl(), Impl->getSuperClass()))) { Importer.ToDiag(Impl->getLocation(), diag::warn_odr_objc_superclass_inconsistent) << Iface->getDeclName(); // FIXME: It would be nice to have the location of the superclass // below. 
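// --------------------------------------------------------------------------
// The @implementation check above flags a superclass mismatch when exactly
// one side has a superclass, or when both do but they are different entities.
// A standalone sketch of that consistency test, with a hypothetical canonical
// pointer comparison standing in for declaresSameEntity():
namespace odr_sketch {
struct Iface { const Iface *Canonical = this; };

inline bool declaresSameEntity(const Iface *A, const Iface *B) {
  return A && B && A->Canonical == B->Canonical;
}

// True when the two superclasses are consistent: both absent, or both present
// and naming the same entity.
inline bool superclassesMatch(const Iface *FromSuper, const Iface *ToSuper) {
  if ((FromSuper != nullptr) != (ToSuper != nullptr))
    return false;   // one side has a superclass, the other does not
  return !FromSuper || declaresSameEntity(FromSuper, ToSuper);
}
} // namespace odr_sketch
// --------------------------------------------------------------------------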
if (Impl->getSuperClass()) Importer.ToDiag(Impl->getLocation(), diag::note_odr_objc_superclass) << Impl->getSuperClass()->getDeclName(); else Importer.ToDiag(Impl->getLocation(), diag::note_odr_objc_missing_superclass); if (D->getSuperClass()) Importer.FromDiag(D->getLocation(), diag::note_odr_objc_superclass) << D->getSuperClass()->getDeclName(); else Importer.FromDiag(D->getLocation(), diag::note_odr_objc_missing_superclass); return make_error(ImportError::NameConflict); } } // Import all of the members of this @implementation. if (Error Err = ImportDeclContext(D)) return std::move(Err); return Impl; } ExpectedDecl ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) { // Import the major distinguishing characteristics of an @property. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; // Check whether we have already imported this property. auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (auto *FoundProp = dyn_cast(FoundDecl)) { // Check property types. if (!Importer.IsStructurallyEquivalent(D->getType(), FoundProp->getType())) { Importer.ToDiag(Loc, diag::warn_odr_objc_property_type_inconsistent) << Name << D->getType() << FoundProp->getType(); Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here) << FoundProp->getType(); return make_error(ImportError::NameConflict); } // FIXME: Check property attributes, getters, setters, etc.? // Consider these properties to be equivalent. Importer.MapImported(D, FoundProp); return FoundProp; } } Error Err = Error::success(); auto ToType = importChecked(Err, D->getType()); auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo()); auto ToAtLoc = importChecked(Err, D->getAtLoc()); auto ToLParenLoc = importChecked(Err, D->getLParenLoc()); if (Err) return std::move(Err); // Create the new property. 
ObjCPropertyDecl *ToProperty; if (GetImportedOrCreateDecl( ToProperty, D, Importer.getToContext(), DC, Loc, Name.getAsIdentifierInfo(), ToAtLoc, ToLParenLoc, ToType, ToTypeSourceInfo, D->getPropertyImplementation())) return ToProperty; auto ToGetterName = importChecked(Err, D->getGetterName()); auto ToSetterName = importChecked(Err, D->getSetterName()); auto ToGetterNameLoc = importChecked(Err, D->getGetterNameLoc()); auto ToSetterNameLoc = importChecked(Err, D->getSetterNameLoc()); auto ToGetterMethodDecl = importChecked(Err, D->getGetterMethodDecl()); auto ToSetterMethodDecl = importChecked(Err, D->getSetterMethodDecl()); auto ToPropertyIvarDecl = importChecked(Err, D->getPropertyIvarDecl()); if (Err) return std::move(Err); ToProperty->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToProperty); ToProperty->setPropertyAttributes(D->getPropertyAttributes()); ToProperty->setPropertyAttributesAsWritten( D->getPropertyAttributesAsWritten()); ToProperty->setGetterName(ToGetterName, ToGetterNameLoc); ToProperty->setSetterName(ToSetterName, ToSetterNameLoc); ToProperty->setGetterMethodDecl(ToGetterMethodDecl); ToProperty->setSetterMethodDecl(ToSetterMethodDecl); ToProperty->setPropertyIvarDecl(ToPropertyIvarDecl); return ToProperty; } ExpectedDecl ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) { ObjCPropertyDecl *Property; if (Error Err = importInto(Property, D->getPropertyDecl())) return std::move(Err); DeclContext *DC, *LexicalDC; if (Error Err = ImportDeclContext(D, DC, LexicalDC)) return std::move(Err); auto *InImpl = cast(LexicalDC); // Import the ivar (for an @synthesize). ObjCIvarDecl *Ivar = nullptr; if (Error Err = importInto(Ivar, D->getPropertyIvarDecl())) return std::move(Err); ObjCPropertyImplDecl *ToImpl = InImpl->FindPropertyImplDecl(Property->getIdentifier(), Property->getQueryKind()); if (!ToImpl) { Error Err = Error::success(); auto ToBeginLoc = importChecked(Err, D->getBeginLoc()); auto ToLocation = importChecked(Err, D->getLocation()); auto ToPropertyIvarDeclLoc = importChecked(Err, D->getPropertyIvarDeclLoc()); if (Err) return std::move(Err); if (GetImportedOrCreateDecl(ToImpl, D, Importer.getToContext(), DC, ToBeginLoc, ToLocation, Property, D->getPropertyImplementation(), Ivar, ToPropertyIvarDeclLoc)) return ToImpl; ToImpl->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToImpl); } else { // Check that we have the same kind of property implementation (@synthesize // vs. @dynamic). 
if (D->getPropertyImplementation() != ToImpl->getPropertyImplementation()) { Importer.ToDiag(ToImpl->getLocation(), diag::warn_odr_objc_property_impl_kind_inconsistent) << Property->getDeclName() << (ToImpl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic); Importer.FromDiag(D->getLocation(), diag::note_odr_objc_property_impl_kind) << D->getPropertyDecl()->getDeclName() << (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic); return make_error(ImportError::NameConflict); } // For @synthesize, check that we have the same if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize && Ivar != ToImpl->getPropertyIvarDecl()) { Importer.ToDiag(ToImpl->getPropertyIvarDeclLoc(), diag::warn_odr_objc_synthesize_ivar_inconsistent) << Property->getDeclName() << ToImpl->getPropertyIvarDecl()->getDeclName() << Ivar->getDeclName(); Importer.FromDiag(D->getPropertyIvarDeclLoc(), diag::note_odr_objc_synthesize_ivar_here) << D->getPropertyIvarDecl()->getDeclName(); return make_error(ImportError::NameConflict); } // Merge the existing implementation with the new implementation. Importer.MapImported(D, ToImpl); } return ToImpl; } ExpectedDecl ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) { // For template arguments, we adopt the translation unit as our declaration // context. This context will be fixed when the actual template declaration // is created. // FIXME: Import default argument and constraint expression. ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc()); if (!BeginLocOrErr) return BeginLocOrErr.takeError(); ExpectedSLoc LocationOrErr = import(D->getLocation()); if (!LocationOrErr) return LocationOrErr.takeError(); TemplateTypeParmDecl *ToD = nullptr; if (GetImportedOrCreateDecl( ToD, D, Importer.getToContext(), Importer.getToContext().getTranslationUnitDecl(), *BeginLocOrErr, *LocationOrErr, D->getDepth(), D->getIndex(), Importer.Import(D->getIdentifier()), D->wasDeclaredWithTypename(), D->isParameterPack(), D->hasTypeConstraint())) return ToD; // Import the type-constraint if (const TypeConstraint *TC = D->getTypeConstraint()) { Error Err = Error::success(); auto ToNNS = importChecked(Err, TC->getNestedNameSpecifierLoc()); auto ToName = importChecked(Err, TC->getConceptNameInfo().getName()); auto ToNameLoc = importChecked(Err, TC->getConceptNameInfo().getLoc()); auto ToFoundDecl = importChecked(Err, TC->getFoundDecl()); auto ToNamedConcept = importChecked(Err, TC->getNamedConcept()); auto ToIDC = importChecked(Err, TC->getImmediatelyDeclaredConstraint()); if (Err) return std::move(Err); TemplateArgumentListInfo ToTAInfo; const auto *ASTTemplateArgs = TC->getTemplateArgsAsWritten(); if (ASTTemplateArgs) if (Error Err = ImportTemplateArgumentListInfo(*ASTTemplateArgs, ToTAInfo)) return std::move(Err); ToD->setTypeConstraint(ToNNS, DeclarationNameInfo(ToName, ToNameLoc), ToFoundDecl, ToNamedConcept, ASTTemplateArgs ? ASTTemplateArgumentListInfo::Create(Importer.getToContext(), ToTAInfo) : nullptr, ToIDC); } return ToD; } ExpectedDecl ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) { Error Err = Error::success(); auto ToDeclName = importChecked(Err, D->getDeclName()); auto ToLocation = importChecked(Err, D->getLocation()); auto ToType = importChecked(Err, D->getType()); auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo()); auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart()); if (Err) return std::move(Err); // FIXME: Import default argument. 
NonTypeTemplateParmDecl *ToD = nullptr; (void)GetImportedOrCreateDecl( ToD, D, Importer.getToContext(), Importer.getToContext().getTranslationUnitDecl(), ToInnerLocStart, ToLocation, D->getDepth(), D->getPosition(), ToDeclName.getAsIdentifierInfo(), ToType, D->isParameterPack(), ToTypeSourceInfo); return ToD; } ExpectedDecl ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) { // Import the name of this declaration. auto NameOrErr = import(D->getDeclName()); if (!NameOrErr) return NameOrErr.takeError(); // Import the location of this declaration. ExpectedSLoc LocationOrErr = import(D->getLocation()); if (!LocationOrErr) return LocationOrErr.takeError(); // Import template parameters. auto TemplateParamsOrErr = import(D->getTemplateParameters()); if (!TemplateParamsOrErr) return TemplateParamsOrErr.takeError(); // FIXME: Import default argument. TemplateTemplateParmDecl *ToD = nullptr; (void)GetImportedOrCreateDecl( ToD, D, Importer.getToContext(), Importer.getToContext().getTranslationUnitDecl(), *LocationOrErr, D->getDepth(), D->getPosition(), D->isParameterPack(), (*NameOrErr).getAsIdentifierInfo(), *TemplateParamsOrErr); return ToD; } // Returns the definition for a (forward) declaration of a TemplateDecl, if // it has any definition in the redecl chain. template static auto getTemplateDefinition(T *D) -> T * { assert(D->getTemplatedDecl() && "Should be called on templates only"); auto *ToTemplatedDef = D->getTemplatedDecl()->getDefinition(); if (!ToTemplatedDef) return nullptr; auto *TemplateWithDef = ToTemplatedDef->getDescribedTemplate(); return cast_or_null(TemplateWithDef); } ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) { // Import the major distinguishing characteristics of this class template. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; ClassTemplateDecl *FoundByLookup = nullptr; // We may already have a template of the same name; try to find and match it. if (!DC->isFunctionOrMethod()) { SmallVector ConflictingDecls; auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary | Decl::IDNS_TagFriend)) continue; Decl *Found = FoundDecl; auto *FoundTemplate = dyn_cast(Found); if (FoundTemplate) { if (!hasSameVisibilityContextAndLinkage(FoundTemplate, D)) continue; if (IsStructuralMatch(D, FoundTemplate)) { ClassTemplateDecl *TemplateWithDef = getTemplateDefinition(FoundTemplate); if (D->isThisDeclarationADefinition() && TemplateWithDef) return Importer.MapImported(D, TemplateWithDef); if (!FoundByLookup) FoundByLookup = FoundTemplate; // Search in all matches because there may be multiple decl chains, // see ASTTests test ImportExistingFriendClassTemplateDef. continue; } ConflictingDecls.push_back(FoundDecl); } } if (!ConflictingDecls.empty()) { ExpectedName NameOrErr = Importer.HandleNameConflict( Name, DC, Decl::IDNS_Ordinary, ConflictingDecls.data(), ConflictingDecls.size()); if (NameOrErr) Name = NameOrErr.get(); else return NameOrErr.takeError(); } } CXXRecordDecl *FromTemplated = D->getTemplatedDecl(); // Create the declaration that is being templated. CXXRecordDecl *ToTemplated; if (Error Err = importInto(ToTemplated, FromTemplated)) return std::move(Err); // Create the class template declaration itself. 
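// --------------------------------------------------------------------------
// getTemplateDefinition() above hops from a template declaration to its
// templated class, asks that class for the definition in its redeclaration
// chain, and returns the template that describes the definition (or null when
// only forward declarations exist).  A standalone sketch of those two hops
// with hypothetical record/template structs instead of the real clang types:
namespace template_sketch {
struct RecordDecl;

struct TemplateDecl {
  RecordDecl *Templated = nullptr;   // the class this template describes
};

struct RecordDecl {
  RecordDecl *Definition = nullptr;  // defining decl in the redecl chain
  TemplateDecl *DescribedTemplate = nullptr;
};

// Returns the template whose templated class is the definition, or null when
// the chain only contains forward declarations.
inline TemplateDecl *getTemplateDefinition(TemplateDecl *D) {
  RecordDecl *Def = D->Templated ? D->Templated->Definition : nullptr;
  return Def ? Def->DescribedTemplate : nullptr;
}
} // namespace template_sketch
// --------------------------------------------------------------------------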
auto TemplateParamsOrErr = import(D->getTemplateParameters()); if (!TemplateParamsOrErr) return TemplateParamsOrErr.takeError(); ClassTemplateDecl *D2; if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC, Loc, Name, *TemplateParamsOrErr, ToTemplated)) return D2; ToTemplated->setDescribedClassTemplate(D2); D2->setAccess(D->getAccess()); D2->setLexicalDeclContext(LexicalDC); addDeclToContexts(D, D2); if (FoundByLookup) { auto *Recent = const_cast(FoundByLookup->getMostRecentDecl()); // It is possible that during the import of the class template definition // we start the import of a fwd friend decl of the very same class template // and we add the fwd friend decl to the lookup table. But the ToTemplated // had been created earlier and by that time the lookup could not find // anything existing, so it has no previous decl. Later, (still during the // import of the fwd friend decl) we start to import the definition again // and this time the lookup finds the previous fwd friend class template. // In this case we must set up the previous decl for the templated decl. if (!ToTemplated->getPreviousDecl()) { assert(FoundByLookup->getTemplatedDecl() && "Found decl must have its templated decl set"); CXXRecordDecl *PrevTemplated = FoundByLookup->getTemplatedDecl()->getMostRecentDecl(); if (ToTemplated != PrevTemplated) ToTemplated->setPreviousDecl(PrevTemplated); } D2->setPreviousDecl(Recent); } if (FromTemplated->isCompleteDefinition() && !ToTemplated->isCompleteDefinition()) { // FIXME: Import definition! } return D2; } ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl( ClassTemplateSpecializationDecl *D) { ClassTemplateDecl *ClassTemplate; if (Error Err = importInto(ClassTemplate, D->getSpecializedTemplate())) return std::move(Err); // Import the context of this declaration. DeclContext *DC, *LexicalDC; if (Error Err = ImportDeclContext(D, DC, LexicalDC)) return std::move(Err); // Import template arguments. SmallVector TemplateArgs; if (Error Err = ImportTemplateArguments( D->getTemplateArgs().data(), D->getTemplateArgs().size(), TemplateArgs)) return std::move(Err); // Try to find an existing specialization with these template arguments and // template parameter list. void *InsertPos = nullptr; ClassTemplateSpecializationDecl *PrevDecl = nullptr; ClassTemplatePartialSpecializationDecl *PartialSpec = dyn_cast(D); // Import template parameters. TemplateParameterList *ToTPList = nullptr; if (PartialSpec) { auto ToTPListOrErr = import(PartialSpec->getTemplateParameters()); if (!ToTPListOrErr) return ToTPListOrErr.takeError(); ToTPList = *ToTPListOrErr; PrevDecl = ClassTemplate->findPartialSpecialization(TemplateArgs, *ToTPListOrErr, InsertPos); } else PrevDecl = ClassTemplate->findSpecialization(TemplateArgs, InsertPos); if (PrevDecl) { if (IsStructuralMatch(D, PrevDecl)) { if (D->isThisDeclarationADefinition() && PrevDecl->getDefinition()) { Importer.MapImported(D, PrevDecl->getDefinition()); // Import those default field initializers which have been // instantiated in the "From" context, but not in the "To" context. for (auto *FromField : D->fields()) { auto ToOrErr = import(FromField); if (!ToOrErr) return ToOrErr.takeError(); } // Import those methods which have been instantiated in the // "From" context, but not in the "To" context. for (CXXMethodDecl *FromM : D->methods()) { auto ToOrErr = import(FromM); if (!ToOrErr) return ToOrErr.takeError(); } // TODO Import instantiated default arguments. // TODO Import instantiated exception specifications. 
// // Generally, ASTCommon.h/DeclUpdateKind enum gives a very good hint // what else could be fused during an AST merge. return PrevDecl; } } else { // ODR violation. // FIXME HandleNameConflict return make_error(ImportError::NameConflict); } } // Import the location of this declaration. ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc()); if (!BeginLocOrErr) return BeginLocOrErr.takeError(); ExpectedSLoc IdLocOrErr = import(D->getLocation()); if (!IdLocOrErr) return IdLocOrErr.takeError(); // Create the specialization. ClassTemplateSpecializationDecl *D2 = nullptr; if (PartialSpec) { // Import TemplateArgumentListInfo. TemplateArgumentListInfo ToTAInfo; const auto &ASTTemplateArgs = *PartialSpec->getTemplateArgsAsWritten(); if (Error Err = ImportTemplateArgumentListInfo(ASTTemplateArgs, ToTAInfo)) return std::move(Err); QualType CanonInjType; if (Error Err = importInto( CanonInjType, PartialSpec->getInjectedSpecializationType())) return std::move(Err); CanonInjType = CanonInjType.getCanonicalType(); if (GetImportedOrCreateDecl( D2, D, Importer.getToContext(), D->getTagKind(), DC, *BeginLocOrErr, *IdLocOrErr, ToTPList, ClassTemplate, llvm::makeArrayRef(TemplateArgs.data(), TemplateArgs.size()), ToTAInfo, CanonInjType, cast_or_null(PrevDecl))) return D2; // Update InsertPos, because preceding import calls may have invalidated // it by adding new specializations. auto *PartSpec2 = cast(D2); if (!ClassTemplate->findPartialSpecialization(TemplateArgs, ToTPList, InsertPos)) // Add this partial specialization to the class template. ClassTemplate->AddPartialSpecialization(PartSpec2, InsertPos); } else { // Not a partial specialization. if (GetImportedOrCreateDecl( D2, D, Importer.getToContext(), D->getTagKind(), DC, *BeginLocOrErr, *IdLocOrErr, ClassTemplate, TemplateArgs, PrevDecl)) return D2; // Update InsertPos, because preceding import calls may have invalidated // it by adding new specializations. if (!ClassTemplate->findSpecialization(TemplateArgs, InsertPos)) // Add this specialization to the class template. ClassTemplate->AddSpecialization(D2, InsertPos); } D2->setSpecializationKind(D->getSpecializationKind()); // Set the context of this specialization/instantiation. D2->setLexicalDeclContext(LexicalDC); // Add to the DC only if it was an explicit specialization/instantiation. if (D2->isExplicitInstantiationOrSpecialization()) { LexicalDC->addDeclInternal(D2); } if (auto BraceRangeOrErr = import(D->getBraceRange())) D2->setBraceRange(*BraceRangeOrErr); else return BraceRangeOrErr.takeError(); // Import the qualifier, if any. 
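// --------------------------------------------------------------------------
// Note how AddSpecialization()/AddPartialSpecialization() above are only
// called after re-running findSpecialization(): the InsertPos hint obtained
// earlier can be stale because the intervening imports may themselves have
// added specializations.  A standalone sketch of that "re-validate the
// insertion hint right before inserting" pattern, using a sorted vector and
// an index as a stand-in for the real folding-set API:
#include <algorithm>
#include <cstddef>
#include <vector>

namespace insertpos_sketch {
// Finds where Key would live in the sorted container and reports whether it
// is already present; Pos plays the role of InsertPos.
inline bool find(const std::vector<int> &Sorted, int Key, std::size_t &Pos) {
  auto It = std::lower_bound(Sorted.begin(), Sorted.end(), Key);
  Pos = static_cast<std::size_t>(It - Sorted.begin());
  return It != Sorted.end() && *It == Key;
}

// Inserts Key unless it is already present.  The position is recomputed right
// before inserting, because work done since the first lookup may have grown
// the container and made an older hint stale.
inline void insertUnique(std::vector<int> &Sorted, int Key) {
  std::size_t Pos;
  if (!find(Sorted, Key, Pos))
    Sorted.insert(Sorted.begin() + Pos, Key);
}
} // namespace insertpos_sketch
// --------------------------------------------------------------------------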
if (auto LocOrErr = import(D->getQualifierLoc())) D2->setQualifierInfo(*LocOrErr); else return LocOrErr.takeError(); if (auto *TSI = D->getTypeAsWritten()) { if (auto TInfoOrErr = import(TSI)) D2->setTypeAsWritten(*TInfoOrErr); else return TInfoOrErr.takeError(); if (auto LocOrErr = import(D->getTemplateKeywordLoc())) D2->setTemplateKeywordLoc(*LocOrErr); else return LocOrErr.takeError(); if (auto LocOrErr = import(D->getExternLoc())) D2->setExternLoc(*LocOrErr); else return LocOrErr.takeError(); } if (D->getPointOfInstantiation().isValid()) { if (auto POIOrErr = import(D->getPointOfInstantiation())) D2->setPointOfInstantiation(*POIOrErr); else return POIOrErr.takeError(); } D2->setTemplateSpecializationKind(D->getTemplateSpecializationKind()); if (D->isCompleteDefinition()) if (Error Err = ImportDefinition(D, D2)) return std::move(Err); return D2; } ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) { // Import the major distinguishing characteristics of this variable template. DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; // We may already have a template of the same name; try to find and match it. assert(!DC->isFunctionOrMethod() && "Variable templates cannot be declared at function scope"); SmallVector ConflictingDecls; auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); VarTemplateDecl *FoundByLookup = nullptr; for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary)) continue; if (VarTemplateDecl *FoundTemplate = dyn_cast(FoundDecl)) { // Use the templated decl, some linkage flags are set only there. if (!hasSameVisibilityContextAndLinkage(FoundTemplate->getTemplatedDecl(), D->getTemplatedDecl())) continue; if (IsStructuralMatch(D, FoundTemplate)) { // The Decl in the "From" context has a definition, but in the // "To" context we already have a definition. VarTemplateDecl *FoundDef = getTemplateDefinition(FoundTemplate); if (D->isThisDeclarationADefinition() && FoundDef) // FIXME Check for ODR error if the two definitions have // different initializers? return Importer.MapImported(D, FoundDef); FoundByLookup = FoundTemplate; break; } ConflictingDecls.push_back(FoundDecl); } } if (!ConflictingDecls.empty()) { ExpectedName NameOrErr = Importer.HandleNameConflict( Name, DC, Decl::IDNS_Ordinary, ConflictingDecls.data(), ConflictingDecls.size()); if (NameOrErr) Name = NameOrErr.get(); else return NameOrErr.takeError(); } VarDecl *DTemplated = D->getTemplatedDecl(); // Import the type. // FIXME: Value not used? ExpectedType TypeOrErr = import(DTemplated->getType()); if (!TypeOrErr) return TypeOrErr.takeError(); // Create the declaration that is being templated. VarDecl *ToTemplated; if (Error Err = importInto(ToTemplated, DTemplated)) return std::move(Err); // Create the variable template declaration itself. 
auto TemplateParamsOrErr = import(D->getTemplateParameters()); if (!TemplateParamsOrErr) return TemplateParamsOrErr.takeError(); VarTemplateDecl *ToVarTD; if (GetImportedOrCreateDecl(ToVarTD, D, Importer.getToContext(), DC, Loc, Name, *TemplateParamsOrErr, ToTemplated)) return ToVarTD; ToTemplated->setDescribedVarTemplate(ToVarTD); ToVarTD->setAccess(D->getAccess()); ToVarTD->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToVarTD); if (FoundByLookup) { auto *Recent = const_cast(FoundByLookup->getMostRecentDecl()); if (!ToTemplated->getPreviousDecl()) { auto *PrevTemplated = FoundByLookup->getTemplatedDecl()->getMostRecentDecl(); if (ToTemplated != PrevTemplated) ToTemplated->setPreviousDecl(PrevTemplated); } ToVarTD->setPreviousDecl(Recent); } if (DTemplated->isThisDeclarationADefinition() && !ToTemplated->isThisDeclarationADefinition()) { // FIXME: Import definition! } return ToVarTD; } ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *D) { // If this record has a definition in the translation unit we're coming from, // but this particular declaration is not that definition, import the // definition and map to that. VarDecl *Definition = D->getDefinition(); if (Definition && Definition != D) { if (ExpectedDecl ImportedDefOrErr = import(Definition)) return Importer.MapImported(D, *ImportedDefOrErr); else return ImportedDefOrErr.takeError(); } VarTemplateDecl *VarTemplate = nullptr; if (Error Err = importInto(VarTemplate, D->getSpecializedTemplate())) return std::move(Err); // Import the context of this declaration. DeclContext *DC, *LexicalDC; if (Error Err = ImportDeclContext(D, DC, LexicalDC)) return std::move(Err); // Import the location of this declaration. ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc()); if (!BeginLocOrErr) return BeginLocOrErr.takeError(); auto IdLocOrErr = import(D->getLocation()); if (!IdLocOrErr) return IdLocOrErr.takeError(); // Import template arguments. SmallVector TemplateArgs; if (Error Err = ImportTemplateArguments( D->getTemplateArgs().data(), D->getTemplateArgs().size(), TemplateArgs)) return std::move(Err); // Try to find an existing specialization with these template arguments. void *InsertPos = nullptr; VarTemplateSpecializationDecl *D2 = VarTemplate->findSpecialization( TemplateArgs, InsertPos); if (D2) { // We already have a variable template specialization with these template // arguments. // FIXME: Check for specialization vs. instantiation errors. if (VarDecl *FoundDef = D2->getDefinition()) { if (!D->isThisDeclarationADefinition() || IsStructuralMatch(D, FoundDef)) { // The record types structurally match, or the "from" translation // unit only had a forward declaration anyway; call it the same // variable. return Importer.MapImported(D, FoundDef); } } } else { // Import the type. QualType T; if (Error Err = importInto(T, D->getType())) return std::move(Err); auto TInfoOrErr = import(D->getTypeSourceInfo()); if (!TInfoOrErr) return TInfoOrErr.takeError(); TemplateArgumentListInfo ToTAInfo; if (Error Err = ImportTemplateArgumentListInfo( D->getTemplateArgsInfo(), ToTAInfo)) return std::move(Err); using PartVarSpecDecl = VarTemplatePartialSpecializationDecl; // Create a new specialization. if (auto *FromPartial = dyn_cast(D)) { // Import TemplateArgumentListInfo TemplateArgumentListInfo ArgInfos; const auto *FromTAArgsAsWritten = FromPartial->getTemplateArgsAsWritten(); // NOTE: FromTAArgsAsWritten and template parameter list are non-null. 
if (Error Err = ImportTemplateArgumentListInfo( *FromTAArgsAsWritten, ArgInfos)) return std::move(Err); auto ToTPListOrErr = import(FromPartial->getTemplateParameters()); if (!ToTPListOrErr) return ToTPListOrErr.takeError(); PartVarSpecDecl *ToPartial; if (GetImportedOrCreateDecl(ToPartial, D, Importer.getToContext(), DC, *BeginLocOrErr, *IdLocOrErr, *ToTPListOrErr, VarTemplate, T, *TInfoOrErr, D->getStorageClass(), TemplateArgs, ArgInfos)) return ToPartial; if (Expected ToInstOrErr = import( FromPartial->getInstantiatedFromMember())) ToPartial->setInstantiatedFromMember(*ToInstOrErr); else return ToInstOrErr.takeError(); if (FromPartial->isMemberSpecialization()) ToPartial->setMemberSpecialization(); D2 = ToPartial; } else { // Full specialization if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC, *BeginLocOrErr, *IdLocOrErr, VarTemplate, T, *TInfoOrErr, D->getStorageClass(), TemplateArgs)) return D2; } if (D->getPointOfInstantiation().isValid()) { if (ExpectedSLoc POIOrErr = import(D->getPointOfInstantiation())) D2->setPointOfInstantiation(*POIOrErr); else return POIOrErr.takeError(); } D2->setSpecializationKind(D->getSpecializationKind()); D2->setTemplateArgsInfo(ToTAInfo); // Add this specialization to the class template. VarTemplate->AddSpecialization(D2, InsertPos); // Import the qualifier, if any. if (auto LocOrErr = import(D->getQualifierLoc())) D2->setQualifierInfo(*LocOrErr); else return LocOrErr.takeError(); if (D->isConstexpr()) D2->setConstexpr(true); // Add the specialization to this context. D2->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(D2); D2->setAccess(D->getAccess()); } if (Error Err = ImportInitializer(D, D2)) return std::move(Err); return D2; } ExpectedDecl ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) { DeclContext *DC, *LexicalDC; DeclarationName Name; SourceLocation Loc; NamedDecl *ToD; if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) return std::move(Err); if (ToD) return ToD; const FunctionTemplateDecl *FoundByLookup = nullptr; // Try to find a function in our own ("to") context with the same name, same // type, and in the same context as the function we're importing. // FIXME Split this into a separate function. 
if (!LexicalDC->isFunctionOrMethod()) { unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_OrdinaryFriend; auto FoundDecls = Importer.findDeclsInToCtx(DC, Name); for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(IDNS)) continue; if (auto *FoundTemplate = dyn_cast(FoundDecl)) { if (!hasSameVisibilityContextAndLinkage(FoundTemplate, D)) continue; if (IsStructuralMatch(D, FoundTemplate)) { FunctionTemplateDecl *TemplateWithDef = getTemplateDefinition(FoundTemplate); if (D->isThisDeclarationADefinition() && TemplateWithDef) return Importer.MapImported(D, TemplateWithDef); FoundByLookup = FoundTemplate; break; // TODO: handle conflicting names } } } } auto ParamsOrErr = import(D->getTemplateParameters()); if (!ParamsOrErr) return ParamsOrErr.takeError(); FunctionDecl *TemplatedFD; if (Error Err = importInto(TemplatedFD, D->getTemplatedDecl())) return std::move(Err); FunctionTemplateDecl *ToFunc; if (GetImportedOrCreateDecl(ToFunc, D, Importer.getToContext(), DC, Loc, Name, *ParamsOrErr, TemplatedFD)) return ToFunc; TemplatedFD->setDescribedFunctionTemplate(ToFunc); ToFunc->setAccess(D->getAccess()); ToFunc->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(ToFunc); if (FoundByLookup) { auto *Recent = const_cast(FoundByLookup->getMostRecentDecl()); if (!TemplatedFD->getPreviousDecl()) { assert(FoundByLookup->getTemplatedDecl() && "Found decl must have its templated decl set"); auto *PrevTemplated = FoundByLookup->getTemplatedDecl()->getMostRecentDecl(); if (TemplatedFD != PrevTemplated) TemplatedFD->setPreviousDecl(PrevTemplated); } ToFunc->setPreviousDecl(Recent); } return ToFunc; } //---------------------------------------------------------------------------- // Import Statements //---------------------------------------------------------------------------- ExpectedStmt ASTNodeImporter::VisitStmt(Stmt *S) { Importer.FromDiag(S->getBeginLoc(), diag::err_unsupported_ast_node) << S->getStmtClassName(); return make_error(ImportError::UnsupportedConstruct); } ExpectedStmt ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) { if (Importer.returnWithErrorInTest()) return make_error(ImportError::UnsupportedConstruct); SmallVector Names; for (unsigned I = 0, E = S->getNumOutputs(); I != E; I++) { IdentifierInfo *ToII = Importer.Import(S->getOutputIdentifier(I)); // ToII is nullptr when no symbolic name is given for output operand // see ParseStmtAsm::ParseAsmOperandsOpt Names.push_back(ToII); } for (unsigned I = 0, E = S->getNumInputs(); I != E; I++) { IdentifierInfo *ToII = Importer.Import(S->getInputIdentifier(I)); // ToII is nullptr when no symbolic name is given for input operand // see ParseStmtAsm::ParseAsmOperandsOpt Names.push_back(ToII); } SmallVector Clobbers; for (unsigned I = 0, E = S->getNumClobbers(); I != E; I++) { if (auto ClobberOrErr = import(S->getClobberStringLiteral(I))) Clobbers.push_back(*ClobberOrErr); else return ClobberOrErr.takeError(); } SmallVector Constraints; for (unsigned I = 0, E = S->getNumOutputs(); I != E; I++) { if (auto OutputOrErr = import(S->getOutputConstraintLiteral(I))) Constraints.push_back(*OutputOrErr); else return OutputOrErr.takeError(); } for (unsigned I = 0, E = S->getNumInputs(); I != E; I++) { if (auto InputOrErr = import(S->getInputConstraintLiteral(I))) Constraints.push_back(*InputOrErr); else return InputOrErr.takeError(); } SmallVector Exprs(S->getNumOutputs() + S->getNumInputs() + S->getNumLabels()); if (Error Err = ImportContainerChecked(S->outputs(), Exprs)) return std::move(Err); if (Error Err = 
ImportArrayChecked(S->inputs(), Exprs.begin() + S->getNumOutputs())) return std::move(Err); if (Error Err = ImportArrayChecked( S->labels(), Exprs.begin() + S->getNumOutputs() + S->getNumInputs())) return std::move(Err); ExpectedSLoc AsmLocOrErr = import(S->getAsmLoc()); if (!AsmLocOrErr) return AsmLocOrErr.takeError(); auto AsmStrOrErr = import(S->getAsmString()); if (!AsmStrOrErr) return AsmStrOrErr.takeError(); ExpectedSLoc RParenLocOrErr = import(S->getRParenLoc()); if (!RParenLocOrErr) return RParenLocOrErr.takeError(); return new (Importer.getToContext()) GCCAsmStmt( Importer.getToContext(), *AsmLocOrErr, S->isSimple(), S->isVolatile(), S->getNumOutputs(), S->getNumInputs(), Names.data(), Constraints.data(), Exprs.data(), *AsmStrOrErr, S->getNumClobbers(), Clobbers.data(), S->getNumLabels(), *RParenLocOrErr); } ExpectedStmt ASTNodeImporter::VisitDeclStmt(DeclStmt *S) { Error Err = Error::success(); auto ToDG = importChecked(Err, S->getDeclGroup()); auto ToBeginLoc = importChecked(Err, S->getBeginLoc()); auto ToEndLoc = importChecked(Err, S->getEndLoc()); if (Err) return std::move(Err); return new (Importer.getToContext()) DeclStmt(ToDG, ToBeginLoc, ToEndLoc); } ExpectedStmt ASTNodeImporter::VisitNullStmt(NullStmt *S) { ExpectedSLoc ToSemiLocOrErr = import(S->getSemiLoc()); if (!ToSemiLocOrErr) return ToSemiLocOrErr.takeError(); return new (Importer.getToContext()) NullStmt( *ToSemiLocOrErr, S->hasLeadingEmptyMacro()); } ExpectedStmt ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) { SmallVector ToStmts(S->size()); if (Error Err = ImportContainerChecked(S->body(), ToStmts)) return std::move(Err); ExpectedSLoc ToLBracLocOrErr = import(S->getLBracLoc()); if (!ToLBracLocOrErr) return ToLBracLocOrErr.takeError(); ExpectedSLoc ToRBracLocOrErr = import(S->getRBracLoc()); if (!ToRBracLocOrErr) return ToRBracLocOrErr.takeError(); return CompoundStmt::Create( Importer.getToContext(), ToStmts, *ToLBracLocOrErr, *ToRBracLocOrErr); } ExpectedStmt ASTNodeImporter::VisitCaseStmt(CaseStmt *S) { Error Err = Error::success(); auto ToLHS = importChecked(Err, S->getLHS()); auto ToRHS = importChecked(Err, S->getRHS()); auto ToSubStmt = importChecked(Err, S->getSubStmt()); auto ToCaseLoc = importChecked(Err, S->getCaseLoc()); auto ToEllipsisLoc = importChecked(Err, S->getEllipsisLoc()); auto ToColonLoc = importChecked(Err, S->getColonLoc()); if (Err) return std::move(Err); auto *ToStmt = CaseStmt::Create(Importer.getToContext(), ToLHS, ToRHS, ToCaseLoc, ToEllipsisLoc, ToColonLoc); ToStmt->setSubStmt(ToSubStmt); return ToStmt; } ExpectedStmt ASTNodeImporter::VisitDefaultStmt(DefaultStmt *S) { Error Err = Error::success(); auto ToDefaultLoc = importChecked(Err, S->getDefaultLoc()); auto ToColonLoc = importChecked(Err, S->getColonLoc()); auto ToSubStmt = importChecked(Err, S->getSubStmt()); if (Err) return std::move(Err); return new (Importer.getToContext()) DefaultStmt( ToDefaultLoc, ToColonLoc, ToSubStmt); } ExpectedStmt ASTNodeImporter::VisitLabelStmt(LabelStmt *S) { Error Err = Error::success(); auto ToIdentLoc = importChecked(Err, S->getIdentLoc()); auto ToLabelDecl = importChecked(Err, S->getDecl()); auto ToSubStmt = importChecked(Err, S->getSubStmt()); if (Err) return std::move(Err); return new (Importer.getToContext()) LabelStmt( ToIdentLoc, ToLabelDecl, ToSubStmt); } ExpectedStmt ASTNodeImporter::VisitAttributedStmt(AttributedStmt *S) { ExpectedSLoc ToAttrLocOrErr = import(S->getAttrLoc()); if (!ToAttrLocOrErr) return ToAttrLocOrErr.takeError(); ArrayRef FromAttrs(S->getAttrs()); SmallVector 
ToAttrs(FromAttrs.size()); if (Error Err = ImportContainerChecked(FromAttrs, ToAttrs)) return std::move(Err); ExpectedStmt ToSubStmtOrErr = import(S->getSubStmt()); if (!ToSubStmtOrErr) return ToSubStmtOrErr.takeError(); return AttributedStmt::Create( Importer.getToContext(), *ToAttrLocOrErr, ToAttrs, *ToSubStmtOrErr); } ExpectedStmt ASTNodeImporter::VisitIfStmt(IfStmt *S) { Error Err = Error::success(); auto ToIfLoc = importChecked(Err, S->getIfLoc()); auto ToInit = importChecked(Err, S->getInit()); auto ToConditionVariable = importChecked(Err, S->getConditionVariable()); auto ToCond = importChecked(Err, S->getCond()); auto ToThen = importChecked(Err, S->getThen()); auto ToElseLoc = importChecked(Err, S->getElseLoc()); auto ToElse = importChecked(Err, S->getElse()); if (Err) return std::move(Err); return IfStmt::Create(Importer.getToContext(), ToIfLoc, S->isConstexpr(), ToInit, ToConditionVariable, ToCond, ToThen, ToElseLoc, ToElse); } ExpectedStmt ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) { Error Err = Error::success(); auto ToInit = importChecked(Err, S->getInit()); auto ToConditionVariable = importChecked(Err, S->getConditionVariable()); auto ToCond = importChecked(Err, S->getCond()); auto ToBody = importChecked(Err, S->getBody()); auto ToSwitchLoc = importChecked(Err, S->getSwitchLoc()); if (Err) return std::move(Err); auto *ToStmt = SwitchStmt::Create(Importer.getToContext(), ToInit, ToConditionVariable, ToCond); ToStmt->setBody(ToBody); ToStmt->setSwitchLoc(ToSwitchLoc); // Now we have to re-chain the cases. SwitchCase *LastChainedSwitchCase = nullptr; for (SwitchCase *SC = S->getSwitchCaseList(); SC != nullptr; SC = SC->getNextSwitchCase()) { Expected ToSCOrErr = import(SC); if (!ToSCOrErr) return ToSCOrErr.takeError(); if (LastChainedSwitchCase) LastChainedSwitchCase->setNextSwitchCase(*ToSCOrErr); else ToStmt->setSwitchCaseList(*ToSCOrErr); LastChainedSwitchCase = *ToSCOrErr; } return ToStmt; } ExpectedStmt ASTNodeImporter::VisitWhileStmt(WhileStmt *S) { Error Err = Error::success(); auto ToConditionVariable = importChecked(Err, S->getConditionVariable()); auto ToCond = importChecked(Err, S->getCond()); auto ToBody = importChecked(Err, S->getBody()); auto ToWhileLoc = importChecked(Err, S->getWhileLoc()); auto ToLParenLoc = importChecked(Err, S->getLParenLoc()); auto ToRParenLoc = importChecked(Err, S->getRParenLoc()); if (Err) return std::move(Err); return WhileStmt::Create(Importer.getToContext(), ToConditionVariable, ToCond, ToBody, ToWhileLoc, ToLParenLoc, ToRParenLoc); } ExpectedStmt ASTNodeImporter::VisitDoStmt(DoStmt *S) { Error Err = Error::success(); auto ToBody = importChecked(Err, S->getBody()); auto ToCond = importChecked(Err, S->getCond()); auto ToDoLoc = importChecked(Err, S->getDoLoc()); auto ToWhileLoc = importChecked(Err, S->getWhileLoc()); auto ToRParenLoc = importChecked(Err, S->getRParenLoc()); if (Err) return std::move(Err); return new (Importer.getToContext()) DoStmt( ToBody, ToCond, ToDoLoc, ToWhileLoc, ToRParenLoc); } ExpectedStmt ASTNodeImporter::VisitForStmt(ForStmt *S) { Error Err = Error::success(); auto ToInit = importChecked(Err, S->getInit()); auto ToCond = importChecked(Err, S->getCond()); auto ToConditionVariable = importChecked(Err, S->getConditionVariable()); auto ToInc = importChecked(Err, S->getInc()); auto ToBody = importChecked(Err, S->getBody()); auto ToForLoc = importChecked(Err, S->getForLoc()); auto ToLParenLoc = importChecked(Err, S->getLParenLoc()); auto ToRParenLoc = importChecked(Err, S->getRParenLoc()); if (Err) return 
std::move(Err); return new (Importer.getToContext()) ForStmt( Importer.getToContext(), ToInit, ToCond, ToConditionVariable, ToInc, ToBody, ToForLoc, ToLParenLoc, ToRParenLoc); } ExpectedStmt ASTNodeImporter::VisitGotoStmt(GotoStmt *S) { Error Err = Error::success(); auto ToLabel = importChecked(Err, S->getLabel()); auto ToGotoLoc = importChecked(Err, S->getGotoLoc()); auto ToLabelLoc = importChecked(Err, S->getLabelLoc()); if (Err) return std::move(Err); return new (Importer.getToContext()) GotoStmt( ToLabel, ToGotoLoc, ToLabelLoc); } ExpectedStmt ASTNodeImporter::VisitIndirectGotoStmt(IndirectGotoStmt *S) { Error Err = Error::success(); auto ToGotoLoc = importChecked(Err, S->getGotoLoc()); auto ToStarLoc = importChecked(Err, S->getStarLoc()); auto ToTarget = importChecked(Err, S->getTarget()); if (Err) return std::move(Err); return new (Importer.getToContext()) IndirectGotoStmt( ToGotoLoc, ToStarLoc, ToTarget); } ExpectedStmt ASTNodeImporter::VisitContinueStmt(ContinueStmt *S) { ExpectedSLoc ToContinueLocOrErr = import(S->getContinueLoc()); if (!ToContinueLocOrErr) return ToContinueLocOrErr.takeError(); return new (Importer.getToContext()) ContinueStmt(*ToContinueLocOrErr); } ExpectedStmt ASTNodeImporter::VisitBreakStmt(BreakStmt *S) { auto ToBreakLocOrErr = import(S->getBreakLoc()); if (!ToBreakLocOrErr) return ToBreakLocOrErr.takeError(); return new (Importer.getToContext()) BreakStmt(*ToBreakLocOrErr); } ExpectedStmt ASTNodeImporter::VisitReturnStmt(ReturnStmt *S) { Error Err = Error::success(); auto ToReturnLoc = importChecked(Err, S->getReturnLoc()); auto ToRetValue = importChecked(Err, S->getRetValue()); auto ToNRVOCandidate = importChecked(Err, S->getNRVOCandidate()); if (Err) return std::move(Err); return ReturnStmt::Create(Importer.getToContext(), ToReturnLoc, ToRetValue, ToNRVOCandidate); } ExpectedStmt ASTNodeImporter::VisitCXXCatchStmt(CXXCatchStmt *S) { Error Err = Error::success(); auto ToCatchLoc = importChecked(Err, S->getCatchLoc()); auto ToExceptionDecl = importChecked(Err, S->getExceptionDecl()); auto ToHandlerBlock = importChecked(Err, S->getHandlerBlock()); if (Err) return std::move(Err); return new (Importer.getToContext()) CXXCatchStmt ( ToCatchLoc, ToExceptionDecl, ToHandlerBlock); } ExpectedStmt ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) { ExpectedSLoc ToTryLocOrErr = import(S->getTryLoc()); if (!ToTryLocOrErr) return ToTryLocOrErr.takeError(); ExpectedStmt ToTryBlockOrErr = import(S->getTryBlock()); if (!ToTryBlockOrErr) return ToTryBlockOrErr.takeError(); SmallVector ToHandlers(S->getNumHandlers()); for (unsigned HI = 0, HE = S->getNumHandlers(); HI != HE; ++HI) { CXXCatchStmt *FromHandler = S->getHandler(HI); if (auto ToHandlerOrErr = import(FromHandler)) ToHandlers[HI] = *ToHandlerOrErr; else return ToHandlerOrErr.takeError(); } return CXXTryStmt::Create( Importer.getToContext(), *ToTryLocOrErr,*ToTryBlockOrErr, ToHandlers); } ExpectedStmt ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) { Error Err = Error::success(); auto ToInit = importChecked(Err, S->getInit()); auto ToRangeStmt = importChecked(Err, S->getRangeStmt()); auto ToBeginStmt = importChecked(Err, S->getBeginStmt()); auto ToEndStmt = importChecked(Err, S->getEndStmt()); auto ToCond = importChecked(Err, S->getCond()); auto ToInc = importChecked(Err, S->getInc()); auto ToLoopVarStmt = importChecked(Err, S->getLoopVarStmt()); auto ToBody = importChecked(Err, S->getBody()); auto ToForLoc = importChecked(Err, S->getForLoc()); auto ToCoawaitLoc = importChecked(Err, S->getCoawaitLoc()); 
auto ToColonLoc = importChecked(Err, S->getColonLoc()); auto ToRParenLoc = importChecked(Err, S->getRParenLoc()); if (Err) return std::move(Err); return new (Importer.getToContext()) CXXForRangeStmt( ToInit, ToRangeStmt, ToBeginStmt, ToEndStmt, ToCond, ToInc, ToLoopVarStmt, ToBody, ToForLoc, ToCoawaitLoc, ToColonLoc, ToRParenLoc); } ExpectedStmt ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) { Error Err = Error::success(); auto ToElement = importChecked(Err, S->getElement()); auto ToCollection = importChecked(Err, S->getCollection()); auto ToBody = importChecked(Err, S->getBody()); auto ToForLoc = importChecked(Err, S->getForLoc()); auto ToRParenLoc = importChecked(Err, S->getRParenLoc()); if (Err) return std::move(Err); return new (Importer.getToContext()) ObjCForCollectionStmt(ToElement, ToCollection, ToBody, ToForLoc, ToRParenLoc); } ExpectedStmt ASTNodeImporter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) { Error Err = Error::success(); auto ToAtCatchLoc = importChecked(Err, S->getAtCatchLoc()); auto ToRParenLoc = importChecked(Err, S->getRParenLoc()); auto ToCatchParamDecl = importChecked(Err, S->getCatchParamDecl()); auto ToCatchBody = importChecked(Err, S->getCatchBody()); if (Err) return std::move(Err); return new (Importer.getToContext()) ObjCAtCatchStmt ( ToAtCatchLoc, ToRParenLoc, ToCatchParamDecl, ToCatchBody); } ExpectedStmt ASTNodeImporter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) { ExpectedSLoc ToAtFinallyLocOrErr = import(S->getAtFinallyLoc()); if (!ToAtFinallyLocOrErr) return ToAtFinallyLocOrErr.takeError(); ExpectedStmt ToAtFinallyStmtOrErr = import(S->getFinallyBody()); if (!ToAtFinallyStmtOrErr) return ToAtFinallyStmtOrErr.takeError(); return new (Importer.getToContext()) ObjCAtFinallyStmt(*ToAtFinallyLocOrErr, *ToAtFinallyStmtOrErr); } ExpectedStmt ASTNodeImporter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) { Error Err = Error::success(); auto ToAtTryLoc = importChecked(Err, S->getAtTryLoc()); auto ToTryBody = importChecked(Err, S->getTryBody()); auto ToFinallyStmt = importChecked(Err, S->getFinallyStmt()); if (Err) return std::move(Err); SmallVector ToCatchStmts(S->getNumCatchStmts()); for (unsigned CI = 0, CE = S->getNumCatchStmts(); CI != CE; ++CI) { ObjCAtCatchStmt *FromCatchStmt = S->getCatchStmt(CI); if (ExpectedStmt ToCatchStmtOrErr = import(FromCatchStmt)) ToCatchStmts[CI] = *ToCatchStmtOrErr; else return ToCatchStmtOrErr.takeError(); } return ObjCAtTryStmt::Create(Importer.getToContext(), ToAtTryLoc, ToTryBody, ToCatchStmts.begin(), ToCatchStmts.size(), ToFinallyStmt); } ExpectedStmt ASTNodeImporter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) { Error Err = Error::success(); auto ToAtSynchronizedLoc = importChecked(Err, S->getAtSynchronizedLoc()); auto ToSynchExpr = importChecked(Err, S->getSynchExpr()); auto ToSynchBody = importChecked(Err, S->getSynchBody()); if (Err) return std::move(Err); return new (Importer.getToContext()) ObjCAtSynchronizedStmt( ToAtSynchronizedLoc, ToSynchExpr, ToSynchBody); } ExpectedStmt ASTNodeImporter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) { ExpectedSLoc ToThrowLocOrErr = import(S->getThrowLoc()); if (!ToThrowLocOrErr) return ToThrowLocOrErr.takeError(); ExpectedExpr ToThrowExprOrErr = import(S->getThrowExpr()); if (!ToThrowExprOrErr) return ToThrowExprOrErr.takeError(); return new (Importer.getToContext()) ObjCAtThrowStmt( *ToThrowLocOrErr, *ToThrowExprOrErr); } ExpectedStmt ASTNodeImporter::VisitObjCAutoreleasePoolStmt( ObjCAutoreleasePoolStmt *S) { ExpectedSLoc ToAtLocOrErr = 
      import(S->getAtLoc());
  if (!ToAtLocOrErr)
    return ToAtLocOrErr.takeError();
  ExpectedStmt ToSubStmtOrErr = import(S->getSubStmt());
  if (!ToSubStmtOrErr)
    return ToSubStmtOrErr.takeError();
  return new (Importer.getToContext())
      ObjCAutoreleasePoolStmt(*ToAtLocOrErr, *ToSubStmtOrErr);
}

//----------------------------------------------------------------------------
// Import Expressions
//----------------------------------------------------------------------------
ExpectedStmt ASTNodeImporter::VisitExpr(Expr *E) {
  Importer.FromDiag(E->getBeginLoc(), diag::err_unsupported_ast_node)
      << E->getStmtClassName();
  return make_error<ImportError>(ImportError::UnsupportedConstruct);
}

ExpectedStmt ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
  Error Err = Error::success();
  auto ToBuiltinLoc = importChecked(Err, E->getBuiltinLoc());
  auto ToSubExpr = importChecked(Err, E->getSubExpr());
  auto ToWrittenTypeInfo = importChecked(Err, E->getWrittenTypeInfo());
  auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
  auto ToType = importChecked(Err, E->getType());
  if (Err)
    return std::move(Err);

  return new (Importer.getToContext())
      VAArgExpr(ToBuiltinLoc, ToSubExpr, ToWrittenTypeInfo, ToRParenLoc, ToType,
                E->isMicrosoftABI());
}

ExpectedStmt ASTNodeImporter::VisitChooseExpr(ChooseExpr *E) {
  Error Err = Error::success();
  auto ToCond = importChecked(Err, E->getCond());
  auto ToLHS = importChecked(Err, E->getLHS());
  auto ToRHS = importChecked(Err, E->getRHS());
  auto ToBuiltinLoc = importChecked(Err, E->getBuiltinLoc());
  auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
  auto ToType = importChecked(Err, E->getType());
  if (Err)
    return std::move(Err);

  ExprValueKind VK = E->getValueKind();
  ExprObjectKind OK = E->getObjectKind();

  // The value of CondIsTrue only matters if the value is not
  // condition-dependent.
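  // (isConditionTrue() cannot be asked of a condition-dependent ChooseExpr;
  // the short-circuit below makes CondIsTrue default to false in that case.)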
bool CondIsTrue = !E->isConditionDependent() && E->isConditionTrue(); return new (Importer.getToContext()) ChooseExpr(ToBuiltinLoc, ToCond, ToLHS, ToRHS, ToType, VK, OK, ToRParenLoc, CondIsTrue); } ExpectedStmt ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) { ExpectedType TypeOrErr = import(E->getType()); if (!TypeOrErr) return TypeOrErr.takeError(); ExpectedSLoc BeginLocOrErr = import(E->getBeginLoc()); if (!BeginLocOrErr) return BeginLocOrErr.takeError(); return new (Importer.getToContext()) GNUNullExpr(*TypeOrErr, *BeginLocOrErr); } ExpectedStmt ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) { Error Err = Error::success(); auto ToBeginLoc = importChecked(Err, E->getBeginLoc()); auto ToType = importChecked(Err, E->getType()); auto ToFunctionName = importChecked(Err, E->getFunctionName()); if (Err) return std::move(Err); return PredefinedExpr::Create(Importer.getToContext(), ToBeginLoc, ToType, E->getIdentKind(), ToFunctionName); } ExpectedStmt ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) { Error Err = Error::success(); auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc()); auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc()); auto ToDecl = importChecked(Err, E->getDecl()); auto ToLocation = importChecked(Err, E->getLocation()); auto ToType = importChecked(Err, E->getType()); if (Err) return std::move(Err); NamedDecl *ToFoundD = nullptr; if (E->getDecl() != E->getFoundDecl()) { auto FoundDOrErr = import(E->getFoundDecl()); if (!FoundDOrErr) return FoundDOrErr.takeError(); ToFoundD = *FoundDOrErr; } TemplateArgumentListInfo ToTAInfo; TemplateArgumentListInfo *ToResInfo = nullptr; if (E->hasExplicitTemplateArgs()) { if (Error Err = ImportTemplateArgumentListInfo(E->getLAngleLoc(), E->getRAngleLoc(), E->template_arguments(), ToTAInfo)) return std::move(Err); ToResInfo = &ToTAInfo; } auto *ToE = DeclRefExpr::Create( Importer.getToContext(), ToQualifierLoc, ToTemplateKeywordLoc, ToDecl, E->refersToEnclosingVariableOrCapture(), ToLocation, ToType, E->getValueKind(), ToFoundD, ToResInfo, E->isNonOdrUse()); if (E->hadMultipleCandidates()) ToE->setHadMultipleCandidates(true); return ToE; } ExpectedStmt ASTNodeImporter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { ExpectedType TypeOrErr = import(E->getType()); if (!TypeOrErr) return TypeOrErr.takeError(); return new (Importer.getToContext()) ImplicitValueInitExpr(*TypeOrErr); } ExpectedStmt ASTNodeImporter::VisitDesignatedInitExpr(DesignatedInitExpr *E) { ExpectedExpr ToInitOrErr = import(E->getInit()); if (!ToInitOrErr) return ToInitOrErr.takeError(); ExpectedSLoc ToEqualOrColonLocOrErr = import(E->getEqualOrColonLoc()); if (!ToEqualOrColonLocOrErr) return ToEqualOrColonLocOrErr.takeError(); SmallVector ToIndexExprs(E->getNumSubExprs() - 1); // List elements from the second, the first is Init itself for (unsigned I = 1, N = E->getNumSubExprs(); I < N; I++) { if (ExpectedExpr ToArgOrErr = import(E->getSubExpr(I))) ToIndexExprs[I - 1] = *ToArgOrErr; else return ToArgOrErr.takeError(); } SmallVector ToDesignators(E->size()); if (Error Err = ImportContainerChecked(E->designators(), ToDesignators)) return std::move(Err); return DesignatedInitExpr::Create( Importer.getToContext(), ToDesignators, ToIndexExprs, *ToEqualOrColonLocOrErr, E->usesGNUSyntax(), *ToInitOrErr); } ExpectedStmt ASTNodeImporter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) { ExpectedType ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); ExpectedSLoc ToLocationOrErr = 
import(E->getLocation()); if (!ToLocationOrErr) return ToLocationOrErr.takeError(); return new (Importer.getToContext()) CXXNullPtrLiteralExpr( *ToTypeOrErr, *ToLocationOrErr); } ExpectedStmt ASTNodeImporter::VisitIntegerLiteral(IntegerLiteral *E) { ExpectedType ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); ExpectedSLoc ToLocationOrErr = import(E->getLocation()); if (!ToLocationOrErr) return ToLocationOrErr.takeError(); return IntegerLiteral::Create( Importer.getToContext(), E->getValue(), *ToTypeOrErr, *ToLocationOrErr); } ExpectedStmt ASTNodeImporter::VisitFloatingLiteral(FloatingLiteral *E) { ExpectedType ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); ExpectedSLoc ToLocationOrErr = import(E->getLocation()); if (!ToLocationOrErr) return ToLocationOrErr.takeError(); return FloatingLiteral::Create( Importer.getToContext(), E->getValue(), E->isExact(), *ToTypeOrErr, *ToLocationOrErr); } ExpectedStmt ASTNodeImporter::VisitImaginaryLiteral(ImaginaryLiteral *E) { auto ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); ExpectedExpr ToSubExprOrErr = import(E->getSubExpr()); if (!ToSubExprOrErr) return ToSubExprOrErr.takeError(); return new (Importer.getToContext()) ImaginaryLiteral( *ToSubExprOrErr, *ToTypeOrErr); } ExpectedStmt ASTNodeImporter::VisitFixedPointLiteral(FixedPointLiteral *E) { auto ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); ExpectedSLoc ToLocationOrErr = import(E->getLocation()); if (!ToLocationOrErr) return ToLocationOrErr.takeError(); return new (Importer.getToContext()) FixedPointLiteral( Importer.getToContext(), E->getValue(), *ToTypeOrErr, *ToLocationOrErr, Importer.getToContext().getFixedPointScale(*ToTypeOrErr)); } ExpectedStmt ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) { ExpectedType ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); ExpectedSLoc ToLocationOrErr = import(E->getLocation()); if (!ToLocationOrErr) return ToLocationOrErr.takeError(); return new (Importer.getToContext()) CharacterLiteral( E->getValue(), E->getKind(), *ToTypeOrErr, *ToLocationOrErr); } ExpectedStmt ASTNodeImporter::VisitStringLiteral(StringLiteral *E) { ExpectedType ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); SmallVector ToLocations(E->getNumConcatenated()); if (Error Err = ImportArrayChecked( E->tokloc_begin(), E->tokloc_end(), ToLocations.begin())) return std::move(Err); return StringLiteral::Create( Importer.getToContext(), E->getBytes(), E->getKind(), E->isPascal(), *ToTypeOrErr, ToLocations.data(), ToLocations.size()); } ExpectedStmt ASTNodeImporter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { Error Err = Error::success(); auto ToLParenLoc = importChecked(Err, E->getLParenLoc()); auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo()); auto ToType = importChecked(Err, E->getType()); auto ToInitializer = importChecked(Err, E->getInitializer()); if (Err) return std::move(Err); return new (Importer.getToContext()) CompoundLiteralExpr( ToLParenLoc, ToTypeSourceInfo, ToType, E->getValueKind(), ToInitializer, E->isFileScope()); } ExpectedStmt ASTNodeImporter::VisitAtomicExpr(AtomicExpr *E) { Error Err = Error::success(); auto ToBuiltinLoc = importChecked(Err, E->getBuiltinLoc()); auto ToType = importChecked(Err, E->getType()); auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); if (Err) return std::move(Err); SmallVector 
ToExprs(E->getNumSubExprs()); if (Error Err = ImportArrayChecked( E->getSubExprs(), E->getSubExprs() + E->getNumSubExprs(), ToExprs.begin())) return std::move(Err); return new (Importer.getToContext()) AtomicExpr( ToBuiltinLoc, ToExprs, ToType, E->getOp(), ToRParenLoc); } ExpectedStmt ASTNodeImporter::VisitAddrLabelExpr(AddrLabelExpr *E) { Error Err = Error::success(); auto ToAmpAmpLoc = importChecked(Err, E->getAmpAmpLoc()); auto ToLabelLoc = importChecked(Err, E->getLabelLoc()); auto ToLabel = importChecked(Err, E->getLabel()); auto ToType = importChecked(Err, E->getType()); if (Err) return std::move(Err); return new (Importer.getToContext()) AddrLabelExpr( ToAmpAmpLoc, ToLabelLoc, ToLabel, ToType); } ExpectedStmt ASTNodeImporter::VisitConstantExpr(ConstantExpr *E) { Error Err = Error::success(); auto ToSubExpr = importChecked(Err, E->getSubExpr()); if (Err) return std::move(Err); // TODO : Handle APValue::ValueKind that require importing. APValue::ValueKind Kind = E->getResultAPValueKind(); if (Kind == APValue::Int || Kind == APValue::Float || Kind == APValue::FixedPoint || Kind == APValue::ComplexFloat || Kind == APValue::ComplexInt) return ConstantExpr::Create(Importer.getToContext(), ToSubExpr, E->getAPValueResult()); return ConstantExpr::Create(Importer.getToContext(), ToSubExpr); } ExpectedStmt ASTNodeImporter::VisitParenExpr(ParenExpr *E) { Error Err = Error::success(); auto ToLParen = importChecked(Err, E->getLParen()); auto ToRParen = importChecked(Err, E->getRParen()); auto ToSubExpr = importChecked(Err, E->getSubExpr()); if (Err) return std::move(Err); return new (Importer.getToContext()) ParenExpr(ToLParen, ToRParen, ToSubExpr); } ExpectedStmt ASTNodeImporter::VisitParenListExpr(ParenListExpr *E) { SmallVector ToExprs(E->getNumExprs()); if (Error Err = ImportContainerChecked(E->exprs(), ToExprs)) return std::move(Err); ExpectedSLoc ToLParenLocOrErr = import(E->getLParenLoc()); if (!ToLParenLocOrErr) return ToLParenLocOrErr.takeError(); ExpectedSLoc ToRParenLocOrErr = import(E->getRParenLoc()); if (!ToRParenLocOrErr) return ToRParenLocOrErr.takeError(); return ParenListExpr::Create(Importer.getToContext(), *ToLParenLocOrErr, ToExprs, *ToRParenLocOrErr); } ExpectedStmt ASTNodeImporter::VisitStmtExpr(StmtExpr *E) { Error Err = Error::success(); auto ToSubStmt = importChecked(Err, E->getSubStmt()); auto ToType = importChecked(Err, E->getType()); auto ToLParenLoc = importChecked(Err, E->getLParenLoc()); auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); if (Err) return std::move(Err); return new (Importer.getToContext()) StmtExpr(ToSubStmt, ToType, ToLParenLoc, ToRParenLoc, E->getTemplateDepth()); } ExpectedStmt ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) { Error Err = Error::success(); auto ToSubExpr = importChecked(Err, E->getSubExpr()); auto ToType = importChecked(Err, E->getType()); auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc()); if (Err) return std::move(Err); return UnaryOperator::Create( Importer.getToContext(), ToSubExpr, E->getOpcode(), ToType, E->getValueKind(), E->getObjectKind(), ToOperatorLoc, E->canOverflow(), E->getFPOptionsOverride()); } ExpectedStmt ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc()); auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); if (Err) return std::move(Err); if (E->isArgumentType()) { Expected ToArgumentTypeInfoOrErr = 
import(E->getArgumentTypeInfo()); if (!ToArgumentTypeInfoOrErr) return ToArgumentTypeInfoOrErr.takeError(); return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr( E->getKind(), *ToArgumentTypeInfoOrErr, ToType, ToOperatorLoc, ToRParenLoc); } ExpectedExpr ToArgumentExprOrErr = import(E->getArgumentExpr()); if (!ToArgumentExprOrErr) return ToArgumentExprOrErr.takeError(); return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr( E->getKind(), *ToArgumentExprOrErr, ToType, ToOperatorLoc, ToRParenLoc); } ExpectedStmt ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) { Error Err = Error::success(); auto ToLHS = importChecked(Err, E->getLHS()); auto ToRHS = importChecked(Err, E->getRHS()); auto ToType = importChecked(Err, E->getType()); auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc()); if (Err) return std::move(Err); return BinaryOperator::Create( Importer.getToContext(), ToLHS, ToRHS, E->getOpcode(), ToType, E->getValueKind(), E->getObjectKind(), ToOperatorLoc, E->getFPFeatures(Importer.getFromContext().getLangOpts())); } ExpectedStmt ASTNodeImporter::VisitConditionalOperator(ConditionalOperator *E) { Error Err = Error::success(); auto ToCond = importChecked(Err, E->getCond()); auto ToQuestionLoc = importChecked(Err, E->getQuestionLoc()); auto ToLHS = importChecked(Err, E->getLHS()); auto ToColonLoc = importChecked(Err, E->getColonLoc()); auto ToRHS = importChecked(Err, E->getRHS()); auto ToType = importChecked(Err, E->getType()); if (Err) return std::move(Err); return new (Importer.getToContext()) ConditionalOperator( ToCond, ToQuestionLoc, ToLHS, ToColonLoc, ToRHS, ToType, E->getValueKind(), E->getObjectKind()); } ExpectedStmt ASTNodeImporter::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) { Error Err = Error::success(); auto ToCommon = importChecked(Err, E->getCommon()); auto ToOpaqueValue = importChecked(Err, E->getOpaqueValue()); auto ToCond = importChecked(Err, E->getCond()); auto ToTrueExpr = importChecked(Err, E->getTrueExpr()); auto ToFalseExpr = importChecked(Err, E->getFalseExpr()); auto ToQuestionLoc = importChecked(Err, E->getQuestionLoc()); auto ToColonLoc = importChecked(Err, E->getColonLoc()); auto ToType = importChecked(Err, E->getType()); if (Err) return std::move(Err); return new (Importer.getToContext()) BinaryConditionalOperator( ToCommon, ToOpaqueValue, ToCond, ToTrueExpr, ToFalseExpr, ToQuestionLoc, ToColonLoc, ToType, E->getValueKind(), E->getObjectKind()); } ExpectedStmt ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) { Error Err = Error::success(); auto ToBeginLoc = importChecked(Err, E->getBeginLoc()); auto ToQueriedTypeSourceInfo = importChecked(Err, E->getQueriedTypeSourceInfo()); auto ToDimensionExpression = importChecked(Err, E->getDimensionExpression()); auto ToEndLoc = importChecked(Err, E->getEndLoc()); auto ToType = importChecked(Err, E->getType()); if (Err) return std::move(Err); return new (Importer.getToContext()) ArrayTypeTraitExpr( ToBeginLoc, E->getTrait(), ToQueriedTypeSourceInfo, E->getValue(), ToDimensionExpression, ToEndLoc, ToType); } ExpectedStmt ASTNodeImporter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) { Error Err = Error::success(); auto ToBeginLoc = importChecked(Err, E->getBeginLoc()); auto ToQueriedExpression = importChecked(Err, E->getQueriedExpression()); auto ToEndLoc = importChecked(Err, E->getEndLoc()); auto ToType = importChecked(Err, E->getType()); if (Err) return std::move(Err); return new (Importer.getToContext()) ExpressionTraitExpr( ToBeginLoc, E->getTrait(), 
ToQueriedExpression, E->getValue(), ToEndLoc, ToType); } ExpectedStmt ASTNodeImporter::VisitOpaqueValueExpr(OpaqueValueExpr *E) { Error Err = Error::success(); auto ToLocation = importChecked(Err, E->getLocation()); auto ToType = importChecked(Err, E->getType()); auto ToSourceExpr = importChecked(Err, E->getSourceExpr()); if (Err) return std::move(Err); return new (Importer.getToContext()) OpaqueValueExpr( ToLocation, ToType, E->getValueKind(), E->getObjectKind(), ToSourceExpr); } ExpectedStmt ASTNodeImporter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { Error Err = Error::success(); auto ToLHS = importChecked(Err, E->getLHS()); auto ToRHS = importChecked(Err, E->getRHS()); auto ToType = importChecked(Err, E->getType()); auto ToRBracketLoc = importChecked(Err, E->getRBracketLoc()); if (Err) return std::move(Err); return new (Importer.getToContext()) ArraySubscriptExpr( ToLHS, ToRHS, ToType, E->getValueKind(), E->getObjectKind(), ToRBracketLoc); } ExpectedStmt ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) { Error Err = Error::success(); auto ToLHS = importChecked(Err, E->getLHS()); auto ToRHS = importChecked(Err, E->getRHS()); auto ToType = importChecked(Err, E->getType()); auto ToComputationLHSType = importChecked(Err, E->getComputationLHSType()); auto ToComputationResultType = importChecked(Err, E->getComputationResultType()); auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc()); if (Err) return std::move(Err); return CompoundAssignOperator::Create( Importer.getToContext(), ToLHS, ToRHS, E->getOpcode(), ToType, E->getValueKind(), E->getObjectKind(), ToOperatorLoc, E->getFPFeatures(Importer.getFromContext().getLangOpts()), ToComputationLHSType, ToComputationResultType); } Expected ASTNodeImporter::ImportCastPath(CastExpr *CE) { CXXCastPath Path; for (auto I = CE->path_begin(), E = CE->path_end(); I != E; ++I) { if (auto SpecOrErr = import(*I)) Path.push_back(*SpecOrErr); else return SpecOrErr.takeError(); } return Path; } ExpectedStmt ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) { ExpectedType ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); ExpectedExpr ToSubExprOrErr = import(E->getSubExpr()); if (!ToSubExprOrErr) return ToSubExprOrErr.takeError(); Expected ToBasePathOrErr = ImportCastPath(E); if (!ToBasePathOrErr) return ToBasePathOrErr.takeError(); return ImplicitCastExpr::Create( Importer.getToContext(), *ToTypeOrErr, E->getCastKind(), *ToSubExprOrErr, &(*ToBasePathOrErr), E->getValueKind()); } ExpectedStmt ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToSubExpr = importChecked(Err, E->getSubExpr()); auto ToTypeInfoAsWritten = importChecked(Err, E->getTypeInfoAsWritten()); if (Err) return std::move(Err); Expected ToBasePathOrErr = ImportCastPath(E); if (!ToBasePathOrErr) return ToBasePathOrErr.takeError(); CXXCastPath *ToBasePath = &(*ToBasePathOrErr); switch (E->getStmtClass()) { case Stmt::CStyleCastExprClass: { auto *CCE = cast(E); ExpectedSLoc ToLParenLocOrErr = import(CCE->getLParenLoc()); if (!ToLParenLocOrErr) return ToLParenLocOrErr.takeError(); ExpectedSLoc ToRParenLocOrErr = import(CCE->getRParenLoc()); if (!ToRParenLocOrErr) return ToRParenLocOrErr.takeError(); return CStyleCastExpr::Create( Importer.getToContext(), ToType, E->getValueKind(), E->getCastKind(), ToSubExpr, ToBasePath, ToTypeInfoAsWritten, *ToLParenLocOrErr, *ToRParenLocOrErr); } case Stmt::CXXFunctionalCastExprClass: { auto 
*FCE = cast(E); ExpectedSLoc ToLParenLocOrErr = import(FCE->getLParenLoc()); if (!ToLParenLocOrErr) return ToLParenLocOrErr.takeError(); ExpectedSLoc ToRParenLocOrErr = import(FCE->getRParenLoc()); if (!ToRParenLocOrErr) return ToRParenLocOrErr.takeError(); return CXXFunctionalCastExpr::Create( Importer.getToContext(), ToType, E->getValueKind(), ToTypeInfoAsWritten, E->getCastKind(), ToSubExpr, ToBasePath, *ToLParenLocOrErr, *ToRParenLocOrErr); } case Stmt::ObjCBridgedCastExprClass: { auto *OCE = cast(E); ExpectedSLoc ToLParenLocOrErr = import(OCE->getLParenLoc()); if (!ToLParenLocOrErr) return ToLParenLocOrErr.takeError(); ExpectedSLoc ToBridgeKeywordLocOrErr = import(OCE->getBridgeKeywordLoc()); if (!ToBridgeKeywordLocOrErr) return ToBridgeKeywordLocOrErr.takeError(); return new (Importer.getToContext()) ObjCBridgedCastExpr( *ToLParenLocOrErr, OCE->getBridgeKind(), E->getCastKind(), *ToBridgeKeywordLocOrErr, ToTypeInfoAsWritten, ToSubExpr); } default: llvm_unreachable("Cast expression of unsupported type!"); return make_error(ImportError::UnsupportedConstruct); } } ExpectedStmt ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *E) { SmallVector ToNodes; for (int I = 0, N = E->getNumComponents(); I < N; ++I) { const OffsetOfNode &FromNode = E->getComponent(I); SourceLocation ToBeginLoc, ToEndLoc; if (FromNode.getKind() != OffsetOfNode::Base) { Error Err = Error::success(); ToBeginLoc = importChecked(Err, FromNode.getBeginLoc()); ToEndLoc = importChecked(Err, FromNode.getEndLoc()); if (Err) return std::move(Err); } switch (FromNode.getKind()) { case OffsetOfNode::Array: ToNodes.push_back( OffsetOfNode(ToBeginLoc, FromNode.getArrayExprIndex(), ToEndLoc)); break; case OffsetOfNode::Base: { auto ToBSOrErr = import(FromNode.getBase()); if (!ToBSOrErr) return ToBSOrErr.takeError(); ToNodes.push_back(OffsetOfNode(*ToBSOrErr)); break; } case OffsetOfNode::Field: { auto ToFieldOrErr = import(FromNode.getField()); if (!ToFieldOrErr) return ToFieldOrErr.takeError(); ToNodes.push_back(OffsetOfNode(ToBeginLoc, *ToFieldOrErr, ToEndLoc)); break; } case OffsetOfNode::Identifier: { IdentifierInfo *ToII = Importer.Import(FromNode.getFieldName()); ToNodes.push_back(OffsetOfNode(ToBeginLoc, ToII, ToEndLoc)); break; } } } SmallVector ToExprs(E->getNumExpressions()); for (int I = 0, N = E->getNumExpressions(); I < N; ++I) { ExpectedExpr ToIndexExprOrErr = import(E->getIndexExpr(I)); if (!ToIndexExprOrErr) return ToIndexExprOrErr.takeError(); ToExprs[I] = *ToIndexExprOrErr; } Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo()); auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc()); auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); if (Err) return std::move(Err); return OffsetOfExpr::Create( Importer.getToContext(), ToType, ToOperatorLoc, ToTypeSourceInfo, ToNodes, ToExprs, ToRParenLoc); } ExpectedStmt ASTNodeImporter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToOperand = importChecked(Err, E->getOperand()); auto ToBeginLoc = importChecked(Err, E->getBeginLoc()); auto ToEndLoc = importChecked(Err, E->getEndLoc()); if (Err) return std::move(Err); CanThrowResult ToCanThrow; if (E->isValueDependent()) ToCanThrow = CT_Dependent; else ToCanThrow = E->getValue() ? 
CT_Can : CT_Cannot; return new (Importer.getToContext()) CXXNoexceptExpr( ToType, ToOperand, ToCanThrow, ToBeginLoc, ToEndLoc); } ExpectedStmt ASTNodeImporter::VisitCXXThrowExpr(CXXThrowExpr *E) { Error Err = Error::success(); auto ToSubExpr = importChecked(Err, E->getSubExpr()); auto ToType = importChecked(Err, E->getType()); auto ToThrowLoc = importChecked(Err, E->getThrowLoc()); if (Err) return std::move(Err); return new (Importer.getToContext()) CXXThrowExpr( ToSubExpr, ToType, ToThrowLoc, E->isThrownVariableInScope()); } ExpectedStmt ASTNodeImporter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { ExpectedSLoc ToUsedLocOrErr = import(E->getUsedLocation()); if (!ToUsedLocOrErr) return ToUsedLocOrErr.takeError(); auto ToParamOrErr = import(E->getParam()); if (!ToParamOrErr) return ToParamOrErr.takeError(); auto UsedContextOrErr = Importer.ImportContext(E->getUsedContext()); if (!UsedContextOrErr) return UsedContextOrErr.takeError(); // Import the default arg if it was not imported yet. // This is needed because it can happen that during the import of the // default expression (from VisitParmVarDecl) the same ParmVarDecl is // encountered here. The default argument for a ParmVarDecl is set in the // ParmVarDecl only after it is imported (set in VisitParmVarDecl if not here, // see VisitParmVarDecl). ParmVarDecl *ToParam = *ToParamOrErr; if (!ToParam->getDefaultArg()) { Optional FromParam = Importer.getImportedFromDecl(ToParam); assert(FromParam && "ParmVarDecl was not imported?"); if (Error Err = ImportDefaultArgOfParmVarDecl(*FromParam, ToParam)) return std::move(Err); } return CXXDefaultArgExpr::Create(Importer.getToContext(), *ToUsedLocOrErr, *ToParamOrErr, *UsedContextOrErr); } ExpectedStmt ASTNodeImporter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo()); auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); if (Err) return std::move(Err); return new (Importer.getToContext()) CXXScalarValueInitExpr( ToType, ToTypeSourceInfo, ToRParenLoc); } ExpectedStmt ASTNodeImporter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { ExpectedExpr ToSubExprOrErr = import(E->getSubExpr()); if (!ToSubExprOrErr) return ToSubExprOrErr.takeError(); auto ToDtorOrErr = import(E->getTemporary()->getDestructor()); if (!ToDtorOrErr) return ToDtorOrErr.takeError(); ASTContext &ToCtx = Importer.getToContext(); CXXTemporary *Temp = CXXTemporary::Create(ToCtx, *ToDtorOrErr); return CXXBindTemporaryExpr::Create(ToCtx, Temp, *ToSubExprOrErr); } ExpectedStmt ASTNodeImporter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) { Error Err = Error::success(); auto ToConstructor = importChecked(Err, E->getConstructor()); auto ToType = importChecked(Err, E->getType()); auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo()); auto ToParenOrBraceRange = importChecked(Err, E->getParenOrBraceRange()); if (Err) return std::move(Err); SmallVector ToArgs(E->getNumArgs()); if (Error Err = ImportContainerChecked(E->arguments(), ToArgs)) return std::move(Err); return CXXTemporaryObjectExpr::Create( Importer.getToContext(), ToConstructor, ToType, ToTypeSourceInfo, ToArgs, ToParenOrBraceRange, E->hadMultipleCandidates(), E->isListInitialization(), E->isStdInitListInitialization(), E->requiresZeroInitialization()); } ExpectedDecl ASTNodeImporter::VisitLifetimeExtendedTemporaryDecl( LifetimeExtendedTemporaryDecl *D) { DeclContext *DC, *LexicalDC; if (Error Err = 
ImportDeclContext(D, DC, LexicalDC)) return std::move(Err); Error Err = Error::success(); auto Temporary = importChecked(Err, D->getTemporaryExpr()); auto ExtendingDecl = importChecked(Err, D->getExtendingDecl()); if (Err) return std::move(Err); // FIXME: Should ManglingNumber get numbers associated with 'to' context? LifetimeExtendedTemporaryDecl *To; if (GetImportedOrCreateDecl(To, D, Temporary, ExtendingDecl, D->getManglingNumber())) return To; To->setLexicalDeclContext(LexicalDC); LexicalDC->addDeclInternal(To); return To; } ExpectedStmt ASTNodeImporter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); Expr *ToTemporaryExpr = importChecked( Err, E->getLifetimeExtendedTemporaryDecl() ? nullptr : E->getSubExpr()); auto ToMaterializedDecl = importChecked(Err, E->getLifetimeExtendedTemporaryDecl()); if (Err) return std::move(Err); if (!ToTemporaryExpr) ToTemporaryExpr = cast(ToMaterializedDecl->getTemporaryExpr()); auto *ToMTE = new (Importer.getToContext()) MaterializeTemporaryExpr( ToType, ToTemporaryExpr, E->isBoundToLvalueReference(), ToMaterializedDecl); return ToMTE; } ExpectedStmt ASTNodeImporter::VisitPackExpansionExpr(PackExpansionExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToPattern = importChecked(Err, E->getPattern()); auto ToEllipsisLoc = importChecked(Err, E->getEllipsisLoc()); if (Err) return std::move(Err); return new (Importer.getToContext()) PackExpansionExpr( ToType, ToPattern, ToEllipsisLoc, E->getNumExpansions()); } ExpectedStmt ASTNodeImporter::VisitSizeOfPackExpr(SizeOfPackExpr *E) { Error Err = Error::success(); auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc()); auto ToPack = importChecked(Err, E->getPack()); auto ToPackLoc = importChecked(Err, E->getPackLoc()); auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); if (Err) return std::move(Err); Optional Length; if (!E->isValueDependent()) Length = E->getPackLength(); SmallVector ToPartialArguments; if (E->isPartiallySubstituted()) { if (Error Err = ImportTemplateArguments( E->getPartialArguments().data(), E->getPartialArguments().size(), ToPartialArguments)) return std::move(Err); } return SizeOfPackExpr::Create( Importer.getToContext(), ToOperatorLoc, ToPack, ToPackLoc, ToRParenLoc, Length, ToPartialArguments); } ExpectedStmt ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *E) { Error Err = Error::success(); auto ToOperatorNew = importChecked(Err, E->getOperatorNew()); auto ToOperatorDelete = importChecked(Err, E->getOperatorDelete()); auto ToTypeIdParens = importChecked(Err, E->getTypeIdParens()); auto ToArraySize = importChecked(Err, E->getArraySize()); auto ToInitializer = importChecked(Err, E->getInitializer()); auto ToType = importChecked(Err, E->getType()); auto ToAllocatedTypeSourceInfo = importChecked(Err, E->getAllocatedTypeSourceInfo()); auto ToSourceRange = importChecked(Err, E->getSourceRange()); auto ToDirectInitRange = importChecked(Err, E->getDirectInitRange()); if (Err) return std::move(Err); SmallVector ToPlacementArgs(E->getNumPlacementArgs()); if (Error Err = ImportContainerChecked(E->placement_arguments(), ToPlacementArgs)) return std::move(Err); return CXXNewExpr::Create( Importer.getToContext(), E->isGlobalNew(), ToOperatorNew, ToOperatorDelete, E->passAlignment(), E->doesUsualArrayDeleteWantSize(), ToPlacementArgs, ToTypeIdParens, ToArraySize, E->getInitializationStyle(), ToInitializer, ToType, ToAllocatedTypeSourceInfo, ToSourceRange, 
ToDirectInitRange); } ExpectedStmt ASTNodeImporter::VisitCXXDeleteExpr(CXXDeleteExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToOperatorDelete = importChecked(Err, E->getOperatorDelete()); auto ToArgument = importChecked(Err, E->getArgument()); auto ToBeginLoc = importChecked(Err, E->getBeginLoc()); if (Err) return std::move(Err); return new (Importer.getToContext()) CXXDeleteExpr( ToType, E->isGlobalDelete(), E->isArrayForm(), E->isArrayFormAsWritten(), E->doesUsualArrayDeleteWantSize(), ToOperatorDelete, ToArgument, ToBeginLoc); } ExpectedStmt ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToLocation = importChecked(Err, E->getLocation()); auto ToConstructor = importChecked(Err, E->getConstructor()); auto ToParenOrBraceRange = importChecked(Err, E->getParenOrBraceRange()); if (Err) return std::move(Err); SmallVector ToArgs(E->getNumArgs()); if (Error Err = ImportContainerChecked(E->arguments(), ToArgs)) return std::move(Err); return CXXConstructExpr::Create( Importer.getToContext(), ToType, ToLocation, ToConstructor, E->isElidable(), ToArgs, E->hadMultipleCandidates(), E->isListInitialization(), E->isStdInitListInitialization(), E->requiresZeroInitialization(), E->getConstructionKind(), ToParenOrBraceRange); } ExpectedStmt ASTNodeImporter::VisitExprWithCleanups(ExprWithCleanups *E) { ExpectedExpr ToSubExprOrErr = import(E->getSubExpr()); if (!ToSubExprOrErr) return ToSubExprOrErr.takeError(); SmallVector ToObjects(E->getNumObjects()); if (Error Err = ImportContainerChecked(E->getObjects(), ToObjects)) return std::move(Err); return ExprWithCleanups::Create( Importer.getToContext(), *ToSubExprOrErr, E->cleanupsHaveSideEffects(), ToObjects); } ExpectedStmt ASTNodeImporter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) { Error Err = Error::success(); auto ToCallee = importChecked(Err, E->getCallee()); auto ToType = importChecked(Err, E->getType()); auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); if (Err) return std::move(Err); SmallVector ToArgs(E->getNumArgs()); if (Error Err = ImportContainerChecked(E->arguments(), ToArgs)) return std::move(Err); return CXXMemberCallExpr::Create(Importer.getToContext(), ToCallee, ToArgs, ToType, E->getValueKind(), ToRParenLoc); } ExpectedStmt ASTNodeImporter::VisitCXXThisExpr(CXXThisExpr *E) { ExpectedType ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); ExpectedSLoc ToLocationOrErr = import(E->getLocation()); if (!ToLocationOrErr) return ToLocationOrErr.takeError(); return new (Importer.getToContext()) CXXThisExpr( *ToLocationOrErr, *ToTypeOrErr, E->isImplicit()); } ExpectedStmt ASTNodeImporter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) { ExpectedType ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); ExpectedSLoc ToLocationOrErr = import(E->getLocation()); if (!ToLocationOrErr) return ToLocationOrErr.takeError(); return new (Importer.getToContext()) CXXBoolLiteralExpr( E->getValue(), *ToTypeOrErr, *ToLocationOrErr); } ExpectedStmt ASTNodeImporter::VisitMemberExpr(MemberExpr *E) { Error Err = Error::success(); auto ToBase = importChecked(Err, E->getBase()); auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc()); auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc()); auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc()); auto ToMemberDecl = importChecked(Err, E->getMemberDecl()); auto ToType = 
importChecked(Err, E->getType()); auto ToDecl = importChecked(Err, E->getFoundDecl().getDecl()); auto ToName = importChecked(Err, E->getMemberNameInfo().getName()); auto ToLoc = importChecked(Err, E->getMemberNameInfo().getLoc()); if (Err) return std::move(Err); DeclAccessPair ToFoundDecl = DeclAccessPair::make(ToDecl, E->getFoundDecl().getAccess()); DeclarationNameInfo ToMemberNameInfo(ToName, ToLoc); TemplateArgumentListInfo ToTAInfo, *ResInfo = nullptr; if (E->hasExplicitTemplateArgs()) { if (Error Err = ImportTemplateArgumentListInfo(E->getLAngleLoc(), E->getRAngleLoc(), E->template_arguments(), ToTAInfo)) return std::move(Err); ResInfo = &ToTAInfo; } return MemberExpr::Create(Importer.getToContext(), ToBase, E->isArrow(), ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc, ToMemberDecl, ToFoundDecl, ToMemberNameInfo, ResInfo, ToType, E->getValueKind(), E->getObjectKind(), E->isNonOdrUse()); } ExpectedStmt ASTNodeImporter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) { Error Err = Error::success(); auto ToBase = importChecked(Err, E->getBase()); auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc()); auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc()); auto ToScopeTypeInfo = importChecked(Err, E->getScopeTypeInfo()); auto ToColonColonLoc = importChecked(Err, E->getColonColonLoc()); auto ToTildeLoc = importChecked(Err, E->getTildeLoc()); if (Err) return std::move(Err); PseudoDestructorTypeStorage Storage; if (IdentifierInfo *FromII = E->getDestroyedTypeIdentifier()) { IdentifierInfo *ToII = Importer.Import(FromII); ExpectedSLoc ToDestroyedTypeLocOrErr = import(E->getDestroyedTypeLoc()); if (!ToDestroyedTypeLocOrErr) return ToDestroyedTypeLocOrErr.takeError(); Storage = PseudoDestructorTypeStorage(ToII, *ToDestroyedTypeLocOrErr); } else { if (auto ToTIOrErr = import(E->getDestroyedTypeInfo())) Storage = PseudoDestructorTypeStorage(*ToTIOrErr); else return ToTIOrErr.takeError(); } return new (Importer.getToContext()) CXXPseudoDestructorExpr( Importer.getToContext(), ToBase, E->isArrow(), ToOperatorLoc, ToQualifierLoc, ToScopeTypeInfo, ToColonColonLoc, ToTildeLoc, Storage); } ExpectedStmt ASTNodeImporter::VisitCXXDependentScopeMemberExpr( CXXDependentScopeMemberExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc()); auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc()); auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc()); auto ToFirstQualifierFoundInScope = importChecked(Err, E->getFirstQualifierFoundInScope()); if (Err) return std::move(Err); Expr *ToBase = nullptr; if (!E->isImplicitAccess()) { if (ExpectedExpr ToBaseOrErr = import(E->getBase())) ToBase = *ToBaseOrErr; else return ToBaseOrErr.takeError(); } TemplateArgumentListInfo ToTAInfo, *ResInfo = nullptr; if (E->hasExplicitTemplateArgs()) { if (Error Err = ImportTemplateArgumentListInfo(E->getLAngleLoc(), E->getRAngleLoc(), E->template_arguments(), ToTAInfo)) return std::move(Err); ResInfo = &ToTAInfo; } auto ToMember = importChecked(Err, E->getMember()); auto ToMemberLoc = importChecked(Err, E->getMemberLoc()); if (Err) return std::move(Err); DeclarationNameInfo ToMemberNameInfo(ToMember, ToMemberLoc); // Import additional name location/type info. 
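  // (Constructor, destructor, conversion-function and operator names carry
  // extra type/location data in their DeclarationNameLoc.)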
if (Error Err = ImportDeclarationNameLoc(E->getMemberNameInfo(), ToMemberNameInfo)) return std::move(Err); return CXXDependentScopeMemberExpr::Create( Importer.getToContext(), ToBase, ToType, E->isArrow(), ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc, ToFirstQualifierFoundInScope, ToMemberNameInfo, ResInfo); } ExpectedStmt ASTNodeImporter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) { Error Err = Error::success(); auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc()); auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc()); auto ToDeclName = importChecked(Err, E->getDeclName()); auto ToNameLoc = importChecked(Err, E->getNameInfo().getLoc()); auto ToLAngleLoc = importChecked(Err, E->getLAngleLoc()); auto ToRAngleLoc = importChecked(Err, E->getRAngleLoc()); if (Err) return std::move(Err); DeclarationNameInfo ToNameInfo(ToDeclName, ToNameLoc); if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo)) return std::move(Err); TemplateArgumentListInfo ToTAInfo(ToLAngleLoc, ToRAngleLoc); TemplateArgumentListInfo *ResInfo = nullptr; if (E->hasExplicitTemplateArgs()) { if (Error Err = ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo)) return std::move(Err); ResInfo = &ToTAInfo; } return DependentScopeDeclRefExpr::Create( Importer.getToContext(), ToQualifierLoc, ToTemplateKeywordLoc, ToNameInfo, ResInfo); } ExpectedStmt ASTNodeImporter::VisitCXXUnresolvedConstructExpr( CXXUnresolvedConstructExpr *E) { Error Err = Error::success(); auto ToLParenLoc = importChecked(Err, E->getLParenLoc()); auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo()); if (Err) return std::move(Err); SmallVector ToArgs(E->arg_size()); if (Error Err = ImportArrayChecked(E->arg_begin(), E->arg_end(), ToArgs.begin())) return std::move(Err); return CXXUnresolvedConstructExpr::Create( Importer.getToContext(), ToTypeSourceInfo, ToLParenLoc, llvm::makeArrayRef(ToArgs), ToRParenLoc); } ExpectedStmt ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) { Expected ToNamingClassOrErr = import(E->getNamingClass()); if (!ToNamingClassOrErr) return ToNamingClassOrErr.takeError(); auto ToQualifierLocOrErr = import(E->getQualifierLoc()); if (!ToQualifierLocOrErr) return ToQualifierLocOrErr.takeError(); Error Err = Error::success(); auto ToName = importChecked(Err, E->getName()); auto ToNameLoc = importChecked(Err, E->getNameLoc()); if (Err) return std::move(Err); DeclarationNameInfo ToNameInfo(ToName, ToNameLoc); // Import additional name location/type info. 
if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo)) return std::move(Err); UnresolvedSet<8> ToDecls; for (auto *D : E->decls()) if (auto ToDOrErr = import(D)) ToDecls.addDecl(cast(*ToDOrErr)); else return ToDOrErr.takeError(); if (E->hasExplicitTemplateArgs()) { TemplateArgumentListInfo ToTAInfo; if (Error Err = ImportTemplateArgumentListInfo( E->getLAngleLoc(), E->getRAngleLoc(), E->template_arguments(), ToTAInfo)) return std::move(Err); ExpectedSLoc ToTemplateKeywordLocOrErr = import(E->getTemplateKeywordLoc()); if (!ToTemplateKeywordLocOrErr) return ToTemplateKeywordLocOrErr.takeError(); return UnresolvedLookupExpr::Create( Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr, *ToTemplateKeywordLocOrErr, ToNameInfo, E->requiresADL(), &ToTAInfo, ToDecls.begin(), ToDecls.end()); } return UnresolvedLookupExpr::Create( Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr, ToNameInfo, E->requiresADL(), E->isOverloaded(), ToDecls.begin(), ToDecls.end()); } ExpectedStmt ASTNodeImporter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc()); auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc()); auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc()); auto ToName = importChecked(Err, E->getName()); auto ToNameLoc = importChecked(Err, E->getNameLoc()); if (Err) return std::move(Err); DeclarationNameInfo ToNameInfo(ToName, ToNameLoc); // Import additional name location/type info. if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo)) return std::move(Err); UnresolvedSet<8> ToDecls; for (Decl *D : E->decls()) if (auto ToDOrErr = import(D)) ToDecls.addDecl(cast(*ToDOrErr)); else return ToDOrErr.takeError(); TemplateArgumentListInfo ToTAInfo; TemplateArgumentListInfo *ResInfo = nullptr; if (E->hasExplicitTemplateArgs()) { TemplateArgumentListInfo FromTAInfo; E->copyTemplateArgumentsInto(FromTAInfo); if (Error Err = ImportTemplateArgumentListInfo(FromTAInfo, ToTAInfo)) return std::move(Err); ResInfo = &ToTAInfo; } Expr *ToBase = nullptr; if (!E->isImplicitAccess()) { if (ExpectedExpr ToBaseOrErr = import(E->getBase())) ToBase = *ToBaseOrErr; else return ToBaseOrErr.takeError(); } return UnresolvedMemberExpr::Create( Importer.getToContext(), E->hasUnresolvedUsing(), ToBase, ToType, E->isArrow(), ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc, ToNameInfo, ResInfo, ToDecls.begin(), ToDecls.end()); } ExpectedStmt ASTNodeImporter::VisitCallExpr(CallExpr *E) { Error Err = Error::success(); auto ToCallee = importChecked(Err, E->getCallee()); auto ToType = importChecked(Err, E->getType()); auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); if (Err) return std::move(Err); unsigned NumArgs = E->getNumArgs(); llvm::SmallVector ToArgs(NumArgs); if (Error Err = ImportContainerChecked(E->arguments(), ToArgs)) return std::move(Err); if (const auto *OCE = dyn_cast(E)) { return CXXOperatorCallExpr::Create( Importer.getToContext(), OCE->getOperator(), ToCallee, ToArgs, ToType, OCE->getValueKind(), ToRParenLoc, OCE->getFPFeatures(), OCE->getADLCallKind()); } return CallExpr::Create(Importer.getToContext(), ToCallee, ToArgs, ToType, E->getValueKind(), ToRParenLoc, /*MinNumArgs=*/0, E->getADLCallKind()); } ExpectedStmt ASTNodeImporter::VisitLambdaExpr(LambdaExpr *E) { CXXRecordDecl *FromClass = E->getLambdaClass(); auto ToClassOrErr = import(FromClass); if (!ToClassOrErr) return 
ToClassOrErr.takeError(); CXXRecordDecl *ToClass = *ToClassOrErr; auto ToCallOpOrErr = import(E->getCallOperator()); if (!ToCallOpOrErr) return ToCallOpOrErr.takeError(); SmallVector ToCaptureInits(E->capture_size()); if (Error Err = ImportContainerChecked(E->capture_inits(), ToCaptureInits)) return std::move(Err); Error Err = Error::success(); auto ToIntroducerRange = importChecked(Err, E->getIntroducerRange()); auto ToCaptureDefaultLoc = importChecked(Err, E->getCaptureDefaultLoc()); auto ToEndLoc = importChecked(Err, E->getEndLoc()); if (Err) return std::move(Err); return LambdaExpr::Create(Importer.getToContext(), ToClass, ToIntroducerRange, E->getCaptureDefault(), ToCaptureDefaultLoc, E->hasExplicitParameters(), E->hasExplicitResultType(), ToCaptureInits, ToEndLoc, E->containsUnexpandedParameterPack()); } ExpectedStmt ASTNodeImporter::VisitInitListExpr(InitListExpr *E) { Error Err = Error::success(); auto ToLBraceLoc = importChecked(Err, E->getLBraceLoc()); auto ToRBraceLoc = importChecked(Err, E->getRBraceLoc()); auto ToType = importChecked(Err, E->getType()); if (Err) return std::move(Err); SmallVector ToExprs(E->getNumInits()); if (Error Err = ImportContainerChecked(E->inits(), ToExprs)) return std::move(Err); ASTContext &ToCtx = Importer.getToContext(); InitListExpr *To = new (ToCtx) InitListExpr( ToCtx, ToLBraceLoc, ToExprs, ToRBraceLoc); To->setType(ToType); if (E->hasArrayFiller()) { if (ExpectedExpr ToFillerOrErr = import(E->getArrayFiller())) To->setArrayFiller(*ToFillerOrErr); else return ToFillerOrErr.takeError(); } if (FieldDecl *FromFD = E->getInitializedFieldInUnion()) { if (auto ToFDOrErr = import(FromFD)) To->setInitializedFieldInUnion(*ToFDOrErr); else return ToFDOrErr.takeError(); } if (InitListExpr *SyntForm = E->getSyntacticForm()) { if (auto ToSyntFormOrErr = import(SyntForm)) To->setSyntacticForm(*ToSyntFormOrErr); else return ToSyntFormOrErr.takeError(); } // Copy InitListExprBitfields, which are not handled in the ctor of // InitListExpr. 
To->sawArrayRangeDesignator(E->hadArrayRangeDesignator()); return To; } ExpectedStmt ASTNodeImporter::VisitCXXStdInitializerListExpr( CXXStdInitializerListExpr *E) { ExpectedType ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); ExpectedExpr ToSubExprOrErr = import(E->getSubExpr()); if (!ToSubExprOrErr) return ToSubExprOrErr.takeError(); return new (Importer.getToContext()) CXXStdInitializerListExpr( *ToTypeOrErr, *ToSubExprOrErr); } ExpectedStmt ASTNodeImporter::VisitCXXInheritedCtorInitExpr( CXXInheritedCtorInitExpr *E) { Error Err = Error::success(); auto ToLocation = importChecked(Err, E->getLocation()); auto ToType = importChecked(Err, E->getType()); auto ToConstructor = importChecked(Err, E->getConstructor()); if (Err) return std::move(Err); return new (Importer.getToContext()) CXXInheritedCtorInitExpr( ToLocation, ToType, ToConstructor, E->constructsVBase(), E->inheritedFromVBase()); } ExpectedStmt ASTNodeImporter::VisitArrayInitLoopExpr(ArrayInitLoopExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToCommonExpr = importChecked(Err, E->getCommonExpr()); auto ToSubExpr = importChecked(Err, E->getSubExpr()); if (Err) return std::move(Err); return new (Importer.getToContext()) ArrayInitLoopExpr( ToType, ToCommonExpr, ToSubExpr); } ExpectedStmt ASTNodeImporter::VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) { ExpectedType ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); return new (Importer.getToContext()) ArrayInitIndexExpr(*ToTypeOrErr); } ExpectedStmt ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) { ExpectedSLoc ToBeginLocOrErr = import(E->getBeginLoc()); if (!ToBeginLocOrErr) return ToBeginLocOrErr.takeError(); auto ToFieldOrErr = import(E->getField()); if (!ToFieldOrErr) return ToFieldOrErr.takeError(); auto UsedContextOrErr = Importer.ImportContext(E->getUsedContext()); if (!UsedContextOrErr) return UsedContextOrErr.takeError(); return CXXDefaultInitExpr::Create( Importer.getToContext(), *ToBeginLocOrErr, *ToFieldOrErr, *UsedContextOrErr); } ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToSubExpr = importChecked(Err, E->getSubExpr()); auto ToTypeInfoAsWritten = importChecked(Err, E->getTypeInfoAsWritten()); auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc()); auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); auto ToAngleBrackets = importChecked(Err, E->getAngleBrackets()); if (Err) return std::move(Err); ExprValueKind VK = E->getValueKind(); CastKind CK = E->getCastKind(); auto ToBasePathOrErr = ImportCastPath(E); if (!ToBasePathOrErr) return ToBasePathOrErr.takeError(); if (isa(E)) { return CXXStaticCastExpr::Create( Importer.getToContext(), ToType, VK, CK, ToSubExpr, &(*ToBasePathOrErr), ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets); } else if (isa(E)) { return CXXDynamicCastExpr::Create( Importer.getToContext(), ToType, VK, CK, ToSubExpr, &(*ToBasePathOrErr), ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets); } else if (isa(E)) { return CXXReinterpretCastExpr::Create( Importer.getToContext(), ToType, VK, CK, ToSubExpr, &(*ToBasePathOrErr), ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets); } else if (isa(E)) { return CXXConstCastExpr::Create( Importer.getToContext(), ToType, VK, ToSubExpr, ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets); } else { 
llvm_unreachable("Unknown cast type"); return make_error(); } } ExpectedStmt ASTNodeImporter::VisitSubstNonTypeTemplateParmExpr( SubstNonTypeTemplateParmExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToExprLoc = importChecked(Err, E->getExprLoc()); auto ToParameter = importChecked(Err, E->getParameter()); auto ToReplacement = importChecked(Err, E->getReplacement()); if (Err) return std::move(Err); return new (Importer.getToContext()) SubstNonTypeTemplateParmExpr( ToType, E->getValueKind(), ToExprLoc, ToParameter, ToReplacement); } ExpectedStmt ASTNodeImporter::VisitTypeTraitExpr(TypeTraitExpr *E) { Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToBeginLoc = importChecked(Err, E->getBeginLoc()); auto ToEndLoc = importChecked(Err, E->getEndLoc()); if (Err) return std::move(Err); SmallVector ToArgs(E->getNumArgs()); if (Error Err = ImportContainerChecked(E->getArgs(), ToArgs)) return std::move(Err); // According to Sema::BuildTypeTrait(), if E is value-dependent, // Value is always false. bool ToValue = (E->isValueDependent() ? false : E->getValue()); return TypeTraitExpr::Create( Importer.getToContext(), ToType, ToBeginLoc, E->getTrait(), ToArgs, ToEndLoc, ToValue); } ExpectedStmt ASTNodeImporter::VisitCXXTypeidExpr(CXXTypeidExpr *E) { ExpectedType ToTypeOrErr = import(E->getType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); auto ToSourceRangeOrErr = import(E->getSourceRange()); if (!ToSourceRangeOrErr) return ToSourceRangeOrErr.takeError(); if (E->isTypeOperand()) { if (auto ToTSIOrErr = import(E->getTypeOperandSourceInfo())) return new (Importer.getToContext()) CXXTypeidExpr( *ToTypeOrErr, *ToTSIOrErr, *ToSourceRangeOrErr); else return ToTSIOrErr.takeError(); } ExpectedExpr ToExprOperandOrErr = import(E->getExprOperand()); if (!ToExprOperandOrErr) return ToExprOperandOrErr.takeError(); return new (Importer.getToContext()) CXXTypeidExpr( *ToTypeOrErr, *ToExprOperandOrErr, *ToSourceRangeOrErr); } Error ASTNodeImporter::ImportOverriddenMethods(CXXMethodDecl *ToMethod, CXXMethodDecl *FromMethod) { Error ImportErrors = Error::success(); for (auto *FromOverriddenMethod : FromMethod->overridden_methods()) { if (auto ImportedOrErr = import(FromOverriddenMethod)) ToMethod->getCanonicalDecl()->addOverriddenMethod(cast( (*ImportedOrErr)->getCanonicalDecl())); else ImportErrors = joinErrors(std::move(ImportErrors), ImportedOrErr.takeError()); } return ImportErrors; } ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager, ASTContext &FromContext, FileManager &FromFileManager, bool MinimalImport, std::shared_ptr SharedState) : SharedState(SharedState), ToContext(ToContext), FromContext(FromContext), ToFileManager(ToFileManager), FromFileManager(FromFileManager), Minimal(MinimalImport), ODRHandling(ODRHandlingType::Conservative) { // Create a default state without the lookup table: LLDB case. 
if (!SharedState) { this->SharedState = std::make_shared(); } ImportedDecls[FromContext.getTranslationUnitDecl()] = ToContext.getTranslationUnitDecl(); } ASTImporter::~ASTImporter() = default; Optional ASTImporter::getFieldIndex(Decl *F) { assert(F && (isa(*F) || isa(*F)) && "Try to get field index for non-field."); auto *Owner = dyn_cast(F->getDeclContext()); if (!Owner) return None; unsigned Index = 0; for (const auto *D : Owner->decls()) { if (D == F) return Index; if (isa(*D) || isa(*D)) ++Index; } llvm_unreachable("Field was not found in its parent context."); return None; } ASTImporter::FoundDeclsTy ASTImporter::findDeclsInToCtx(DeclContext *DC, DeclarationName Name) { // We search in the redecl context because of transparent contexts. // E.g. a simple C language enum is a transparent context: // enum E { A, B }; // Now if we had a global variable in the TU // int A; // then the enum constant 'A' and the variable 'A' violates ODR. // We can diagnose this only if we search in the redecl context. DeclContext *ReDC = DC->getRedeclContext(); if (SharedState->getLookupTable()) { ASTImporterLookupTable::LookupResult LookupResult = SharedState->getLookupTable()->lookup(ReDC, Name); return FoundDeclsTy(LookupResult.begin(), LookupResult.end()); } else { DeclContext::lookup_result NoloadLookupResult = ReDC->noload_lookup(Name); FoundDeclsTy Result(NoloadLookupResult.begin(), NoloadLookupResult.end()); // We must search by the slow case of localUncachedLookup because that is // working even if there is no LookupPtr for the DC. We could use // DC::buildLookup() to create the LookupPtr, but that would load external // decls again, we must avoid that case. // Also, even if we had the LookupPtr, we must find Decls which are not // in the LookupPtr, so we need the slow case. // These cases are handled in ASTImporterLookupTable, but we cannot use // that with LLDB since that traverses through the AST which initiates the // load of external decls again via DC::decls(). And again, we must avoid // loading external decls during the import. if (Result.empty()) ReDC->localUncachedLookup(Name, Result); return Result; } } void ASTImporter::AddToLookupTable(Decl *ToD) { SharedState->addDeclToLookup(ToD); } Expected ASTImporter::ImportImpl(Decl *FromD) { // Import the decl using ASTNodeImporter. ASTNodeImporter Importer(*this); return Importer.Visit(FromD); } void ASTImporter::RegisterImportedDecl(Decl *FromD, Decl *ToD) { MapImported(FromD, ToD); } llvm::Expected ASTImporter::Import(ExprWithCleanups::CleanupObject From) { if (auto *CLE = From.dyn_cast()) { if (Expected R = Import(CLE)) return ExprWithCleanups::CleanupObject(cast(*R)); } // FIXME: Handle BlockDecl when we implement importing BlockExpr in // ASTNodeImporter. return make_error(ImportError::UnsupportedConstruct); } Expected ASTImporter::Import(QualType FromT) { if (FromT.isNull()) return QualType{}; const Type *FromTy = FromT.getTypePtr(); // Check whether we've already imported this type. llvm::DenseMap::iterator Pos = ImportedTypes.find(FromTy); if (Pos != ImportedTypes.end()) return ToContext.getQualifiedType(Pos->second, FromT.getLocalQualifiers()); // Import the type ASTNodeImporter Importer(*this); ExpectedType ToTOrErr = Importer.Visit(FromTy); if (!ToTOrErr) return ToTOrErr.takeError(); // Record the imported type. 
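  // The cache is keyed on the unqualified Type node (FromTy above), so e.g.
  // importing 'const Foo' and plain 'Foo' share one entry for 'Foo'; the
  // local qualifiers of the source QualType are re-applied to the result via
  // getQualifiedType() below.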
ImportedTypes[FromTy] = (*ToTOrErr).getTypePtr(); return ToContext.getQualifiedType(*ToTOrErr, FromT.getLocalQualifiers()); } Expected ASTImporter::Import(TypeSourceInfo *FromTSI) { if (!FromTSI) return FromTSI; // FIXME: For now we just create a "trivial" type source info based // on the type and a single location. Implement a real version of this. ExpectedType TOrErr = Import(FromTSI->getType()); if (!TOrErr) return TOrErr.takeError(); ExpectedSLoc BeginLocOrErr = Import(FromTSI->getTypeLoc().getBeginLoc()); if (!BeginLocOrErr) return BeginLocOrErr.takeError(); return ToContext.getTrivialTypeSourceInfo(*TOrErr, *BeginLocOrErr); } Expected ASTImporter::Import(const Attr *FromAttr) { Attr *ToAttr = nullptr; SourceRange ToRange; if (Error Err = importInto(ToRange, FromAttr->getRange())) return std::move(Err); // FIXME: Is there some kind of AttrVisitor to use here? switch (FromAttr->getKind()) { case attr::Aligned: { auto *From = cast(FromAttr); AlignedAttr *To; auto CreateAlign = [&](bool IsAlignmentExpr, void *Alignment) { return AlignedAttr::Create(ToContext, IsAlignmentExpr, Alignment, ToRange, From->getSyntax(), From->getSemanticSpelling()); }; if (From->isAlignmentExpr()) { if (auto ToEOrErr = Import(From->getAlignmentExpr())) To = CreateAlign(true, *ToEOrErr); else return ToEOrErr.takeError(); } else { if (auto ToTOrErr = Import(From->getAlignmentType())) To = CreateAlign(false, *ToTOrErr); else return ToTOrErr.takeError(); } To->setInherited(From->isInherited()); To->setPackExpansion(From->isPackExpansion()); To->setImplicit(From->isImplicit()); ToAttr = To; break; } default: // FIXME: 'clone' copies every member but some of them should be imported. // Handle other Attrs that have parameters that should be imported. ToAttr = FromAttr->clone(ToContext); ToAttr->setRange(ToRange); break; } assert(ToAttr && "Attribute should be created."); return ToAttr; } Decl *ASTImporter::GetAlreadyImportedOrNull(const Decl *FromD) const { auto Pos = ImportedDecls.find(FromD); if (Pos != ImportedDecls.end()) return Pos->second; else return nullptr; } TranslationUnitDecl *ASTImporter::GetFromTU(Decl *ToD) { auto FromDPos = ImportedFromDecls.find(ToD); if (FromDPos == ImportedFromDecls.end()) return nullptr; return FromDPos->second->getTranslationUnitDecl(); } Expected ASTImporter::Import(Decl *FromD) { if (!FromD) return nullptr; // Push FromD to the stack, and remove that when we return. ImportPath.push(FromD); auto ImportPathBuilder = llvm::make_scope_exit([this]() { ImportPath.pop(); }); // Check whether there was a previous failed import. // If yes return the existing error. if (auto Error = getImportDeclErrorIfAny(FromD)) return make_error(*Error); // Check whether we've already imported this declaration. Decl *ToD = GetAlreadyImportedOrNull(FromD); if (ToD) { // Already imported (possibly from another TU) and with an error. if (auto Error = SharedState->getImportDeclErrorIfAny(ToD)) { setImportDeclError(FromD, *Error); return make_error(*Error); } // If FromD has some updated flags after last import, apply it updateFlags(FromD, ToD); // If we encounter a cycle during an import then we save the relevant part // of the import path associated to the Decl. if (ImportPath.hasCycleAtBack()) SavedImportPaths[FromD].push_back(ImportPath.copyCycleAtBack()); return ToD; } // Import the declaration. ExpectedDecl ToDOrErr = ImportImpl(FromD); if (!ToDOrErr) { // Failed to import. auto Pos = ImportedDecls.find(FromD); if (Pos != ImportedDecls.end()) { // Import failed after the object was created. 
// Remove all references to it. auto *ToD = Pos->second; ImportedDecls.erase(Pos); // ImportedDecls and ImportedFromDecls are not symmetric. It may happen // (e.g. with namespaces) that several decls from the 'from' context are // mapped to the same decl in the 'to' context. If we removed entries // from the LookupTable here then we may end up removing them multiple // times. // The Lookuptable contains decls only which are in the 'to' context. // Remove from the Lookuptable only if it is *imported* into the 'to' // context (and do not remove it if it was added during the initial // traverse of the 'to' context). auto PosF = ImportedFromDecls.find(ToD); if (PosF != ImportedFromDecls.end()) { SharedState->removeDeclFromLookup(ToD); ImportedFromDecls.erase(PosF); } // FIXME: AST may contain remaining references to the failed object. // However, the ImportDeclErrors in the shared state contains all the // failed objects together with their error. } // Error encountered for the first time. // After takeError the error is not usable any more in ToDOrErr. // Get a copy of the error object (any more simple solution for this?). ImportError ErrOut; handleAllErrors(ToDOrErr.takeError(), [&ErrOut](const ImportError &E) { ErrOut = E; }); setImportDeclError(FromD, ErrOut); // Set the error for the mapped to Decl, which is in the "to" context. if (Pos != ImportedDecls.end()) SharedState->setImportDeclError(Pos->second, ErrOut); // Set the error for all nodes which have been created before we // recognized the error. for (const auto &Path : SavedImportPaths[FromD]) for (Decl *FromDi : Path) { setImportDeclError(FromDi, ErrOut); //FIXME Should we remove these Decls from ImportedDecls? // Set the error for the mapped to Decl, which is in the "to" context. auto Ii = ImportedDecls.find(FromDi); if (Ii != ImportedDecls.end()) SharedState->setImportDeclError(Ii->second, ErrOut); // FIXME Should we remove these Decls from the LookupTable, // and from ImportedFromDecls? } SavedImportPaths.erase(FromD); // Do not return ToDOrErr, error was taken out of it. return make_error(ErrOut); } ToD = *ToDOrErr; // FIXME: Handle the "already imported with error" case. We can get here // nullptr only if GetImportedOrCreateDecl returned nullptr (after a // previously failed create was requested). // Later GetImportedOrCreateDecl can be updated to return the error. if (!ToD) { auto Err = getImportDeclErrorIfAny(FromD); assert(Err); return make_error(*Err); } // We could import from the current TU without error. But previously we // already had imported a Decl as `ToD` from another TU (with another // ASTImporter object) and with an error. if (auto Error = SharedState->getImportDeclErrorIfAny(ToD)) { setImportDeclError(FromD, *Error); return make_error(*Error); } // Make sure that ImportImpl registered the imported decl. assert(ImportedDecls.count(FromD) != 0 && "Missing call to MapImported?"); // Notify subclasses. Imported(FromD, ToD); updateFlags(FromD, ToD); SavedImportPaths.erase(FromD); return ToDOrErr; } Expected ASTImporter::ImportContext(DeclContext *FromDC) { if (!FromDC) return FromDC; ExpectedDecl ToDCOrErr = Import(cast(FromDC)); if (!ToDCOrErr) return ToDCOrErr.takeError(); auto *ToDC = cast(*ToDCOrErr); // When we're using a record/enum/Objective-C class/protocol as a context, we // need it to have a definition. if (auto *ToRecord = dyn_cast(ToDC)) { auto *FromRecord = cast(FromDC); if (ToRecord->isCompleteDefinition()) return ToDC; // If FromRecord is not defined we need to force it to be. 
// Simply calling CompleteDecl(...) for a RecordDecl will break some cases // it will start the definition but we never finish it. // If there are base classes they won't be imported and we will // be missing anything that we inherit from those bases. if (FromRecord->getASTContext().getExternalSource() && !FromRecord->isCompleteDefinition()) FromRecord->getASTContext().getExternalSource()->CompleteType(FromRecord); if (FromRecord->isCompleteDefinition()) if (Error Err = ASTNodeImporter(*this).ImportDefinition( FromRecord, ToRecord, ASTNodeImporter::IDK_Basic)) return std::move(Err); } else if (auto *ToEnum = dyn_cast(ToDC)) { auto *FromEnum = cast(FromDC); if (ToEnum->isCompleteDefinition()) { // Do nothing. } else if (FromEnum->isCompleteDefinition()) { if (Error Err = ASTNodeImporter(*this).ImportDefinition( FromEnum, ToEnum, ASTNodeImporter::IDK_Basic)) return std::move(Err); } else { CompleteDecl(ToEnum); } } else if (auto *ToClass = dyn_cast(ToDC)) { auto *FromClass = cast(FromDC); if (ToClass->getDefinition()) { // Do nothing. } else if (ObjCInterfaceDecl *FromDef = FromClass->getDefinition()) { if (Error Err = ASTNodeImporter(*this).ImportDefinition( FromDef, ToClass, ASTNodeImporter::IDK_Basic)) return std::move(Err); } else { CompleteDecl(ToClass); } } else if (auto *ToProto = dyn_cast(ToDC)) { auto *FromProto = cast(FromDC); if (ToProto->getDefinition()) { // Do nothing. } else if (ObjCProtocolDecl *FromDef = FromProto->getDefinition()) { if (Error Err = ASTNodeImporter(*this).ImportDefinition( FromDef, ToProto, ASTNodeImporter::IDK_Basic)) return std::move(Err); } else { CompleteDecl(ToProto); } } return ToDC; } Expected ASTImporter::Import(Expr *FromE) { if (ExpectedStmt ToSOrErr = Import(cast_or_null(FromE))) return cast_or_null(*ToSOrErr); else return ToSOrErr.takeError(); } Expected ASTImporter::Import(Stmt *FromS) { if (!FromS) return nullptr; // Check whether we've already imported this statement. llvm::DenseMap::iterator Pos = ImportedStmts.find(FromS); if (Pos != ImportedStmts.end()) return Pos->second; // Import the statement. ASTNodeImporter Importer(*this); ExpectedStmt ToSOrErr = Importer.Visit(FromS); if (!ToSOrErr) return ToSOrErr; if (auto *ToE = dyn_cast(*ToSOrErr)) { auto *FromE = cast(FromS); // Copy ExprBitfields, which may not be handled in Expr subclasses // constructors. ToE->setValueKind(FromE->getValueKind()); ToE->setObjectKind(FromE->getObjectKind()); ToE->setDependence(FromE->getDependence()); } // Record the imported statement object. 
ImportedStmts[FromS] = *ToSOrErr; return ToSOrErr; } Expected ASTImporter::Import(NestedNameSpecifier *FromNNS) { if (!FromNNS) return nullptr; NestedNameSpecifier *Prefix = nullptr; if (Error Err = importInto(Prefix, FromNNS->getPrefix())) return std::move(Err); switch (FromNNS->getKind()) { case NestedNameSpecifier::Identifier: assert(FromNNS->getAsIdentifier() && "NNS should contain identifier."); return NestedNameSpecifier::Create(ToContext, Prefix, Import(FromNNS->getAsIdentifier())); case NestedNameSpecifier::Namespace: if (ExpectedDecl NSOrErr = Import(FromNNS->getAsNamespace())) { return NestedNameSpecifier::Create(ToContext, Prefix, cast(*NSOrErr)); } else return NSOrErr.takeError(); case NestedNameSpecifier::NamespaceAlias: if (ExpectedDecl NSADOrErr = Import(FromNNS->getAsNamespaceAlias())) return NestedNameSpecifier::Create(ToContext, Prefix, cast(*NSADOrErr)); else return NSADOrErr.takeError(); case NestedNameSpecifier::Global: return NestedNameSpecifier::GlobalSpecifier(ToContext); case NestedNameSpecifier::Super: if (ExpectedDecl RDOrErr = Import(FromNNS->getAsRecordDecl())) return NestedNameSpecifier::SuperSpecifier(ToContext, cast(*RDOrErr)); else return RDOrErr.takeError(); case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: if (Expected TyOrErr = Import(QualType(FromNNS->getAsType(), 0u))) { bool TSTemplate = FromNNS->getKind() == NestedNameSpecifier::TypeSpecWithTemplate; return NestedNameSpecifier::Create(ToContext, Prefix, TSTemplate, TyOrErr->getTypePtr()); } else { return TyOrErr.takeError(); } } llvm_unreachable("Invalid nested name specifier kind"); } Expected ASTImporter::Import(NestedNameSpecifierLoc FromNNS) { // Copied from NestedNameSpecifier mostly. SmallVector NestedNames; NestedNameSpecifierLoc NNS = FromNNS; // Push each of the nested-name-specifiers's onto a stack for // serialization in reverse order. while (NNS) { NestedNames.push_back(NNS); NNS = NNS.getPrefix(); } NestedNameSpecifierLocBuilder Builder; while (!NestedNames.empty()) { NNS = NestedNames.pop_back_val(); NestedNameSpecifier *Spec = nullptr; if (Error Err = importInto(Spec, NNS.getNestedNameSpecifier())) return std::move(Err); NestedNameSpecifier::SpecifierKind Kind = Spec->getKind(); SourceLocation ToLocalBeginLoc, ToLocalEndLoc; if (Kind != NestedNameSpecifier::Super) { if (Error Err = importInto(ToLocalBeginLoc, NNS.getLocalBeginLoc())) return std::move(Err); if (Kind != NestedNameSpecifier::Global) if (Error Err = importInto(ToLocalEndLoc, NNS.getLocalEndLoc())) return std::move(Err); } switch (Kind) { case NestedNameSpecifier::Identifier: Builder.Extend(getToContext(), Spec->getAsIdentifier(), ToLocalBeginLoc, ToLocalEndLoc); break; case NestedNameSpecifier::Namespace: Builder.Extend(getToContext(), Spec->getAsNamespace(), ToLocalBeginLoc, ToLocalEndLoc); break; case NestedNameSpecifier::NamespaceAlias: Builder.Extend(getToContext(), Spec->getAsNamespaceAlias(), ToLocalBeginLoc, ToLocalEndLoc); break; case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: { SourceLocation ToTLoc; if (Error Err = importInto(ToTLoc, NNS.getTypeLoc().getBeginLoc())) return std::move(Err); TypeSourceInfo *TSI = getToContext().getTrivialTypeSourceInfo( QualType(Spec->getAsType(), 0), ToTLoc); if (Kind == NestedNameSpecifier::TypeSpecWithTemplate) // ToLocalBeginLoc is here the location of the 'template' keyword. Builder.Extend(getToContext(), ToLocalBeginLoc, TSI->getTypeLoc(), ToLocalEndLoc); else // No location for 'template' keyword here. 
Builder.Extend(getToContext(), SourceLocation{}, TSI->getTypeLoc(), ToLocalEndLoc); break; } case NestedNameSpecifier::Global: Builder.MakeGlobal(getToContext(), ToLocalBeginLoc); break; case NestedNameSpecifier::Super: { auto ToSourceRangeOrErr = Import(NNS.getSourceRange()); if (!ToSourceRangeOrErr) return ToSourceRangeOrErr.takeError(); Builder.MakeSuper(getToContext(), Spec->getAsRecordDecl(), ToSourceRangeOrErr->getBegin(), ToSourceRangeOrErr->getEnd()); } } } return Builder.getWithLocInContext(getToContext()); } Expected ASTImporter::Import(TemplateName From) { switch (From.getKind()) { case TemplateName::Template: if (ExpectedDecl ToTemplateOrErr = Import(From.getAsTemplateDecl())) return TemplateName(cast(*ToTemplateOrErr)); else return ToTemplateOrErr.takeError(); case TemplateName::OverloadedTemplate: { OverloadedTemplateStorage *FromStorage = From.getAsOverloadedTemplate(); UnresolvedSet<2> ToTemplates; for (auto *I : *FromStorage) { if (auto ToOrErr = Import(I)) ToTemplates.addDecl(cast(*ToOrErr)); else return ToOrErr.takeError(); } return ToContext.getOverloadedTemplateName(ToTemplates.begin(), ToTemplates.end()); } case TemplateName::AssumedTemplate: { AssumedTemplateStorage *FromStorage = From.getAsAssumedTemplateName(); auto DeclNameOrErr = Import(FromStorage->getDeclName()); if (!DeclNameOrErr) return DeclNameOrErr.takeError(); return ToContext.getAssumedTemplateName(*DeclNameOrErr); } case TemplateName::QualifiedTemplate: { QualifiedTemplateName *QTN = From.getAsQualifiedTemplateName(); auto QualifierOrErr = Import(QTN->getQualifier()); if (!QualifierOrErr) return QualifierOrErr.takeError(); if (ExpectedDecl ToTemplateOrErr = Import(From.getAsTemplateDecl())) return ToContext.getQualifiedTemplateName( *QualifierOrErr, QTN->hasTemplateKeyword(), cast(*ToTemplateOrErr)); else return ToTemplateOrErr.takeError(); } case TemplateName::DependentTemplate: { DependentTemplateName *DTN = From.getAsDependentTemplateName(); auto QualifierOrErr = Import(DTN->getQualifier()); if (!QualifierOrErr) return QualifierOrErr.takeError(); if (DTN->isIdentifier()) { return ToContext.getDependentTemplateName(*QualifierOrErr, Import(DTN->getIdentifier())); } return ToContext.getDependentTemplateName(*QualifierOrErr, DTN->getOperator()); } case TemplateName::SubstTemplateTemplateParm: { SubstTemplateTemplateParmStorage *Subst = From.getAsSubstTemplateTemplateParm(); ExpectedDecl ParamOrErr = Import(Subst->getParameter()); if (!ParamOrErr) return ParamOrErr.takeError(); auto ReplacementOrErr = Import(Subst->getReplacement()); if (!ReplacementOrErr) return ReplacementOrErr.takeError(); return ToContext.getSubstTemplateTemplateParm( cast(*ParamOrErr), *ReplacementOrErr); } case TemplateName::SubstTemplateTemplateParmPack: { SubstTemplateTemplateParmPackStorage *SubstPack = From.getAsSubstTemplateTemplateParmPack(); ExpectedDecl ParamOrErr = Import(SubstPack->getParameterPack()); if (!ParamOrErr) return ParamOrErr.takeError(); ASTNodeImporter Importer(*this); auto ArgPackOrErr = Importer.ImportTemplateArgument(SubstPack->getArgumentPack()); if (!ArgPackOrErr) return ArgPackOrErr.takeError(); return ToContext.getSubstTemplateTemplateParmPack( cast(*ParamOrErr), *ArgPackOrErr); } } llvm_unreachable("Invalid template name kind"); } Expected ASTImporter::Import(SourceLocation FromLoc) { if (FromLoc.isInvalid()) return SourceLocation{}; SourceManager &FromSM = FromContext.getSourceManager(); bool IsBuiltin = FromSM.isWrittenInBuiltinFile(FromLoc); std::pair Decomposed = FromSM.getDecomposedLoc(FromLoc); 
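  // A SourceLocation is just an offset into one FileID's buffer, so the
  // location is mapped by importing the FileID it was written in and then
  // recomposing the same offset in the 'to' source manager, e.g.
  //   from: (FileID F, offset 120)  ->  to: (Import(F), offset 120).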
Expected ToFileIDOrErr = Import(Decomposed.first, IsBuiltin); if (!ToFileIDOrErr) return ToFileIDOrErr.takeError(); SourceManager &ToSM = ToContext.getSourceManager(); return ToSM.getComposedLoc(*ToFileIDOrErr, Decomposed.second); } Expected ASTImporter::Import(SourceRange FromRange) { SourceLocation ToBegin, ToEnd; if (Error Err = importInto(ToBegin, FromRange.getBegin())) return std::move(Err); if (Error Err = importInto(ToEnd, FromRange.getEnd())) return std::move(Err); return SourceRange(ToBegin, ToEnd); } Expected ASTImporter::Import(FileID FromID, bool IsBuiltin) { llvm::DenseMap::iterator Pos = ImportedFileIDs.find(FromID); if (Pos != ImportedFileIDs.end()) return Pos->second; SourceManager &FromSM = FromContext.getSourceManager(); SourceManager &ToSM = ToContext.getSourceManager(); const SrcMgr::SLocEntry &FromSLoc = FromSM.getSLocEntry(FromID); // Map the FromID to the "to" source manager. FileID ToID; if (FromSLoc.isExpansion()) { const SrcMgr::ExpansionInfo &FromEx = FromSLoc.getExpansion(); ExpectedSLoc ToSpLoc = Import(FromEx.getSpellingLoc()); if (!ToSpLoc) return ToSpLoc.takeError(); ExpectedSLoc ToExLocS = Import(FromEx.getExpansionLocStart()); if (!ToExLocS) return ToExLocS.takeError(); unsigned TokenLen = FromSM.getFileIDSize(FromID); SourceLocation MLoc; if (FromEx.isMacroArgExpansion()) { MLoc = ToSM.createMacroArgExpansionLoc(*ToSpLoc, *ToExLocS, TokenLen); } else { if (ExpectedSLoc ToExLocE = Import(FromEx.getExpansionLocEnd())) MLoc = ToSM.createExpansionLoc(*ToSpLoc, *ToExLocS, *ToExLocE, TokenLen, FromEx.isExpansionTokenRange()); else return ToExLocE.takeError(); } ToID = ToSM.getFileID(MLoc); } else { const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache(); if (!IsBuiltin && !Cache->BufferOverridden) { // Include location of this file. ExpectedSLoc ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc()); if (!ToIncludeLoc) return ToIncludeLoc.takeError(); // Every FileID that is not the main FileID needs to have a valid include // location so that the include chain points to the main FileID. When // importing the main FileID (which has no include location), we need to // create a fake include location in the main file to keep this property // intact. SourceLocation ToIncludeLocOrFakeLoc = *ToIncludeLoc; if (FromID == FromSM.getMainFileID()) ToIncludeLocOrFakeLoc = ToSM.getLocForStartOfFile(ToSM.getMainFileID()); if (Cache->OrigEntry && Cache->OrigEntry->getDir()) { // FIXME: We probably want to use getVirtualFile(), so we don't hit the // disk again // FIXME: We definitely want to re-use the existing MemoryBuffer, rather // than mmap the files several times. auto Entry = ToFileManager.getFile(Cache->OrigEntry->getName()); // FIXME: The filename may be a virtual name that does probably not // point to a valid file and we get no Entry here. In this case try with // the memory buffer below. if (Entry) ToID = ToSM.createFileID(*Entry, ToIncludeLocOrFakeLoc, FromSLoc.getFile().getFileCharacteristic()); } } if (ToID.isInvalid() || IsBuiltin) { // FIXME: We want to re-use the existing MemoryBuffer! bool Invalid = true; const llvm::MemoryBuffer *FromBuf = Cache->getBuffer(FromContext.getDiagnostics(), FromSM.getFileManager(), SourceLocation{}, &Invalid); if (!FromBuf || Invalid) // FIXME: Use a new error kind? 
        return llvm::make_error<ImportError>(ImportError::Unknown);

      std::unique_ptr<llvm::MemoryBuffer> ToBuf =
          llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
                                               FromBuf->getBufferIdentifier());
      ToID = ToSM.createFileID(std::move(ToBuf),
                               FromSLoc.getFile().getFileCharacteristic());
    }
  }

  assert(ToID.isValid() && "Unexpected invalid fileID was created.");

  ImportedFileIDs[FromID] = ToID;

  if (FileIDImportHandler)
    FileIDImportHandler(ToID, FromID);

  return ToID;
}

Expected<CXXCtorInitializer *> ASTImporter::Import(CXXCtorInitializer *From) {
  ExpectedExpr ToExprOrErr = Import(From->getInit());
  if (!ToExprOrErr)
    return ToExprOrErr.takeError();

  auto LParenLocOrErr = Import(From->getLParenLoc());
  if (!LParenLocOrErr)
    return LParenLocOrErr.takeError();

  auto RParenLocOrErr = Import(From->getRParenLoc());
  if (!RParenLocOrErr)
    return RParenLocOrErr.takeError();

  if (From->isBaseInitializer()) {
    auto ToTInfoOrErr = Import(From->getTypeSourceInfo());
    if (!ToTInfoOrErr)
      return ToTInfoOrErr.takeError();

    SourceLocation EllipsisLoc;
    if (From->isPackExpansion())
      if (Error Err = importInto(EllipsisLoc, From->getEllipsisLoc()))
        return std::move(Err);

    return new (ToContext) CXXCtorInitializer(
        ToContext, *ToTInfoOrErr, From->isBaseVirtual(), *LParenLocOrErr,
        *ToExprOrErr, *RParenLocOrErr, EllipsisLoc);
  } else if (From->isMemberInitializer()) {
    ExpectedDecl ToFieldOrErr = Import(From->getMember());
    if (!ToFieldOrErr)
      return ToFieldOrErr.takeError();

    auto MemberLocOrErr = Import(From->getMemberLocation());
    if (!MemberLocOrErr)
      return MemberLocOrErr.takeError();

    return new (ToContext) CXXCtorInitializer(
        ToContext, cast_or_null<FieldDecl>(*ToFieldOrErr), *MemberLocOrErr,
        *LParenLocOrErr, *ToExprOrErr, *RParenLocOrErr);
  } else if (From->isIndirectMemberInitializer()) {
    ExpectedDecl ToIFieldOrErr = Import(From->getIndirectMember());
    if (!ToIFieldOrErr)
      return ToIFieldOrErr.takeError();

    auto MemberLocOrErr = Import(From->getMemberLocation());
    if (!MemberLocOrErr)
      return MemberLocOrErr.takeError();

    return new (ToContext) CXXCtorInitializer(
        ToContext, cast_or_null<IndirectFieldDecl>(*ToIFieldOrErr),
        *MemberLocOrErr, *LParenLocOrErr, *ToExprOrErr, *RParenLocOrErr);
  } else if (From->isDelegatingInitializer()) {
    auto ToTInfoOrErr = Import(From->getTypeSourceInfo());
    if (!ToTInfoOrErr)
      return ToTInfoOrErr.takeError();

    return new (ToContext)
        CXXCtorInitializer(ToContext, *ToTInfoOrErr, *LParenLocOrErr,
                           *ToExprOrErr, *RParenLocOrErr);
  } else {
    // FIXME: assert?
return make_error(); } } Expected ASTImporter::Import(const CXXBaseSpecifier *BaseSpec) { auto Pos = ImportedCXXBaseSpecifiers.find(BaseSpec); if (Pos != ImportedCXXBaseSpecifiers.end()) return Pos->second; Expected ToSourceRange = Import(BaseSpec->getSourceRange()); if (!ToSourceRange) return ToSourceRange.takeError(); Expected ToTSI = Import(BaseSpec->getTypeSourceInfo()); if (!ToTSI) return ToTSI.takeError(); ExpectedSLoc ToEllipsisLoc = Import(BaseSpec->getEllipsisLoc()); if (!ToEllipsisLoc) return ToEllipsisLoc.takeError(); CXXBaseSpecifier *Imported = new (ToContext) CXXBaseSpecifier( *ToSourceRange, BaseSpec->isVirtual(), BaseSpec->isBaseOfClass(), BaseSpec->getAccessSpecifierAsWritten(), *ToTSI, *ToEllipsisLoc); ImportedCXXBaseSpecifiers[BaseSpec] = Imported; return Imported; } Error ASTImporter::ImportDefinition(Decl *From) { ExpectedDecl ToOrErr = Import(From); if (!ToOrErr) return ToOrErr.takeError(); Decl *To = *ToOrErr; auto *FromDC = cast(From); ASTNodeImporter Importer(*this); if (auto *ToRecord = dyn_cast(To)) { if (!ToRecord->getDefinition()) { return Importer.ImportDefinition( cast(FromDC), ToRecord, ASTNodeImporter::IDK_Everything); } } if (auto *ToEnum = dyn_cast(To)) { if (!ToEnum->getDefinition()) { return Importer.ImportDefinition( cast(FromDC), ToEnum, ASTNodeImporter::IDK_Everything); } } if (auto *ToIFace = dyn_cast(To)) { if (!ToIFace->getDefinition()) { return Importer.ImportDefinition( cast(FromDC), ToIFace, ASTNodeImporter::IDK_Everything); } } if (auto *ToProto = dyn_cast(To)) { if (!ToProto->getDefinition()) { return Importer.ImportDefinition( cast(FromDC), ToProto, ASTNodeImporter::IDK_Everything); } } return Importer.ImportDeclContext(FromDC, true); } Expected ASTImporter::Import(DeclarationName FromName) { if (!FromName) return DeclarationName{}; switch (FromName.getNameKind()) { case DeclarationName::Identifier: return DeclarationName(Import(FromName.getAsIdentifierInfo())); case DeclarationName::ObjCZeroArgSelector: case DeclarationName::ObjCOneArgSelector: case DeclarationName::ObjCMultiArgSelector: if (auto ToSelOrErr = Import(FromName.getObjCSelector())) return DeclarationName(*ToSelOrErr); else return ToSelOrErr.takeError(); case DeclarationName::CXXConstructorName: { if (auto ToTyOrErr = Import(FromName.getCXXNameType())) return ToContext.DeclarationNames.getCXXConstructorName( ToContext.getCanonicalType(*ToTyOrErr)); else return ToTyOrErr.takeError(); } case DeclarationName::CXXDestructorName: { if (auto ToTyOrErr = Import(FromName.getCXXNameType())) return ToContext.DeclarationNames.getCXXDestructorName( ToContext.getCanonicalType(*ToTyOrErr)); else return ToTyOrErr.takeError(); } case DeclarationName::CXXDeductionGuideName: { if (auto ToTemplateOrErr = Import(FromName.getCXXDeductionGuideTemplate())) return ToContext.DeclarationNames.getCXXDeductionGuideName( cast(*ToTemplateOrErr)); else return ToTemplateOrErr.takeError(); } case DeclarationName::CXXConversionFunctionName: { if (auto ToTyOrErr = Import(FromName.getCXXNameType())) return ToContext.DeclarationNames.getCXXConversionFunctionName( ToContext.getCanonicalType(*ToTyOrErr)); else return ToTyOrErr.takeError(); } case DeclarationName::CXXOperatorName: return ToContext.DeclarationNames.getCXXOperatorName( FromName.getCXXOverloadedOperator()); case DeclarationName::CXXLiteralOperatorName: return ToContext.DeclarationNames.getCXXLiteralOperatorName( Import(FromName.getCXXLiteralIdentifier())); case DeclarationName::CXXUsingDirective: // FIXME: STATICS! 
return DeclarationName::getUsingDirectiveName(); } llvm_unreachable("Invalid DeclarationName Kind!"); } IdentifierInfo *ASTImporter::Import(const IdentifierInfo *FromId) { if (!FromId) return nullptr; IdentifierInfo *ToId = &ToContext.Idents.get(FromId->getName()); if (!ToId->getBuiltinID() && FromId->getBuiltinID()) ToId->setBuiltinID(FromId->getBuiltinID()); return ToId; } Expected ASTImporter::Import(Selector FromSel) { if (FromSel.isNull()) return Selector{}; SmallVector Idents; Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(0))); for (unsigned I = 1, N = FromSel.getNumArgs(); I < N; ++I) Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(I))); return ToContext.Selectors.getSelector(FromSel.getNumArgs(), Idents.data()); } Expected ASTImporter::HandleNameConflict(DeclarationName Name, DeclContext *DC, unsigned IDNS, NamedDecl **Decls, unsigned NumDecls) { if (ODRHandling == ODRHandlingType::Conservative) // Report error at any name conflict. return make_error(ImportError::NameConflict); else // Allow to create the new Decl with the same name. return Name; } DiagnosticBuilder ASTImporter::ToDiag(SourceLocation Loc, unsigned DiagID) { if (LastDiagFromFrom) ToContext.getDiagnostics().notePriorDiagnosticFrom( FromContext.getDiagnostics()); LastDiagFromFrom = false; return ToContext.getDiagnostics().Report(Loc, DiagID); } DiagnosticBuilder ASTImporter::FromDiag(SourceLocation Loc, unsigned DiagID) { if (!LastDiagFromFrom) FromContext.getDiagnostics().notePriorDiagnosticFrom( ToContext.getDiagnostics()); LastDiagFromFrom = true; return FromContext.getDiagnostics().Report(Loc, DiagID); } void ASTImporter::CompleteDecl (Decl *D) { if (auto *ID = dyn_cast(D)) { if (!ID->getDefinition()) ID->startDefinition(); } else if (auto *PD = dyn_cast(D)) { if (!PD->getDefinition()) PD->startDefinition(); } else if (auto *TD = dyn_cast(D)) { if (!TD->getDefinition() && !TD->isBeingDefined()) { TD->startDefinition(); TD->setCompleteDefinition(true); } } else { assert(0 && "CompleteDecl called on a Decl that can't be completed"); } } Decl *ASTImporter::MapImported(Decl *From, Decl *To) { llvm::DenseMap::iterator Pos = ImportedDecls.find(From); assert((Pos == ImportedDecls.end() || Pos->second == To) && "Try to import an already imported Decl"); if (Pos != ImportedDecls.end()) return Pos->second; ImportedDecls[From] = To; // This mapping should be maintained only in this function. Therefore do not // check for additional consistency. ImportedFromDecls[To] = From; AddToLookupTable(To); return To; } llvm::Optional ASTImporter::getImportDeclErrorIfAny(Decl *FromD) const { auto Pos = ImportDeclErrors.find(FromD); if (Pos != ImportDeclErrors.end()) return Pos->second; else return Optional(); } void ASTImporter::setImportDeclError(Decl *From, ImportError Error) { auto InsertRes = ImportDeclErrors.insert({From, Error}); (void)InsertRes; // Either we set the error for the first time, or we already had set one and // now we want to set the same error. 
assert(InsertRes.second || InsertRes.first->second.Error == Error.Error); } bool ASTImporter::IsStructurallyEquivalent(QualType From, QualType To, bool Complain) { llvm::DenseMap::iterator Pos = ImportedTypes.find(From.getTypePtr()); if (Pos != ImportedTypes.end()) { if (ExpectedType ToFromOrErr = Import(From)) { if (ToContext.hasSameType(*ToFromOrErr, To)) return true; } else { llvm::consumeError(ToFromOrErr.takeError()); } } StructuralEquivalenceContext Ctx(FromContext, ToContext, NonEquivalentDecls, getStructuralEquivalenceKind(*this), false, Complain); return Ctx.IsEquivalent(From, To); } diff --git a/contrib/llvm-project/clang/lib/AST/Type.cpp b/contrib/llvm-project/clang/lib/AST/Type.cpp index 10a6a2610130..ce011eddbd2e 100644 --- a/contrib/llvm-project/clang/lib/AST/Type.cpp +++ b/contrib/llvm-project/clang/lib/AST/Type.cpp @@ -1,4343 +1,4346 @@ //===- Type.cpp - Type representation and manipulation --------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements type-related functionality. // //===----------------------------------------------------------------------===// #include "clang/AST/Type.h" #include "Linkage.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/AST/CharUnits.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclBase.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DependenceFlags.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/NonTrivialTypeVisitor.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/TypeVisitor.h" #include "clang/Basic/AddressSpaces.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Linkage.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TargetCXXABI.h" #include "clang/Basic/TargetInfo.h" #include "clang/Basic/Visibility.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/None.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include #include #include #include #include using namespace clang; bool Qualifiers::isStrictSupersetOf(Qualifiers Other) const { return (*this != Other) && // CVR qualifiers superset (((Mask & CVRMask) | (Other.Mask & CVRMask)) == (Mask & CVRMask)) && // ObjC GC qualifiers superset ((getObjCGCAttr() == Other.getObjCGCAttr()) || (hasObjCGCAttr() && !Other.hasObjCGCAttr())) && // Address space superset. ((getAddressSpace() == Other.getAddressSpace()) || (hasAddressSpace()&& !Other.hasAddressSpace())) && // Lifetime qualifier superset. 
         // Lifetime qualifier superset.
         ((getObjCLifetime() == Other.getObjCLifetime()) ||
          (hasObjCLifetime() && !Other.hasObjCLifetime()));
}

const IdentifierInfo* QualType::getBaseTypeIdentifier() const {
  const Type* ty = getTypePtr();
  NamedDecl *ND = nullptr;
  if (ty->isPointerType() || ty->isReferenceType())
    return ty->getPointeeType().getBaseTypeIdentifier();
  else if (ty->isRecordType())
    ND = ty->castAs<RecordType>()->getDecl();
  else if (ty->isEnumeralType())
    ND = ty->castAs<EnumType>()->getDecl();
  else if (ty->getTypeClass() == Type::Typedef)
    ND = ty->castAs<TypedefType>()->getDecl();
  else if (ty->isArrayType())
    return ty->castAsArrayTypeUnsafe()->
        getElementType().getBaseTypeIdentifier();

  if (ND)
    return ND->getIdentifier();
  return nullptr;
}

bool QualType::mayBeDynamicClass() const {
  const auto *ClassDecl = getTypePtr()->getPointeeCXXRecordDecl();
  return ClassDecl && ClassDecl->mayBeDynamicClass();
}

bool QualType::mayBeNotDynamicClass() const {
  const auto *ClassDecl = getTypePtr()->getPointeeCXXRecordDecl();
  return !ClassDecl || ClassDecl->mayBeNonDynamicClass();
}

bool QualType::isConstant(QualType T, const ASTContext &Ctx) {
  if (T.isConstQualified())
    return true;

  if (const ArrayType *AT = Ctx.getAsArrayType(T))
    return AT->getElementType().isConstant(Ctx);

  return T.getAddressSpace() == LangAS::opencl_constant;
}

// C++ [temp.dep.type]p1:
//   A type is dependent if it is...
//     - an array type constructed from any dependent type or whose
//       size is specified by a constant expression that is
//       value-dependent,
ArrayType::ArrayType(TypeClass tc, QualType et, QualType can,
                     ArraySizeModifier sm, unsigned tq, const Expr *sz)
    // Note, we need to check for DependentSizedArrayType explicitly here
    // because we use a DependentSizedArrayType with no size expression as the
    // type of a dependent array of unknown bound with a dependent braced
    // initializer:
    //
    //   template<int ...N> int arr[] = {N...};
    : Type(tc, can,
           et->getDependence() |
               (sz ? toTypeDependence(
                         turnValueToTypeDependence(sz->getDependence()))
                   : TypeDependence::None) |
               (tc == VariableArray ? TypeDependence::VariablyModified
                                    : TypeDependence::None) |
               (tc == DependentSizedArray
                    ? TypeDependence::DependentInstantiation
                    : TypeDependence::None)),
      ElementType(et) {
  ArrayTypeBits.IndexTypeQuals = tq;
  ArrayTypeBits.SizeModifier = sm;
}

unsigned ConstantArrayType::getNumAddressingBits(const ASTContext &Context,
                                                 QualType ElementType,
                                                 const llvm::APInt &NumElements) {
  uint64_t ElementSize = Context.getTypeSizeInChars(ElementType).getQuantity();

  // Fast path the common cases so we can avoid the conservative computation
  // below, which in common cases allocates "large" APSInt values, which are
  // slow.

  // If the element size is a power of 2, we can directly compute the
  // additional number of addressing bits beyond those required for the
  // element count.
  if (llvm::isPowerOf2_64(ElementSize)) {
    return NumElements.getActiveBits() + llvm::Log2_64(ElementSize);
  }

  // If both the element count and element size fit in 32-bits, we can do the
  // computation directly in 64-bits.
  if ((ElementSize >> 32) == 0 && NumElements.getBitWidth() <= 64 &&
      (NumElements.getZExtValue() >> 32) == 0) {
    uint64_t TotalSize = NumElements.getZExtValue() * ElementSize;
    return 64 - llvm::countLeadingZeros(TotalSize);
  }

  // Otherwise, use APSInt to handle arbitrary sized values.
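  // The count is widened to at least twice the width of size_t before the
  // multiplication, so NumElements * ElementSize cannot overflow; the result
  // is the number of bits actually needed to address the total size.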
llvm::APSInt SizeExtended(NumElements, true); unsigned SizeTypeBits = Context.getTypeSize(Context.getSizeType()); SizeExtended = SizeExtended.extend(std::max(SizeTypeBits, SizeExtended.getBitWidth()) * 2); llvm::APSInt TotalSize(llvm::APInt(SizeExtended.getBitWidth(), ElementSize)); TotalSize *= SizeExtended; return TotalSize.getActiveBits(); } unsigned ConstantArrayType::getMaxSizeBits(const ASTContext &Context) { unsigned Bits = Context.getTypeSize(Context.getSizeType()); // Limit the number of bits in size_t so that maximal bit size fits 64 bit // integer (see PR8256). We can do this as currently there is no hardware // that supports full 64-bit virtual space. if (Bits > 61) Bits = 61; return Bits; } void ConstantArrayType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType ET, const llvm::APInt &ArraySize, const Expr *SizeExpr, ArraySizeModifier SizeMod, unsigned TypeQuals) { ID.AddPointer(ET.getAsOpaquePtr()); ID.AddInteger(ArraySize.getZExtValue()); ID.AddInteger(SizeMod); ID.AddInteger(TypeQuals); ID.AddBoolean(SizeExpr != 0); if (SizeExpr) SizeExpr->Profile(ID, Context, true); } DependentSizedArrayType::DependentSizedArrayType(const ASTContext &Context, QualType et, QualType can, Expr *e, ArraySizeModifier sm, unsigned tq, SourceRange brackets) : ArrayType(DependentSizedArray, et, can, sm, tq, e), Context(Context), SizeExpr((Stmt*) e), Brackets(brackets) {} void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType ET, ArraySizeModifier SizeMod, unsigned TypeQuals, Expr *E) { ID.AddPointer(ET.getAsOpaquePtr()); ID.AddInteger(SizeMod); ID.AddInteger(TypeQuals); E->Profile(ID, Context, true); } DependentVectorType::DependentVectorType(const ASTContext &Context, QualType ElementType, QualType CanonType, Expr *SizeExpr, SourceLocation Loc, VectorType::VectorKind VecKind) : Type(DependentVector, CanonType, TypeDependence::DependentInstantiation | ElementType->getDependence() | (SizeExpr ? toTypeDependence(SizeExpr->getDependence()) : TypeDependence::None)), Context(Context), ElementType(ElementType), SizeExpr(SizeExpr), Loc(Loc) { VectorTypeBits.VecKind = VecKind; } void DependentVectorType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType ElementType, const Expr *SizeExpr, VectorType::VectorKind VecKind) { ID.AddPointer(ElementType.getAsOpaquePtr()); ID.AddInteger(VecKind); SizeExpr->Profile(ID, Context, true); } DependentSizedExtVectorType::DependentSizedExtVectorType( const ASTContext &Context, QualType ElementType, QualType can, Expr *SizeExpr, SourceLocation loc) : Type(DependentSizedExtVector, can, TypeDependence::DependentInstantiation | ElementType->getDependence() | (SizeExpr ? toTypeDependence(SizeExpr->getDependence()) : TypeDependence::None)), Context(Context), SizeExpr(SizeExpr), ElementType(ElementType), loc(loc) { } void DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType ElementType, Expr *SizeExpr) { ID.AddPointer(ElementType.getAsOpaquePtr()); SizeExpr->Profile(ID, Context, true); } DependentAddressSpaceType::DependentAddressSpaceType(const ASTContext &Context, QualType PointeeType, QualType can, Expr *AddrSpaceExpr, SourceLocation loc) : Type(DependentAddressSpace, can, TypeDependence::DependentInstantiation | PointeeType->getDependence() | (AddrSpaceExpr ? 
toTypeDependence(AddrSpaceExpr->getDependence()) : TypeDependence::None)), Context(Context), AddrSpaceExpr(AddrSpaceExpr), PointeeType(PointeeType), loc(loc) {} void DependentAddressSpaceType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType PointeeType, Expr *AddrSpaceExpr) { ID.AddPointer(PointeeType.getAsOpaquePtr()); AddrSpaceExpr->Profile(ID, Context, true); } MatrixType::MatrixType(TypeClass tc, QualType matrixType, QualType canonType, const Expr *RowExpr, const Expr *ColumnExpr) : Type(tc, canonType, (RowExpr ? (matrixType->getDependence() | TypeDependence::Dependent | TypeDependence::Instantiation | (matrixType->isVariablyModifiedType() ? TypeDependence::VariablyModified : TypeDependence::None) | (matrixType->containsUnexpandedParameterPack() || (RowExpr && RowExpr->containsUnexpandedParameterPack()) || (ColumnExpr && ColumnExpr->containsUnexpandedParameterPack()) ? TypeDependence::UnexpandedPack : TypeDependence::None)) : matrixType->getDependence())), ElementType(matrixType) {} ConstantMatrixType::ConstantMatrixType(QualType matrixType, unsigned nRows, unsigned nColumns, QualType canonType) : ConstantMatrixType(ConstantMatrix, matrixType, nRows, nColumns, canonType) {} ConstantMatrixType::ConstantMatrixType(TypeClass tc, QualType matrixType, unsigned nRows, unsigned nColumns, QualType canonType) : MatrixType(tc, matrixType, canonType) { ConstantMatrixTypeBits.NumRows = nRows; ConstantMatrixTypeBits.NumColumns = nColumns; } DependentSizedMatrixType::DependentSizedMatrixType( const ASTContext &CTX, QualType ElementType, QualType CanonicalType, Expr *RowExpr, Expr *ColumnExpr, SourceLocation loc) : MatrixType(DependentSizedMatrix, ElementType, CanonicalType, RowExpr, ColumnExpr), Context(CTX), RowExpr(RowExpr), ColumnExpr(ColumnExpr), loc(loc) {} void DependentSizedMatrixType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &CTX, QualType ElementType, Expr *RowExpr, Expr *ColumnExpr) { ID.AddPointer(ElementType.getAsOpaquePtr()); RowExpr->Profile(ID, CTX, true); ColumnExpr->Profile(ID, CTX, true); } VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType, VectorKind vecKind) : VectorType(Vector, vecType, nElements, canonType, vecKind) {} VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements, QualType canonType, VectorKind vecKind) : Type(tc, canonType, vecType->getDependence()), ElementType(vecType) { VectorTypeBits.VecKind = vecKind; VectorTypeBits.NumElements = nElements; } ExtIntType::ExtIntType(bool IsUnsigned, unsigned NumBits) : Type(ExtInt, QualType{}, TypeDependence::None), IsUnsigned(IsUnsigned), NumBits(NumBits) {} DependentExtIntType::DependentExtIntType(const ASTContext &Context, bool IsUnsigned, Expr *NumBitsExpr) : Type(DependentExtInt, QualType{}, toTypeDependence(NumBitsExpr->getDependence())), Context(Context), ExprAndUnsigned(NumBitsExpr, IsUnsigned) {} bool DependentExtIntType::isUnsigned() const { return ExprAndUnsigned.getInt(); } clang::Expr *DependentExtIntType::getNumBitsExpr() const { return ExprAndUnsigned.getPointer(); } void DependentExtIntType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool IsUnsigned, Expr *NumBitsExpr) { ID.AddBoolean(IsUnsigned); NumBitsExpr->Profile(ID, Context, true); } /// getArrayElementTypeNoTypeQual - If this is an array type, return the /// element type of the array, potentially with type qualifiers missing. /// This method should never be used when type qualifiers are meaningful. 
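/// (When the element qualifiers do matter, go through
/// ASTContext::getAsArrayType() instead, as QualType::isConstant() above
/// does.)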
const Type *Type::getArrayElementTypeNoTypeQual() const { // If this is directly an array type, return it. if (const auto *ATy = dyn_cast(this)) return ATy->getElementType().getTypePtr(); // If the canonical form of this type isn't the right kind, reject it. if (!isa(CanonicalType)) return nullptr; // If this is a typedef for an array type, strip the typedef off without // losing all typedef information. return cast(getUnqualifiedDesugaredType()) ->getElementType().getTypePtr(); } /// getDesugaredType - Return the specified type with any "sugar" removed from /// the type. This takes off typedefs, typeof's etc. If the outer level of /// the type is already concrete, it returns it unmodified. This is similar /// to getting the canonical type, but it doesn't remove *all* typedefs. For /// example, it returns "T*" as "T*", (not as "int*"), because the pointer is /// concrete. QualType QualType::getDesugaredType(QualType T, const ASTContext &Context) { SplitQualType split = getSplitDesugaredType(T); return Context.getQualifiedType(split.Ty, split.Quals); } QualType QualType::getSingleStepDesugaredTypeImpl(QualType type, const ASTContext &Context) { SplitQualType split = type.split(); QualType desugar = split.Ty->getLocallyUnqualifiedSingleStepDesugaredType(); return Context.getQualifiedType(desugar, split.Quals); } // Check that no type class is polymorphic. LLVM style RTTI should be used // instead. If absolutely needed an exception can still be added here by // defining the appropriate macro (but please don't do this). #define TYPE(CLASS, BASE) \ static_assert(!std::is_polymorphic::value, \ #CLASS "Type should not be polymorphic!"); #include "clang/AST/TypeNodes.inc" // Check that no type class has a non-trival destructor. Types are // allocated with the BumpPtrAllocator from ASTContext and therefore // their destructor is not executed. // // FIXME: ConstantArrayType is not trivially destructible because of its // APInt member. It should be replaced in favor of ASTContext allocation. #define TYPE(CLASS, BASE) \ static_assert(std::is_trivially_destructible::value || \ std::is_same::value, \ #CLASS "Type should be trivially destructible!"); #include "clang/AST/TypeNodes.inc" QualType Type::getLocallyUnqualifiedSingleStepDesugaredType() const { switch (getTypeClass()) { #define ABSTRACT_TYPE(Class, Parent) #define TYPE(Class, Parent) \ case Type::Class: { \ const auto *ty = cast(this); \ if (!ty->isSugared()) return QualType(ty, 0); \ return ty->desugar(); \ } #include "clang/AST/TypeNodes.inc" } llvm_unreachable("bad type kind!"); } SplitQualType QualType::getSplitDesugaredType(QualType T) { QualifierCollector Qs; QualType Cur = T; while (true) { const Type *CurTy = Qs.strip(Cur); switch (CurTy->getTypeClass()) { #define ABSTRACT_TYPE(Class, Parent) #define TYPE(Class, Parent) \ case Type::Class: { \ const auto *Ty = cast(CurTy); \ if (!Ty->isSugared()) \ return SplitQualType(Ty, Qs); \ Cur = Ty->desugar(); \ break; \ } #include "clang/AST/TypeNodes.inc" } } } SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) { SplitQualType split = type.split(); // All the qualifiers we've seen so far. Qualifiers quals = split.Quals; // The last type node we saw with any nodes inside it. const Type *lastTypeWithQuals = split.Ty; while (true) { QualType next; // Do a single-step desugar, aborting the loop if the type isn't // sugared. 
switch (split.Ty->getTypeClass()) { #define ABSTRACT_TYPE(Class, Parent) #define TYPE(Class, Parent) \ case Type::Class: { \ const auto *ty = cast(split.Ty); \ if (!ty->isSugared()) goto done; \ next = ty->desugar(); \ break; \ } #include "clang/AST/TypeNodes.inc" } // Otherwise, split the underlying type. If that yields qualifiers, // update the information. split = next.split(); if (!split.Quals.empty()) { lastTypeWithQuals = split.Ty; quals.addConsistentQualifiers(split.Quals); } } done: return SplitQualType(lastTypeWithQuals, quals); } QualType QualType::IgnoreParens(QualType T) { // FIXME: this seems inherently un-qualifiers-safe. while (const auto *PT = T->getAs()) T = PT->getInnerType(); return T; } /// This will check for a T (which should be a Type which can act as /// sugar, such as a TypedefType) by removing any existing sugar until it /// reaches a T or a non-sugared type. template static const T *getAsSugar(const Type *Cur) { while (true) { if (const auto *Sugar = dyn_cast(Cur)) return Sugar; switch (Cur->getTypeClass()) { #define ABSTRACT_TYPE(Class, Parent) #define TYPE(Class, Parent) \ case Type::Class: { \ const auto *Ty = cast(Cur); \ if (!Ty->isSugared()) return 0; \ Cur = Ty->desugar().getTypePtr(); \ break; \ } #include "clang/AST/TypeNodes.inc" } } } template <> const TypedefType *Type::getAs() const { return getAsSugar(this); } template <> const TemplateSpecializationType *Type::getAs() const { return getAsSugar(this); } template <> const AttributedType *Type::getAs() const { return getAsSugar(this); } /// getUnqualifiedDesugaredType - Pull any qualifiers and syntactic /// sugar off the given type. This should produce an object of the /// same dynamic type as the canonical type. const Type *Type::getUnqualifiedDesugaredType() const { const Type *Cur = this; while (true) { switch (Cur->getTypeClass()) { #define ABSTRACT_TYPE(Class, Parent) #define TYPE(Class, Parent) \ case Class: { \ const auto *Ty = cast(Cur); \ if (!Ty->isSugared()) return Cur; \ Cur = Ty->desugar().getTypePtr(); \ break; \ } #include "clang/AST/TypeNodes.inc" } } } bool Type::isClassType() const { if (const auto *RT = getAs()) return RT->getDecl()->isClass(); return false; } bool Type::isStructureType() const { if (const auto *RT = getAs()) return RT->getDecl()->isStruct(); return false; } bool Type::isObjCBoxableRecordType() const { if (const auto *RT = getAs()) return RT->getDecl()->hasAttr(); return false; } bool Type::isInterfaceType() const { if (const auto *RT = getAs()) return RT->getDecl()->isInterface(); return false; } bool Type::isStructureOrClassType() const { if (const auto *RT = getAs()) { RecordDecl *RD = RT->getDecl(); return RD->isStruct() || RD->isClass() || RD->isInterface(); } return false; } bool Type::isVoidPointerType() const { if (const auto *PT = getAs()) return PT->getPointeeType()->isVoidType(); return false; } bool Type::isUnionType() const { if (const auto *RT = getAs()) return RT->getDecl()->isUnion(); return false; } bool Type::isComplexType() const { if (const auto *CT = dyn_cast(CanonicalType)) return CT->getElementType()->isFloatingType(); return false; } bool Type::isComplexIntegerType() const { // Check for GCC complex integer extension. 
  return getAsComplexIntegerType();
}

bool Type::isScopedEnumeralType() const {
  if (const auto *ET = getAs<EnumType>())
    return ET->getDecl()->isScoped();
  return false;
}

const ComplexType *Type::getAsComplexIntegerType() const {
  if (const auto *Complex = getAs<ComplexType>())
    if (Complex->getElementType()->isIntegerType())
      return Complex;
  return nullptr;
}

QualType Type::getPointeeType() const {
  if (const auto *PT = getAs<PointerType>())
    return PT->getPointeeType();
  if (const auto *OPT = getAs<ObjCObjectPointerType>())
    return OPT->getPointeeType();
  if (const auto *BPT = getAs<BlockPointerType>())
    return BPT->getPointeeType();
  if (const auto *RT = getAs<ReferenceType>())
    return RT->getPointeeType();
  if (const auto *MPT = getAs<MemberPointerType>())
    return MPT->getPointeeType();
  if (const auto *DT = getAs<DecayedType>())
    return DT->getPointeeType();
  return {};
}

const RecordType *Type::getAsStructureType() const {
  // If this is directly a structure type, return it.
  if (const auto *RT = dyn_cast<RecordType>(this)) {
    if (RT->getDecl()->isStruct())
      return RT;
  }

  // If the canonical form of this type isn't the right kind, reject it.
  if (const auto *RT = dyn_cast<RecordType>(CanonicalType)) {
    if (!RT->getDecl()->isStruct())
      return nullptr;

    // If this is a typedef for a structure type, strip the typedef off without
    // losing all typedef information.
    return cast<RecordType>(getUnqualifiedDesugaredType());
  }
  return nullptr;
}

const RecordType *Type::getAsUnionType() const {
  // If this is directly a union type, return it.
  if (const auto *RT = dyn_cast<RecordType>(this)) {
    if (RT->getDecl()->isUnion())
      return RT;
  }

  // If the canonical form of this type isn't the right kind, reject it.
  if (const auto *RT = dyn_cast<RecordType>(CanonicalType)) {
    if (!RT->getDecl()->isUnion())
      return nullptr;

    // If this is a typedef for a union type, strip the typedef off without
    // losing all typedef information.
    return cast<RecordType>(getUnqualifiedDesugaredType());
  }

  return nullptr;
}

bool Type::isObjCIdOrObjectKindOfType(const ASTContext &ctx,
                                      const ObjCObjectType *&bound) const {
  bound = nullptr;

  const auto *OPT = getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;

  // Easy case: id.
  if (OPT->isObjCIdType())
    return true;

  // If it's not a __kindof type, reject it now.
  if (!OPT->isKindOfType())
    return false;

  // If it's Class or qualified Class, it's not an object type.
  if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType())
    return false;

  // Figure out the type bound for the __kindof type.
  bound = OPT->getObjectType()->stripObjCKindOfTypeAndQuals(ctx)
            ->getAs<ObjCObjectType>();
  return true;
}

bool Type::isObjCClassOrClassKindOfType() const {
  const auto *OPT = getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;

  // Easy case: Class.
  if (OPT->isObjCClassType())
    return true;

  // If it's not a __kindof type, reject it now.
  if (!OPT->isKindOfType())
    return false;

  // If it's Class or qualified Class, it's a class __kindof type.
return OPT->isObjCClassType() || OPT->isObjCQualifiedClassType(); } ObjCTypeParamType::ObjCTypeParamType(const ObjCTypeParamDecl *D, QualType can, ArrayRef protocols) : Type(ObjCTypeParam, can, can->getDependence() & ~TypeDependence::UnexpandedPack), OTPDecl(const_cast(D)) { initialize(protocols); } ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base, ArrayRef typeArgs, ArrayRef protocols, bool isKindOf) : Type(ObjCObject, Canonical, Base->getDependence()), BaseType(Base) { ObjCObjectTypeBits.IsKindOf = isKindOf; ObjCObjectTypeBits.NumTypeArgs = typeArgs.size(); assert(getTypeArgsAsWritten().size() == typeArgs.size() && "bitfield overflow in type argument count"); if (!typeArgs.empty()) memcpy(getTypeArgStorage(), typeArgs.data(), typeArgs.size() * sizeof(QualType)); for (auto typeArg : typeArgs) { addDependence(typeArg->getDependence() & ~TypeDependence::VariablyModified); } // Initialize the protocol qualifiers. The protocol storage is known // after we set number of type arguments. initialize(protocols); } bool ObjCObjectType::isSpecialized() const { // If we have type arguments written here, the type is specialized. if (ObjCObjectTypeBits.NumTypeArgs > 0) return true; // Otherwise, check whether the base type is specialized. if (const auto objcObject = getBaseType()->getAs()) { // Terminate when we reach an interface type. if (isa(objcObject)) return false; return objcObject->isSpecialized(); } // Not specialized. return false; } ArrayRef ObjCObjectType::getTypeArgs() const { // We have type arguments written on this type. if (isSpecializedAsWritten()) return getTypeArgsAsWritten(); // Look at the base type, which might have type arguments. if (const auto objcObject = getBaseType()->getAs()) { // Terminate when we reach an interface type. if (isa(objcObject)) return {}; return objcObject->getTypeArgs(); } // No type arguments. return {}; } bool ObjCObjectType::isKindOfType() const { if (isKindOfTypeAsWritten()) return true; // Look at the base type, which might have type arguments. if (const auto objcObject = getBaseType()->getAs()) { // Terminate when we reach an interface type. if (isa(objcObject)) return false; return objcObject->isKindOfType(); } // Not a "__kindof" type. return false; } QualType ObjCObjectType::stripObjCKindOfTypeAndQuals( const ASTContext &ctx) const { if (!isKindOfType() && qual_empty()) return QualType(this, 0); // Recursively strip __kindof. SplitQualType splitBaseType = getBaseType().split(); QualType baseType(splitBaseType.Ty, 0); if (const auto *baseObj = splitBaseType.Ty->getAs()) baseType = baseObj->stripObjCKindOfTypeAndQuals(ctx); return ctx.getObjCObjectType(ctx.getQualifiedType(baseType, splitBaseType.Quals), getTypeArgsAsWritten(), /*protocols=*/{}, /*isKindOf=*/false); } const ObjCObjectPointerType *ObjCObjectPointerType::stripObjCKindOfTypeAndQuals( const ASTContext &ctx) const { if (!isKindOfType() && qual_empty()) return this; QualType obj = getObjectType()->stripObjCKindOfTypeAndQuals(ctx); return ctx.getObjCObjectPointerType(obj)->castAs(); } namespace { /// Visitor used to perform a simple type transformation that does not change /// the semantics of the type. template struct SimpleTransformVisitor : public TypeVisitor { ASTContext &Ctx; QualType recurse(QualType type) { // Split out the qualifiers from the type. SplitQualType splitType = type.split(); // Visit the type itself. 
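// --------------------------------------------------------------------------
// Illustrative aside (not part of this patch): the rebuild-only-if-changed
// discipline that SimpleTransformVisitor's Visit*Type methods follow below,
// shown on a made-up shared cons-list instead of QualType. Returning the
// original node when no child changed is what lets unmodified subtrees stay
// shared, mirroring the getAsOpaquePtr() comparisons in the visitors.
// --------------------------------------------------------------------------
#include <memory>
#include <utility>

struct List {
  int head;
  std::shared_ptr<const List> tail;
};

// Toy transform: bump negative values by one. Recurse into the tail first,
// then reuse the input node whenever neither the head nor the transformed
// tail differs from the original.
std::shared_ptr<const List> addOneToNegatives(std::shared_ptr<const List> n) {
  if (!n)
    return n;
  std::shared_ptr<const List> newTail = addOneToNegatives(n->tail);
  int newHead = n->head < 0 ? n->head + 1 : n->head;
  if (newHead == n->head && newTail == n->tail)
    return n; // nothing changed: hand the original node back
  return std::make_shared<const List>(List{newHead, std::move(newTail)});
}

int main() {
  auto l = std::make_shared<const List>(
      List{1, std::make_shared<const List>(List{2, nullptr})});
  return addOneToNegatives(l) == l ? 0 : 1; // unchanged list stays shared
}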
QualType result = static_cast(this)->Visit(splitType.Ty); if (result.isNull()) return result; // Reconstruct the transformed type by applying the local qualifiers // from the split type. return Ctx.getQualifiedType(result, splitType.Quals); } public: explicit SimpleTransformVisitor(ASTContext &ctx) : Ctx(ctx) {} // None of the clients of this transformation can occur where // there are dependent types, so skip dependent types. #define TYPE(Class, Base) #define DEPENDENT_TYPE(Class, Base) \ QualType Visit##Class##Type(const Class##Type *T) { return QualType(T, 0); } #include "clang/AST/TypeNodes.inc" #define TRIVIAL_TYPE_CLASS(Class) \ QualType Visit##Class##Type(const Class##Type *T) { return QualType(T, 0); } #define SUGARED_TYPE_CLASS(Class) \ QualType Visit##Class##Type(const Class##Type *T) { \ if (!T->isSugared()) \ return QualType(T, 0); \ QualType desugaredType = recurse(T->desugar()); \ if (desugaredType.isNull()) \ return {}; \ if (desugaredType.getAsOpaquePtr() == T->desugar().getAsOpaquePtr()) \ return QualType(T, 0); \ return desugaredType; \ } TRIVIAL_TYPE_CLASS(Builtin) QualType VisitComplexType(const ComplexType *T) { QualType elementType = recurse(T->getElementType()); if (elementType.isNull()) return {}; if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getComplexType(elementType); } QualType VisitPointerType(const PointerType *T) { QualType pointeeType = recurse(T->getPointeeType()); if (pointeeType.isNull()) return {}; if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getPointerType(pointeeType); } QualType VisitBlockPointerType(const BlockPointerType *T) { QualType pointeeType = recurse(T->getPointeeType()); if (pointeeType.isNull()) return {}; if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getBlockPointerType(pointeeType); } QualType VisitLValueReferenceType(const LValueReferenceType *T) { QualType pointeeType = recurse(T->getPointeeTypeAsWritten()); if (pointeeType.isNull()) return {}; if (pointeeType.getAsOpaquePtr() == T->getPointeeTypeAsWritten().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getLValueReferenceType(pointeeType, T->isSpelledAsLValue()); } QualType VisitRValueReferenceType(const RValueReferenceType *T) { QualType pointeeType = recurse(T->getPointeeTypeAsWritten()); if (pointeeType.isNull()) return {}; if (pointeeType.getAsOpaquePtr() == T->getPointeeTypeAsWritten().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getRValueReferenceType(pointeeType); } QualType VisitMemberPointerType(const MemberPointerType *T) { QualType pointeeType = recurse(T->getPointeeType()); if (pointeeType.isNull()) return {}; if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getMemberPointerType(pointeeType, T->getClass()); } QualType VisitConstantArrayType(const ConstantArrayType *T) { QualType elementType = recurse(T->getElementType()); if (elementType.isNull()) return {}; if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getConstantArrayType(elementType, T->getSize(), T->getSizeExpr(), T->getSizeModifier(), T->getIndexTypeCVRQualifiers()); } QualType VisitVariableArrayType(const VariableArrayType *T) { QualType elementType = recurse(T->getElementType()); if (elementType.isNull()) return {}; if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr()) return 
QualType(T, 0); return Ctx.getVariableArrayType(elementType, T->getSizeExpr(), T->getSizeModifier(), T->getIndexTypeCVRQualifiers(), T->getBracketsRange()); } QualType VisitIncompleteArrayType(const IncompleteArrayType *T) { QualType elementType = recurse(T->getElementType()); if (elementType.isNull()) return {}; if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getIncompleteArrayType(elementType, T->getSizeModifier(), T->getIndexTypeCVRQualifiers()); } QualType VisitVectorType(const VectorType *T) { QualType elementType = recurse(T->getElementType()); if (elementType.isNull()) return {}; if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getVectorType(elementType, T->getNumElements(), T->getVectorKind()); } QualType VisitExtVectorType(const ExtVectorType *T) { QualType elementType = recurse(T->getElementType()); if (elementType.isNull()) return {}; if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getExtVectorType(elementType, T->getNumElements()); } QualType VisitConstantMatrixType(const ConstantMatrixType *T) { QualType elementType = recurse(T->getElementType()); if (elementType.isNull()) return {}; if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getConstantMatrixType(elementType, T->getNumRows(), T->getNumColumns()); } QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T) { QualType returnType = recurse(T->getReturnType()); if (returnType.isNull()) return {}; if (returnType.getAsOpaquePtr() == T->getReturnType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getFunctionNoProtoType(returnType, T->getExtInfo()); } QualType VisitFunctionProtoType(const FunctionProtoType *T) { QualType returnType = recurse(T->getReturnType()); if (returnType.isNull()) return {}; // Transform parameter types. SmallVector paramTypes; bool paramChanged = false; for (auto paramType : T->getParamTypes()) { QualType newParamType = recurse(paramType); if (newParamType.isNull()) return {}; if (newParamType.getAsOpaquePtr() != paramType.getAsOpaquePtr()) paramChanged = true; paramTypes.push_back(newParamType); } // Transform extended info. 
FunctionProtoType::ExtProtoInfo info = T->getExtProtoInfo(); bool exceptionChanged = false; if (info.ExceptionSpec.Type == EST_Dynamic) { SmallVector exceptionTypes; for (auto exceptionType : info.ExceptionSpec.Exceptions) { QualType newExceptionType = recurse(exceptionType); if (newExceptionType.isNull()) return {}; if (newExceptionType.getAsOpaquePtr() != exceptionType.getAsOpaquePtr()) exceptionChanged = true; exceptionTypes.push_back(newExceptionType); } if (exceptionChanged) { info.ExceptionSpec.Exceptions = llvm::makeArrayRef(exceptionTypes).copy(Ctx); } } if (returnType.getAsOpaquePtr() == T->getReturnType().getAsOpaquePtr() && !paramChanged && !exceptionChanged) return QualType(T, 0); return Ctx.getFunctionType(returnType, paramTypes, info); } QualType VisitParenType(const ParenType *T) { QualType innerType = recurse(T->getInnerType()); if (innerType.isNull()) return {}; if (innerType.getAsOpaquePtr() == T->getInnerType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getParenType(innerType); } SUGARED_TYPE_CLASS(Typedef) SUGARED_TYPE_CLASS(ObjCTypeParam) SUGARED_TYPE_CLASS(MacroQualified) QualType VisitAdjustedType(const AdjustedType *T) { QualType originalType = recurse(T->getOriginalType()); if (originalType.isNull()) return {}; QualType adjustedType = recurse(T->getAdjustedType()); if (adjustedType.isNull()) return {}; if (originalType.getAsOpaquePtr() == T->getOriginalType().getAsOpaquePtr() && adjustedType.getAsOpaquePtr() == T->getAdjustedType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getAdjustedType(originalType, adjustedType); } QualType VisitDecayedType(const DecayedType *T) { QualType originalType = recurse(T->getOriginalType()); if (originalType.isNull()) return {}; if (originalType.getAsOpaquePtr() == T->getOriginalType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getDecayedType(originalType); } SUGARED_TYPE_CLASS(TypeOfExpr) SUGARED_TYPE_CLASS(TypeOf) SUGARED_TYPE_CLASS(Decltype) SUGARED_TYPE_CLASS(UnaryTransform) TRIVIAL_TYPE_CLASS(Record) TRIVIAL_TYPE_CLASS(Enum) // FIXME: Non-trivial to implement, but important for C++ SUGARED_TYPE_CLASS(Elaborated) QualType VisitAttributedType(const AttributedType *T) { QualType modifiedType = recurse(T->getModifiedType()); if (modifiedType.isNull()) return {}; QualType equivalentType = recurse(T->getEquivalentType()); if (equivalentType.isNull()) return {}; if (modifiedType.getAsOpaquePtr() == T->getModifiedType().getAsOpaquePtr() && equivalentType.getAsOpaquePtr() == T->getEquivalentType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getAttributedType(T->getAttrKind(), modifiedType, equivalentType); } QualType VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) { QualType replacementType = recurse(T->getReplacementType()); if (replacementType.isNull()) return {}; if (replacementType.getAsOpaquePtr() == T->getReplacementType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getSubstTemplateTypeParmType(T->getReplacedParameter(), replacementType); } // FIXME: Non-trivial to implement, but important for C++ SUGARED_TYPE_CLASS(TemplateSpecialization) QualType VisitAutoType(const AutoType *T) { if (!T->isDeduced()) return QualType(T, 0); QualType deducedType = recurse(T->getDeducedType()); if (deducedType.isNull()) return {}; if (deducedType.getAsOpaquePtr() == T->getDeducedType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getAutoType(deducedType, T->getKeyword(), T->isDependentType(), /*IsPack=*/false, T->getTypeConstraintConcept(), T->getTypeConstraintArguments()); } - // 
FIXME: Non-trivial to implement, but important for C++ - SUGARED_TYPE_CLASS(PackExpansion) - QualType VisitObjCObjectType(const ObjCObjectType *T) { QualType baseType = recurse(T->getBaseType()); if (baseType.isNull()) return {}; // Transform type arguments. bool typeArgChanged = false; SmallVector typeArgs; for (auto typeArg : T->getTypeArgsAsWritten()) { QualType newTypeArg = recurse(typeArg); if (newTypeArg.isNull()) return {}; if (newTypeArg.getAsOpaquePtr() != typeArg.getAsOpaquePtr()) typeArgChanged = true; typeArgs.push_back(newTypeArg); } if (baseType.getAsOpaquePtr() == T->getBaseType().getAsOpaquePtr() && !typeArgChanged) return QualType(T, 0); return Ctx.getObjCObjectType(baseType, typeArgs, llvm::makeArrayRef(T->qual_begin(), T->getNumProtocols()), T->isKindOfTypeAsWritten()); } TRIVIAL_TYPE_CLASS(ObjCInterface) QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T) { QualType pointeeType = recurse(T->getPointeeType()); if (pointeeType.isNull()) return {}; if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getObjCObjectPointerType(pointeeType); } QualType VisitAtomicType(const AtomicType *T) { QualType valueType = recurse(T->getValueType()); if (valueType.isNull()) return {}; if (valueType.getAsOpaquePtr() == T->getValueType().getAsOpaquePtr()) return QualType(T, 0); return Ctx.getAtomicType(valueType); } #undef TRIVIAL_TYPE_CLASS #undef SUGARED_TYPE_CLASS }; struct SubstObjCTypeArgsVisitor : public SimpleTransformVisitor { using BaseType = SimpleTransformVisitor; ArrayRef TypeArgs; ObjCSubstitutionContext SubstContext; SubstObjCTypeArgsVisitor(ASTContext &ctx, ArrayRef typeArgs, ObjCSubstitutionContext context) : BaseType(ctx), TypeArgs(typeArgs), SubstContext(context) {} QualType VisitObjCTypeParamType(const ObjCTypeParamType *OTPTy) { // Replace an Objective-C type parameter reference with the corresponding // type argument. ObjCTypeParamDecl *typeParam = OTPTy->getDecl(); // If we have type arguments, use them. if (!TypeArgs.empty()) { QualType argType = TypeArgs[typeParam->getIndex()]; if (OTPTy->qual_empty()) return argType; // Apply protocol lists if exists. bool hasError; SmallVector protocolsVec; protocolsVec.append(OTPTy->qual_begin(), OTPTy->qual_end()); ArrayRef protocolsToApply = protocolsVec; return Ctx.applyObjCProtocolQualifiers( argType, protocolsToApply, hasError, true/*allowOnPointerType*/); } switch (SubstContext) { case ObjCSubstitutionContext::Ordinary: case ObjCSubstitutionContext::Parameter: case ObjCSubstitutionContext::Superclass: // Substitute the bound. return typeParam->getUnderlyingType(); case ObjCSubstitutionContext::Result: case ObjCSubstitutionContext::Property: { // Substitute the __kindof form of the underlying type. const auto *objPtr = typeParam->getUnderlyingType()->castAs(); // __kindof types, id, and Class don't need an additional // __kindof. if (objPtr->isKindOfType() || objPtr->isObjCIdOrClassType()) return typeParam->getUnderlyingType(); // Add __kindof. const auto *obj = objPtr->getObjectType(); QualType resultTy = Ctx.getObjCObjectType( obj->getBaseType(), obj->getTypeArgsAsWritten(), obj->getProtocols(), /*isKindOf=*/true); // Rebuild object pointer type. return Ctx.getObjCObjectPointerType(resultTy); } } llvm_unreachable("Unexpected ObjCSubstitutionContext!"); } QualType VisitFunctionType(const FunctionType *funcType) { // If we have a function type, update the substitution context // appropriately. //Substitute result type. 
QualType returnType = funcType->getReturnType().substObjCTypeArgs( Ctx, TypeArgs, ObjCSubstitutionContext::Result); if (returnType.isNull()) return {}; // Handle non-prototyped functions, which only substitute into the result // type. if (isa(funcType)) { // If the return type was unchanged, do nothing. if (returnType.getAsOpaquePtr() == funcType->getReturnType().getAsOpaquePtr()) return BaseType::VisitFunctionType(funcType); // Otherwise, build a new type. return Ctx.getFunctionNoProtoType(returnType, funcType->getExtInfo()); } const auto *funcProtoType = cast(funcType); // Transform parameter types. SmallVector paramTypes; bool paramChanged = false; for (auto paramType : funcProtoType->getParamTypes()) { QualType newParamType = paramType.substObjCTypeArgs( Ctx, TypeArgs, ObjCSubstitutionContext::Parameter); if (newParamType.isNull()) return {}; if (newParamType.getAsOpaquePtr() != paramType.getAsOpaquePtr()) paramChanged = true; paramTypes.push_back(newParamType); } // Transform extended info. FunctionProtoType::ExtProtoInfo info = funcProtoType->getExtProtoInfo(); bool exceptionChanged = false; if (info.ExceptionSpec.Type == EST_Dynamic) { SmallVector exceptionTypes; for (auto exceptionType : info.ExceptionSpec.Exceptions) { QualType newExceptionType = exceptionType.substObjCTypeArgs( Ctx, TypeArgs, ObjCSubstitutionContext::Ordinary); if (newExceptionType.isNull()) return {}; if (newExceptionType.getAsOpaquePtr() != exceptionType.getAsOpaquePtr()) exceptionChanged = true; exceptionTypes.push_back(newExceptionType); } if (exceptionChanged) { info.ExceptionSpec.Exceptions = llvm::makeArrayRef(exceptionTypes).copy(Ctx); } } if (returnType.getAsOpaquePtr() == funcProtoType->getReturnType().getAsOpaquePtr() && !paramChanged && !exceptionChanged) return BaseType::VisitFunctionType(funcType); return Ctx.getFunctionType(returnType, paramTypes, info); } QualType VisitObjCObjectType(const ObjCObjectType *objcObjectType) { // Substitute into the type arguments of a specialized Objective-C object // type. if (objcObjectType->isSpecializedAsWritten()) { SmallVector newTypeArgs; bool anyChanged = false; for (auto typeArg : objcObjectType->getTypeArgsAsWritten()) { QualType newTypeArg = typeArg.substObjCTypeArgs( Ctx, TypeArgs, ObjCSubstitutionContext::Ordinary); if (newTypeArg.isNull()) return {}; if (newTypeArg.getAsOpaquePtr() != typeArg.getAsOpaquePtr()) { // If we're substituting based on an unspecialized context type, // produce an unspecialized type. 
ArrayRef protocols( objcObjectType->qual_begin(), objcObjectType->getNumProtocols()); if (TypeArgs.empty() && SubstContext != ObjCSubstitutionContext::Superclass) { return Ctx.getObjCObjectType( objcObjectType->getBaseType(), {}, protocols, objcObjectType->isKindOfTypeAsWritten()); } anyChanged = true; } newTypeArgs.push_back(newTypeArg); } if (anyChanged) { ArrayRef protocols( objcObjectType->qual_begin(), objcObjectType->getNumProtocols()); return Ctx.getObjCObjectType(objcObjectType->getBaseType(), newTypeArgs, protocols, objcObjectType->isKindOfTypeAsWritten()); } } return BaseType::VisitObjCObjectType(objcObjectType); } QualType VisitAttributedType(const AttributedType *attrType) { QualType newType = BaseType::VisitAttributedType(attrType); if (newType.isNull()) return {}; const auto *newAttrType = dyn_cast(newType.getTypePtr()); if (!newAttrType || newAttrType->getAttrKind() != attr::ObjCKindOf) return newType; // Find out if it's an Objective-C object or object pointer type; QualType newEquivType = newAttrType->getEquivalentType(); const ObjCObjectPointerType *ptrType = newEquivType->getAs(); const ObjCObjectType *objType = ptrType ? ptrType->getObjectType() : newEquivType->getAs(); if (!objType) return newType; // Rebuild the "equivalent" type, which pushes __kindof down into // the object type. newEquivType = Ctx.getObjCObjectType( objType->getBaseType(), objType->getTypeArgsAsWritten(), objType->getProtocols(), // There is no need to apply kindof on an unqualified id type. /*isKindOf=*/objType->isObjCUnqualifiedId() ? false : true); // If we started with an object pointer type, rebuild it. if (ptrType) newEquivType = Ctx.getObjCObjectPointerType(newEquivType); // Rebuild the attributed type. return Ctx.getAttributedType(newAttrType->getAttrKind(), newAttrType->getModifiedType(), newEquivType); } }; struct StripObjCKindOfTypeVisitor : public SimpleTransformVisitor { using BaseType = SimpleTransformVisitor; explicit StripObjCKindOfTypeVisitor(ASTContext &ctx) : BaseType(ctx) {} QualType VisitObjCObjectType(const ObjCObjectType *objType) { if (!objType->isKindOfType()) return BaseType::VisitObjCObjectType(objType); QualType baseType = objType->getBaseType().stripObjCKindOfType(Ctx); return Ctx.getObjCObjectType(baseType, objType->getTypeArgsAsWritten(), objType->getProtocols(), /*isKindOf=*/false); } }; } // namespace /// Substitute the given type arguments for Objective-C type /// parameters within the given type, recursively. QualType QualType::substObjCTypeArgs(ASTContext &ctx, ArrayRef typeArgs, ObjCSubstitutionContext context) const { SubstObjCTypeArgsVisitor visitor(ctx, typeArgs, context); return visitor.recurse(*this); } QualType QualType::substObjCMemberType(QualType objectType, const DeclContext *dc, ObjCSubstitutionContext context) const { if (auto subs = objectType->getObjCSubstitutions(dc)) return substObjCTypeArgs(dc->getParentASTContext(), *subs, context); return *this; } QualType QualType::stripObjCKindOfType(const ASTContext &constCtx) const { // FIXME: Because ASTContext::getAttributedType() is non-const. auto &ctx = const_cast(constCtx); StripObjCKindOfTypeVisitor visitor(ctx); return visitor.recurse(*this); } QualType QualType::getAtomicUnqualifiedType() const { if (const auto AT = getTypePtr()->getAs()) return AT->getValueType().getUnqualifiedType(); return getUnqualifiedType(); } Optional> Type::getObjCSubstitutions( const DeclContext *dc) const { // Look through method scopes. 
if (const auto method = dyn_cast(dc)) dc = method->getDeclContext(); // Find the class or category in which the type we're substituting // was declared. const auto *dcClassDecl = dyn_cast(dc); const ObjCCategoryDecl *dcCategoryDecl = nullptr; ObjCTypeParamList *dcTypeParams = nullptr; if (dcClassDecl) { // If the class does not have any type parameters, there's no // substitution to do. dcTypeParams = dcClassDecl->getTypeParamList(); if (!dcTypeParams) return None; } else { // If we are in neither a class nor a category, there's no // substitution to perform. dcCategoryDecl = dyn_cast(dc); if (!dcCategoryDecl) return None; // If the category does not have any type parameters, there's no // substitution to do. dcTypeParams = dcCategoryDecl->getTypeParamList(); if (!dcTypeParams) return None; dcClassDecl = dcCategoryDecl->getClassInterface(); if (!dcClassDecl) return None; } assert(dcTypeParams && "No substitutions to perform"); assert(dcClassDecl && "No class context"); // Find the underlying object type. const ObjCObjectType *objectType; if (const auto *objectPointerType = getAs()) { objectType = objectPointerType->getObjectType(); } else if (getAs()) { ASTContext &ctx = dc->getParentASTContext(); objectType = ctx.getObjCObjectType(ctx.ObjCBuiltinIdTy, {}, {}) ->castAs(); } else { objectType = getAs(); } /// Extract the class from the receiver object type. ObjCInterfaceDecl *curClassDecl = objectType ? objectType->getInterface() : nullptr; if (!curClassDecl) { // If we don't have a context type (e.g., this is "id" or some // variant thereof), substitute the bounds. return llvm::ArrayRef(); } // Follow the superclass chain until we've mapped the receiver type // to the same class as the context. while (curClassDecl != dcClassDecl) { // Map to the superclass type. QualType superType = objectType->getSuperClassType(); if (superType.isNull()) { objectType = nullptr; break; } objectType = superType->castAs(); curClassDecl = objectType->getInterface(); } // If we don't have a receiver type, or the receiver type does not // have type arguments, substitute in the defaults. if (!objectType || objectType->isUnspecialized()) { return llvm::ArrayRef(); } // The receiver type has the type arguments we want. return objectType->getTypeArgs(); } bool Type::acceptsObjCTypeParams() const { if (auto *IfaceT = getAsObjCInterfaceType()) { if (auto *ID = IfaceT->getInterface()) { if (ID->getTypeParamList()) return true; } } return false; } void ObjCObjectType::computeSuperClassTypeSlow() const { // Retrieve the class declaration for this type. If there isn't one // (e.g., this is some variant of "id" or "Class"), then there is no // superclass type. ObjCInterfaceDecl *classDecl = getInterface(); if (!classDecl) { CachedSuperClassType.setInt(true); return; } // Extract the superclass type. const ObjCObjectType *superClassObjTy = classDecl->getSuperClassType(); if (!superClassObjTy) { CachedSuperClassType.setInt(true); return; } ObjCInterfaceDecl *superClassDecl = superClassObjTy->getInterface(); if (!superClassDecl) { CachedSuperClassType.setInt(true); return; } // If the superclass doesn't have type parameters, then there is no // substitution to perform. QualType superClassType(superClassObjTy, 0); ObjCTypeParamList *superClassTypeParams = superClassDecl->getTypeParamList(); if (!superClassTypeParams) { CachedSuperClassType.setPointerAndInt( superClassType->castAs(), true); return; } // If the superclass reference is unspecialized, return it. 
if (superClassObjTy->isUnspecialized()) { CachedSuperClassType.setPointerAndInt(superClassObjTy, true); return; } // If the subclass is not parameterized, there aren't any type // parameters in the superclass reference to substitute. ObjCTypeParamList *typeParams = classDecl->getTypeParamList(); if (!typeParams) { CachedSuperClassType.setPointerAndInt( superClassType->castAs(), true); return; } // If the subclass type isn't specialized, return the unspecialized // superclass. if (isUnspecialized()) { QualType unspecializedSuper = classDecl->getASTContext().getObjCInterfaceType( superClassObjTy->getInterface()); CachedSuperClassType.setPointerAndInt( unspecializedSuper->castAs(), true); return; } // Substitute the provided type arguments into the superclass type. ArrayRef typeArgs = getTypeArgs(); assert(typeArgs.size() == typeParams->size()); CachedSuperClassType.setPointerAndInt( superClassType.substObjCTypeArgs(classDecl->getASTContext(), typeArgs, ObjCSubstitutionContext::Superclass) ->castAs(), true); } const ObjCInterfaceType *ObjCObjectPointerType::getInterfaceType() const { if (auto interfaceDecl = getObjectType()->getInterface()) { return interfaceDecl->getASTContext().getObjCInterfaceType(interfaceDecl) ->castAs(); } return nullptr; } QualType ObjCObjectPointerType::getSuperClassType() const { QualType superObjectType = getObjectType()->getSuperClassType(); if (superObjectType.isNull()) return superObjectType; ASTContext &ctx = getInterfaceDecl()->getASTContext(); return ctx.getObjCObjectPointerType(superObjectType); } const ObjCObjectType *Type::getAsObjCQualifiedInterfaceType() const { // There is no sugar for ObjCObjectType's, just return the canonical // type pointer if it is the right class. There is no typedef information to // return and these cannot be Address-space qualified. if (const auto *T = getAs()) if (T->getNumProtocols() && T->getInterface()) return T; return nullptr; } bool Type::isObjCQualifiedInterfaceType() const { return getAsObjCQualifiedInterfaceType() != nullptr; } const ObjCObjectPointerType *Type::getAsObjCQualifiedIdType() const { // There is no sugar for ObjCQualifiedIdType's, just return the canonical // type pointer if it is the right class. if (const auto *OPT = getAs()) { if (OPT->isObjCQualifiedIdType()) return OPT; } return nullptr; } const ObjCObjectPointerType *Type::getAsObjCQualifiedClassType() const { // There is no sugar for ObjCQualifiedClassType's, just return the canonical // type pointer if it is the right class. 
if (const auto *OPT = getAs()) { if (OPT->isObjCQualifiedClassType()) return OPT; } return nullptr; } const ObjCObjectType *Type::getAsObjCInterfaceType() const { if (const auto *OT = getAs()) { if (OT->getInterface()) return OT; } return nullptr; } const ObjCObjectPointerType *Type::getAsObjCInterfacePointerType() const { if (const auto *OPT = getAs()) { if (OPT->getInterfaceType()) return OPT; } return nullptr; } const CXXRecordDecl *Type::getPointeeCXXRecordDecl() const { QualType PointeeType; if (const auto *PT = getAs()) PointeeType = PT->getPointeeType(); else if (const auto *RT = getAs()) PointeeType = RT->getPointeeType(); else return nullptr; if (const auto *RT = PointeeType->getAs()) return dyn_cast(RT->getDecl()); return nullptr; } CXXRecordDecl *Type::getAsCXXRecordDecl() const { return dyn_cast_or_null(getAsTagDecl()); } RecordDecl *Type::getAsRecordDecl() const { return dyn_cast_or_null(getAsTagDecl()); } TagDecl *Type::getAsTagDecl() const { if (const auto *TT = getAs()) return TT->getDecl(); if (const auto *Injected = getAs()) return Injected->getDecl(); return nullptr; } bool Type::hasAttr(attr::Kind AK) const { const Type *Cur = this; while (const auto *AT = Cur->getAs()) { if (AT->getAttrKind() == AK) return true; Cur = AT->getEquivalentType().getTypePtr(); } return false; } namespace { class GetContainedDeducedTypeVisitor : public TypeVisitor { bool Syntactic; public: GetContainedDeducedTypeVisitor(bool Syntactic = false) : Syntactic(Syntactic) {} using TypeVisitor::Visit; Type *Visit(QualType T) { if (T.isNull()) return nullptr; return Visit(T.getTypePtr()); } // The deduced type itself. Type *VisitDeducedType(const DeducedType *AT) { return const_cast(AT); } // Only these types can contain the desired 'auto' type. Type *VisitElaboratedType(const ElaboratedType *T) { return Visit(T->getNamedType()); } Type *VisitPointerType(const PointerType *T) { return Visit(T->getPointeeType()); } Type *VisitBlockPointerType(const BlockPointerType *T) { return Visit(T->getPointeeType()); } Type *VisitReferenceType(const ReferenceType *T) { return Visit(T->getPointeeTypeAsWritten()); } Type *VisitMemberPointerType(const MemberPointerType *T) { return Visit(T->getPointeeType()); } Type *VisitArrayType(const ArrayType *T) { return Visit(T->getElementType()); } Type *VisitDependentSizedExtVectorType( const DependentSizedExtVectorType *T) { return Visit(T->getElementType()); } Type *VisitVectorType(const VectorType *T) { return Visit(T->getElementType()); } Type *VisitDependentSizedMatrixType(const DependentSizedMatrixType *T) { return Visit(T->getElementType()); } Type *VisitConstantMatrixType(const ConstantMatrixType *T) { return Visit(T->getElementType()); } Type *VisitFunctionProtoType(const FunctionProtoType *T) { if (Syntactic && T->hasTrailingReturn()) return const_cast(T); return VisitFunctionType(T); } Type *VisitFunctionType(const FunctionType *T) { return Visit(T->getReturnType()); } Type *VisitParenType(const ParenType *T) { return Visit(T->getInnerType()); } Type *VisitAttributedType(const AttributedType *T) { return Visit(T->getModifiedType()); } Type *VisitMacroQualifiedType(const MacroQualifiedType *T) { return Visit(T->getUnderlyingType()); } Type *VisitAdjustedType(const AdjustedType *T) { return Visit(T->getOriginalType()); } Type *VisitPackExpansionType(const PackExpansionType *T) { return Visit(T->getPattern()); } }; } // namespace DeducedType *Type::getContainedDeducedType() const { return cast_or_null( GetContainedDeducedTypeVisitor().Visit(this)); } bool 
Type::hasAutoForTrailingReturnType() const { return dyn_cast_or_null( GetContainedDeducedTypeVisitor(true).Visit(this)); } bool Type::hasIntegerRepresentation() const { if (const auto *VT = dyn_cast(CanonicalType)) return VT->getElementType()->isIntegerType(); else return isIntegerType(); } /// Determine whether this type is an integral type. /// /// This routine determines whether the given type is an integral type per /// C++ [basic.fundamental]p7. Although the C standard does not define the /// term "integral type", it has a similar term "integer type", and in C++ /// the two terms are equivalent. However, C's "integer type" includes /// enumeration types, while C++'s "integer type" does not. The \c ASTContext /// parameter is used to determine whether we should be following the C or /// C++ rules when determining whether this type is an integral/integer type. /// /// For cases where C permits "an integer type" and C++ permits "an integral /// type", use this routine. /// /// For cases where C permits "an integer type" and C++ permits "an integral /// or enumeration type", use \c isIntegralOrEnumerationType() instead. /// /// \param Ctx The context in which this type occurs. /// /// \returns true if the type is considered an integral type, false otherwise. bool Type::isIntegralType(const ASTContext &Ctx) const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() >= BuiltinType::Bool && BT->getKind() <= BuiltinType::Int128; // Complete enum types are integral in C. if (!Ctx.getLangOpts().CPlusPlus) if (const auto *ET = dyn_cast(CanonicalType)) return ET->getDecl()->isComplete(); return isExtIntType(); } bool Type::isIntegralOrUnscopedEnumerationType() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() >= BuiltinType::Bool && BT->getKind() <= BuiltinType::Int128; if (isExtIntType()) return true; return isUnscopedEnumerationType(); } bool Type::isUnscopedEnumerationType() const { if (const auto *ET = dyn_cast(CanonicalType)) return !ET->getDecl()->isScoped(); return false; } bool Type::isCharType() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() == BuiltinType::Char_U || BT->getKind() == BuiltinType::UChar || BT->getKind() == BuiltinType::Char_S || BT->getKind() == BuiltinType::SChar; return false; } bool Type::isWideCharType() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() == BuiltinType::WChar_S || BT->getKind() == BuiltinType::WChar_U; return false; } bool Type::isChar8Type() const { if (const BuiltinType *BT = dyn_cast(CanonicalType)) return BT->getKind() == BuiltinType::Char8; return false; } bool Type::isChar16Type() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() == BuiltinType::Char16; return false; } bool Type::isChar32Type() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() == BuiltinType::Char32; return false; } /// Determine whether this type is any of the built-in character /// types. 
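// --------------------------------------------------------------------------
// Illustrative aside (not part of this patch): the C++ notion of "integral
// type" that isIntegralType() follows, checkable with <type_traits>. Note
// that enumerations are not integral types in C++, which is exactly the
// C-vs-C++ difference called out in the documentation above.
// --------------------------------------------------------------------------
#include <type_traits>

enum Color { Red, Green };

static_assert(std::is_integral<bool>::value, "bool is integral");
static_assert(std::is_integral<char32_t>::value, "char32_t is integral");
static_assert(std::is_integral<unsigned long long>::value, "");
static_assert(!std::is_integral<float>::value, "floating-point is not integral");
static_assert(!std::is_integral<Color>::value,
              "enums are not integral types in C++");
static_assert(std::is_enum<Color>::value, "");

int main() {}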
bool Type::isAnyCharacterType() const {
  const auto *BT = dyn_cast<BuiltinType>(CanonicalType);
  if (!BT)
    return false;
  switch (BT->getKind()) {
  default: return false;
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
  case BuiltinType::WChar_U:
  case BuiltinType::Char8:
  case BuiltinType::Char16:
  case BuiltinType::Char32:
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
  case BuiltinType::WChar_S:
    return true;
  }
}

/// isSignedIntegerType - Return true if this is an integer type that is
/// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
/// an enum decl which has a signed representation
bool Type::isSignedIntegerType() const {
  if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
    return BT->getKind() >= BuiltinType::Char_S &&
           BT->getKind() <= BuiltinType::Int128;
  }

  if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
    // Incomplete enum types are not treated as integer types.
    // FIXME: In C++, enum types are never integer types.
    if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
      return ET->getDecl()->getIntegerType()->isSignedIntegerType();
  }

  if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
    return IT->isSigned();

  return false;
}

bool Type::isSignedIntegerOrEnumerationType() const {
  if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
    return BT->getKind() >= BuiltinType::Char_S &&
           BT->getKind() <= BuiltinType::Int128;
  }

  if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
    if (ET->getDecl()->isComplete())
      return ET->getDecl()->getIntegerType()->isSignedIntegerType();
  }

  if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
    return IT->isSigned();

  return false;
}

bool Type::hasSignedIntegerRepresentation() const {
  if (const auto *VT = dyn_cast<VectorType>(CanonicalType))
    return VT->getElementType()->isSignedIntegerOrEnumerationType();
  else
    return isSignedIntegerOrEnumerationType();
}

/// isUnsignedIntegerType - Return true if this is an integer type that is
/// unsigned, according to C99 6.2.5p6 [which returns true for _Bool], an enum
/// decl which has an unsigned representation
bool Type::isUnsignedIntegerType() const {
  if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
    return BT->getKind() >= BuiltinType::Bool &&
           BT->getKind() <= BuiltinType::UInt128;
  }

  if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
    // Incomplete enum types are not treated as integer types.
    // FIXME: In C++, enum types are never integer types.
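// --------------------------------------------------------------------------
// Illustrative aside (not part of this patch): the signedness classification
// the predicates above compute, expressed with <type_traits>. Plain 'char'
// maps to either Char_S or Char_U depending on the target, so its signedness
// is implementation-defined at the source level as well.
// --------------------------------------------------------------------------
#include <type_traits>

static_assert(std::is_signed<signed char>::value, "");
static_assert(std::is_unsigned<unsigned short>::value, "");
static_assert(std::is_unsigned<bool>::value,
              "_Bool/bool counts as an unsigned integer type (C99 6.2.5p6)");
// Whether plain 'char' is signed is target-dependent, so only check that it
// falls into one of the two camps rather than asserting a particular one.
static_assert(std::is_signed<char>::value || std::is_unsigned<char>::value, "");

int main() {}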
if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) return ET->getDecl()->getIntegerType()->isUnsignedIntegerType(); } if (const ExtIntType *IT = dyn_cast(CanonicalType)) return IT->isUnsigned(); return false; } bool Type::isUnsignedIntegerOrEnumerationType() const { if (const auto *BT = dyn_cast(CanonicalType)) { return BT->getKind() >= BuiltinType::Bool && BT->getKind() <= BuiltinType::UInt128; } if (const auto *ET = dyn_cast(CanonicalType)) { if (ET->getDecl()->isComplete()) return ET->getDecl()->getIntegerType()->isUnsignedIntegerType(); } if (const ExtIntType *IT = dyn_cast(CanonicalType)) return IT->isUnsigned(); return false; } bool Type::hasUnsignedIntegerRepresentation() const { if (const auto *VT = dyn_cast(CanonicalType)) return VT->getElementType()->isUnsignedIntegerOrEnumerationType(); else return isUnsignedIntegerOrEnumerationType(); } bool Type::isFloatingType() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() >= BuiltinType::Half && BT->getKind() <= BuiltinType::Float128; if (const auto *CT = dyn_cast(CanonicalType)) return CT->getElementType()->isFloatingType(); return false; } bool Type::hasFloatingRepresentation() const { if (const auto *VT = dyn_cast(CanonicalType)) return VT->getElementType()->isFloatingType(); else return isFloatingType(); } bool Type::isRealFloatingType() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->isFloatingPoint(); return false; } bool Type::isRealType() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() >= BuiltinType::Bool && BT->getKind() <= BuiltinType::Float128; if (const auto *ET = dyn_cast(CanonicalType)) return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped(); return isExtIntType(); } bool Type::isArithmeticType() const { if (const auto *BT = dyn_cast(CanonicalType)) return BT->getKind() >= BuiltinType::Bool && BT->getKind() <= BuiltinType::Float128 && BT->getKind() != BuiltinType::BFloat16; if (const auto *ET = dyn_cast(CanonicalType)) // GCC allows forward declaration of enum types (forbid by C99 6.7.2.3p2). // If a body isn't seen by the time we get here, return false. // // C++0x: Enumerations are not arithmetic types. For now, just return // false for scoped enumerations since that will disable any // unwanted implicit conversions. return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete(); return isa(CanonicalType) || isExtIntType(); } Type::ScalarTypeKind Type::getScalarTypeKind() const { assert(isScalarType()); const Type *T = CanonicalType.getTypePtr(); if (const auto *BT = dyn_cast(T)) { if (BT->getKind() == BuiltinType::Bool) return STK_Bool; if (BT->getKind() == BuiltinType::NullPtr) return STK_CPointer; if (BT->isInteger()) return STK_Integral; if (BT->isFloatingPoint()) return STK_Floating; if (BT->isFixedPointType()) return STK_FixedPoint; llvm_unreachable("unknown scalar builtin type"); } else if (isa(T)) { return STK_CPointer; } else if (isa(T)) { return STK_BlockPointer; } else if (isa(T)) { return STK_ObjCObjectPointer; } else if (isa(T)) { return STK_MemberPointer; } else if (isa(T)) { assert(cast(T)->getDecl()->isComplete()); return STK_Integral; } else if (const auto *CT = dyn_cast(T)) { if (CT->getElementType()->isRealFloatingType()) return STK_FloatingComplex; return STK_IntegralComplex; } else if (isExtIntType()) { return STK_Integral; } llvm_unreachable("unknown scalar type"); } /// Determines whether the type is a C++ aggregate type or C /// aggregate or union type. 
/// /// An aggregate type is an array or a class type (struct, union, or /// class) that has no user-declared constructors, no private or /// protected non-static data members, no base classes, and no virtual /// functions (C++ [dcl.init.aggr]p1). The notion of an aggregate type /// subsumes the notion of C aggregates (C99 6.2.5p21) because it also /// includes union types. bool Type::isAggregateType() const { if (const auto *Record = dyn_cast(CanonicalType)) { if (const auto *ClassDecl = dyn_cast(Record->getDecl())) return ClassDecl->isAggregate(); return true; } return isa(CanonicalType); } /// isConstantSizeType - Return true if this is not a variable sized type, /// according to the rules of C99 6.7.5p3. It is not legal to call this on /// incomplete types or dependent types. bool Type::isConstantSizeType() const { assert(!isIncompleteType() && "This doesn't make sense for incomplete types"); assert(!isDependentType() && "This doesn't make sense for dependent types"); // The VAT must have a size, as it is known to be complete. return !isa(CanonicalType); } /// isIncompleteType - Return true if this is an incomplete type (C99 6.2.5p1) /// - a type that can describe objects, but which lacks information needed to /// determine its size. bool Type::isIncompleteType(NamedDecl **Def) const { if (Def) *Def = nullptr; switch (CanonicalType->getTypeClass()) { default: return false; case Builtin: // Void is the only incomplete builtin type. Per C99 6.2.5p19, it can never // be completed. return isVoidType(); case Enum: { EnumDecl *EnumD = cast(CanonicalType)->getDecl(); if (Def) *Def = EnumD; return !EnumD->isComplete(); } case Record: { // A tagged type (struct/union/enum/class) is incomplete if the decl is a // forward declaration, but not a full definition (C99 6.2.5p22). RecordDecl *Rec = cast(CanonicalType)->getDecl(); if (Def) *Def = Rec; return !Rec->isCompleteDefinition(); } case ConstantArray: // An array is incomplete if its element type is incomplete // (C++ [dcl.array]p1). // We don't handle variable arrays (they're not allowed in C++) or // dependent-sized arrays (dependent types are never treated as incomplete). return cast(CanonicalType)->getElementType() ->isIncompleteType(Def); case IncompleteArray: // An array of unknown size is an incomplete type (C99 6.2.5p22). return true; case MemberPointer: { // Member pointers in the MS ABI have special behavior in // RequireCompleteType: they attach a MSInheritanceAttr to the CXXRecordDecl // to indicate which inheritance model to use. auto *MPTy = cast(CanonicalType); const Type *ClassTy = MPTy->getClass(); // Member pointers with dependent class types don't get special treatment. if (ClassTy->isDependentType()) return false; const CXXRecordDecl *RD = ClassTy->getAsCXXRecordDecl(); ASTContext &Context = RD->getASTContext(); // Member pointers not in the MS ABI don't get special treatment. if (!Context.getTargetInfo().getCXXABI().isMicrosoft()) return false; // The inheritance attribute might only be present on the most recent // CXXRecordDecl, use that one. RD = RD->getMostRecentNonInjectedDecl(); // Nothing interesting to do if the inheritance attribute is already set. if (RD->hasAttr()) return false; return true; } case ObjCObject: return cast(CanonicalType)->getBaseType() ->isIncompleteType(Def); case ObjCInterface: { // ObjC interfaces are incomplete if they are @class, not @interface. 
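// --------------------------------------------------------------------------
// Illustrative aside (not part of this patch): the kinds of incomplete types
// isIncompleteType() is describing, at the C/C++ source level.
// --------------------------------------------------------------------------
struct Fwd;             // forward declaration: incomplete class type
extern int Table[];     // array of unknown bound: incomplete (C99 6.2.5p22)

Fwd *Pointer = nullptr; // pointers to incomplete types are fine;
                        // sizeof(Fwd) or sizeof(Table) here would be ill-formed.

struct Fwd { int x; };  // a later definition completes the type
static_assert(sizeof(Fwd) == sizeof(int), "completed type now has a size");

int main() {}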
ObjCInterfaceDecl *Interface = cast(CanonicalType)->getDecl(); if (Def) *Def = Interface; return !Interface->hasDefinition(); } } } bool Type::isSizelessBuiltinType() const { if (const BuiltinType *BT = getAs()) { switch (BT->getKind()) { // SVE Types #define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/AArch64SVEACLETypes.def" return true; default: return false; } } return false; } bool Type::isSizelessType() const { return isSizelessBuiltinType(); } bool QualType::isPODType(const ASTContext &Context) const { // C++11 has a more relaxed definition of POD. if (Context.getLangOpts().CPlusPlus11) return isCXX11PODType(Context); return isCXX98PODType(Context); } bool QualType::isCXX98PODType(const ASTContext &Context) const { // The compiler shouldn't query this for incomplete types, but the user might. // We return false for that case. Except for incomplete arrays of PODs, which // are PODs according to the standard. if (isNull()) return false; if ((*this)->isIncompleteArrayType()) return Context.getBaseElementType(*this).isCXX98PODType(Context); if ((*this)->isIncompleteType()) return false; if (hasNonTrivialObjCLifetime()) return false; QualType CanonicalType = getTypePtr()->CanonicalType; switch (CanonicalType->getTypeClass()) { // Everything not explicitly mentioned is not POD. default: return false; case Type::VariableArray: case Type::ConstantArray: // IncompleteArray is handled above. return Context.getBaseElementType(*this).isCXX98PODType(Context); case Type::ObjCObjectPointer: case Type::BlockPointer: case Type::Builtin: case Type::Complex: case Type::Pointer: case Type::MemberPointer: case Type::Vector: case Type::ExtVector: case Type::ExtInt: return true; case Type::Enum: return true; case Type::Record: if (const auto *ClassDecl = dyn_cast(cast(CanonicalType)->getDecl())) return ClassDecl->isPOD(); // C struct/union is POD. return true; } } bool QualType::isTrivialType(const ASTContext &Context) const { // The compiler shouldn't query this for incomplete types, but the user might. // We return false for that case. Except for incomplete arrays of PODs, which // are PODs according to the standard. if (isNull()) return false; if ((*this)->isArrayType()) return Context.getBaseElementType(*this).isTrivialType(Context); if ((*this)->isSizelessBuiltinType()) return true; // Return false for incomplete types after skipping any incomplete array // types which are expressly allowed by the standard and thus our API. if ((*this)->isIncompleteType()) return false; if (hasNonTrivialObjCLifetime()) return false; QualType CanonicalType = getTypePtr()->CanonicalType; if (CanonicalType->isDependentType()) return false; // C++0x [basic.types]p9: // Scalar types, trivial class types, arrays of such types, and // cv-qualified versions of these types are collectively called trivial // types. // As an extension, Clang treats vector types as Scalar types. if (CanonicalType->isScalarType() || CanonicalType->isVectorType()) return true; if (const auto *RT = CanonicalType->getAs()) { if (const auto *ClassDecl = dyn_cast(RT->getDecl())) { // C++11 [class]p6: // A trivial class is a class that has a default constructor, // has no non-trivial default constructors, and is trivially // copyable. return ClassDecl->hasDefaultConstructor() && !ClassDecl->hasNonTrivialDefaultConstructor() && ClassDecl->isTriviallyCopyable(); } return true; } // No other types can match. 
return false; } bool QualType::isTriviallyCopyableType(const ASTContext &Context) const { if ((*this)->isArrayType()) return Context.getBaseElementType(*this).isTriviallyCopyableType(Context); if (hasNonTrivialObjCLifetime()) return false; // C++11 [basic.types]p9 - See Core 2094 // Scalar types, trivially copyable class types, arrays of such types, and // cv-qualified versions of these types are collectively // called trivially copyable types. QualType CanonicalType = getCanonicalType(); if (CanonicalType->isDependentType()) return false; if (CanonicalType->isSizelessBuiltinType()) return true; // Return false for incomplete types after skipping any incomplete array types // which are expressly allowed by the standard and thus our API. if (CanonicalType->isIncompleteType()) return false; // As an extension, Clang treats vector types as Scalar types. if (CanonicalType->isScalarType() || CanonicalType->isVectorType()) return true; if (const auto *RT = CanonicalType->getAs()) { if (const auto *ClassDecl = dyn_cast(RT->getDecl())) { if (!ClassDecl->isTriviallyCopyable()) return false; } return true; } // No other types can match. return false; } bool QualType::isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const { return !Context.getLangOpts().ObjCAutoRefCount && Context.getLangOpts().ObjCWeak && getObjCLifetime() != Qualifiers::OCL_Weak; } bool QualType::hasNonTrivialToPrimitiveDefaultInitializeCUnion(const RecordDecl *RD) { return RD->hasNonTrivialToPrimitiveDefaultInitializeCUnion(); } bool QualType::hasNonTrivialToPrimitiveDestructCUnion(const RecordDecl *RD) { return RD->hasNonTrivialToPrimitiveDestructCUnion(); } bool QualType::hasNonTrivialToPrimitiveCopyCUnion(const RecordDecl *RD) { return RD->hasNonTrivialToPrimitiveCopyCUnion(); } QualType::PrimitiveDefaultInitializeKind QualType::isNonTrivialToPrimitiveDefaultInitialize() const { if (const auto *RT = getTypePtr()->getBaseElementTypeUnsafe()->getAs()) if (RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) return PDIK_Struct; switch (getQualifiers().getObjCLifetime()) { case Qualifiers::OCL_Strong: return PDIK_ARCStrong; case Qualifiers::OCL_Weak: return PDIK_ARCWeak; default: return PDIK_Trivial; } } QualType::PrimitiveCopyKind QualType::isNonTrivialToPrimitiveCopy() const { if (const auto *RT = getTypePtr()->getBaseElementTypeUnsafe()->getAs()) if (RT->getDecl()->isNonTrivialToPrimitiveCopy()) return PCK_Struct; Qualifiers Qs = getQualifiers(); switch (Qs.getObjCLifetime()) { case Qualifiers::OCL_Strong: return PCK_ARCStrong; case Qualifiers::OCL_Weak: return PCK_ARCWeak; default: return Qs.hasVolatile() ? PCK_VolatileTrivial : PCK_Trivial; } } QualType::PrimitiveCopyKind QualType::isNonTrivialToPrimitiveDestructiveMove() const { return isNonTrivialToPrimitiveCopy(); } bool Type::isLiteralType(const ASTContext &Ctx) const { if (isDependentType()) return false; // C++1y [basic.types]p10: // A type is a literal type if it is: // -- cv void; or if (Ctx.getLangOpts().CPlusPlus14 && isVoidType()) return true; // C++11 [basic.types]p10: // A type is a literal type if it is: // [...] // -- an array of literal type other than an array of runtime bound; or if (isVariableArrayType()) return false; const Type *BaseTy = getBaseElementTypeUnsafe(); assert(BaseTy && "NULL element type"); // Return false for incomplete types after skipping any incomplete array // types; those are expressly allowed by the standard and thus our API. 
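// --------------------------------------------------------------------------
// Illustrative aside (not part of this patch): what the literal-type bullets
// quoted above mean in practice; a literal class type can be used to build
// constant expressions, a non-literal one cannot.
// --------------------------------------------------------------------------
struct Literal {
  int x;
  constexpr Literal(int v) : x(v) {} // constexpr ctor, trivial destructor
};

struct NotLiteral {
  int x;
  NotLiteral(int v) : x(v) {}        // no constexpr constructor
  ~NotLiteral() {}                   // user-provided (non-trivial) destructor
};

constexpr Literal L{3};              // OK: Literal is a literal type
static_assert(L.x == 3, "usable in constant expressions");
// constexpr NotLiteral N{3};        // would be ill-formed

int main() {}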
if (BaseTy->isIncompleteType()) return false; // C++11 [basic.types]p10: // A type is a literal type if it is: // -- a scalar type; or // As an extension, Clang treats vector types and complex types as // literal types. if (BaseTy->isScalarType() || BaseTy->isVectorType() || BaseTy->isAnyComplexType()) return true; // -- a reference type; or if (BaseTy->isReferenceType()) return true; // -- a class type that has all of the following properties: if (const auto *RT = BaseTy->getAs()) { // -- a trivial destructor, // -- every constructor call and full-expression in the // brace-or-equal-initializers for non-static data members (if any) // is a constant expression, // -- it is an aggregate type or has at least one constexpr // constructor or constructor template that is not a copy or move // constructor, and // -- all non-static data members and base classes of literal types // // We resolve DR1361 by ignoring the second bullet. if (const auto *ClassDecl = dyn_cast(RT->getDecl())) return ClassDecl->isLiteral(); return true; } // We treat _Atomic T as a literal type if T is a literal type. if (const auto *AT = BaseTy->getAs()) return AT->getValueType()->isLiteralType(Ctx); // If this type hasn't been deduced yet, then conservatively assume that // it'll work out to be a literal type. if (isa(BaseTy->getCanonicalTypeInternal())) return true; return false; } bool Type::isStandardLayoutType() const { if (isDependentType()) return false; // C++0x [basic.types]p9: // Scalar types, standard-layout class types, arrays of such types, and // cv-qualified versions of these types are collectively called // standard-layout types. const Type *BaseTy = getBaseElementTypeUnsafe(); assert(BaseTy && "NULL element type"); // Return false for incomplete types after skipping any incomplete array // types which are expressly allowed by the standard and thus our API. if (BaseTy->isIncompleteType()) return false; // As an extension, Clang treats vector types as Scalar types. if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true; if (const auto *RT = BaseTy->getAs()) { if (const auto *ClassDecl = dyn_cast(RT->getDecl())) if (!ClassDecl->isStandardLayout()) return false; // Default to 'true' for non-C++ class types. // FIXME: This is a bit dubious, but plain C structs should trivially meet // all the requirements of standard layout classes. return true; } // No other types can match. return false; } // This is effectively the intersection of isTrivialType and // isStandardLayoutType. We implement it directly to avoid redundant // conversions from a type to a CXXRecordDecl. bool QualType::isCXX11PODType(const ASTContext &Context) const { const Type *ty = getTypePtr(); if (ty->isDependentType()) return false; if (hasNonTrivialObjCLifetime()) return false; // C++11 [basic.types]p9: // Scalar types, POD classes, arrays of such types, and cv-qualified // versions of these types are collectively called trivial types. const Type *BaseTy = ty->getBaseElementTypeUnsafe(); assert(BaseTy && "NULL element type"); if (BaseTy->isSizelessBuiltinType()) return true; // Return false for incomplete types after skipping any incomplete array // types which are expressly allowed by the standard and thus our API. if (BaseTy->isIncompleteType()) return false; // As an extension, Clang treats vector types as Scalar types. 
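  // [Editor's illustration; not part of the upstream file. A minimal sketch of
  //  the C++11 rules this function mirrors, expressed with the standard type
  //  traits; the struct names are hypothetical and the block is kept under
  //  "#if 0" so it cannot affect the build.]
#if 0
  #include <type_traits>
  struct Pod        { int a; double b; };        // trivial and standard-layout
  struct NotTrivial { NotTrivial() {} int a; };  // user-provided default ctor
  static_assert(std::is_trivial<Pod>::value &&
                    std::is_standard_layout<Pod>::value,
                "POD in the C++11 sense");
  static_assert(!std::is_trivial<NotTrivial>::value,
                "a user-provided constructor makes the class non-trivial");
#endif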
  if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
  if (const auto *RT = BaseTy->getAs<RecordType>()) {
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      // C++11 [class]p10:
      //   A POD struct is a non-union class that is both a trivial class [...]
      if (!ClassDecl->isTrivial()) return false;

      // C++11 [class]p10:
      //   A POD struct is a non-union class that is both a trivial class and
      //   a standard-layout class [...]
      if (!ClassDecl->isStandardLayout()) return false;

      // C++11 [class]p10:
      //   A POD struct is a non-union class that is both a trivial class and
      //   a standard-layout class, and has no non-static data members of type
      //   non-POD struct, non-POD union (or array of such types). [...]
      //
      // We don't directly query the recursive aspect as the requirements for
      // both standard-layout classes and trivial classes apply recursively
      // already.
    }

    return true;
  }

  // No other types can match.
  return false;
}

bool Type::isNothrowT() const {
  if (const auto *RD = getAsCXXRecordDecl()) {
    IdentifierInfo *II = RD->getIdentifier();
    if (II && II->isStr("nothrow_t") && RD->isInStdNamespace())
      return true;
  }
  return false;
}

bool Type::isAlignValT() const {
  if (const auto *ET = getAs<EnumType>()) {
    IdentifierInfo *II = ET->getDecl()->getIdentifier();
    if (II && II->isStr("align_val_t") && ET->getDecl()->isInStdNamespace())
      return true;
  }
  return false;
}

bool Type::isStdByteType() const {
  if (const auto *ET = getAs<EnumType>()) {
    IdentifierInfo *II = ET->getDecl()->getIdentifier();
    if (II && II->isStr("byte") && ET->getDecl()->isInStdNamespace())
      return true;
  }
  return false;
}

bool Type::isPromotableIntegerType() const {
  if (const auto *BT = getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      return true;
    default:
      return false;
    }

  // Enumerated types are promotable to their compatible integer types
  // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
  if (const auto *ET = getAs<EnumType>()) {
    if (this->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
        ET->getDecl()->isScoped())
      return false;

    return true;
  }

  return false;
}

bool Type::isSpecifierType() const {
  // Note that this intentionally does not use the canonical type.
switch (getTypeClass()) { case Builtin: case Record: case Enum: case Typedef: case Complex: case TypeOfExpr: case TypeOf: case TemplateTypeParm: case SubstTemplateTypeParm: case TemplateSpecialization: case Elaborated: case DependentName: case DependentTemplateSpecialization: case ObjCInterface: case ObjCObject: case ObjCObjectPointer: // FIXME: object pointers aren't really specifiers return true; default: return false; } } ElaboratedTypeKeyword TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) { switch (TypeSpec) { default: return ETK_None; case TST_typename: return ETK_Typename; case TST_class: return ETK_Class; case TST_struct: return ETK_Struct; case TST_interface: return ETK_Interface; case TST_union: return ETK_Union; case TST_enum: return ETK_Enum; } } TagTypeKind TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) { switch(TypeSpec) { case TST_class: return TTK_Class; case TST_struct: return TTK_Struct; case TST_interface: return TTK_Interface; case TST_union: return TTK_Union; case TST_enum: return TTK_Enum; } llvm_unreachable("Type specifier is not a tag type kind."); } ElaboratedTypeKeyword TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) { switch (Kind) { case TTK_Class: return ETK_Class; case TTK_Struct: return ETK_Struct; case TTK_Interface: return ETK_Interface; case TTK_Union: return ETK_Union; case TTK_Enum: return ETK_Enum; } llvm_unreachable("Unknown tag type kind."); } TagTypeKind TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) { switch (Keyword) { case ETK_Class: return TTK_Class; case ETK_Struct: return TTK_Struct; case ETK_Interface: return TTK_Interface; case ETK_Union: return TTK_Union; case ETK_Enum: return TTK_Enum; case ETK_None: // Fall through. case ETK_Typename: llvm_unreachable("Elaborated type keyword is not a tag type kind."); } llvm_unreachable("Unknown elaborated type keyword."); } bool TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) { switch (Keyword) { case ETK_None: case ETK_Typename: return false; case ETK_Class: case ETK_Struct: case ETK_Interface: case ETK_Union: case ETK_Enum: return true; } llvm_unreachable("Unknown elaborated type keyword."); } StringRef TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) { switch (Keyword) { case ETK_None: return {}; case ETK_Typename: return "typename"; case ETK_Class: return "class"; case ETK_Struct: return "struct"; case ETK_Interface: return "__interface"; case ETK_Union: return "union"; case ETK_Enum: return "enum"; } llvm_unreachable("Unknown elaborated type keyword."); } DependentTemplateSpecializationType::DependentTemplateSpecializationType( ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, const IdentifierInfo *Name, ArrayRef Args, QualType Canon) : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, TypeDependence::DependentInstantiation | (NNS ? 
toTypeDependence(NNS->getDependence()) : TypeDependence::None)), NNS(NNS), Name(Name) { DependentTemplateSpecializationTypeBits.NumArgs = Args.size(); assert((!NNS || NNS->isDependent()) && "DependentTemplateSpecializatonType requires dependent qualifier"); TemplateArgument *ArgBuffer = getArgBuffer(); for (const TemplateArgument &Arg : Args) { addDependence(toTypeDependence(Arg.getDependence() & TemplateArgumentDependence::UnexpandedPack)); new (ArgBuffer++) TemplateArgument(Arg); } } void DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, ElaboratedTypeKeyword Keyword, NestedNameSpecifier *Qualifier, const IdentifierInfo *Name, ArrayRef Args) { ID.AddInteger(Keyword); ID.AddPointer(Qualifier); ID.AddPointer(Name); for (const TemplateArgument &Arg : Args) Arg.Profile(ID, Context); } bool Type::isElaboratedTypeSpecifier() const { ElaboratedTypeKeyword Keyword; if (const auto *Elab = dyn_cast(this)) Keyword = Elab->getKeyword(); else if (const auto *DepName = dyn_cast(this)) Keyword = DepName->getKeyword(); else if (const auto *DepTST = dyn_cast(this)) Keyword = DepTST->getKeyword(); else return false; return TypeWithKeyword::KeywordIsTagTypeKind(Keyword); } const char *Type::getTypeClassName() const { switch (TypeBits.TC) { #define ABSTRACT_TYPE(Derived, Base) #define TYPE(Derived, Base) case Derived: return #Derived; #include "clang/AST/TypeNodes.inc" } llvm_unreachable("Invalid type class."); } StringRef BuiltinType::getName(const PrintingPolicy &Policy) const { switch (getKind()) { case Void: return "void"; case Bool: return Policy.Bool ? "bool" : "_Bool"; case Char_S: return "char"; case Char_U: return "char"; case SChar: return "signed char"; case Short: return "short"; case Int: return "int"; case Long: return "long"; case LongLong: return "long long"; case Int128: return "__int128"; case UChar: return "unsigned char"; case UShort: return "unsigned short"; case UInt: return "unsigned int"; case ULong: return "unsigned long"; case ULongLong: return "unsigned long long"; case UInt128: return "unsigned __int128"; case Half: return Policy.Half ? 
"half" : "__fp16"; case BFloat16: return "__bf16"; case Float: return "float"; case Double: return "double"; case LongDouble: return "long double"; case ShortAccum: return "short _Accum"; case Accum: return "_Accum"; case LongAccum: return "long _Accum"; case UShortAccum: return "unsigned short _Accum"; case UAccum: return "unsigned _Accum"; case ULongAccum: return "unsigned long _Accum"; case BuiltinType::ShortFract: return "short _Fract"; case BuiltinType::Fract: return "_Fract"; case BuiltinType::LongFract: return "long _Fract"; case BuiltinType::UShortFract: return "unsigned short _Fract"; case BuiltinType::UFract: return "unsigned _Fract"; case BuiltinType::ULongFract: return "unsigned long _Fract"; case BuiltinType::SatShortAccum: return "_Sat short _Accum"; case BuiltinType::SatAccum: return "_Sat _Accum"; case BuiltinType::SatLongAccum: return "_Sat long _Accum"; case BuiltinType::SatUShortAccum: return "_Sat unsigned short _Accum"; case BuiltinType::SatUAccum: return "_Sat unsigned _Accum"; case BuiltinType::SatULongAccum: return "_Sat unsigned long _Accum"; case BuiltinType::SatShortFract: return "_Sat short _Fract"; case BuiltinType::SatFract: return "_Sat _Fract"; case BuiltinType::SatLongFract: return "_Sat long _Fract"; case BuiltinType::SatUShortFract: return "_Sat unsigned short _Fract"; case BuiltinType::SatUFract: return "_Sat unsigned _Fract"; case BuiltinType::SatULongFract: return "_Sat unsigned long _Fract"; case Float16: return "_Float16"; case Float128: return "__float128"; case WChar_S: case WChar_U: return Policy.MSWChar ? "__wchar_t" : "wchar_t"; case Char8: return "char8_t"; case Char16: return "char16_t"; case Char32: return "char32_t"; case NullPtr: return "nullptr_t"; case Overload: return ""; case BoundMember: return ""; case PseudoObject: return ""; case Dependent: return ""; case UnknownAny: return ""; case ARCUnbridgedCast: return ""; case BuiltinFn: return ""; case ObjCId: return "id"; case ObjCClass: return "Class"; case ObjCSel: return "SEL"; #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ case Id: \ return "__" #Access " " #ImgType "_t"; #include "clang/Basic/OpenCLImageTypes.def" case OCLSampler: return "sampler_t"; case OCLEvent: return "event_t"; case OCLClkEvent: return "clk_event_t"; case OCLQueue: return "queue_t"; case OCLReserveID: return "reserve_id_t"; case IncompleteMatrixIdx: return ""; case OMPArraySection: return ""; case OMPArrayShaping: return ""; case OMPIterator: return ""; #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ case Id: \ return #ExtType; #include "clang/Basic/OpenCLExtensionTypes.def" #define SVE_TYPE(Name, Id, SingletonId) \ case Id: \ return Name; #include "clang/Basic/AArch64SVEACLETypes.def" } llvm_unreachable("Invalid builtin type."); } QualType QualType::getNonPackExpansionType() const { // We never wrap type sugar around a PackExpansionType. if (auto *PET = dyn_cast(getTypePtr())) return PET->getPattern(); return *this; } QualType QualType::getNonLValueExprType(const ASTContext &Context) const { if (const auto *RefType = getTypePtr()->getAs()) return RefType->getPointeeType(); // C++0x [basic.lval]: // Class prvalues can have cv-qualified types; non-class prvalues always // have cv-unqualified types. // // See also C99 6.3.2.1p2. 
if (!Context.getLangOpts().CPlusPlus || (!getTypePtr()->isDependentType() && !getTypePtr()->isRecordType())) return getUnqualifiedType(); return *this; } StringRef FunctionType::getNameForCallConv(CallingConv CC) { switch (CC) { case CC_C: return "cdecl"; case CC_X86StdCall: return "stdcall"; case CC_X86FastCall: return "fastcall"; case CC_X86ThisCall: return "thiscall"; case CC_X86Pascal: return "pascal"; case CC_X86VectorCall: return "vectorcall"; case CC_Win64: return "ms_abi"; case CC_X86_64SysV: return "sysv_abi"; case CC_X86RegCall : return "regcall"; case CC_AAPCS: return "aapcs"; case CC_AAPCS_VFP: return "aapcs-vfp"; case CC_AArch64VectorCall: return "aarch64_vector_pcs"; case CC_IntelOclBicc: return "intel_ocl_bicc"; case CC_SpirFunction: return "spir_function"; case CC_OpenCLKernel: return "opencl_kernel"; case CC_Swift: return "swiftcall"; case CC_PreserveMost: return "preserve_most"; case CC_PreserveAll: return "preserve_all"; } llvm_unreachable("Invalid calling convention."); } FunctionProtoType::FunctionProtoType(QualType result, ArrayRef params, QualType canonical, const ExtProtoInfo &epi) : FunctionType(FunctionProto, result, canonical, result->getDependence(), epi.ExtInfo) { FunctionTypeBits.FastTypeQuals = epi.TypeQuals.getFastQualifiers(); FunctionTypeBits.RefQualifier = epi.RefQualifier; FunctionTypeBits.NumParams = params.size(); assert(getNumParams() == params.size() && "NumParams overflow!"); FunctionTypeBits.ExceptionSpecType = epi.ExceptionSpec.Type; FunctionTypeBits.HasExtParameterInfos = !!epi.ExtParameterInfos; FunctionTypeBits.Variadic = epi.Variadic; FunctionTypeBits.HasTrailingReturn = epi.HasTrailingReturn; // Fill in the extra trailing bitfields if present. if (hasExtraBitfields(epi.ExceptionSpec.Type)) { auto &ExtraBits = *getTrailingObjects(); ExtraBits.NumExceptionType = epi.ExceptionSpec.Exceptions.size(); } // Fill in the trailing argument array. auto *argSlot = getTrailingObjects(); for (unsigned i = 0; i != getNumParams(); ++i) { addDependence(params[i]->getDependence() & ~TypeDependence::VariablyModified); argSlot[i] = params[i]; } // Fill in the exception type array if present. if (getExceptionSpecType() == EST_Dynamic) { assert(hasExtraBitfields() && "missing trailing extra bitfields!"); auto *exnSlot = reinterpret_cast(getTrailingObjects()); unsigned I = 0; for (QualType ExceptionType : epi.ExceptionSpec.Exceptions) { // Note that, before C++17, a dependent exception specification does // *not* make a type dependent; it's not even part of the C++ type // system. addDependence( ExceptionType->getDependence() & (TypeDependence::Instantiation | TypeDependence::UnexpandedPack)); exnSlot[I++] = ExceptionType; } } // Fill in the Expr * in the exception specification if present. else if (isComputedNoexcept(getExceptionSpecType())) { assert(epi.ExceptionSpec.NoexceptExpr && "computed noexcept with no expr"); assert((getExceptionSpecType() == EST_DependentNoexcept) == epi.ExceptionSpec.NoexceptExpr->isValueDependent()); // Store the noexcept expression and context. *getTrailingObjects() = epi.ExceptionSpec.NoexceptExpr; addDependence( toTypeDependence(epi.ExceptionSpec.NoexceptExpr->getDependence()) & (TypeDependence::Instantiation | TypeDependence::UnexpandedPack)); } // Fill in the FunctionDecl * in the exception specification if present. else if (getExceptionSpecType() == EST_Uninstantiated) { // Store the function decl from which we will resolve our // exception specification. 
auto **slot = getTrailingObjects(); slot[0] = epi.ExceptionSpec.SourceDecl; slot[1] = epi.ExceptionSpec.SourceTemplate; // This exception specification doesn't make the type dependent, because // it's not instantiated as part of instantiating the type. } else if (getExceptionSpecType() == EST_Unevaluated) { // Store the function decl from which we will resolve our // exception specification. auto **slot = getTrailingObjects(); slot[0] = epi.ExceptionSpec.SourceDecl; } // If this is a canonical type, and its exception specification is dependent, // then it's a dependent type. This only happens in C++17 onwards. if (isCanonicalUnqualified()) { if (getExceptionSpecType() == EST_Dynamic || getExceptionSpecType() == EST_DependentNoexcept) { assert(hasDependentExceptionSpec() && "type should not be canonical"); addDependence(TypeDependence::DependentInstantiation); } } else if (getCanonicalTypeInternal()->isDependentType()) { // Ask our canonical type whether our exception specification was dependent. addDependence(TypeDependence::DependentInstantiation); } // Fill in the extra parameter info if present. if (epi.ExtParameterInfos) { auto *extParamInfos = getTrailingObjects(); for (unsigned i = 0; i != getNumParams(); ++i) extParamInfos[i] = epi.ExtParameterInfos[i]; } if (epi.TypeQuals.hasNonFastQualifiers()) { FunctionTypeBits.HasExtQuals = 1; *getTrailingObjects() = epi.TypeQuals; } else { FunctionTypeBits.HasExtQuals = 0; } // Fill in the Ellipsis location info if present. if (epi.Variadic) { auto &EllipsisLoc = *getTrailingObjects(); EllipsisLoc = epi.EllipsisLoc; } } bool FunctionProtoType::hasDependentExceptionSpec() const { if (Expr *NE = getNoexceptExpr()) return NE->isValueDependent(); for (QualType ET : exceptions()) // A pack expansion with a non-dependent pattern is still dependent, // because we don't know whether the pattern is in the exception spec // or not (that depends on whether the pack has 0 expansions). if (ET->isDependentType() || ET->getAs()) return true; return false; } bool FunctionProtoType::hasInstantiationDependentExceptionSpec() const { if (Expr *NE = getNoexceptExpr()) return NE->isInstantiationDependent(); for (QualType ET : exceptions()) if (ET->isInstantiationDependentType()) return true; return false; } CanThrowResult FunctionProtoType::canThrow() const { switch (getExceptionSpecType()) { case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: llvm_unreachable("should not call this with unresolved exception specs"); case EST_DynamicNone: case EST_BasicNoexcept: case EST_NoexceptTrue: case EST_NoThrow: return CT_Cannot; case EST_None: case EST_MSAny: case EST_NoexceptFalse: return CT_Can; case EST_Dynamic: // A dynamic exception specification is throwing unless every exception // type is an (unexpanded) pack expansion type. for (unsigned I = 0; I != getNumExceptions(); ++I) if (!getExceptionType(I)->getAs()) return CT_Can; return CT_Dependent; case EST_DependentNoexcept: return CT_Dependent; } llvm_unreachable("unexpected exception specification kind"); } bool FunctionProtoType::isTemplateVariadic() const { for (unsigned ArgIdx = getNumParams(); ArgIdx; --ArgIdx) if (isa(getParamType(ArgIdx - 1))) return true; return false; } void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result, const QualType *ArgTys, unsigned NumParams, const ExtProtoInfo &epi, const ASTContext &Context, bool Canonical) { // We have to be careful not to get ambiguous profile encodings. // Note that valid type pointers are never ambiguous with anything else. 
// // The encoding grammar begins: // type type* bool int bool // If that final bool is true, then there is a section for the EH spec: // bool type* // This is followed by an optional "consumed argument" section of the // same length as the first type sequence: // bool* // Finally, we have the ext info and trailing return type flag: // int bool // // There is no ambiguity between the consumed arguments and an empty EH // spec because of the leading 'bool' which unambiguously indicates // whether the following bool is the EH spec or part of the arguments. ID.AddPointer(Result.getAsOpaquePtr()); for (unsigned i = 0; i != NumParams; ++i) ID.AddPointer(ArgTys[i].getAsOpaquePtr()); // This method is relatively performance sensitive, so as a performance // shortcut, use one AddInteger call instead of four for the next four // fields. assert(!(unsigned(epi.Variadic) & ~1) && !(unsigned(epi.RefQualifier) & ~3) && !(unsigned(epi.ExceptionSpec.Type) & ~15) && "Values larger than expected."); ID.AddInteger(unsigned(epi.Variadic) + (epi.RefQualifier << 1) + (epi.ExceptionSpec.Type << 3)); ID.Add(epi.TypeQuals); if (epi.ExceptionSpec.Type == EST_Dynamic) { for (QualType Ex : epi.ExceptionSpec.Exceptions) ID.AddPointer(Ex.getAsOpaquePtr()); } else if (isComputedNoexcept(epi.ExceptionSpec.Type)) { epi.ExceptionSpec.NoexceptExpr->Profile(ID, Context, Canonical); } else if (epi.ExceptionSpec.Type == EST_Uninstantiated || epi.ExceptionSpec.Type == EST_Unevaluated) { ID.AddPointer(epi.ExceptionSpec.SourceDecl->getCanonicalDecl()); } if (epi.ExtParameterInfos) { for (unsigned i = 0; i != NumParams; ++i) ID.AddInteger(epi.ExtParameterInfos[i].getOpaqueValue()); } epi.ExtInfo.Profile(ID); ID.AddBoolean(epi.HasTrailingReturn); } void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) { Profile(ID, getReturnType(), param_type_begin(), getNumParams(), getExtProtoInfo(), Ctx, isCanonicalUnqualified()); } +TypedefType::TypedefType(TypeClass tc, const TypedefNameDecl *D, QualType can) + : Type(tc, can, D->getUnderlyingType()->getDependence()), + Decl(const_cast(D)) { + assert(!isa(can) && "Invalid canonical type"); +} + QualType TypedefType::desugar() const { return getDecl()->getUnderlyingType(); } QualType MacroQualifiedType::desugar() const { return getUnderlyingType(); } QualType MacroQualifiedType::getModifiedType() const { // Step over MacroQualifiedTypes from the same macro to find the type // ultimately qualified by the macro qualifier. QualType Inner = cast(getUnderlyingType())->getModifiedType(); while (auto *InnerMQT = dyn_cast(Inner)) { if (InnerMQT->getMacroIdentifier() != getMacroIdentifier()) break; Inner = InnerMQT->getModifiedType(); } return Inner; } TypeOfExprType::TypeOfExprType(Expr *E, QualType can) : Type(TypeOfExpr, can, toTypeDependence(E->getDependence()) | (E->getType()->getDependence() & TypeDependence::VariablyModified)), TOExpr(E) {} bool TypeOfExprType::isSugared() const { return !TOExpr->isTypeDependent(); } QualType TypeOfExprType::desugar() const { if (isSugared()) return getUnderlyingExpr()->getType(); return QualType(this, 0); } void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, Expr *E) { E->Profile(ID, Context, true); } DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can) // C++11 [temp.type]p2: "If an expression e involves a template parameter, // decltype(e) denotes a unique dependent type." Hence a decltype type is // type-dependent even if its expression is only instantiation-dependent. 
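  // [Editor's illustration; not part of the upstream file. Why decltype(e) is
  //  treated as dependent whenever e involves a template parameter, per the
  //  C++11 [temp.type]p2 wording quoted above; kept under "#if 0".]
#if 0
  template <typename T> void f(T t) {
    decltype(t + t) a = {};   // dependent: meaning differs per instantiation
    decltype(1 + 1) b = 0;    // not dependent: always int
  }
#endif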
: Type(Decltype, can, toTypeDependence(E->getDependence()) | (E->isInstantiationDependent() ? TypeDependence::Dependent : TypeDependence::None) | (E->getType()->getDependence() & TypeDependence::VariablyModified)), E(E), UnderlyingType(underlyingType) {} bool DecltypeType::isSugared() const { return !E->isInstantiationDependent(); } QualType DecltypeType::desugar() const { if (isSugared()) return getUnderlyingType(); return QualType(this, 0); } DependentDecltypeType::DependentDecltypeType(const ASTContext &Context, Expr *E) : DecltypeType(E, Context.DependentTy), Context(Context) {} void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, Expr *E) { E->Profile(ID, Context, true); } UnaryTransformType::UnaryTransformType(QualType BaseType, QualType UnderlyingType, UTTKind UKind, QualType CanonicalType) : Type(UnaryTransform, CanonicalType, BaseType->getDependence()), BaseType(BaseType), UnderlyingType(UnderlyingType), UKind(UKind) {} DependentUnaryTransformType::DependentUnaryTransformType(const ASTContext &C, QualType BaseType, UTTKind UKind) : UnaryTransformType(BaseType, C.DependentTy, UKind, QualType()) {} TagType::TagType(TypeClass TC, const TagDecl *D, QualType can) : Type(TC, can, D->isDependentType() ? TypeDependence::DependentInstantiation : TypeDependence::None), decl(const_cast(D)) {} static TagDecl *getInterestingTagDecl(TagDecl *decl) { for (auto I : decl->redecls()) { if (I->isCompleteDefinition() || I->isBeingDefined()) return I; } // If there's no definition (not even in progress), return what we have. return decl; } TagDecl *TagType::getDecl() const { return getInterestingTagDecl(decl); } bool TagType::isBeingDefined() const { return getDecl()->isBeingDefined(); } bool RecordType::hasConstFields() const { std::vector RecordTypeList; RecordTypeList.push_back(this); unsigned NextToCheckIndex = 0; while (RecordTypeList.size() > NextToCheckIndex) { for (FieldDecl *FD : RecordTypeList[NextToCheckIndex]->getDecl()->fields()) { QualType FieldTy = FD->getType(); if (FieldTy.isConstQualified()) return true; FieldTy = FieldTy.getCanonicalType(); if (const auto *FieldRecTy = FieldTy->getAs()) { if (llvm::find(RecordTypeList, FieldRecTy) == RecordTypeList.end()) RecordTypeList.push_back(FieldRecTy); } } ++NextToCheckIndex; } return false; } bool AttributedType::isQualifier() const { // FIXME: Generate this with TableGen. switch (getAttrKind()) { // These are type qualifiers in the traditional C sense: they annotate // something about a specific value/variable of a type. (They aren't // always part of the canonical type, though.) case attr::ObjCGC: case attr::ObjCOwnership: case attr::ObjCInertUnsafeUnretained: case attr::TypeNonNull: case attr::TypeNullable: case attr::TypeNullUnspecified: case attr::LifetimeBound: case attr::AddressSpace: return true; // All other type attributes aren't qualifiers; they rewrite the modified // type to be a semantically different type. default: return false; } } bool AttributedType::isMSTypeSpec() const { // FIXME: Generate this with TableGen? switch (getAttrKind()) { default: return false; case attr::Ptr32: case attr::Ptr64: case attr::SPtr: case attr::UPtr: return true; } llvm_unreachable("invalid attr kind"); } bool AttributedType::isCallingConv() const { // FIXME: Generate this with TableGen. 
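  // [Editor's illustration; not part of the upstream file. The attribute kinds
  //  accepted below correspond to source-level calling-convention annotations
  //  such as these; unlike qualifiers, they yield a distinct function type.
  //  Kept under "#if 0"; declarations are hypothetical.]
#if 0
  void __attribute__((fastcall)) f1(int);      // attr::FastCall
  void __attribute__((vectorcall)) f2(float);  // attr::VectorCall
  void __attribute__((ms_abi)) f3(void);       // attr::MSABI
#endif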
switch (getAttrKind()) { default: return false; case attr::Pcs: case attr::CDecl: case attr::FastCall: case attr::StdCall: case attr::ThisCall: case attr::RegCall: case attr::SwiftCall: case attr::VectorCall: case attr::AArch64VectorPcs: case attr::Pascal: case attr::MSABI: case attr::SysVABI: case attr::IntelOclBicc: case attr::PreserveMost: case attr::PreserveAll: return true; } llvm_unreachable("invalid attr kind"); } CXXRecordDecl *InjectedClassNameType::getDecl() const { return cast(getInterestingTagDecl(Decl)); } IdentifierInfo *TemplateTypeParmType::getIdentifier() const { return isCanonicalUnqualified() ? nullptr : getDecl()->getIdentifier(); } SubstTemplateTypeParmPackType::SubstTemplateTypeParmPackType( const TemplateTypeParmType *Param, QualType Canon, const TemplateArgument &ArgPack) : Type(SubstTemplateTypeParmPack, Canon, TypeDependence::DependentInstantiation | TypeDependence::UnexpandedPack), Replaced(Param), Arguments(ArgPack.pack_begin()) { SubstTemplateTypeParmPackTypeBits.NumArgs = ArgPack.pack_size(); } TemplateArgument SubstTemplateTypeParmPackType::getArgumentPack() const { return TemplateArgument(llvm::makeArrayRef(Arguments, getNumArgs())); } void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getReplacedParameter(), getArgumentPack()); } void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID, const TemplateTypeParmType *Replaced, const TemplateArgument &ArgPack) { ID.AddPointer(Replaced); ID.AddInteger(ArgPack.pack_size()); for (const auto &P : ArgPack.pack_elements()) ID.AddPointer(P.getAsType().getAsOpaquePtr()); } bool TemplateSpecializationType:: anyDependentTemplateArguments(const TemplateArgumentListInfo &Args, bool &InstantiationDependent) { return anyDependentTemplateArguments(Args.arguments(), InstantiationDependent); } bool TemplateSpecializationType:: anyDependentTemplateArguments(ArrayRef Args, bool &InstantiationDependent) { for (const TemplateArgumentLoc &ArgLoc : Args) { if (ArgLoc.getArgument().isDependent()) { InstantiationDependent = true; return true; } if (ArgLoc.getArgument().isInstantiationDependent()) InstantiationDependent = true; } return false; } TemplateSpecializationType::TemplateSpecializationType( TemplateName T, ArrayRef Args, QualType Canon, QualType AliasedType) : Type(TemplateSpecialization, Canon.isNull() ? QualType(this, 0) : Canon, (Canon.isNull() ? TypeDependence::DependentInstantiation : Canon->getDependence() & ~(TypeDependence::VariablyModified | TypeDependence::UnexpandedPack)) | (toTypeDependence(T.getDependence()) & TypeDependence::UnexpandedPack)), Template(T) { TemplateSpecializationTypeBits.NumArgs = Args.size(); TemplateSpecializationTypeBits.TypeAlias = !AliasedType.isNull(); assert(!T.getAsDependentTemplateName() && "Use DependentTemplateSpecializationType for dependent template-name"); assert((T.getKind() == TemplateName::Template || T.getKind() == TemplateName::SubstTemplateTemplateParm || T.getKind() == TemplateName::SubstTemplateTemplateParmPack) && "Unexpected template name for TemplateSpecializationType"); auto *TemplateArgs = reinterpret_cast(this + 1); for (const TemplateArgument &Arg : Args) { // Update instantiation-dependent, variably-modified, and error bits. // If the canonical type exists and is non-dependent, the template // specialization type can be non-dependent even if one of the type // arguments is. Given: // template using U = int; // U is always non-dependent, irrespective of the type T. 
// However, U contains an unexpanded parameter pack, even though // its expansion (and thus its desugared type) doesn't. addDependence(toTypeDependence(Arg.getDependence()) & ~TypeDependence::Dependent); if (Arg.getKind() == TemplateArgument::Type) addDependence(Arg.getAsType()->getDependence() & TypeDependence::VariablyModified); new (TemplateArgs++) TemplateArgument(Arg); } // Store the aliased type if this is a type alias template specialization. if (isTypeAlias()) { auto *Begin = reinterpret_cast(this + 1); *reinterpret_cast(Begin + getNumArgs()) = AliasedType; } } void TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID, TemplateName T, ArrayRef Args, const ASTContext &Context) { T.Profile(ID); for (const TemplateArgument &Arg : Args) Arg.Profile(ID, Context); } QualType QualifierCollector::apply(const ASTContext &Context, QualType QT) const { if (!hasNonFastQualifiers()) return QT.withFastQualifiers(getFastQualifiers()); return Context.getQualifiedType(QT, *this); } QualType QualifierCollector::apply(const ASTContext &Context, const Type *T) const { if (!hasNonFastQualifiers()) return QualType(T, getFastQualifiers()); return Context.getQualifiedType(T, *this); } void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID, QualType BaseType, ArrayRef typeArgs, ArrayRef protocols, bool isKindOf) { ID.AddPointer(BaseType.getAsOpaquePtr()); ID.AddInteger(typeArgs.size()); for (auto typeArg : typeArgs) ID.AddPointer(typeArg.getAsOpaquePtr()); ID.AddInteger(protocols.size()); for (auto proto : protocols) ID.AddPointer(proto); ID.AddBoolean(isKindOf); } void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getBaseType(), getTypeArgsAsWritten(), llvm::makeArrayRef(qual_begin(), getNumProtocols()), isKindOfTypeAsWritten()); } void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID, const ObjCTypeParamDecl *OTPDecl, QualType CanonicalType, ArrayRef protocols) { ID.AddPointer(OTPDecl); ID.AddPointer(CanonicalType.getAsOpaquePtr()); ID.AddInteger(protocols.size()); for (auto proto : protocols) ID.AddPointer(proto); } void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getDecl(), getCanonicalTypeInternal(), llvm::makeArrayRef(qual_begin(), getNumProtocols())); } namespace { /// The cached properties of a type. class CachedProperties { Linkage L; bool local; public: CachedProperties(Linkage L, bool local) : L(L), local(local) {} Linkage getLinkage() const { return L; } bool hasLocalOrUnnamedType() const { return local; } friend CachedProperties merge(CachedProperties L, CachedProperties R) { Linkage MergedLinkage = minLinkage(L.L, R.L); return CachedProperties(MergedLinkage, L.hasLocalOrUnnamedType() | R.hasLocalOrUnnamedType()); } }; } // namespace static CachedProperties computeCachedProperties(const Type *T); namespace clang { /// The type-property cache. This is templated so as to be /// instantiated at an internal type to prevent unnecessary symbol /// leakage. template class TypePropertyCache { public: static CachedProperties get(QualType T) { return get(T.getTypePtr()); } static CachedProperties get(const Type *T) { ensure(T); return CachedProperties(T->TypeBits.getLinkage(), T->TypeBits.hasLocalOrUnnamedType()); } static void ensure(const Type *T) { // If the cache is valid, we're okay. if (T->TypeBits.isCacheValid()) return; // If this type is non-canonical, ask its canonical type for the // relevant information. 
if (!T->isCanonicalUnqualified()) { const Type *CT = T->getCanonicalTypeInternal().getTypePtr(); ensure(CT); T->TypeBits.CacheValid = true; T->TypeBits.CachedLinkage = CT->TypeBits.CachedLinkage; T->TypeBits.CachedLocalOrUnnamed = CT->TypeBits.CachedLocalOrUnnamed; return; } // Compute the cached properties and then set the cache. CachedProperties Result = computeCachedProperties(T); T->TypeBits.CacheValid = true; T->TypeBits.CachedLinkage = Result.getLinkage(); T->TypeBits.CachedLocalOrUnnamed = Result.hasLocalOrUnnamedType(); } }; } // namespace clang // Instantiate the friend template at a private class. In a // reasonable implementation, these symbols will be internal. // It is terrible that this is the best way to accomplish this. namespace { class Private {}; } // namespace using Cache = TypePropertyCache; static CachedProperties computeCachedProperties(const Type *T) { switch (T->getTypeClass()) { #define TYPE(Class,Base) #define NON_CANONICAL_TYPE(Class,Base) case Type::Class: #include "clang/AST/TypeNodes.inc" llvm_unreachable("didn't expect a non-canonical type here"); #define TYPE(Class,Base) #define DEPENDENT_TYPE(Class,Base) case Type::Class: #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class: #include "clang/AST/TypeNodes.inc" // Treat instantiation-dependent types as external. if (!T->isInstantiationDependentType()) T->dump(); assert(T->isInstantiationDependentType()); return CachedProperties(ExternalLinkage, false); case Type::Auto: case Type::DeducedTemplateSpecialization: // Give non-deduced 'auto' types external linkage. We should only see them // here in error recovery. return CachedProperties(ExternalLinkage, false); case Type::ExtInt: case Type::Builtin: // C++ [basic.link]p8: // A type is said to have linkage if and only if: // - it is a fundamental type (3.9.1); or return CachedProperties(ExternalLinkage, false); case Type::Record: case Type::Enum: { const TagDecl *Tag = cast(T)->getDecl(); // C++ [basic.link]p8: // - it is a class or enumeration type that is named (or has a name // for linkage purposes (7.1.3)) and the name has linkage; or // - it is a specialization of a class template (14); or Linkage L = Tag->getLinkageInternal(); bool IsLocalOrUnnamed = Tag->getDeclContext()->isFunctionOrMethod() || !Tag->hasNameForLinkage(); return CachedProperties(L, IsLocalOrUnnamed); } // C++ [basic.link]p8: // - it is a compound type (3.9.2) other than a class or enumeration, // compounded exclusively from types that have linkage; or case Type::Complex: return Cache::get(cast(T)->getElementType()); case Type::Pointer: return Cache::get(cast(T)->getPointeeType()); case Type::BlockPointer: return Cache::get(cast(T)->getPointeeType()); case Type::LValueReference: case Type::RValueReference: return Cache::get(cast(T)->getPointeeType()); case Type::MemberPointer: { const auto *MPT = cast(T); return merge(Cache::get(MPT->getClass()), Cache::get(MPT->getPointeeType())); } case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: return Cache::get(cast(T)->getElementType()); case Type::Vector: case Type::ExtVector: return Cache::get(cast(T)->getElementType()); case Type::ConstantMatrix: return Cache::get(cast(T)->getElementType()); case Type::FunctionNoProto: return Cache::get(cast(T)->getReturnType()); case Type::FunctionProto: { const auto *FPT = cast(T); CachedProperties result = Cache::get(FPT->getReturnType()); for (const auto &ai : FPT->param_types()) result = merge(result, Cache::get(ai)); return result; } case 
Type::ObjCInterface: { Linkage L = cast(T)->getDecl()->getLinkageInternal(); return CachedProperties(L, false); } case Type::ObjCObject: return Cache::get(cast(T)->getBaseType()); case Type::ObjCObjectPointer: return Cache::get(cast(T)->getPointeeType()); case Type::Atomic: return Cache::get(cast(T)->getValueType()); case Type::Pipe: return Cache::get(cast(T)->getElementType()); } llvm_unreachable("unhandled type class"); } /// Determine the linkage of this type. Linkage Type::getLinkage() const { Cache::ensure(this); return TypeBits.getLinkage(); } bool Type::hasUnnamedOrLocalType() const { Cache::ensure(this); return TypeBits.hasLocalOrUnnamedType(); } LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) { switch (T->getTypeClass()) { #define TYPE(Class,Base) #define NON_CANONICAL_TYPE(Class,Base) case Type::Class: #include "clang/AST/TypeNodes.inc" llvm_unreachable("didn't expect a non-canonical type here"); #define TYPE(Class,Base) #define DEPENDENT_TYPE(Class,Base) case Type::Class: #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class: #include "clang/AST/TypeNodes.inc" // Treat instantiation-dependent types as external. assert(T->isInstantiationDependentType()); return LinkageInfo::external(); case Type::ExtInt: case Type::Builtin: return LinkageInfo::external(); case Type::Auto: case Type::DeducedTemplateSpecialization: return LinkageInfo::external(); case Type::Record: case Type::Enum: return getDeclLinkageAndVisibility(cast(T)->getDecl()); case Type::Complex: return computeTypeLinkageInfo(cast(T)->getElementType()); case Type::Pointer: return computeTypeLinkageInfo(cast(T)->getPointeeType()); case Type::BlockPointer: return computeTypeLinkageInfo(cast(T)->getPointeeType()); case Type::LValueReference: case Type::RValueReference: return computeTypeLinkageInfo(cast(T)->getPointeeType()); case Type::MemberPointer: { const auto *MPT = cast(T); LinkageInfo LV = computeTypeLinkageInfo(MPT->getClass()); LV.merge(computeTypeLinkageInfo(MPT->getPointeeType())); return LV; } case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: return computeTypeLinkageInfo(cast(T)->getElementType()); case Type::Vector: case Type::ExtVector: return computeTypeLinkageInfo(cast(T)->getElementType()); case Type::ConstantMatrix: return computeTypeLinkageInfo( cast(T)->getElementType()); case Type::FunctionNoProto: return computeTypeLinkageInfo(cast(T)->getReturnType()); case Type::FunctionProto: { const auto *FPT = cast(T); LinkageInfo LV = computeTypeLinkageInfo(FPT->getReturnType()); for (const auto &ai : FPT->param_types()) LV.merge(computeTypeLinkageInfo(ai)); return LV; } case Type::ObjCInterface: return getDeclLinkageAndVisibility(cast(T)->getDecl()); case Type::ObjCObject: return computeTypeLinkageInfo(cast(T)->getBaseType()); case Type::ObjCObjectPointer: return computeTypeLinkageInfo( cast(T)->getPointeeType()); case Type::Atomic: return computeTypeLinkageInfo(cast(T)->getValueType()); case Type::Pipe: return computeTypeLinkageInfo(cast(T)->getElementType()); } llvm_unreachable("unhandled type class"); } bool Type::isLinkageValid() const { if (!TypeBits.isCacheValid()) return true; Linkage L = LinkageComputer{} .computeTypeLinkageInfo(getCanonicalTypeInternal()) .getLinkage(); return L == TypeBits.getLinkage(); } LinkageInfo LinkageComputer::getTypeLinkageAndVisibility(const Type *T) { if (!T->isCanonicalUnqualified()) return computeTypeLinkageInfo(T->getCanonicalTypeInternal()); LinkageInfo LV = computeTypeLinkageInfo(T); 
assert(LV.getLinkage() == T->getLinkage()); return LV; } LinkageInfo Type::getLinkageAndVisibility() const { return LinkageComputer{}.getTypeLinkageAndVisibility(this); } Optional Type::getNullability(const ASTContext &Context) const { QualType Type(this, 0); while (const auto *AT = Type->getAs()) { // Check whether this is an attributed type with nullability // information. if (auto Nullability = AT->getImmediateNullability()) return Nullability; Type = AT->getEquivalentType(); } return None; } bool Type::canHaveNullability(bool ResultIfUnknown) const { QualType type = getCanonicalTypeInternal(); switch (type->getTypeClass()) { // We'll only see canonical types here. #define NON_CANONICAL_TYPE(Class, Parent) \ case Type::Class: \ llvm_unreachable("non-canonical type"); #define TYPE(Class, Parent) #include "clang/AST/TypeNodes.inc" // Pointer types. case Type::Pointer: case Type::BlockPointer: case Type::MemberPointer: case Type::ObjCObjectPointer: return true; // Dependent types that could instantiate to pointer types. case Type::UnresolvedUsing: case Type::TypeOfExpr: case Type::TypeOf: case Type::Decltype: case Type::UnaryTransform: case Type::TemplateTypeParm: case Type::SubstTemplateTypeParmPack: case Type::DependentName: case Type::DependentTemplateSpecialization: case Type::Auto: return ResultIfUnknown; // Dependent template specializations can instantiate to pointer // types unless they're known to be specializations of a class // template. case Type::TemplateSpecialization: if (TemplateDecl *templateDecl = cast(type.getTypePtr()) ->getTemplateName().getAsTemplateDecl()) { if (isa(templateDecl)) return false; } return ResultIfUnknown; case Type::Builtin: switch (cast(type.getTypePtr())->getKind()) { // Signed, unsigned, and floating-point types cannot have nullability. #define SIGNED_TYPE(Id, SingletonId) case BuiltinType::Id: #define UNSIGNED_TYPE(Id, SingletonId) case BuiltinType::Id: #define FLOATING_TYPE(Id, SingletonId) case BuiltinType::Id: #define BUILTIN_TYPE(Id, SingletonId) #include "clang/AST/BuiltinTypes.def" return false; // Dependent types that could instantiate to a pointer type. case BuiltinType::Dependent: case BuiltinType::Overload: case BuiltinType::BoundMember: case BuiltinType::PseudoObject: case BuiltinType::UnknownAny: case BuiltinType::ARCUnbridgedCast: return ResultIfUnknown; case BuiltinType::Void: case BuiltinType::ObjCId: case BuiltinType::ObjCClass: case BuiltinType::ObjCSel: #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ case BuiltinType::Id: #include "clang/Basic/OpenCLImageTypes.def" #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ case BuiltinType::Id: #include "clang/Basic/OpenCLExtensionTypes.def" case BuiltinType::OCLSampler: case BuiltinType::OCLEvent: case BuiltinType::OCLClkEvent: case BuiltinType::OCLQueue: case BuiltinType::OCLReserveID: #define SVE_TYPE(Name, Id, SingletonId) \ case BuiltinType::Id: #include "clang/Basic/AArch64SVEACLETypes.def" case BuiltinType::BuiltinFn: case BuiltinType::NullPtr: case BuiltinType::IncompleteMatrixIdx: case BuiltinType::OMPArraySection: case BuiltinType::OMPArrayShaping: case BuiltinType::OMPIterator: return false; } llvm_unreachable("unknown builtin type"); // Non-pointer types. 
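  // [Editor's illustration; not part of the upstream file. Only pointer-like
  //  types, or types that may instantiate to one, can carry a nullability
  //  attribute, e.g. with Clang's nullability extension; kept under "#if 0".]
#if 0
  int *_Nullable p;   // pointer type: nullability is meaningful
  int i;              // non-pointer scalar: canHaveNullability() is false
#endif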
case Type::Complex: case Type::LValueReference: case Type::RValueReference: case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: case Type::DependentSizedArray: case Type::DependentVector: case Type::DependentSizedExtVector: case Type::Vector: case Type::ExtVector: case Type::ConstantMatrix: case Type::DependentSizedMatrix: case Type::DependentAddressSpace: case Type::FunctionProto: case Type::FunctionNoProto: case Type::Record: case Type::DeducedTemplateSpecialization: case Type::Enum: case Type::InjectedClassName: case Type::PackExpansion: case Type::ObjCObject: case Type::ObjCInterface: case Type::Atomic: case Type::Pipe: case Type::ExtInt: case Type::DependentExtInt: return false; } llvm_unreachable("bad type kind!"); } llvm::Optional AttributedType::getImmediateNullability() const { if (getAttrKind() == attr::TypeNonNull) return NullabilityKind::NonNull; if (getAttrKind() == attr::TypeNullable) return NullabilityKind::Nullable; if (getAttrKind() == attr::TypeNullUnspecified) return NullabilityKind::Unspecified; return None; } Optional AttributedType::stripOuterNullability(QualType &T) { QualType AttrTy = T; if (auto MacroTy = dyn_cast(T)) AttrTy = MacroTy->getUnderlyingType(); if (auto attributed = dyn_cast(AttrTy)) { if (auto nullability = attributed->getImmediateNullability()) { T = attributed->getModifiedType(); return nullability; } } return None; } bool Type::isBlockCompatibleObjCPointerType(ASTContext &ctx) const { const auto *objcPtr = getAs(); if (!objcPtr) return false; if (objcPtr->isObjCIdType()) { // id is always okay. return true; } // Blocks are NSObjects. if (ObjCInterfaceDecl *iface = objcPtr->getInterfaceDecl()) { if (iface->getIdentifier() != ctx.getNSObjectName()) return false; // Continue to check qualifiers, below. } else if (objcPtr->isObjCQualifiedIdType()) { // Continue to check qualifiers, below. } else { return false; } // Check protocol qualifiers. for (ObjCProtocolDecl *proto : objcPtr->quals()) { // Blocks conform to NSObject and NSCopying. if (proto->getIdentifier() != ctx.getNSObjectName() && proto->getIdentifier() != ctx.getNSCopyingName()) return false; } return true; } Qualifiers::ObjCLifetime Type::getObjCARCImplicitLifetime() const { if (isObjCARCImplicitlyUnretainedType()) return Qualifiers::OCL_ExplicitNone; return Qualifiers::OCL_Strong; } bool Type::isObjCARCImplicitlyUnretainedType() const { assert(isObjCLifetimeType() && "cannot query implicit lifetime for non-inferrable type"); const Type *canon = getCanonicalTypeInternal().getTypePtr(); // Walk down to the base type. We don't care about qualifiers for this. while (const auto *array = dyn_cast(canon)) canon = array->getElementType().getTypePtr(); if (const auto *opt = dyn_cast(canon)) { // Class and Class don't require retention. if (opt->getObjectType()->isObjCClass()) return true; } return false; } bool Type::isObjCNSObjectType() const { const Type *cur = this; while (true) { if (const auto *typedefType = dyn_cast(cur)) return typedefType->getDecl()->hasAttr(); // Single-step desugar until we run out of sugar. 
QualType next = cur->getLocallyUnqualifiedSingleStepDesugaredType(); if (next.getTypePtr() == cur) return false; cur = next.getTypePtr(); } } bool Type::isObjCIndependentClassType() const { if (const auto *typedefType = dyn_cast(this)) return typedefType->getDecl()->hasAttr(); return false; } bool Type::isObjCRetainableType() const { return isObjCObjectPointerType() || isBlockPointerType() || isObjCNSObjectType(); } bool Type::isObjCIndirectLifetimeType() const { if (isObjCLifetimeType()) return true; if (const auto *OPT = getAs()) return OPT->getPointeeType()->isObjCIndirectLifetimeType(); if (const auto *Ref = getAs()) return Ref->getPointeeType()->isObjCIndirectLifetimeType(); if (const auto *MemPtr = getAs()) return MemPtr->getPointeeType()->isObjCIndirectLifetimeType(); return false; } /// Returns true if objects of this type have lifetime semantics under /// ARC. bool Type::isObjCLifetimeType() const { const Type *type = this; while (const ArrayType *array = type->getAsArrayTypeUnsafe()) type = array->getElementType().getTypePtr(); return type->isObjCRetainableType(); } /// Determine whether the given type T is a "bridgable" Objective-C type, /// which is either an Objective-C object pointer type or an bool Type::isObjCARCBridgableType() const { return isObjCObjectPointerType() || isBlockPointerType(); } /// Determine whether the given type T is a "bridgeable" C type. bool Type::isCARCBridgableType() const { const auto *Pointer = getAs(); if (!Pointer) return false; QualType Pointee = Pointer->getPointeeType(); return Pointee->isVoidType() || Pointee->isRecordType(); } /// Check if the specified type is the CUDA device builtin surface type. bool Type::isCUDADeviceBuiltinSurfaceType() const { if (const auto *RT = getAs()) return RT->getDecl()->hasAttr(); return false; } /// Check if the specified type is the CUDA device builtin texture type. bool Type::isCUDADeviceBuiltinTextureType() const { if (const auto *RT = getAs()) return RT->getDecl()->hasAttr(); return false; } bool Type::hasSizedVLAType() const { if (!isVariablyModifiedType()) return false; if (const auto *ptr = getAs()) return ptr->getPointeeType()->hasSizedVLAType(); if (const auto *ref = getAs()) return ref->getPointeeType()->hasSizedVLAType(); if (const ArrayType *arr = getAsArrayTypeUnsafe()) { if (isa(arr) && cast(arr)->getSizeExpr()) return true; return arr->getElementType()->hasSizedVLAType(); } return false; } QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) { switch (type.getObjCLifetime()) { case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Autoreleasing: break; case Qualifiers::OCL_Strong: return DK_objc_strong_lifetime; case Qualifiers::OCL_Weak: return DK_objc_weak_lifetime; } if (const auto *RT = type->getBaseElementTypeUnsafe()->getAs()) { const RecordDecl *RD = RT->getDecl(); if (const auto *CXXRD = dyn_cast(RD)) { /// Check if this is a C++ object with a non-trivial destructor. if (CXXRD->hasDefinition() && !CXXRD->hasTrivialDestructor()) return DK_cxx_destructor; } else { /// Check if this is a C struct that is non-trivial to destroy or an array /// that contains such a struct. 
if (RD->isNonTrivialToPrimitiveDestroy()) return DK_nontrivial_c_struct; } } return DK_none; } CXXRecordDecl *MemberPointerType::getMostRecentCXXRecordDecl() const { return getClass()->getAsCXXRecordDecl()->getMostRecentNonInjectedDecl(); } void clang::FixedPointValueToString(SmallVectorImpl &Str, llvm::APSInt Val, unsigned Scale) { FixedPointSemantics FXSema(Val.getBitWidth(), Scale, Val.isSigned(), /*IsSaturated=*/false, /*HasUnsignedPadding=*/false); APFixedPoint(Val, FXSema).toString(Str); } AutoType::AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword, TypeDependence ExtraDependence, ConceptDecl *TypeConstraintConcept, ArrayRef TypeConstraintArgs) : DeducedType(Auto, DeducedAsType, ExtraDependence) { AutoTypeBits.Keyword = (unsigned)Keyword; AutoTypeBits.NumArgs = TypeConstraintArgs.size(); this->TypeConstraintConcept = TypeConstraintConcept; if (TypeConstraintConcept) { TemplateArgument *ArgBuffer = getArgBuffer(); for (const TemplateArgument &Arg : TypeConstraintArgs) { addDependence(toTypeDependence( Arg.getDependence() & TemplateArgumentDependence::UnexpandedPack)); new (ArgBuffer++) TemplateArgument(Arg); } } } void AutoType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType Deduced, AutoTypeKeyword Keyword, bool IsDependent, ConceptDecl *CD, ArrayRef Arguments) { ID.AddPointer(Deduced.getAsOpaquePtr()); ID.AddInteger((unsigned)Keyword); ID.AddBoolean(IsDependent); ID.AddPointer(CD); for (const TemplateArgument &Arg : Arguments) Arg.Profile(ID, Context); } diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp index 703f5087370a..143408401245 100644 --- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp +++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp @@ -1,5009 +1,5008 @@ //===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This coordinates the debug information generation while generating code. 
// //===----------------------------------------------------------------------===// #include "CGDebugInfo.h" #include "CGBlocks.h" #include "CGCXXABI.h" #include "CGObjCRuntime.h" #include "CGRecordLayout.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "ConstantEmitter.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/RecordLayout.h" #include "clang/Basic/CodeGenOptions.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Version.h" #include "clang/Frontend/FrontendOptions.h" #include "clang/Lex/HeaderSearchOptions.h" #include "clang/Lex/ModuleMap.h" #include "clang/Lex/PreprocessorOptions.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/MD5.h" #include "llvm/Support/Path.h" #include "llvm/Support/TimeProfiler.h" using namespace clang; using namespace clang::CodeGen; static uint32_t getTypeAlignIfRequired(const Type *Ty, const ASTContext &Ctx) { auto TI = Ctx.getTypeInfo(Ty); return TI.AlignIsRequired ? TI.Align : 0; } static uint32_t getTypeAlignIfRequired(QualType Ty, const ASTContext &Ctx) { return getTypeAlignIfRequired(Ty.getTypePtr(), Ctx); } static uint32_t getDeclAlignIfRequired(const Decl *D, const ASTContext &Ctx) { return D->hasAttr() ? D->getMaxAlignment() : 0; } CGDebugInfo::CGDebugInfo(CodeGenModule &CGM) : CGM(CGM), DebugKind(CGM.getCodeGenOpts().getDebugInfo()), DebugTypeExtRefs(CGM.getCodeGenOpts().DebugTypeExtRefs), DBuilder(CGM.getModule()) { for (const auto &KV : CGM.getCodeGenOpts().DebugPrefixMap) DebugPrefixMap[KV.first] = KV.second; CreateCompileUnit(); } CGDebugInfo::~CGDebugInfo() { assert(LexicalBlockStack.empty() && "Region stack mismatch, stack not empty!"); } ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, SourceLocation TemporaryLocation) : CGF(&CGF) { init(TemporaryLocation); } ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, bool DefaultToEmpty, SourceLocation TemporaryLocation) : CGF(&CGF) { init(TemporaryLocation, DefaultToEmpty); } void ApplyDebugLocation::init(SourceLocation TemporaryLocation, bool DefaultToEmpty) { auto *DI = CGF->getDebugInfo(); if (!DI) { CGF = nullptr; return; } OriginalLocation = CGF->Builder.getCurrentDebugLocation(); if (OriginalLocation && !DI->CGM.getExpressionLocationsEnabled()) return; if (TemporaryLocation.isValid()) { DI->EmitLocation(CGF->Builder, TemporaryLocation); return; } if (DefaultToEmpty) { CGF->Builder.SetCurrentDebugLocation(llvm::DebugLoc()); return; } // Construct a location that has a valid scope, but no line info. 
assert(!DI->LexicalBlockStack.empty()); CGF->Builder.SetCurrentDebugLocation(llvm::DebugLoc::get( 0, 0, DI->LexicalBlockStack.back(), DI->getInlinedAt())); } ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, const Expr *E) : CGF(&CGF) { init(E->getExprLoc()); } ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc) : CGF(&CGF) { if (!CGF.getDebugInfo()) { this->CGF = nullptr; return; } OriginalLocation = CGF.Builder.getCurrentDebugLocation(); if (Loc) CGF.Builder.SetCurrentDebugLocation(std::move(Loc)); } ApplyDebugLocation::~ApplyDebugLocation() { // Query CGF so the location isn't overwritten when location updates are // temporarily disabled (for C++ default function arguments) if (CGF) CGF->Builder.SetCurrentDebugLocation(std::move(OriginalLocation)); } ApplyInlineDebugLocation::ApplyInlineDebugLocation(CodeGenFunction &CGF, GlobalDecl InlinedFn) : CGF(&CGF) { if (!CGF.getDebugInfo()) { this->CGF = nullptr; return; } auto &DI = *CGF.getDebugInfo(); SavedLocation = DI.getLocation(); assert((DI.getInlinedAt() == CGF.Builder.getCurrentDebugLocation()->getInlinedAt()) && "CGDebugInfo and IRBuilder are out of sync"); DI.EmitInlineFunctionStart(CGF.Builder, InlinedFn); } ApplyInlineDebugLocation::~ApplyInlineDebugLocation() { if (!CGF) return; auto &DI = *CGF->getDebugInfo(); DI.EmitInlineFunctionEnd(CGF->Builder); DI.EmitLocation(CGF->Builder, SavedLocation); } void CGDebugInfo::setLocation(SourceLocation Loc) { // If the new location isn't valid return. if (Loc.isInvalid()) return; CurLoc = CGM.getContext().getSourceManager().getExpansionLoc(Loc); // If we've changed files in the middle of a lexical scope go ahead // and create a new lexical scope with file node if it's different // from the one in the scope. if (LexicalBlockStack.empty()) return; SourceManager &SM = CGM.getContext().getSourceManager(); auto *Scope = cast(LexicalBlockStack.back()); PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc); if (PCLoc.isInvalid() || Scope->getFile() == getOrCreateFile(CurLoc)) return; if (auto *LBF = dyn_cast(Scope)) { LexicalBlockStack.pop_back(); LexicalBlockStack.emplace_back(DBuilder.createLexicalBlockFile( LBF->getScope(), getOrCreateFile(CurLoc))); } else if (isa(Scope) || isa(Scope)) { LexicalBlockStack.pop_back(); LexicalBlockStack.emplace_back( DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc))); } } llvm::DIScope *CGDebugInfo::getDeclContextDescriptor(const Decl *D) { llvm::DIScope *Mod = getParentModuleOrNull(D); return getContextDescriptor(cast(D->getDeclContext()), Mod ? Mod : TheCU); } llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context, llvm::DIScope *Default) { if (!Context) return Default; auto I = RegionMap.find(Context); if (I != RegionMap.end()) { llvm::Metadata *V = I->second; return dyn_cast_or_null(V); } // Check namespace. if (const auto *NSDecl = dyn_cast(Context)) return getOrCreateNamespace(NSDecl); if (const auto *RDecl = dyn_cast(Context)) if (!RDecl->isDependentType()) return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl), TheCU->getFile()); return Default; } PrintingPolicy CGDebugInfo::getPrintingPolicy() const { PrintingPolicy PP = CGM.getContext().getPrintingPolicy(); // If we're emitting codeview, it's important to try to match MSVC's naming so // that visualizers written for MSVC will trigger for our class names. 
In // particular, we can't have spaces between arguments of standard templates // like basic_string and vector, but we must have spaces between consecutive // angle brackets that close nested template argument lists. if (CGM.getCodeGenOpts().EmitCodeView) { PP.MSVCFormatting = true; PP.SplitTemplateClosers = true; } else { // For DWARF, printing rules are underspecified. // SplitTemplateClosers yields better interop with GCC and GDB (PR46052). PP.SplitTemplateClosers = true; } // Apply -fdebug-prefix-map. PP.Callbacks = &PrintCB; return PP; } StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) { assert(FD && "Invalid FunctionDecl!"); IdentifierInfo *FII = FD->getIdentifier(); FunctionTemplateSpecializationInfo *Info = FD->getTemplateSpecializationInfo(); // Emit the unqualified name in normal operation. LLVM and the debugger can // compute the fully qualified name from the scope chain. If we're only // emitting line table info, there won't be any scope chains, so emit the // fully qualified name here so that stack traces are more accurate. // FIXME: Do this when emitting DWARF as well as when emitting CodeView after // evaluating the size impact. bool UseQualifiedName = DebugKind == codegenoptions::DebugLineTablesOnly && CGM.getCodeGenOpts().EmitCodeView; if (!Info && FII && !UseQualifiedName) return FII->getName(); SmallString<128> NS; llvm::raw_svector_ostream OS(NS); if (!UseQualifiedName) FD->printName(OS); else FD->printQualifiedName(OS, getPrintingPolicy()); // Add any template specialization args. if (Info) { const TemplateArgumentList *TArgs = Info->TemplateArguments; printTemplateArgumentList(OS, TArgs->asArray(), getPrintingPolicy()); } // Copy this name on the side and use its reference. return internString(OS.str()); } StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) { SmallString<256> MethodName; llvm::raw_svector_ostream OS(MethodName); OS << (OMD->isInstanceMethod() ? '-' : '+') << '['; const DeclContext *DC = OMD->getDeclContext(); if (const auto *OID = dyn_cast(DC)) { OS << OID->getName(); } else if (const auto *OID = dyn_cast(DC)) { OS << OID->getName(); } else if (const auto *OC = dyn_cast(DC)) { if (OC->IsClassExtension()) { OS << OC->getClassInterface()->getName(); } else { OS << OC->getIdentifier()->getNameStart() << '(' << OC->getIdentifier()->getNameStart() << ')'; } } else if (const auto *OCD = dyn_cast(DC)) { OS << OCD->getClassInterface()->getName() << '(' << OCD->getName() << ')'; } OS << ' ' << OMD->getSelector().getAsString() << ']'; return internString(OS.str()); } StringRef CGDebugInfo::getSelectorName(Selector S) { return internString(S.getAsString()); } StringRef CGDebugInfo::getClassName(const RecordDecl *RD) { if (isa(RD)) { SmallString<128> Name; llvm::raw_svector_ostream OS(Name); PrintingPolicy PP = getPrintingPolicy(); PP.PrintCanonicalTypes = true; RD->getNameForDiagnostic(OS, PP, /*Qualified*/ false); // Copy this name on the side and use its reference. return internString(Name); } // quick optimization to avoid having to intern strings that are already // stored reliably elsewhere if (const IdentifierInfo *II = RD->getIdentifier()) return II->getName(); // The CodeView printer in LLVM wants to see the names of unnamed types: it is // used to reconstruct the fully qualified type names. 
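  // The name is synthesized from the declarator or typedef associated with the
  // unnamed type; e.g. (illustrative) an unnamed struct whose declarator is 'X'
  // is emitted as "<unnamed-type-X>".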
  if (CGM.getCodeGenOpts().EmitCodeView) {
    if (const TypedefNameDecl *D = RD->getTypedefNameForAnonDecl()) {
      assert(RD->getDeclContext() == D->getDeclContext() &&
             "Typedef should not be in another decl context!");
      assert(D->getDeclName().getAsIdentifierInfo() &&
             "Typedef was not named!");
      return D->getDeclName().getAsIdentifierInfo()->getName();
    }

    if (CGM.getLangOpts().CPlusPlus) {
      StringRef Name;

      ASTContext &Context = CGM.getContext();
      if (const DeclaratorDecl *DD = Context.getDeclaratorForUnnamedTagDecl(RD))
        // Anonymous types without a name for linkage purposes have their
        // declarator mangled in if they have one.
        Name = DD->getName();
      else if (const TypedefNameDecl *TND =
                   Context.getTypedefNameForUnnamedTagDecl(RD))
        // Anonymous types without a name for linkage purposes have their
        // associate typedef mangled in if they have one.
        Name = TND->getName();

      if (!Name.empty()) {
        SmallString<256> UnnamedType("<unnamed-type-");
        UnnamedType += Name;
        UnnamedType += '>';
        return internString(UnnamedType);
      }
    }
  }

  return StringRef();
}

Optional<llvm::DIFile::ChecksumKind>
CGDebugInfo::computeChecksum(FileID FID, SmallString<32> &Checksum) const {
  Checksum.clear();

  if (!CGM.getCodeGenOpts().EmitCodeView &&
      CGM.getCodeGenOpts().DwarfVersion < 5)
    return None;

  SourceManager &SM = CGM.getContext().getSourceManager();
  bool Invalid;
  const llvm::MemoryBuffer *MemBuffer = SM.getBuffer(FID, &Invalid);
  if (Invalid)
    return None;

  llvm::MD5 Hash;
  llvm::MD5::MD5Result Result;

  Hash.update(MemBuffer->getBuffer());
  Hash.final(Result);

  Hash.stringifyResult(Result, Checksum);
  return llvm::DIFile::CSK_MD5;
}

Optional<StringRef> CGDebugInfo::getSource(const SourceManager &SM,
                                           FileID FID) {
  if (!CGM.getCodeGenOpts().EmbedSource)
    return None;

  bool SourceInvalid = false;
  StringRef Source = SM.getBufferData(FID, &SourceInvalid);

  if (SourceInvalid)
    return None;

  return Source;
}

llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
  if (!Loc.isValid())
    // If Location is not valid then use main input file.
    return TheCU->getFile();

  SourceManager &SM = CGM.getContext().getSourceManager();
  PresumedLoc PLoc = SM.getPresumedLoc(Loc);

  StringRef FileName = PLoc.getFilename();
  if (PLoc.isInvalid() || FileName.empty())
    // If the location is not valid then use main input file.
    return TheCU->getFile();

  // Cache the results.
  auto It = DIFileCache.find(FileName.data());
  if (It != DIFileCache.end()) {
    // Verify that the information still exists.
    if (llvm::Metadata *V = It->second)
      return cast<llvm::DIFile>(V);
  }

  SmallString<32> Checksum;

  // Compute the checksum if possible. If the location is affected by a #line
  // directive that refers to a file, PLoc will have an invalid FileID, and we
  // will correctly get no checksum.
  Optional<llvm::DIFile::ChecksumKind> CSKind =
      computeChecksum(PLoc.getFileID(), Checksum);
  Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
  if (CSKind)
    CSInfo.emplace(*CSKind, Checksum);
  return createFile(FileName, CSInfo, getSource(SM, SM.getFileID(Loc)));
}

llvm::DIFile *CGDebugInfo::createFile(
    StringRef FileName,
    Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo,
    Optional<StringRef> Source) {
  StringRef Dir;
  StringRef File;
  std::string RemappedFile = remapDIPath(FileName);
  std::string CurDir = remapDIPath(getCurrentDirname());
  SmallString<128> DirBuf;
  SmallString<128> FileBuf;
  if (llvm::sys::path::is_absolute(RemappedFile)) {
    // Strip the common prefix (if it is more than just "/") from current
    // directory and FileName for a more space-efficient encoding.
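    // For example (illustrative values): with CurDir "/usr/src" and
    // RemappedFile "/usr/src/lib/libc/gen/foo.c", the loop below yields
    // Dir = "/usr/src" and File = "lib/libc/gen/foo.c"; if the only shared
    // component is the root "/", the file name is left absolute instead.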
auto FileIt = llvm::sys::path::begin(RemappedFile); auto FileE = llvm::sys::path::end(RemappedFile); auto CurDirIt = llvm::sys::path::begin(CurDir); auto CurDirE = llvm::sys::path::end(CurDir); for (; CurDirIt != CurDirE && *CurDirIt == *FileIt; ++CurDirIt, ++FileIt) llvm::sys::path::append(DirBuf, *CurDirIt); if (std::distance(llvm::sys::path::begin(CurDir), CurDirIt) == 1) { // Don't strip the common prefix if it is only the root "/" // since that would make LLVM diagnostic locations confusing. Dir = {}; File = RemappedFile; } else { for (; FileIt != FileE; ++FileIt) llvm::sys::path::append(FileBuf, *FileIt); Dir = DirBuf; File = FileBuf; } } else { Dir = CurDir; File = RemappedFile; } llvm::DIFile *F = DBuilder.createFile(File, Dir, CSInfo, Source); DIFileCache[FileName.data()].reset(F); return F; } std::string CGDebugInfo::remapDIPath(StringRef Path) const { if (DebugPrefixMap.empty()) return Path.str(); SmallString<256> P = Path; for (const auto &Entry : DebugPrefixMap) if (llvm::sys::path::replace_path_prefix(P, Entry.first, Entry.second)) break; return P.str().str(); } unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) { if (Loc.isInvalid() && CurLoc.isInvalid()) return 0; SourceManager &SM = CGM.getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc); return PLoc.isValid() ? PLoc.getLine() : 0; } unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc, bool Force) { // We may not want column information at all. if (!Force && !CGM.getCodeGenOpts().DebugColumnInfo) return 0; // If the location is invalid then use the current column. if (Loc.isInvalid() && CurLoc.isInvalid()) return 0; SourceManager &SM = CGM.getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc); return PLoc.isValid() ? PLoc.getColumn() : 0; } StringRef CGDebugInfo::getCurrentDirname() { if (!CGM.getCodeGenOpts().DebugCompilationDir.empty()) return CGM.getCodeGenOpts().DebugCompilationDir; if (!CWDName.empty()) return CWDName; SmallString<256> CWD; llvm::sys::fs::current_path(CWD); return CWDName = internString(CWD); } void CGDebugInfo::CreateCompileUnit() { SmallString<32> Checksum; Optional CSKind; Optional> CSInfo; // Should we be asking the SourceManager for the main file name, instead of // accepting it as an argument? This just causes the main file name to // mismatch with source locations and create extra lexical scopes or // mismatched debug info (a CU with a DW_AT_file of "-", because that's what // the driver passed, but functions/other things have DW_AT_file of "" // because that's what the SourceManager says) // Get absolute path name. SourceManager &SM = CGM.getContext().getSourceManager(); std::string MainFileName = CGM.getCodeGenOpts().MainFileName; if (MainFileName.empty()) MainFileName = ""; // The main file name provided via the "-main-file-name" option contains just // the file name itself with no path information. This file name may have had // a relative path, so we look into the actual file entry for the main // file to determine the real absolute path for the file. 
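  // For example (illustrative): "-main-file-name foo.c" combined with a main
  // file entry whose directory is "/usr/src/bin" is rewritten below as
  // "/usr/src/bin/foo.c".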
std::string MainFileDir; if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) { MainFileDir = std::string(MainFile->getDir()->getName()); if (!llvm::sys::path::is_absolute(MainFileName)) { llvm::SmallString<1024> MainFileDirSS(MainFileDir); llvm::sys::path::append(MainFileDirSS, MainFileName); MainFileName = std::string(llvm::sys::path::remove_leading_dotslash(MainFileDirSS)); } // If the main file name provided is identical to the input file name, and // if the input file is a preprocessed source, use the module name for // debug info. The module name comes from the name specified in the first // linemarker if the input is a preprocessed source. if (MainFile->getName() == MainFileName && FrontendOptions::getInputKindForExtension( MainFile->getName().rsplit('.').second) .isPreprocessed()) MainFileName = CGM.getModule().getName().str(); CSKind = computeChecksum(SM.getMainFileID(), Checksum); } llvm::dwarf::SourceLanguage LangTag; const LangOptions &LO = CGM.getLangOpts(); if (LO.CPlusPlus) { if (LO.ObjC) LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus; else if (LO.CPlusPlus14) LangTag = llvm::dwarf::DW_LANG_C_plus_plus_14; else if (LO.CPlusPlus11) LangTag = llvm::dwarf::DW_LANG_C_plus_plus_11; else LangTag = llvm::dwarf::DW_LANG_C_plus_plus; } else if (LO.ObjC) { LangTag = llvm::dwarf::DW_LANG_ObjC; } else if (LO.RenderScript) { LangTag = llvm::dwarf::DW_LANG_GOOGLE_RenderScript; } else if (LO.C99) { LangTag = llvm::dwarf::DW_LANG_C99; } else { LangTag = llvm::dwarf::DW_LANG_C89; } std::string Producer = getClangFullVersion(); // Figure out which version of the ObjC runtime we have. unsigned RuntimeVers = 0; if (LO.ObjC) RuntimeVers = LO.ObjCRuntime.isNonFragile() ? 2 : 1; llvm::DICompileUnit::DebugEmissionKind EmissionKind; switch (DebugKind) { case codegenoptions::NoDebugInfo: case codegenoptions::LocTrackingOnly: EmissionKind = llvm::DICompileUnit::NoDebug; break; case codegenoptions::DebugLineTablesOnly: EmissionKind = llvm::DICompileUnit::LineTablesOnly; break; case codegenoptions::DebugDirectivesOnly: EmissionKind = llvm::DICompileUnit::DebugDirectivesOnly; break; case codegenoptions::DebugInfoConstructor: case codegenoptions::LimitedDebugInfo: case codegenoptions::FullDebugInfo: EmissionKind = llvm::DICompileUnit::FullDebug; break; } uint64_t DwoId = 0; auto &CGOpts = CGM.getCodeGenOpts(); // The DIFile used by the CU is distinct from the main source // file. Its directory part specifies what becomes the // DW_AT_comp_dir (the compilation directory), even if the source // file was specified with an absolute path. if (CSKind) CSInfo.emplace(*CSKind, Checksum); llvm::DIFile *CUFile = DBuilder.createFile( remapDIPath(MainFileName), remapDIPath(getCurrentDirname()), CSInfo, getSource(SM, SM.getMainFileID())); StringRef Sysroot, SDK; if (CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB) { Sysroot = CGM.getHeaderSearchOpts().Sysroot; auto B = llvm::sys::path::rbegin(Sysroot); auto E = llvm::sys::path::rend(Sysroot); auto It = std::find_if(B, E, [](auto SDK) { return SDK.endswith(".sdk"); }); if (It != E) SDK = *It; } // Create new compile unit. TheCU = DBuilder.createCompileUnit( LangTag, CUFile, CGOpts.EmitVersionIdentMetadata ? Producer : "", LO.Optimize || CGOpts.PrepareForLTO || CGOpts.PrepareForThinLTO, CGOpts.DwarfDebugFlags, RuntimeVers, CGOpts.SplitDwarfFile, EmissionKind, DwoId, CGOpts.SplitDwarfInlining, CGOpts.DebugInfoForProfiling, CGM.getTarget().getTriple().isNVPTX() ? 
llvm::DICompileUnit::DebugNameTableKind::None : static_cast( CGOpts.DebugNameTable), CGOpts.DebugRangesBaseAddress, remapDIPath(Sysroot), SDK); } llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) { llvm::dwarf::TypeKind Encoding; StringRef BTName; switch (BT->getKind()) { #define BUILTIN_TYPE(Id, SingletonId) #define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id: #include "clang/AST/BuiltinTypes.def" case BuiltinType::Dependent: llvm_unreachable("Unexpected builtin type"); case BuiltinType::NullPtr: return DBuilder.createNullPtrType(); case BuiltinType::Void: return nullptr; case BuiltinType::ObjCClass: if (!ClassTy) ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, "objc_class", TheCU, TheCU->getFile(), 0); return ClassTy; case BuiltinType::ObjCId: { // typedef struct objc_class *Class; // typedef struct objc_object { // Class isa; // } *id; if (ObjTy) return ObjTy; if (!ClassTy) ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, "objc_class", TheCU, TheCU->getFile(), 0); unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy); auto *ISATy = DBuilder.createPointerType(ClassTy, Size); ObjTy = DBuilder.createStructType(TheCU, "objc_object", TheCU->getFile(), 0, 0, 0, llvm::DINode::FlagZero, nullptr, llvm::DINodeArray()); DBuilder.replaceArrays( ObjTy, DBuilder.getOrCreateArray(&*DBuilder.createMemberType( ObjTy, "isa", TheCU->getFile(), 0, Size, 0, 0, llvm::DINode::FlagZero, ISATy))); return ObjTy; } case BuiltinType::ObjCSel: { if (!SelTy) SelTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, "objc_selector", TheCU, TheCU->getFile(), 0); return SelTy; } #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ case BuiltinType::Id: \ return getOrCreateStructPtrType("opencl_" #ImgType "_" #Suffix "_t", \ SingletonId); #include "clang/Basic/OpenCLImageTypes.def" case BuiltinType::OCLSampler: return getOrCreateStructPtrType("opencl_sampler_t", OCLSamplerDITy); case BuiltinType::OCLEvent: return getOrCreateStructPtrType("opencl_event_t", OCLEventDITy); case BuiltinType::OCLClkEvent: return getOrCreateStructPtrType("opencl_clk_event_t", OCLClkEventDITy); case BuiltinType::OCLQueue: return getOrCreateStructPtrType("opencl_queue_t", OCLQueueDITy); case BuiltinType::OCLReserveID: return getOrCreateStructPtrType("opencl_reserve_id_t", OCLReserveIDDITy); #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ case BuiltinType::Id: \ return getOrCreateStructPtrType("opencl_" #ExtType, Id##Ty); #include "clang/Basic/OpenCLExtensionTypes.def" #define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/AArch64SVEACLETypes.def" { ASTContext::BuiltinVectorTypeInfo Info = CGM.getContext().getBuiltinVectorTypeInfo(BT); unsigned NumElemsPerVG = (Info.EC.Min * Info.NumVectors) / 2; // Debuggers can't extract 1bit from a vector, so will display a // bitpattern for svbool_t instead. 
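    // For example (illustrative), svbool_t corresponds to a vscale x 16 x i1
    // predicate, so the computation above yields 8 one-bit elements per 64-bit
    // granule; the adjustment below re-describes that as one unsigned char per
    // granule.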
if (Info.ElementType == CGM.getContext().BoolTy) { NumElemsPerVG /= 8; Info.ElementType = CGM.getContext().UnsignedCharTy; } auto *LowerBound = llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned( llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0)); SmallVector Expr( {llvm::dwarf::DW_OP_constu, NumElemsPerVG, llvm::dwarf::DW_OP_bregx, /* AArch64::VG */ 46, 0, llvm::dwarf::DW_OP_mul, llvm::dwarf::DW_OP_constu, 1, llvm::dwarf::DW_OP_minus}); auto *UpperBound = DBuilder.createExpression(Expr); llvm::Metadata *Subscript = DBuilder.getOrCreateSubrange( /*count*/ nullptr, LowerBound, UpperBound, /*stride*/ nullptr); llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript); llvm::DIType *ElemTy = getOrCreateType(Info.ElementType, TheCU->getFile()); auto Align = getTypeAlignIfRequired(BT, CGM.getContext()); return DBuilder.createVectorType(/*Size*/ 0, Align, ElemTy, SubscriptArray); } case BuiltinType::UChar: case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break; case BuiltinType::Char_S: case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break; case BuiltinType::Char8: case BuiltinType::Char16: case BuiltinType::Char32: Encoding = llvm::dwarf::DW_ATE_UTF; break; case BuiltinType::UShort: case BuiltinType::UInt: case BuiltinType::UInt128: case BuiltinType::ULong: case BuiltinType::WChar_U: case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break; case BuiltinType::Short: case BuiltinType::Int: case BuiltinType::Int128: case BuiltinType::Long: case BuiltinType::WChar_S: case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break; case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break; case BuiltinType::Half: case BuiltinType::Float: case BuiltinType::LongDouble: case BuiltinType::Float16: case BuiltinType::BFloat16: case BuiltinType::Float128: case BuiltinType::Double: // FIXME: For targets where long double and __float128 have the same size, // they are currently indistinguishable in the debugger without some // special treatment. However, there is currently no consensus on encoding // and this should be updated once a DWARF encoding exists for distinct // floating point types of the same size. Encoding = llvm::dwarf::DW_ATE_float; break; case BuiltinType::ShortAccum: case BuiltinType::Accum: case BuiltinType::LongAccum: case BuiltinType::ShortFract: case BuiltinType::Fract: case BuiltinType::LongFract: case BuiltinType::SatShortFract: case BuiltinType::SatFract: case BuiltinType::SatLongFract: case BuiltinType::SatShortAccum: case BuiltinType::SatAccum: case BuiltinType::SatLongAccum: Encoding = llvm::dwarf::DW_ATE_signed_fixed; break; case BuiltinType::UShortAccum: case BuiltinType::UAccum: case BuiltinType::ULongAccum: case BuiltinType::UShortFract: case BuiltinType::UFract: case BuiltinType::ULongFract: case BuiltinType::SatUShortAccum: case BuiltinType::SatUAccum: case BuiltinType::SatULongAccum: case BuiltinType::SatUShortFract: case BuiltinType::SatUFract: case BuiltinType::SatULongFract: Encoding = llvm::dwarf::DW_ATE_unsigned_fixed; break; } switch (BT->getKind()) { case BuiltinType::Long: BTName = "long int"; break; case BuiltinType::LongLong: BTName = "long long int"; break; case BuiltinType::ULong: BTName = "long unsigned int"; break; case BuiltinType::ULongLong: BTName = "long long unsigned int"; break; default: BTName = BT->getName(CGM.getLangOpts()); break; } // Bit size and offset of the type. 
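  // For example (illustrative), 'long' on an LP64 target is emitted as a
  // 64-bit DW_ATE_signed basic type with the GCC-compatible spelling
  // "long int" chosen above.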
uint64_t Size = CGM.getContext().getTypeSize(BT); return DBuilder.createBasicType(BTName, Size, Encoding); } llvm::DIType *CGDebugInfo::CreateType(const AutoType *Ty) { return DBuilder.createUnspecifiedType("auto"); } llvm::DIType *CGDebugInfo::CreateType(const ExtIntType *Ty) { StringRef Name = Ty->isUnsigned() ? "unsigned _ExtInt" : "_ExtInt"; llvm::dwarf::TypeKind Encoding = Ty->isUnsigned() ? llvm::dwarf::DW_ATE_unsigned : llvm::dwarf::DW_ATE_signed; return DBuilder.createBasicType(Name, CGM.getContext().getTypeSize(Ty), Encoding); } llvm::DIType *CGDebugInfo::CreateType(const ComplexType *Ty) { // Bit size and offset of the type. llvm::dwarf::TypeKind Encoding = llvm::dwarf::DW_ATE_complex_float; if (Ty->isComplexIntegerType()) Encoding = llvm::dwarf::DW_ATE_lo_user; uint64_t Size = CGM.getContext().getTypeSize(Ty); return DBuilder.createBasicType("complex", Size, Encoding); } llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile *Unit) { QualifierCollector Qc; const Type *T = Qc.strip(Ty); // Ignore these qualifiers for now. Qc.removeObjCGCAttr(); Qc.removeAddressSpace(); Qc.removeObjCLifetime(); // We will create one Derived type for one qualifier and recurse to handle any // additional ones. llvm::dwarf::Tag Tag; if (Qc.hasConst()) { Tag = llvm::dwarf::DW_TAG_const_type; Qc.removeConst(); } else if (Qc.hasVolatile()) { Tag = llvm::dwarf::DW_TAG_volatile_type; Qc.removeVolatile(); } else if (Qc.hasRestrict()) { Tag = llvm::dwarf::DW_TAG_restrict_type; Qc.removeRestrict(); } else { assert(Qc.empty() && "Unknown type qualifier for debug info"); return getOrCreateType(QualType(T, 0), Unit); } auto *FromTy = getOrCreateType(Qc.apply(CGM.getContext(), T), Unit); // No need to fill in the Name, Line, Size, Alignment, Offset in case of // CVR derived types. return DBuilder.createQualifiedType(Tag, FromTy); } llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty, llvm::DIFile *Unit) { // The frontend treats 'id' as a typedef to an ObjCObjectType, // whereas 'id' is treated as an ObjCPointerType. For the // debug info, we want to emit 'id' in both cases. if (Ty->isObjCQualifiedIdType()) return getOrCreateType(CGM.getContext().getObjCIdType(), Unit); return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty, Ty->getPointeeType(), Unit); } llvm::DIType *CGDebugInfo::CreateType(const PointerType *Ty, llvm::DIFile *Unit) { return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty, Ty->getPointeeType(), Unit); } /// \return whether a C++ mangling exists for the type defined by TD. static bool hasCXXMangling(const TagDecl *TD, llvm::DICompileUnit *TheCU) { switch (TheCU->getSourceLanguage()) { case llvm::dwarf::DW_LANG_C_plus_plus: case llvm::dwarf::DW_LANG_C_plus_plus_11: case llvm::dwarf::DW_LANG_C_plus_plus_14: return true; case llvm::dwarf::DW_LANG_ObjC_plus_plus: return isa(TD) || isa(TD); default: return false; } } // Determines if the debug info for this tag declaration needs a type // identifier. The purpose of the unique identifier is to deduplicate type // information for identical types across TUs. Because of the C++ one definition // rule (ODR), it is valid to assume that the type is defined the same way in // every TU and its debug info is equivalent. // // C does not have the ODR, and it is common for codebases to contain multiple // different definitions of a struct with the same name in different TUs. // Therefore, if the type doesn't have a C++ mangling, don't give it an // identifer. 
Type information in C is smaller and simpler than C++ type // information, so the increase in debug info size is negligible. // // If the type is not externally visible, it should be unique to the current TU, // and should not need an identifier to participate in type deduplication. // However, when emitting CodeView, the format internally uses these // unique type name identifers for references between debug info. For example, // the method of a class in an anonymous namespace uses the identifer to refer // to its parent class. The Microsoft C++ ABI attempts to provide unique names // for such types, so when emitting CodeView, always use identifiers for C++ // types. This may create problems when attempting to emit CodeView when the MS // C++ ABI is not in use. static bool needsTypeIdentifier(const TagDecl *TD, CodeGenModule &CGM, llvm::DICompileUnit *TheCU) { // We only add a type identifier for types with C++ name mangling. if (!hasCXXMangling(TD, TheCU)) return false; // Externally visible types with C++ mangling need a type identifier. if (TD->isExternallyVisible()) return true; // CodeView types with C++ mangling need a type identifier. if (CGM.getCodeGenOpts().EmitCodeView) return true; return false; } // Returns a unique type identifier string if one exists, or an empty string. static SmallString<256> getTypeIdentifier(const TagType *Ty, CodeGenModule &CGM, llvm::DICompileUnit *TheCU) { SmallString<256> Identifier; const TagDecl *TD = Ty->getDecl(); if (!needsTypeIdentifier(TD, CGM, TheCU)) return Identifier; if (const auto *RD = dyn_cast(TD)) if (RD->getDefinition()) if (RD->isDynamicClass() && CGM.getVTableLinkage(RD) == llvm::GlobalValue::ExternalLinkage) return Identifier; // TODO: This is using the RTTI name. Is there a better way to get // a unique string for a type? llvm::raw_svector_ostream Out(Identifier); CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(QualType(Ty, 0), Out); return Identifier; } /// \return the appropriate DWARF tag for a composite type. static llvm::dwarf::Tag getTagForRecord(const RecordDecl *RD) { llvm::dwarf::Tag Tag; if (RD->isStruct() || RD->isInterface()) Tag = llvm::dwarf::DW_TAG_structure_type; else if (RD->isUnion()) Tag = llvm::dwarf::DW_TAG_union_type; else { // FIXME: This could be a struct type giving a default visibility different // than C++ class type, but needs llvm metadata changes first. assert(RD->isClass()); Tag = llvm::dwarf::DW_TAG_class_type; } return Tag; } llvm::DICompositeType * CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty, llvm::DIScope *Ctx) { const RecordDecl *RD = Ty->getDecl(); if (llvm::DIType *T = getTypeOrNull(CGM.getContext().getRecordType(RD))) return cast(T); llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation()); unsigned Line = getLineNumber(RD->getLocation()); StringRef RDName = getClassName(RD); uint64_t Size = 0; uint32_t Align = 0; llvm::DINode::DIFlags Flags = llvm::DINode::FlagFwdDecl; // Add flag to nontrivial forward declarations. To be consistent with MSVC, // add the flag if a record has no definition because we don't know whether // it will be trivial or not. if (const CXXRecordDecl *CXXRD = dyn_cast(RD)) if (!CXXRD->hasDefinition() || (CXXRD->hasDefinition() && !CXXRD->isTrivial())) Flags |= llvm::DINode::FlagNonTrivial; // Create the type. 
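  // The identifier produced by getTypeIdentifier is the RTTI name; e.g.
  // (illustrative) a class 'ns::Foo' under the Itanium ABI is identified as
  // "_ZTSN2ns3FooE".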
SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU); llvm::DICompositeType *RetTy = DBuilder.createReplaceableCompositeType( getTagForRecord(RD), RDName, Ctx, DefUnit, Line, 0, Size, Align, Flags, Identifier); if (CGM.getCodeGenOpts().DebugFwdTemplateParams) if (auto *TSpecial = dyn_cast(RD)) DBuilder.replaceArrays(RetTy, llvm::DINodeArray(), CollectCXXTemplateParams(TSpecial, DefUnit)); ReplaceMap.emplace_back( std::piecewise_construct, std::make_tuple(Ty), std::make_tuple(static_cast(RetTy))); return RetTy; } llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag, const Type *Ty, QualType PointeeTy, llvm::DIFile *Unit) { // Bit size, align and offset of the type. // Size is always the size of a pointer. We can't use getTypeSize here // because that does not return the correct value for references. unsigned AddressSpace = CGM.getContext().getTargetAddressSpace(PointeeTy); uint64_t Size = CGM.getTarget().getPointerWidth(AddressSpace); auto Align = getTypeAlignIfRequired(Ty, CGM.getContext()); Optional DWARFAddressSpace = CGM.getTarget().getDWARFAddressSpace(AddressSpace); if (Tag == llvm::dwarf::DW_TAG_reference_type || Tag == llvm::dwarf::DW_TAG_rvalue_reference_type) return DBuilder.createReferenceType(Tag, getOrCreateType(PointeeTy, Unit), Size, Align, DWARFAddressSpace); else return DBuilder.createPointerType(getOrCreateType(PointeeTy, Unit), Size, Align, DWARFAddressSpace); } llvm::DIType *CGDebugInfo::getOrCreateStructPtrType(StringRef Name, llvm::DIType *&Cache) { if (Cache) return Cache; Cache = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, Name, TheCU, TheCU->getFile(), 0); unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy); Cache = DBuilder.createPointerType(Cache, Size); return Cache; } uint64_t CGDebugInfo::collectDefaultElementTypesForBlockPointer( const BlockPointerType *Ty, llvm::DIFile *Unit, llvm::DIDerivedType *DescTy, unsigned LineNo, SmallVectorImpl &EltTys) { QualType FType; // Advanced by calls to CreateMemberType in increments of FType, then // returned as the overall size of the default elements. uint64_t FieldOffset = 0; // Blocks in OpenCL have unique constraints which make the standard fields // redundant while requiring size and align fields for enqueue_kernel. 
See // initializeForBlockHeader in CGBlocks.cpp if (CGM.getLangOpts().OpenCL) { FType = CGM.getContext().IntTy; EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset)); EltTys.push_back(CreateMemberType(Unit, FType, "__align", &FieldOffset)); } else { FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy); EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset)); FType = CGM.getContext().IntTy; EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset)); EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset)); FType = CGM.getContext().getPointerType(Ty->getPointeeType()); EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset)); FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy); uint64_t FieldSize = CGM.getContext().getTypeSize(Ty); uint32_t FieldAlign = CGM.getContext().getTypeAlign(Ty); EltTys.push_back(DBuilder.createMemberType( Unit, "__descriptor", nullptr, LineNo, FieldSize, FieldAlign, FieldOffset, llvm::DINode::FlagZero, DescTy)); FieldOffset += FieldSize; } return FieldOffset; } llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty, llvm::DIFile *Unit) { SmallVector EltTys; QualType FType; uint64_t FieldOffset; llvm::DINodeArray Elements; FieldOffset = 0; FType = CGM.getContext().UnsignedLongTy; EltTys.push_back(CreateMemberType(Unit, FType, "reserved", &FieldOffset)); EltTys.push_back(CreateMemberType(Unit, FType, "Size", &FieldOffset)); Elements = DBuilder.getOrCreateArray(EltTys); EltTys.clear(); llvm::DINode::DIFlags Flags = llvm::DINode::FlagAppleBlock; auto *EltTy = DBuilder.createStructType(Unit, "__block_descriptor", nullptr, 0, FieldOffset, 0, Flags, nullptr, Elements); // Bit size, align and offset of the type. uint64_t Size = CGM.getContext().getTypeSize(Ty); auto *DescTy = DBuilder.createPointerType(EltTy, Size); FieldOffset = collectDefaultElementTypesForBlockPointer(Ty, Unit, DescTy, 0, EltTys); Elements = DBuilder.getOrCreateArray(EltTys); // The __block_literal_generic structs are marked with a special // DW_AT_APPLE_BLOCK attribute and are an implementation detail only // the debugger needs to know about. To allow type uniquing, emit // them without a name or a location. EltTy = DBuilder.createStructType(Unit, "", nullptr, 0, FieldOffset, 0, Flags, nullptr, Elements); return DBuilder.createPointerType(EltTy, Size); } llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty, llvm::DIFile *Unit) { assert(Ty->isTypeAlias()); llvm::DIType *Src = getOrCreateType(Ty->getAliasedType(), Unit); auto *AliasDecl = cast(Ty->getTemplateName().getAsTemplateDecl()) ->getTemplatedDecl(); if (AliasDecl->hasAttr()) return Src; SmallString<128> NS; llvm::raw_svector_ostream OS(NS); Ty->getTemplateName().print(OS, getPrintingPolicy(), /*qualified*/ false); printTemplateArgumentList(OS, Ty->template_arguments(), getPrintingPolicy()); SourceLocation Loc = AliasDecl->getLocation(); return DBuilder.createTypedef(Src, OS.str(), getOrCreateFile(Loc), getLineNumber(Loc), getDeclContextDescriptor(AliasDecl)); } llvm::DIType *CGDebugInfo::CreateType(const TypedefType *Ty, llvm::DIFile *Unit) { llvm::DIType *Underlying = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit); if (Ty->getDecl()->hasAttr()) return Underlying; // We don't set size information, but do specify where the typedef was // declared. 
SourceLocation Loc = Ty->getDecl()->getLocation(); uint32_t Align = getDeclAlignIfRequired(Ty->getDecl(), CGM.getContext()); // Typedefs are derived from some other type. return DBuilder.createTypedef(Underlying, Ty->getDecl()->getName(), getOrCreateFile(Loc), getLineNumber(Loc), getDeclContextDescriptor(Ty->getDecl()), Align); } static unsigned getDwarfCC(CallingConv CC) { switch (CC) { case CC_C: // Avoid emitting DW_AT_calling_convention if the C convention was used. return 0; case CC_X86StdCall: return llvm::dwarf::DW_CC_BORLAND_stdcall; case CC_X86FastCall: return llvm::dwarf::DW_CC_BORLAND_msfastcall; case CC_X86ThisCall: return llvm::dwarf::DW_CC_BORLAND_thiscall; case CC_X86VectorCall: return llvm::dwarf::DW_CC_LLVM_vectorcall; case CC_X86Pascal: return llvm::dwarf::DW_CC_BORLAND_pascal; case CC_Win64: return llvm::dwarf::DW_CC_LLVM_Win64; case CC_X86_64SysV: return llvm::dwarf::DW_CC_LLVM_X86_64SysV; case CC_AAPCS: case CC_AArch64VectorCall: return llvm::dwarf::DW_CC_LLVM_AAPCS; case CC_AAPCS_VFP: return llvm::dwarf::DW_CC_LLVM_AAPCS_VFP; case CC_IntelOclBicc: return llvm::dwarf::DW_CC_LLVM_IntelOclBicc; case CC_SpirFunction: return llvm::dwarf::DW_CC_LLVM_SpirFunction; case CC_OpenCLKernel: return llvm::dwarf::DW_CC_LLVM_OpenCLKernel; case CC_Swift: return llvm::dwarf::DW_CC_LLVM_Swift; case CC_PreserveMost: return llvm::dwarf::DW_CC_LLVM_PreserveMost; case CC_PreserveAll: return llvm::dwarf::DW_CC_LLVM_PreserveAll; case CC_X86RegCall: return llvm::dwarf::DW_CC_LLVM_X86RegCall; } return 0; } llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty, llvm::DIFile *Unit) { SmallVector EltTys; // Add the result type at least. EltTys.push_back(getOrCreateType(Ty->getReturnType(), Unit)); // Set up remainder of arguments if there is a prototype. // otherwise emit it as a variadic function. if (isa(Ty)) EltTys.push_back(DBuilder.createUnspecifiedParameter()); else if (const auto *FPT = dyn_cast(Ty)) { for (const QualType &ParamType : FPT->param_types()) EltTys.push_back(getOrCreateType(ParamType, Unit)); if (FPT->isVariadic()) EltTys.push_back(DBuilder.createUnspecifiedParameter()); } llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys); return DBuilder.createSubroutineType(EltTypeArray, llvm::DINode::FlagZero, getDwarfCC(Ty->getCallConv())); } /// Convert an AccessSpecifier into the corresponding DINode flag. /// As an optimization, return 0 if the access specifier equals the /// default for the containing type. static llvm::DINode::DIFlags getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) { AccessSpecifier Default = clang::AS_none; if (RD && RD->isClass()) Default = clang::AS_private; else if (RD && (RD->isStruct() || RD->isUnion())) Default = clang::AS_public; if (Access == Default) return llvm::DINode::FlagZero; switch (Access) { case clang::AS_private: return llvm::DINode::FlagPrivate; case clang::AS_protected: return llvm::DINode::FlagProtected; case clang::AS_public: return llvm::DINode::FlagPublic; case clang::AS_none: return llvm::DINode::FlagZero; } llvm_unreachable("unexpected access enumerator"); } llvm::DIType *CGDebugInfo::createBitFieldType(const FieldDecl *BitFieldDecl, llvm::DIScope *RecordTy, const RecordDecl *RD) { StringRef Name = BitFieldDecl->getName(); QualType Ty = BitFieldDecl->getType(); SourceLocation Loc = BitFieldDecl->getLocation(); llvm::DIFile *VUnit = getOrCreateFile(Loc); llvm::DIType *DebugType = getOrCreateType(Ty, VUnit); // Get the location for the field. 
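  // Note that in the offset computation below, the bit offset is relative to
  // the storage unit and is flipped on big-endian targets; e.g. (illustrative)
  // a 3-bit field at little-endian offset 5 in a 32-bit unit is emitted at
  // offset 32 - 3 - 5 = 24.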
llvm::DIFile *File = getOrCreateFile(Loc); unsigned Line = getLineNumber(Loc); const CGBitFieldInfo &BitFieldInfo = CGM.getTypes().getCGRecordLayout(RD).getBitFieldInfo(BitFieldDecl); uint64_t SizeInBits = BitFieldInfo.Size; assert(SizeInBits > 0 && "found named 0-width bitfield"); uint64_t StorageOffsetInBits = CGM.getContext().toBits(BitFieldInfo.StorageOffset); uint64_t Offset = BitFieldInfo.Offset; // The bit offsets for big endian machines are reversed for big // endian target, compensate for that as the DIDerivedType requires // un-reversed offsets. if (CGM.getDataLayout().isBigEndian()) Offset = BitFieldInfo.StorageSize - BitFieldInfo.Size - Offset; uint64_t OffsetInBits = StorageOffsetInBits + Offset; llvm::DINode::DIFlags Flags = getAccessFlag(BitFieldDecl->getAccess(), RD); return DBuilder.createBitFieldMemberType( RecordTy, Name, File, Line, SizeInBits, OffsetInBits, StorageOffsetInBits, Flags, DebugType); } llvm::DIType * CGDebugInfo::createFieldType(StringRef name, QualType type, SourceLocation loc, AccessSpecifier AS, uint64_t offsetInBits, uint32_t AlignInBits, llvm::DIFile *tunit, llvm::DIScope *scope, const RecordDecl *RD) { llvm::DIType *debugType = getOrCreateType(type, tunit); // Get the location for the field. llvm::DIFile *file = getOrCreateFile(loc); unsigned line = getLineNumber(loc); uint64_t SizeInBits = 0; auto Align = AlignInBits; if (!type->isIncompleteArrayType()) { TypeInfo TI = CGM.getContext().getTypeInfo(type); SizeInBits = TI.Width; if (!Align) Align = getTypeAlignIfRequired(type, CGM.getContext()); } llvm::DINode::DIFlags flags = getAccessFlag(AS, RD); return DBuilder.createMemberType(scope, name, file, line, SizeInBits, Align, offsetInBits, flags, debugType); } void CGDebugInfo::CollectRecordLambdaFields( const CXXRecordDecl *CXXDecl, SmallVectorImpl &elements, llvm::DIType *RecordTy) { // For C++11 Lambdas a Field will be the same as a Capture, but the Capture // has the name and the location of the variable so we should iterate over // both concurrently. const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(CXXDecl); RecordDecl::field_iterator Field = CXXDecl->field_begin(); unsigned fieldno = 0; for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(), E = CXXDecl->captures_end(); I != E; ++I, ++Field, ++fieldno) { const LambdaCapture &C = *I; if (C.capturesVariable()) { SourceLocation Loc = C.getLocation(); assert(!Field->isBitField() && "lambdas don't have bitfield members!"); VarDecl *V = C.getCapturedVar(); StringRef VName = V->getName(); llvm::DIFile *VUnit = getOrCreateFile(Loc); auto Align = getDeclAlignIfRequired(V, CGM.getContext()); llvm::DIType *FieldType = createFieldType( VName, Field->getType(), Loc, Field->getAccess(), layout.getFieldOffset(fieldno), Align, VUnit, RecordTy, CXXDecl); elements.push_back(FieldType); } else if (C.capturesThis()) { // TODO: Need to handle 'this' in some way by probably renaming the // this of the lambda class and having a field member of 'this' or // by using AT_object_pointer for the function and having that be // used as 'this' for semantic references. 
FieldDecl *f = *Field; llvm::DIFile *VUnit = getOrCreateFile(f->getLocation()); QualType type = f->getType(); llvm::DIType *fieldType = createFieldType( "this", type, f->getLocation(), f->getAccess(), layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl); elements.push_back(fieldType); } } } llvm::DIDerivedType * CGDebugInfo::CreateRecordStaticField(const VarDecl *Var, llvm::DIType *RecordTy, const RecordDecl *RD) { // Create the descriptor for the static variable, with or without // constant initializers. Var = Var->getCanonicalDecl(); llvm::DIFile *VUnit = getOrCreateFile(Var->getLocation()); llvm::DIType *VTy = getOrCreateType(Var->getType(), VUnit); unsigned LineNumber = getLineNumber(Var->getLocation()); StringRef VName = Var->getName(); llvm::Constant *C = nullptr; if (Var->getInit()) { const APValue *Value = Var->evaluateValue(); if (Value) { if (Value->isInt()) C = llvm::ConstantInt::get(CGM.getLLVMContext(), Value->getInt()); if (Value->isFloat()) C = llvm::ConstantFP::get(CGM.getLLVMContext(), Value->getFloat()); } } llvm::DINode::DIFlags Flags = getAccessFlag(Var->getAccess(), RD); auto Align = getDeclAlignIfRequired(Var, CGM.getContext()); llvm::DIDerivedType *GV = DBuilder.createStaticMemberType( RecordTy, VName, VUnit, LineNumber, VTy, Flags, C, Align); StaticDataMemberCache[Var->getCanonicalDecl()].reset(GV); return GV; } void CGDebugInfo::CollectRecordNormalField( const FieldDecl *field, uint64_t OffsetInBits, llvm::DIFile *tunit, SmallVectorImpl &elements, llvm::DIType *RecordTy, const RecordDecl *RD) { StringRef name = field->getName(); QualType type = field->getType(); // Ignore unnamed fields unless they're anonymous structs/unions. if (name.empty() && !type->isRecordType()) return; llvm::DIType *FieldType; if (field->isBitField()) { FieldType = createBitFieldType(field, RecordTy, RD); } else { auto Align = getDeclAlignIfRequired(field, CGM.getContext()); FieldType = createFieldType(name, type, field->getLocation(), field->getAccess(), OffsetInBits, Align, tunit, RecordTy, RD); } elements.push_back(FieldType); } void CGDebugInfo::CollectRecordNestedType( const TypeDecl *TD, SmallVectorImpl &elements) { QualType Ty = CGM.getContext().getTypeDeclType(TD); // Injected class names are not considered nested records. if (isa(Ty)) return; SourceLocation Loc = TD->getLocation(); llvm::DIType *nestedType = getOrCreateType(Ty, getOrCreateFile(Loc)); elements.push_back(nestedType); } void CGDebugInfo::CollectRecordFields( const RecordDecl *record, llvm::DIFile *tunit, SmallVectorImpl &elements, llvm::DICompositeType *RecordTy) { const auto *CXXDecl = dyn_cast(record); if (CXXDecl && CXXDecl->isLambda()) CollectRecordLambdaFields(CXXDecl, elements, RecordTy); else { const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record); // Field number for non-static fields. unsigned fieldNo = 0; // Static and non-static members should appear in the same order as // the corresponding declarations in the source program. for (const auto *I : record->decls()) if (const auto *V = dyn_cast(I)) { if (V->hasAttr()) continue; // Skip variable template specializations when emitting CodeView. MSVC // doesn't emit them. 
if (CGM.getCodeGenOpts().EmitCodeView && isa(V)) continue; if (isa(V)) continue; // Reuse the existing static member declaration if one exists auto MI = StaticDataMemberCache.find(V->getCanonicalDecl()); if (MI != StaticDataMemberCache.end()) { assert(MI->second && "Static data member declaration should still exist"); elements.push_back(MI->second); } else { auto Field = CreateRecordStaticField(V, RecordTy, record); elements.push_back(Field); } } else if (const auto *field = dyn_cast(I)) { CollectRecordNormalField(field, layout.getFieldOffset(fieldNo), tunit, elements, RecordTy, record); // Bump field number for next field. ++fieldNo; } else if (CGM.getCodeGenOpts().EmitCodeView) { // Debug info for nested types is included in the member list only for // CodeView. if (const auto *nestedType = dyn_cast(I)) if (!nestedType->isImplicit() && nestedType->getDeclContext() == record) CollectRecordNestedType(nestedType, elements); } } } llvm::DISubroutineType * CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method, llvm::DIFile *Unit, bool decl) { const FunctionProtoType *Func = Method->getType()->getAs(); if (Method->isStatic()) return cast_or_null( getOrCreateType(QualType(Func, 0), Unit)); return getOrCreateInstanceMethodType(Method->getThisType(), Func, Unit, decl); } llvm::DISubroutineType * CGDebugInfo::getOrCreateInstanceMethodType(QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile *Unit, bool decl) { // Add "this" pointer. llvm::DITypeRefArray Args( cast(getOrCreateType(QualType(Func, 0), Unit)) ->getTypeArray()); assert(Args.size() && "Invalid number of arguments!"); SmallVector Elts; // First element is always return type. For 'void' functions it is NULL. QualType temp = Func->getReturnType(); if (temp->getTypeClass() == Type::Auto && decl) Elts.push_back(CreateType(cast(temp))); else Elts.push_back(Args[0]); // "this" pointer is always first argument. const CXXRecordDecl *RD = ThisPtr->getPointeeCXXRecordDecl(); if (isa(RD)) { // Create pointer type directly in this case. const PointerType *ThisPtrTy = cast(ThisPtr); QualType PointeeTy = ThisPtrTy->getPointeeType(); unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy); uint64_t Size = CGM.getTarget().getPointerWidth(AS); auto Align = getTypeAlignIfRequired(ThisPtrTy, CGM.getContext()); llvm::DIType *PointeeType = getOrCreateType(PointeeTy, Unit); llvm::DIType *ThisPtrType = DBuilder.createPointerType(PointeeType, Size, Align); TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType); // TODO: This and the artificial type below are misleading, the // types aren't artificial the argument is, but the current // metadata doesn't represent that. ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType); Elts.push_back(ThisPtrType); } else { llvm::DIType *ThisPtrType = getOrCreateType(ThisPtr, Unit); TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType); ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType); Elts.push_back(ThisPtrType); } // Copy rest of the arguments. 
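  // The resulting subroutine type is laid out as [return type, artificial
  // 'this', remaining declared parameters], where Args is the element array of
  // the plain function type built above.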
for (unsigned i = 1, e = Args.size(); i != e; ++i) Elts.push_back(Args[i]); llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts); llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero; if (Func->getExtProtoInfo().RefQualifier == RQ_LValue) Flags |= llvm::DINode::FlagLValueReference; if (Func->getExtProtoInfo().RefQualifier == RQ_RValue) Flags |= llvm::DINode::FlagRValueReference; return DBuilder.createSubroutineType(EltTypeArray, Flags, getDwarfCC(Func->getCallConv())); } /// isFunctionLocalClass - Return true if CXXRecordDecl is defined /// inside a function. static bool isFunctionLocalClass(const CXXRecordDecl *RD) { if (const auto *NRD = dyn_cast(RD->getDeclContext())) return isFunctionLocalClass(NRD); if (isa(RD->getDeclContext())) return true; return false; } llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction( const CXXMethodDecl *Method, llvm::DIFile *Unit, llvm::DIType *RecordTy) { bool IsCtorOrDtor = isa(Method) || isa(Method); StringRef MethodName = getFunctionName(Method); llvm::DISubroutineType *MethodTy = getOrCreateMethodType(Method, Unit, true); // Since a single ctor/dtor corresponds to multiple functions, it doesn't // make sense to give a single ctor/dtor a linkage name. StringRef MethodLinkageName; // FIXME: 'isFunctionLocalClass' seems like an arbitrary/unintentional // property to use here. It may've been intended to model "is non-external // type" but misses cases of non-function-local but non-external classes such // as those in anonymous namespaces as well as the reverse - external types // that are function local, such as those in (non-local) inline functions. if (!IsCtorOrDtor && !isFunctionLocalClass(Method->getParent())) MethodLinkageName = CGM.getMangledName(Method); // Get the location for the method. llvm::DIFile *MethodDefUnit = nullptr; unsigned MethodLine = 0; if (!Method->isImplicit()) { MethodDefUnit = getOrCreateFile(Method->getLocation()); MethodLine = getLineNumber(Method->getLocation()); } // Collect virtual method info. llvm::DIType *ContainingType = nullptr; unsigned VIndex = 0; llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero; llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero; int ThisAdjustment = 0; if (Method->isVirtual()) { if (Method->isPure()) SPFlags |= llvm::DISubprogram::SPFlagPureVirtual; else SPFlags |= llvm::DISubprogram::SPFlagVirtual; if (CGM.getTarget().getCXXABI().isItaniumFamily()) { // It doesn't make sense to give a virtual destructor a vtable index, // since a single destructor has two entries in the vtable. if (!isa(Method)) VIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(Method); } else { // Emit MS ABI vftable information. There is only one entry for the // deleting dtor. const auto *DD = dyn_cast(Method); GlobalDecl GD = DD ? GlobalDecl(DD, Dtor_Deleting) : GlobalDecl(Method); MethodVFTableLocation ML = CGM.getMicrosoftVTableContext().getMethodVFTableLocation(GD); VIndex = ML.Index; // CodeView only records the vftable offset in the class that introduces // the virtual method. This is possible because, unlike Itanium, the MS // C++ ABI does not include all virtual methods from non-primary bases in // the vtable for the most derived class. For example, if C inherits from // A and B, C's primary vftable will not include B's virtual methods. if (Method->size_overridden_methods() == 0) Flags |= llvm::DINode::FlagIntroducedVirtual; // The 'this' adjustment accounts for both the virtual and non-virtual // portions of the adjustment. 
Presumably the debugger only uses it when // it knows the dynamic type of an object. ThisAdjustment = CGM.getCXXABI() .getVirtualFunctionPrologueThisAdjustment(GD) .getQuantity(); } ContainingType = RecordTy; } // We're checking for deleted C++ special member functions // [Ctors,Dtors, Copy/Move] auto checkAttrDeleted = [&](const auto *Method) { if (Method->getCanonicalDecl()->isDeleted()) SPFlags |= llvm::DISubprogram::SPFlagDeleted; }; switch (Method->getKind()) { case Decl::CXXConstructor: case Decl::CXXDestructor: checkAttrDeleted(Method); break; case Decl::CXXMethod: if (Method->isCopyAssignmentOperator() || Method->isMoveAssignmentOperator()) checkAttrDeleted(Method); break; default: break; } if (Method->isNoReturn()) Flags |= llvm::DINode::FlagNoReturn; if (Method->isStatic()) Flags |= llvm::DINode::FlagStaticMember; if (Method->isImplicit()) Flags |= llvm::DINode::FlagArtificial; Flags |= getAccessFlag(Method->getAccess(), Method->getParent()); if (const auto *CXXC = dyn_cast(Method)) { if (CXXC->isExplicit()) Flags |= llvm::DINode::FlagExplicit; } else if (const auto *CXXC = dyn_cast(Method)) { if (CXXC->isExplicit()) Flags |= llvm::DINode::FlagExplicit; } if (Method->hasPrototype()) Flags |= llvm::DINode::FlagPrototyped; if (Method->getRefQualifier() == RQ_LValue) Flags |= llvm::DINode::FlagLValueReference; if (Method->getRefQualifier() == RQ_RValue) Flags |= llvm::DINode::FlagRValueReference; if (CGM.getLangOpts().Optimize) SPFlags |= llvm::DISubprogram::SPFlagOptimized; // In this debug mode, emit type info for a class when its constructor type // info is emitted. if (DebugKind == codegenoptions::DebugInfoConstructor) if (const CXXConstructorDecl *CD = dyn_cast(Method)) completeClass(CD->getParent()); llvm::DINodeArray TParamsArray = CollectFunctionTemplateParams(Method, Unit); llvm::DISubprogram *SP = DBuilder.createMethod( RecordTy, MethodName, MethodLinkageName, MethodDefUnit, MethodLine, MethodTy, VIndex, ThisAdjustment, ContainingType, Flags, SPFlags, TParamsArray.get()); SPCache[Method->getCanonicalDecl()].reset(SP); return SP; } void CGDebugInfo::CollectCXXMemberFunctions( const CXXRecordDecl *RD, llvm::DIFile *Unit, SmallVectorImpl &EltTys, llvm::DIType *RecordTy) { // Since we want more than just the individual member decls if we // have templated functions iterate over every declaration to gather // the functions. for (const auto *I : RD->decls()) { const auto *Method = dyn_cast(I); // If the member is implicit, don't add it to the member list. This avoids // the member being added to type units by LLVM, while still allowing it // to be emitted into the type declaration/reference inside the compile // unit. // Ditto 'nodebug' methods, for consistency with CodeGenFunction.cpp. // FIXME: Handle Using(Shadow?)Decls here to create // DW_TAG_imported_declarations inside the class for base decls brought into // derived classes. GDB doesn't seem to notice/leverage these when I tried // it, so I'm not rushing to fix this. (GCC seems to produce them, if // referenced) if (!Method || Method->isImplicit() || Method->hasAttr()) continue; if (Method->getType()->castAs()->getContainedAutoType()) continue; // Reuse the existing member function declaration if it exists. // It may be associated with the declaration of the type & should be // reused as we're building the definition. // // This situation can arise in the vtable-based debug info reduction where // implicit members are emitted in a non-vtable TU. 
auto MI = SPCache.find(Method->getCanonicalDecl()); EltTys.push_back(MI == SPCache.end() ? CreateCXXMemberFunction(Method, Unit, RecordTy) : static_cast(MI->second)); } } void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile *Unit, SmallVectorImpl &EltTys, llvm::DIType *RecordTy) { llvm::DenseSet> SeenTypes; CollectCXXBasesAux(RD, Unit, EltTys, RecordTy, RD->bases(), SeenTypes, llvm::DINode::FlagZero); // If we are generating CodeView debug info, we also need to emit records for // indirect virtual base classes. if (CGM.getCodeGenOpts().EmitCodeView) { CollectCXXBasesAux(RD, Unit, EltTys, RecordTy, RD->vbases(), SeenTypes, llvm::DINode::FlagIndirectVirtualBase); } } void CGDebugInfo::CollectCXXBasesAux( const CXXRecordDecl *RD, llvm::DIFile *Unit, SmallVectorImpl &EltTys, llvm::DIType *RecordTy, const CXXRecordDecl::base_class_const_range &Bases, llvm::DenseSet> &SeenTypes, llvm::DINode::DIFlags StartingFlags) { const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD); for (const auto &BI : Bases) { const auto *Base = cast(BI.getType()->castAs()->getDecl()); if (!SeenTypes.insert(Base).second) continue; auto *BaseTy = getOrCreateType(BI.getType(), Unit); llvm::DINode::DIFlags BFlags = StartingFlags; uint64_t BaseOffset; uint32_t VBPtrOffset = 0; if (BI.isVirtual()) { if (CGM.getTarget().getCXXABI().isItaniumFamily()) { // virtual base offset offset is -ve. The code generator emits dwarf // expression where it expects +ve number. BaseOffset = 0 - CGM.getItaniumVTableContext() .getVirtualBaseOffsetOffset(RD, Base) .getQuantity(); } else { // In the MS ABI, store the vbtable offset, which is analogous to the // vbase offset offset in Itanium. BaseOffset = 4 * CGM.getMicrosoftVTableContext().getVBTableIndex(RD, Base); VBPtrOffset = CGM.getContext() .getASTRecordLayout(RD) .getVBPtrOffset() .getQuantity(); } BFlags |= llvm::DINode::FlagVirtual; } else BaseOffset = CGM.getContext().toBits(RL.getBaseClassOffset(Base)); // FIXME: Inconsistent units for BaseOffset. It is in bytes when // BI->isVirtual() and bits when not. 
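    // For example (illustrative): under the MS ABI a virtual base with vbtable
    // index 1 is recorded with BaseOffset 4 (bytes), whereas a non-virtual
    // base at byte offset 8 is recorded as 64 (bits).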
BFlags |= getAccessFlag(BI.getAccessSpecifier(), RD); llvm::DIType *DTy = DBuilder.createInheritance(RecordTy, BaseTy, BaseOffset, VBPtrOffset, BFlags); EltTys.push_back(DTy); } } llvm::DINodeArray CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList, ArrayRef TAList, llvm::DIFile *Unit) { SmallVector TemplateParams; for (unsigned i = 0, e = TAList.size(); i != e; ++i) { const TemplateArgument &TA = TAList[i]; StringRef Name; bool defaultParameter = false; if (TPList) Name = TPList->getParam(i)->getName(); switch (TA.getKind()) { case TemplateArgument::Type: { llvm::DIType *TTy = getOrCreateType(TA.getAsType(), Unit); if (TPList) if (auto *templateType = dyn_cast_or_null(TPList->getParam(i))) if (templateType->hasDefaultArgument()) defaultParameter = templateType->getDefaultArgument() == TA.getAsType(); TemplateParams.push_back(DBuilder.createTemplateTypeParameter( TheCU, Name, TTy, defaultParameter)); } break; case TemplateArgument::Integral: { llvm::DIType *TTy = getOrCreateType(TA.getIntegralType(), Unit); if (TPList && CGM.getCodeGenOpts().DwarfVersion >= 5) if (auto *templateType = dyn_cast_or_null(TPList->getParam(i))) if (templateType->hasDefaultArgument() && !templateType->getDefaultArgument()->isValueDependent()) defaultParameter = llvm::APSInt::isSameValue( templateType->getDefaultArgument()->EvaluateKnownConstInt( CGM.getContext()), TA.getAsIntegral()); TemplateParams.push_back(DBuilder.createTemplateValueParameter( TheCU, Name, TTy, defaultParameter, llvm::ConstantInt::get(CGM.getLLVMContext(), TA.getAsIntegral()))); } break; case TemplateArgument::Declaration: { const ValueDecl *D = TA.getAsDecl(); QualType T = TA.getParamTypeForDecl().getDesugaredType(CGM.getContext()); llvm::DIType *TTy = getOrCreateType(T, Unit); llvm::Constant *V = nullptr; // Skip retrieve the value if that template parameter has cuda device // attribute, i.e. that value is not available at the host side. if (!CGM.getLangOpts().CUDA || CGM.getLangOpts().CUDAIsDevice || !D->hasAttr()) { const CXXMethodDecl *MD; // Variable pointer template parameters have a value that is the address // of the variable. if (const auto *VD = dyn_cast(D)) V = CGM.GetAddrOfGlobalVar(VD); // Member function pointers have special support for building them, // though this is currently unsupported in LLVM CodeGen. else if ((MD = dyn_cast(D)) && MD->isInstance()) V = CGM.getCXXABI().EmitMemberFunctionPointer(MD); else if (const auto *FD = dyn_cast(D)) V = CGM.GetAddrOfFunction(FD); // Member data pointers have special handling too to compute the fixed // offset within the object. 
else if (const auto *MPT = dyn_cast(T.getTypePtr())) { // These five lines (& possibly the above member function pointer // handling) might be able to be refactored to use similar code in // CodeGenModule::getMemberPointerConstant uint64_t fieldOffset = CGM.getContext().getFieldOffset(D); CharUnits chars = CGM.getContext().toCharUnitsFromBits((int64_t)fieldOffset); V = CGM.getCXXABI().EmitMemberDataPointer(MPT, chars); } else if (const auto *GD = dyn_cast(D)) { V = CGM.GetAddrOfMSGuidDecl(GD).getPointer(); } assert(V && "Failed to find template parameter pointer"); V = V->stripPointerCasts(); } TemplateParams.push_back(DBuilder.createTemplateValueParameter( TheCU, Name, TTy, defaultParameter, cast_or_null(V))); } break; case TemplateArgument::NullPtr: { QualType T = TA.getNullPtrType(); llvm::DIType *TTy = getOrCreateType(T, Unit); llvm::Constant *V = nullptr; // Special case member data pointer null values since they're actually -1 // instead of zero. if (const auto *MPT = dyn_cast(T.getTypePtr())) // But treat member function pointers as simple zero integers because // it's easier than having a special case in LLVM's CodeGen. If LLVM // CodeGen grows handling for values of non-null member function // pointers then perhaps we could remove this special case and rely on // EmitNullMemberPointer for member function pointers. if (MPT->isMemberDataPointer()) V = CGM.getCXXABI().EmitNullMemberPointer(MPT); if (!V) V = llvm::ConstantInt::get(CGM.Int8Ty, 0); TemplateParams.push_back(DBuilder.createTemplateValueParameter( TheCU, Name, TTy, defaultParameter, V)); } break; case TemplateArgument::Template: TemplateParams.push_back(DBuilder.createTemplateTemplateParameter( TheCU, Name, nullptr, TA.getAsTemplate().getAsTemplateDecl()->getQualifiedNameAsString())); break; case TemplateArgument::Pack: TemplateParams.push_back(DBuilder.createTemplateParameterPack( TheCU, Name, nullptr, CollectTemplateParams(nullptr, TA.getPackAsArray(), Unit))); break; case TemplateArgument::Expression: { const Expr *E = TA.getAsExpr(); QualType T = E->getType(); if (E->isGLValue()) T = CGM.getContext().getLValueReferenceType(T); llvm::Constant *V = ConstantEmitter(CGM).emitAbstract(E, T); assert(V && "Expression in template argument isn't constant"); llvm::DIType *TTy = getOrCreateType(T, Unit); TemplateParams.push_back(DBuilder.createTemplateValueParameter( TheCU, Name, TTy, defaultParameter, V->stripPointerCasts())); } break; // And the following should never occur: case TemplateArgument::TemplateExpansion: case TemplateArgument::Null: llvm_unreachable( "These argument types shouldn't exist in concrete types"); } } return DBuilder.getOrCreateArray(TemplateParams); } llvm::DINodeArray CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile *Unit) { if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplateSpecialization) { const TemplateParameterList *TList = FD->getTemplateSpecializationInfo() ->getTemplate() ->getTemplateParameters(); return CollectTemplateParams( TList, FD->getTemplateSpecializationArgs()->asArray(), Unit); } return llvm::DINodeArray(); } llvm::DINodeArray CGDebugInfo::CollectVarTemplateParams(const VarDecl *VL, llvm::DIFile *Unit) { // Always get the full list of parameters, not just the ones from the // specialization. A partial specialization may have fewer parameters than // there are arguments. 
auto *TS = dyn_cast(VL); if (!TS) return llvm::DINodeArray(); VarTemplateDecl *T = TS->getSpecializedTemplate(); const TemplateParameterList *TList = T->getTemplateParameters(); auto TA = TS->getTemplateArgs().asArray(); return CollectTemplateParams(TList, TA, Unit); } llvm::DINodeArray CGDebugInfo::CollectCXXTemplateParams( const ClassTemplateSpecializationDecl *TSpecial, llvm::DIFile *Unit) { // Always get the full list of parameters, not just the ones from the // specialization. A partial specialization may have fewer parameters than // there are arguments. TemplateParameterList *TPList = TSpecial->getSpecializedTemplate()->getTemplateParameters(); const TemplateArgumentList &TAList = TSpecial->getTemplateArgs(); return CollectTemplateParams(TPList, TAList.asArray(), Unit); } llvm::DIType *CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile *Unit) { if (VTablePtrType) return VTablePtrType; ASTContext &Context = CGM.getContext(); /* Function type */ llvm::Metadata *STy = getOrCreateType(Context.IntTy, Unit); llvm::DITypeRefArray SElements = DBuilder.getOrCreateTypeArray(STy); llvm::DIType *SubTy = DBuilder.createSubroutineType(SElements); unsigned Size = Context.getTypeSize(Context.VoidPtrTy); unsigned VtblPtrAddressSpace = CGM.getTarget().getVtblPtrAddressSpace(); Optional DWARFAddressSpace = CGM.getTarget().getDWARFAddressSpace(VtblPtrAddressSpace); llvm::DIType *vtbl_ptr_type = DBuilder.createPointerType( SubTy, Size, 0, DWARFAddressSpace, "__vtbl_ptr_type"); VTablePtrType = DBuilder.createPointerType(vtbl_ptr_type, Size); return VTablePtrType; } StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) { // Copy the gdb compatible name on the side and use its reference. return internString("_vptr$", RD->getNameAsString()); } StringRef CGDebugInfo::getDynamicInitializerName(const VarDecl *VD, DynamicInitKind StubKind, llvm::Function *InitFn) { // If we're not emitting codeview, use the mangled name. For Itanium, this is // arbitrary. if (!CGM.getCodeGenOpts().EmitCodeView) return InitFn->getName(); // Print the normal qualified name for the variable, then break off the last // NNS, and add the appropriate other text. Clang always prints the global // variable name without template arguments, so we can use rsplit("::") and // then recombine the pieces. SmallString<128> QualifiedGV; StringRef Quals; StringRef GVName; { llvm::raw_svector_ostream OS(QualifiedGV); VD->printQualifiedName(OS, getPrintingPolicy()); std::tie(Quals, GVName) = OS.str().rsplit("::"); if (GVName.empty()) std::swap(Quals, GVName); } SmallString<128> InitName; llvm::raw_svector_ostream OS(InitName); if (!Quals.empty()) OS << Quals << "::"; switch (StubKind) { case DynamicInitKind::NoStub: llvm_unreachable("not an initializer"); case DynamicInitKind::Initializer: OS << "`dynamic initializer for '"; break; case DynamicInitKind::AtExit: OS << "`dynamic atexit destructor for '"; break; } OS << GVName; // Add any template specialization args. if (const auto *VTpl = dyn_cast(VD)) { printTemplateArgumentList(OS, VTpl->getTemplateArgs().asArray(), getPrintingPolicy()); } OS << '\''; return internString(OS.str()); } void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit, SmallVectorImpl &EltTys, llvm::DICompositeType *RecordTy) { // If this class is not dynamic then there is not any vtable info to collect. if (!RD->isDynamicClass()) return; // Don't emit any vtable shape or vptr info if this class doesn't have an // extendable vfptr. 
This can happen if the class doesn't have virtual // methods, or in the MS ABI if those virtual methods only come from virtually // inherited bases. const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD); if (!RL.hasExtendableVFPtr()) return; // CodeView needs to know how large the vtable of every dynamic class is, so // emit a special named pointer type into the element list. The vptr type // points to this type as well. llvm::DIType *VPtrTy = nullptr; bool NeedVTableShape = CGM.getCodeGenOpts().EmitCodeView && CGM.getTarget().getCXXABI().isMicrosoft(); if (NeedVTableShape) { uint64_t PtrWidth = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy); const VTableLayout &VFTLayout = CGM.getMicrosoftVTableContext().getVFTableLayout(RD, CharUnits::Zero()); unsigned VSlotCount = VFTLayout.vtable_components().size() - CGM.getLangOpts().RTTIData; unsigned VTableWidth = PtrWidth * VSlotCount; unsigned VtblPtrAddressSpace = CGM.getTarget().getVtblPtrAddressSpace(); Optional DWARFAddressSpace = CGM.getTarget().getDWARFAddressSpace(VtblPtrAddressSpace); // Create a very wide void* type and insert it directly in the element list. llvm::DIType *VTableType = DBuilder.createPointerType( nullptr, VTableWidth, 0, DWARFAddressSpace, "__vtbl_ptr_type"); EltTys.push_back(VTableType); // The vptr is a pointer to this special vtable type. VPtrTy = DBuilder.createPointerType(VTableType, PtrWidth); } // If there is a primary base then the artificial vptr member lives there. if (RL.getPrimaryBase()) return; if (!VPtrTy) VPtrTy = getOrCreateVTablePtrType(Unit); unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy); llvm::DIType *VPtrMember = DBuilder.createMemberType(Unit, getVTableName(RD), Unit, 0, Size, 0, 0, llvm::DINode::FlagArtificial, VPtrTy); EltTys.push_back(VPtrMember); } llvm::DIType *CGDebugInfo::getOrCreateRecordType(QualType RTy, SourceLocation Loc) { assert(CGM.getCodeGenOpts().hasReducedDebugInfo()); llvm::DIType *T = getOrCreateType(RTy, getOrCreateFile(Loc)); return T; } llvm::DIType *CGDebugInfo::getOrCreateInterfaceType(QualType D, SourceLocation Loc) { return getOrCreateStandaloneType(D, Loc); } llvm::DIType *CGDebugInfo::getOrCreateStandaloneType(QualType D, SourceLocation Loc) { assert(CGM.getCodeGenOpts().hasReducedDebugInfo()); assert(!D.isNull() && "null type"); llvm::DIType *T = getOrCreateType(D, getOrCreateFile(Loc)); assert(T && "could not create debug info for type"); RetainedTypes.push_back(D.getAsOpaquePtr()); return T; } void CGDebugInfo::addHeapAllocSiteMetadata(llvm::CallBase *CI, QualType AllocatedTy, SourceLocation Loc) { if (CGM.getCodeGenOpts().getDebugInfo() <= codegenoptions::DebugLineTablesOnly) return; llvm::MDNode *node; if (AllocatedTy->isVoidType()) node = llvm::MDNode::get(CGM.getLLVMContext(), None); else node = getOrCreateType(AllocatedTy, getOrCreateFile(Loc)); CI->setMetadata("heapallocsite", node); } void CGDebugInfo::completeType(const EnumDecl *ED) { if (DebugKind <= codegenoptions::DebugLineTablesOnly) return; QualType Ty = CGM.getContext().getEnumType(ED); void *TyPtr = Ty.getAsOpaquePtr(); auto I = TypeCache.find(TyPtr); if (I == TypeCache.end() || !cast(I->second)->isForwardDecl()) return; llvm::DIType *Res = CreateTypeDefinition(Ty->castAs()); assert(!Res->isForwardDecl()); TypeCache[TyPtr].reset(Res); } void CGDebugInfo::completeType(const RecordDecl *RD) { if (DebugKind > codegenoptions::LimitedDebugInfo || !CGM.getLangOpts().CPlusPlus) completeRequiredType(RD); } /// Return true if the class or any of its methods are 
marked dllimport.
static bool isClassOrMethodDLLImport(const CXXRecordDecl *RD) {
  if (RD->hasAttr<DLLImportAttr>())
    return true;
  for (const CXXMethodDecl *MD : RD->methods())
    if (MD->hasAttr<DLLImportAttr>())
      return true;
  return false;
}

/// Does a type definition exist in an imported clang module?
static bool isDefinedInClangModule(const RecordDecl *RD) {
  // Only definitions that were imported from an AST file come from a module.
  if (!RD || !RD->isFromASTFile())
    return false;
  // Anonymous entities cannot be addressed. Treat them as not from module.
  if (!RD->isExternallyVisible() && RD->getName().empty())
    return false;
  if (auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD)) {
    if (!CXXDecl->isCompleteDefinition())
      return false;
    // Check whether RD is a template.
    auto TemplateKind = CXXDecl->getTemplateSpecializationKind();
    if (TemplateKind != TSK_Undeclared) {
      // Unfortunately getOwningModule() isn't accurate enough to find the
      // owning module of a ClassTemplateSpecializationDecl that is inside a
      // namespace spanning multiple modules.
      bool Explicit = false;
      if (auto *TD = dyn_cast<ClassTemplateSpecializationDecl>(CXXDecl))
        Explicit = TD->isExplicitInstantiationOrSpecialization();
      if (!Explicit && CXXDecl->getEnclosingNamespaceContext())
        return false;
      // This is a template, check the origin of the first member.
      if (CXXDecl->field_begin() == CXXDecl->field_end())
        return TemplateKind == TSK_ExplicitInstantiationDeclaration;
      if (!CXXDecl->field_begin()->isFromASTFile())
        return false;
    }
  }
  return true;
}

void CGDebugInfo::completeClassData(const RecordDecl *RD) {
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    if (CXXRD->isDynamicClass() &&
        CGM.getVTableLinkage(CXXRD) ==
            llvm::GlobalValue::AvailableExternallyLinkage &&
        !isClassOrMethodDLLImport(CXXRD))
      return;
  if (DebugTypeExtRefs && isDefinedInClangModule(RD->getDefinition()))
    return;
  completeClass(RD);
}

void CGDebugInfo::completeClass(const RecordDecl *RD) {
  if (DebugKind <= codegenoptions::DebugLineTablesOnly)
    return;
  QualType Ty = CGM.getContext().getRecordType(RD);
  void *TyPtr = Ty.getAsOpaquePtr();
  auto I = TypeCache.find(TyPtr);
  if (I != TypeCache.end() && !cast<llvm::DIType>(I->second)->isForwardDecl())
    return;
  llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<RecordType>());
  assert(!Res->isForwardDecl());
  TypeCache[TyPtr].reset(Res);
}

static bool hasExplicitMemberDefinition(CXXRecordDecl::method_iterator I,
                                        CXXRecordDecl::method_iterator End) {
  for (CXXMethodDecl *MD : llvm::make_range(I, End))
    if (FunctionDecl *Tmpl = MD->getInstantiatedFromMemberFunction())
      if (!Tmpl->isImplicit() && Tmpl->isThisDeclarationADefinition() &&
          !MD->getMemberSpecializationInfo()->isExplicitSpecialization())
        return true;
  return false;
}

static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
                                 bool DebugTypeExtRefs, const RecordDecl *RD,
                                 const LangOptions &LangOpts) {
  if (DebugTypeExtRefs && isDefinedInClangModule(RD->getDefinition()))
    return true;

  if (auto *ES = RD->getASTContext().getExternalSource())
    if (ES->hasExternalDefinitions(RD) == ExternalASTSource::EK_Always)
      return true;

  if (DebugKind > codegenoptions::LimitedDebugInfo)
    return false;

  if (!LangOpts.CPlusPlus)
    return false;

  if (!RD->isCompleteDefinitionRequired())
    return true;

  const auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
  if (!CXXDecl)
    return false;

  // Only emit complete debug info for a dynamic class when its vtable is
  // emitted. However, Microsoft debuggers don't resolve type information
  // across DLL boundaries, so skip this optimization if the class or any of
  // its methods are marked dllimport.
This isn't a complete solution, since objects // without any dllimport methods can be used in one DLL and constructed in // another, but it is the current behavior of LimitedDebugInfo. if (CXXDecl->hasDefinition() && CXXDecl->isDynamicClass() && !isClassOrMethodDLLImport(CXXDecl)) return true; // In constructor debug mode, only emit debug info for a class when its // constructor is emitted. Skip this optimization if the class or any of // its methods are marked dllimport. if (DebugKind == codegenoptions::DebugInfoConstructor && !CXXDecl->isLambda() && !CXXDecl->hasConstexprNonCopyMoveConstructor() && !isClassOrMethodDLLImport(CXXDecl)) for (const auto *Ctor : CXXDecl->ctors()) if (Ctor->isUserProvided()) return true; TemplateSpecializationKind Spec = TSK_Undeclared; if (const auto *SD = dyn_cast(RD)) Spec = SD->getSpecializationKind(); if (Spec == TSK_ExplicitInstantiationDeclaration && hasExplicitMemberDefinition(CXXDecl->method_begin(), CXXDecl->method_end())) return true; return false; } void CGDebugInfo::completeRequiredType(const RecordDecl *RD) { if (shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD, CGM.getLangOpts())) return; QualType Ty = CGM.getContext().getRecordType(RD); llvm::DIType *T = getTypeOrNull(Ty); if (T && T->isForwardDecl()) completeClassData(RD); } llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) { RecordDecl *RD = Ty->getDecl(); llvm::DIType *T = cast_or_null(getTypeOrNull(QualType(Ty, 0))); if (T || shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD, CGM.getLangOpts())) { if (!T) T = getOrCreateRecordFwdDecl(Ty, getDeclContextDescriptor(RD)); return T; } return CreateTypeDefinition(Ty); } llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) { RecordDecl *RD = Ty->getDecl(); // Get overall information about the record type for the debug info. llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation()); // Records and classes and unions can all be recursive. To handle them, we // first generate a debug descriptor for the struct as a forward declaration. // Then (if it is a definition) we go through and get debug info for all of // its members. Finally, we create a descriptor for the complete type (which // may refer to the forward decl if the struct is recursive) and replace all // uses of the forward declaration with the final definition. llvm::DICompositeType *FwdDecl = getOrCreateLimitedType(Ty, DefUnit); const RecordDecl *D = RD->getDefinition(); if (!D || !D->isCompleteDefinition()) return FwdDecl; if (const auto *CXXDecl = dyn_cast(RD)) CollectContainingType(CXXDecl, FwdDecl); // Push the struct on region stack. LexicalBlockStack.emplace_back(&*FwdDecl); RegionMap[Ty->getDecl()].reset(FwdDecl); // Convert all the elements. SmallVector EltTys; // what about nested types? // Note: The split of CXXDecl information here is intentional, the // gdb tests will depend on a certain ordering at printout. The debug // information offsets are still correct if we merge them all together // though. const auto *CXXDecl = dyn_cast(RD); if (CXXDecl) { CollectCXXBases(CXXDecl, DefUnit, EltTys, FwdDecl); CollectVTableInfo(CXXDecl, DefUnit, EltTys, FwdDecl); } // Collect data fields (including static variables and any initializers). 
CollectRecordFields(RD, DefUnit, EltTys, FwdDecl); if (CXXDecl) CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl); LexicalBlockStack.pop_back(); RegionMap.erase(Ty->getDecl()); llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys); DBuilder.replaceArrays(FwdDecl, Elements); if (FwdDecl->isTemporary()) FwdDecl = llvm::MDNode::replaceWithPermanent(llvm::TempDICompositeType(FwdDecl)); RegionMap[Ty->getDecl()].reset(FwdDecl); return FwdDecl; } llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectType *Ty, llvm::DIFile *Unit) { // Ignore protocols. return getOrCreateType(Ty->getBaseType(), Unit); } llvm::DIType *CGDebugInfo::CreateType(const ObjCTypeParamType *Ty, llvm::DIFile *Unit) { // Ignore protocols. SourceLocation Loc = Ty->getDecl()->getLocation(); // Use Typedefs to represent ObjCTypeParamType. return DBuilder.createTypedef( getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit), Ty->getDecl()->getName(), getOrCreateFile(Loc), getLineNumber(Loc), getDeclContextDescriptor(Ty->getDecl())); } /// \return true if Getter has the default name for the property PD. static bool hasDefaultGetterName(const ObjCPropertyDecl *PD, const ObjCMethodDecl *Getter) { assert(PD); if (!Getter) return true; assert(Getter->getDeclName().isObjCZeroArgSelector()); return PD->getName() == Getter->getDeclName().getObjCSelector().getNameForSlot(0); } /// \return true if Setter has the default name for the property PD. static bool hasDefaultSetterName(const ObjCPropertyDecl *PD, const ObjCMethodDecl *Setter) { assert(PD); if (!Setter) return true; assert(Setter->getDeclName().isObjCOneArgSelector()); return SelectorTable::constructSetterName(PD->getName()) == Setter->getDeclName().getObjCSelector().getNameForSlot(0); } llvm::DIType *CGDebugInfo::CreateType(const ObjCInterfaceType *Ty, llvm::DIFile *Unit) { ObjCInterfaceDecl *ID = Ty->getDecl(); if (!ID) return nullptr; // Return a forward declaration if this type was imported from a clang module, // and this is not the compile unit with the implementation of the type (which // may contain hidden ivars). if (DebugTypeExtRefs && ID->isFromASTFile() && ID->getDefinition() && !ID->getImplementation()) return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, ID->getName(), getDeclContextDescriptor(ID), Unit, 0); // Get overall information about the record type for the debug info. llvm::DIFile *DefUnit = getOrCreateFile(ID->getLocation()); unsigned Line = getLineNumber(ID->getLocation()); auto RuntimeLang = static_cast(TheCU->getSourceLanguage()); // If this is just a forward declaration return a special forward-declaration // debug type since we won't be able to lay out the entire type. ObjCInterfaceDecl *Def = ID->getDefinition(); if (!Def || !Def->getImplementation()) { llvm::DIScope *Mod = getParentModuleOrNull(ID); llvm::DIType *FwdDecl = DBuilder.createReplaceableCompositeType( llvm::dwarf::DW_TAG_structure_type, ID->getName(), Mod ? Mod : TheCU, DefUnit, Line, RuntimeLang); ObjCInterfaceCache.push_back(ObjCInterfaceCacheEntry(Ty, FwdDecl, Unit)); return FwdDecl; } return CreateTypeDefinition(Ty, Unit); } llvm::DIModule *CGDebugInfo::getOrCreateModuleRef(ASTSourceDescriptor Mod, bool CreateSkeletonCU) { // Use the Module pointer as the key into the cache. This is a // nullptr if the "Module" is a PCH, which is safe because we don't // support chained PCH debug info, so there can only be a single PCH. 
const Module *M = Mod.getModuleOrNull(); auto ModRef = ModuleCache.find(M); if (ModRef != ModuleCache.end()) return cast(ModRef->second); // Macro definitions that were defined with "-D" on the command line. SmallString<128> ConfigMacros; { llvm::raw_svector_ostream OS(ConfigMacros); const auto &PPOpts = CGM.getPreprocessorOpts(); unsigned I = 0; // Translate the macro definitions back into a command line. for (auto &M : PPOpts.Macros) { if (++I > 1) OS << " "; const std::string &Macro = M.first; bool Undef = M.second; OS << "\"-" << (Undef ? 'U' : 'D'); for (char c : Macro) switch (c) { case '\\': OS << "\\\\"; break; case '"': OS << "\\\""; break; default: OS << c; } OS << '\"'; } } bool IsRootModule = M ? !M->Parent : true; // When a module name is specified as -fmodule-name, that module gets a // clang::Module object, but it won't actually be built or imported; it will // be textual. if (CreateSkeletonCU && IsRootModule && Mod.getASTFile().empty() && M) assert(StringRef(M->Name).startswith(CGM.getLangOpts().ModuleName) && "clang module without ASTFile must be specified by -fmodule-name"); // Return a StringRef to the remapped Path. auto RemapPath = [this](StringRef Path) -> std::string { std::string Remapped = remapDIPath(Path); StringRef Relative(Remapped); StringRef CompDir = TheCU->getDirectory(); if (Relative.consume_front(CompDir)) Relative.consume_front(llvm::sys::path::get_separator()); return Relative.str(); }; if (CreateSkeletonCU && IsRootModule && !Mod.getASTFile().empty()) { // PCH files don't have a signature field in the control block, // but LLVM detects skeleton CUs by looking for a non-zero DWO id. // We use the lower 64 bits for debug info. uint64_t Signature = 0; if (const auto &ModSig = Mod.getSignature()) { for (unsigned I = 0; I != sizeof(Signature); ++I) Signature |= (uint64_t)ModSig[I] << (I * 8); } else { Signature = ~1ULL; } llvm::DIBuilder DIB(CGM.getModule()); SmallString<0> PCM; if (!llvm::sys::path::is_absolute(Mod.getASTFile())) PCM = Mod.getPath(); llvm::sys::path::append(PCM, Mod.getASTFile()); DIB.createCompileUnit( TheCU->getSourceLanguage(), // TODO: Support "Source" from external AST providers? DIB.createFile(Mod.getModuleName(), TheCU->getDirectory()), TheCU->getProducer(), false, StringRef(), 0, RemapPath(PCM), llvm::DICompileUnit::FullDebug, Signature); DIB.finalize(); } llvm::DIModule *Parent = IsRootModule ? nullptr : getOrCreateModuleRef(ASTSourceDescriptor(*M->Parent), CreateSkeletonCU); std::string IncludePath = Mod.getPath().str(); llvm::DIModule *DIMod = DBuilder.createModule(Parent, Mod.getModuleName(), ConfigMacros, RemapPath(IncludePath)); ModuleCache[M].reset(DIMod); return DIMod; } llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm::DIFile *Unit) { ObjCInterfaceDecl *ID = Ty->getDecl(); llvm::DIFile *DefUnit = getOrCreateFile(ID->getLocation()); unsigned Line = getLineNumber(ID->getLocation()); unsigned RuntimeLang = TheCU->getSourceLanguage(); // Bit size, align and offset of the type. uint64_t Size = CGM.getContext().getTypeSize(Ty); auto Align = getTypeAlignIfRequired(Ty, CGM.getContext()); llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero; if (ID->getImplementation()) Flags |= llvm::DINode::FlagObjcClassComplete; llvm::DIScope *Mod = getParentModuleOrNull(ID); llvm::DICompositeType *RealDecl = DBuilder.createStructType( Mod ? 
Mod : Unit, ID->getName(), DefUnit, Line, Size, Align, Flags, nullptr, llvm::DINodeArray(), RuntimeLang); QualType QTy(Ty, 0); TypeCache[QTy.getAsOpaquePtr()].reset(RealDecl); // Push the struct on region stack. LexicalBlockStack.emplace_back(RealDecl); RegionMap[Ty->getDecl()].reset(RealDecl); // Convert all the elements. SmallVector EltTys; ObjCInterfaceDecl *SClass = ID->getSuperClass(); if (SClass) { llvm::DIType *SClassTy = getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit); if (!SClassTy) return nullptr; llvm::DIType *InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0, 0, llvm::DINode::FlagZero); EltTys.push_back(InhTag); } // Create entries for all of the properties. auto AddProperty = [&](const ObjCPropertyDecl *PD) { SourceLocation Loc = PD->getLocation(); llvm::DIFile *PUnit = getOrCreateFile(Loc); unsigned PLine = getLineNumber(Loc); ObjCMethodDecl *Getter = PD->getGetterMethodDecl(); ObjCMethodDecl *Setter = PD->getSetterMethodDecl(); llvm::MDNode *PropertyNode = DBuilder.createObjCProperty( PD->getName(), PUnit, PLine, hasDefaultGetterName(PD, Getter) ? "" : getSelectorName(PD->getGetterName()), hasDefaultSetterName(PD, Setter) ? "" : getSelectorName(PD->getSetterName()), PD->getPropertyAttributes(), getOrCreateType(PD->getType(), PUnit)); EltTys.push_back(PropertyNode); }; { llvm::SmallPtrSet PropertySet; for (const ObjCCategoryDecl *ClassExt : ID->known_extensions()) for (auto *PD : ClassExt->properties()) { PropertySet.insert(PD->getIdentifier()); AddProperty(PD); } for (const auto *PD : ID->properties()) { // Don't emit duplicate metadata for properties that were already in a // class extension. if (!PropertySet.insert(PD->getIdentifier()).second) continue; AddProperty(PD); } } const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID); unsigned FieldNo = 0; for (ObjCIvarDecl *Field = ID->all_declared_ivar_begin(); Field; Field = Field->getNextIvar(), ++FieldNo) { llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit); if (!FieldTy) return nullptr; StringRef FieldName = Field->getName(); // Ignore unnamed fields. if (FieldName.empty()) continue; // Get the location for the field. llvm::DIFile *FieldDefUnit = getOrCreateFile(Field->getLocation()); unsigned FieldLine = getLineNumber(Field->getLocation()); QualType FType = Field->getType(); uint64_t FieldSize = 0; uint32_t FieldAlign = 0; if (!FType->isIncompleteArrayType()) { // Bit size, align and offset of the type. FieldSize = Field->isBitField() ? Field->getBitWidthValue(CGM.getContext()) : CGM.getContext().getTypeSize(FType); FieldAlign = getTypeAlignIfRequired(FType, CGM.getContext()); } uint64_t FieldOffset; if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) { // We don't know the runtime offset of an ivar if we're using the // non-fragile ABI. For bitfields, use the bit offset into the first // byte of storage of the bitfield. For other fields, use zero. 
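// For example (illustrative numbers, assuming an 8-bit char target): an ivar
// bit-field whose storage happens to begin at bit 11 is recorded below with
// FieldOffset 11 % 8 == 3, while any non-bit-field ivar simply gets offset 0.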
if (Field->isBitField()) { FieldOffset = CGM.getObjCRuntime().ComputeBitfieldBitOffset(CGM, ID, Field); FieldOffset %= CGM.getContext().getCharWidth(); } else { FieldOffset = 0; } } else { FieldOffset = RL.getFieldOffset(FieldNo); } llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero; if (Field->getAccessControl() == ObjCIvarDecl::Protected) Flags = llvm::DINode::FlagProtected; else if (Field->getAccessControl() == ObjCIvarDecl::Private) Flags = llvm::DINode::FlagPrivate; else if (Field->getAccessControl() == ObjCIvarDecl::Public) Flags = llvm::DINode::FlagPublic; llvm::MDNode *PropertyNode = nullptr; if (ObjCImplementationDecl *ImpD = ID->getImplementation()) { if (ObjCPropertyImplDecl *PImpD = ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) { if (ObjCPropertyDecl *PD = PImpD->getPropertyDecl()) { SourceLocation Loc = PD->getLocation(); llvm::DIFile *PUnit = getOrCreateFile(Loc); unsigned PLine = getLineNumber(Loc); ObjCMethodDecl *Getter = PImpD->getGetterMethodDecl(); ObjCMethodDecl *Setter = PImpD->getSetterMethodDecl(); PropertyNode = DBuilder.createObjCProperty( PD->getName(), PUnit, PLine, hasDefaultGetterName(PD, Getter) ? "" : getSelectorName(PD->getGetterName()), hasDefaultSetterName(PD, Setter) ? "" : getSelectorName(PD->getSetterName()), PD->getPropertyAttributes(), getOrCreateType(PD->getType(), PUnit)); } } } FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit, FieldLine, FieldSize, FieldAlign, FieldOffset, Flags, FieldTy, PropertyNode); EltTys.push_back(FieldTy); } llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys); DBuilder.replaceArrays(RealDecl, Elements); LexicalBlockStack.pop_back(); return RealDecl; } llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile *Unit) { llvm::DIType *ElementTy = getOrCreateType(Ty->getElementType(), Unit); int64_t Count = Ty->getNumElements(); llvm::Metadata *Subscript; QualType QTy(Ty, 0); auto SizeExpr = SizeExprCache.find(QTy); if (SizeExpr != SizeExprCache.end()) Subscript = DBuilder.getOrCreateSubrange( SizeExpr->getSecond() /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/, nullptr /*stride*/); else { auto *CountNode = llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned( llvm::Type::getInt64Ty(CGM.getLLVMContext()), Count ? Count : -1)); Subscript = DBuilder.getOrCreateSubrange( CountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/, nullptr /*stride*/); } llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript); uint64_t Size = CGM.getContext().getTypeSize(Ty); auto Align = getTypeAlignIfRequired(Ty, CGM.getContext()); return DBuilder.createVectorType(Size, Align, ElementTy, SubscriptArray); } llvm::DIType *CGDebugInfo::CreateType(const ConstantMatrixType *Ty, llvm::DIFile *Unit) { // FIXME: Create another debug type for matrices // For the time being, it treats it like a nested ArrayType. llvm::DIType *ElementTy = getOrCreateType(Ty->getElementType(), Unit); uint64_t Size = CGM.getContext().getTypeSize(Ty); uint32_t Align = getTypeAlignIfRequired(Ty, CGM.getContext()); // Create ranges for both dimensions. 
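// For example (illustrative only; 'M' is a made-up name), a value of the type
//
//   typedef float M __attribute__((matrix_type(2, 3))); // 2 rows, 3 columns
//
// is described as a two-dimensional array of float whose two subranges come
// from the column and row counts gathered below.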
llvm::SmallVector Subscripts; auto *ColumnCountNode = llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned( llvm::Type::getInt64Ty(CGM.getLLVMContext()), Ty->getNumColumns())); auto *RowCountNode = llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned( llvm::Type::getInt64Ty(CGM.getLLVMContext()), Ty->getNumRows())); Subscripts.push_back(DBuilder.getOrCreateSubrange( ColumnCountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/, nullptr /*stride*/)); Subscripts.push_back(DBuilder.getOrCreateSubrange( RowCountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/, nullptr /*stride*/)); llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts); return DBuilder.createArrayType(Size, Align, ElementTy, SubscriptArray); } llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) { uint64_t Size; uint32_t Align; // FIXME: make getTypeAlign() aware of VLAs and incomplete array types if (const auto *VAT = dyn_cast(Ty)) { Size = 0; Align = getTypeAlignIfRequired(CGM.getContext().getBaseElementType(VAT), CGM.getContext()); } else if (Ty->isIncompleteArrayType()) { Size = 0; if (Ty->getElementType()->isIncompleteType()) Align = 0; else Align = getTypeAlignIfRequired(Ty->getElementType(), CGM.getContext()); } else if (Ty->isIncompleteType()) { Size = 0; Align = 0; } else { // Size and align of the whole array, not the element type. Size = CGM.getContext().getTypeSize(Ty); Align = getTypeAlignIfRequired(Ty, CGM.getContext()); } // Add the dimensions of the array. FIXME: This loses CV qualifiers from // interior arrays, do we care? Why aren't nested arrays represented the // obvious/recursive way? SmallVector Subscripts; QualType EltTy(Ty, 0); while ((Ty = dyn_cast(EltTy))) { // If the number of elements is known, then count is that number. Otherwise, // it's -1. This allows us to represent a subrange with an array of 0 // elements, like this: // // struct foo { // int x[0]; // }; int64_t Count = -1; // Count == -1 is an unbounded array. 
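// For example (illustrative only; 'grid' and 'vla' are made-up names), a
// declaration like
//
//   int grid[3][4];
//
// makes this loop record the subrange counts 3 and then 4, whereas a VLA such
// as 'int vla[n]' whose bound does not fold to a constant keeps Count == -1.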
if (const auto *CAT = dyn_cast(Ty)) Count = CAT->getSize().getZExtValue(); else if (const auto *VAT = dyn_cast(Ty)) { if (Expr *Size = VAT->getSizeExpr()) { Expr::EvalResult Result; if (Size->EvaluateAsInt(Result, CGM.getContext())) Count = Result.Val.getInt().getExtValue(); } } auto SizeNode = SizeExprCache.find(EltTy); if (SizeNode != SizeExprCache.end()) Subscripts.push_back(DBuilder.getOrCreateSubrange( SizeNode->getSecond() /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/, nullptr /*stride*/)); else { auto *CountNode = llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned( llvm::Type::getInt64Ty(CGM.getLLVMContext()), Count)); Subscripts.push_back(DBuilder.getOrCreateSubrange( CountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/, nullptr /*stride*/)); } EltTy = Ty->getElementType(); } llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts); return DBuilder.createArrayType(Size, Align, getOrCreateType(EltTy, Unit), SubscriptArray); } llvm::DIType *CGDebugInfo::CreateType(const LValueReferenceType *Ty, llvm::DIFile *Unit) { return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type, Ty, Ty->getPointeeType(), Unit); } llvm::DIType *CGDebugInfo::CreateType(const RValueReferenceType *Ty, llvm::DIFile *Unit) { return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type, Ty, Ty->getPointeeType(), Unit); } llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty, llvm::DIFile *U) { llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero; uint64_t Size = 0; if (!Ty->isIncompleteType()) { Size = CGM.getContext().getTypeSize(Ty); // Set the MS inheritance model. There is no flag for the unspecified model. if (CGM.getTarget().getCXXABI().isMicrosoft()) { switch (Ty->getMostRecentCXXRecordDecl()->getMSInheritanceModel()) { case MSInheritanceModel::Single: Flags |= llvm::DINode::FlagSingleInheritance; break; case MSInheritanceModel::Multiple: Flags |= llvm::DINode::FlagMultipleInheritance; break; case MSInheritanceModel::Virtual: Flags |= llvm::DINode::FlagVirtualInheritance; break; case MSInheritanceModel::Unspecified: break; } } } llvm::DIType *ClassType = getOrCreateType(QualType(Ty->getClass(), 0), U); if (Ty->isMemberDataPointerType()) return DBuilder.createMemberPointerType( getOrCreateType(Ty->getPointeeType(), U), ClassType, Size, /*Align=*/0, Flags); const FunctionProtoType *FPT = Ty->getPointeeType()->getAs(); return DBuilder.createMemberPointerType( getOrCreateInstanceMethodType( CXXMethodDecl::getThisType(FPT, Ty->getMostRecentCXXRecordDecl()), FPT, U, false), ClassType, Size, /*Align=*/0, Flags); } llvm::DIType *CGDebugInfo::CreateType(const AtomicType *Ty, llvm::DIFile *U) { auto *FromTy = getOrCreateType(Ty->getValueType(), U); return DBuilder.createQualifiedType(llvm::dwarf::DW_TAG_atomic_type, FromTy); } llvm::DIType *CGDebugInfo::CreateType(const PipeType *Ty, llvm::DIFile *U) { return getOrCreateType(Ty->getElementType(), U); } llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) { const EnumDecl *ED = Ty->getDecl(); uint64_t Size = 0; uint32_t Align = 0; if (!ED->getTypeForDecl()->isIncompleteType()) { Size = CGM.getContext().getTypeSize(ED->getTypeForDecl()); Align = getDeclAlignIfRequired(ED, CGM.getContext()); } SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU); bool isImportedFromModule = DebugTypeExtRefs && ED->isFromASTFile() && ED->getDefinition(); // If this is just a forward declaration, construct an appropriately // marked node and just return it. 
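// For example (illustrative only; 'Color' is a made-up name), an opaque enum
// declaration such as
//
//   enum class Color : int;
//
// has no definition here, so it is described by the replaceable composite
// created below with DW_TAG_enumeration_type and FlagFwdDecl, and recorded in
// ReplaceMap so a later definition can take its place.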
if (isImportedFromModule || !ED->getDefinition()) { // Note that it is possible for enums to be created as part of // their own declcontext. In this case a FwdDecl will be created // twice. This doesn't cause a problem because both FwdDecls are // entered into the ReplaceMap: finalize() will replace the first // FwdDecl with the second and then replace the second with // complete type. llvm::DIScope *EDContext = getDeclContextDescriptor(ED); llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation()); llvm::TempDIScope TmpContext(DBuilder.createReplaceableCompositeType( llvm::dwarf::DW_TAG_enumeration_type, "", TheCU, DefUnit, 0)); unsigned Line = getLineNumber(ED->getLocation()); StringRef EDName = ED->getName(); llvm::DIType *RetTy = DBuilder.createReplaceableCompositeType( llvm::dwarf::DW_TAG_enumeration_type, EDName, EDContext, DefUnit, Line, 0, Size, Align, llvm::DINode::FlagFwdDecl, Identifier); ReplaceMap.emplace_back( std::piecewise_construct, std::make_tuple(Ty), std::make_tuple(static_cast(RetTy))); return RetTy; } return CreateTypeDefinition(Ty); } llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) { const EnumDecl *ED = Ty->getDecl(); uint64_t Size = 0; uint32_t Align = 0; if (!ED->getTypeForDecl()->isIncompleteType()) { Size = CGM.getContext().getTypeSize(ED->getTypeForDecl()); Align = getDeclAlignIfRequired(ED, CGM.getContext()); } SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU); // Create elements for each enumerator. SmallVector Enumerators; ED = ED->getDefinition(); bool IsSigned = ED->getIntegerType()->isSignedIntegerType(); for (const auto *Enum : ED->enumerators()) { const auto &InitVal = Enum->getInitVal(); auto Value = IsSigned ? InitVal.getSExtValue() : InitVal.getZExtValue(); Enumerators.push_back( DBuilder.createEnumerator(Enum->getName(), Value, !IsSigned)); } // Return a CompositeType for the enum itself. llvm::DINodeArray EltArray = DBuilder.getOrCreateArray(Enumerators); llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation()); unsigned Line = getLineNumber(ED->getLocation()); llvm::DIScope *EnumContext = getDeclContextDescriptor(ED); llvm::DIType *ClassTy = getOrCreateType(ED->getIntegerType(), DefUnit); return DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line, Size, Align, EltArray, ClassTy, Identifier, ED->isScoped()); } llvm::DIMacro *CGDebugInfo::CreateMacro(llvm::DIMacroFile *Parent, unsigned MType, SourceLocation LineLoc, StringRef Name, StringRef Value) { unsigned Line = LineLoc.isInvalid() ? 0 : getLineNumber(LineLoc); return DBuilder.createMacro(Parent, Line, MType, Name, Value); } llvm::DIMacroFile *CGDebugInfo::CreateTempMacroFile(llvm::DIMacroFile *Parent, SourceLocation LineLoc, SourceLocation FileLoc) { llvm::DIFile *FName = getOrCreateFile(FileLoc); unsigned Line = LineLoc.isInvalid() ? 0 : getLineNumber(LineLoc); return DBuilder.createTempMacroFile(Parent, Line, FName); } static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) { Qualifiers Quals; do { Qualifiers InnerQuals = T.getLocalQualifiers(); // Qualifiers::operator+() doesn't like it if you add a Qualifier // that is already there. 
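// For instance (illustrative only; 'W' and 'Val' are made-up names), in
//
//   template <typename T> struct W { const T Val; };
//   W<const int> w;
//
// the type of Val unwraps through a SubstTemplateTypeParm node whose
// replacement is already 'const int'; removeCommonQualifiers() below keeps
// the accumulated set from picking up that 'const' a second time.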
Quals += Qualifiers::removeCommonQualifiers(Quals, InnerQuals); Quals += InnerQuals; QualType LastT = T; switch (T->getTypeClass()) { default: return C.getQualifiedType(T.getTypePtr(), Quals); case Type::TemplateSpecialization: { const auto *Spec = cast(T); if (Spec->isTypeAlias()) return C.getQualifiedType(T.getTypePtr(), Quals); T = Spec->desugar(); break; } case Type::TypeOfExpr: T = cast(T)->getUnderlyingExpr()->getType(); break; case Type::TypeOf: T = cast(T)->getUnderlyingType(); break; case Type::Decltype: T = cast(T)->getUnderlyingType(); break; case Type::UnaryTransform: T = cast(T)->getUnderlyingType(); break; case Type::Attributed: T = cast(T)->getEquivalentType(); break; case Type::Elaborated: T = cast(T)->getNamedType(); break; case Type::Paren: T = cast(T)->getInnerType(); break; case Type::MacroQualified: T = cast(T)->getUnderlyingType(); break; case Type::SubstTemplateTypeParm: T = cast(T)->getReplacementType(); break; case Type::Auto: case Type::DeducedTemplateSpecialization: { QualType DT = cast(T)->getDeducedType(); assert(!DT.isNull() && "Undeduced types shouldn't reach here."); T = DT; break; } case Type::Adjusted: case Type::Decayed: // Decayed and adjusted types use the adjusted type in LLVM and DWARF. T = cast(T)->getAdjustedType(); break; } assert(T != LastT && "Type unwrapping failed to unwrap!"); (void)LastT; } while (true); } llvm::DIType *CGDebugInfo::getTypeOrNull(QualType Ty) { // Unwrap the type as needed for debug information. Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext()); auto It = TypeCache.find(Ty.getAsOpaquePtr()); if (It != TypeCache.end()) { // Verify that the debug info still exists. if (llvm::Metadata *V = It->second) return cast(V); } return nullptr; } void CGDebugInfo::completeTemplateDefinition( const ClassTemplateSpecializationDecl &SD) { if (DebugKind <= codegenoptions::DebugLineTablesOnly) return; completeUnusedClass(SD); } void CGDebugInfo::completeUnusedClass(const CXXRecordDecl &D) { if (DebugKind <= codegenoptions::DebugLineTablesOnly) return; completeClassData(&D); // In case this type has no member function definitions being emitted, ensure // it is retained RetainedTypes.push_back(CGM.getContext().getRecordType(&D).getAsOpaquePtr()); } llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) { if (Ty.isNull()) return nullptr; llvm::TimeTraceScope TimeScope("DebugType", [&]() { std::string Name; llvm::raw_string_ostream OS(Name); Ty.print(OS, getPrintingPolicy()); return Name; }); // Unwrap the type as needed for debug information. Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext()); if (auto *T = getTypeOrNull(Ty)) return T; llvm::DIType *Res = CreateTypeNode(Ty, Unit); void *TyPtr = Ty.getAsOpaquePtr(); // And update the type cache. TypeCache[TyPtr].reset(Res); return Res; } llvm::DIModule *CGDebugInfo::getParentModuleOrNull(const Decl *D) { // A forward declaration inside a module header does not belong to the module. if (isa(D) && !cast(D)->getDefinition()) return nullptr; if (DebugTypeExtRefs && D->isFromASTFile()) { // Record a reference to an imported clang module or precompiled header. auto *Reader = CGM.getContext().getExternalSource(); auto Idx = D->getOwningModuleID(); auto Info = Reader->getSourceDescriptor(Idx); if (Info) return getOrCreateModuleRef(*Info, /*SkeletonCU=*/true); } else if (ClangModuleMap) { // We are building a clang module or a precompiled header. 
// // TODO: When D is a CXXRecordDecl or a C++ Enum, the ODR applies // and it wouldn't be necessary to specify the parent scope // because the type is already unique by definition (it would look // like the output of -fno-standalone-debug). On the other hand, // the parent scope helps a consumer to quickly locate the object // file where the type's definition is located, so it might be // best to make this behavior a command line or debugger tuning // option. if (Module *M = D->getOwningModule()) { // This is a (sub-)module. auto Info = ASTSourceDescriptor(*M); return getOrCreateModuleRef(Info, /*SkeletonCU=*/false); } else { // This the precompiled header being built. return getOrCreateModuleRef(PCHDescriptor, /*SkeletonCU=*/false); } } return nullptr; } llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) { // Handle qualifiers, which recursively handles what they refer to. if (Ty.hasLocalQualifiers()) return CreateQualifiedType(Ty, Unit); // Work out details of type. switch (Ty->getTypeClass()) { #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_TYPE(Class, Base) #define DEPENDENT_TYPE(Class, Base) case Type::Class: #include "clang/AST/TypeNodes.inc" llvm_unreachable("Dependent types cannot show up in debug information"); case Type::ExtVector: case Type::Vector: return CreateType(cast(Ty), Unit); case Type::ConstantMatrix: return CreateType(cast(Ty), Unit); case Type::ObjCObjectPointer: return CreateType(cast(Ty), Unit); case Type::ObjCObject: return CreateType(cast(Ty), Unit); case Type::ObjCTypeParam: return CreateType(cast(Ty), Unit); case Type::ObjCInterface: return CreateType(cast(Ty), Unit); case Type::Builtin: return CreateType(cast(Ty)); case Type::Complex: return CreateType(cast(Ty)); case Type::Pointer: return CreateType(cast(Ty), Unit); case Type::BlockPointer: return CreateType(cast(Ty), Unit); case Type::Typedef: return CreateType(cast(Ty), Unit); case Type::Record: return CreateType(cast(Ty)); case Type::Enum: return CreateEnumType(cast(Ty)); case Type::FunctionProto: case Type::FunctionNoProto: return CreateType(cast(Ty), Unit); case Type::ConstantArray: case Type::VariableArray: case Type::IncompleteArray: return CreateType(cast(Ty), Unit); case Type::LValueReference: return CreateType(cast(Ty), Unit); case Type::RValueReference: return CreateType(cast(Ty), Unit); case Type::MemberPointer: return CreateType(cast(Ty), Unit); case Type::Atomic: return CreateType(cast(Ty), Unit); case Type::ExtInt: return CreateType(cast(Ty)); case Type::Pipe: return CreateType(cast(Ty), Unit); case Type::TemplateSpecialization: return CreateType(cast(Ty), Unit); case Type::Auto: case Type::Attributed: case Type::Adjusted: case Type::Decayed: case Type::DeducedTemplateSpecialization: case Type::Elaborated: case Type::Paren: case Type::MacroQualified: case Type::SubstTemplateTypeParm: case Type::TypeOfExpr: case Type::TypeOf: case Type::Decltype: case Type::UnaryTransform: - case Type::PackExpansion: break; } llvm_unreachable("type should have been unwrapped!"); } llvm::DICompositeType *CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty, llvm::DIFile *Unit) { QualType QTy(Ty, 0); auto *T = cast_or_null(getTypeOrNull(QTy)); // We may have cached a forward decl when we could have created // a non-forward decl. Go ahead and create a non-forward decl // now. if (T && !T->isForwardDecl()) return T; // Otherwise create the type. 
llvm::DICompositeType *Res = CreateLimitedType(Ty); // Propagate members from the declaration to the definition // CreateType(const RecordType*) will overwrite this with the members in the // correct order if the full type is needed. DBuilder.replaceArrays(Res, T ? T->getElements() : llvm::DINodeArray()); // And update the type cache. TypeCache[QTy.getAsOpaquePtr()].reset(Res); return Res; } // TODO: Currently used for context chains when limiting debug info. llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) { RecordDecl *RD = Ty->getDecl(); // Get overall information about the record type for the debug info. llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation()); unsigned Line = getLineNumber(RD->getLocation()); StringRef RDName = getClassName(RD); llvm::DIScope *RDContext = getDeclContextDescriptor(RD); // If we ended up creating the type during the context chain construction, // just return that. auto *T = cast_or_null( getTypeOrNull(CGM.getContext().getRecordType(RD))); if (T && (!T->isForwardDecl() || !RD->getDefinition())) return T; // If this is just a forward or incomplete declaration, construct an // appropriately marked node and just return it. const RecordDecl *D = RD->getDefinition(); if (!D || !D->isCompleteDefinition()) return getOrCreateRecordFwdDecl(Ty, RDContext); uint64_t Size = CGM.getContext().getTypeSize(Ty); auto Align = getDeclAlignIfRequired(D, CGM.getContext()); SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU); // Explicitly record the calling convention and export symbols for C++ // records. auto Flags = llvm::DINode::FlagZero; if (auto CXXRD = dyn_cast(RD)) { if (CGM.getCXXABI().getRecordArgABI(CXXRD) == CGCXXABI::RAA_Indirect) Flags |= llvm::DINode::FlagTypePassByReference; else Flags |= llvm::DINode::FlagTypePassByValue; // Record if a C++ record is non-trivial type. if (!CXXRD->isTrivial()) Flags |= llvm::DINode::FlagNonTrivial; // Record exports it symbols to the containing structure. if (CXXRD->isAnonymousStructOrUnion()) Flags |= llvm::DINode::FlagExportSymbols; } llvm::DICompositeType *RealDecl = DBuilder.createReplaceableCompositeType( getTagForRecord(RD), RDName, RDContext, DefUnit, Line, 0, Size, Align, Flags, Identifier); // Elements of composite types usually have back to the type, creating // uniquing cycles. Distinct nodes are more efficient. switch (RealDecl->getTag()) { default: llvm_unreachable("invalid composite type tag"); case llvm::dwarf::DW_TAG_array_type: case llvm::dwarf::DW_TAG_enumeration_type: // Array elements and most enumeration elements don't have back references, // so they don't tend to be involved in uniquing cycles and there is some // chance of merging them when linking together two modules. Only make // them distinct if they are ODR-uniqued. if (Identifier.empty()) break; LLVM_FALLTHROUGH; case llvm::dwarf::DW_TAG_structure_type: case llvm::dwarf::DW_TAG_union_type: case llvm::dwarf::DW_TAG_class_type: // Immediately resolve to a distinct node. RealDecl = llvm::MDNode::replaceWithDistinct(llvm::TempDICompositeType(RealDecl)); break; } RegionMap[Ty->getDecl()].reset(RealDecl); TypeCache[QualType(Ty, 0).getAsOpaquePtr()].reset(RealDecl); if (const auto *TSpecial = dyn_cast(RD)) DBuilder.replaceArrays(RealDecl, llvm::DINodeArray(), CollectCXXTemplateParams(TSpecial, DefUnit)); return RealDecl; } void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD, llvm::DICompositeType *RealDecl) { // A class's primary base or the class itself contains the vtable. 
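// For example (illustrative only; 'A' and 'B' are made-up names), given
//
//   struct A { virtual void f(); };
//   struct B : A {};
//
// B's vtable lives in its primary base, so the loop below walks the
// non-virtual primary base chain and records A as the containing type; a
// dynamic class with no primary base is its own containing type.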
llvm::DICompositeType *ContainingType = nullptr; const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD); if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) { // Seek non-virtual primary base root. while (1) { const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase); const CXXRecordDecl *PBT = BRL.getPrimaryBase(); if (PBT && !BRL.isPrimaryBaseVirtual()) PBase = PBT; else break; } ContainingType = cast( getOrCreateType(QualType(PBase->getTypeForDecl(), 0), getOrCreateFile(RD->getLocation()))); } else if (RD->isDynamicClass()) ContainingType = RealDecl; DBuilder.replaceVTableHolder(RealDecl, ContainingType); } llvm::DIType *CGDebugInfo::CreateMemberType(llvm::DIFile *Unit, QualType FType, StringRef Name, uint64_t *Offset) { llvm::DIType *FieldTy = CGDebugInfo::getOrCreateType(FType, Unit); uint64_t FieldSize = CGM.getContext().getTypeSize(FType); auto FieldAlign = getTypeAlignIfRequired(FType, CGM.getContext()); llvm::DIType *Ty = DBuilder.createMemberType(Unit, Name, Unit, 0, FieldSize, FieldAlign, *Offset, llvm::DINode::FlagZero, FieldTy); *Offset += FieldSize; return Ty; } void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit, StringRef &Name, StringRef &LinkageName, llvm::DIScope *&FDContext, llvm::DINodeArray &TParamsArray, llvm::DINode::DIFlags &Flags) { const auto *FD = cast(GD.getDecl()); Name = getFunctionName(FD); // Use mangled name as linkage name for C/C++ functions. if (FD->hasPrototype()) { LinkageName = CGM.getMangledName(GD); Flags |= llvm::DINode::FlagPrototyped; } // No need to replicate the linkage name if it isn't different from the // subprogram name, no need to have it at all unless coverage is enabled or // debug is set to more than just line tables or extra debug info is needed. if (LinkageName == Name || (!CGM.getCodeGenOpts().EmitGcovArcs && !CGM.getCodeGenOpts().EmitGcovNotes && !CGM.getCodeGenOpts().DebugInfoForProfiling && DebugKind <= codegenoptions::DebugLineTablesOnly)) LinkageName = StringRef(); if (CGM.getCodeGenOpts().hasReducedDebugInfo()) { if (const NamespaceDecl *NSDecl = dyn_cast_or_null(FD->getDeclContext())) FDContext = getOrCreateNamespace(NSDecl); else if (const RecordDecl *RDecl = dyn_cast_or_null(FD->getDeclContext())) { llvm::DIScope *Mod = getParentModuleOrNull(RDecl); FDContext = getContextDescriptor(RDecl, Mod ? Mod : TheCU); } // Check if it is a noreturn-marked function if (FD->isNoReturn()) Flags |= llvm::DINode::FlagNoReturn; // Collect template parameters. TParamsArray = CollectFunctionTemplateParams(FD, Unit); } } void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit, unsigned &LineNo, QualType &T, StringRef &Name, StringRef &LinkageName, llvm::MDTuple *&TemplateParameters, llvm::DIScope *&VDContext) { Unit = getOrCreateFile(VD->getLocation()); LineNo = getLineNumber(VD->getLocation()); setLocation(VD->getLocation()); T = VD->getType(); if (T->isIncompleteArrayType()) { // CodeGen turns int[] into int[1] so we'll do the same here. 
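// For example (illustrative only; 'Data' is a made-up name), a tentative
// file-scope definition such as
//
//   int Data[];
//
// is given the same 'int [1]' debug type here that CodeGen gives the variable
// itself.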
llvm::APInt ConstVal(32, 1); QualType ET = CGM.getContext().getAsArrayType(T)->getElementType(); T = CGM.getContext().getConstantArrayType(ET, ConstVal, nullptr, ArrayType::Normal, 0); } Name = VD->getName(); if (VD->getDeclContext() && !isa(VD->getDeclContext()) && !isa(VD->getDeclContext())) LinkageName = CGM.getMangledName(VD); if (LinkageName == Name) LinkageName = StringRef(); if (isa(VD)) { llvm::DINodeArray parameterNodes = CollectVarTemplateParams(VD, &*Unit); TemplateParameters = parameterNodes.get(); } else { TemplateParameters = nullptr; } // Since we emit declarations (DW_AT_members) for static members, place the // definition of those static members in the namespace they were declared in // in the source code (the lexical decl context). // FIXME: Generalize this for even non-member global variables where the // declaration and definition may have different lexical decl contexts, once // we have support for emitting declarations of (non-member) global variables. const DeclContext *DC = VD->isStaticDataMember() ? VD->getLexicalDeclContext() : VD->getDeclContext(); // When a record type contains an in-line initialization of a static data // member, and the record type is marked as __declspec(dllexport), an implicit // definition of the member will be created in the record context. DWARF // doesn't seem to have a nice way to describe this in a form that consumers // are likely to understand, so fake the "normal" situation of a definition // outside the class by putting it in the global scope. if (DC->isRecord()) DC = CGM.getContext().getTranslationUnitDecl(); llvm::DIScope *Mod = getParentModuleOrNull(VD); VDContext = getContextDescriptor(cast(DC), Mod ? Mod : TheCU); } llvm::DISubprogram *CGDebugInfo::getFunctionFwdDeclOrStub(GlobalDecl GD, bool Stub) { llvm::DINodeArray TParamsArray; StringRef Name, LinkageName; llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero; llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero; SourceLocation Loc = GD.getDecl()->getLocation(); llvm::DIFile *Unit = getOrCreateFile(Loc); llvm::DIScope *DContext = Unit; unsigned Line = getLineNumber(Loc); collectFunctionDeclProps(GD, Unit, Name, LinkageName, DContext, TParamsArray, Flags); auto *FD = cast(GD.getDecl()); // Build function type. 
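// For example (illustrative only; 'add' is a made-up name), for a callee
// declared as
//
//   int add(int a, float b);
//
// the ArgTypes gathered below produce the subroutine type 'int (int, float)'
// with the declared calling convention; parameter names are not part of it.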
SmallVector ArgTypes; for (const ParmVarDecl *Parm : FD->parameters()) ArgTypes.push_back(Parm->getType()); CallingConv CC = FD->getType()->castAs()->getCallConv(); QualType FnType = CGM.getContext().getFunctionType( FD->getReturnType(), ArgTypes, FunctionProtoType::ExtProtoInfo(CC)); if (!FD->isExternallyVisible()) SPFlags |= llvm::DISubprogram::SPFlagLocalToUnit; if (CGM.getLangOpts().Optimize) SPFlags |= llvm::DISubprogram::SPFlagOptimized; if (Stub) { Flags |= getCallSiteRelatedAttrs(); SPFlags |= llvm::DISubprogram::SPFlagDefinition; return DBuilder.createFunction( DContext, Name, LinkageName, Unit, Line, getOrCreateFunctionType(GD.getDecl(), FnType, Unit), 0, Flags, SPFlags, TParamsArray.get(), getFunctionDeclaration(FD)); } llvm::DISubprogram *SP = DBuilder.createTempFunctionFwdDecl( DContext, Name, LinkageName, Unit, Line, getOrCreateFunctionType(GD.getDecl(), FnType, Unit), 0, Flags, SPFlags, TParamsArray.get(), getFunctionDeclaration(FD)); const FunctionDecl *CanonDecl = FD->getCanonicalDecl(); FwdDeclReplaceMap.emplace_back(std::piecewise_construct, std::make_tuple(CanonDecl), std::make_tuple(SP)); return SP; } llvm::DISubprogram *CGDebugInfo::getFunctionForwardDeclaration(GlobalDecl GD) { return getFunctionFwdDeclOrStub(GD, /* Stub = */ false); } llvm::DISubprogram *CGDebugInfo::getFunctionStub(GlobalDecl GD) { return getFunctionFwdDeclOrStub(GD, /* Stub = */ true); } llvm::DIGlobalVariable * CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) { QualType T; StringRef Name, LinkageName; SourceLocation Loc = VD->getLocation(); llvm::DIFile *Unit = getOrCreateFile(Loc); llvm::DIScope *DContext = Unit; unsigned Line = getLineNumber(Loc); llvm::MDTuple *TemplateParameters = nullptr; collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, TemplateParameters, DContext); auto Align = getDeclAlignIfRequired(VD, CGM.getContext()); auto *GV = DBuilder.createTempGlobalVariableFwdDecl( DContext, Name, LinkageName, Unit, Line, getOrCreateType(T, Unit), !VD->isExternallyVisible(), nullptr, TemplateParameters, Align); FwdDeclReplaceMap.emplace_back( std::piecewise_construct, std::make_tuple(cast(VD->getCanonicalDecl())), std::make_tuple(static_cast(GV))); return GV; } llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) { // We only need a declaration (not a definition) of the type - so use whatever // we would otherwise do to get a type for a pointee. (forward declarations in // limited debug info, full definitions (if the type definition is available) // in unlimited debug info) if (const auto *TD = dyn_cast(D)) return getOrCreateType(CGM.getContext().getTypeDeclType(TD), getOrCreateFile(TD->getLocation())); auto I = DeclCache.find(D->getCanonicalDecl()); if (I != DeclCache.end()) { auto N = I->second; if (auto *GVE = dyn_cast_or_null(N)) return GVE->getVariable(); return dyn_cast_or_null(N); } // No definition for now. Emit a forward definition that might be // merged with a potential upcoming definition. if (const auto *FD = dyn_cast(D)) return getFunctionForwardDeclaration(FD); else if (const auto *VD = dyn_cast(D)) return getGlobalVariableForwardDeclaration(VD); return nullptr; } llvm::DISubprogram *CGDebugInfo::getFunctionDeclaration(const Decl *D) { if (!D || DebugKind <= codegenoptions::DebugLineTablesOnly) return nullptr; const auto *FD = dyn_cast(D); if (!FD) return nullptr; // Setup context. 
auto *S = getDeclContextDescriptor(D); auto MI = SPCache.find(FD->getCanonicalDecl()); if (MI == SPCache.end()) { if (const auto *MD = dyn_cast(FD->getCanonicalDecl())) { return CreateCXXMemberFunction(MD, getOrCreateFile(MD->getLocation()), cast(S)); } } if (MI != SPCache.end()) { auto *SP = dyn_cast_or_null(MI->second); if (SP && !SP->isDefinition()) return SP; } for (auto NextFD : FD->redecls()) { auto MI = SPCache.find(NextFD->getCanonicalDecl()); if (MI != SPCache.end()) { auto *SP = dyn_cast_or_null(MI->second); if (SP && !SP->isDefinition()) return SP; } } return nullptr; } llvm::DISubprogram *CGDebugInfo::getObjCMethodDeclaration( const Decl *D, llvm::DISubroutineType *FnType, unsigned LineNo, llvm::DINode::DIFlags Flags, llvm::DISubprogram::DISPFlags SPFlags) { if (!D || DebugKind <= codegenoptions::DebugLineTablesOnly) return nullptr; const auto *OMD = dyn_cast(D); if (!OMD) return nullptr; if (CGM.getCodeGenOpts().DwarfVersion < 5 && !OMD->isDirectMethod()) return nullptr; if (OMD->isDirectMethod()) SPFlags |= llvm::DISubprogram::SPFlagObjCDirect; // Starting with DWARF V5 method declarations are emitted as children of // the interface type. auto *ID = dyn_cast_or_null(D->getDeclContext()); if (!ID) ID = OMD->getClassInterface(); if (!ID) return nullptr; QualType QTy(ID->getTypeForDecl(), 0); auto It = TypeCache.find(QTy.getAsOpaquePtr()); if (It == TypeCache.end()) return nullptr; auto *InterfaceType = cast(It->second); llvm::DISubprogram *FD = DBuilder.createFunction( InterfaceType, getObjCMethodName(OMD), StringRef(), InterfaceType->getFile(), LineNo, FnType, LineNo, Flags, SPFlags); DBuilder.finalizeSubprogram(FD); ObjCMethodCache[ID].push_back({FD, OMD->isDirectMethod()}); return FD; } // getOrCreateFunctionType - Construct type. If it is a c++ method, include // implicit parameter "this". llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D, QualType FnType, llvm::DIFile *F) { if (!D || DebugKind <= codegenoptions::DebugLineTablesOnly) // Create fake but valid subroutine type. Otherwise -verify would fail, and // subprogram DIE will miss DW_AT_decl_file and DW_AT_decl_line fields. return DBuilder.createSubroutineType(DBuilder.getOrCreateTypeArray(None)); if (const auto *Method = dyn_cast(D)) return getOrCreateMethodType(Method, F, false); const auto *FTy = FnType->getAs(); CallingConv CC = FTy ? FTy->getCallConv() : CallingConv::CC_C; if (const auto *OMethod = dyn_cast(D)) { // Add "self" and "_cmd" SmallVector Elts; // First element is always return type. For 'void' functions it is NULL. QualType ResultTy = OMethod->getReturnType(); // Replace the instancetype keyword with the actual type. if (ResultTy == CGM.getContext().getObjCInstanceType()) ResultTy = CGM.getContext().getPointerType( QualType(OMethod->getClassInterface()->getTypeForDecl(), 0)); Elts.push_back(getOrCreateType(ResultTy, F)); // "self" pointer is always first argument. QualType SelfDeclTy; if (auto *SelfDecl = OMethod->getSelfDecl()) SelfDeclTy = SelfDecl->getType(); else if (auto *FPT = dyn_cast(FnType)) if (FPT->getNumParams() > 1) SelfDeclTy = FPT->getParamType(0); if (!SelfDeclTy.isNull()) Elts.push_back( CreateSelfType(SelfDeclTy, getOrCreateType(SelfDeclTy, F))); // "_cmd" pointer is always second argument. Elts.push_back(DBuilder.createArtificialType( getOrCreateType(CGM.getContext().getObjCSelType(), F))); // Get rest of the arguments. 
for (const auto *PI : OMethod->parameters()) Elts.push_back(getOrCreateType(PI->getType(), F)); // Variadic methods need a special marker at the end of the type list. if (OMethod->isVariadic()) Elts.push_back(DBuilder.createUnspecifiedParameter()); llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts); return DBuilder.createSubroutineType(EltTypeArray, llvm::DINode::FlagZero, getDwarfCC(CC)); } // Handle variadic function types; they need an additional // unspecified parameter. if (const auto *FD = dyn_cast(D)) if (FD->isVariadic()) { SmallVector EltTys; EltTys.push_back(getOrCreateType(FD->getReturnType(), F)); if (const auto *FPT = dyn_cast(FnType)) for (QualType ParamType : FPT->param_types()) EltTys.push_back(getOrCreateType(ParamType, F)); EltTys.push_back(DBuilder.createUnspecifiedParameter()); llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys); return DBuilder.createSubroutineType(EltTypeArray, llvm::DINode::FlagZero, getDwarfCC(CC)); } return cast(getOrCreateType(FnType, F)); } void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc, SourceLocation ScopeLoc, QualType FnType, llvm::Function *Fn, bool CurFuncIsThunk, CGBuilderTy &Builder) { StringRef Name; StringRef LinkageName; FnBeginRegionCount.push_back(LexicalBlockStack.size()); const Decl *D = GD.getDecl(); bool HasDecl = (D != nullptr); llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero; llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero; llvm::DIFile *Unit = getOrCreateFile(Loc); llvm::DIScope *FDContext = Unit; llvm::DINodeArray TParamsArray; if (!HasDecl) { // Use llvm function name. LinkageName = Fn->getName(); } else if (const auto *FD = dyn_cast(D)) { // If there is a subprogram for this function available then use it. auto FI = SPCache.find(FD->getCanonicalDecl()); if (FI != SPCache.end()) { auto *SP = dyn_cast_or_null(FI->second); if (SP && SP->isDefinition()) { LexicalBlockStack.emplace_back(SP); RegionMap[D].reset(SP); return; } } collectFunctionDeclProps(GD, Unit, Name, LinkageName, FDContext, TParamsArray, Flags); } else if (const auto *OMD = dyn_cast(D)) { Name = getObjCMethodName(OMD); Flags |= llvm::DINode::FlagPrototyped; } else if (isa(D) && GD.getDynamicInitKind() != DynamicInitKind::NoStub) { // This is a global initializer or atexit destructor for a global variable. Name = getDynamicInitializerName(cast(D), GD.getDynamicInitKind(), Fn); } else { Name = Fn->getName(); if (isa(D)) LinkageName = Name; Flags |= llvm::DINode::FlagPrototyped; } if (Name.startswith("\01")) Name = Name.substr(1); if (!HasDecl || D->isImplicit() || D->hasAttr()) { Flags |= llvm::DINode::FlagArtificial; // Artificial functions should not silently reuse CurLoc. CurLoc = SourceLocation(); } if (CurFuncIsThunk) Flags |= llvm::DINode::FlagThunk; if (Fn->hasLocalLinkage()) SPFlags |= llvm::DISubprogram::SPFlagLocalToUnit; if (CGM.getLangOpts().Optimize) SPFlags |= llvm::DISubprogram::SPFlagOptimized; llvm::DINode::DIFlags FlagsForDef = Flags | getCallSiteRelatedAttrs(); llvm::DISubprogram::DISPFlags SPFlagsForDef = SPFlags | llvm::DISubprogram::SPFlagDefinition; unsigned LineNo = getLineNumber(Loc); unsigned ScopeLine = getLineNumber(ScopeLoc); llvm::DISubroutineType *DIFnType = getOrCreateFunctionType(D, FnType, Unit); llvm::DISubprogram *Decl = nullptr; if (D) Decl = isa(D) ? 
getObjCMethodDeclaration(D, DIFnType, LineNo, Flags, SPFlags) : getFunctionDeclaration(D); // FIXME: The function declaration we're constructing here is mostly reusing // declarations from CXXMethodDecl and not constructing new ones for arbitrary // FunctionDecls. When/if we fix this we can have FDContext be TheCU/null for // all subprograms instead of the actual context since subprogram definitions // are emitted as CU level entities by the backend. llvm::DISubprogram *SP = DBuilder.createFunction( FDContext, Name, LinkageName, Unit, LineNo, DIFnType, ScopeLine, FlagsForDef, SPFlagsForDef, TParamsArray.get(), Decl); Fn->setSubprogram(SP); // We might get here with a VarDecl in the case we're generating // code for the initialization of globals. Do not record these decls // as they will overwrite the actual VarDecl Decl in the cache. if (HasDecl && isa(D)) DeclCache[D->getCanonicalDecl()].reset(SP); // Push the function onto the lexical block stack. LexicalBlockStack.emplace_back(SP); if (HasDecl) RegionMap[D].reset(SP); } void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc, QualType FnType, llvm::Function *Fn) { StringRef Name; StringRef LinkageName; const Decl *D = GD.getDecl(); if (!D) return; llvm::TimeTraceScope TimeScope("DebugFunction", [&]() { std::string Name; llvm::raw_string_ostream OS(Name); if (const NamedDecl *ND = dyn_cast(D)) ND->getNameForDiagnostic(OS, getPrintingPolicy(), /*Qualified=*/true); return Name; }); llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero; llvm::DIFile *Unit = getOrCreateFile(Loc); bool IsDeclForCallSite = Fn ? true : false; llvm::DIScope *FDContext = IsDeclForCallSite ? Unit : getDeclContextDescriptor(D); llvm::DINodeArray TParamsArray; if (isa(D)) { // If there is a DISubprogram for this function available then use it. collectFunctionDeclProps(GD, Unit, Name, LinkageName, FDContext, TParamsArray, Flags); } else if (const auto *OMD = dyn_cast(D)) { Name = getObjCMethodName(OMD); Flags |= llvm::DINode::FlagPrototyped; } else { llvm_unreachable("not a function or ObjC method"); } if (!Name.empty() && Name[0] == '\01') Name = Name.substr(1); if (D->isImplicit()) { Flags |= llvm::DINode::FlagArtificial; // Artificial functions without a location should not silently reuse CurLoc. if (Loc.isInvalid()) CurLoc = SourceLocation(); } unsigned LineNo = getLineNumber(Loc); unsigned ScopeLine = 0; llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero; if (CGM.getLangOpts().Optimize) SPFlags |= llvm::DISubprogram::SPFlagOptimized; llvm::DISubprogram *SP = DBuilder.createFunction( FDContext, Name, LinkageName, Unit, LineNo, getOrCreateFunctionType(D, FnType, Unit), ScopeLine, Flags, SPFlags, TParamsArray.get(), getFunctionDeclaration(D)); if (IsDeclForCallSite) Fn->setSubprogram(SP); DBuilder.finalizeSubprogram(SP); } void CGDebugInfo::EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke, QualType CalleeType, const FunctionDecl *CalleeDecl) { if (!CallOrInvoke) return; auto *Func = CallOrInvoke->getCalledFunction(); if (!Func) return; if (Func->getSubprogram()) return; // Do not emit a declaration subprogram for a builtin, a function with nodebug // attribute, or if call site info isn't required. Also, elide declarations // for functions with reserved names, as call site-related features aren't // interesting in this case (& also, the compiler may emit calls to these // functions without debug locations, which makes the verifier complain). 
if (CalleeDecl->getBuiltinID() != 0 || CalleeDecl->hasAttr() || getCallSiteRelatedAttrs() == llvm::DINode::FlagZero) return; if (const auto *Id = CalleeDecl->getIdentifier()) if (Id->isReservedName()) return; // If there is no DISubprogram attached to the function being called, // create the one describing the function in order to have complete // call site debug info. if (!CalleeDecl->isStatic() && !CalleeDecl->isInlined()) EmitFunctionDecl(CalleeDecl, CalleeDecl->getLocation(), CalleeType, Func); } void CGDebugInfo::EmitInlineFunctionStart(CGBuilderTy &Builder, GlobalDecl GD) { const auto *FD = cast(GD.getDecl()); // If there is a subprogram for this function available then use it. auto FI = SPCache.find(FD->getCanonicalDecl()); llvm::DISubprogram *SP = nullptr; if (FI != SPCache.end()) SP = dyn_cast_or_null(FI->second); if (!SP || !SP->isDefinition()) SP = getFunctionStub(GD); FnBeginRegionCount.push_back(LexicalBlockStack.size()); LexicalBlockStack.emplace_back(SP); setInlinedAt(Builder.getCurrentDebugLocation()); EmitLocation(Builder, FD->getLocation()); } void CGDebugInfo::EmitInlineFunctionEnd(CGBuilderTy &Builder) { assert(CurInlinedAt && "unbalanced inline scope stack"); EmitFunctionEnd(Builder, nullptr); setInlinedAt(llvm::DebugLoc(CurInlinedAt).getInlinedAt()); } void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) { // Update our current location setLocation(Loc); if (CurLoc.isInvalid() || CurLoc.isMacroID() || LexicalBlockStack.empty()) return; llvm::MDNode *Scope = LexicalBlockStack.back(); Builder.SetCurrentDebugLocation(llvm::DebugLoc::get( getLineNumber(CurLoc), getColumnNumber(CurLoc), Scope, CurInlinedAt)); } void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) { llvm::MDNode *Back = nullptr; if (!LexicalBlockStack.empty()) Back = LexicalBlockStack.back().get(); LexicalBlockStack.emplace_back(DBuilder.createLexicalBlock( cast(Back), getOrCreateFile(CurLoc), getLineNumber(CurLoc), getColumnNumber(CurLoc))); } void CGDebugInfo::AppendAddressSpaceXDeref( unsigned AddressSpace, SmallVectorImpl &Expr) const { Optional DWARFAddressSpace = CGM.getTarget().getDWARFAddressSpace(AddressSpace); if (!DWARFAddressSpace) return; Expr.push_back(llvm::dwarf::DW_OP_constu); Expr.push_back(DWARFAddressSpace.getValue()); Expr.push_back(llvm::dwarf::DW_OP_swap); Expr.push_back(llvm::dwarf::DW_OP_xderef); } void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc) { // Set our current location. setLocation(Loc); // Emit a line table change for the current location inside the new scope. Builder.SetCurrentDebugLocation( llvm::DebugLoc::get(getLineNumber(Loc), getColumnNumber(Loc), LexicalBlockStack.back(), CurInlinedAt)); if (DebugKind <= codegenoptions::DebugLineTablesOnly) return; // Create a new lexical block and push it on the stack. CreateLexicalBlock(Loc); } void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder, SourceLocation Loc) { assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!"); // Provide an entry in the line table for the end of the block. EmitLocation(Builder, Loc); if (DebugKind <= codegenoptions::DebugLineTablesOnly) return; LexicalBlockStack.pop_back(); } void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder, llvm::Function *Fn) { assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!"); unsigned RCount = FnBeginRegionCount.back(); assert(RCount <= LexicalBlockStack.size() && "Region stack mismatch"); // Pop all regions for this function. 
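// FnBeginRegionCount recorded the lexical-block depth at the point this
// function (or inlined scope) was entered, so unwinding back to RCount pops
// exactly the blocks opened for it; e.g. after EmitInlineFunctionStart pushed
// one scope, only that scope is popped here and the caller's stack is intact.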
while (LexicalBlockStack.size() != RCount) { // Provide an entry in the line table for the end of the block. EmitLocation(Builder, CurLoc); LexicalBlockStack.pop_back(); } FnBeginRegionCount.pop_back(); if (Fn && Fn->getSubprogram()) DBuilder.finalizeSubprogram(Fn->getSubprogram()); } CGDebugInfo::BlockByRefType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD, uint64_t *XOffset) { SmallVector EltTys; QualType FType; uint64_t FieldSize, FieldOffset; uint32_t FieldAlign; llvm::DIFile *Unit = getOrCreateFile(VD->getLocation()); QualType Type = VD->getType(); FieldOffset = 0; FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy); EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset)); EltTys.push_back(CreateMemberType(Unit, FType, "__forwarding", &FieldOffset)); FType = CGM.getContext().IntTy; EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset)); EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset)); bool HasCopyAndDispose = CGM.getContext().BlockRequiresCopying(Type, VD); if (HasCopyAndDispose) { FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy); EltTys.push_back( CreateMemberType(Unit, FType, "__copy_helper", &FieldOffset)); EltTys.push_back( CreateMemberType(Unit, FType, "__destroy_helper", &FieldOffset)); } bool HasByrefExtendedLayout; Qualifiers::ObjCLifetime Lifetime; if (CGM.getContext().getByrefLifetime(Type, Lifetime, HasByrefExtendedLayout) && HasByrefExtendedLayout) { FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy); EltTys.push_back( CreateMemberType(Unit, FType, "__byref_variable_layout", &FieldOffset)); } CharUnits Align = CGM.getContext().getDeclAlign(VD); if (Align > CGM.getContext().toCharUnitsFromBits( CGM.getTarget().getPointerAlign(0))) { CharUnits FieldOffsetInBytes = CGM.getContext().toCharUnitsFromBits(FieldOffset); CharUnits AlignedOffsetInBytes = FieldOffsetInBytes.alignTo(Align); CharUnits NumPaddingBytes = AlignedOffsetInBytes - FieldOffsetInBytes; if (NumPaddingBytes.isPositive()) { llvm::APInt pad(32, NumPaddingBytes.getQuantity()); FType = CGM.getContext().getConstantArrayType( CGM.getContext().CharTy, pad, nullptr, ArrayType::Normal, 0); EltTys.push_back(CreateMemberType(Unit, FType, "", &FieldOffset)); } } FType = Type; llvm::DIType *WrappedTy = getOrCreateType(FType, Unit); FieldSize = CGM.getContext().getTypeSize(FType); FieldAlign = CGM.getContext().toBits(Align); *XOffset = FieldOffset; llvm::DIType *FieldTy = DBuilder.createMemberType( Unit, VD->getName(), Unit, 0, FieldSize, FieldAlign, FieldOffset, llvm::DINode::FlagZero, WrappedTy); EltTys.push_back(FieldTy); FieldOffset += FieldSize; llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys); return {DBuilder.createStructType(Unit, "", Unit, 0, FieldOffset, 0, llvm::DINode::FlagZero, nullptr, Elements), WrappedTy}; } llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage, llvm::Optional ArgNo, CGBuilderTy &Builder, const bool UsePointerValue) { assert(CGM.getCodeGenOpts().hasReducedDebugInfo()); assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!"); if (VD->hasAttr()) return nullptr; bool Unwritten = VD->isImplicit() || (isa(VD->getDeclContext()) && cast(VD->getDeclContext())->isImplicit()); llvm::DIFile *Unit = nullptr; if (!Unwritten) Unit = getOrCreateFile(VD->getLocation()); llvm::DIType *Ty; uint64_t XOffset = 0; if (VD->hasAttr()) Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset).WrappedType; else Ty = getOrCreateType(VD->getType(), 
Unit); // If there is no debug info for this type then do not emit debug info // for this variable. if (!Ty) return nullptr; // Get location information. unsigned Line = 0; unsigned Column = 0; if (!Unwritten) { Line = getLineNumber(VD->getLocation()); Column = getColumnNumber(VD->getLocation()); } SmallVector Expr; llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero; if (VD->isImplicit()) Flags |= llvm::DINode::FlagArtificial; auto Align = getDeclAlignIfRequired(VD, CGM.getContext()); unsigned AddressSpace = CGM.getContext().getTargetAddressSpace(VD->getType()); AppendAddressSpaceXDeref(AddressSpace, Expr); // If this is implicit parameter of CXXThis or ObjCSelf kind, then give it an // object pointer flag. if (const auto *IPD = dyn_cast(VD)) { if (IPD->getParameterKind() == ImplicitParamDecl::CXXThis || IPD->getParameterKind() == ImplicitParamDecl::ObjCSelf) Flags |= llvm::DINode::FlagObjectPointer; } // Note: Older versions of clang used to emit byval references with an extra // DW_OP_deref, because they referenced the IR arg directly instead of // referencing an alloca. Newer versions of LLVM don't treat allocas // differently from other function arguments when used in a dbg.declare. auto *Scope = cast(LexicalBlockStack.back()); StringRef Name = VD->getName(); if (!Name.empty()) { if (VD->hasAttr()) { // Here, we need an offset *into* the alloca. CharUnits offset = CharUnits::fromQuantity(32); Expr.push_back(llvm::dwarf::DW_OP_plus_uconst); // offset of __forwarding field offset = CGM.getContext().toCharUnitsFromBits( CGM.getTarget().getPointerWidth(0)); Expr.push_back(offset.getQuantity()); Expr.push_back(llvm::dwarf::DW_OP_deref); Expr.push_back(llvm::dwarf::DW_OP_plus_uconst); // offset of x field offset = CGM.getContext().toCharUnitsFromBits(XOffset); Expr.push_back(offset.getQuantity()); } } else if (const auto *RT = dyn_cast(VD->getType())) { // If VD is an anonymous union then Storage represents value for // all union fields. const RecordDecl *RD = RT->getDecl(); if (RD->isUnion() && RD->isAnonymousStructOrUnion()) { // GDB has trouble finding local variables in anonymous unions, so we emit // artificial local variables for each of the members. // // FIXME: Remove this code as soon as GDB supports this. // The debug info verifier in LLVM operates based on the assumption that a // variable has the same size as its storage and we had to disable the // check for artificial variables. for (const auto *Field : RD->fields()) { llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit); StringRef FieldName = Field->getName(); // Ignore unnamed fields. Do not ignore unnamed records. if (FieldName.empty() && !isa(Field->getType())) continue; // Use VarDecl's Tag, Scope and Line number. auto FieldAlign = getDeclAlignIfRequired(Field, CGM.getContext()); auto *D = DBuilder.createAutoVariable( Scope, FieldName, Unit, Line, FieldTy, CGM.getLangOpts().Optimize, Flags | llvm::DINode::FlagArtificial, FieldAlign); // Insert an llvm.dbg.declare into the current block. DBuilder.insertDeclare( Storage, D, DBuilder.createExpression(Expr), llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt), Builder.GetInsertBlock()); } } } // Clang stores the sret pointer provided by the caller in a static alloca. // Use DW_OP_deref to tell the debugger to load the pointer and treat it as // the address of the variable. 
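// For example, for a function returning a large struct through an sret
// pointer, the variable's location ends up as DIExpression(DW_OP_deref): the
// debugger loads the stored pointer and treats the loaded value as the address
// of the returned object. (Compare the __block case above, where the
// expression also walks through the __forwarding pointer.)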
if (UsePointerValue) { assert(std::find(Expr.begin(), Expr.end(), llvm::dwarf::DW_OP_deref) == Expr.end() && "Debug info already contains DW_OP_deref."); Expr.push_back(llvm::dwarf::DW_OP_deref); } // Create the descriptor for the variable. auto *D = ArgNo ? DBuilder.createParameterVariable( Scope, Name, *ArgNo, Unit, Line, Ty, CGM.getLangOpts().Optimize, Flags) : DBuilder.createAutoVariable(Scope, Name, Unit, Line, Ty, CGM.getLangOpts().Optimize, Flags, Align); // Insert an llvm.dbg.declare into the current block. DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr), llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt), Builder.GetInsertBlock()); return D; } llvm::DILocalVariable * CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder, const bool UsePointerValue) { assert(CGM.getCodeGenOpts().hasReducedDebugInfo()); return EmitDeclare(VD, Storage, llvm::None, Builder, UsePointerValue); } void CGDebugInfo::EmitLabel(const LabelDecl *D, CGBuilderTy &Builder) { assert(CGM.getCodeGenOpts().hasReducedDebugInfo()); assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!"); if (D->hasAttr()) return; auto *Scope = cast(LexicalBlockStack.back()); llvm::DIFile *Unit = getOrCreateFile(D->getLocation()); // Get location information. unsigned Line = getLineNumber(D->getLocation()); unsigned Column = getColumnNumber(D->getLocation()); StringRef Name = D->getName(); // Create the descriptor for the label. auto *L = DBuilder.createLabel(Scope, Name, Unit, Line, CGM.getLangOpts().Optimize); // Insert an llvm.dbg.label into the current block. DBuilder.insertLabel(L, llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt), Builder.GetInsertBlock()); } llvm::DIType *CGDebugInfo::CreateSelfType(const QualType &QualTy, llvm::DIType *Ty) { llvm::DIType *CachedTy = getTypeOrNull(QualTy); if (CachedTy) Ty = CachedTy; return DBuilder.createObjectPointerType(Ty); } void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable( const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder, const CGBlockInfo &blockInfo, llvm::Instruction *InsertPoint) { assert(CGM.getCodeGenOpts().hasReducedDebugInfo()); assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!"); if (Builder.GetInsertBlock() == nullptr) return; if (VD->hasAttr()) return; bool isByRef = VD->hasAttr(); uint64_t XOffset = 0; llvm::DIFile *Unit = getOrCreateFile(VD->getLocation()); llvm::DIType *Ty; if (isByRef) Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset).WrappedType; else Ty = getOrCreateType(VD->getType(), Unit); // Self is passed along as an implicit non-arg variable in a // block. Mark it as the object pointer. if (const auto *IPD = dyn_cast(VD)) if (IPD->getParameterKind() == ImplicitParamDecl::ObjCSelf) Ty = CreateSelfType(VD->getType(), Ty); // Get location information. 
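// For example, a __block variable captured at byte offset 32 of the block
// literal, with its storage at byte offset 24 of its byref wrapper, gets
// roughly DIExpression(DW_OP_deref, DW_OP_plus_uconst 32, DW_OP_deref,
// DW_OP_plus_uconst 8 /* __forwarding */, DW_OP_deref, DW_OP_plus_uconst 24)
// from the code below (offsets shown for a typical LP64 target).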
unsigned Line = getLineNumber(VD->getLocation()); unsigned Column = getColumnNumber(VD->getLocation()); const llvm::DataLayout &target = CGM.getDataLayout(); CharUnits offset = CharUnits::fromQuantity( target.getStructLayout(blockInfo.StructureType) ->getElementOffset(blockInfo.getCapture(VD).getIndex())); SmallVector addr; addr.push_back(llvm::dwarf::DW_OP_deref); addr.push_back(llvm::dwarf::DW_OP_plus_uconst); addr.push_back(offset.getQuantity()); if (isByRef) { addr.push_back(llvm::dwarf::DW_OP_deref); addr.push_back(llvm::dwarf::DW_OP_plus_uconst); // offset of __forwarding field offset = CGM.getContext().toCharUnitsFromBits(target.getPointerSizeInBits(0)); addr.push_back(offset.getQuantity()); addr.push_back(llvm::dwarf::DW_OP_deref); addr.push_back(llvm::dwarf::DW_OP_plus_uconst); // offset of x field offset = CGM.getContext().toCharUnitsFromBits(XOffset); addr.push_back(offset.getQuantity()); } // Create the descriptor for the variable. auto Align = getDeclAlignIfRequired(VD, CGM.getContext()); auto *D = DBuilder.createAutoVariable( cast(LexicalBlockStack.back()), VD->getName(), Unit, Line, Ty, false, llvm::DINode::FlagZero, Align); // Insert an llvm.dbg.declare into the current block. auto DL = llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back(), CurInlinedAt); auto *Expr = DBuilder.createExpression(addr); if (InsertPoint) DBuilder.insertDeclare(Storage, D, Expr, DL, InsertPoint); else DBuilder.insertDeclare(Storage, D, Expr, DL, Builder.GetInsertBlock()); } void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI, unsigned ArgNo, CGBuilderTy &Builder) { assert(CGM.getCodeGenOpts().hasReducedDebugInfo()); EmitDeclare(VD, AI, ArgNo, Builder); } namespace { struct BlockLayoutChunk { uint64_t OffsetInBits; const BlockDecl::Capture *Capture; }; bool operator<(const BlockLayoutChunk &l, const BlockLayoutChunk &r) { return l.OffsetInBits < r.OffsetInBits; } } // namespace void CGDebugInfo::collectDefaultFieldsForBlockLiteralDeclare( const CGBlockInfo &Block, const ASTContext &Context, SourceLocation Loc, const llvm::StructLayout &BlockLayout, llvm::DIFile *Unit, SmallVectorImpl &Fields) { // Blocks in OpenCL have unique constraints which make the standard fields // redundant while requiring size and align fields for enqueue_kernel. See // initializeForBlockHeader in CGBlocks.cpp if (CGM.getLangOpts().OpenCL) { Fields.push_back(createFieldType("__size", Context.IntTy, Loc, AS_public, BlockLayout.getElementOffsetInBits(0), Unit, Unit)); Fields.push_back(createFieldType("__align", Context.IntTy, Loc, AS_public, BlockLayout.getElementOffsetInBits(1), Unit, Unit)); } else { Fields.push_back(createFieldType("__isa", Context.VoidPtrTy, Loc, AS_public, BlockLayout.getElementOffsetInBits(0), Unit, Unit)); Fields.push_back(createFieldType("__flags", Context.IntTy, Loc, AS_public, BlockLayout.getElementOffsetInBits(1), Unit, Unit)); Fields.push_back( createFieldType("__reserved", Context.IntTy, Loc, AS_public, BlockLayout.getElementOffsetInBits(2), Unit, Unit)); auto *FnTy = Block.getBlockExpr()->getFunctionType(); auto FnPtrType = CGM.getContext().getPointerType(FnTy->desugar()); Fields.push_back(createFieldType("__FuncPtr", FnPtrType, Loc, AS_public, BlockLayout.getElementOffsetInBits(3), Unit, Unit)); Fields.push_back(createFieldType( "__descriptor", Context.getPointerType(Block.NeedsCopyDispose ? 
Context.getBlockDescriptorExtendedType() : Context.getBlockDescriptorType()), Loc, AS_public, BlockLayout.getElementOffsetInBits(4), Unit, Unit)); } } void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block, StringRef Name, unsigned ArgNo, llvm::AllocaInst *Alloca, CGBuilderTy &Builder) { assert(CGM.getCodeGenOpts().hasReducedDebugInfo()); ASTContext &C = CGM.getContext(); const BlockDecl *blockDecl = block.getBlockDecl(); // Collect some general information about the block's location. SourceLocation loc = blockDecl->getCaretLocation(); llvm::DIFile *tunit = getOrCreateFile(loc); unsigned line = getLineNumber(loc); unsigned column = getColumnNumber(loc); // Build the debug-info type for the block literal. getDeclContextDescriptor(blockDecl); const llvm::StructLayout *blockLayout = CGM.getDataLayout().getStructLayout(block.StructureType); SmallVector fields; collectDefaultFieldsForBlockLiteralDeclare(block, C, loc, *blockLayout, tunit, fields); // We want to sort the captures by offset, not because DWARF // requires this, but because we're paranoid about debuggers. SmallVector chunks; // 'this' capture. if (blockDecl->capturesCXXThis()) { BlockLayoutChunk chunk; chunk.OffsetInBits = blockLayout->getElementOffsetInBits(block.CXXThisIndex); chunk.Capture = nullptr; chunks.push_back(chunk); } // Variable captures. for (const auto &capture : blockDecl->captures()) { const VarDecl *variable = capture.getVariable(); const CGBlockInfo::Capture &captureInfo = block.getCapture(variable); // Ignore constant captures. if (captureInfo.isConstant()) continue; BlockLayoutChunk chunk; chunk.OffsetInBits = blockLayout->getElementOffsetInBits(captureInfo.getIndex()); chunk.Capture = &capture; chunks.push_back(chunk); } // Sort by offset. llvm::array_pod_sort(chunks.begin(), chunks.end()); for (const BlockLayoutChunk &Chunk : chunks) { uint64_t offsetInBits = Chunk.OffsetInBits; const BlockDecl::Capture *capture = Chunk.Capture; // If we have a null capture, this must be the C++ 'this' capture. if (!capture) { QualType type; if (auto *Method = cast_or_null(blockDecl->getNonClosureContext())) type = Method->getThisType(); else if (auto *RDecl = dyn_cast(blockDecl->getParent())) type = QualType(RDecl->getTypeForDecl(), 0); else llvm_unreachable("unexpected block declcontext"); fields.push_back(createFieldType("this", type, loc, AS_public, offsetInBits, tunit, tunit)); continue; } const VarDecl *variable = capture->getVariable(); StringRef name = variable->getName(); llvm::DIType *fieldType; if (capture->isByRef()) { TypeInfo PtrInfo = C.getTypeInfo(C.VoidPtrTy); auto Align = PtrInfo.AlignIsRequired ? PtrInfo.Align : 0; // FIXME: This recomputes the layout of the BlockByRefWrapper. 
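// On a typical LP64 target, the byref wrapper built by
// EmitTypeForVarWithBlocksAttr lays out as: __isa at 0, __forwarding at 8,
// __flags at 16, __size at 20, optional copy/dispose helpers next, and the
// variable itself last (e.g. at 24 when no helpers or extra padding are
// needed); that final offset is the XOffset consumed here.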
uint64_t xoffset; fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset).BlockByRefWrapper; fieldType = DBuilder.createPointerType(fieldType, PtrInfo.Width); fieldType = DBuilder.createMemberType(tunit, name, tunit, line, PtrInfo.Width, Align, offsetInBits, llvm::DINode::FlagZero, fieldType); } else { auto Align = getDeclAlignIfRequired(variable, CGM.getContext()); fieldType = createFieldType(name, variable->getType(), loc, AS_public, offsetInBits, Align, tunit, tunit); } fields.push_back(fieldType); } SmallString<36> typeName; llvm::raw_svector_ostream(typeName) << "__block_literal_" << CGM.getUniqueBlockCount(); llvm::DINodeArray fieldsArray = DBuilder.getOrCreateArray(fields); llvm::DIType *type = DBuilder.createStructType(tunit, typeName.str(), tunit, line, CGM.getContext().toBits(block.BlockSize), 0, llvm::DINode::FlagZero, nullptr, fieldsArray); type = DBuilder.createPointerType(type, CGM.PointerWidthInBits); // Get overall information about the block. llvm::DINode::DIFlags flags = llvm::DINode::FlagArtificial; auto *scope = cast(LexicalBlockStack.back()); // Create the descriptor for the parameter. auto *debugVar = DBuilder.createParameterVariable( scope, Name, ArgNo, tunit, line, type, CGM.getLangOpts().Optimize, flags); // Insert an llvm.dbg.declare into the current block. DBuilder.insertDeclare(Alloca, debugVar, DBuilder.createExpression(), llvm::DebugLoc::get(line, column, scope, CurInlinedAt), Builder.GetInsertBlock()); } llvm::DIDerivedType * CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) { if (!D || !D->isStaticDataMember()) return nullptr; auto MI = StaticDataMemberCache.find(D->getCanonicalDecl()); if (MI != StaticDataMemberCache.end()) { assert(MI->second && "Static data member declaration should still exist"); return MI->second; } // If the member wasn't found in the cache, lazily construct and add it to the // type (used when a limited form of the type is emitted). auto DC = D->getDeclContext(); auto *Ctxt = cast(getDeclContextDescriptor(D)); return CreateRecordStaticField(D, Ctxt, cast(DC)); } llvm::DIGlobalVariableExpression *CGDebugInfo::CollectAnonRecordDecls( const RecordDecl *RD, llvm::DIFile *Unit, unsigned LineNo, StringRef LinkageName, llvm::GlobalVariable *Var, llvm::DIScope *DContext) { llvm::DIGlobalVariableExpression *GVE = nullptr; for (const auto *Field : RD->fields()) { llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit); StringRef FieldName = Field->getName(); // Ignore unnamed fields, but recurse into anonymous records. if (FieldName.empty()) { if (const auto *RT = dyn_cast(Field->getType())) GVE = CollectAnonRecordDecls(RT->getDecl(), Unit, LineNo, LinkageName, Var, DContext); continue; } // Use VarDecl's Tag, Scope and Line number. GVE = DBuilder.createGlobalVariableExpression( DContext, FieldName, LinkageName, Unit, LineNo, FieldTy, Var->hasLocalLinkage()); Var->addDebugInfo(GVE); } return GVE; } void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var, const VarDecl *D) { assert(CGM.getCodeGenOpts().hasReducedDebugInfo()); if (D->hasAttr()) return; llvm::TimeTraceScope TimeScope("DebugGlobalVariable", [&]() { std::string Name; llvm::raw_string_ostream OS(Name); D->getNameForDiagnostic(OS, getPrintingPolicy(), /*Qualified=*/true); return Name; }); // If we already created a DIGlobalVariable for this declaration, just attach // it to the llvm::GlobalVariable. 
auto Cached = DeclCache.find(D->getCanonicalDecl()); if (Cached != DeclCache.end()) return Var->addDebugInfo( cast(Cached->second)); // Create global variable debug descriptor. llvm::DIFile *Unit = nullptr; llvm::DIScope *DContext = nullptr; unsigned LineNo; StringRef DeclName, LinkageName; QualType T; llvm::MDTuple *TemplateParameters = nullptr; collectVarDeclProps(D, Unit, LineNo, T, DeclName, LinkageName, TemplateParameters, DContext); // Attempt to store one global variable for the declaration - even if we // emit a lot of fields. llvm::DIGlobalVariableExpression *GVE = nullptr; // If this is an anonymous union then we'll want to emit a global // variable for each member of the anonymous union so that it's possible // to find the name of any field in the union. if (T->isUnionType() && DeclName.empty()) { const RecordDecl *RD = T->castAs()->getDecl(); assert(RD->isAnonymousStructOrUnion() && "unnamed non-anonymous struct or union?"); GVE = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext); } else { auto Align = getDeclAlignIfRequired(D, CGM.getContext()); SmallVector Expr; unsigned AddressSpace = CGM.getContext().getTargetAddressSpace(D->getType()); if (CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) { if (D->hasAttr()) AddressSpace = CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared); else if (D->hasAttr()) AddressSpace = CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant); } AppendAddressSpaceXDeref(AddressSpace, Expr); GVE = DBuilder.createGlobalVariableExpression( DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit), Var->hasLocalLinkage(), true, Expr.empty() ? nullptr : DBuilder.createExpression(Expr), getOrCreateStaticDataMemberDeclarationOrNull(D), TemplateParameters, Align); Var->addDebugInfo(GVE); } DeclCache[D->getCanonicalDecl()].reset(GVE); } void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) { assert(CGM.getCodeGenOpts().hasReducedDebugInfo()); if (VD->hasAttr()) return; llvm::TimeTraceScope TimeScope("DebugConstGlobalVariable", [&]() { std::string Name; llvm::raw_string_ostream OS(Name); VD->getNameForDiagnostic(OS, getPrintingPolicy(), /*Qualified=*/true); return Name; }); auto Align = getDeclAlignIfRequired(VD, CGM.getContext()); // Create the descriptor for the variable. llvm::DIFile *Unit = getOrCreateFile(VD->getLocation()); StringRef Name = VD->getName(); llvm::DIType *Ty = getOrCreateType(VD->getType(), Unit); if (const auto *ECD = dyn_cast(VD)) { const auto *ED = cast(ECD->getDeclContext()); assert(isa(ED->getTypeForDecl()) && "Enum without EnumType?"); if (CGM.getCodeGenOpts().EmitCodeView) { // If CodeView, emit enums as global variables, unless they are defined // inside a class. We do this because MSVC doesn't emit S_CONSTANTs for // enums in classes, and because it is difficult to attach this scope // information to the global variable. if (isa(ED->getDeclContext())) return; } else { // If not CodeView, emit DW_TAG_enumeration_type if necessary. For // example: for "enum { ZERO };", a DW_TAG_enumeration_type is created the // first time `ZERO` is referenced in a function. llvm::DIType *EDTy = getOrCreateType(QualType(ED->getTypeForDecl(), 0), Unit); assert (EDTy->getTag() == llvm::dwarf::DW_TAG_enumeration_type); (void)EDTy; return; } } llvm::DIScope *DContext = nullptr; // Do not emit separate definitions for function local consts. if (isa(VD->getDeclContext())) return; // Emit definition for static members in CodeView. 
VD = cast(VD->getCanonicalDecl()); auto *VarD = dyn_cast(VD); if (VarD && VarD->isStaticDataMember()) { auto *RD = cast(VarD->getDeclContext()); getDeclContextDescriptor(VarD); // Ensure that the type is retained even though it's otherwise unreferenced. // // FIXME: This is probably unnecessary, since Ty should reference RD // through its scope. RetainedTypes.push_back( CGM.getContext().getRecordType(RD).getAsOpaquePtr()); if (!CGM.getCodeGenOpts().EmitCodeView) return; // Use the global scope for static members. DContext = getContextDescriptor( cast(CGM.getContext().getTranslationUnitDecl()), TheCU); } else { DContext = getDeclContextDescriptor(VD); } auto &GV = DeclCache[VD]; if (GV) return; llvm::DIExpression *InitExpr = nullptr; if (CGM.getContext().getTypeSize(VD->getType()) <= 64) { // FIXME: Add a representation for integer constants wider than 64 bits. if (Init.isInt()) InitExpr = DBuilder.createConstantValueExpression(Init.getInt().getExtValue()); else if (Init.isFloat()) InitExpr = DBuilder.createConstantValueExpression( Init.getFloat().bitcastToAPInt().getZExtValue()); } llvm::MDTuple *TemplateParameters = nullptr; if (isa(VD)) if (VarD) { llvm::DINodeArray parameterNodes = CollectVarTemplateParams(VarD, &*Unit); TemplateParameters = parameterNodes.get(); } GV.reset(DBuilder.createGlobalVariableExpression( DContext, Name, StringRef(), Unit, getLineNumber(VD->getLocation()), Ty, true, true, InitExpr, getOrCreateStaticDataMemberDeclarationOrNull(VarD), TemplateParameters, Align)); } void CGDebugInfo::EmitExternalVariable(llvm::GlobalVariable *Var, const VarDecl *D) { assert(CGM.getCodeGenOpts().hasReducedDebugInfo()); if (D->hasAttr()) return; auto Align = getDeclAlignIfRequired(D, CGM.getContext()); llvm::DIFile *Unit = getOrCreateFile(D->getLocation()); StringRef Name = D->getName(); llvm::DIType *Ty = getOrCreateType(D->getType(), Unit); llvm::DIScope *DContext = getDeclContextDescriptor(D); llvm::DIGlobalVariableExpression *GVE = DBuilder.createGlobalVariableExpression( DContext, Name, StringRef(), Unit, getLineNumber(D->getLocation()), Ty, false, false, nullptr, nullptr, nullptr, Align); Var->addDebugInfo(GVE); } llvm::DIScope *CGDebugInfo::getCurrentContextDescriptor(const Decl *D) { if (!LexicalBlockStack.empty()) return LexicalBlockStack.back(); llvm::DIScope *Mod = getParentModuleOrNull(D); return getContextDescriptor(D, Mod ? Mod : TheCU); } void CGDebugInfo::EmitUsingDirective(const UsingDirectiveDecl &UD) { if (!CGM.getCodeGenOpts().hasReducedDebugInfo()) return; const NamespaceDecl *NSDecl = UD.getNominatedNamespace(); if (!NSDecl->isAnonymousNamespace() || CGM.getCodeGenOpts().DebugExplicitImport) { auto Loc = UD.getLocation(); DBuilder.createImportedModule( getCurrentContextDescriptor(cast(UD.getDeclContext())), getOrCreateNamespace(NSDecl), getOrCreateFile(Loc), getLineNumber(Loc)); } } void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) { if (!CGM.getCodeGenOpts().hasReducedDebugInfo()) return; assert(UD.shadow_size() && "We shouldn't be codegening an invalid UsingDecl containing no decls"); // Emitting one decl is sufficient - debuggers can detect that this is an // overloaded name & provide lookup for all the overloads. 
const UsingShadowDecl &USD = **UD.shadow_begin(); // FIXME: Skip functions with undeduced auto return type for now since we // don't currently have the plumbing for separate declarations & definitions // of free functions and mismatched types (auto in the declaration, concrete // return type in the definition) if (const auto *FD = dyn_cast(USD.getUnderlyingDecl())) if (const auto *AT = FD->getType()->castAs()->getContainedAutoType()) if (AT->getDeducedType().isNull()) return; if (llvm::DINode *Target = getDeclarationOrDefinition(USD.getUnderlyingDecl())) { auto Loc = USD.getLocation(); DBuilder.createImportedDeclaration( getCurrentContextDescriptor(cast(USD.getDeclContext())), Target, getOrCreateFile(Loc), getLineNumber(Loc)); } } void CGDebugInfo::EmitImportDecl(const ImportDecl &ID) { if (CGM.getCodeGenOpts().getDebuggerTuning() != llvm::DebuggerKind::LLDB) return; if (Module *M = ID.getImportedModule()) { auto Info = ASTSourceDescriptor(*M); auto Loc = ID.getLocation(); DBuilder.createImportedDeclaration( getCurrentContextDescriptor(cast(ID.getDeclContext())), getOrCreateModuleRef(Info, DebugTypeExtRefs), getOrCreateFile(Loc), getLineNumber(Loc)); } } llvm::DIImportedEntity * CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) { if (!CGM.getCodeGenOpts().hasReducedDebugInfo()) return nullptr; auto &VH = NamespaceAliasCache[&NA]; if (VH) return cast(VH); llvm::DIImportedEntity *R; auto Loc = NA.getLocation(); if (const auto *Underlying = dyn_cast(NA.getAliasedNamespace())) // This could cache & dedup here rather than relying on metadata deduping. R = DBuilder.createImportedDeclaration( getCurrentContextDescriptor(cast(NA.getDeclContext())), EmitNamespaceAlias(*Underlying), getOrCreateFile(Loc), getLineNumber(Loc), NA.getName()); else R = DBuilder.createImportedDeclaration( getCurrentContextDescriptor(cast(NA.getDeclContext())), getOrCreateNamespace(cast(NA.getAliasedNamespace())), getOrCreateFile(Loc), getLineNumber(Loc), NA.getName()); VH.reset(R); return R; } llvm::DINamespace * CGDebugInfo::getOrCreateNamespace(const NamespaceDecl *NSDecl) { // Don't canonicalize the NamespaceDecl here: The DINamespace will be uniqued // if necessary, and this way multiple declarations of the same namespace in // different parent modules stay distinct. auto I = NamespaceCache.find(NSDecl); if (I != NamespaceCache.end()) return cast(I->second); llvm::DIScope *Context = getDeclContextDescriptor(NSDecl); // Don't trust the context if it is a DIModule (see comment above). llvm::DINamespace *NS = DBuilder.createNameSpace(Context, NSDecl->getName(), NSDecl->isInline()); NamespaceCache[NSDecl].reset(NS); return NS; } void CGDebugInfo::setDwoId(uint64_t Signature) { assert(TheCU && "no main compile unit"); TheCU->setDWOId(Signature); } void CGDebugInfo::finalize() { // Creating types might create further types - invalidating the current // element and the size(), so don't cache/reference them. for (size_t i = 0; i != ObjCInterfaceCache.size(); ++i) { ObjCInterfaceCacheEntry E = ObjCInterfaceCache[i]; llvm::DIType *Ty = E.Type->getDecl()->getDefinition() ? CreateTypeDefinition(E.Type, E.Unit) : E.Decl; DBuilder.replaceTemporary(llvm::TempDIType(E.Decl), Ty); } // Add methods to interface. 
for (const auto &P : ObjCMethodCache) { if (P.second.empty()) continue; QualType QTy(P.first->getTypeForDecl(), 0); auto It = TypeCache.find(QTy.getAsOpaquePtr()); assert(It != TypeCache.end()); llvm::DICompositeType *InterfaceDecl = cast(It->second); auto CurElts = InterfaceDecl->getElements(); SmallVector EltTys(CurElts.begin(), CurElts.end()); // For DWARF v4 or earlier, only add objc_direct methods. for (auto &SubprogramDirect : P.second) if (CGM.getCodeGenOpts().DwarfVersion >= 5 || SubprogramDirect.getInt()) EltTys.push_back(SubprogramDirect.getPointer()); llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys); DBuilder.replaceArrays(InterfaceDecl, Elements); } for (const auto &P : ReplaceMap) { assert(P.second); auto *Ty = cast(P.second); assert(Ty->isForwardDecl()); auto It = TypeCache.find(P.first); assert(It != TypeCache.end()); assert(It->second); DBuilder.replaceTemporary(llvm::TempDIType(Ty), cast(It->second)); } for (const auto &P : FwdDeclReplaceMap) { assert(P.second); llvm::TempMDNode FwdDecl(cast(P.second)); llvm::Metadata *Repl; auto It = DeclCache.find(P.first); // If there has been no definition for the declaration, call RAUW // with ourselves, that will destroy the temporary MDNode and // replace it with a standard one, avoiding leaking memory. if (It == DeclCache.end()) Repl = P.second; else Repl = It->second; if (auto *GVE = dyn_cast_or_null(Repl)) Repl = GVE->getVariable(); DBuilder.replaceTemporary(std::move(FwdDecl), cast(Repl)); } // We keep our own list of retained types, because we need to look // up the final type in the type cache. for (auto &RT : RetainedTypes) if (auto MD = TypeCache[RT]) DBuilder.retainType(cast(MD)); DBuilder.finalize(); } void CGDebugInfo::EmitExplicitCastType(QualType Ty) { if (!CGM.getCodeGenOpts().hasReducedDebugInfo()) return; if (auto *DieTy = getOrCreateType(Ty, TheCU->getFile())) // Don't ignore in case of explicit cast where it is referenced indirectly. DBuilder.retainType(DieTy); } llvm::DebugLoc CGDebugInfo::SourceLocToDebugLoc(SourceLocation Loc) { if (LexicalBlockStack.empty()) return llvm::DebugLoc(); llvm::MDNode *Scope = LexicalBlockStack.back(); return llvm::DebugLoc::get(getLineNumber(Loc), getColumnNumber(Loc), Scope); } llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const { // Call site-related attributes are only useful in optimized programs, and // when there's a possibility of debugging backtraces. if (!CGM.getLangOpts().Optimize || DebugKind == codegenoptions::NoDebugInfo || DebugKind == codegenoptions::LocTrackingOnly) return llvm::DINode::FlagZero; // Call site-related attributes are available in DWARF v5. Some debuggers, // while not fully DWARF v5-compliant, may accept these attributes as if they // were part of DWARF v4. 
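// In practice this means DW_AT_call_* information (FlagAllCallsDescribed) is
// emitted only for optimized builds with full debug info, and only when DWARF
// v5 was requested or when targeting DWARF v4 with LLDB or GDB tuning, as
// checked below.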
  bool SupportsDWARFv4Ext =
      CGM.getCodeGenOpts().DwarfVersion == 4 &&
      (CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB ||
       CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::GDB);

  if (!SupportsDWARFv4Ext && CGM.getCodeGenOpts().DwarfVersion < 5)
    return llvm::DINode::FlagZero;

  return llvm::DINode::FlagAllCallsDescribed;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
index 8ce488f35dd3..8f79cc77f0e6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -1,2497 +1,2496 @@
//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"

using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
return CGOpts.OptimizationLevel != 0; } CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext) : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()), Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(), CGBuilderInserterTy(this)), SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()), DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm), ShouldEmitLifetimeMarkers( shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) { if (!suppressNewContext) CGM.getCXXABI().getMangleContext().startNewFunction(); SetFastMathFlags(CurFPFeatures); SetFPModel(); } CodeGenFunction::~CodeGenFunction() { assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup"); if (getLangOpts().OpenMP && CurFn) CGM.getOpenMPRuntime().functionFinished(*this); // If we have an OpenMPIRBuilder we want to finalize functions (incl. // outlining etc) at some point. Doing it once the function codegen is done // seems to be a reasonable spot. We do it here, as opposed to the deletion // time of the CodeGenModule, because we have to ensure the IR has not yet // been "emitted" to the outside, thus, modifications are still sensible. if (CGM.getLangOpts().OpenMPIRBuilder) CGM.getOpenMPRuntime().getOMPBuilder().finalize(); } // Map the LangOption for exception behavior into // the corresponding enum in the IR. llvm::fp::ExceptionBehavior clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) { switch (Kind) { case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore; case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap; case LangOptions::FPE_Strict: return llvm::fp::ebStrict; } llvm_unreachable("Unsupported FP Exception Behavior"); } void CodeGenFunction::SetFPModel() { llvm::RoundingMode RM = getLangOpts().getFPRoundingMode(); auto fpExceptionBehavior = ToConstrainedExceptMD( getLangOpts().getFPExceptionMode()); Builder.setDefaultConstrainedRounding(RM); Builder.setDefaultConstrainedExcept(fpExceptionBehavior); Builder.setIsFPConstrained(fpExceptionBehavior != llvm::fp::ebIgnore || RM != llvm::RoundingMode::NearestTiesToEven); } void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) { llvm::FastMathFlags FMF; FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate()); FMF.setNoNaNs(FPFeatures.getNoHonorNaNs()); FMF.setNoInfs(FPFeatures.getNoHonorInfs()); FMF.setNoSignedZeros(FPFeatures.getNoSignedZero()); FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal()); FMF.setApproxFunc(FPFeatures.getAllowApproxFunc()); FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement()); Builder.setFastMathFlags(FMF); } CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures) : CGF(CGF), OldFPFeatures(CGF.CurFPFeatures) { CGF.CurFPFeatures = FPFeatures; if (OldFPFeatures == FPFeatures) return; FMFGuard.emplace(CGF.Builder); llvm::RoundingMode NewRoundingBehavior = static_cast(FPFeatures.getRoundingMode()); CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior); auto NewExceptionBehavior = ToConstrainedExceptMD(static_cast( FPFeatures.getFPExceptionMode())); CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior); CGF.SetFastMathFlags(FPFeatures); assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() || isa(CGF.CurFuncDecl) || isa(CGF.CurFuncDecl) || (NewExceptionBehavior == llvm::fp::ebIgnore && NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) && "FPConstrained should be enabled on entire function"); auto mergeFnAttrValue = [&](StringRef Name, bool Value) { auto 
OldValue = CGF.CurFn->getFnAttribute(Name).getValueAsString() == "true"; auto NewValue = OldValue & Value; if (OldValue != NewValue) CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue)); }; mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs()); mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs()); mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero()); mergeFnAttrValue("unsafe-fp-math", FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() && FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero()); } CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() { CGF.CurFPFeatures = OldFPFeatures; } LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) { LValueBaseInfo BaseInfo; TBAAAccessInfo TBAAInfo; CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo); return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo, TBAAInfo); } /// Given a value of type T* that may not be to a complete object, /// construct an l-value with the natural pointee alignment of T. LValue CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) { LValueBaseInfo BaseInfo; TBAAAccessInfo TBAAInfo; CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, /* forPointeeType= */ true); return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo); } llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) { return CGM.getTypes().ConvertTypeForMem(T); } llvm::Type *CodeGenFunction::ConvertType(QualType T) { return CGM.getTypes().ConvertType(T); } TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) { type = type.getCanonicalType(); while (true) { switch (type->getTypeClass()) { #define TYPE(name, parent) #define ABSTRACT_TYPE(name, parent) #define NON_CANONICAL_TYPE(name, parent) case Type::name: #define DEPENDENT_TYPE(name, parent) case Type::name: #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name: #include "clang/AST/TypeNodes.inc" llvm_unreachable("non-canonical or dependent type in IR-generation"); case Type::Auto: case Type::DeducedTemplateSpecialization: llvm_unreachable("undeduced type in IR-generation"); // Various scalar types. case Type::Builtin: case Type::Pointer: case Type::BlockPointer: case Type::LValueReference: case Type::RValueReference: case Type::MemberPointer: case Type::Vector: case Type::ExtVector: case Type::ConstantMatrix: case Type::FunctionProto: case Type::FunctionNoProto: case Type::Enum: case Type::ObjCObjectPointer: case Type::Pipe: case Type::ExtInt: return TEK_Scalar; // Complexes. case Type::Complex: return TEK_Complex; // Arrays, records, and Objective-C objects. case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: case Type::Record: case Type::ObjCObject: case Type::ObjCInterface: return TEK_Aggregate; // We operate on atomic values according to their underlying type. case Type::Atomic: type = cast(type)->getValueType(); continue; } llvm_unreachable("unknown type kind!"); } } llvm::DebugLoc CodeGenFunction::EmitReturnBlock() { // For cleanliness, we try to avoid emitting the return block for // simple cases. llvm::BasicBlock *CurBB = Builder.GetInsertBlock(); if (CurBB) { assert(!CurBB->getTerminator() && "Unexpected terminated block."); // We have a valid insert point, reuse it if it is empty or there are no // explicit jumps to the return block. 
if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) { ReturnBlock.getBlock()->replaceAllUsesWith(CurBB); delete ReturnBlock.getBlock(); ReturnBlock = JumpDest(); } else EmitBlock(ReturnBlock.getBlock()); return llvm::DebugLoc(); } // Otherwise, if the return block is the target of a single direct // branch then we can just put the code in that block instead. This // cleans up functions which started with a unified return block. if (ReturnBlock.getBlock()->hasOneUse()) { llvm::BranchInst *BI = dyn_cast(*ReturnBlock.getBlock()->user_begin()); if (BI && BI->isUnconditional() && BI->getSuccessor(0) == ReturnBlock.getBlock()) { // Record/return the DebugLoc of the simple 'return' expression to be used // later by the actual 'ret' instruction. llvm::DebugLoc Loc = BI->getDebugLoc(); Builder.SetInsertPoint(BI->getParent()); BI->eraseFromParent(); delete ReturnBlock.getBlock(); ReturnBlock = JumpDest(); return Loc; } } // FIXME: We are at an unreachable point, there is no reason to emit the block // unless it has uses. However, we still need a place to put the debug // region.end for now. EmitBlock(ReturnBlock.getBlock()); return llvm::DebugLoc(); } static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) { if (!BB) return; if (!BB->use_empty()) return CGF.CurFn->getBasicBlockList().push_back(BB); delete BB; } void CodeGenFunction::FinishFunction(SourceLocation EndLoc) { assert(BreakContinueStack.empty() && "mismatched push/pop in break/continue stack!"); bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0 && NumSimpleReturnExprs == NumReturnExprs && ReturnBlock.getBlock()->use_empty(); // Usually the return expression is evaluated before the cleanup // code. If the function contains only a simple return statement, // such as a constant, the location before the cleanup code becomes // the last useful breakpoint in the function, because the simple // return expression will be evaluated after the cleanup code. To be // safe, set the debug location for cleanup code to the location of // the return statement. Otherwise the cleanup code should be at the // end of the function's lexical scope. // // If there are multiple branches to the return block, the branch // instructions will get the location of the return statements and // all will be fine. if (CGDebugInfo *DI = getDebugInfo()) { if (OnlySimpleReturnStmts) DI->EmitLocation(Builder, LastStopPoint); else DI->EmitLocation(Builder, EndLoc); } // Pop any cleanups that might have been associated with the // parameters. Do this in whatever block we're currently in; it's // important to do this before we enter the return block or return // edges will be *really* confused. bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth; bool HasOnlyLifetimeMarkers = HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth); bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers; if (HasCleanups) { // Make sure the line table doesn't jump back into the body for // the ret after it's been at EndLoc. Optional AL; if (CGDebugInfo *DI = getDebugInfo()) { if (OnlySimpleReturnStmts) DI->EmitLocation(Builder, EndLoc); else // We may not have a valid end location. Try to apply it anyway, and // fall back to an artificial location if needed. AL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc); } PopCleanupBlocks(PrologueCleanupDepth); } // Emit function epilog (to return). 
llvm::DebugLoc Loc = EmitReturnBlock(); if (ShouldInstrumentFunction()) { if (CGM.getCodeGenOpts().InstrumentFunctions) CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit"); if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining) CurFn->addFnAttr("instrument-function-exit-inlined", "__cyg_profile_func_exit"); } // Emit debug descriptor for function end. if (CGDebugInfo *DI = getDebugInfo()) DI->EmitFunctionEnd(Builder, CurFn); // Reset the debug location to that of the simple 'return' expression, if any // rather than that of the end of the function's scope '}'. ApplyDebugLocation AL(*this, Loc); EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc); EmitEndEHSpec(CurCodeDecl); assert(EHStack.empty() && "did not remove all scopes from cleanup stack!"); // If someone did an indirect goto, emit the indirect goto block at the end of // the function. if (IndirectBranch) { EmitBlock(IndirectBranch->getParent()); Builder.ClearInsertionPoint(); } // If some of our locals escaped, insert a call to llvm.localescape in the // entry block. if (!EscapedLocals.empty()) { // Invert the map from local to index into a simple vector. There should be // no holes. SmallVector EscapeArgs; EscapeArgs.resize(EscapedLocals.size()); for (auto &Pair : EscapedLocals) EscapeArgs[Pair.second] = Pair.first; llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration( &CGM.getModule(), llvm::Intrinsic::localescape); CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs); } // Remove the AllocaInsertPt instruction, which is just a convenience for us. llvm::Instruction *Ptr = AllocaInsertPt; AllocaInsertPt = nullptr; Ptr->eraseFromParent(); // If someone took the address of a label but never did an indirect goto, we // made a zero entry PHI node, which is illegal, zap it now. if (IndirectBranch) { llvm::PHINode *PN = cast(IndirectBranch->getAddress()); if (PN->getNumIncomingValues() == 0) { PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType())); PN->eraseFromParent(); } } EmitIfUsed(*this, EHResumeBlock); EmitIfUsed(*this, TerminateLandingPad); EmitIfUsed(*this, TerminateHandler); EmitIfUsed(*this, UnreachableBlock); for (const auto &FuncletAndParent : TerminateFunclets) EmitIfUsed(*this, FuncletAndParent.second); if (CGM.getCodeGenOpts().EmitDeclMetadata) EmitDeclMetadata(); for (SmallVectorImpl >::iterator I = DeferredReplacements.begin(), E = DeferredReplacements.end(); I != E; ++I) { I->first->replaceAllUsesWith(I->second); I->first->eraseFromParent(); } // Eliminate CleanupDestSlot alloca by replacing it with SSA values and // PHIs if the current function is a coroutine. We don't do it for all // functions as it may result in slight increase in numbers of instructions // if compiled with no optimizations. We do it for coroutine as the lifetime // of CleanupDestSlot alloca make correct coroutine frame building very // difficult. if (NormalCleanupDest.isValid() && isCoroutine()) { llvm::DominatorTree DT(*CurFn); llvm::PromoteMemToReg( cast(NormalCleanupDest.getPointer()), DT); NormalCleanupDest = Address::invalid(); } // Scan function arguments for vector width. for (llvm::Argument &A : CurFn->args()) if (auto *VT = dyn_cast(A.getType())) LargestVectorWidth = std::max((uint64_t)LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinSize()); // Update vector width based on return type. 
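// For example, a function taking a <4 x i32> argument and returning
// <8 x float> ends up with LargestVectorWidth = max(128, 256) = 256, so it is
// tagged with "min-legal-vector-width"="256" below.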
if (auto *VT = dyn_cast(CurFn->getReturnType())) LargestVectorWidth = std::max((uint64_t)LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinSize()); // Add the required-vector-width attribute. This contains the max width from: // 1. min-vector-width attribute used in the source program. // 2. Any builtins used that have a vector width specified. // 3. Values passed in and out of inline assembly. // 4. Width of vector arguments and return types for this function. // 5. Width of vector aguments and return types for functions called by this // function. CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth)); // If we generated an unreachable return block, delete it now. if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) { Builder.ClearInsertionPoint(); ReturnBlock.getBlock()->eraseFromParent(); } if (ReturnValue.isValid()) { auto *RetAlloca = dyn_cast(ReturnValue.getPointer()); if (RetAlloca && RetAlloca->use_empty()) { RetAlloca->eraseFromParent(); ReturnValue = Address::invalid(); } } } /// ShouldInstrumentFunction - Return true if the current function should be /// instrumented with __cyg_profile_func_* calls bool CodeGenFunction::ShouldInstrumentFunction() { if (!CGM.getCodeGenOpts().InstrumentFunctions && !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining && !CGM.getCodeGenOpts().InstrumentFunctionEntryBare) return false; if (!CurFuncDecl || CurFuncDecl->hasAttr()) return false; return true; } /// ShouldXRayInstrument - Return true if the current function should be /// instrumented with XRay nop sleds. bool CodeGenFunction::ShouldXRayInstrumentFunction() const { return CGM.getCodeGenOpts().XRayInstrumentFunctions; } /// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to /// the __xray_customevent(...) builtin calls, when doing XRay instrumentation. bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const { return CGM.getCodeGenOpts().XRayInstrumentFunctions && (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents || CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask == XRayInstrKind::Custom); } bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const { return CGM.getCodeGenOpts().XRayInstrumentFunctions && (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents || CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask == XRayInstrKind::Typed); } llvm::Constant * CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F, llvm::Constant *Addr) { // Addresses stored in prologue data can't require run-time fixups and must // be PC-relative. Run-time fixups are undesirable because they necessitate // writable text segments, which are unsafe. And absolute addresses are // undesirable because they break PIE mode. // Add a layer of indirection through a private global. Taking its address // won't result in a run-time fixup, even if Addr has linkonce_odr linkage. auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(), /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, Addr); // Create a PC-relative address. auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy); auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy); auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt); return (IntPtrTy == Int32Ty) ? PCRelAsInt : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty); } llvm::Value * CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F, llvm::Value *EncodedAddr) { // Reconstruct the address of the global. 
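// This is the inverse of EncodeAddrForUseInPrologue: the encoded value is
// (private global - function), truncated to i32 when pointers are wider.
// Sign-extend it back to pointer width, add the function's address to
// recover the global's address, and load the original pointer stashed there.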
auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy); auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int"); auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int"); auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr"); // Load the original pointer through the global. return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()), "decoded_addr"); } void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn) { if (!FD->hasAttr()) return; llvm::LLVMContext &Context = getLLVMContext(); CGM.GenOpenCLArgMetadata(Fn, FD, this); if (const VecTypeHintAttr *A = FD->getAttr()) { QualType HintQTy = A->getTypeHint(); const ExtVectorType *HintEltQTy = HintQTy->getAs(); bool IsSignedInteger = HintQTy->isSignedIntegerType() || (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType()); llvm::Metadata *AttrMDArgs[] = { llvm::ConstantAsMetadata::get(llvm::UndefValue::get( CGM.getTypes().ConvertType(A->getTypeHint()))), llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( llvm::IntegerType::get(Context, 32), llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))}; Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs)); } if (const WorkGroupSizeHintAttr *A = FD->getAttr()) { llvm::Metadata *AttrMDArgs[] = { llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())), llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())), llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))}; Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs)); } if (const ReqdWorkGroupSizeAttr *A = FD->getAttr()) { llvm::Metadata *AttrMDArgs[] = { llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())), llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())), llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))}; Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs)); } if (const OpenCLIntelReqdSubGroupSizeAttr *A = FD->getAttr()) { llvm::Metadata *AttrMDArgs[] = { llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))}; Fn->setMetadata("intel_reqd_sub_group_size", llvm::MDNode::get(Context, AttrMDArgs)); } } /// Determine whether the function F ends with a return stmt. static bool endsWithReturn(const Decl* F) { const Stmt *Body = nullptr; if (auto *FD = dyn_cast_or_null(F)) Body = FD->getBody(); else if (auto *OMD = dyn_cast_or_null(F)) Body = OMD->getBody(); if (auto *CS = dyn_cast_or_null(Body)) { auto LastStmt = CS->body_rbegin(); if (LastStmt != CS->body_rend()) return isa(*LastStmt); } return false; } void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) { if (SanOpts.has(SanitizerKind::Thread)) { Fn->addFnAttr("sanitize_thread_no_checking_at_run_time"); Fn->removeFnAttr(llvm::Attribute::SanitizeThread); } } /// Check if the return value of this function requires sanitization. 
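/// That is the case when nullability-return checking applies to the current
/// function, or when -fsanitize=returns-nonnull-attribute is enabled and the
/// current declaration carries returns_nonnull.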
bool CodeGenFunction::requiresReturnValueCheck() const { return requiresReturnValueNullabilityCheck() || (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl && CurCodeDecl->getAttr()); } static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) { auto *MD = dyn_cast_or_null(D); if (!MD || !MD->getDeclName().getAsIdentifierInfo() || !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") || (MD->getNumParams() != 1 && MD->getNumParams() != 2)) return false; if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType()) return false; if (MD->getNumParams() == 2) { auto *PT = MD->parameters()[1]->getType()->getAs(); if (!PT || !PT->isVoidPointerType() || !PT->getPointeeType().isConstQualified()) return false; } return true; } /// Return the UBSan prologue signature for \p FD if one is available. static llvm::Constant *getPrologueSignature(CodeGenModule &CGM, const FunctionDecl *FD) { if (const auto *MD = dyn_cast(FD)) if (!MD->isStatic()) return nullptr; return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM); } void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc, SourceLocation StartLoc) { assert(!CurFn && "Do not use a CodeGenFunction object for more than one function"); const Decl *D = GD.getDecl(); DidCallStackSave = false; CurCodeDecl = D; if (const auto *FD = dyn_cast_or_null(D)) if (FD->usesSEHTry()) CurSEHParent = FD; CurFuncDecl = (D ? D->getNonClosureContext() : nullptr); FnRetTy = RetTy; CurFn = Fn; CurFnInfo = &FnInfo; assert(CurFn->isDeclaration() && "Function already has body?"); // If this function has been blacklisted for any of the enabled sanitizers, // disable the sanitizer for the function. do { #define SANITIZER(NAME, ID) \ if (SanOpts.empty()) \ break; \ if (SanOpts.has(SanitizerKind::ID)) \ if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc)) \ SanOpts.set(SanitizerKind::ID, false); #include "clang/Basic/Sanitizers.def" #undef SANITIZER } while (0); if (D) { // Apply the no_sanitize* attributes to SanOpts. for (auto Attr : D->specific_attrs()) { SanitizerMask mask = Attr->getMask(); SanOpts.Mask &= ~mask; if (mask & SanitizerKind::Address) SanOpts.set(SanitizerKind::KernelAddress, false); if (mask & SanitizerKind::KernelAddress) SanOpts.set(SanitizerKind::Address, false); if (mask & SanitizerKind::HWAddress) SanOpts.set(SanitizerKind::KernelHWAddress, false); if (mask & SanitizerKind::KernelHWAddress) SanOpts.set(SanitizerKind::HWAddress, false); } } // Apply sanitizer attributes to the function. if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress)) Fn->addFnAttr(llvm::Attribute::SanitizeAddress); if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress)) Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress); if (SanOpts.has(SanitizerKind::MemTag)) Fn->addFnAttr(llvm::Attribute::SanitizeMemTag); if (SanOpts.has(SanitizerKind::Thread)) Fn->addFnAttr(llvm::Attribute::SanitizeThread); if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory)) Fn->addFnAttr(llvm::Attribute::SanitizeMemory); if (SanOpts.has(SanitizerKind::SafeStack)) Fn->addFnAttr(llvm::Attribute::SafeStack); if (SanOpts.has(SanitizerKind::ShadowCallStack)) Fn->addFnAttr(llvm::Attribute::ShadowCallStack); // Apply fuzzing attribute to the function. 
if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink)) Fn->addFnAttr(llvm::Attribute::OptForFuzzing); // Ignore TSan memory acesses from within ObjC/ObjC++ dealloc, initialize, // .cxx_destruct, __destroy_helper_block_ and all of their calees at run time. if (SanOpts.has(SanitizerKind::Thread)) { if (const auto *OMD = dyn_cast_or_null(D)) { IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0); if (OMD->getMethodFamily() == OMF_dealloc || OMD->getMethodFamily() == OMF_initialize || (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) { markAsIgnoreThreadCheckingAtRuntime(Fn); } } } // Ignore unrelated casts in STL allocate() since the allocator must cast // from void* to T* before object initialization completes. Don't match on the // namespace because not all allocators are in std:: if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) { if (matchesStlAllocatorFn(D, getContext())) SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast; } // Ignore null checks in coroutine functions since the coroutines passes // are not aware of how to move the extra UBSan instructions across the split // coroutine boundaries. if (D && SanOpts.has(SanitizerKind::Null)) if (const auto *FD = dyn_cast(D)) if (FD->getBody() && FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass) SanOpts.Mask &= ~SanitizerKind::Null; // Apply xray attributes to the function (as a string, for now) if (const auto *XRayAttr = D ? D->getAttr() : nullptr) { if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has( XRayInstrKind::FunctionEntry) || CGM.getCodeGenOpts().XRayInstrumentationBundle.has( XRayInstrKind::FunctionExit)) { if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) Fn->addFnAttr("function-instrument", "xray-always"); if (XRayAttr->neverXRayInstrument()) Fn->addFnAttr("function-instrument", "xray-never"); if (const auto *LogArgs = D->getAttr()) if (ShouldXRayInstrumentFunction()) Fn->addFnAttr("xray-log-args", llvm::utostr(LogArgs->getArgumentCount())); } } else { if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc)) Fn->addFnAttr( "xray-instruction-threshold", llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold)); } if (ShouldXRayInstrumentFunction()) { if (CGM.getCodeGenOpts().XRayIgnoreLoops) Fn->addFnAttr("xray-ignore-loops"); if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( XRayInstrKind::FunctionExit)) Fn->addFnAttr("xray-skip-exit"); if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( XRayInstrKind::FunctionEntry)) Fn->addFnAttr("xray-skip-entry"); } unsigned Count, Offset; if (const auto *Attr = D ? D->getAttr() : nullptr) { Count = Attr->getCount(); Offset = Attr->getOffset(); } else { Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount; Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset; } if (Count && Offset <= Count) { Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset)); if (Offset) Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset)); } // Add no-jump-tables value. Fn->addFnAttr("no-jump-tables", llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables)); // Add no-inline-line-tables value. if (CGM.getCodeGenOpts().NoInlineLineTables) Fn->addFnAttr("no-inline-line-tables"); // Add profile-sample-accurate value. 
if (CGM.getCodeGenOpts().ProfileSampleAccurate) Fn->addFnAttr("profile-sample-accurate"); if (!CGM.getCodeGenOpts().SampleProfileFile.empty()) Fn->addFnAttr("use-sample-profile"); if (D && D->hasAttr()) Fn->addFnAttr("cfi-canonical-jump-table"); if (getLangOpts().OpenCL) { // Add metadata for a kernel function. if (const FunctionDecl *FD = dyn_cast_or_null(D)) EmitOpenCLKernelMetadata(FD, Fn); } // If we are checking function types, emit a function type signature as // prologue data. if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) { if (const FunctionDecl *FD = dyn_cast_or_null(D)) { if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) { // Remove any (C++17) exception specifications, to allow calling e.g. a // noexcept function through a non-noexcept pointer. auto ProtoTy = getContext().getFunctionTypeWithExceptionSpec(FD->getType(), EST_None); llvm::Constant *FTRTTIConst = CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true); llvm::Constant *FTRTTIConstEncoded = EncodeAddrForUseInPrologue(Fn, FTRTTIConst); llvm::Constant *PrologueStructElems[] = {PrologueSig, FTRTTIConstEncoded}; llvm::Constant *PrologueStructConst = llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true); Fn->setPrologueData(PrologueStructConst); } } } // If we're checking nullability, we need to know whether we can check the // return value. Initialize the flag to 'true' and refine it in EmitParmDecl. if (SanOpts.has(SanitizerKind::NullabilityReturn)) { auto Nullability = FnRetTy->getNullability(getContext()); if (Nullability && *Nullability == NullabilityKind::NonNull) { if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl && CurCodeDecl->getAttr())) RetValNullabilityPrecondition = llvm::ConstantInt::getTrue(getLLVMContext()); } } // If we're in C++ mode and the function name is "main", it is guaranteed // to be norecurse by the standard (3.6.1.3 "The function main shall not be // used within a program"). // // OpenCL C 2.0 v2.2-11 s6.9.i: // Recursion is not supported. // // SYCL v1.2.1 s3.10: // kernels cannot include RTTI information, exception classes, // recursive code, virtual functions or make use of C++ libraries that // are not compiled for the device. if (const FunctionDecl *FD = dyn_cast_or_null(D)) { if ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL || getLangOpts().SYCLIsDevice || (getLangOpts().CUDA && FD->hasAttr())) Fn->addFnAttr(llvm::Attribute::NoRecurse); } if (const FunctionDecl *FD = dyn_cast_or_null(D)) { Builder.setIsFPConstrained(FD->usesFPIntrin()); if (FD->usesFPIntrin()) Fn->addFnAttr(llvm::Attribute::StrictFP); } // If a custom alignment is used, force realigning to this alignment on // any main function which certainly will need it. if (const FunctionDecl *FD = dyn_cast_or_null(D)) if ((FD->isMain() || FD->isMSVCRTEntryPoint()) && CGM.getCodeGenOpts().StackAlignment) Fn->addFnAttr("stackrealign"); llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn); // Create a marker to make it easy to insert allocas into the entryblock // later. Don't create this with the builder, because we don't want it // folded. llvm::Value *Undef = llvm::UndefValue::get(Int32Ty); AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB); ReturnBlock = getJumpDestInCurrentScope("return"); Builder.SetInsertPoint(EntryBB); // If we're checking the return value, allocate space for a pointer to a // precise source location of the checked return statement. 
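// ReturnLocation is a pointer-sized slot; each checked 'return' statement
// stores the address of its source-location record into it, so the
// return-value check emitted in the epilogue can point at the precise
// statement that produced the value.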
if (requiresReturnValueCheck()) { ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr"); InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy)); } // Emit subprogram debug descriptor. if (CGDebugInfo *DI = getDebugInfo()) { // Reconstruct the type from the argument list so that implicit parameters, // such as 'this' and 'vtt', show up in the debug info. Preserve the calling // convention. CallingConv CC = CallingConv::CC_C; if (auto *FD = dyn_cast_or_null(D)) if (const auto *SrcFnTy = FD->getType()->getAs()) CC = SrcFnTy->getCallConv(); SmallVector ArgTypes; for (const VarDecl *VD : Args) ArgTypes.push_back(VD->getType()); QualType FnType = getContext().getFunctionType( RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC)); DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk, Builder); } if (ShouldInstrumentFunction()) { if (CGM.getCodeGenOpts().InstrumentFunctions) CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter"); if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining) CurFn->addFnAttr("instrument-function-entry-inlined", "__cyg_profile_func_enter"); if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare) CurFn->addFnAttr("instrument-function-entry-inlined", "__cyg_profile_func_enter_bare"); } // Since emitting the mcount call here impacts optimizations such as function // inlining, we just add an attribute to insert a mcount call in backend. // The attribute "counting-function" is set to mcount function name which is // architecture dependent. if (CGM.getCodeGenOpts().InstrumentForProfiling) { // Calls to fentry/mcount should not be generated if function has // the no_instrument_function attribute. if (!CurFuncDecl || !CurFuncDecl->hasAttr()) { if (CGM.getCodeGenOpts().CallFEntry) Fn->addFnAttr("fentry-call", "true"); else { Fn->addFnAttr("instrument-function-entry-inlined", getTarget().getMCountName()); } if (CGM.getCodeGenOpts().MNopMCount) { if (!CGM.getCodeGenOpts().CallFEntry) CGM.getDiags().Report(diag::err_opt_not_valid_without_opt) << "-mnop-mcount" << "-mfentry"; Fn->addFnAttr("mnop-mcount"); } if (CGM.getCodeGenOpts().RecordMCount) { if (!CGM.getCodeGenOpts().CallFEntry) CGM.getDiags().Report(diag::err_opt_not_valid_without_opt) << "-mrecord-mcount" << "-mfentry"; Fn->addFnAttr("mrecord-mcount"); } } } if (CGM.getCodeGenOpts().PackedStack) { if (getContext().getTargetInfo().getTriple().getArch() != llvm::Triple::systemz) CGM.getDiags().Report(diag::err_opt_not_valid_on_target) << "-mpacked-stack"; Fn->addFnAttr("packed-stack"); } if (RetTy->isVoidType()) { // Void type; nothing to return. ReturnValue = Address::invalid(); // Count the implicit return. if (!endsWithReturn(D)) ++NumReturnExprs; } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) { // Indirect return; emit returned value directly into sret slot. // This reduces code size, and affects correctness in C++. 
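// The sret slot arrives as an explicit IR argument: normally the first one,
// or the second when the ABI places it after 'this' (isSRetAfterThis).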
auto AI = CurFn->arg_begin(); if (CurFnInfo->getReturnInfo().isSRetAfterThis()) ++AI; ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign()); if (!CurFnInfo->getReturnInfo().getIndirectByVal()) { ReturnValuePointer = CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr"); Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast( ReturnValue.getPointer(), Int8PtrTy), ReturnValuePointer); } } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca && !hasScalarEvaluationKind(CurFnInfo->getReturnType())) { // Load the sret pointer from the argument struct and return into that. unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex(); llvm::Function::arg_iterator EI = CurFn->arg_end(); --EI; llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx); ReturnValuePointer = Address(Addr, getPointerAlign()); Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result"); ReturnValue = Address(Addr, CGM.getNaturalTypeAlignment(RetTy)); } else { ReturnValue = CreateIRTemp(RetTy, "retval"); // Tell the epilog emitter to autorelease the result. We do this // now so that various specialized functions can suppress it // during their IR-generation. if (getLangOpts().ObjCAutoRefCount && !CurFnInfo->isReturnsRetained() && RetTy->isObjCRetainableType()) AutoreleaseResult = true; } EmitStartEHSpec(CurCodeDecl); PrologueCleanupDepth = EHStack.stable_begin(); // Emit OpenMP specific initialization of the device functions. if (getLangOpts().OpenMP && CurCodeDecl) CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl); EmitFunctionProlog(*CurFnInfo, CurFn, Args); if (D && isa(D) && cast(D)->isInstance()) { CGM.getCXXABI().EmitInstanceFunctionProlog(*this); const CXXMethodDecl *MD = cast(D); if (MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call) { // We're in a lambda; figure out the captures. MD->getParent()->getCaptureFields(LambdaCaptureFields, LambdaThisCaptureField); if (LambdaThisCaptureField) { // If the lambda captures the object referred to by '*this' - either by // value or by reference, make sure CXXThisValue points to the correct // object. // Get the lvalue for the field (which is a copy of the enclosing object // or contains the address of the enclosing object). LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField); if (!LambdaThisCaptureField->getType()->isPointerType()) { // If the enclosing object was captured by value, just use its address. CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer(); } else { // Load the lvalue pointed to by the field, since '*this' was captured // by reference. CXXThisValue = EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal(); } } for (auto *FD : MD->getParent()->fields()) { if (FD->hasCapturedVLAType()) { auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD), SourceLocation()).getScalarVal(); auto VAT = FD->getCapturedVLAType(); VLASizeMap[VAT->getSizeExpr()] = ExprArg; } } } else { // Not in a lambda; just use 'this' from the method. // FIXME: Should we generate a new load for each use of 'this'? The // fast register allocator would be happier... CXXThisValue = CXXABIThisValue; } // Check the 'this' pointer once per function, if it's available. 
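// Emit a single check on the incoming 'this' rather than one per use; the
// ObjectSize check is skipped, and the null check is skipped for lambda call
// operators without a capture-default, whose static invoker may call the
// operator with a null 'this'.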
if (CXXABIThisValue) { SanitizerSet SkippedChecks; SkippedChecks.set(SanitizerKind::ObjectSize, true); QualType ThisTy = MD->getThisType(); // If this is the call operator of a lambda with no capture-default, it // may have a static invoker function, which may call this operator with // a null 'this' pointer. if (isLambdaCallOperator(MD) && MD->getParent()->getLambdaCaptureDefault() == LCD_None) SkippedChecks.set(SanitizerKind::Null, true); EmitTypeCheck(isa(MD) ? TCK_ConstructorCall : TCK_MemberCall, Loc, CXXABIThisValue, ThisTy, getContext().getTypeAlignInChars(ThisTy->getPointeeType()), SkippedChecks); } } // If any of the arguments have a variably modified type, make sure to // emit the type size. for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e; ++i) { const VarDecl *VD = *i; // Dig out the type as written from ParmVarDecls; it's unclear whether // the standard (C99 6.9.1p10) requires this, but we're following the // precedent set by gcc. QualType Ty; if (const ParmVarDecl *PVD = dyn_cast(VD)) Ty = PVD->getOriginalType(); else Ty = VD->getType(); if (Ty->isVariablyModifiedType()) EmitVariablyModifiedType(Ty); } // Emit a location at the end of the prologue. if (CGDebugInfo *DI = getDebugInfo()) DI->EmitLocation(Builder, StartLoc); // TODO: Do we need to handle this in two places like we do with // target-features/target-cpu? if (CurFuncDecl) if (const auto *VecWidth = CurFuncDecl->getAttr()) LargestVectorWidth = VecWidth->getVectorWidth(); } void CodeGenFunction::EmitFunctionBody(const Stmt *Body) { incrementProfileCounter(Body); if (const CompoundStmt *S = dyn_cast(Body)) EmitCompoundStmtWithoutScope(*S); else EmitStmt(Body); } /// When instrumenting to collect profile data, the counts for some blocks /// such as switch cases need to not include the fall-through counts, so /// emit a branch around the instrumentation code. When not instrumenting, /// this just calls EmitBlock(). void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S) { llvm::BasicBlock *SkipCountBB = nullptr; if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) { // When instrumenting for profiling, the fallthrough to certain // statements needs to skip over the instrumentation code so that we // get an accurate count. SkipCountBB = createBasicBlock("skipcount"); EmitBranch(SkipCountBB); } EmitBlock(BB); uint64_t CurrentCount = getCurrentProfileCount(); incrementProfileCounter(S); setCurrentProfileCount(getCurrentProfileCount() + CurrentCount); if (SkipCountBB) EmitBlock(SkipCountBB); } /// Tries to mark the given function nounwind based on the /// non-existence of any throwing calls within it. We believe this is /// lightweight enough to do at -O0. static void TryMarkNoThrow(llvm::Function *F) { // LLVM treats 'nounwind' on a function as part of the type, so we // can't do this on functions that can be overwritten. 
if (F->isInterposable()) return; for (llvm::BasicBlock &BB : *F) for (llvm::Instruction &I : BB) if (I.mayThrow()) return; F->setDoesNotThrow(); } QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args) { const FunctionDecl *FD = cast(GD.getDecl()); QualType ResTy = FD->getReturnType(); const CXXMethodDecl *MD = dyn_cast(FD); if (MD && MD->isInstance()) { if (CGM.getCXXABI().HasThisReturn(GD)) ResTy = MD->getThisType(); else if (CGM.getCXXABI().hasMostDerivedReturn(GD)) ResTy = CGM.getContext().VoidPtrTy; CGM.getCXXABI().buildThisParam(*this, Args); } // The base version of an inheriting constructor whose constructed base is a // virtual base is not passed any arguments (because it doesn't actually call // the inherited constructor). bool PassedParams = true; if (const CXXConstructorDecl *CD = dyn_cast(FD)) if (auto Inherited = CD->getInheritedConstructor()) PassedParams = getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType()); if (PassedParams) { for (auto *Param : FD->parameters()) { Args.push_back(Param); if (!Param->hasAttr()) continue; auto *Implicit = ImplicitParamDecl::Create( getContext(), Param->getDeclContext(), Param->getLocation(), /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other); SizeArguments[Param] = Implicit; Args.push_back(Implicit); } } if (MD && (isa(MD) || isa(MD))) CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args); return ResTy; } static bool shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD, const ASTContext &Context) { QualType T = FD->getReturnType(); // Avoid the optimization for functions that return a record type with a // trivial destructor or another trivially copyable type. if (const RecordType *RT = T.getCanonicalType()->getAs()) { if (const auto *ClassDecl = dyn_cast(RT->getDecl())) return !ClassDecl->hasTrivialDestructor(); } return !T.isTriviallyCopyableType(Context); } void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo) { const FunctionDecl *FD = cast(GD.getDecl()); CurGD = GD; FunctionArgList Args; QualType ResTy = BuildFunctionArgList(GD, Args); // Check if we should generate debug info for this function. if (FD->hasAttr()) DebugInfo = nullptr; // disable debug info indefinitely for this function // The function might not have a body if we're generating thunks for a // function declaration. SourceRange BodyRange; if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange(); else BodyRange = FD->getLocation(); CurEHLocation = BodyRange.getEnd(); // Use the location of the start of the function to determine where // the function definition is located. By default use the location // of the declaration as the location for the subprogram. A function // may lack a declaration in the source code if it is created by code // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk). SourceLocation Loc = FD->getLocation(); // If this is a function specialization then use the pattern body // as the location for the function. if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern()) if (SpecDecl->hasBody(SpecDecl)) Loc = SpecDecl->getLocation(); Stmt *Body = FD->getBody(); // Initialize helper which will detect jumps which can cause invalid lifetime // markers. if (Body && ShouldEmitLifetimeMarkers) Bypasses.Init(Body); // Emit the standard function prologue. StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin()); // Generate the body of the function. 
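// Body emission dispatches on what kind of function this is: destructors and
// constructors have dedicated emitters, host-side CUDA kernels get a device
// stub, lambda static invokers forward to the call operator, defaulted
// copy/move assignment operators take the implicit-member path, and
// everything else goes through EmitFunctionBody.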
PGO.assignRegionCounters(GD, CurFn); if (isa(FD)) EmitDestructorBody(Args); else if (isa(FD)) EmitConstructorBody(Args); else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice && FD->hasAttr()) CGM.getCUDARuntime().emitDeviceStub(*this, Args); else if (isa(FD) && cast(FD)->isLambdaStaticInvoker()) { // The lambda static invoker function is special, because it forwards or // clones the body of the function call operator (but is actually static). EmitLambdaStaticInvokeBody(cast(FD)); } else if (FD->isDefaulted() && isa(FD) && (cast(FD)->isCopyAssignmentOperator() || cast(FD)->isMoveAssignmentOperator())) { // Implicit copy-assignment gets the same special treatment as implicit // copy-constructors. emitImplicitAssignmentOperatorBody(Args); } else if (Body) { EmitFunctionBody(Body); } else llvm_unreachable("no definition for emitted function"); // C++11 [stmt.return]p2: // Flowing off the end of a function [...] results in undefined behavior in // a value-returning function. // C11 6.9.1p12: // If the '}' that terminates a function is reached, and the value of the // function call is used by the caller, the behavior is undefined. if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock && !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) { bool ShouldEmitUnreachable = CGM.getCodeGenOpts().StrictReturn || shouldUseUndefinedBehaviorReturnOptimization(FD, getContext()); if (SanOpts.has(SanitizerKind::Return)) { SanitizerScope SanScope(this); llvm::Value *IsFalse = Builder.getFalse(); EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return), SanitizerHandler::MissingReturn, EmitCheckSourceLocation(FD->getLocation()), None); } else if (ShouldEmitUnreachable) { if (CGM.getCodeGenOpts().OptimizationLevel == 0) EmitTrapCall(llvm::Intrinsic::trap); } if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) { Builder.CreateUnreachable(); Builder.ClearInsertionPoint(); } } // Emit the standard function epilogue. FinishFunction(BodyRange.getEnd()); // If we haven't marked the function nothrow through other means, do // a quick pass now to see if we can. if (!CurFn->doesNotThrow()) TryMarkNoThrow(CurFn); } /// ContainsLabel - Return true if the statement contains a label in it. If /// this statement is not executed normally, it not containing a label means /// that we can just remove the code. bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) { // Null statement, not a label! if (!S) return false; // If this is a label, we have to emit the code, consider something like: // if (0) { ... foo: bar(); } goto foo; // // TODO: If anyone cared, we could track __label__'s, since we know that you // can't jump to one from outside their declared region. if (isa(S)) return true; // If this is a case/default statement, and we haven't seen a switch, we have // to emit the code. if (isa(S) && !IgnoreCaseStmts) return true; // If this is a switch statement, we want to ignore cases below it. if (isa(S)) IgnoreCaseStmts = true; // Scan subexpressions for verboten labels. for (const Stmt *SubStmt : S->children()) if (ContainsLabel(SubStmt, IgnoreCaseStmts)) return true; return false; } /// containsBreak - Return true if the statement contains a break out of it. /// If the statement (recursively) contains a switch or loop with a break /// inside of it, this is fine. bool CodeGenFunction::containsBreak(const Stmt *S) { // Null statement, not a label! 
if (!S) return false; // If this is a switch or loop that defines its own break scope, then we can // include it and anything inside of it. if (isa(S) || isa(S) || isa(S) || isa(S)) return false; if (isa(S)) return true; // Scan subexpressions for verboten breaks. for (const Stmt *SubStmt : S->children()) if (containsBreak(SubStmt)) return true; return false; } bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) { if (!S) return false; // Some statement kinds add a scope and thus never add a decl to the current // scope. Note, this list is longer than the list of statements that might // have an unscoped decl nested within them, but this way is conservatively // correct even if more statement kinds are added. if (isa(S) || isa(S) || isa(S) || isa(S) || isa(S) || isa(S) || isa(S) || isa(S) || isa(S) || isa(S)) return false; if (isa(S)) return true; for (const Stmt *SubStmt : S->children()) if (mightAddDeclToScope(SubStmt)) return true; return false; } /// ConstantFoldsToSimpleInteger - If the specified expression does not fold /// to a constant, or if it does but contains a label, return false. If it /// constant folds return true and set the boolean result in Result. bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, bool &ResultBool, bool AllowLabels) { llvm::APSInt ResultInt; if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels)) return false; ResultBool = ResultInt.getBoolValue(); return true; } /// ConstantFoldsToSimpleInteger - If the specified expression does not fold /// to a constant, or if it does but contains a label, return false. If it /// constant folds return true and set the folded value. bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt, bool AllowLabels) { // FIXME: Rename and handle conversion of other evaluatable things // to bool. Expr::EvalResult Result; if (!Cond->EvaluateAsInt(Result, getContext())) return false; // Not foldable, not integer or not fully evaluatable. llvm::APSInt Int = Result.Val.getInt(); if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond)) return false; // Contains a label. ResultInt = Int; return true; } /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if /// statement) to the specified blocks. Based on the condition, this might try /// to simplify the codegen of the conditional based on the branch. /// void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount) { Cond = Cond->IgnoreParens(); if (const BinaryOperator *CondBOp = dyn_cast(Cond)) { // Handle X && Y in a condition. if (CondBOp->getOpcode() == BO_LAnd) { // If we have "1 && X", simplify the code. "0 && X" would have constant // folded if the case was simple enough. bool ConstantBool = false; if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) && ConstantBool) { // br(1 && X) -> br(X). incrementProfileCounter(CondBOp); return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount); } // If we have "X && 1", simplify the code to use an uncond branch. // "X && 0" would have been constant folded to 0. if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) && ConstantBool) { // br(X && 1) -> br(X). return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock, TrueCount); } // Emit the LHS as a conditional. If the LHS conditional is false, we // want to jump to the FalseBlock. 
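// Roughly, for "if (X && Y)" this produces:
//     br(X, land.lhs.true, false-dest)
//   land.lhs.true:
//     br(Y, true-dest, false-dest)
// so the RHS is only evaluated when the LHS was true.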
llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true"); // The counter tells us how often we evaluate RHS, and all of TrueCount // can be propagated to that branch. uint64_t RHSCount = getProfileCount(CondBOp->getRHS()); ConditionalEvaluation eval(*this); { ApplyDebugLocation DL(*this, Cond); EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount); EmitBlock(LHSTrue); } incrementProfileCounter(CondBOp); setCurrentProfileCount(getProfileCount(CondBOp->getRHS())); // Any temporaries created here are conditional. eval.begin(*this); EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount); eval.end(*this); return; } if (CondBOp->getOpcode() == BO_LOr) { // If we have "0 || X", simplify the code. "1 || X" would have constant // folded if the case was simple enough. bool ConstantBool = false; if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) && !ConstantBool) { // br(0 || X) -> br(X). incrementProfileCounter(CondBOp); return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount); } // If we have "X || 0", simplify the code to use an uncond branch. // "X || 1" would have been constant folded to 1. if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) && !ConstantBool) { // br(X || 0) -> br(X). return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock, TrueCount); } // Emit the LHS as a conditional. If the LHS conditional is true, we // want to jump to the TrueBlock. llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false"); // We have the count for entry to the RHS and for the whole expression // being true, so we can divy up True count between the short circuit and // the RHS. uint64_t LHSCount = getCurrentProfileCount() - getProfileCount(CondBOp->getRHS()); uint64_t RHSCount = TrueCount - LHSCount; ConditionalEvaluation eval(*this); { ApplyDebugLocation DL(*this, Cond); EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount); EmitBlock(LHSFalse); } incrementProfileCounter(CondBOp); setCurrentProfileCount(getProfileCount(CondBOp->getRHS())); // Any temporaries created here are conditional. eval.begin(*this); EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount); eval.end(*this); return; } } if (const UnaryOperator *CondUOp = dyn_cast(Cond)) { // br(!x, t, f) -> br(x, f, t) if (CondUOp->getOpcode() == UO_LNot) { // Negate the count. uint64_t FalseCount = getCurrentProfileCount() - TrueCount; // Negate the condition and swap the destination blocks. return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock, FalseCount); } } if (const ConditionalOperator *CondOp = dyn_cast(Cond)) { // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f)) llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true"); llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false"); ConditionalEvaluation cond(*this); EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock, getProfileCount(CondOp)); // When computing PGO branch weights, we only know the overall count for // the true block. This code is essentially doing tail duplication of the // naive code-gen, introducing new edges for which counts are not // available. Divide the counts proportionally between the LHS and RHS of // the conditional operator. 
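// A sketch of the split: if the whole conditional is reached 100 times, the
// enclosing branch is true 60 times, and the LHS arm is entered 40 times,
// the LHS arm is credited with 60 * (40 / 100) = 24 of the true count and
// the RHS arm gets the remaining 36.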
uint64_t LHSScaledTrueCount = 0; if (TrueCount) { double LHSRatio = getProfileCount(CondOp) / (double)getCurrentProfileCount(); LHSScaledTrueCount = TrueCount * LHSRatio; } cond.begin(*this); EmitBlock(LHSBlock); incrementProfileCounter(CondOp); { ApplyDebugLocation DL(*this, Cond); EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock, LHSScaledTrueCount); } cond.end(*this); cond.begin(*this); EmitBlock(RHSBlock); EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock, TrueCount - LHSScaledTrueCount); cond.end(*this); return; } if (const CXXThrowExpr *Throw = dyn_cast(Cond)) { // Conditional operator handling can give us a throw expression as a // condition for a case like: // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f) // Fold this to: // br(c, throw x, br(y, t, f)) EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false); return; } // If the branch has a condition wrapped by __builtin_unpredictable, // create metadata that specifies that the branch is unpredictable. // Don't bother if not optimizing because that metadata would not be used. llvm::MDNode *Unpredictable = nullptr; auto *Call = dyn_cast(Cond->IgnoreImpCasts()); if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) { auto *FD = dyn_cast_or_null(Call->getCalleeDecl()); if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) { llvm::MDBuilder MDHelper(getLLVMContext()); Unpredictable = MDHelper.createUnpredictable(); } } // Create branch weights based on the number of times we get here and the // number of times the condition should be true. uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount); llvm::MDNode *Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount); // Emit the code with the fully general case. llvm::Value *CondV; { ApplyDebugLocation DL(*this, Cond); CondV = EvaluateExprAsBool(Cond); } Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable); } /// ErrorUnsupported - Print out an error that codegen doesn't support the /// specified stmt yet. void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) { CGM.ErrorUnsupported(S, Type); } /// emitNonZeroVLAInit - Emit the "zero" initialization of a /// variable-length array whose elements have a non-zero bit-pattern. /// /// \param baseType the inner-most element type of the array /// \param src - a char* pointing to the bit-pattern for a single /// base element of the array /// \param sizeInChars - the total size of the VLA, in chars static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, Address dest, Address src, llvm::Value *sizeInChars) { CGBuilderTy &Builder = CGF.Builder; CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType); llvm::Value *baseSizeInChars = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity()); Address begin = Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin"); llvm::Value *end = Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end"); llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock(); llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop"); llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont"); // Make a loop over the VLA. C99 guarantees that the VLA element // count must be nonzero. CGF.EmitBlock(loopBB); llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur"); cur->addIncoming(begin.getPointer(), originBB); CharUnits curAlign = dest.getAlignment().alignmentOfArrayElement(baseSize); // memcpy the individual element bit-pattern. 
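// Each iteration copies one element's worth of the bit-pattern from 'src' to
// the current position, advances 'cur' by the element size, and exits once
// 'cur' reaches 'end'.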
Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars, /*volatile*/ false); // Go to the next element. llvm::Value *next = Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next"); // Leave if that's the end of the VLA. llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone"); Builder.CreateCondBr(done, contBB, loopBB); cur->addIncoming(next, loopBB); CGF.EmitBlock(contBB); } void CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) { // Ignore empty classes in C++. if (getLangOpts().CPlusPlus) { if (const RecordType *RT = Ty->getAs()) { if (cast(RT->getDecl())->isEmpty()) return; } } // Cast the dest ptr to the appropriate i8 pointer type. if (DestPtr.getElementType() != Int8Ty) DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty); // Get size and alignment info for this aggregate. CharUnits size = getContext().getTypeSizeInChars(Ty); llvm::Value *SizeVal; const VariableArrayType *vla; // Don't bother emitting a zero-byte memset. if (size.isZero()) { // But note that getTypeInfo returns 0 for a VLA. if (const VariableArrayType *vlaType = dyn_cast_or_null( getContext().getAsArrayType(Ty))) { auto VlaSize = getVLASize(vlaType); SizeVal = VlaSize.NumElts; CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type); if (!eltSize.isOne()) SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize)); vla = vlaType; } else { return; } } else { SizeVal = CGM.getSize(size); vla = nullptr; } // If the type contains a pointer to data member we can't memset it to zero. // Instead, create a null constant and copy it to the destination. // TODO: there are other patterns besides zero that we can usefully memset, // like -1, which happens to be the pattern used by member-pointers. if (!CGM.getTypes().isZeroInitializable(Ty)) { // For a VLA, emit a single element, then splat that over the VLA. if (vla) Ty = getContext().getBaseElementType(vla); llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty); llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(), /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, NullConstant, Twine()); CharUnits NullAlign = DestPtr.getAlignment(); NullVariable->setAlignment(NullAlign.getAsAlign()); Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()), NullAlign); if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal); // Get and call the appropriate llvm.memcpy overload. Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false); return; } // Otherwise, just memset the whole thing to zero. This is legal // because in LLVM, all default initializers (other than the ones we just // handled above) are guaranteed to have a bit pattern of all zeros. Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false); } llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) { // Make sure that there is a block for the indirect goto. if (!IndirectBranch) GetIndirectGotoBlock(); llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock(); // Make sure the indirect branch includes all of the address-taken blocks. IndirectBranch->addDestination(BB); return llvm::BlockAddress::get(CurFn, BB); } llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() { // If we already made the indirect branch for indirect goto, return its block. if (IndirectBranch) return IndirectBranch->getParent(); CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto")); // Create the PHI node that indirect gotos will add entries to. 
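// The PHI is the address operand of the indirectbr: each computed-goto site
// adds its target address as an incoming value, and GetAddrOfLabel adds the
// label's block as a possible destination.  If the PHI ends up with no
// incoming values, FinishFunction replaces it with undef and deletes it.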
llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0, "indirect.goto.dest"); // Create the indirect branch instruction. IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal); return IndirectBranch->getParent(); } /// Computes the length of an array in elements, as well as the base /// element type and a properly-typed first element pointer. llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType, QualType &baseType, Address &addr) { const ArrayType *arrayType = origArrayType; // If it's a VLA, we have to load the stored size. Note that // this is the size of the VLA in bytes, not its size in elements. llvm::Value *numVLAElements = nullptr; if (isa(arrayType)) { numVLAElements = getVLASize(cast(arrayType)).NumElts; // Walk into all VLAs. This doesn't require changes to addr, // which has type T* where T is the first non-VLA element type. do { QualType elementType = arrayType->getElementType(); arrayType = getContext().getAsArrayType(elementType); // If we only have VLA components, 'addr' requires no adjustment. if (!arrayType) { baseType = elementType; return numVLAElements; } } while (isa(arrayType)); // We get out here only if we find a constant array type // inside the VLA. } // We have some number of constant-length arrays, so addr should // have LLVM type [M x [N x [...]]]*. Build a GEP that walks // down to the first element of addr. SmallVector gepIndices; // GEP down to the array type. llvm::ConstantInt *zero = Builder.getInt32(0); gepIndices.push_back(zero); uint64_t countFromCLAs = 1; QualType eltType; llvm::ArrayType *llvmArrayType = dyn_cast(addr.getElementType()); while (llvmArrayType) { assert(isa(arrayType)); assert(cast(arrayType)->getSize().getZExtValue() == llvmArrayType->getNumElements()); gepIndices.push_back(zero); countFromCLAs *= llvmArrayType->getNumElements(); eltType = arrayType->getElementType(); llvmArrayType = dyn_cast(llvmArrayType->getElementType()); arrayType = getContext().getAsArrayType(arrayType->getElementType()); assert((!llvmArrayType || arrayType) && "LLVM and Clang types are out-of-synch"); } if (arrayType) { // From this point onwards, the Clang array type has been emitted // as some other type (probably a packed struct). Compute the array // size, and just emit the 'begin' expression as a bitcast. while (arrayType) { countFromCLAs *= cast(arrayType)->getSize().getZExtValue(); eltType = arrayType->getElementType(); arrayType = getContext().getAsArrayType(eltType); } llvm::Type *baseType = ConvertType(eltType); addr = Builder.CreateElementBitCast(addr, baseType, "array.begin"); } else { // Create the actual GEP. addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(), gepIndices, "array.begin"), addr.getAlignment()); } baseType = eltType; llvm::Value *numElements = llvm::ConstantInt::get(SizeTy, countFromCLAs); // If we had any VLA dimensions, factor them in. if (numVLAElements) numElements = Builder.CreateNUWMul(numVLAElements, numElements); return numElements; } CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) { const VariableArrayType *vla = getContext().getAsVariableArrayType(type); assert(vla && "type was not a variable array type!"); return getVLASize(vla); } CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(const VariableArrayType *type) { // The number of elements so far; always size_t. 
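// Walk the nested VLA dimensions, multiplying together the sizes that were
// cached in VLASizeMap when EmitVariablyModifiedType evaluated the bound
// expressions; the multiplications are NUW because a wrapping bound is
// undefined behavior.  The result pairs the total element count with the
// innermost non-VLA element type.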
llvm::Value *numElements = nullptr; QualType elementType; do { elementType = type->getElementType(); llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()]; assert(vlaSize && "no size for VLA!"); assert(vlaSize->getType() == SizeTy); if (!numElements) { numElements = vlaSize; } else { // It's undefined behavior if this wraps around, so mark it that way. // FIXME: Teach -fsanitize=undefined to trap this. numElements = Builder.CreateNUWMul(numElements, vlaSize); } } while ((type = getContext().getAsVariableArrayType(elementType))); return { numElements, elementType }; } CodeGenFunction::VlaSizePair CodeGenFunction::getVLAElements1D(QualType type) { const VariableArrayType *vla = getContext().getAsVariableArrayType(type); assert(vla && "type was not a variable array type!"); return getVLAElements1D(vla); } CodeGenFunction::VlaSizePair CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) { llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()]; assert(VlaSize && "no size for VLA!"); assert(VlaSize->getType() == SizeTy); return { VlaSize, Vla->getElementType() }; } void CodeGenFunction::EmitVariablyModifiedType(QualType type) { assert(type->isVariablyModifiedType() && "Must pass variably modified type to EmitVLASizes!"); EnsureInsertPoint(); // We're going to walk down into the type and look for VLA // expressions. do { assert(type->isVariablyModifiedType()); const Type *ty = type.getTypePtr(); switch (ty->getTypeClass()) { #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_TYPE(Class, Base) #define DEPENDENT_TYPE(Class, Base) case Type::Class: #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) #include "clang/AST/TypeNodes.inc" llvm_unreachable("unexpected dependent type!"); // These types are never variably-modified. case Type::Builtin: case Type::Complex: case Type::Vector: case Type::ExtVector: case Type::ConstantMatrix: case Type::Record: case Type::Enum: case Type::Elaborated: case Type::TemplateSpecialization: case Type::ObjCTypeParam: case Type::ObjCObject: case Type::ObjCInterface: case Type::ObjCObjectPointer: case Type::ExtInt: llvm_unreachable("type class is never variably-modified!"); case Type::Adjusted: type = cast(ty)->getAdjustedType(); break; case Type::Decayed: type = cast(ty)->getPointeeType(); break; case Type::Pointer: type = cast(ty)->getPointeeType(); break; case Type::BlockPointer: type = cast(ty)->getPointeeType(); break; case Type::LValueReference: case Type::RValueReference: type = cast(ty)->getPointeeType(); break; case Type::MemberPointer: type = cast(ty)->getPointeeType(); break; case Type::ConstantArray: case Type::IncompleteArray: // Losing element qualification here is fine. type = cast(ty)->getElementType(); break; case Type::VariableArray: { // Losing element qualification here is fine. const VariableArrayType *vat = cast(ty); // Unknown size indication requires no size computation. // Otherwise, evaluate and record it. if (const Expr *size = vat->getSizeExpr()) { // It's possible that we might have emitted this already, // e.g. with a typedef and a pointer to it. llvm::Value *&entry = VLASizeMap[size]; if (!entry) { llvm::Value *Size = EmitScalarExpr(size); // C11 6.7.6.2p5: // If the size is an expression that is not an integer constant // expression [...] each time it is evaluated it shall have a value // greater than zero. 
if (SanOpts.has(SanitizerKind::VLABound) && size->getType()->isSignedIntegerType()) { SanitizerScope SanScope(this); llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType()); llvm::Constant *StaticArgs[] = { EmitCheckSourceLocation(size->getBeginLoc()), EmitCheckTypeDescriptor(size->getType())}; EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero), SanitizerKind::VLABound), SanitizerHandler::VLABoundNotPositive, StaticArgs, Size); } // Always zexting here would be wrong if it weren't // undefined behavior to have a negative bound. entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false); } } type = vat->getElementType(); break; } case Type::FunctionProto: case Type::FunctionNoProto: type = cast(ty)->getReturnType(); break; case Type::Paren: case Type::TypeOf: case Type::UnaryTransform: case Type::Attributed: case Type::SubstTemplateTypeParm: - case Type::PackExpansion: case Type::MacroQualified: // Keep walking after single level desugaring. type = type.getSingleStepDesugaredType(getContext()); break; case Type::Typedef: case Type::Decltype: case Type::Auto: case Type::DeducedTemplateSpecialization: // Stop walking: nothing to do. return; case Type::TypeOfExpr: // Stop walking: emit typeof expression. EmitIgnoredExpr(cast(ty)->getUnderlyingExpr()); return; case Type::Atomic: type = cast(ty)->getValueType(); break; case Type::Pipe: type = cast(ty)->getElementType(); break; } } while (type->isVariablyModifiedType()); } Address CodeGenFunction::EmitVAListRef(const Expr* E) { if (getContext().getBuiltinVaListType()->isArrayType()) return EmitPointerWithAlignment(E); return EmitLValue(E).getAddress(*this); } Address CodeGenFunction::EmitMSVAListRef(const Expr *E) { return EmitLValue(E).getAddress(*this); } void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init) { assert(Init.hasValue() && "Invalid DeclRefExpr initializer!"); if (CGDebugInfo *Dbg = getDebugInfo()) if (CGM.getCodeGenOpts().hasReducedDebugInfo()) Dbg->EmitGlobalVariable(E->getDecl(), Init); } CodeGenFunction::PeepholeProtection CodeGenFunction::protectFromPeepholes(RValue rvalue) { // At the moment, the only aggressive peephole we do in IR gen // is trunc(zext) folding, but if we add more, we can easily // extend this protection. if (!rvalue.isScalar()) return PeepholeProtection(); llvm::Value *value = rvalue.getScalarVal(); if (!isa(value)) return PeepholeProtection(); // Just make an extra bitcast. assert(HaveInsertPoint()); llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "", Builder.GetInsertBlock()); PeepholeProtection protection; protection.Inst = inst; return protection; } void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) { if (!protection.Inst) return; // In theory, we could try to duplicate the peepholes now, but whatever. 
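// The instruction being erased is the no-op bitcast that protectFromPeepholes
// inserted purely as an extra use of the value; dropping it ends the
// protection.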
protection.Inst->eraseFromParent(); } void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue) { llvm::Value *TheCheck; llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption( CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck); if (SanOpts.has(SanitizerKind::Alignment)) { emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment, OffsetValue, TheCheck, Assumption); } } void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue) { if (auto *CE = dyn_cast(E)) E = CE->getSubExprAsWritten(); QualType Ty = E->getType(); SourceLocation Loc = E->getExprLoc(); emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment, OffsetValue); } llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location) { llvm::Value *Args[4] = { AnnotatedVal, Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy), Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy), CGM.EmitAnnotationLineNo(Location) }; return Builder.CreateCall(AnnotationFn, Args); } void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) { assert(D->hasAttr() && "no annotate attribute"); // FIXME We create a new bitcast for every annotation because that's what // llvm-gcc was doing. for (const auto *I : D->specific_attrs()) EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation), Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()), I->getAnnotation(), D->getLocation()); } Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D, Address Addr) { assert(D->hasAttr() && "no annotate attribute"); llvm::Value *V = Addr.getPointer(); llvm::Type *VTy = V->getType(); llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation, CGM.Int8PtrTy); for (const auto *I : D->specific_attrs()) { // FIXME Always emit the cast inst so we can differentiate between // annotation on the first field of a struct and annotation on the struct // itself. if (VTy != CGM.Int8PtrTy) V = Builder.CreateBitCast(V, CGM.Int8PtrTy); V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation()); V = Builder.CreateBitCast(V, VTy); } return Address(V, Addr.getAlignment()); } CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { } CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF) : CGF(CGF) { assert(!CGF->IsSanitizerScope); CGF->IsSanitizerScope = true; } CodeGenFunction::SanitizerScope::~SanitizerScope() { CGF->IsSanitizerScope = false; } void CodeGenFunction::InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const { LoopStack.InsertHelper(I); if (IsSanitizerScope) CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I); } void CGBuilderInserter::InsertHelper( llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const { llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt); if (CGF) CGF->InsertHelper(I, Name, BB, InsertPt); } static bool hasRequiredFeatures(const SmallVectorImpl &ReqFeatures, CodeGenModule &CGM, const FunctionDecl *FD, std::string &FirstMissing) { // If there aren't any required features listed then go ahead and return. 
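// Each required-feature entry may itself be an '|'-separated alternation;
// every entry must be satisfied, each by at least one of its alternatives
// being enabled for the caller.  E.g. a feature list of "sse4.2|popcnt,avx"
// (two entries) requires (sse4.2 or popcnt) and avx.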
if (ReqFeatures.empty()) return false; // Now build up the set of caller features and verify that all the required // features are there. llvm::StringMap CallerFeatureMap; CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD); // If we have at least one of the features in the feature list return // true, otherwise return false. return std::all_of( ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) { SmallVector OrFeatures; Feature.split(OrFeatures, '|'); return llvm::any_of(OrFeatures, [&](StringRef Feature) { if (!CallerFeatureMap.lookup(Feature)) { FirstMissing = Feature.str(); return false; } return true; }); }); } // Emits an error if we don't have a valid set of target features for the // called function. void CodeGenFunction::checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl) { return checkTargetFeatures(E->getBeginLoc(), TargetDecl); } // Emits an error if we don't have a valid set of target features for the // called function. void CodeGenFunction::checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl) { // Early exit if this is an indirect call. if (!TargetDecl) return; // Get the current enclosing function if it exists. If it doesn't // we can't check the target features anyhow. const FunctionDecl *FD = dyn_cast_or_null(CurCodeDecl); if (!FD) return; // Grab the required features for the call. For a builtin this is listed in // the td file with the default cpu, for an always_inline function this is any // listed cpu and any listed features. unsigned BuiltinID = TargetDecl->getBuiltinID(); std::string MissingFeature; if (BuiltinID) { SmallVector ReqFeatures; const char *FeatureList = CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID); // Return if the builtin doesn't have any required features. if (!FeatureList || StringRef(FeatureList) == "") return; StringRef(FeatureList).split(ReqFeatures, ','); if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature)) CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature) << TargetDecl->getDeclName() << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID); } else if (!TargetDecl->isMultiVersion() && TargetDecl->hasAttr()) { // Get the required features for the callee. const TargetAttr *TD = TargetDecl->getAttr(); ParsedTargetAttr ParsedAttr = CGM.getContext().filterFunctionTargetAttrs(TD); SmallVector ReqFeatures; llvm::StringMap CalleeFeatureMap; CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl); for (const auto &F : ParsedAttr.Features) { if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1))) ReqFeatures.push_back(StringRef(F).substr(1)); } for (const auto &F : CalleeFeatureMap) { // Only positive features are "required". 
if (F.getValue()) ReqFeatures.push_back(F.getKey()); } if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature)) CGM.getDiags().Report(Loc, diag::err_function_needs_feature) << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature; } } void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) { if (!CGM.getCodeGenOpts().SanitizeStats) return; llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint()); IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation()); CGM.getSanStats().create(IRB, SSK); } llvm::Value * CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) { llvm::Value *Condition = nullptr; if (!RO.Conditions.Architecture.empty()) Condition = EmitX86CpuIs(RO.Conditions.Architecture); if (!RO.Conditions.Features.empty()) { llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features); Condition = Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond; } return Condition; } static void CreateMultiVersionResolverReturn(CodeGenModule &CGM, llvm::Function *Resolver, CGBuilderTy &Builder, llvm::Function *FuncToReturn, bool SupportsIFunc) { if (SupportsIFunc) { Builder.CreateRet(FuncToReturn); return; } llvm::SmallVector Args; llvm::for_each(Resolver->args(), [&](llvm::Argument &Arg) { Args.push_back(&Arg); }); llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args); Result->setTailCallKind(llvm::CallInst::TCK_MustTail); if (Resolver->getReturnType()->isVoidTy()) Builder.CreateRetVoid(); else Builder.CreateRet(Result); } void CodeGenFunction::EmitMultiVersionResolver( llvm::Function *Resolver, ArrayRef Options) { assert(getContext().getTargetInfo().getTriple().isX86() && "Only implemented for x86 targets"); bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc(); // Main function's basic block. llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver); Builder.SetInsertPoint(CurBlock); EmitX86CpuInit(); for (const MultiVersionResolverOption &RO : Options) { Builder.SetInsertPoint(CurBlock); llvm::Value *Condition = FormResolverCondition(RO); // The 'default' or 'generic' case. if (!Condition) { assert(&RO == Options.end() - 1 && "Default or Generic case must be last"); CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function, SupportsIFunc); return; } llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver); CGBuilderTy RetBuilder(*this, RetBlock); CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function, SupportsIFunc); CurBlock = createBasicBlock("resolver_else", Resolver); Builder.CreateCondBr(Condition, RetBlock, CurBlock); } // If no generic/default, emit an unreachable. Builder.SetInsertPoint(CurBlock); llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap); TrapCall->setDoesNotReturn(); TrapCall->setDoesNotThrow(); Builder.CreateUnreachable(); Builder.ClearInsertionPoint(); } // Loc - where the diagnostic will point, where in the source code this // alignment has failed. // SecondaryLoc - if present (will be present if sufficiently different from // Loc), the diagnostic will additionally point a "Note:" to this location. // It should be the location where the __attribute__((assume_aligned)) // was written e.g. 
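// Illustrative example (user code, not part of this file): for
//   void *p = __builtin_assume_aligned(q, 32);
// Loc points at the call, and under -fsanitize=alignment the check emitted
// below verifies at run time that q really is 32-byte aligned before the
// corresponding llvm.assume is kept.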
void CodeGenFunction::emitAlignmentAssumptionCheck( llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation SecondaryLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption) { assert(Assumption && isa<llvm::CallInst>(Assumption) && cast<llvm::CallInst>(Assumption)->getCalledOperand() == llvm::Intrinsic::getDeclaration( Builder.GetInsertBlock()->getParent()->getParent(), llvm::Intrinsic::assume) && "Assumption should be a call to llvm.assume()."); assert(&(Builder.GetInsertBlock()->back()) == Assumption && "Assumption should be the last instruction of the basic block, " "since the basic block is still being generated."); if (!SanOpts.has(SanitizerKind::Alignment)) return; // Don't check pointers to volatile data. The behavior here is implementation- // defined. if (Ty->getPointeeType().isVolatileQualified()) return; // We need to temporarily remove the assumption so we can insert the // sanitizer check before it, else the check will be dropped by optimizations. Assumption->removeFromParent(); { SanitizerScope SanScope(this); if (!OffsetValue) OffsetValue = Builder.getInt1(0); // no offset. llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc), EmitCheckSourceLocation(SecondaryLoc), EmitCheckTypeDescriptor(Ty)}; llvm::Value *DynamicData[] = {EmitCheckValue(Ptr), EmitCheckValue(Alignment), EmitCheckValue(OffsetValue)}; EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)}, SanitizerHandler::AlignmentAssumption, StaticData, DynamicData); } // We are now in the (new, empty) "cont" basic block. // Reintroduce the assumption. Builder.Insert(Assumption); // FIXME: Assumption still has its original basic block as its Parent. } llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) { if (CGDebugInfo *DI = getDebugInfo()) return DI->SourceLocToDebugLoc(Location); return llvm::DebugLoc(); } diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp index d301e6c732ab..610a77c0e6f2 100644 --- a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp +++ b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp @@ -1,19194 +1,19193 @@ //===--- SemaExpr.cpp - Semantic Analysis for Expressions -----------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for expressions.
// //===----------------------------------------------------------------------===// #include "TreeTransform.h" #include "UsedDeclVisitor.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/ASTMutationListener.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/FixedPoint.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/LiteralSupport.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/DelayedDiagnostic.h" #include "clang/Sema/Designator.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Overload.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaFixItUtils.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/Template.h" #include "llvm/Support/ConvertUTF.h" #include "llvm/Support/SaveAndRestore.h" using namespace clang; using namespace sema; using llvm::RoundingMode; /// Determine whether the use of this declaration is valid, without /// emitting diagnostics. bool Sema::CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid) { // See if this is an auto-typed variable whose initializer we are parsing. if (ParsingInitForAutoVars.count(D)) return false; // See if this is a deleted function. if (FunctionDecl *FD = dyn_cast(D)) { if (FD->isDeleted()) return false; // If the function has a deduced return type, and we can't deduce it, // then we can't use it either. if (getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() && DeduceReturnType(FD, SourceLocation(), /*Diagnose*/ false)) return false; // See if this is an aligned allocation/deallocation function that is // unavailable. if (TreatUnavailableAsInvalid && isUnavailableAlignedAllocationFunction(*FD)) return false; } // See if this function is unavailable. if (TreatUnavailableAsInvalid && D->getAvailability() == AR_Unavailable && cast(CurContext)->getAvailability() != AR_Unavailable) return false; return true; } static void DiagnoseUnusedOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc) { // Warn if this is used but marked unused. if (const auto *A = D->getAttr()) { // [[maybe_unused]] should not diagnose uses, but __attribute__((unused)) // should diagnose them. if (A->getSemanticSpelling() != UnusedAttr::CXX11_maybe_unused && A->getSemanticSpelling() != UnusedAttr::C2x_maybe_unused) { const Decl *DC = cast_or_null(S.getCurObjCLexicalContext()); if (DC && !DC->hasAttr()) S.Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName(); } } } /// Emit a note explaining that this function is deleted. void Sema::NoteDeletedFunction(FunctionDecl *Decl) { assert(Decl && Decl->isDeleted()); if (Decl->isDefaulted()) { // If the method was explicitly defaulted, point at that declaration. if (!Decl->isImplicit()) Diag(Decl->getLocation(), diag::note_implicitly_deleted); // Try to diagnose why this special member function was implicitly // deleted. This might fail, if that reason no longer applies. 
DiagnoseDeletedDefaultedFunction(Decl); return; } auto *Ctor = dyn_cast(Decl); if (Ctor && Ctor->isInheritingConstructor()) return NoteDeletedInheritingConstructor(Ctor); Diag(Decl->getLocation(), diag::note_availability_specified_here) << Decl << 1; } /// Determine whether a FunctionDecl was ever declared with an /// explicit storage class. static bool hasAnyExplicitStorageClass(const FunctionDecl *D) { for (auto I : D->redecls()) { if (I->getStorageClass() != SC_None) return true; } return false; } /// Check whether we're in an extern inline function and referring to a /// variable or function with internal linkage (C11 6.7.4p3). /// /// This is only a warning because we used to silently accept this code, but /// in many cases it will not behave correctly. This is not enabled in C++ mode /// because the restriction language is a bit weaker (C++11 [basic.def.odr]p6) /// and so while there may still be user mistakes, most of the time we can't /// prove that there are errors. static void diagnoseUseOfInternalDeclInInlineFunction(Sema &S, const NamedDecl *D, SourceLocation Loc) { // This is disabled under C++; there are too many ways for this to fire in // contexts where the warning is a false positive, or where it is technically // correct but benign. if (S.getLangOpts().CPlusPlus) return; // Check if this is an inlined function or method. FunctionDecl *Current = S.getCurFunctionDecl(); if (!Current) return; if (!Current->isInlined()) return; if (!Current->isExternallyVisible()) return; // Check if the decl has internal linkage. if (D->getFormalLinkage() != InternalLinkage) return; // Downgrade from ExtWarn to Extension if // (1) the supposedly external inline function is in the main file, // and probably won't be included anywhere else. // (2) the thing we're referencing is a pure function. // (3) the thing we're referencing is another inline function. // This last can give us false negatives, but it's better than warning on // wrappers for simple C library functions. const FunctionDecl *UsedFn = dyn_cast(D); bool DowngradeWarning = S.getSourceManager().isInMainFile(Loc); if (!DowngradeWarning && UsedFn) DowngradeWarning = UsedFn->isInlined() || UsedFn->hasAttr(); S.Diag(Loc, DowngradeWarning ? diag::ext_internal_in_extern_inline_quiet : diag::ext_internal_in_extern_inline) << /*IsVar=*/!UsedFn << D; S.MaybeSuggestAddingStaticToDecl(Current); S.Diag(D->getCanonicalDecl()->getLocation(), diag::note_entity_declared_at) << D; } void Sema::MaybeSuggestAddingStaticToDecl(const FunctionDecl *Cur) { const FunctionDecl *First = Cur->getFirstDecl(); // Suggest "static" on the function, if possible. if (!hasAnyExplicitStorageClass(First)) { SourceLocation DeclBegin = First->getSourceRange().getBegin(); Diag(DeclBegin, diag::note_convert_inline_to_static) << Cur << FixItHint::CreateInsertion(DeclBegin, "static "); } } /// Determine whether the use of this declaration is valid, and /// emit any corresponding diagnostics. /// /// This routine diagnoses various problems with referencing /// declarations that can occur when using a declaration. For example, /// it might warn if a deprecated or unavailable declaration is being /// used, or produce an error (and return true) if a C++0x deleted /// function is being used. /// /// \returns true if there was an error (this declaration cannot be /// referenced), false otherwise. 
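/// Illustrative example (user code): referencing a function declared as
///   void f() = delete;
/// reaches the deleted-function branch below, which emits
/// err_deleted_function_use plus a note and returns true.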
/// bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks, ObjCInterfaceDecl *ClassReceiver) { SourceLocation Loc = Locs.front(); if (getLangOpts().CPlusPlus && isa(D)) { // If there were any diagnostics suppressed by template argument deduction, // emit them now. auto Pos = SuppressedDiagnostics.find(D->getCanonicalDecl()); if (Pos != SuppressedDiagnostics.end()) { for (const PartialDiagnosticAt &Suppressed : Pos->second) Diag(Suppressed.first, Suppressed.second); // Clear out the list of suppressed diagnostics, so that we don't emit // them again for this specialization. However, we don't obsolete this // entry from the table, because we want to avoid ever emitting these // diagnostics again. Pos->second.clear(); } // C++ [basic.start.main]p3: // The function 'main' shall not be used within a program. if (cast(D)->isMain()) Diag(Loc, diag::ext_main_used); diagnoseUnavailableAlignedAllocation(*cast(D), Loc); } // See if this is an auto-typed variable whose initializer we are parsing. if (ParsingInitForAutoVars.count(D)) { if (isa(D)) { Diag(Loc, diag::err_binding_cannot_appear_in_own_initializer) << D->getDeclName(); } else { Diag(Loc, diag::err_auto_variable_cannot_appear_in_own_initializer) << D->getDeclName() << cast(D)->getType(); } return true; } if (FunctionDecl *FD = dyn_cast(D)) { // See if this is a deleted function. if (FD->isDeleted()) { auto *Ctor = dyn_cast(FD); if (Ctor && Ctor->isInheritingConstructor()) Diag(Loc, diag::err_deleted_inherited_ctor_use) << Ctor->getParent() << Ctor->getInheritedConstructor().getConstructor()->getParent(); else Diag(Loc, diag::err_deleted_function_use); NoteDeletedFunction(FD); return true; } // [expr.prim.id]p4 // A program that refers explicitly or implicitly to a function with a // trailing requires-clause whose constraint-expression is not satisfied, // other than to declare it, is ill-formed. [...] // // See if this is a function with constraints that need to be satisfied. // Check this before deducing the return type, as it might instantiate the // definition. if (FD->getTrailingRequiresClause()) { ConstraintSatisfaction Satisfaction; if (CheckFunctionConstraints(FD, Satisfaction, Loc)) // A diagnostic will have already been generated (non-constant // constraint expression, for example) return true; if (!Satisfaction.IsSatisfied) { Diag(Loc, diag::err_reference_to_function_with_unsatisfied_constraints) << D; DiagnoseUnsatisfiedConstraint(Satisfaction); return true; } } // If the function has a deduced return type, and we can't deduce it, // then we can't use it either. if (getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() && DeduceReturnType(FD, Loc)) return true; if (getLangOpts().CUDA && !CheckCUDACall(Loc, FD)) return true; if (getLangOpts().SYCLIsDevice && !checkSYCLDeviceFunction(Loc, FD)) return true; } if (auto *MD = dyn_cast(D)) { // Lambdas are only default-constructible or assignable in C++2a onwards. 
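// Illustrative example (user code): under -std=c++20, default-constructing or
// assigning a closure type, e.g.
//   auto l = []{}; decltype(l) l2; l = []{};
// triggers the compatibility warning below, since C++17 rejects these forms
// while C++20 allows them for captureless lambdas.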
if (MD->getParent()->isLambda() && ((isa(MD) && cast(MD)->isDefaultConstructor()) || MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator())) { Diag(Loc, diag::warn_cxx17_compat_lambda_def_ctor_assign) << !isa(MD); } } auto getReferencedObjCProp = [](const NamedDecl *D) -> const ObjCPropertyDecl * { if (const auto *MD = dyn_cast(D)) return MD->findPropertyDecl(); return nullptr; }; if (const ObjCPropertyDecl *ObjCPDecl = getReferencedObjCProp(D)) { if (diagnoseArgIndependentDiagnoseIfAttrs(ObjCPDecl, Loc)) return true; } else if (diagnoseArgIndependentDiagnoseIfAttrs(D, Loc)) { return true; } // [OpenMP 4.0], 2.15 declare reduction Directive, Restrictions // Only the variables omp_in and omp_out are allowed in the combiner. // Only the variables omp_priv and omp_orig are allowed in the // initializer-clause. auto *DRD = dyn_cast(CurContext); if (LangOpts.OpenMP && DRD && !CurContext->containsDecl(D) && isa(D)) { Diag(Loc, diag::err_omp_wrong_var_in_declare_reduction) << getCurFunction()->HasOMPDeclareReductionCombiner; Diag(D->getLocation(), diag::note_entity_declared_at) << D; return true; } // [OpenMP 5.0], 2.19.7.3. declare mapper Directive, Restrictions // List-items in map clauses on this construct may only refer to the declared // variable var and entities that could be referenced by a procedure defined // at the same location auto *DMD = dyn_cast(CurContext); if (LangOpts.OpenMP && DMD && !CurContext->containsDecl(D) && isa(D)) { Diag(Loc, diag::err_omp_declare_mapper_wrong_var) << DMD->getVarName().getAsString(); Diag(D->getLocation(), diag::note_entity_declared_at) << D; return true; } DiagnoseAvailabilityOfDecl(D, Locs, UnknownObjCClass, ObjCPropertyAccess, AvoidPartialAvailabilityChecks, ClassReceiver); DiagnoseUnusedOfDecl(*this, D, Loc); diagnoseUseOfInternalDeclInInlineFunction(*this, D, Loc); if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)) { if (const auto *VD = dyn_cast(D)) checkDeviceDecl(VD, Loc); if (!Context.getTargetInfo().isTLSSupported()) if (const auto *VD = dyn_cast(D)) if (VD->getTLSKind() != VarDecl::TLS_None) targetDiag(*Locs.begin(), diag::err_thread_unsupported); } if (isa(D) && isa(D->getDeclContext()) && !isUnevaluatedContext()) { // C++ [expr.prim.req.nested] p3 // A local parameter shall only appear as an unevaluated operand // (Clause 8) within the constraint-expression. Diag(Loc, diag::err_requires_expr_parameter_referenced_in_evaluated_context) << D; Diag(D->getLocation(), diag::note_entity_declared_at) << D; return true; } return false; } /// DiagnoseSentinelCalls - This routine checks whether a call or /// message-send is to a declaration with the sentinel attribute, and /// if so, it checks that the requirements of the sentinel are /// satisfied. void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef Args) { const SentinelAttr *attr = D->getAttr(); if (!attr) return; // The number of formal parameters of the declaration. unsigned numFormalParams; // The kind of declaration. This is also an index into a %select in // the diagnostic. 
enum CalleeType { CT_Function, CT_Method, CT_Block } calleeType; if (ObjCMethodDecl *MD = dyn_cast(D)) { numFormalParams = MD->param_size(); calleeType = CT_Method; } else if (FunctionDecl *FD = dyn_cast(D)) { numFormalParams = FD->param_size(); calleeType = CT_Function; } else if (isa(D)) { QualType type = cast(D)->getType(); const FunctionType *fn = nullptr; if (const PointerType *ptr = type->getAs()) { fn = ptr->getPointeeType()->getAs(); if (!fn) return; calleeType = CT_Function; } else if (const BlockPointerType *ptr = type->getAs()) { fn = ptr->getPointeeType()->castAs(); calleeType = CT_Block; } else { return; } if (const FunctionProtoType *proto = dyn_cast(fn)) { numFormalParams = proto->getNumParams(); } else { numFormalParams = 0; } } else { return; } // "nullPos" is the number of formal parameters at the end which // effectively count as part of the variadic arguments. This is // useful if you would prefer to not have *any* formal parameters, // but the language forces you to have at least one. unsigned nullPos = attr->getNullPos(); assert((nullPos == 0 || nullPos == 1) && "invalid null position on sentinel"); numFormalParams = (nullPos > numFormalParams ? 0 : numFormalParams - nullPos); // The number of arguments which should follow the sentinel. unsigned numArgsAfterSentinel = attr->getSentinel(); // If there aren't enough arguments for all the formal parameters, // the sentinel, and the args after the sentinel, complain. if (Args.size() < numFormalParams + numArgsAfterSentinel + 1) { Diag(Loc, diag::warn_not_enough_argument) << D->getDeclName(); Diag(D->getLocation(), diag::note_sentinel_here) << int(calleeType); return; } // Otherwise, find the sentinel expression. Expr *sentinelExpr = Args[Args.size() - numArgsAfterSentinel - 1]; if (!sentinelExpr) return; if (sentinelExpr->isValueDependent()) return; if (Context.isSentinelNullExpr(sentinelExpr)) return; // Pick a reasonable string to insert. Optimistically use 'nil', 'nullptr', // or 'NULL' if those are actually defined in the context. Only use // 'nil' for ObjC methods, where it's much more likely that the // variadic arguments form a list of object pointers. SourceLocation MissingNilLoc = getLocForEndOfToken(sentinelExpr->getEndLoc()); std::string NullValue; if (calleeType == CT_Method && PP.isMacroDefined("nil")) NullValue = "nil"; else if (getLangOpts().CPlusPlus11) NullValue = "nullptr"; else if (PP.isMacroDefined("NULL")) NullValue = "NULL"; else NullValue = "(void*) 0"; if (MissingNilLoc.isInvalid()) Diag(Loc, diag::warn_missing_sentinel) << int(calleeType); else Diag(MissingNilLoc, diag::warn_missing_sentinel) << int(calleeType) << FixItHint::CreateInsertion(MissingNilLoc, ", " + NullValue); Diag(D->getLocation(), diag::note_sentinel_here) << int(calleeType); } SourceRange Sema::getExprRange(Expr *E) const { return E ? E->getSourceRange() : SourceRange(); } //===----------------------------------------------------------------------===// // Standard Promotions and Conversions //===----------------------------------------------------------------------===// /// DefaultFunctionArrayConversion (C99 6.3.2.1p3, C99 6.3.2.1p4). ExprResult Sema::DefaultFunctionArrayConversion(Expr *E, bool Diagnose) { // Handle any placeholder expressions which made it here. 
if (E->getType()->isPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(E); if (result.isInvalid()) return ExprError(); E = result.get(); } QualType Ty = E->getType(); assert(!Ty.isNull() && "DefaultFunctionArrayConversion - missing type"); if (Ty->isFunctionType()) { if (auto *DRE = dyn_cast(E->IgnoreParenCasts())) if (auto *FD = dyn_cast(DRE->getDecl())) if (!checkAddressOfFunctionIsAvailable(FD, Diagnose, E->getExprLoc())) return ExprError(); E = ImpCastExprToType(E, Context.getPointerType(Ty), CK_FunctionToPointerDecay).get(); } else if (Ty->isArrayType()) { // In C90 mode, arrays only promote to pointers if the array expression is // an lvalue. The relevant legalese is C90 6.2.2.1p3: "an lvalue that has // type 'array of type' is converted to an expression that has type 'pointer // to type'...". In C99 this was changed to: C99 6.3.2.1p3: "an expression // that has type 'array of type' ...". The relevant change is "an lvalue" // (C90) to "an expression" (C99). // // C++ 4.2p1: // An lvalue or rvalue of type "array of N T" or "array of unknown bound of // T" can be converted to an rvalue of type "pointer to T". // if (getLangOpts().C99 || getLangOpts().CPlusPlus || E->isLValue()) E = ImpCastExprToType(E, Context.getArrayDecayedType(Ty), CK_ArrayToPointerDecay).get(); } return E; } static void CheckForNullPointerDereference(Sema &S, Expr *E) { // Check to see if we are dereferencing a null pointer. If so, // and if not volatile-qualified, this is undefined behavior that the // optimizer will delete, so warn about it. People sometimes try to use this // to get a deterministic trap and are surprised by clang's behavior. This // only handles the pattern "*null", which is a very syntactic check. const auto *UO = dyn_cast(E->IgnoreParenCasts()); if (UO && UO->getOpcode() == UO_Deref && UO->getSubExpr()->getType()->isPointerType()) { const LangAS AS = UO->getSubExpr()->getType()->getPointeeType().getAddressSpace(); if ((!isTargetAddressSpace(AS) || (isTargetAddressSpace(AS) && toTargetAddressSpace(AS) == 0)) && UO->getSubExpr()->IgnoreParenCasts()->isNullPointerConstant( S.Context, Expr::NPC_ValueDependentIsNotNull) && !UO->getType().isVolatileQualified()) { S.DiagRuntimeBehavior(UO->getOperatorLoc(), UO, S.PDiag(diag::warn_indirection_through_null) << UO->getSubExpr()->getSourceRange()); S.DiagRuntimeBehavior(UO->getOperatorLoc(), UO, S.PDiag(diag::note_indirection_through_null)); } } } static void DiagnoseDirectIsaAccess(Sema &S, const ObjCIvarRefExpr *OIRE, SourceLocation AssignLoc, const Expr* RHS) { const ObjCIvarDecl *IV = OIRE->getDecl(); if (!IV) return; DeclarationName MemberName = IV->getDeclName(); IdentifierInfo *Member = MemberName.getAsIdentifierInfo(); if (!Member || !Member->isStr("isa")) return; const Expr *Base = OIRE->getBase(); QualType BaseType = Base->getType(); if (OIRE->isArrow()) BaseType = BaseType->getPointeeType(); if (const ObjCObjectType *OTy = BaseType->getAs()) if (ObjCInterfaceDecl *IDecl = OTy->getInterface()) { ObjCInterfaceDecl *ClassDeclared = nullptr; ObjCIvarDecl *IV = IDecl->lookupInstanceVariable(Member, ClassDeclared); if (!ClassDeclared->getSuperClass() && (*ClassDeclared->ivar_begin()) == IV) { if (RHS) { NamedDecl *ObjectSetClass = S.LookupSingleName(S.TUScope, &S.Context.Idents.get("object_setClass"), SourceLocation(), S.LookupOrdinaryName); if (ObjectSetClass) { SourceLocation RHSLocEnd = S.getLocForEndOfToken(RHS->getEndLoc()); S.Diag(OIRE->getExprLoc(), diag::warn_objc_isa_assign) << FixItHint::CreateInsertion(OIRE->getBeginLoc(), 
"object_setClass(") << FixItHint::CreateReplacement( SourceRange(OIRE->getOpLoc(), AssignLoc), ",") << FixItHint::CreateInsertion(RHSLocEnd, ")"); } else S.Diag(OIRE->getLocation(), diag::warn_objc_isa_assign); } else { NamedDecl *ObjectGetClass = S.LookupSingleName(S.TUScope, &S.Context.Idents.get("object_getClass"), SourceLocation(), S.LookupOrdinaryName); if (ObjectGetClass) S.Diag(OIRE->getExprLoc(), diag::warn_objc_isa_use) << FixItHint::CreateInsertion(OIRE->getBeginLoc(), "object_getClass(") << FixItHint::CreateReplacement( SourceRange(OIRE->getOpLoc(), OIRE->getEndLoc()), ")"); else S.Diag(OIRE->getLocation(), diag::warn_objc_isa_use); } S.Diag(IV->getLocation(), diag::note_ivar_decl); } } } ExprResult Sema::DefaultLvalueConversion(Expr *E) { // Handle any placeholder expressions which made it here. if (E->getType()->isPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(E); if (result.isInvalid()) return ExprError(); E = result.get(); } // C++ [conv.lval]p1: // A glvalue of a non-function, non-array type T can be // converted to a prvalue. if (!E->isGLValue()) return E; QualType T = E->getType(); assert(!T.isNull() && "r-value conversion on typeless expression?"); // lvalue-to-rvalue conversion cannot be applied to function or array types. if (T->isFunctionType() || T->isArrayType()) return E; // We don't want to throw lvalue-to-rvalue casts on top of // expressions of certain types in C++. if (getLangOpts().CPlusPlus && (E->getType() == Context.OverloadTy || T->isDependentType() || T->isRecordType())) return E; // The C standard is actually really unclear on this point, and // DR106 tells us what the result should be but not why. It's // generally best to say that void types just doesn't undergo // lvalue-to-rvalue at all. Note that expressions of unqualified // 'void' type are never l-values, but qualified void can be. if (T->isVoidType()) return E; // OpenCL usually rejects direct accesses to values of 'half' type. if (getLangOpts().OpenCL && !getOpenCLOptions().isEnabled("cl_khr_fp16") && T->isHalfType()) { Diag(E->getExprLoc(), diag::err_opencl_half_load_store) << 0 << T; return ExprError(); } CheckForNullPointerDereference(*this, E); if (const ObjCIsaExpr *OISA = dyn_cast(E->IgnoreParenCasts())) { NamedDecl *ObjectGetClass = LookupSingleName(TUScope, &Context.Idents.get("object_getClass"), SourceLocation(), LookupOrdinaryName); if (ObjectGetClass) Diag(E->getExprLoc(), diag::warn_objc_isa_use) << FixItHint::CreateInsertion(OISA->getBeginLoc(), "object_getClass(") << FixItHint::CreateReplacement( SourceRange(OISA->getOpLoc(), OISA->getIsaMemberLoc()), ")"); else Diag(E->getExprLoc(), diag::warn_objc_isa_use); } else if (const ObjCIvarRefExpr *OIRE = dyn_cast(E->IgnoreParenCasts())) DiagnoseDirectIsaAccess(*this, OIRE, SourceLocation(), /* Expr*/nullptr); // C++ [conv.lval]p1: // [...] If T is a non-class type, the type of the prvalue is the // cv-unqualified version of T. Otherwise, the type of the // rvalue is T. // // C99 6.3.2.1p2: // If the lvalue has qualified type, the value has the unqualified // version of the type of the lvalue; otherwise, the value has the // type of the lvalue. if (T.hasQualifiers()) T = T.getUnqualifiedType(); // Under the MS ABI, lock down the inheritance model now. 
if (T->isMemberPointerType() && Context.getTargetInfo().getCXXABI().isMicrosoft()) (void)isCompleteType(E->getExprLoc(), T); ExprResult Res = CheckLValueToRValueConversionOperand(E); if (Res.isInvalid()) return Res; E = Res.get(); // Loading a __weak object implicitly retains the value, so we need a cleanup to // balance that. if (E->getType().getObjCLifetime() == Qualifiers::OCL_Weak) Cleanup.setExprNeedsCleanups(true); if (E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct) Cleanup.setExprNeedsCleanups(true); // C++ [conv.lval]p3: // If T is cv std::nullptr_t, the result is a null pointer constant. CastKind CK = T->isNullPtrType() ? CK_NullToPointer : CK_LValueToRValue; Res = ImplicitCastExpr::Create(Context, T, CK, E, nullptr, VK_RValue); // C11 6.3.2.1p2: // ... if the lvalue has atomic type, the value has the non-atomic version // of the type of the lvalue ... if (const AtomicType *Atomic = T->getAs()) { T = Atomic->getValueType().getUnqualifiedType(); Res = ImplicitCastExpr::Create(Context, T, CK_AtomicToNonAtomic, Res.get(), nullptr, VK_RValue); } return Res; } ExprResult Sema::DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose) { ExprResult Res = DefaultFunctionArrayConversion(E, Diagnose); if (Res.isInvalid()) return ExprError(); Res = DefaultLvalueConversion(Res.get()); if (Res.isInvalid()) return ExprError(); return Res; } /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult Sema::CallExprUnaryConversions(Expr *E) { QualType Ty = E->getType(); ExprResult Res = E; // Only do implicit cast for a function type, but not for a pointer // to function type. if (Ty->isFunctionType()) { Res = ImpCastExprToType(E, Context.getPointerType(Ty), CK_FunctionToPointerDecay); if (Res.isInvalid()) return ExprError(); } Res = DefaultLvalueConversion(Res.get()); if (Res.isInvalid()) return ExprError(); return Res.get(); } /// UsualUnaryConversions - Performs various conversions that are common to most /// operators (C99 6.3). The conversions of array and function types are /// sometimes suppressed. For example, the array->pointer conversion doesn't /// apply if the array is an argument to the sizeof or address (&) operators. /// In these instances, this routine should *not* be called. ExprResult Sema::UsualUnaryConversions(Expr *E) { // First, convert to an r-value. ExprResult Res = DefaultFunctionArrayLvalueConversion(E); if (Res.isInvalid()) return ExprError(); E = Res.get(); QualType Ty = E->getType(); assert(!Ty.isNull() && "UsualUnaryConversions - missing type"); // Half FP have to be promoted to float unless it is natively supported if (Ty->isHalfType() && !getLangOpts().NativeHalfType) return ImpCastExprToType(Res.get(), Context.FloatTy, CK_FloatingCast); // Try to perform integral promotions if the object has a theoretically // promotable type. if (Ty->isIntegralOrUnscopedEnumerationType()) { // C99 6.3.1.1p2: // // The following may be used in an expression wherever an int or // unsigned int may be used: // - an object or expression with an integer type whose integer // conversion rank is less than or equal to the rank of int // and unsigned int. // - A bit-field of type _Bool, int, signed int, or unsigned int. // // If an int can represent all values of the original type, the // value is converted to an int; otherwise, it is converted to an // unsigned int. These are called the integer promotions. All // other types are unchanged by the integer promotions. 
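// Illustrative example: given 'short s;', the operand 's' in 's + s' is
// promoted to 'int' here, so the addition is performed at 'int' width; a
// bit-field of type 'unsigned short' whose values all fit in 'int' promotes
// the same way via the isPromotableBitField() path below.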
QualType PTy = Context.isPromotableBitField(E); if (!PTy.isNull()) { E = ImpCastExprToType(E, PTy, CK_IntegralCast).get(); return E; } if (Ty->isPromotableIntegerType()) { QualType PT = Context.getPromotedIntegerType(Ty); E = ImpCastExprToType(E, PT, CK_IntegralCast).get(); return E; } } return E; } /// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that /// do not have a prototype. Arguments that have type float or __fp16 /// are promoted to double. All other argument types are converted by /// UsualUnaryConversions(). ExprResult Sema::DefaultArgumentPromotion(Expr *E) { QualType Ty = E->getType(); assert(!Ty.isNull() && "DefaultArgumentPromotion - missing type"); ExprResult Res = UsualUnaryConversions(E); if (Res.isInvalid()) return ExprError(); E = Res.get(); // If this is a 'float' or '__fp16' (CVR qualified or typedef) // promote to double. // Note that default argument promotion applies only to float (and // half/fp16); it does not apply to _Float16. const BuiltinType *BTy = Ty->getAs(); if (BTy && (BTy->getKind() == BuiltinType::Half || BTy->getKind() == BuiltinType::Float)) { if (getLangOpts().OpenCL && !getOpenCLOptions().isEnabled("cl_khr_fp64")) { if (BTy->getKind() == BuiltinType::Half) { E = ImpCastExprToType(E, Context.FloatTy, CK_FloatingCast).get(); } } else { E = ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast).get(); } } // C++ performs lvalue-to-rvalue conversion as a default argument // promotion, even on class types, but note: // C++11 [conv.lval]p2: // When an lvalue-to-rvalue conversion occurs in an unevaluated // operand or a subexpression thereof the value contained in the // referenced object is not accessed. Otherwise, if the glvalue // has a class type, the conversion copy-initializes a temporary // of type T from the glvalue and the result of the conversion // is a prvalue for the temporary. // FIXME: add some way to gate this entire thing for correctness in // potentially potentially evaluated contexts. if (getLangOpts().CPlusPlus && E->isGLValue() && !isUnevaluatedContext()) { ExprResult Temp = PerformCopyInitialization( InitializedEntity::InitializeTemporary(E->getType()), E->getExprLoc(), E); if (Temp.isInvalid()) return ExprError(); E = Temp.get(); } return E; } /// Determine the degree of POD-ness for an expression. /// Incomplete types are considered POD, since this check can be performed /// when we're in an unevaluated context. Sema::VarArgKind Sema::isValidVarArgType(const QualType &Ty) { if (Ty->isIncompleteType()) { // C++11 [expr.call]p7: // After these conversions, if the argument does not have arithmetic, // enumeration, pointer, pointer to member, or class type, the program // is ill-formed. // // Since we've already performed array-to-pointer and function-to-pointer // decay, the only such type in C++ is cv void. This also handles // initializer lists as variadic arguments. if (Ty->isVoidType()) return VAK_Invalid; if (Ty->isObjCObjectType()) return VAK_Invalid; return VAK_Valid; } if (Ty.isDestructedType() == QualType::DK_nontrivial_c_struct) return VAK_Invalid; if (Ty.isCXX98PODType(Context)) return VAK_Valid; // C++11 [expr.call]p7: // Passing a potentially-evaluated argument of class type (Clause 9) // having a non-trivial copy constructor, a non-trivial move constructor, // or a non-trivial destructor, with no corresponding parameter, // is conditionally-supported with implementation-defined semantics. 
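// Illustrative example (user code): passing a std::string to a C-style
// variadic function involves a non-trivial copy constructor and destructor,
// so it does not qualify for VAK_ValidInCXX11 below and (outside MSVC
// compatibility mode) ends up as VAK_Undefined.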
if (getLangOpts().CPlusPlus11 && !Ty->isDependentType()) if (CXXRecordDecl *Record = Ty->getAsCXXRecordDecl()) if (!Record->hasNonTrivialCopyConstructor() && !Record->hasNonTrivialMoveConstructor() && !Record->hasNonTrivialDestructor()) return VAK_ValidInCXX11; if (getLangOpts().ObjCAutoRefCount && Ty->isObjCLifetimeType()) return VAK_Valid; if (Ty->isObjCObjectType()) return VAK_Invalid; if (getLangOpts().MSVCCompat) return VAK_MSVCUndefined; // FIXME: In C++11, these cases are conditionally-supported, meaning we're // permitted to reject them. We should consider doing so. return VAK_Undefined; } void Sema::checkVariadicArgument(const Expr *E, VariadicCallType CT) { // Don't allow one to pass an Objective-C interface to a vararg. const QualType &Ty = E->getType(); VarArgKind VAK = isValidVarArgType(Ty); // Complain about passing non-POD types through varargs. switch (VAK) { case VAK_ValidInCXX11: DiagRuntimeBehavior( E->getBeginLoc(), nullptr, PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg) << Ty << CT); LLVM_FALLTHROUGH; case VAK_Valid: if (Ty->isRecordType()) { // This is unlikely to be what the user intended. If the class has a // 'c_str' member function, the user probably meant to call that. DiagRuntimeBehavior(E->getBeginLoc(), nullptr, PDiag(diag::warn_pass_class_arg_to_vararg) << Ty << CT << hasCStrMethod(E) << ".c_str()"); } break; case VAK_Undefined: case VAK_MSVCUndefined: DiagRuntimeBehavior(E->getBeginLoc(), nullptr, PDiag(diag::warn_cannot_pass_non_pod_arg_to_vararg) << getLangOpts().CPlusPlus11 << Ty << CT); break; case VAK_Invalid: if (Ty.isDestructedType() == QualType::DK_nontrivial_c_struct) Diag(E->getBeginLoc(), diag::err_cannot_pass_non_trivial_c_struct_to_vararg) << Ty << CT; else if (Ty->isObjCObjectType()) DiagRuntimeBehavior(E->getBeginLoc(), nullptr, PDiag(diag::err_cannot_pass_objc_interface_to_vararg) << Ty << CT); else Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg) << isa(E) << Ty << CT; break; } } /// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but /// will create a trap if the resulting type is not a POD type. ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl) { if (const BuiltinType *PlaceholderTy = E->getType()->getAsPlaceholderType()) { // Strip the unbridged-cast placeholder expression off, if applicable. if (PlaceholderTy->getKind() == BuiltinType::ARCUnbridgedCast && (CT == VariadicMethod || (FDecl && FDecl->hasAttr()))) { E = stripARCUnbridgedCast(E); // Otherwise, do normal placeholder checking. } else { ExprResult ExprRes = CheckPlaceholderExpr(E); if (ExprRes.isInvalid()) return ExprError(); E = ExprRes.get(); } } ExprResult ExprRes = DefaultArgumentPromotion(E); if (ExprRes.isInvalid()) return ExprError(); // Copy blocks to the heap. if (ExprRes.get()->getType()->isBlockPointerType()) maybeExtendBlockObject(ExprRes); E = ExprRes.get(); // Diagnostics regarding non-POD argument types are // emitted along with format string checking in Sema::CheckFunctionCall(). if (isValidVarArgType(E->getType()) == VAK_Undefined) { // Turn this into a trap. 
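// The argument is rewritten, roughly, into the comma expression
// '(__builtin_trap(), E)' by the lookup, call, and ActOnBinOp below, so
// evaluating the invalid vararg traps at run time instead of silently
// invoking undefined behavior.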
CXXScopeSpec SS; SourceLocation TemplateKWLoc; UnqualifiedId Name; Name.setIdentifier(PP.getIdentifierInfo("__builtin_trap"), E->getBeginLoc()); ExprResult TrapFn = ActOnIdExpression(TUScope, SS, TemplateKWLoc, Name, /*HasTrailingLParen=*/true, /*IsAddressOfOperand=*/false); if (TrapFn.isInvalid()) return ExprError(); ExprResult Call = BuildCallExpr(TUScope, TrapFn.get(), E->getBeginLoc(), None, E->getEndLoc()); if (Call.isInvalid()) return ExprError(); ExprResult Comma = ActOnBinOp(TUScope, E->getBeginLoc(), tok::comma, Call.get(), E); if (Comma.isInvalid()) return ExprError(); return Comma.get(); } if (!getLangOpts().CPlusPlus && RequireCompleteType(E->getExprLoc(), E->getType(), diag::err_call_incomplete_argument)) return ExprError(); return E; } /// Converts an integer to complex float type. Helper function of /// UsualArithmeticConversions() /// /// \return false if the integer expression is an integer type and is /// successfully converted to the complex type. static bool handleIntegerToComplexFloatConversion(Sema &S, ExprResult &IntExpr, ExprResult &ComplexExpr, QualType IntTy, QualType ComplexTy, bool SkipCast) { if (IntTy->isComplexType() || IntTy->isRealFloatingType()) return true; if (SkipCast) return false; if (IntTy->isIntegerType()) { QualType fpTy = cast(ComplexTy)->getElementType(); IntExpr = S.ImpCastExprToType(IntExpr.get(), fpTy, CK_IntegralToFloating); IntExpr = S.ImpCastExprToType(IntExpr.get(), ComplexTy, CK_FloatingRealToComplex); } else { assert(IntTy->isComplexIntegerType()); IntExpr = S.ImpCastExprToType(IntExpr.get(), ComplexTy, CK_IntegralComplexToFloatingComplex); } return false; } /// Handle arithmetic conversion with complex types. Helper function of /// UsualArithmeticConversions() static QualType handleComplexFloatConversion(Sema &S, ExprResult &LHS, ExprResult &RHS, QualType LHSType, QualType RHSType, bool IsCompAssign) { // if we have an integer operand, the result is the complex type. if (!handleIntegerToComplexFloatConversion(S, RHS, LHS, RHSType, LHSType, /*skipCast*/false)) return LHSType; if (!handleIntegerToComplexFloatConversion(S, LHS, RHS, LHSType, RHSType, /*skipCast*/IsCompAssign)) return RHSType; // This handles complex/complex, complex/float, or float/complex. // When both operands are complex, the shorter operand is converted to the // type of the longer, and that is the type of the result. This corresponds // to what is done when combining two real floating-point operands. // The fun begins when size promotion occur across type domains. // From H&S 6.3.4: When one operand is complex and the other is a real // floating-point type, the less precise type is converted, within it's // real or complex domain, to the precision of the other type. For example, // when combining a "long double" with a "double _Complex", the // "double _Complex" is promoted to "long double _Complex". // Compute the rank of the two types, regardless of whether they are complex. int Order = S.Context.getFloatingTypeOrder(LHSType, RHSType); auto *LHSComplexType = dyn_cast(LHSType); auto *RHSComplexType = dyn_cast(RHSType); QualType LHSElementType = LHSComplexType ? LHSComplexType->getElementType() : LHSType; QualType RHSElementType = RHSComplexType ? RHSComplexType->getElementType() : RHSType; QualType ResultType = S.Context.getComplexType(LHSElementType); if (Order < 0) { // Promote the precision of the LHS if not an assignment. 
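// Illustrative example: for 'float _Complex' + 'double', Order < 0, so the
// result becomes 'double _Complex'; a complex LHS is widened with
// CK_FloatingComplexCast, while a real LHS would instead use CK_FloatingCast
// to the element type.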
ResultType = S.Context.getComplexType(RHSElementType); if (!IsCompAssign) { if (LHSComplexType) LHS = S.ImpCastExprToType(LHS.get(), ResultType, CK_FloatingComplexCast); else LHS = S.ImpCastExprToType(LHS.get(), RHSElementType, CK_FloatingCast); } } else if (Order > 0) { // Promote the precision of the RHS. if (RHSComplexType) RHS = S.ImpCastExprToType(RHS.get(), ResultType, CK_FloatingComplexCast); else RHS = S.ImpCastExprToType(RHS.get(), LHSElementType, CK_FloatingCast); } return ResultType; } /// Handle arithmetic conversion from integer to float. Helper function /// of UsualArithmeticConversions() static QualType handleIntToFloatConversion(Sema &S, ExprResult &FloatExpr, ExprResult &IntExpr, QualType FloatTy, QualType IntTy, bool ConvertFloat, bool ConvertInt) { if (IntTy->isIntegerType()) { if (ConvertInt) // Convert intExpr to the lhs floating point type. IntExpr = S.ImpCastExprToType(IntExpr.get(), FloatTy, CK_IntegralToFloating); return FloatTy; } // Convert both sides to the appropriate complex float. assert(IntTy->isComplexIntegerType()); QualType result = S.Context.getComplexType(FloatTy); // _Complex int -> _Complex float if (ConvertInt) IntExpr = S.ImpCastExprToType(IntExpr.get(), result, CK_IntegralComplexToFloatingComplex); // float -> _Complex float if (ConvertFloat) FloatExpr = S.ImpCastExprToType(FloatExpr.get(), result, CK_FloatingRealToComplex); return result; } /// Handle arithmethic conversion with floating point types. Helper /// function of UsualArithmeticConversions() static QualType handleFloatConversion(Sema &S, ExprResult &LHS, ExprResult &RHS, QualType LHSType, QualType RHSType, bool IsCompAssign) { bool LHSFloat = LHSType->isRealFloatingType(); bool RHSFloat = RHSType->isRealFloatingType(); // If we have two real floating types, convert the smaller operand // to the bigger result. if (LHSFloat && RHSFloat) { int order = S.Context.getFloatingTypeOrder(LHSType, RHSType); if (order > 0) { RHS = S.ImpCastExprToType(RHS.get(), LHSType, CK_FloatingCast); return LHSType; } assert(order < 0 && "illegal float comparison"); if (!IsCompAssign) LHS = S.ImpCastExprToType(LHS.get(), RHSType, CK_FloatingCast); return RHSType; } if (LHSFloat) { // Half FP has to be promoted to float unless it is natively supported if (LHSType->isHalfType() && !S.getLangOpts().NativeHalfType) LHSType = S.Context.FloatTy; return handleIntToFloatConversion(S, LHS, RHS, LHSType, RHSType, /*ConvertFloat=*/!IsCompAssign, /*ConvertInt=*/ true); } assert(RHSFloat); return handleIntToFloatConversion(S, RHS, LHS, RHSType, LHSType, /*convertInt=*/ true, /*convertFloat=*/!IsCompAssign); } /// Diagnose attempts to convert between __float128 and long double if /// there is no support for such conversion. Helper function of /// UsualArithmeticConversions(). static bool unsupportedTypeConversion(const Sema &S, QualType LHSType, QualType RHSType) { /* No issue converting if at least one of the types is not a floating point type or the two types have the same rank. */ if (!LHSType->isFloatingType() || !RHSType->isFloatingType() || S.Context.getFloatingTypeOrder(LHSType, RHSType) == 0) return false; assert(LHSType->isFloatingType() && RHSType->isFloatingType() && "The remaining types must be floating point types."); auto *LHSComplex = LHSType->getAs(); auto *RHSComplex = RHSType->getAs(); QualType LHSElemType = LHSComplex ? LHSComplex->getElementType() : LHSType; QualType RHSElemType = RHSComplex ? 
RHSComplex->getElementType() : RHSType; // No issue if the two types have the same representation if (&S.Context.getFloatTypeSemantics(LHSElemType) == &S.Context.getFloatTypeSemantics(RHSElemType)) return false; bool Float128AndLongDouble = (LHSElemType == S.Context.Float128Ty && RHSElemType == S.Context.LongDoubleTy); Float128AndLongDouble |= (LHSElemType == S.Context.LongDoubleTy && RHSElemType == S.Context.Float128Ty); // We've handled the situation where __float128 and long double have the same // representation. We allow all conversions for all possible long double types // except PPC's double double. return Float128AndLongDouble && (&S.Context.getFloatTypeSemantics(S.Context.LongDoubleTy) == &llvm::APFloat::PPCDoubleDouble()); } typedef ExprResult PerformCastFn(Sema &S, Expr *operand, QualType toType); namespace { /// These helper callbacks are placed in an anonymous namespace to /// permit their use as function template parameters. ExprResult doIntegralCast(Sema &S, Expr *op, QualType toType) { return S.ImpCastExprToType(op, toType, CK_IntegralCast); } ExprResult doComplexIntegralCast(Sema &S, Expr *op, QualType toType) { return S.ImpCastExprToType(op, S.Context.getComplexType(toType), CK_IntegralComplexCast); } } /// Handle integer arithmetic conversions. Helper function of /// UsualArithmeticConversions() template static QualType handleIntegerConversion(Sema &S, ExprResult &LHS, ExprResult &RHS, QualType LHSType, QualType RHSType, bool IsCompAssign) { // The rules for this case are in C99 6.3.1.8 int order = S.Context.getIntegerTypeOrder(LHSType, RHSType); bool LHSSigned = LHSType->hasSignedIntegerRepresentation(); bool RHSSigned = RHSType->hasSignedIntegerRepresentation(); if (LHSSigned == RHSSigned) { // Same signedness; use the higher-ranked type if (order >= 0) { RHS = (*doRHSCast)(S, RHS.get(), LHSType); return LHSType; } else if (!IsCompAssign) LHS = (*doLHSCast)(S, LHS.get(), RHSType); return RHSType; } else if (order != (LHSSigned ? 1 : -1)) { // The unsigned type has greater than or equal rank to the // signed type, so use the unsigned type if (RHSSigned) { RHS = (*doRHSCast)(S, RHS.get(), LHSType); return LHSType; } else if (!IsCompAssign) LHS = (*doLHSCast)(S, LHS.get(), RHSType); return RHSType; } else if (S.Context.getIntWidth(LHSType) != S.Context.getIntWidth(RHSType)) { // The two types are different widths; if we are here, that // means the signed type is larger than the unsigned type, so // use the signed type. if (LHSSigned) { RHS = (*doRHSCast)(S, RHS.get(), LHSType); return LHSType; } else if (!IsCompAssign) LHS = (*doLHSCast)(S, LHS.get(), RHSType); return RHSType; } else { // The signed type is higher-ranked than the unsigned type, // but isn't actually any bigger (like unsigned int and long // on most 32-bit systems). Use the unsigned type corresponding // to the signed type. QualType result = S.Context.getCorrespondingUnsignedType(LHSSigned ? LHSType : RHSType); RHS = (*doRHSCast)(S, RHS.get(), result); if (!IsCompAssign) LHS = (*doLHSCast)(S, LHS.get(), result); return result; } } /// Handle conversions with GCC complex int extension. 
Helper function /// of UsualArithmeticConversions() static QualType handleComplexIntConversion(Sema &S, ExprResult &LHS, ExprResult &RHS, QualType LHSType, QualType RHSType, bool IsCompAssign) { const ComplexType *LHSComplexInt = LHSType->getAsComplexIntegerType(); const ComplexType *RHSComplexInt = RHSType->getAsComplexIntegerType(); if (LHSComplexInt && RHSComplexInt) { QualType LHSEltType = LHSComplexInt->getElementType(); QualType RHSEltType = RHSComplexInt->getElementType(); QualType ScalarType = handleIntegerConversion (S, LHS, RHS, LHSEltType, RHSEltType, IsCompAssign); return S.Context.getComplexType(ScalarType); } if (LHSComplexInt) { QualType LHSEltType = LHSComplexInt->getElementType(); QualType ScalarType = handleIntegerConversion (S, LHS, RHS, LHSEltType, RHSType, IsCompAssign); QualType ComplexType = S.Context.getComplexType(ScalarType); RHS = S.ImpCastExprToType(RHS.get(), ComplexType, CK_IntegralRealToComplex); return ComplexType; } assert(RHSComplexInt); QualType RHSEltType = RHSComplexInt->getElementType(); QualType ScalarType = handleIntegerConversion (S, LHS, RHS, LHSType, RHSEltType, IsCompAssign); QualType ComplexType = S.Context.getComplexType(ScalarType); if (!IsCompAssign) LHS = S.ImpCastExprToType(LHS.get(), ComplexType, CK_IntegralRealToComplex); return ComplexType; } /// Return the rank of a given fixed point or integer type. The value itself /// doesn't matter, but the values must be increasing with proper increasing /// rank as described in N1169 4.1.1. static unsigned GetFixedPointRank(QualType Ty) { const auto *BTy = Ty->getAs(); assert(BTy && "Expected a builtin type."); switch (BTy->getKind()) { case BuiltinType::ShortFract: case BuiltinType::UShortFract: case BuiltinType::SatShortFract: case BuiltinType::SatUShortFract: return 1; case BuiltinType::Fract: case BuiltinType::UFract: case BuiltinType::SatFract: case BuiltinType::SatUFract: return 2; case BuiltinType::LongFract: case BuiltinType::ULongFract: case BuiltinType::SatLongFract: case BuiltinType::SatULongFract: return 3; case BuiltinType::ShortAccum: case BuiltinType::UShortAccum: case BuiltinType::SatShortAccum: case BuiltinType::SatUShortAccum: return 4; case BuiltinType::Accum: case BuiltinType::UAccum: case BuiltinType::SatAccum: case BuiltinType::SatUAccum: return 5; case BuiltinType::LongAccum: case BuiltinType::ULongAccum: case BuiltinType::SatLongAccum: case BuiltinType::SatULongAccum: return 6; default: if (BTy->isInteger()) return 0; llvm_unreachable("Unexpected fixed point or integer type"); } } /// handleFixedPointConversion - Fixed point operations between fixed /// point types and integers or other fixed point types do not fall under /// usual arithmetic conversion since these conversions could result in loss /// of precsision (N1169 4.1.4). These operations should be calculated with /// the full precision of their result type (N1169 4.1.6.2.1). 
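/// Illustrative example: '_Accum + int' is computed at _Accum precision
/// (rank 5 vs. rank 0 above), and '_Sat short _Fract * _Fract' takes the
/// higher-ranked _Fract and then its saturating variant, '_Sat _Fract'.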
static QualType handleFixedPointConversion(Sema &S, QualType LHSTy, QualType RHSTy) { assert((LHSTy->isFixedPointType() || RHSTy->isFixedPointType()) && "Expected at least one of the operands to be a fixed point type"); assert((LHSTy->isFixedPointOrIntegerType() || RHSTy->isFixedPointOrIntegerType()) && "Special fixed point arithmetic operation conversions are only " "applied to ints or other fixed point types"); // If one operand has signed fixed-point type and the other operand has // unsigned fixed-point type, then the unsigned fixed-point operand is // converted to its corresponding signed fixed-point type and the resulting // type is the type of the converted operand. if (RHSTy->isSignedFixedPointType() && LHSTy->isUnsignedFixedPointType()) LHSTy = S.Context.getCorrespondingSignedFixedPointType(LHSTy); else if (RHSTy->isUnsignedFixedPointType() && LHSTy->isSignedFixedPointType()) RHSTy = S.Context.getCorrespondingSignedFixedPointType(RHSTy); // The result type is the type with the highest rank, whereby a fixed-point // conversion rank is always greater than an integer conversion rank; if the // type of either of the operands is a saturating fixedpoint type, the result // type shall be the saturating fixed-point type corresponding to the type // with the highest rank; the resulting value is converted (taking into // account rounding and overflow) to the precision of the resulting type. // Same ranks between signed and unsigned types are resolved earlier, so both // types are either signed or both unsigned at this point. unsigned LHSTyRank = GetFixedPointRank(LHSTy); unsigned RHSTyRank = GetFixedPointRank(RHSTy); QualType ResultTy = LHSTyRank > RHSTyRank ? LHSTy : RHSTy; if (LHSTy->isSaturatedFixedPointType() || RHSTy->isSaturatedFixedPointType()) ResultTy = S.Context.getCorrespondingSaturatedType(ResultTy); return ResultTy; } /// Check that the usual arithmetic conversions can be performed on this pair of /// expressions that might be of enumeration type. static void checkEnumArithmeticConversions(Sema &S, Expr *LHS, Expr *RHS, SourceLocation Loc, Sema::ArithConvKind ACK) { // C++2a [expr.arith.conv]p1: // If one operand is of enumeration type and the other operand is of a // different enumeration type or a floating-point type, this behavior is // deprecated ([depr.arith.conv.enum]). // // Warn on this in all language modes. Produce a deprecation warning in C++20. // Eventually we will presumably reject these cases (in C++23 onwards?). QualType L = LHS->getType(), R = RHS->getType(); bool LEnum = L->isUnscopedEnumerationType(), REnum = R->isUnscopedEnumerationType(); bool IsCompAssign = ACK == Sema::ACK_CompAssign; if ((!IsCompAssign && LEnum && R->isFloatingType()) || (REnum && L->isFloatingType())) { S.Diag(Loc, S.getLangOpts().CPlusPlus20 ? diag::warn_arith_conv_enum_float_cxx20 : diag::warn_arith_conv_enum_float) << LHS->getSourceRange() << RHS->getSourceRange() << (int)ACK << LEnum << L << R; } else if (!IsCompAssign && LEnum && REnum && !S.Context.hasSameUnqualifiedType(L, R)) { unsigned DiagID; if (!L->castAs()->getDecl()->hasNameForLinkage() || !R->castAs()->getDecl()->hasNameForLinkage()) { // If either enumeration type is unnamed, it's less likely that the // user cares about this, but this situation is still deprecated in // C++2a. Use a different warning group. DiagID = S.getLangOpts().CPlusPlus20 ? 
diag::warn_arith_conv_mixed_anon_enum_types_cxx20 : diag::warn_arith_conv_mixed_anon_enum_types; } else if (ACK == Sema::ACK_Conditional) { // Conditional expressions are separated out because they have // historically had a different warning flag. DiagID = S.getLangOpts().CPlusPlus20 ? diag::warn_conditional_mixed_enum_types_cxx20 : diag::warn_conditional_mixed_enum_types; } else if (ACK == Sema::ACK_Comparison) { // Comparison expressions are separated out because they have // historically had a different warning flag. DiagID = S.getLangOpts().CPlusPlus20 ? diag::warn_comparison_mixed_enum_types_cxx20 : diag::warn_comparison_mixed_enum_types; } else { DiagID = S.getLangOpts().CPlusPlus20 ? diag::warn_arith_conv_mixed_enum_types_cxx20 : diag::warn_arith_conv_mixed_enum_types; } S.Diag(Loc, DiagID) << LHS->getSourceRange() << RHS->getSourceRange() << (int)ACK << L << R; } } /// UsualArithmeticConversions - Performs various conversions that are common to /// binary operators (C99 6.3.1.8). If both operands aren't arithmetic, this /// routine returns the first non-arithmetic type found. The client is /// responsible for emitting appropriate error diagnostics. QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK) { checkEnumArithmeticConversions(*this, LHS.get(), RHS.get(), Loc, ACK); if (ACK != ACK_CompAssign) { LHS = UsualUnaryConversions(LHS.get()); if (LHS.isInvalid()) return QualType(); } RHS = UsualUnaryConversions(RHS.get()); if (RHS.isInvalid()) return QualType(); // For conversion purposes, we ignore any qualifiers. // For example, "const float" and "float" are equivalent. QualType LHSType = Context.getCanonicalType(LHS.get()->getType()).getUnqualifiedType(); QualType RHSType = Context.getCanonicalType(RHS.get()->getType()).getUnqualifiedType(); // For conversion purposes, we ignore any atomic qualifier on the LHS. if (const AtomicType *AtomicLHS = LHSType->getAs()) LHSType = AtomicLHS->getValueType(); // If both types are identical, no conversion is needed. if (LHSType == RHSType) return LHSType; // If either side is a non-arithmetic type (e.g. a pointer), we are done. // The caller can deal with this (e.g. pointer + int). if (!LHSType->isArithmeticType() || !RHSType->isArithmeticType()) return QualType(); // Apply unary and bitfield promotions to the LHS's type. QualType LHSUnpromotedType = LHSType; if (LHSType->isPromotableIntegerType()) LHSType = Context.getPromotedIntegerType(LHSType); QualType LHSBitfieldPromoteTy = Context.isPromotableBitField(LHS.get()); if (!LHSBitfieldPromoteTy.isNull()) LHSType = LHSBitfieldPromoteTy; if (LHSType != LHSUnpromotedType && ACK != ACK_CompAssign) LHS = ImpCastExprToType(LHS.get(), LHSType, CK_IntegralCast); // If both types are identical, no conversion is needed. if (LHSType == RHSType) return LHSType; // ExtInt types aren't subject to conversions between them or normal integers, // so this fails. if(LHSType->isExtIntType() || RHSType->isExtIntType()) return QualType(); // At this point, we have two different arithmetic types. // Diagnose attempts to convert between __float128 and long double where // such conversions currently can't be handled. if (unsupportedTypeConversion(*this, LHSType, RHSType)) return QualType(); // Handle complex types first (C99 6.3.1.8p1). if (LHSType->isComplexType() || RHSType->isComplexType()) return handleComplexFloatConversion(*this, LHS, RHS, LHSType, RHSType, ACK == ACK_CompAssign); // Now handle "real" floating types (i.e. 
float, double, long double). if (LHSType->isRealFloatingType() || RHSType->isRealFloatingType()) return handleFloatConversion(*this, LHS, RHS, LHSType, RHSType, ACK == ACK_CompAssign); // Handle GCC complex int extension. if (LHSType->isComplexIntegerType() || RHSType->isComplexIntegerType()) return handleComplexIntConversion(*this, LHS, RHS, LHSType, RHSType, ACK == ACK_CompAssign); if (LHSType->isFixedPointType() || RHSType->isFixedPointType()) return handleFixedPointConversion(*this, LHSType, RHSType); // Finally, we have two differing integer types. return handleIntegerConversion (*this, LHS, RHS, LHSType, RHSType, ACK == ACK_CompAssign); } //===----------------------------------------------------------------------===// // Semantic Analysis for various Expression Types //===----------------------------------------------------------------------===// ExprResult Sema::ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef ArgTypes, ArrayRef ArgExprs) { unsigned NumAssocs = ArgTypes.size(); assert(NumAssocs == ArgExprs.size()); TypeSourceInfo **Types = new TypeSourceInfo*[NumAssocs]; for (unsigned i = 0; i < NumAssocs; ++i) { if (ArgTypes[i]) (void) GetTypeFromParser(ArgTypes[i], &Types[i]); else Types[i] = nullptr; } ExprResult ER = CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc, ControllingExpr, llvm::makeArrayRef(Types, NumAssocs), ArgExprs); delete [] Types; return ER; } ExprResult Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef Types, ArrayRef Exprs) { unsigned NumAssocs = Types.size(); assert(NumAssocs == Exprs.size()); // Decay and strip qualifiers for the controlling expression type, and handle // placeholder type replacement. See committee discussion from WG14 DR423. { EnterExpressionEvaluationContext Unevaluated( *this, Sema::ExpressionEvaluationContext::Unevaluated); ExprResult R = DefaultFunctionArrayLvalueConversion(ControllingExpr); if (R.isInvalid()) return ExprError(); ControllingExpr = R.get(); } // The controlling expression is an unevaluated operand, so side effects are // likely unintended. if (!inTemplateInstantiation() && ControllingExpr->HasSideEffects(Context, false)) Diag(ControllingExpr->getExprLoc(), diag::warn_side_effects_unevaluated_context); bool TypeErrorFound = false, IsResultDependent = ControllingExpr->isTypeDependent(), ContainsUnexpandedParameterPack = ControllingExpr->containsUnexpandedParameterPack(); for (unsigned i = 0; i < NumAssocs; ++i) { if (Exprs[i]->containsUnexpandedParameterPack()) ContainsUnexpandedParameterPack = true; if (Types[i]) { if (Types[i]->getType()->containsUnexpandedParameterPack()) ContainsUnexpandedParameterPack = true; if (Types[i]->getType()->isDependentType()) { IsResultDependent = true; } else { // C11 6.5.1.1p2 "The type name in a generic association shall specify a // complete object type other than a variably modified type." 
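// Illustrative sketch of associations this rejects (editor's example, not
// part of the original comment; 'n' is a hypothetical runtime value):
//   _Generic(x, void: 0, default: 1)       // error: incomplete type
//   _Generic(x, int(void): 0, default: 1)  // error: not an object type
//   _Generic(x, int[n]: 0, default: 1)     // error: variably modified type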
unsigned D = 0; if (Types[i]->getType()->isIncompleteType()) D = diag::err_assoc_type_incomplete; else if (!Types[i]->getType()->isObjectType()) D = diag::err_assoc_type_nonobject; else if (Types[i]->getType()->isVariablyModifiedType()) D = diag::err_assoc_type_variably_modified; if (D != 0) { Diag(Types[i]->getTypeLoc().getBeginLoc(), D) << Types[i]->getTypeLoc().getSourceRange() << Types[i]->getType(); TypeErrorFound = true; } // C11 6.5.1.1p2 "No two generic associations in the same generic // selection shall specify compatible types." for (unsigned j = i+1; j < NumAssocs; ++j) if (Types[j] && !Types[j]->getType()->isDependentType() && Context.typesAreCompatible(Types[i]->getType(), Types[j]->getType())) { Diag(Types[j]->getTypeLoc().getBeginLoc(), diag::err_assoc_compatible_types) << Types[j]->getTypeLoc().getSourceRange() << Types[j]->getType() << Types[i]->getType(); Diag(Types[i]->getTypeLoc().getBeginLoc(), diag::note_compat_assoc) << Types[i]->getTypeLoc().getSourceRange() << Types[i]->getType(); TypeErrorFound = true; } } } } if (TypeErrorFound) return ExprError(); // If we determined that the generic selection is result-dependent, don't // try to compute the result expression. if (IsResultDependent) return GenericSelectionExpr::Create(Context, KeyLoc, ControllingExpr, Types, Exprs, DefaultLoc, RParenLoc, ContainsUnexpandedParameterPack); SmallVector CompatIndices; unsigned DefaultIndex = -1U; for (unsigned i = 0; i < NumAssocs; ++i) { if (!Types[i]) DefaultIndex = i; else if (Context.typesAreCompatible(ControllingExpr->getType(), Types[i]->getType())) CompatIndices.push_back(i); } // C11 6.5.1.1p2 "The controlling expression of a generic selection shall have // type compatible with at most one of the types named in its generic // association list." if (CompatIndices.size() > 1) { // We strip parens here because the controlling expression is typically // parenthesized in macro definitions. ControllingExpr = ControllingExpr->IgnoreParens(); Diag(ControllingExpr->getBeginLoc(), diag::err_generic_sel_multi_match) << ControllingExpr->getSourceRange() << ControllingExpr->getType() << (unsigned)CompatIndices.size(); for (unsigned I : CompatIndices) { Diag(Types[I]->getTypeLoc().getBeginLoc(), diag::note_compat_assoc) << Types[I]->getTypeLoc().getSourceRange() << Types[I]->getType(); } return ExprError(); } // C11 6.5.1.1p2 "If a generic selection has no default generic association, // its controlling expression shall have type compatible with exactly one of // the types named in its generic association list." if (DefaultIndex == -1U && CompatIndices.size() == 0) { // We strip parens here because the controlling expression is typically // parenthesized in macro definitions. ControllingExpr = ControllingExpr->IgnoreParens(); Diag(ControllingExpr->getBeginLoc(), diag::err_generic_sel_no_match) << ControllingExpr->getSourceRange() << ControllingExpr->getType(); return ExprError(); } // C11 6.5.1.1p3 "If a generic selection has a generic association with a // type name that is compatible with the type of the controlling expression, // then the result expression of the generic selection is the expression // in that generic association. Otherwise, the result expression of the // generic selection is the expression in the default generic association." unsigned ResultIndex = CompatIndices.size() ? 
CompatIndices[0] : DefaultIndex; return GenericSelectionExpr::Create( Context, KeyLoc, ControllingExpr, Types, Exprs, DefaultLoc, RParenLoc, ContainsUnexpandedParameterPack, ResultIndex); } /// getUDSuffixLoc - Create a SourceLocation for a ud-suffix, given the /// location of the token and the offset of the ud-suffix within it. static SourceLocation getUDSuffixLoc(Sema &S, SourceLocation TokLoc, unsigned Offset) { return Lexer::AdvanceToTokenCharacter(TokLoc, Offset, S.getSourceManager(), S.getLangOpts()); } /// BuildCookedLiteralOperatorCall - A user-defined literal was found. Look up /// the corresponding cooked (non-raw) literal operator, and build a call to it. static ExprResult BuildCookedLiteralOperatorCall(Sema &S, Scope *Scope, IdentifierInfo *UDSuffix, SourceLocation UDSuffixLoc, ArrayRef Args, SourceLocation LitEndLoc) { assert(Args.size() <= 2 && "too many arguments for literal operator"); QualType ArgTy[2]; for (unsigned ArgIdx = 0; ArgIdx != Args.size(); ++ArgIdx) { ArgTy[ArgIdx] = Args[ArgIdx]->getType(); if (ArgTy[ArgIdx]->isArrayType()) ArgTy[ArgIdx] = S.Context.getArrayDecayedType(ArgTy[ArgIdx]); } DeclarationName OpName = S.Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix); DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc); OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc); LookupResult R(S, OpName, UDSuffixLoc, Sema::LookupOrdinaryName); if (S.LookupLiteralOperator(Scope, R, llvm::makeArrayRef(ArgTy, Args.size()), /*AllowRaw*/ false, /*AllowTemplate*/ false, /*AllowStringTemplate*/ false, /*DiagnoseMissing*/ true) == Sema::LOLR_Error) return ExprError(); return S.BuildLiteralOperatorCall(R, OpNameInfo, Args, LitEndLoc); } /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). The result string has to handle string /// concatenation ([C99 5.1.1.2, translation phase #6]), so it may come from /// multiple tokens. However, the common case is that StringToks points to one /// string. /// ExprResult Sema::ActOnStringLiteral(ArrayRef StringToks, Scope *UDLScope) { assert(!StringToks.empty() && "Must have at least one string!"); StringLiteralParser Literal(StringToks, PP); if (Literal.hadError) return ExprError(); SmallVector StringTokLocs; for (const Token &Tok : StringToks) StringTokLocs.push_back(Tok.getLocation()); QualType CharTy = Context.CharTy; StringLiteral::StringKind Kind = StringLiteral::Ascii; if (Literal.isWide()) { CharTy = Context.getWideCharType(); Kind = StringLiteral::Wide; } else if (Literal.isUTF8()) { if (getLangOpts().Char8) CharTy = Context.Char8Ty; Kind = StringLiteral::UTF8; } else if (Literal.isUTF16()) { CharTy = Context.Char16Ty; Kind = StringLiteral::UTF16; } else if (Literal.isUTF32()) { CharTy = Context.Char32Ty; Kind = StringLiteral::UTF32; } else if (Literal.isPascal()) { CharTy = Context.UnsignedCharTy; } // Warn on initializing an array of char from a u8 string literal; this // becomes ill-formed in C++2a. if (getLangOpts().CPlusPlus && !getLangOpts().CPlusPlus20 && !getLangOpts().Char8 && Kind == StringLiteral::UTF8) { Diag(StringTokLocs.front(), diag::warn_cxx20_compat_utf8_string); // Create removals for all 'u8' prefixes in the string literal(s). This // ensures C++2a compatibility (but may change the program behavior when // built by non-Clang compilers for which the execution character set is // not always UTF-8). 
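// Illustrative sketch (editor's example, not part of the original comment):
//   char buf[] = u8"hi";   // OK in C++17; ill-formed in C++20 (char8_t[3])
// The fix-it below removes the 'u8' prefix so the initializer stays valid:
//   char buf[] = "hi";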
auto RemovalDiag = PDiag(diag::note_cxx20_compat_utf8_string_remove_u8); SourceLocation RemovalDiagLoc; for (const Token &Tok : StringToks) { if (Tok.getKind() == tok::utf8_string_literal) { if (RemovalDiagLoc.isInvalid()) RemovalDiagLoc = Tok.getLocation(); RemovalDiag << FixItHint::CreateRemoval(CharSourceRange::getCharRange( Tok.getLocation(), Lexer::AdvanceToTokenCharacter(Tok.getLocation(), 2, getSourceManager(), getLangOpts()))); } } Diag(RemovalDiagLoc, RemovalDiag); } QualType StrTy = Context.getStringLiteralArrayType(CharTy, Literal.GetNumStringChars()); // Pass &StringTokLocs[0], StringTokLocs.size() to factory! StringLiteral *Lit = StringLiteral::Create(Context, Literal.GetString(), Kind, Literal.Pascal, StrTy, &StringTokLocs[0], StringTokLocs.size()); if (Literal.getUDSuffix().empty()) return Lit; // We're building a user-defined literal. IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix()); SourceLocation UDSuffixLoc = getUDSuffixLoc(*this, StringTokLocs[Literal.getUDSuffixToken()], Literal.getUDSuffixOffset()); // Make sure we're allowed user-defined literals here. if (!UDLScope) return ExprError(Diag(UDSuffixLoc, diag::err_invalid_string_udl)); // C++11 [lex.ext]p5: The literal L is treated as a call of the form // operator "" X (str, len) QualType SizeType = Context.getSizeType(); DeclarationName OpName = Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix); DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc); OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc); QualType ArgTy[] = { Context.getArrayDecayedType(StrTy), SizeType }; LookupResult R(*this, OpName, UDSuffixLoc, LookupOrdinaryName); switch (LookupLiteralOperator(UDLScope, R, ArgTy, /*AllowRaw*/ false, /*AllowTemplate*/ false, /*AllowStringTemplate*/ true, /*DiagnoseMissing*/ true)) { case LOLR_Cooked: { llvm::APInt Len(Context.getIntWidth(SizeType), Literal.GetNumStringChars()); IntegerLiteral *LenArg = IntegerLiteral::Create(Context, Len, SizeType, StringTokLocs[0]); Expr *Args[] = { Lit, LenArg }; return BuildLiteralOperatorCall(R, OpNameInfo, Args, StringTokLocs.back()); } case LOLR_StringTemplate: { TemplateArgumentListInfo ExplicitArgs; unsigned CharBits = Context.getIntWidth(CharTy); bool CharIsUnsigned = CharTy->isUnsignedIntegerType(); llvm::APSInt Value(CharBits, CharIsUnsigned); TemplateArgument TypeArg(CharTy); TemplateArgumentLocInfo TypeArgInfo(Context.getTrivialTypeSourceInfo(CharTy)); ExplicitArgs.addArgument(TemplateArgumentLoc(TypeArg, TypeArgInfo)); for (unsigned I = 0, N = Lit->getLength(); I != N; ++I) { Value = Lit->getCodeUnit(I); TemplateArgument Arg(Context, Value, CharTy); TemplateArgumentLocInfo ArgInfo; ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo)); } return BuildLiteralOperatorCall(R, OpNameInfo, None, StringTokLocs.back(), &ExplicitArgs); } case LOLR_Raw: case LOLR_Template: case LOLR_ErrorNoDiagnostic: llvm_unreachable("unexpected literal operator lookup result"); case LOLR_Error: return ExprError(); } llvm_unreachable("unexpected literal operator lookup result"); } DeclRefExpr * Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS) { DeclarationNameInfo NameInfo(D->getDeclName(), Loc); return BuildDeclRefExpr(D, Ty, VK, NameInfo, SS); } DeclRefExpr * Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS, NamedDecl *FoundD, SourceLocation TemplateKWLoc, const TemplateArgumentListInfo *TemplateArgs) { 
NestedNameSpecifierLoc NNS = SS ? SS->getWithLocInContext(Context) : NestedNameSpecifierLoc(); return BuildDeclRefExpr(D, Ty, VK, NameInfo, NNS, FoundD, TemplateKWLoc, TemplateArgs); } NonOdrUseReason Sema::getNonOdrUseReasonInCurrentContext(ValueDecl *D) { // A declaration named in an unevaluated operand never constitutes an odr-use. if (isUnevaluatedContext()) return NOUR_Unevaluated; // C++2a [basic.def.odr]p4: // A variable x whose name appears as a potentially-evaluated expression e // is odr-used by e unless [...] x is a reference that is usable in // constant expressions. if (VarDecl *VD = dyn_cast(D)) { if (VD->getType()->isReferenceType() && !(getLangOpts().OpenMP && isOpenMPCapturedDecl(D)) && VD->isUsableInConstantExpressions(Context)) return NOUR_Constant; } // All remaining non-variable cases constitute an odr-use. For variables, we // need to wait and see how the expression is used. return NOUR_None; } /// BuildDeclRefExpr - Build an expression that references a /// declaration that does not require a closure capture. DeclRefExpr * Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD, SourceLocation TemplateKWLoc, const TemplateArgumentListInfo *TemplateArgs) { bool RefersToCapturedVariable = isa(D) && NeedToCaptureVariable(cast(D), NameInfo.getLoc()); DeclRefExpr *E = DeclRefExpr::Create( Context, NNS, TemplateKWLoc, D, RefersToCapturedVariable, NameInfo, Ty, VK, FoundD, TemplateArgs, getNonOdrUseReasonInCurrentContext(D)); MarkDeclRefReferenced(E); // C++ [except.spec]p17: // An exception-specification is considered to be needed when: // - in an expression, the function is the unique lookup result or // the selected member of a set of overloaded functions. // // We delay doing this until after we've built the function reference and // marked it as used so that: // a) if the function is defaulted, we get errors from defining it before / // instead of errors from computing its exception specification, and // b) if the function is a defaulted comparison, we can use the body we // build when defining it as input to the exception specification // computation rather than computing a new body. if (auto *FPT = Ty->getAs()) { if (isUnresolvedExceptionSpec(FPT->getExceptionSpecType())) { if (auto *NewFPT = ResolveExceptionSpec(NameInfo.getLoc(), FPT)) E->setType(Context.getQualifiedType(NewFPT, Ty.getQualifiers())); } } if (getLangOpts().ObjCWeak && isa(D) && Ty.getObjCLifetime() == Qualifiers::OCL_Weak && !isUnevaluatedContext() && !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, E->getBeginLoc())) getCurFunction()->recordUseOfWeak(E); FieldDecl *FD = dyn_cast(D); if (IndirectFieldDecl *IFD = dyn_cast(D)) FD = IFD->getAnonField(); if (FD) { UnusedPrivateFields.remove(FD); // Just in case we're building an illegal pointer-to-member. if (FD->isBitField()) E->setObjectKind(OK_BitField); } // C++ [expr.prim]/8: The expression [...] is a bit-field if the identifier // designates a bit-field. if (auto *BD = dyn_cast(D)) if (auto *BE = BD->getBinding()) E->setObjectKind(BE->getObjectKind()); return E; } /// Decomposes the given name into a DeclarationNameInfo, its location, and /// possibly a list of template arguments. /// /// If this produces template arguments, it is permitted to call /// DecomposeTemplateName. /// /// This actually loses a lot of source location information for /// non-standard name kinds; we should consider preserving that in /// some way. 
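///
/// Editor's note (sketch, not part of the original comment): for a
/// template-id such as 'f<int>', the arguments are copied into \p Buffer and
/// \p TemplateArgs is pointed at it; for a plain identifier such as 'f',
/// \p TemplateArgs is set to null.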
void Sema::DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs) { if (Id.getKind() == UnqualifiedIdKind::IK_TemplateId) { Buffer.setLAngleLoc(Id.TemplateId->LAngleLoc); Buffer.setRAngleLoc(Id.TemplateId->RAngleLoc); ASTTemplateArgsPtr TemplateArgsPtr(Id.TemplateId->getTemplateArgs(), Id.TemplateId->NumArgs); translateTemplateArguments(TemplateArgsPtr, Buffer); TemplateName TName = Id.TemplateId->Template.get(); SourceLocation TNameLoc = Id.TemplateId->TemplateNameLoc; NameInfo = Context.getNameForTemplate(TName, TNameLoc); TemplateArgs = &Buffer; } else { NameInfo = GetNameFromUnqualifiedId(Id); TemplateArgs = nullptr; } } static void emitEmptyLookupTypoDiagnostic( const TypoCorrection &TC, Sema &SemaRef, const CXXScopeSpec &SS, DeclarationName Typo, SourceLocation TypoLoc, ArrayRef Args, unsigned DiagnosticID, unsigned DiagnosticSuggestID) { DeclContext *Ctx = SS.isEmpty() ? nullptr : SemaRef.computeDeclContext(SS, false); if (!TC) { // Emit a special diagnostic for failed member lookups. // FIXME: computing the declaration context might fail here (?) if (Ctx) SemaRef.Diag(TypoLoc, diag::err_no_member) << Typo << Ctx << SS.getRange(); else SemaRef.Diag(TypoLoc, DiagnosticID) << Typo; return; } std::string CorrectedStr = TC.getAsString(SemaRef.getLangOpts()); bool DroppedSpecifier = TC.WillReplaceSpecifier() && Typo.getAsString() == CorrectedStr; unsigned NoteID = TC.getCorrectionDeclAs() ? diag::note_implicit_param_decl : diag::note_previous_decl; if (!Ctx) SemaRef.diagnoseTypo(TC, SemaRef.PDiag(DiagnosticSuggestID) << Typo, SemaRef.PDiag(NoteID)); else SemaRef.diagnoseTypo(TC, SemaRef.PDiag(diag::err_no_member_suggest) << Typo << Ctx << DroppedSpecifier << SS.getRange(), SemaRef.PDiag(NoteID)); } /// Diagnose an empty lookup. /// /// \return false if new lookup candidates were found bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef Args, TypoExpr **Out) { DeclarationName Name = R.getLookupName(); unsigned diagnostic = diag::err_undeclared_var_use; unsigned diagnostic_suggest = diag::err_undeclared_var_use_suggest; if (Name.getNameKind() == DeclarationName::CXXOperatorName || Name.getNameKind() == DeclarationName::CXXLiteralOperatorName || Name.getNameKind() == DeclarationName::CXXConversionFunctionName) { diagnostic = diag::err_undeclared_use; diagnostic_suggest = diag::err_undeclared_use_suggest; } // If the original lookup was an unqualified lookup, fake an // unqualified lookup. This is useful when (for example) the // original lookup would not have found something because it was a // dependent name. DeclContext *DC = SS.isEmpty() ? CurContext : nullptr; while (DC) { if (isa(DC)) { LookupQualifiedName(R, DC); if (!R.empty()) { // Don't give errors about ambiguities in this lookup. R.suppressDiagnostics(); // During a default argument instantiation the CurContext points // to a CXXMethodDecl; but we can't apply a this-> fixit inside a // function parameter list, hence add an explicit check. bool isDefaultArgument = !CodeSynthesisContexts.empty() && CodeSynthesisContexts.back().Kind == CodeSynthesisContext::DefaultFunctionArgumentInstantiation; CXXMethodDecl *CurMethod = dyn_cast(CurContext); bool isInstance = CurMethod && CurMethod->isInstance() && DC == CurMethod->getParent() && !isDefaultArgument; // Give a code modification hint to insert 'this->'. 
// TODO: fixit for inserting 'Base::' in the other cases. // Actually quite difficult! if (getLangOpts().MSVCCompat) diagnostic = diag::ext_found_via_dependent_bases_lookup; if (isInstance) { Diag(R.getNameLoc(), diagnostic) << Name << FixItHint::CreateInsertion(R.getNameLoc(), "this->"); CheckCXXThisCapture(R.getNameLoc()); } else { Diag(R.getNameLoc(), diagnostic) << Name; } // Do we really want to note all of these? for (NamedDecl *D : R) Diag(D->getLocation(), diag::note_dependent_var_use); // Return true if we are inside a default argument instantiation // and the found name refers to an instance member function, otherwise // the function calling DiagnoseEmptyLookup will try to create an // implicit member call and this is wrong for default argument. if (isDefaultArgument && ((*R.begin())->isCXXInstanceMember())) { Diag(R.getNameLoc(), diag::err_member_call_without_object); return true; } // Tell the callee to try to recover. return false; } R.clear(); } DC = DC->getLookupParent(); } // We didn't find anything, so try to correct for a typo. TypoCorrection Corrected; if (S && Out) { SourceLocation TypoLoc = R.getNameLoc(); assert(!ExplicitTemplateArgs && "Diagnosing an empty lookup with explicit template args!"); *Out = CorrectTypoDelayed( R.getLookupNameInfo(), R.getLookupKind(), S, &SS, CCC, [=](const TypoCorrection &TC) { emitEmptyLookupTypoDiagnostic(TC, *this, SS, Name, TypoLoc, Args, diagnostic, diagnostic_suggest); }, nullptr, CTK_ErrorRecovery); if (*Out) return true; } else if (S && (Corrected = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, &SS, CCC, CTK_ErrorRecovery))) { std::string CorrectedStr(Corrected.getAsString(getLangOpts())); bool DroppedSpecifier = Corrected.WillReplaceSpecifier() && Name.getAsString() == CorrectedStr; R.setLookupName(Corrected.getCorrection()); bool AcceptableWithRecovery = false; bool AcceptableWithoutRecovery = false; NamedDecl *ND = Corrected.getFoundDecl(); if (ND) { if (Corrected.isOverloaded()) { OverloadCandidateSet OCS(R.getNameLoc(), OverloadCandidateSet::CSK_Normal); OverloadCandidateSet::iterator Best; for (NamedDecl *CD : Corrected) { if (FunctionTemplateDecl *FTD = dyn_cast(CD)) AddTemplateOverloadCandidate( FTD, DeclAccessPair::make(FTD, AS_none), ExplicitTemplateArgs, Args, OCS); else if (FunctionDecl *FD = dyn_cast(CD)) if (!ExplicitTemplateArgs || ExplicitTemplateArgs->size() == 0) AddOverloadCandidate(FD, DeclAccessPair::make(FD, AS_none), Args, OCS); } switch (OCS.BestViableFunction(*this, R.getNameLoc(), Best)) { case OR_Success: ND = Best->FoundDecl; Corrected.setCorrectionDecl(ND); break; default: // FIXME: Arbitrarily pick the first declaration for the note. Corrected.setCorrectionDecl(ND); break; } } R.addDecl(ND); if (getLangOpts().CPlusPlus && ND->isCXXClassMember()) { CXXRecordDecl *Record = nullptr; if (Corrected.getCorrectionSpecifier()) { const Type *Ty = Corrected.getCorrectionSpecifier()->getAsType(); Record = Ty->getAsCXXRecordDecl(); } if (!Record) Record = cast( ND->getDeclContext()->getRedeclContext()); R.setNamingClass(Record); } auto *UnderlyingND = ND->getUnderlyingDecl(); AcceptableWithRecovery = isa(UnderlyingND) || isa(UnderlyingND); // FIXME: If we ended up with a typo for a type name or // Objective-C class name, we're in trouble because the parser // is in the wrong place to recover. Suggest the typo // correction, but don't make it a fix-it since we're not going // to recover well anyway. 
AcceptableWithoutRecovery = isa(UnderlyingND) || getAsTypeTemplateDecl(UnderlyingND) || isa(UnderlyingND); } else { // FIXME: We found a keyword. Suggest it, but don't provide a fix-it // because we aren't able to recover. AcceptableWithoutRecovery = true; } if (AcceptableWithRecovery || AcceptableWithoutRecovery) { unsigned NoteID = Corrected.getCorrectionDeclAs() ? diag::note_implicit_param_decl : diag::note_previous_decl; if (SS.isEmpty()) diagnoseTypo(Corrected, PDiag(diagnostic_suggest) << Name, PDiag(NoteID), AcceptableWithRecovery); else diagnoseTypo(Corrected, PDiag(diag::err_no_member_suggest) << Name << computeDeclContext(SS, false) << DroppedSpecifier << SS.getRange(), PDiag(NoteID), AcceptableWithRecovery); // Tell the callee whether to try to recover. return !AcceptableWithRecovery; } } R.clear(); // Emit a special diagnostic for failed member lookups. // FIXME: computing the declaration context might fail here (?) if (!SS.isEmpty()) { Diag(R.getNameLoc(), diag::err_no_member) << Name << computeDeclContext(SS, false) << SS.getRange(); return true; } // Give up, we can't recover. Diag(R.getNameLoc(), diagnostic) << Name; return true; } /// In Microsoft mode, if we are inside a template class whose parent class has /// dependent base classes, and we can't resolve an unqualified identifier, then /// assume the identifier is a member of a dependent base class. We can only /// recover successfully in static methods, instance methods, and other contexts /// where 'this' is available. This doesn't precisely match MSVC's /// instantiation model, but it's close enough. static Expr * recoverFromMSUnqualifiedLookup(Sema &S, ASTContext &Context, DeclarationNameInfo &NameInfo, SourceLocation TemplateKWLoc, const TemplateArgumentListInfo *TemplateArgs) { // Only try to recover from lookup into dependent bases in static methods or // contexts where 'this' is available. QualType ThisType = S.getCurrentThisType(); const CXXRecordDecl *RD = nullptr; if (!ThisType.isNull()) RD = ThisType->getPointeeType()->getAsCXXRecordDecl(); else if (auto *MD = dyn_cast(S.CurContext)) RD = MD->getParent(); if (!RD || !RD->hasAnyDependentBases()) return nullptr; // Diagnose this as unqualified lookup into a dependent base class. If 'this' // is available, suggest inserting 'this->' as a fixit. SourceLocation Loc = NameInfo.getLoc(); auto DB = S.Diag(Loc, diag::ext_undeclared_unqual_id_with_dependent_base); DB << NameInfo.getName() << RD; if (!ThisType.isNull()) { DB << FixItHint::CreateInsertion(Loc, "this->"); return CXXDependentScopeMemberExpr::Create( Context, /*This=*/nullptr, ThisType, /*IsArrow=*/true, /*Op=*/SourceLocation(), NestedNameSpecifierLoc(), TemplateKWLoc, /*FirstQualifierFoundInScope=*/nullptr, NameInfo, TemplateArgs); } // Synthesize a fake NNS that points to the derived class. This will // perform name lookup during template instantiation. 
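// Illustrative sketch (editor's example, not part of the original comment):
//   template <typename T> struct S : T {
//     static int f() { return not_yet_declared; }  // maybe in a base of T?
//   };
// There is no 'this' in a static member function, so instead of 'this->' we
// qualify the name with the derived class and let instantiation look it up.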
CXXScopeSpec SS; auto *NNS = NestedNameSpecifier::Create(Context, nullptr, true, RD->getTypeForDecl()); SS.MakeTrivial(Context, NNS, SourceRange(Loc, Loc)); return DependentScopeDeclRefExpr::Create( Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo, TemplateArgs); } ExprResult Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC, bool IsInlineAsmIdentifier, Token *KeywordReplacement) { assert(!(IsAddressOfOperand && HasTrailingLParen) && "cannot be direct & operand and have a trailing lparen"); if (SS.isInvalid()) return ExprError(); TemplateArgumentListInfo TemplateArgsBuffer; // Decompose the UnqualifiedId into the following data. DeclarationNameInfo NameInfo; const TemplateArgumentListInfo *TemplateArgs; DecomposeUnqualifiedId(Id, TemplateArgsBuffer, NameInfo, TemplateArgs); DeclarationName Name = NameInfo.getName(); IdentifierInfo *II = Name.getAsIdentifierInfo(); SourceLocation NameLoc = NameInfo.getLoc(); if (II && II->isEditorPlaceholder()) { // FIXME: When typed placeholders are supported we can create a typed // placeholder expression node. return ExprError(); } // C++ [temp.dep.expr]p3: // An id-expression is type-dependent if it contains: // -- an identifier that was declared with a dependent type, // (note: handled after lookup) // -- a template-id that is dependent, // (note: handled in BuildTemplateIdExpr) // -- a conversion-function-id that specifies a dependent type, // -- a nested-name-specifier that contains a class-name that // names a dependent type. // Determine whether this is a member of an unknown specialization; // we need to handle these differently. bool DependentID = false; if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName && Name.getCXXNameType()->isDependentType()) { DependentID = true; } else if (SS.isSet()) { if (DeclContext *DC = computeDeclContext(SS, false)) { if (RequireCompleteDeclContext(SS, DC)) return ExprError(); } else { DependentID = true; } } if (DependentID) return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo, IsAddressOfOperand, TemplateArgs); // Perform the required lookup. LookupResult R(*this, NameInfo, (Id.getKind() == UnqualifiedIdKind::IK_ImplicitSelfParam) ? LookupObjCImplicitSelfParam : LookupOrdinaryName); if (TemplateKWLoc.isValid() || TemplateArgs) { // Lookup the template name again to correctly establish the context in // which it was found. This is really unfortunate as we already did the // lookup to determine that it was a template name in the first place. If // this becomes a performance hit, we can work harder to preserve those // results until we get here but it's likely not worth it. bool MemberOfUnknownSpecialization; AssumedTemplateKind AssumedTemplate; if (LookupTemplateName(R, S, SS, QualType(), /*EnteringContext=*/false, MemberOfUnknownSpecialization, TemplateKWLoc, &AssumedTemplate)) return ExprError(); if (MemberOfUnknownSpecialization || (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)) return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo, IsAddressOfOperand, TemplateArgs); } else { bool IvarLookupFollowUp = II && !SS.isSet() && getCurMethodDecl(); LookupParsedName(R, S, &SS, !IvarLookupFollowUp); // If the result might be in a dependent base class, this is a dependent // id-expression. 
if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation) return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo, IsAddressOfOperand, TemplateArgs); // If this reference is in an Objective-C method, then we need to do // some special Objective-C lookup, too. if (IvarLookupFollowUp) { ExprResult E(LookupInObjCMethod(R, S, II, true)); if (E.isInvalid()) return ExprError(); if (Expr *Ex = E.getAs()) return Ex; } } if (R.isAmbiguous()) return ExprError(); // This could be an implicitly declared function reference (legal in C90, // extension in C99, forbidden in C++). if (R.empty() && HasTrailingLParen && II && !getLangOpts().CPlusPlus) { NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *II, S); if (D) R.addDecl(D); } // Determine whether this name might be a candidate for // argument-dependent lookup. bool ADL = UseArgumentDependentLookup(SS, R, HasTrailingLParen); if (R.empty() && !ADL) { if (SS.isEmpty() && getLangOpts().MSVCCompat) { if (Expr *E = recoverFromMSUnqualifiedLookup(*this, Context, NameInfo, TemplateKWLoc, TemplateArgs)) return E; } // Don't diagnose an empty lookup for inline assembly. if (IsInlineAsmIdentifier) return ExprError(); // If this name wasn't predeclared and if this is not a function // call, diagnose the problem. TypoExpr *TE = nullptr; DefaultFilterCCC DefaultValidator(II, SS.isValid() ? SS.getScopeRep() : nullptr); DefaultValidator.IsAddressOfOperand = IsAddressOfOperand; assert((!CCC || CCC->IsAddressOfOperand == IsAddressOfOperand) && "Typo correction callback misconfigured"); if (CCC) { // Make sure the callback knows what the typo being diagnosed is. CCC->setTypoName(II); if (SS.isValid()) CCC->setTypoNNS(SS.getScopeRep()); } // FIXME: DiagnoseEmptyLookup produces bad diagnostics if we're looking for // a template name, but we happen to have always already looked up the name // before we get here if it must be a template name. if (DiagnoseEmptyLookup(S, SS, R, CCC ? *CCC : DefaultValidator, nullptr, None, &TE)) { if (TE && KeywordReplacement) { auto &State = getTypoExprState(TE); auto BestTC = State.Consumer->getNextCorrection(); if (BestTC.isKeyword()) { auto *II = BestTC.getCorrectionAsIdentifierInfo(); if (State.DiagHandler) State.DiagHandler(BestTC); KeywordReplacement->startToken(); KeywordReplacement->setKind(II->getTokenID()); KeywordReplacement->setIdentifierInfo(II); KeywordReplacement->setLocation(BestTC.getCorrectionRange().getBegin()); // Clean up the state associated with the TypoExpr, since it has // now been diagnosed (without a call to CorrectDelayedTyposInExpr). clearDelayedTypo(TE); // Signal that a correction to a keyword was performed by returning a // valid-but-null ExprResult. return (Expr*)nullptr; } State.Consumer->resetCorrectionStream(); } return TE ? TE : ExprError(); } assert(!R.empty() && "DiagnoseEmptyLookup returned false but added no results"); // If we found an Objective-C instance variable, let // LookupInObjCMethod build the appropriate expression to // reference the ivar. if (ObjCIvarDecl *Ivar = R.getAsSingle()) { R.clear(); ExprResult E(LookupInObjCMethod(R, S, Ivar->getIdentifier())); // In a hopelessly buggy code, Objective-C instance variable // lookup fails and no expression will be built to reference it. if (!E.isInvalid() && !E.get()) return ExprError(); return E; } } // This is guaranteed from this point on. assert(!R.empty() || ADL); // Check whether this might be a C++ implicit instance member access. 
// C++ [class.mfct.non-static]p3: // When an id-expression that is not part of a class member access // syntax and not used to form a pointer to member is used in the // body of a non-static member function of class X, if name lookup // resolves the name in the id-expression to a non-static non-type // member of some class C, the id-expression is transformed into a // class member access expression using (*this) as the // postfix-expression to the left of the . operator. // // But we don't actually need to do this for '&' operands if R // resolved to a function or overloaded function set, because the // expression is ill-formed if it actually works out to be a // non-static member function: // // C++ [expr.ref]p4: // Otherwise, if E1.E2 refers to a non-static member function. . . // [t]he expression can be used only as the left-hand operand of a // member function call. // // There are other safeguards against such uses, but it's important // to get this right here so that we don't end up making a // spuriously dependent expression if we're inside a dependent // instance method. if (!R.empty() && (*R.begin())->isCXXClassMember()) { bool MightBeImplicitMember; if (!IsAddressOfOperand) MightBeImplicitMember = true; else if (!SS.isEmpty()) MightBeImplicitMember = false; else if (R.isOverloadedResult()) MightBeImplicitMember = false; else if (R.isUnresolvableResult()) MightBeImplicitMember = true; else MightBeImplicitMember = isa(R.getFoundDecl()) || isa(R.getFoundDecl()) || isa(R.getFoundDecl()); if (MightBeImplicitMember) return BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, S); } if (TemplateArgs || TemplateKWLoc.isValid()) { // In C++1y, if this is a variable template id, then check it // in BuildTemplateIdExpr(). // The single lookup result must be a variable template declaration. if (Id.getKind() == UnqualifiedIdKind::IK_TemplateId && Id.TemplateId && Id.TemplateId->Kind == TNK_Var_template) { assert(R.getAsSingle() && "There should only be one declaration found."); } return BuildTemplateIdExpr(SS, TemplateKWLoc, R, ADL, TemplateArgs); } return BuildDeclarationNameExpr(SS, R, ADL); } /// BuildQualifiedDeclarationNameExpr - Build a C++ qualified /// declaration name, generally during template instantiation. /// There's a large number of things which don't need to be done along /// this path. ExprResult Sema::BuildQualifiedDeclarationNameExpr( CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI) { DeclContext *DC = computeDeclContext(SS, false); if (!DC) return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(), NameInfo, /*TemplateArgs=*/nullptr); if (RequireCompleteDeclContext(SS, DC)) return ExprError(); LookupResult R(*this, NameInfo, LookupOrdinaryName); LookupQualifiedName(R, DC); if (R.isAmbiguous()) return ExprError(); if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation) return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(), NameInfo, /*TemplateArgs=*/nullptr); if (R.empty()) { Diag(NameInfo.getLoc(), diag::err_no_member) << NameInfo.getName() << DC << SS.getRange(); return ExprError(); } if (const TypeDecl *TD = R.getAsSingle()) { // Diagnose a missing typename if this resolved unambiguously to a type in // a dependent context. If we can recover with a type, downgrade this to // a warning in Microsoft compatibility mode. 
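// Illustrative sketch (editor's example, not part of the original comment):
//   template <typename T> void f() { T::iterator it; }  // missing 'typename'
// The fix-it below inserts 'typename ' and recovery proceeds as if the user
// had written 'typename T::iterator'.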
unsigned DiagID = diag::err_typename_missing; if (RecoveryTSI && getLangOpts().MSVCCompat) DiagID = diag::ext_typename_missing; SourceLocation Loc = SS.getBeginLoc(); auto D = Diag(Loc, DiagID); D << SS.getScopeRep() << NameInfo.getName().getAsString() << SourceRange(Loc, NameInfo.getEndLoc()); // Don't recover if the caller isn't expecting us to or if we're in a SFINAE // context. if (!RecoveryTSI) return ExprError(); // Only issue the fixit if we're prepared to recover. D << FixItHint::CreateInsertion(Loc, "typename "); // Recover by pretending this was an elaborated type. QualType Ty = Context.getTypeDeclType(TD); TypeLocBuilder TLB; TLB.pushTypeSpec(Ty).setNameLoc(NameInfo.getLoc()); QualType ET = getElaboratedType(ETK_None, SS, Ty); ElaboratedTypeLoc QTL = TLB.push(ET); QTL.setElaboratedKeywordLoc(SourceLocation()); QTL.setQualifierLoc(SS.getWithLocInContext(Context)); *RecoveryTSI = TLB.getTypeSourceInfo(Context, ET); return ExprEmpty(); } // Defend against this resolving to an implicit member access. We usually // won't get here if this might be a legitimate a class member (we end up in // BuildMemberReferenceExpr instead), but this can be valid if we're forming // a pointer-to-member or in an unevaluated context in C++11. if (!R.empty() && (*R.begin())->isCXXClassMember() && !IsAddressOfOperand) return BuildPossibleImplicitMemberExpr(SS, /*TemplateKWLoc=*/SourceLocation(), R, /*TemplateArgs=*/nullptr, S); return BuildDeclarationNameExpr(SS, R, /* ADL */ false); } /// The parser has read a name in, and Sema has detected that we're currently /// inside an ObjC method. Perform some additional checks and determine if we /// should form a reference to an ivar. /// /// Ideally, most of this would be done by lookup, but there's /// actually quite a lot of extra work involved. DeclResult Sema::LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II) { SourceLocation Loc = Lookup.getNameLoc(); ObjCMethodDecl *CurMethod = getCurMethodDecl(); // Check for error condition which is already reported. if (!CurMethod) return DeclResult(true); // There are two cases to handle here. 1) scoped lookup could have failed, // in which case we should look for an ivar. 2) scoped lookup could have // found a decl, but that decl is outside the current instance method (i.e. // a global variable). In these two cases, we do a lookup for an ivar with // this name, if the lookup sucedes, we replace it our current decl. // If we're in a class method, we don't normally want to look for // ivars. But if we don't find anything else, and there's an // ivar, that's an error. bool IsClassMethod = CurMethod->isClassMethod(); bool LookForIvars; if (Lookup.empty()) LookForIvars = true; else if (IsClassMethod) LookForIvars = false; else LookForIvars = (Lookup.isSingleResult() && Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod()); ObjCInterfaceDecl *IFace = nullptr; if (LookForIvars) { IFace = CurMethod->getClassInterface(); ObjCInterfaceDecl *ClassDeclared; ObjCIvarDecl *IV = nullptr; if (IFace && (IV = IFace->lookupInstanceVariable(II, ClassDeclared))) { // Diagnose using an ivar in a class method. if (IsClassMethod) { Diag(Loc, diag::err_ivar_use_in_class_method) << IV->getDeclName(); return DeclResult(true); } // Diagnose the use of an ivar outside of the declaring class. if (IV->getAccessControl() == ObjCIvarDecl::Private && !declaresSameEntity(ClassDeclared, IFace) && !getLangOpts().DebuggerSupport) Diag(Loc, diag::err_private_ivar_access) << IV->getDeclName(); // Success. 
return IV; } } else if (CurMethod->isInstanceMethod()) { // We should warn if a local variable hides an ivar. if (ObjCInterfaceDecl *IFace = CurMethod->getClassInterface()) { ObjCInterfaceDecl *ClassDeclared; if (ObjCIvarDecl *IV = IFace->lookupInstanceVariable(II, ClassDeclared)) { if (IV->getAccessControl() != ObjCIvarDecl::Private || declaresSameEntity(IFace, ClassDeclared)) Diag(Loc, diag::warn_ivar_use_hidden) << IV->getDeclName(); } } } else if (Lookup.isSingleResult() && Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod()) { // If accessing a stand-alone ivar in a class method, this is an error. if (const ObjCIvarDecl *IV = dyn_cast(Lookup.getFoundDecl())) { Diag(Loc, diag::err_ivar_use_in_class_method) << IV->getDeclName(); return DeclResult(true); } } // Didn't encounter an error, didn't find an ivar. return DeclResult(false); } ExprResult Sema::BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV) { ObjCMethodDecl *CurMethod = getCurMethodDecl(); assert(CurMethod && CurMethod->isInstanceMethod() && "should not reference ivar from this context"); ObjCInterfaceDecl *IFace = CurMethod->getClassInterface(); assert(IFace && "should not reference ivar from this context"); // If we're referencing an invalid decl, just return this as a silent // error node. The error diagnostic was already emitted on the decl. if (IV->isInvalidDecl()) return ExprError(); // Check if referencing a field with __attribute__((deprecated)). if (DiagnoseUseOfDecl(IV, Loc)) return ExprError(); // FIXME: This should use a new expr for a direct reference, don't // turn this into Self->ivar, just return a BareIVarExpr or something. IdentifierInfo &II = Context.Idents.get("self"); UnqualifiedId SelfName; SelfName.setIdentifier(&II, SourceLocation()); SelfName.setKind(UnqualifiedIdKind::IK_ImplicitSelfParam); CXXScopeSpec SelfScopeSpec; SourceLocation TemplateKWLoc; ExprResult SelfExpr = ActOnIdExpression(S, SelfScopeSpec, TemplateKWLoc, SelfName, /*HasTrailingLParen=*/false, /*IsAddressOfOperand=*/false); if (SelfExpr.isInvalid()) return ExprError(); SelfExpr = DefaultLvalueConversion(SelfExpr.get()); if (SelfExpr.isInvalid()) return ExprError(); MarkAnyDeclReferenced(Loc, IV, true); ObjCMethodFamily MF = CurMethod->getMethodFamily(); if (MF != OMF_init && MF != OMF_dealloc && MF != OMF_finalize && !IvarBacksCurrentMethodAccessor(IFace, CurMethod, IV)) Diag(Loc, diag::warn_direct_ivar_access) << IV->getDeclName(); ObjCIvarRefExpr *Result = new (Context) ObjCIvarRefExpr(IV, IV->getUsageType(SelfExpr.get()->getType()), Loc, IV->getLocation(), SelfExpr.get(), true, true); if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) { if (!isUnevaluatedContext() && !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) getCurFunction()->recordUseOfWeak(Result); } if (getLangOpts().ObjCAutoRefCount) if (const BlockDecl *BD = CurContext->getInnermostBlockDecl()) ImplicitlyRetainedSelfLocs.push_back({Loc, BD}); return Result; } /// The parser has read a name in, and Sema has detected that we're currently /// inside an ObjC method. Perform some additional checks and determine if we /// should form a reference to an ivar. If so, build an expression referencing /// that ivar. ExprResult Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation) { // FIXME: Integrate this lookup step into LookupParsedName. 
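// Illustrative sketch (editor's example, not part of the original comment):
//   @implementation Counter
//   - (int)value { return _count; }   // bare '_count' becomes self->_count
//   @end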
DeclResult Ivar = LookupIvarInObjCMethod(Lookup, S, II); if (Ivar.isInvalid()) return ExprError(); if (Ivar.isUsable()) return BuildIvarRefExpr(S, Lookup.getNameLoc(), cast(Ivar.get())); if (Lookup.empty() && II && AllowBuiltinCreation) LookupBuiltin(Lookup); // Sentinel value saying that we didn't do anything special. return ExprResult(false); } /// Cast a base object to a member's actual type. /// /// Logically this happens in three phases: /// /// * First we cast from the base type to the naming class. /// The naming class is the class into which we were looking /// when we found the member; it's the qualifier type if a /// qualifier was provided, and otherwise it's the base type. /// /// * Next we cast from the naming class to the declaring class. /// If the member we found was brought into a class's scope by /// a using declaration, this is that class; otherwise it's /// the class declaring the member. /// /// * Finally we cast from the declaring class to the "true" /// declaring class of the member. This conversion does not /// obey access control. ExprResult Sema::PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member) { CXXRecordDecl *RD = dyn_cast(Member->getDeclContext()); if (!RD) return From; QualType DestRecordType; QualType DestType; QualType FromRecordType; QualType FromType = From->getType(); bool PointerConversions = false; if (isa(Member)) { DestRecordType = Context.getCanonicalType(Context.getTypeDeclType(RD)); auto FromPtrType = FromType->getAs(); DestRecordType = Context.getAddrSpaceQualType( DestRecordType, FromPtrType ? FromType->getPointeeType().getAddressSpace() : FromType.getAddressSpace()); if (FromPtrType) { DestType = Context.getPointerType(DestRecordType); FromRecordType = FromPtrType->getPointeeType(); PointerConversions = true; } else { DestType = DestRecordType; FromRecordType = FromType; } } else if (CXXMethodDecl *Method = dyn_cast(Member)) { if (Method->isStatic()) return From; DestType = Method->getThisType(); DestRecordType = DestType->getPointeeType(); if (FromType->getAs()) { FromRecordType = FromType->getPointeeType(); PointerConversions = true; } else { FromRecordType = FromType; DestType = DestRecordType; } LangAS FromAS = FromRecordType.getAddressSpace(); LangAS DestAS = DestRecordType.getAddressSpace(); if (FromAS != DestAS) { QualType FromRecordTypeWithoutAS = Context.removeAddrSpaceQualType(FromRecordType); QualType FromTypeWithDestAS = Context.getAddrSpaceQualType(FromRecordTypeWithoutAS, DestAS); if (PointerConversions) FromTypeWithDestAS = Context.getPointerType(FromTypeWithDestAS); From = ImpCastExprToType(From, FromTypeWithDestAS, CK_AddressSpaceConversion, From->getValueKind()) .get(); } } else { // No conversion necessary. return From; } if (DestType->isDependentType() || FromType->isDependentType()) return From; // If the unqualified types are the same, no conversion is necessary. if (Context.hasSameUnqualifiedType(FromRecordType, DestRecordType)) return From; SourceRange FromRange = From->getSourceRange(); SourceLocation FromLoc = FromRange.getBegin(); ExprValueKind VK = From->getValueKind(); // C++ [class.member.lookup]p8: // [...] Ambiguities can often be resolved by qualifying a name with its // class name. // // If the member was a qualified name and the qualified referred to a // specific base subobject type, we'll cast to that intermediate type // first and then to the object in which the member is declared. 
That allows // one to resolve ambiguities in, e.g., a diamond-shaped hierarchy such as: // // class Base { public: int x; }; // class Derived1 : public Base { }; // class Derived2 : public Base { }; // class VeryDerived : public Derived1, public Derived2 { void f(); }; // // void VeryDerived::f() { // x = 17; // error: ambiguous base subobjects // Derived1::x = 17; // okay, pick the Base subobject of Derived1 // } if (Qualifier && Qualifier->getAsType()) { QualType QType = QualType(Qualifier->getAsType(), 0); assert(QType->isRecordType() && "lookup done with non-record type"); QualType QRecordType = QualType(QType->getAs(), 0); // In C++98, the qualifier type doesn't actually have to be a base // type of the object type, in which case we just ignore it. // Otherwise build the appropriate casts. if (IsDerivedFrom(FromLoc, FromRecordType, QRecordType)) { CXXCastPath BasePath; if (CheckDerivedToBaseConversion(FromRecordType, QRecordType, FromLoc, FromRange, &BasePath)) return ExprError(); if (PointerConversions) QType = Context.getPointerType(QType); From = ImpCastExprToType(From, QType, CK_UncheckedDerivedToBase, VK, &BasePath).get(); FromType = QType; FromRecordType = QRecordType; // If the qualifier type was the same as the destination type, // we're done. if (Context.hasSameUnqualifiedType(FromRecordType, DestRecordType)) return From; } } bool IgnoreAccess = false; // If we actually found the member through a using declaration, cast // down to the using declaration's type. // // Pointer equality is fine here because only one declaration of a // class ever has member declarations. if (FoundDecl->getDeclContext() != Member->getDeclContext()) { assert(isa(FoundDecl)); QualType URecordType = Context.getTypeDeclType( cast(FoundDecl->getDeclContext())); // We only need to do this if the naming-class to declaring-class // conversion is non-trivial. if (!Context.hasSameUnqualifiedType(FromRecordType, URecordType)) { assert(IsDerivedFrom(FromLoc, FromRecordType, URecordType)); CXXCastPath BasePath; if (CheckDerivedToBaseConversion(FromRecordType, URecordType, FromLoc, FromRange, &BasePath)) return ExprError(); QualType UType = URecordType; if (PointerConversions) UType = Context.getPointerType(UType); From = ImpCastExprToType(From, UType, CK_UncheckedDerivedToBase, VK, &BasePath).get(); FromType = UType; FromRecordType = URecordType; } // We don't do access control for the conversion from the // declaring class to the true declaring class. IgnoreAccess = true; } CXXCastPath BasePath; if (CheckDerivedToBaseConversion(FromRecordType, DestRecordType, FromLoc, FromRange, &BasePath, IgnoreAccess)) return ExprError(); return ImpCastExprToType(From, DestType, CK_UncheckedDerivedToBase, VK, &BasePath); } bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen) { // Only when used directly as the postfix-expression of a call. if (!HasTrailingLParen) return false; // Never if a scope specifier was provided. if (SS.isSet()) return false; // Only in C++ or ObjC++. if (!getLangOpts().CPlusPlus) return false; // Turn off ADL when we find certain kinds of declarations during // normal lookup: for (NamedDecl *D : R) { // C++0x [basic.lookup.argdep]p3: // -- a declaration of a class member // Since using decls preserve this property, we check this on the // original decl. 
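// Illustrative sketch (editor's example, not part of the original comment):
//   namespace N { struct X {}; void f(X); }
//   struct S {
//     void f(N::X);
//     void g(N::X x) { f(x); }  // lookup finds the member, so ADL is off
//   };                          // and N::f is not a candidate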
if (D->isCXXClassMember()) return false; // C++0x [basic.lookup.argdep]p3: // -- a block-scope function declaration that is not a // using-declaration // NOTE: we also trigger this for function templates (in fact, we // don't check the decl type at all, since all other decl types // turn off ADL anyway). if (isa(D)) D = cast(D)->getTargetDecl(); else if (D->getLexicalDeclContext()->isFunctionOrMethod()) return false; // C++0x [basic.lookup.argdep]p3: // -- a declaration that is neither a function or a function // template // And also for builtin functions. if (isa(D)) { FunctionDecl *FDecl = cast(D); // But also builtin functions. if (FDecl->getBuiltinID() && FDecl->isImplicit()) return false; } else if (!isa(D)) return false; } return true; } /// Diagnoses obvious problems with the use of the given declaration /// as an expression. This is only actually called for lookups that /// were not overloaded, and it doesn't promise that the declaration /// will in fact be used. static bool CheckDeclInExpr(Sema &S, SourceLocation Loc, NamedDecl *D) { if (D->isInvalidDecl()) return true; if (isa(D)) { S.Diag(Loc, diag::err_unexpected_typedef) << D->getDeclName(); return true; } if (isa(D)) { S.Diag(Loc, diag::err_unexpected_interface) << D->getDeclName(); return true; } if (isa(D)) { S.Diag(Loc, diag::err_unexpected_namespace) << D->getDeclName(); return true; } return false; } // Certain multiversion types should be treated as overloaded even when there is // only one result. static bool ShouldLookupResultBeMultiVersionOverload(const LookupResult &R) { assert(R.isSingleResult() && "Expected only a single result"); const auto *FD = dyn_cast(R.getFoundDecl()); return FD && (FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion()); } ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl) { // If this is a single, fully-resolved result and we don't need ADL, // just build an ordinary singleton decl ref. if (!NeedsADL && R.isSingleResult() && !R.getAsSingle() && !ShouldLookupResultBeMultiVersionOverload(R)) return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(), R.getFoundDecl(), R.getRepresentativeDecl(), nullptr, AcceptInvalidDecl); // We only need to check the declaration if there's exactly one // result, because in the overloaded case the results can only be // functions and function templates. if (R.isSingleResult() && !ShouldLookupResultBeMultiVersionOverload(R) && CheckDeclInExpr(*this, R.getNameLoc(), R.getFoundDecl())) return ExprError(); // Otherwise, just build an unresolved lookup expression. Suppress // any lookup-related diagnostics; we'll hash these out later, when // we've picked a target. R.suppressDiagnostics(); UnresolvedLookupExpr *ULE = UnresolvedLookupExpr::Create(Context, R.getNamingClass(), SS.getWithLocInContext(Context), R.getLookupNameInfo(), NeedsADL, R.isOverloadedResult(), R.begin(), R.end()); return ULE; } static void diagnoseUncapturableValueReference(Sema &S, SourceLocation loc, ValueDecl *var, DeclContext *DC); /// Complete semantic analysis for a reference to the given declaration. 
ExprResult Sema::BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD, const TemplateArgumentListInfo *TemplateArgs, bool AcceptInvalidDecl) { assert(D && "Cannot refer to a NULL declaration"); assert(!isa(D) && "Cannot refer unambiguously to a function template"); SourceLocation Loc = NameInfo.getLoc(); if (CheckDeclInExpr(*this, Loc, D)) return ExprError(); if (TemplateDecl *Template = dyn_cast(D)) { // Specifically diagnose references to class templates that are missing // a template argument list. diagnoseMissingTemplateArguments(TemplateName(Template), Loc); return ExprError(); } // Make sure that we're referring to a value. ValueDecl *VD = dyn_cast(D); if (!VD) { Diag(Loc, diag::err_ref_non_value) << D << SS.getRange(); Diag(D->getLocation(), diag::note_declared_at); return ExprError(); } // Check whether this declaration can be used. Note that we suppress // this check when we're going to perform argument-dependent lookup // on this function name, because this might not be the function // that overload resolution actually selects. if (DiagnoseUseOfDecl(VD, Loc)) return ExprError(); // Only create DeclRefExpr's for valid Decl's. if (VD->isInvalidDecl() && !AcceptInvalidDecl) return ExprError(); // Handle members of anonymous structs and unions. If we got here, // and the reference is to a class member indirect field, then this // must be the subject of a pointer-to-member expression. if (IndirectFieldDecl *indirectField = dyn_cast(VD)) if (!indirectField->isCXXClassMember()) return BuildAnonymousStructUnionMemberReference(SS, NameInfo.getLoc(), indirectField); { QualType type = VD->getType(); if (type.isNull()) return ExprError(); ExprValueKind valueKind = VK_RValue; // In 'T ...V;', the type of the declaration 'V' is 'T...', but the type of // a reference to 'V' is simply (unexpanded) 'T'. The type, like the value, // is expanded by some outer '...' in the context of the use. type = type.getNonPackExpansionType(); switch (D->getKind()) { // Ignore all the non-ValueDecl kinds. #define ABSTRACT_DECL(kind) #define VALUE(type, base) #define DECL(type, base) \ case Decl::type: #include "clang/AST/DeclNodes.inc" llvm_unreachable("invalid value decl kind"); // These shouldn't make it here. case Decl::ObjCAtDefsField: llvm_unreachable("forming non-member reference to ivar?"); // Enum constants are always r-values and never references. // Unresolved using declarations are dependent. case Decl::EnumConstant: case Decl::UnresolvedUsingValue: case Decl::OMPDeclareReduction: case Decl::OMPDeclareMapper: valueKind = VK_RValue; break; // Fields and indirect fields that got here must be for // pointer-to-member expressions; we just call them l-values for // internal consistency, because this subexpression doesn't really // exist in the high-level semantics. case Decl::Field: case Decl::IndirectField: case Decl::ObjCIvar: assert(getLangOpts().CPlusPlus && "building reference to field in C?"); // These can't have reference type in well-formed programs, but // for internal consistency we do this anyway. type = type.getNonReferenceType(); valueKind = VK_LValue; break; // Non-type template parameters are either l-values or r-values // depending on the type. 
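// Editorial illustration (hypothetical names): how the value category of a
// non-type template parameter depends on its declared type, as handled below.
//
//   template <int N, const int &R> int f() {
//     return N + R;   // 'N' is a prvalue of type int;
//   }                 // 'R' is an lvalue of the referenced type
//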
case Decl::NonTypeTemplateParm: { if (const ReferenceType *reftype = type->getAs()) { type = reftype->getPointeeType(); valueKind = VK_LValue; // even if the parameter is an r-value reference break; } // For non-references, we need to strip qualifiers just in case // the template parameter was declared as 'const int' or whatever. valueKind = VK_RValue; type = type.getUnqualifiedType(); break; } case Decl::Var: case Decl::VarTemplateSpecialization: case Decl::VarTemplatePartialSpecialization: case Decl::Decomposition: case Decl::OMPCapturedExpr: // In C, "extern void blah;" is valid and is an r-value. if (!getLangOpts().CPlusPlus && !type.hasQualifiers() && type->isVoidType()) { valueKind = VK_RValue; break; } LLVM_FALLTHROUGH; case Decl::ImplicitParam: case Decl::ParmVar: { // These are always l-values. valueKind = VK_LValue; type = type.getNonReferenceType(); // FIXME: Does the addition of const really only apply in // potentially-evaluated contexts? Since the variable isn't actually // captured in an unevaluated context, it seems that the answer is no. if (!isUnevaluatedContext()) { QualType CapturedType = getCapturedDeclRefType(cast(VD), Loc); if (!CapturedType.isNull()) type = CapturedType; } break; } case Decl::Binding: { // These are always lvalues. valueKind = VK_LValue; type = type.getNonReferenceType(); // FIXME: Support lambda-capture of BindingDecls, once CWG actually // decides how that's supposed to work. auto *BD = cast(VD); if (BD->getDeclContext() != CurContext) { auto *DD = dyn_cast_or_null(BD->getDecomposedDecl()); if (DD && DD->hasLocalStorage()) diagnoseUncapturableValueReference(*this, Loc, BD, CurContext); } break; } case Decl::Function: { if (unsigned BID = cast(VD)->getBuiltinID()) { if (!Context.BuiltinInfo.isPredefinedLibFunction(BID)) { type = Context.BuiltinFnTy; valueKind = VK_RValue; break; } } const FunctionType *fty = type->castAs(); // If we're referring to a function with an __unknown_anytype // result type, make the entire expression __unknown_anytype. if (fty->getReturnType() == Context.UnknownAnyTy) { type = Context.UnknownAnyTy; valueKind = VK_RValue; break; } // Functions are l-values in C++. if (getLangOpts().CPlusPlus) { valueKind = VK_LValue; break; } // C99 DR 316 says that, if a function type comes from a // function definition (without a prototype), that type is only // used for checking compatibility. Therefore, when referencing // the function, we pretend that we don't have the full function // type. if (!cast(VD)->hasPrototype() && isa(fty)) type = Context.getFunctionNoProtoType(fty->getReturnType(), fty->getExtInfo()); // Functions are r-values in C. valueKind = VK_RValue; break; } case Decl::CXXDeductionGuide: llvm_unreachable("building reference to deduction guide"); case Decl::MSProperty: case Decl::MSGuid: // FIXME: Should MSGuidDecl be subject to capture in OpenMP, // or duplicated between host and device? valueKind = VK_LValue; break; case Decl::CXXMethod: // If we're referring to a method with an __unknown_anytype // result type, make the entire expression __unknown_anytype. // This should only be possible with a type written directly. if (const FunctionProtoType *proto = dyn_cast(VD->getType())) if (proto->getReturnType() == Context.UnknownAnyTy) { type = Context.UnknownAnyTy; valueKind = VK_RValue; break; } // C++ methods are l-values if static, r-values if non-static. 
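// Editorial illustration (hypothetical names) of the rule handled below: a
// static member function behaves like an ordinary function lvalue, while a
// non-static one can only be used to form a pointer to member.
//
//   struct S { static void f(); void g(); };
//   void (*p)()    = &S::f;   // fine: S::f is an addressable function lvalue
//   void (S::*q)() = &S::g;   // non-static: usable only as &S::g (pointer to member)
//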
if (cast(VD)->isStatic()) { valueKind = VK_LValue; break; } LLVM_FALLTHROUGH; case Decl::CXXConversion: case Decl::CXXDestructor: case Decl::CXXConstructor: valueKind = VK_RValue; break; } return BuildDeclRefExpr(VD, type, valueKind, NameInfo, &SS, FoundD, /*FIXME: TemplateKWLoc*/ SourceLocation(), TemplateArgs); } } static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source, SmallString<32> &Target) { Target.resize(CharByteWidth * (Source.size() + 1)); char *ResultPtr = &Target[0]; const llvm::UTF8 *ErrorPtr; bool success = llvm::ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr); (void)success; assert(success); Target.resize(ResultPtr - &Target[0]); } ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK) { // Pick the current block, lambda, captured statement or function. Decl *currentDecl = nullptr; if (const BlockScopeInfo *BSI = getCurBlock()) currentDecl = BSI->TheDecl; else if (const LambdaScopeInfo *LSI = getCurLambda()) currentDecl = LSI->CallOperator; else if (const CapturedRegionScopeInfo *CSI = getCurCapturedRegion()) currentDecl = CSI->TheCapturedDecl; else currentDecl = getCurFunctionOrMethodDecl(); if (!currentDecl) { Diag(Loc, diag::ext_predef_outside_function); currentDecl = Context.getTranslationUnitDecl(); } QualType ResTy; StringLiteral *SL = nullptr; if (cast(currentDecl)->isDependentContext()) ResTy = Context.DependentTy; else { // Pre-defined identifiers are of type char[x], where x is the length of // the string. auto Str = PredefinedExpr::ComputeName(IK, currentDecl); unsigned Length = Str.length(); llvm::APInt LengthI(32, Length + 1); if (IK == PredefinedExpr::LFunction || IK == PredefinedExpr::LFuncSig) { ResTy = Context.adjustStringLiteralBaseType(Context.WideCharTy.withConst()); SmallString<32> RawChars; ConvertUTF8ToWideString(Context.getTypeSizeInChars(ResTy).getQuantity(), Str, RawChars); ResTy = Context.getConstantArrayType(ResTy, LengthI, nullptr, ArrayType::Normal, /*IndexTypeQuals*/ 0); SL = StringLiteral::Create(Context, RawChars, StringLiteral::Wide, /*Pascal*/ false, ResTy, Loc); } else { ResTy = Context.adjustStringLiteralBaseType(Context.CharTy.withConst()); ResTy = Context.getConstantArrayType(ResTy, LengthI, nullptr, ArrayType::Normal, /*IndexTypeQuals*/ 0); SL = StringLiteral::Create(Context, Str, StringLiteral::Ascii, /*Pascal*/ false, ResTy, Loc); } } return PredefinedExpr::Create(Context, Loc, ResTy, IK, SL); } static std::pair GetUniqueStableNameInfo(ASTContext &Context, QualType OpType, SourceLocation OpLoc, PredefinedExpr::IdentKind K) { std::pair Result{{}, nullptr}; if (OpType->isDependentType()) { Result.first = Context.DependentTy; return Result; } std::string Str = PredefinedExpr::ComputeName(Context, K, OpType); llvm::APInt Length(32, Str.length() + 1); Result.first = Context.adjustStringLiteralBaseType(Context.CharTy.withConst()); Result.first = Context.getConstantArrayType( Result.first, Length, nullptr, ArrayType::Normal, /*IndexTypeQuals*/ 0); Result.second = StringLiteral::Create(Context, Str, StringLiteral::Ascii, /*Pascal*/ false, Result.first, OpLoc); return Result; } ExprResult Sema::BuildUniqueStableName(SourceLocation OpLoc, TypeSourceInfo *Operand) { QualType ResultTy; StringLiteral *SL; std::tie(ResultTy, SL) = GetUniqueStableNameInfo( Context, Operand->getType(), OpLoc, PredefinedExpr::UniqueStableNameType); return PredefinedExpr::Create(Context, OpLoc, ResultTy, PredefinedExpr::UniqueStableNameType, SL, Operand); } ExprResult 
Sema::BuildUniqueStableName(SourceLocation OpLoc, Expr *E) { QualType ResultTy; StringLiteral *SL; std::tie(ResultTy, SL) = GetUniqueStableNameInfo( Context, E->getType(), OpLoc, PredefinedExpr::UniqueStableNameExpr); return PredefinedExpr::Create(Context, OpLoc, ResultTy, PredefinedExpr::UniqueStableNameExpr, SL, E); } ExprResult Sema::ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation L, SourceLocation R, ParsedType Ty) { TypeSourceInfo *TInfo = nullptr; QualType T = GetTypeFromParser(Ty, &TInfo); if (T.isNull()) return ExprError(); if (!TInfo) TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc); return BuildUniqueStableName(OpLoc, TInfo); } ExprResult Sema::ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation L, SourceLocation R, Expr *E) { return BuildUniqueStableName(OpLoc, E); } ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) { PredefinedExpr::IdentKind IK; switch (Kind) { default: llvm_unreachable("Unknown simple primary expr!"); case tok::kw___func__: IK = PredefinedExpr::Func; break; // [C99 6.4.2.2] case tok::kw___FUNCTION__: IK = PredefinedExpr::Function; break; case tok::kw___FUNCDNAME__: IK = PredefinedExpr::FuncDName; break; // [MS] case tok::kw___FUNCSIG__: IK = PredefinedExpr::FuncSig; break; // [MS] case tok::kw_L__FUNCTION__: IK = PredefinedExpr::LFunction; break; // [MS] case tok::kw_L__FUNCSIG__: IK = PredefinedExpr::LFuncSig; break; // [MS] case tok::kw___PRETTY_FUNCTION__: IK = PredefinedExpr::PrettyFunction; break; } return BuildPredefinedExpr(Loc, IK); } ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) { SmallString<16> CharBuffer; bool Invalid = false; StringRef ThisTok = PP.getSpelling(Tok, CharBuffer, &Invalid); if (Invalid) return ExprError(); CharLiteralParser Literal(ThisTok.begin(), ThisTok.end(), Tok.getLocation(), PP, Tok.getKind()); if (Literal.hadError()) return ExprError(); QualType Ty; if (Literal.isWide()) Ty = Context.WideCharTy; // L'x' -> wchar_t in C and C++. else if (Literal.isUTF8() && getLangOpts().Char8) Ty = Context.Char8Ty; // u8'x' -> char8_t when it exists. else if (Literal.isUTF16()) Ty = Context.Char16Ty; // u'x' -> char16_t in C11 and C++11. else if (Literal.isUTF32()) Ty = Context.Char32Ty; // U'x' -> char32_t in C11 and C++11. else if (!getLangOpts().CPlusPlus || Literal.isMultiChar()) Ty = Context.IntTy; // 'x' -> int in C, 'wxyz' -> int in C++. else Ty = Context.CharTy; // 'x' -> char in C++ CharacterLiteral::CharacterKind Kind = CharacterLiteral::Ascii; if (Literal.isWide()) Kind = CharacterLiteral::Wide; else if (Literal.isUTF16()) Kind = CharacterLiteral::UTF16; else if (Literal.isUTF32()) Kind = CharacterLiteral::UTF32; else if (Literal.isUTF8()) Kind = CharacterLiteral::UTF8; Expr *Lit = new (Context) CharacterLiteral(Literal.getValue(), Kind, Ty, Tok.getLocation()); if (Literal.getUDSuffix().empty()) return Lit; // We're building a user-defined literal. IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix()); SourceLocation UDSuffixLoc = getUDSuffixLoc(*this, Tok.getLocation(), Literal.getUDSuffixOffset()); // Make sure we're allowed user-defined literals here. 
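// Editorial illustration (hypothetical suffix): the cooked form referred to in
// [lex.ext]p6 below, where the character literal becomes the argument of the
// literal operator. The value 65 assumes an ASCII execution character set.
//
//   constexpr int operator"" _code(char c) { return c; }
//   static_assert('A'_code == 65, "ASCII assumed");
//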
if (!UDLScope) return ExprError(Diag(UDSuffixLoc, diag::err_invalid_character_udl)); // C++11 [lex.ext]p6: The literal L is treated as a call of the form // operator "" X (ch) return BuildCookedLiteralOperatorCall(*this, UDLScope, UDSuffix, UDSuffixLoc, Lit, Tok.getLocation()); } ExprResult Sema::ActOnIntegerConstant(SourceLocation Loc, uint64_t Val) { unsigned IntSize = Context.getTargetInfo().getIntWidth(); return IntegerLiteral::Create(Context, llvm::APInt(IntSize, Val), Context.IntTy, Loc); } static Expr *BuildFloatingLiteral(Sema &S, NumericLiteralParser &Literal, QualType Ty, SourceLocation Loc) { const llvm::fltSemantics &Format = S.Context.getFloatTypeSemantics(Ty); using llvm::APFloat; APFloat Val(Format); APFloat::opStatus result = Literal.GetFloatValue(Val); // Overflow is always an error, but underflow is only an error if // we underflowed to zero (APFloat reports denormals as underflow). if ((result & APFloat::opOverflow) || ((result & APFloat::opUnderflow) && Val.isZero())) { unsigned diagnostic; SmallString<20> buffer; if (result & APFloat::opOverflow) { diagnostic = diag::warn_float_overflow; APFloat::getLargest(Format).toString(buffer); } else { diagnostic = diag::warn_float_underflow; APFloat::getSmallest(Format).toString(buffer); } S.Diag(Loc, diagnostic) << Ty << StringRef(buffer.data(), buffer.size()); } bool isExact = (result == APFloat::opOK); return FloatingLiteral::Create(S.Context, Val, isExact, Ty, Loc); } bool Sema::CheckLoopHintExpr(Expr *E, SourceLocation Loc) { assert(E && "Invalid expression"); if (E->isValueDependent()) return false; QualType QT = E->getType(); if (!QT->isIntegerType() || QT->isBooleanType() || QT->isCharType()) { Diag(E->getExprLoc(), diag::err_pragma_loop_invalid_argument_type) << QT; return true; } llvm::APSInt ValueAPS; ExprResult R = VerifyIntegerConstantExpression(E, &ValueAPS); if (R.isInvalid()) return true; bool ValueIsPositive = ValueAPS.isStrictlyPositive(); if (!ValueIsPositive || ValueAPS.getActiveBits() > 31) { Diag(E->getExprLoc(), diag::err_pragma_loop_invalid_argument_value) << ValueAPS.toString(10) << ValueIsPositive; return true; } return false; } ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) { // Fast path for a single digit (which is quite common). A single digit // cannot have a trigraph, escaped newline, radix prefix, or suffix. if (Tok.getLength() == 1) { const char Val = PP.getSpellingOfSingleCharacterNumericConstant(Tok); return ActOnIntegerConstant(Tok.getLocation(), Val-'0'); } SmallString<128> SpellingBuffer; // NumericLiteralParser wants to overread by one character. Add padding to // the buffer in case the token is copied to the buffer. If getSpelling() // returns a StringRef to the memory buffer, it should have a null char at // the EOF, so it is also safe. SpellingBuffer.resize(Tok.getLength() + 1); // Get the spelling of the token, which eliminates trigraphs, etc. bool Invalid = false; StringRef TokSpelling = PP.getSpelling(Tok, SpellingBuffer, &Invalid); if (Invalid) return ExprError(); NumericLiteralParser Literal(TokSpelling, Tok.getLocation(), PP.getSourceManager(), PP.getLangOpts(), PP.getTargetInfo(), PP.getDiagnostics()); if (Literal.hadError) return ExprError(); if (Literal.hasUDSuffix()) { // We're building a user-defined literal. IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix()); SourceLocation UDSuffixLoc = getUDSuffixLoc(*this, Tok.getLocation(), Literal.getUDSuffixOffset()); // Make sure we're allowed user-defined literals here. 
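// Editorial illustration (hypothetical suffixes) of the cooked vs. raw forms
// chosen by the literal operator lookup that follows:
//
//   constexpr long double operator"" _km(long double v) { return v; }  // cooked
//   long double operator"" _parse(const char *spelling);               // raw
//   auto a = 1.5_km;     // treated as  operator"" _km(1.5L)
//   auto b = 42_parse;   // treated as  operator"" _parse("42")
//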
if (!UDLScope) return ExprError(Diag(UDSuffixLoc, diag::err_invalid_numeric_udl)); QualType CookedTy; if (Literal.isFloatingLiteral()) { // C++11 [lex.ext]p4: If S contains a literal operator with parameter type // long double, the literal is treated as a call of the form // operator "" X (f L) CookedTy = Context.LongDoubleTy; } else { // C++11 [lex.ext]p3: If S contains a literal operator with parameter type // unsigned long long, the literal is treated as a call of the form // operator "" X (n ULL) CookedTy = Context.UnsignedLongLongTy; } DeclarationName OpName = Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix); DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc); OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc); SourceLocation TokLoc = Tok.getLocation(); // Perform literal operator lookup to determine if we're building a raw // literal or a cooked one. LookupResult R(*this, OpName, UDSuffixLoc, LookupOrdinaryName); switch (LookupLiteralOperator(UDLScope, R, CookedTy, /*AllowRaw*/ true, /*AllowTemplate*/ true, /*AllowStringTemplate*/ false, /*DiagnoseMissing*/ !Literal.isImaginary)) { case LOLR_ErrorNoDiagnostic: // Lookup failure for imaginary constants isn't fatal, there's still the // GNU extension producing _Complex types. break; case LOLR_Error: return ExprError(); case LOLR_Cooked: { Expr *Lit; if (Literal.isFloatingLiteral()) { Lit = BuildFloatingLiteral(*this, Literal, CookedTy, Tok.getLocation()); } else { llvm::APInt ResultVal(Context.getTargetInfo().getLongLongWidth(), 0); if (Literal.GetIntegerValue(ResultVal)) Diag(Tok.getLocation(), diag::err_integer_literal_too_large) << /* Unsigned */ 1; Lit = IntegerLiteral::Create(Context, ResultVal, CookedTy, Tok.getLocation()); } return BuildLiteralOperatorCall(R, OpNameInfo, Lit, TokLoc); } case LOLR_Raw: { // C++11 [lit.ext]p3, p4: If S contains a raw literal operator, the // literal is treated as a call of the form // operator "" X ("n") unsigned Length = Literal.getUDSuffixOffset(); QualType StrTy = Context.getConstantArrayType( Context.adjustStringLiteralBaseType(Context.CharTy.withConst()), llvm::APInt(32, Length + 1), nullptr, ArrayType::Normal, 0); Expr *Lit = StringLiteral::Create( Context, StringRef(TokSpelling.data(), Length), StringLiteral::Ascii, /*Pascal*/false, StrTy, &TokLoc, 1); return BuildLiteralOperatorCall(R, OpNameInfo, Lit, TokLoc); } case LOLR_Template: { // C++11 [lit.ext]p3, p4: Otherwise (S contains a literal operator // template), L is treated as a call fo the form // operator "" X <'c1', 'c2', ... 'ck'>() // where n is the source character sequence c1 c2 ... ck. 
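// Editorial illustration (hypothetical suffix): the literal operator template
// case built below, where each character of the spelling becomes a template
// argument:
//
//   template <char... Cs>
//   constexpr unsigned operator"" _digits() { return sizeof...(Cs); }
//   static_assert(12345_digits == 5, "five source characters");
//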
TemplateArgumentListInfo ExplicitArgs; unsigned CharBits = Context.getIntWidth(Context.CharTy); bool CharIsUnsigned = Context.CharTy->isUnsignedIntegerType(); llvm::APSInt Value(CharBits, CharIsUnsigned); for (unsigned I = 0, N = Literal.getUDSuffixOffset(); I != N; ++I) { Value = TokSpelling[I]; TemplateArgument Arg(Context, Value, Context.CharTy); TemplateArgumentLocInfo ArgInfo; ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo)); } return BuildLiteralOperatorCall(R, OpNameInfo, None, TokLoc, &ExplicitArgs); } case LOLR_StringTemplate: llvm_unreachable("unexpected literal operator lookup result"); } } Expr *Res; if (Literal.isFixedPointLiteral()) { QualType Ty; if (Literal.isAccum) { if (Literal.isHalf) { Ty = Context.ShortAccumTy; } else if (Literal.isLong) { Ty = Context.LongAccumTy; } else { Ty = Context.AccumTy; } } else if (Literal.isFract) { if (Literal.isHalf) { Ty = Context.ShortFractTy; } else if (Literal.isLong) { Ty = Context.LongFractTy; } else { Ty = Context.FractTy; } } if (Literal.isUnsigned) Ty = Context.getCorrespondingUnsignedType(Ty); bool isSigned = !Literal.isUnsigned; unsigned scale = Context.getFixedPointScale(Ty); unsigned bit_width = Context.getTypeInfo(Ty).Width; llvm::APInt Val(bit_width, 0, isSigned); bool Overflowed = Literal.GetFixedPointValue(Val, scale); bool ValIsZero = Val.isNullValue() && !Overflowed; auto MaxVal = Context.getFixedPointMax(Ty).getValue(); if (Literal.isFract && Val == MaxVal + 1 && !ValIsZero) // Clause 6.4.4 - The value of a constant shall be in the range of // representable values for its type, with exception for constants of a // fract type with a value of exactly 1; such a constant shall denote // the maximal value for the type. --Val; else if (Val.ugt(MaxVal) || Overflowed) Diag(Tok.getLocation(), diag::err_too_large_for_fixed_point); Res = FixedPointLiteral::CreateFromRawInt(Context, Val, Ty, Tok.getLocation(), scale); } else if (Literal.isFloatingLiteral()) { QualType Ty; if (Literal.isHalf){ if (getOpenCLOptions().isEnabled("cl_khr_fp16")) Ty = Context.HalfTy; else { Diag(Tok.getLocation(), diag::err_half_const_requires_fp16); return ExprError(); } } else if (Literal.isFloat) Ty = Context.FloatTy; else if (Literal.isLong) Ty = Context.LongDoubleTy; else if (Literal.isFloat16) Ty = Context.Float16Ty; else if (Literal.isFloat128) Ty = Context.Float128Ty; else Ty = Context.DoubleTy; Res = BuildFloatingLiteral(*this, Literal, Ty, Tok.getLocation()); if (Ty == Context.DoubleTy) { if (getLangOpts().SinglePrecisionConstants) { const BuiltinType *BTy = Ty->getAs(); if (BTy->getKind() != BuiltinType::Float) { Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get(); } } else if (getLangOpts().OpenCL && !getOpenCLOptions().isEnabled("cl_khr_fp64")) { // Impose single-precision float type when cl_khr_fp64 is not enabled. Diag(Tok.getLocation(), diag::warn_double_const_requires_fp64); Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get(); } } } else if (!Literal.isIntegerLiteral()) { return ExprError(); } else { QualType Ty; // 'long long' is a C99 or C++11 feature. if (!getLangOpts().C99 && Literal.isLongLong) { if (getLangOpts().CPlusPlus) Diag(Tok.getLocation(), getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong); else Diag(Tok.getLocation(), diag::ext_c99_longlong); } // Get the value in the widest-possible width. 
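// Editorial illustration of the C99 6.4.4.1p5 / C++ [lex.icon] type ladder
// applied below (widths are target-dependent; a 32-bit 'int' is assumed):
//
//   long long a = 2147483648;   // decimal, no suffix: int -> long -> long long,
//                               // never unsigned
//   auto      b = 0x80000000;   // hex/octal constants may become 'unsigned int'
//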
unsigned MaxWidth = Context.getTargetInfo().getIntMaxTWidth(); llvm::APInt ResultVal(MaxWidth, 0); if (Literal.GetIntegerValue(ResultVal)) { // If this value didn't fit into uintmax_t, error and force to ull. Diag(Tok.getLocation(), diag::err_integer_literal_too_large) << /* Unsigned */ 1; Ty = Context.UnsignedLongLongTy; assert(Context.getTypeSize(Ty) == ResultVal.getBitWidth() && "long long is not intmax_t?"); } else { // If this value fits into a ULL, try to figure out what else it fits into // according to the rules of C99 6.4.4.1p5. // Octal, Hexadecimal, and integers with a U suffix are allowed to // be an unsigned int. bool AllowUnsigned = Literal.isUnsigned || Literal.getRadix() != 10; // Check from smallest to largest, picking the smallest type we can. unsigned Width = 0; // Microsoft specific integer suffixes are explicitly sized. if (Literal.MicrosoftInteger) { if (Literal.MicrosoftInteger == 8 && !Literal.isUnsigned) { Width = 8; Ty = Context.CharTy; } else { Width = Literal.MicrosoftInteger; Ty = Context.getIntTypeForBitwidth(Width, /*Signed=*/!Literal.isUnsigned); } } if (Ty.isNull() && !Literal.isLong && !Literal.isLongLong) { // Are int/unsigned possibilities? unsigned IntSize = Context.getTargetInfo().getIntWidth(); // Does it fit in a unsigned int? if (ResultVal.isIntN(IntSize)) { // Does it fit in a signed int? if (!Literal.isUnsigned && ResultVal[IntSize-1] == 0) Ty = Context.IntTy; else if (AllowUnsigned) Ty = Context.UnsignedIntTy; Width = IntSize; } } // Are long/unsigned long possibilities? if (Ty.isNull() && !Literal.isLongLong) { unsigned LongSize = Context.getTargetInfo().getLongWidth(); // Does it fit in a unsigned long? if (ResultVal.isIntN(LongSize)) { // Does it fit in a signed long? if (!Literal.isUnsigned && ResultVal[LongSize-1] == 0) Ty = Context.LongTy; else if (AllowUnsigned) Ty = Context.UnsignedLongTy; // Check according to the rules of C90 6.1.3.2p5. C++03 [lex.icon]p2 // is compatible. else if (!getLangOpts().C99 && !getLangOpts().CPlusPlus11) { const unsigned LongLongSize = Context.getTargetInfo().getLongLongWidth(); Diag(Tok.getLocation(), getLangOpts().CPlusPlus ? Literal.isLong ? diag::warn_old_implicitly_unsigned_long_cxx : /*C++98 UB*/ diag:: ext_old_implicitly_unsigned_long_cxx : diag::warn_old_implicitly_unsigned_long) << (LongLongSize > LongSize ? /*will have type 'long long'*/ 0 : /*will be ill-formed*/ 1); Ty = Context.UnsignedLongTy; } Width = LongSize; } } // Check long long if needed. if (Ty.isNull()) { unsigned LongLongSize = Context.getTargetInfo().getLongLongWidth(); // Does it fit in a unsigned long long? if (ResultVal.isIntN(LongLongSize)) { // Does it fit in a signed long long? // To be compatible with MSVC, hex integer literals ending with the // LL or i64 suffix are always signed in Microsoft mode. if (!Literal.isUnsigned && (ResultVal[LongLongSize-1] == 0 || (getLangOpts().MSVCCompat && Literal.isLongLong))) Ty = Context.LongLongTy; else if (AllowUnsigned) Ty = Context.UnsignedLongLongTy; Width = LongLongSize; } } // If we still couldn't decide a type, we probably have something that // does not fit in a signed long long, but has no U suffix. 
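// Editorial illustration of the fallback handled below (assuming a 64-bit
// 'long long'):
//
//   auto big = 18446744073709551615;  // too large for any signed type; warned
//                                     // about and given type 'unsigned long long'
//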
if (Ty.isNull()) { Diag(Tok.getLocation(), diag::ext_integer_literal_too_large_for_signed); Ty = Context.UnsignedLongLongTy; Width = Context.getTargetInfo().getLongLongWidth(); } if (ResultVal.getBitWidth() != Width) ResultVal = ResultVal.trunc(Width); } Res = IntegerLiteral::Create(Context, ResultVal, Ty, Tok.getLocation()); } // If this is an imaginary literal, create the ImaginaryLiteral wrapper. if (Literal.isImaginary) { Res = new (Context) ImaginaryLiteral(Res, Context.getComplexType(Res->getType())); Diag(Tok.getLocation(), diag::ext_imaginary_constant); } return Res; } ExprResult Sema::ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E) { assert(E && "ActOnParenExpr() missing expr"); return new (Context) ParenExpr(L, R, E); } static bool CheckVecStepTraitOperandType(Sema &S, QualType T, SourceLocation Loc, SourceRange ArgRange) { // [OpenCL 1.1 6.11.12] "The vec_step built-in function takes a built-in // scalar or vector data type argument..." // Every built-in scalar type (OpenCL 1.1 6.1.1) is either an arithmetic // type (C99 6.2.5p18) or void. if (!(T->isArithmeticType() || T->isVoidType() || T->isVectorType())) { S.Diag(Loc, diag::err_vecstep_non_scalar_vector_type) << T << ArgRange; return true; } assert((T->isVoidType() || !T->isIncompleteType()) && "Scalar types should always be complete"); return false; } static bool CheckExtensionTraitOperandType(Sema &S, QualType T, SourceLocation Loc, SourceRange ArgRange, UnaryExprOrTypeTrait TraitKind) { // Invalid types must be hard errors for SFINAE in C++. if (S.LangOpts.CPlusPlus) return true; // C99 6.5.3.4p1: if (T->isFunctionType() && (TraitKind == UETT_SizeOf || TraitKind == UETT_AlignOf || TraitKind == UETT_PreferredAlignOf)) { // sizeof(function)/alignof(function) is allowed as an extension. S.Diag(Loc, diag::ext_sizeof_alignof_function_type) << getTraitSpelling(TraitKind) << ArgRange; return false; } // Allow sizeof(void)/alignof(void) as an extension, unless in OpenCL where // this is an error (OpenCL v1.1 s6.3.k) if (T->isVoidType()) { unsigned DiagID = S.LangOpts.OpenCL ? diag::err_opencl_sizeof_alignof_type : diag::ext_sizeof_alignof_void_type; S.Diag(Loc, DiagID) << getTraitSpelling(TraitKind) << ArgRange; return false; } return true; } static bool CheckObjCTraitOperandConstraints(Sema &S, QualType T, SourceLocation Loc, SourceRange ArgRange, UnaryExprOrTypeTrait TraitKind) { // Reject sizeof(interface) and sizeof(interface) if the // runtime doesn't allow it. if (!S.LangOpts.ObjCRuntime.allowsSizeofAlignof() && T->isObjCObjectType()) { S.Diag(Loc, diag::err_sizeof_nonfragile_interface) << T << (TraitKind == UETT_SizeOf) << ArgRange; return true; } return false; } /// Check whether E is a pointer from a decayed array type (the decayed /// pointer type is equal to T) and emit a warning if it is. static void warnOnSizeofOnArrayDecay(Sema &S, SourceLocation Loc, QualType T, Expr *E) { // Don't warn if the operation changed the type. if (T != E->getType()) return; // Now look for array decays. ImplicitCastExpr *ICE = dyn_cast(E); if (!ICE || ICE->getCastKind() != CK_ArrayToPointerDecay) return; S.Diag(Loc, diag::warn_sizeof_array_decay) << ICE->getSourceRange() << ICE->getType() << ICE->getSubExpr()->getType(); } /// Check the constraints on expression operands to unary type expression /// and type traits. /// /// Completes any types necessary and validates the constraints on the operand /// expression. 
The logic mostly mirrors the type-based overload, but may modify /// the expression as it completes the type for that expression through template /// instantiation, etc. bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind) { QualType ExprTy = E->getType(); assert(!ExprTy->isReferenceType()); bool IsUnevaluatedOperand = (ExprKind == UETT_SizeOf || ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf); if (IsUnevaluatedOperand) { ExprResult Result = CheckUnevaluatedOperand(E); if (Result.isInvalid()) return true; E = Result.get(); } if (ExprKind == UETT_VecStep) return CheckVecStepTraitOperandType(*this, ExprTy, E->getExprLoc(), E->getSourceRange()); // Explicitly list some types as extensions. if (!CheckExtensionTraitOperandType(*this, ExprTy, E->getExprLoc(), E->getSourceRange(), ExprKind)) return false; // 'alignof' applied to an expression only requires the base element type of // the expression to be complete. 'sizeof' requires the expression's type to // be complete (and will attempt to complete it if it's an array of unknown // bound). if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf) { if (RequireCompleteSizedType( E->getExprLoc(), Context.getBaseElementType(E->getType()), diag::err_sizeof_alignof_incomplete_or_sizeless_type, getTraitSpelling(ExprKind), E->getSourceRange())) return true; } else { if (RequireCompleteSizedExprType( E, diag::err_sizeof_alignof_incomplete_or_sizeless_type, getTraitSpelling(ExprKind), E->getSourceRange())) return true; } // Completing the expression's type may have changed it. ExprTy = E->getType(); assert(!ExprTy->isReferenceType()); if (ExprTy->isFunctionType()) { Diag(E->getExprLoc(), diag::err_sizeof_alignof_function_type) << getTraitSpelling(ExprKind) << E->getSourceRange(); return true; } // The operand for sizeof and alignof is in an unevaluated expression context, // so side effects could result in unintended consequences. if (IsUnevaluatedOperand && !inTemplateInstantiation() && E->HasSideEffects(Context, false)) Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context); if (CheckObjCTraitOperandConstraints(*this, ExprTy, E->getExprLoc(), E->getSourceRange(), ExprKind)) return true; if (ExprKind == UETT_SizeOf) { if (DeclRefExpr *DeclRef = dyn_cast(E->IgnoreParens())) { if (ParmVarDecl *PVD = dyn_cast(DeclRef->getFoundDecl())) { QualType OType = PVD->getOriginalType(); QualType Type = PVD->getType(); if (Type->isPointerType() && OType->isArrayType()) { Diag(E->getExprLoc(), diag::warn_sizeof_array_param) << Type << OType; Diag(PVD->getLocation(), diag::note_declared_at); } } } // Warn on "sizeof(array op x)" and "sizeof(x op array)", where the array // decays into a pointer and returns an unintended result. This is most // likely a typo for "sizeof(array) op x". if (BinaryOperator *BO = dyn_cast(E->IgnoreParens())) { warnOnSizeofOnArrayDecay(*this, BO->getOperatorLoc(), BO->getType(), BO->getLHS()); warnOnSizeofOnArrayDecay(*this, BO->getOperatorLoc(), BO->getType(), BO->getRHS()); } } return false; } /// Check the constraints on operands to unary expression and type /// traits. /// /// This will complete any types necessary, and validate the various constraints /// on those operands. /// /// The UsualUnaryConversions() function is *not* called by this routine. /// C99 6.3.2.1p[2-4] all state: /// Except when it is the operand of the sizeof operator ... 
/// /// C++ [expr.sizeof]p4 /// The lvalue-to-rvalue, array-to-pointer, and function-to-pointer /// standard conversions are not applied to the operand of sizeof. /// /// This policy is followed for all of the unary trait expressions. bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind) { if (ExprType->isDependentType()) return false; // C++ [expr.sizeof]p2: // When applied to a reference or a reference type, the result // is the size of the referenced type. // C++11 [expr.alignof]p3: // When alignof is applied to a reference type, the result // shall be the alignment of the referenced type. if (const ReferenceType *Ref = ExprType->getAs()) ExprType = Ref->getPointeeType(); // C11 6.5.3.4/3, C++11 [expr.alignof]p3: // When alignof or _Alignof is applied to an array type, the result // is the alignment of the element type. if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf || ExprKind == UETT_OpenMPRequiredSimdAlign) ExprType = Context.getBaseElementType(ExprType); if (ExprKind == UETT_VecStep) return CheckVecStepTraitOperandType(*this, ExprType, OpLoc, ExprRange); // Explicitly list some types as extensions. if (!CheckExtensionTraitOperandType(*this, ExprType, OpLoc, ExprRange, ExprKind)) return false; if (RequireCompleteSizedType( OpLoc, ExprType, diag::err_sizeof_alignof_incomplete_or_sizeless_type, getTraitSpelling(ExprKind), ExprRange)) return true; if (ExprType->isFunctionType()) { Diag(OpLoc, diag::err_sizeof_alignof_function_type) << getTraitSpelling(ExprKind) << ExprRange; return true; } if (CheckObjCTraitOperandConstraints(*this, ExprType, OpLoc, ExprRange, ExprKind)) return true; return false; } static bool CheckAlignOfExpr(Sema &S, Expr *E, UnaryExprOrTypeTrait ExprKind) { // Cannot know anything else if the expression is dependent. if (E->isTypeDependent()) return false; if (E->getObjectKind() == OK_BitField) { S.Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield) << 1 << E->getSourceRange(); return true; } ValueDecl *D = nullptr; Expr *Inner = E->IgnoreParens(); if (DeclRefExpr *DRE = dyn_cast(Inner)) { D = DRE->getDecl(); } else if (MemberExpr *ME = dyn_cast(Inner)) { D = ME->getMemberDecl(); } // If it's a field, require the containing struct to have a // complete definition so that we can compute the layout. // // This can happen in C++11 onwards, either by naming the member // in a way that is not transformed into a member access expression // (in an unevaluated operand, for instance), or by naming the member // in a trailing-return-type. // // For the record, since __alignof__ on expressions is a GCC // extension, GCC seems to permit this but always gives the // nonsensical answer 0. // // We don't really need the layout here --- we could instead just // directly check for all the appropriate alignment-lowing // attributes --- but that would require duplicating a lot of // logic that just isn't worth duplicating for such a marginal // use-case. if (FieldDecl *FD = dyn_cast_or_null(D)) { // Fast path this check, since we at least know the record has a // definition if we can find a member of it. 
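// Editorial illustration (hypothetical type): naming a non-static data member in
// an unevaluated operand, as described above. The containing class must be
// complete so its layout (and any alignment attributes) can be consulted;
// __alignof__ on an expression is a GNU extension.
//
//   struct S { char c; double d; };
//   enum { A = __alignof__(S::d) };   // requires the definition of S
//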
if (!FD->getParent()->isCompleteDefinition()) { S.Diag(E->getExprLoc(), diag::err_alignof_member_of_incomplete_type) << E->getSourceRange(); return true; } // Otherwise, if it's a field, and the field doesn't have // reference type, then it must have a complete type (or be a // flexible array member, which we explicitly want to // white-list anyway), which makes the following checks trivial. if (!FD->getType()->isReferenceType()) return false; } return S.CheckUnaryExprOrTypeTraitOperand(E, ExprKind); } bool Sema::CheckVecStepExpr(Expr *E) { E = E->IgnoreParens(); // Cannot know anything else if the expression is dependent. if (E->isTypeDependent()) return false; return CheckUnaryExprOrTypeTraitOperand(E, UETT_VecStep); } static void captureVariablyModifiedType(ASTContext &Context, QualType T, CapturingScopeInfo *CSI) { assert(T->isVariablyModifiedType()); assert(CSI != nullptr); // We're going to walk down into the type and look for VLA expressions. do { const Type *Ty = T.getTypePtr(); switch (Ty->getTypeClass()) { #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_TYPE(Class, Base) #define DEPENDENT_TYPE(Class, Base) case Type::Class: #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) #include "clang/AST/TypeNodes.inc" T = QualType(); break; // These types are never variably-modified. case Type::Builtin: case Type::Complex: case Type::Vector: case Type::ExtVector: case Type::ConstantMatrix: case Type::Record: case Type::Enum: case Type::Elaborated: case Type::TemplateSpecialization: case Type::ObjCObject: case Type::ObjCInterface: case Type::ObjCObjectPointer: case Type::ObjCTypeParam: case Type::Pipe: case Type::ExtInt: llvm_unreachable("type class is never variably-modified!"); case Type::Adjusted: T = cast(Ty)->getOriginalType(); break; case Type::Decayed: T = cast(Ty)->getPointeeType(); break; case Type::Pointer: T = cast(Ty)->getPointeeType(); break; case Type::BlockPointer: T = cast(Ty)->getPointeeType(); break; case Type::LValueReference: case Type::RValueReference: T = cast(Ty)->getPointeeType(); break; case Type::MemberPointer: T = cast(Ty)->getPointeeType(); break; case Type::ConstantArray: case Type::IncompleteArray: // Losing element qualification here is fine. T = cast(Ty)->getElementType(); break; case Type::VariableArray: { // Losing element qualification here is fine. const VariableArrayType *VAT = cast(Ty); // Unknown size indication requires no size computation. // Otherwise, evaluate and record it. auto Size = VAT->getSizeExpr(); if (Size && !CSI->isVLATypeCaptured(VAT) && (isa(CSI) || isa(CSI))) CSI->addVLATypeCapture(Size->getExprLoc(), VAT, Context.getSizeType()); T = VAT->getElementType(); break; } case Type::FunctionProto: case Type::FunctionNoProto: T = cast(Ty)->getReturnType(); break; case Type::Paren: case Type::TypeOf: case Type::UnaryTransform: case Type::Attributed: case Type::SubstTemplateTypeParm: - case Type::PackExpansion: case Type::MacroQualified: // Keep walking after single level desugaring. T = T.getSingleStepDesugaredType(Context); break; case Type::Typedef: T = cast(Ty)->desugar(); break; case Type::Decltype: T = cast(Ty)->desugar(); break; case Type::Auto: case Type::DeducedTemplateSpecialization: T = cast(Ty)->getDeducedType(); break; case Type::TypeOfExpr: T = cast(Ty)->getUnderlyingExpr()->getType(); break; case Type::Atomic: T = cast(Ty)->getValueType(); break; } } while (!T.isNull() && T->isVariablyModifiedType()); } /// Build a sizeof or alignof expression given a type operand. 
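// Editorial sketch of why the capture walk above exists (uses the GNU VLA
// extension in C++; names are hypothetical): when a sizeof/alignof operand is a
// variably modified type, the array bound evaluated in the enclosing function
// must be captured so the trait can be evaluated inside the lambda or region.
//
//   void f(int n) {
//     typedef int VLA[n];                    // bound evaluated here
//     auto g = [&] { return sizeof(VLA); };  // VLA size is captured for the lambda
//     (void)g();
//   }
//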
ExprResult Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R) { if (!TInfo) return ExprError(); QualType T = TInfo->getType(); if (!T->isDependentType() && CheckUnaryExprOrTypeTraitOperand(T, OpLoc, R, ExprKind)) return ExprError(); if (T->isVariablyModifiedType() && FunctionScopes.size() > 1) { if (auto *TT = T->getAs()) { for (auto I = FunctionScopes.rbegin(), E = std::prev(FunctionScopes.rend()); I != E; ++I) { auto *CSI = dyn_cast(*I); if (CSI == nullptr) break; DeclContext *DC = nullptr; if (auto *LSI = dyn_cast(CSI)) DC = LSI->CallOperator; else if (auto *CRSI = dyn_cast(CSI)) DC = CRSI->TheCapturedDecl; else if (auto *BSI = dyn_cast(CSI)) DC = BSI->TheDecl; if (DC) { if (DC->containsDecl(TT->getDecl())) break; captureVariablyModifiedType(Context, T, CSI); } } } } // C99 6.5.3.4p4: the type (an unsigned integer type) is size_t. return new (Context) UnaryExprOrTypeTraitExpr( ExprKind, TInfo, Context.getSizeType(), OpLoc, R.getEnd()); } /// Build a sizeof or alignof expression given an expression /// operand. ExprResult Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind) { ExprResult PE = CheckPlaceholderExpr(E); if (PE.isInvalid()) return ExprError(); E = PE.get(); // Verify that the operand is valid. bool isInvalid = false; if (E->isTypeDependent()) { // Delay type-checking for type-dependent expressions. } else if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf) { isInvalid = CheckAlignOfExpr(*this, E, ExprKind); } else if (ExprKind == UETT_VecStep) { isInvalid = CheckVecStepExpr(E); } else if (ExprKind == UETT_OpenMPRequiredSimdAlign) { Diag(E->getExprLoc(), diag::err_openmp_default_simd_align_expr); isInvalid = true; } else if (E->refersToBitField()) { // C99 6.5.3.4p1. Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield) << 0; isInvalid = true; } else { isInvalid = CheckUnaryExprOrTypeTraitOperand(E, UETT_SizeOf); } if (isInvalid) return ExprError(); if (ExprKind == UETT_SizeOf && E->getType()->isVariableArrayType()) { PE = TransformToPotentiallyEvaluated(E); if (PE.isInvalid()) return ExprError(); E = PE.get(); } // C99 6.5.3.4p4: the type (an unsigned integer type) is size_t. return new (Context) UnaryExprOrTypeTraitExpr( ExprKind, E, Context.getSizeType(), OpLoc, E->getSourceRange().getEnd()); } /// ActOnUnaryExprOrTypeTraitExpr - Handle @c sizeof(type) and @c sizeof @c /// expr and the same for @c alignof and @c __alignof /// Note that the ArgRange is invalid if isType is false. ExprResult Sema::ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange) { // If error parsing type, ignore. if (!TyOrEx) return ExprError(); if (IsType) { TypeSourceInfo *TInfo; (void) GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrEx), &TInfo); return CreateUnaryExprOrTypeTraitExpr(TInfo, OpLoc, ExprKind, ArgRange); } Expr *ArgEx = (Expr *)TyOrEx; ExprResult Result = CreateUnaryExprOrTypeTraitExpr(ArgEx, OpLoc, ExprKind); return Result; } static QualType CheckRealImagOperand(Sema &S, ExprResult &V, SourceLocation Loc, bool IsReal) { if (V.get()->isTypeDependent()) return S.Context.DependentTy; // _Real and _Imag are only l-values for normal l-values. if (V.get()->getObjectKind() != OK_Ordinary) { V = S.DefaultLvalueConversion(V.get()); if (V.isInvalid()) return QualType(); } // These operators return the element type of a complex type. 
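// Editorial illustration of the GNU __real__/__imag__ operands handled here:
//
//   _Complex double z = 1.0;
//   double re = __real__ z;   // element type of the complex type
//   double im = __imag__ z;
//   int    i  = 42;
//   int    r  = __real__ i;   // plain arithmetic types pass through unchanged
//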
if (const ComplexType *CT = V.get()->getType()->getAs()) return CT->getElementType(); // Otherwise they pass through real integer and floating point types here. if (V.get()->getType()->isArithmeticType()) return V.get()->getType(); // Test for placeholders. ExprResult PR = S.CheckPlaceholderExpr(V.get()); if (PR.isInvalid()) return QualType(); if (PR.get() != V.get()) { V = PR; return CheckRealImagOperand(S, V, Loc, IsReal); } // Reject anything else. S.Diag(Loc, diag::err_realimag_invalid_type) << V.get()->getType() << (IsReal ? "__real" : "__imag"); return QualType(); } ExprResult Sema::ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input) { UnaryOperatorKind Opc; switch (Kind) { default: llvm_unreachable("Unknown unary op!"); case tok::plusplus: Opc = UO_PostInc; break; case tok::minusminus: Opc = UO_PostDec; break; } // Since this might is a postfix expression, get rid of ParenListExprs. ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Input); if (Result.isInvalid()) return ExprError(); Input = Result.get(); return BuildUnaryOp(S, OpLoc, Opc, Input); } /// Diagnose if arithmetic on the given ObjC pointer is illegal. /// /// \return true on error static bool checkArithmeticOnObjCPointer(Sema &S, SourceLocation opLoc, Expr *op) { assert(op->getType()->isObjCObjectPointerType()); if (S.LangOpts.ObjCRuntime.allowsPointerArithmetic() && !S.LangOpts.ObjCSubscriptingLegacyRuntime) return false; S.Diag(opLoc, diag::err_arithmetic_nonfragile_interface) << op->getType()->castAs()->getPointeeType() << op->getSourceRange(); return true; } static bool isMSPropertySubscriptExpr(Sema &S, Expr *Base) { auto *BaseNoParens = Base->IgnoreParens(); if (auto *MSProp = dyn_cast(BaseNoParens)) return MSProp->getPropertyDecl()->getType()->isArrayType(); return isa(BaseNoParens); } ExprResult Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc, Expr *idx, SourceLocation rbLoc) { if (base && !base->getType().isNull() && base->getType()->isSpecificPlaceholderType(BuiltinType::OMPArraySection)) return ActOnOMPArraySectionExpr(base, lbLoc, idx, SourceLocation(), SourceLocation(), /*Length*/ nullptr, /*Stride=*/nullptr, rbLoc); // Since this might be a postfix expression, get rid of ParenListExprs. if (isa(base)) { ExprResult result = MaybeConvertParenListExprToParenExpr(S, base); if (result.isInvalid()) return ExprError(); base = result.get(); } // Check if base and idx form a MatrixSubscriptExpr. // // Helper to check for comma expressions, which are not allowed as indices for // matrix subscript expressions. auto CheckAndReportCommaError = [this, base, rbLoc](Expr *E) { if (isa(E) && cast(E)->isCommaOp()) { Diag(E->getExprLoc(), diag::err_matrix_subscript_comma) << SourceRange(base->getBeginLoc(), rbLoc); return true; } return false; }; // The matrix subscript operator ([][])is considered a single operator. // Separating the index expressions by parenthesis is not allowed. if (base->getType()->isSpecificPlaceholderType( BuiltinType::IncompleteMatrixIdx) && !isa(base)) { Diag(base->getExprLoc(), diag::err_matrix_separate_incomplete_index) << SourceRange(base->getBeginLoc(), rbLoc); return ExprError(); } // If the base is either a MatrixSubscriptExpr or a matrix type, try to create // a new MatrixSubscriptExpr. 
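// Editorial illustration (hypothetical typedef; assumes clang's matrix extension,
// enabled with -fenable-matrix): m[r][c] is treated as a single subscript operator
// with two indices, and a comma expression is rejected as an index.
//
//   typedef float m4x4_t __attribute__((matrix_type(4, 4)));
//   float get(m4x4_t m) { return m[2][3]; }   // element access
//   float bad(m4x4_t m) { return m[1, 2]; }   // error: comma expression as index
//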
auto *matSubscriptE = dyn_cast(base); if (matSubscriptE) { if (CheckAndReportCommaError(idx)) return ExprError(); assert(matSubscriptE->isIncomplete() && "base has to be an incomplete matrix subscript"); return CreateBuiltinMatrixSubscriptExpr( matSubscriptE->getBase(), matSubscriptE->getRowIdx(), idx, rbLoc); } Expr *matrixBase = base; bool IsMSPropertySubscript = isMSPropertySubscriptExpr(*this, base); if (!IsMSPropertySubscript) { ExprResult result = CheckPlaceholderExpr(base); if (!result.isInvalid()) matrixBase = result.get(); } if (matrixBase->getType()->isMatrixType()) { if (CheckAndReportCommaError(idx)) return ExprError(); return CreateBuiltinMatrixSubscriptExpr(matrixBase, idx, nullptr, rbLoc); } // A comma-expression as the index is deprecated in C++2a onwards. if (getLangOpts().CPlusPlus20 && ((isa(idx) && cast(idx)->isCommaOp()) || (isa(idx) && cast(idx)->getOperator() == OO_Comma))) { Diag(idx->getExprLoc(), diag::warn_deprecated_comma_subscript) << SourceRange(base->getBeginLoc(), rbLoc); } // Handle any non-overload placeholder types in the base and index // expressions. We can't handle overloads here because the other // operand might be an overloadable type, in which case the overload // resolution for the operator overload should get the first crack // at the overload. if (base->getType()->isNonOverloadPlaceholderType()) { IsMSPropertySubscript = isMSPropertySubscriptExpr(*this, base); if (!IsMSPropertySubscript) { ExprResult result = CheckPlaceholderExpr(base); if (result.isInvalid()) return ExprError(); base = result.get(); } } if (idx->getType()->isNonOverloadPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(idx); if (result.isInvalid()) return ExprError(); idx = result.get(); } // Build an unanalyzed expression if either operand is type-dependent. if (getLangOpts().CPlusPlus && (base->isTypeDependent() || idx->isTypeDependent())) { return new (Context) ArraySubscriptExpr(base, idx, Context.DependentTy, VK_LValue, OK_Ordinary, rbLoc); } // MSDN, property (C++) // https://msdn.microsoft.com/en-us/library/yhfk0thd(v=vs.120).aspx // This attribute can also be used in the declaration of an empty array in a // class or structure definition. For example: // __declspec(property(get=GetX, put=PutX)) int x[]; // The above statement indicates that x[] can be used with one or more array // indices. In this case, i=p->x[a][b] will be turned into i=p->GetX(a, b), // and p->x[a][b] = i will be turned into p->PutX(a, b, i); if (IsMSPropertySubscript) { // Build MS property subscript expression if base is MS property reference // or MS property subscript. return new (Context) MSPropertySubscriptExpr( base, idx, Context.PseudoObjectTy, VK_LValue, OK_Ordinary, rbLoc); } // Use C++ overloaded-operator rules if either operand has record // type. The spec says to do this if either type is *overloadable*, // but enum types can't declare subscript operators or conversion // operators, so there's nothing interesting for overload resolution // to do if there aren't any record types involved. // // ObjC pointers have their own subscripting logic that is not tied // to overload resolution and so should not take this path. 
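// Editorial illustration (hypothetical types) of the dispatch below: a
// record-typed operand routes the subscript through C++ overload resolution,
// otherwise the builtin operator is used directly.
//
//   struct Table { int &operator[](int); };
//   int get(Table &t, int i) { return t[i]; }  // overload resolution picks operator[]
//   int raw(int *p)          { return p[2]; }  // builtin subscript, no overloading
//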
if (getLangOpts().CPlusPlus && (base->getType()->isRecordType() || (!base->getType()->isObjCObjectPointerType() && idx->getType()->isRecordType()))) { return CreateOverloadedArraySubscriptExpr(lbLoc, rbLoc, base, idx); } ExprResult Res = CreateBuiltinArraySubscriptExpr(base, lbLoc, idx, rbLoc); if (!Res.isInvalid() && isa(Res.get())) CheckSubscriptAccessOfNoDeref(cast(Res.get())); return Res; } ExprResult Sema::tryConvertExprToType(Expr *E, QualType Ty) { InitializedEntity Entity = InitializedEntity::InitializeTemporary(Ty); InitializationKind Kind = InitializationKind::CreateCopy(E->getBeginLoc(), SourceLocation()); InitializationSequence InitSeq(*this, Entity, Kind, E); return InitSeq.Perform(*this, Entity, Kind, E); } ExprResult Sema::CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc) { ExprResult BaseR = CheckPlaceholderExpr(Base); if (BaseR.isInvalid()) return BaseR; Base = BaseR.get(); ExprResult RowR = CheckPlaceholderExpr(RowIdx); if (RowR.isInvalid()) return RowR; RowIdx = RowR.get(); if (!ColumnIdx) return new (Context) MatrixSubscriptExpr( Base, RowIdx, ColumnIdx, Context.IncompleteMatrixIdxTy, RBLoc); // Build an unanalyzed expression if any of the operands is type-dependent. if (Base->isTypeDependent() || RowIdx->isTypeDependent() || ColumnIdx->isTypeDependent()) return new (Context) MatrixSubscriptExpr(Base, RowIdx, ColumnIdx, Context.DependentTy, RBLoc); ExprResult ColumnR = CheckPlaceholderExpr(ColumnIdx); if (ColumnR.isInvalid()) return ColumnR; ColumnIdx = ColumnR.get(); // Check that IndexExpr is an integer expression. If it is a constant // expression, check that it is less than Dim (= the number of elements in the // corresponding dimension). auto IsIndexValid = [&](Expr *IndexExpr, unsigned Dim, bool IsColumnIdx) -> Expr * { if (!IndexExpr->getType()->isIntegerType() && !IndexExpr->isTypeDependent()) { Diag(IndexExpr->getBeginLoc(), diag::err_matrix_index_not_integer) << IsColumnIdx; return nullptr; } llvm::APSInt Idx; if (IndexExpr->isIntegerConstantExpr(Idx, Context) && (Idx < 0 || Idx >= Dim)) { Diag(IndexExpr->getBeginLoc(), diag::err_matrix_index_outside_range) << IsColumnIdx << Dim; return nullptr; } ExprResult ConvExpr = tryConvertExprToType(IndexExpr, Context.getSizeType()); assert(!ConvExpr.isInvalid() && "should be able to convert any integer type to size type"); return ConvExpr.get(); }; auto *MTy = Base->getType()->getAs(); RowIdx = IsIndexValid(RowIdx, MTy->getNumRows(), false); ColumnIdx = IsIndexValid(ColumnIdx, MTy->getNumColumns(), true); if (!RowIdx || !ColumnIdx) return ExprError(); return new (Context) MatrixSubscriptExpr(Base, RowIdx, ColumnIdx, MTy->getElementType(), RBLoc); } void Sema::CheckAddressOfNoDeref(const Expr *E) { ExpressionEvaluationContextRecord &LastRecord = ExprEvalContexts.back(); const Expr *StrippedExpr = E->IgnoreParenImpCasts(); // For expressions like `&(*s).b`, the base is recorded and what should be // checked. const MemberExpr *Member = nullptr; while ((Member = dyn_cast(StrippedExpr)) && !Member->isArrow()) StrippedExpr = Member->getBase()->IgnoreParenImpCasts(); LastRecord.PossibleDerefs.erase(StrippedExpr); } void Sema::CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E) { QualType ResultTy = E->getType(); ExpressionEvaluationContextRecord &LastRecord = ExprEvalContexts.back(); // Bail if the element is an array since it is not memory access. 
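// Editorial illustration (hypothetical names) of the 'noderef' checking done in
// this function and in CheckAddressOfNoDeref above:
//
//   int __attribute__((noderef)) *p;
//   int x = p[0];                              // warns: the subscript performs a load
//   int __attribute__((noderef)) *q = &p[0];   // only address computation; no warning
//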
if (isa(ResultTy)) return; if (ResultTy->hasAttr(attr::NoDeref)) { LastRecord.PossibleDerefs.insert(E); return; } // Check if the base type is a pointer to a member access of a struct // marked with noderef. const Expr *Base = E->getBase(); QualType BaseTy = Base->getType(); if (!(isa(BaseTy) || isa(BaseTy))) // Not a pointer access return; const MemberExpr *Member = nullptr; while ((Member = dyn_cast(Base->IgnoreParenCasts())) && Member->isArrow()) Base = Member->getBase(); if (const auto *Ptr = dyn_cast(Base->getType())) { if (Ptr->getPointeeType()->hasAttr(attr::NoDeref)) LastRecord.PossibleDerefs.insert(E); } } ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc) { if (Base->getType()->isPlaceholderType() && !Base->getType()->isSpecificPlaceholderType( BuiltinType::OMPArraySection)) { ExprResult Result = CheckPlaceholderExpr(Base); if (Result.isInvalid()) return ExprError(); Base = Result.get(); } if (LowerBound && LowerBound->getType()->isNonOverloadPlaceholderType()) { ExprResult Result = CheckPlaceholderExpr(LowerBound); if (Result.isInvalid()) return ExprError(); Result = DefaultLvalueConversion(Result.get()); if (Result.isInvalid()) return ExprError(); LowerBound = Result.get(); } if (Length && Length->getType()->isNonOverloadPlaceholderType()) { ExprResult Result = CheckPlaceholderExpr(Length); if (Result.isInvalid()) return ExprError(); Result = DefaultLvalueConversion(Result.get()); if (Result.isInvalid()) return ExprError(); Length = Result.get(); } if (Stride && Stride->getType()->isNonOverloadPlaceholderType()) { ExprResult Result = CheckPlaceholderExpr(Stride); if (Result.isInvalid()) return ExprError(); Result = DefaultLvalueConversion(Result.get()); if (Result.isInvalid()) return ExprError(); Stride = Result.get(); } // Build an unanalyzed expression if either operand is type-dependent. if (Base->isTypeDependent() || (LowerBound && (LowerBound->isTypeDependent() || LowerBound->isValueDependent())) || (Length && (Length->isTypeDependent() || Length->isValueDependent())) || (Stride && (Stride->isTypeDependent() || Stride->isValueDependent()))) { return new (Context) OMPArraySectionExpr( Base, LowerBound, Length, Stride, Context.DependentTy, VK_LValue, OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc); } // Perform default conversions. 
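// Editorial illustration (hypothetical names) of the OpenMP array-section syntax
// being checked here, per OpenMP 5.0 [2.1.5]:
//
//   void use(int *a, int n) {
//   #pragma omp target map(tofrom: a[0:n])    // n elements starting at a[0]
//     { }
//   }
//   // OpenMP 5.0 also allows a stride, e.g. a[0:n:2] for every other element.
//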
QualType OriginalTy = OMPArraySectionExpr::getBaseOriginalType(Base); QualType ResultTy; if (OriginalTy->isAnyPointerType()) { ResultTy = OriginalTy->getPointeeType(); } else if (OriginalTy->isArrayType()) { ResultTy = OriginalTy->getAsArrayTypeUnsafe()->getElementType(); } else { return ExprError( Diag(Base->getExprLoc(), diag::err_omp_typecheck_section_value) << Base->getSourceRange()); } // C99 6.5.2.1p1 if (LowerBound) { auto Res = PerformOpenMPImplicitIntegerConversion(LowerBound->getExprLoc(), LowerBound); if (Res.isInvalid()) return ExprError(Diag(LowerBound->getExprLoc(), diag::err_omp_typecheck_section_not_integer) << 0 << LowerBound->getSourceRange()); LowerBound = Res.get(); if (LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_S) || LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_U)) Diag(LowerBound->getExprLoc(), diag::warn_omp_section_is_char) << 0 << LowerBound->getSourceRange(); } if (Length) { auto Res = PerformOpenMPImplicitIntegerConversion(Length->getExprLoc(), Length); if (Res.isInvalid()) return ExprError(Diag(Length->getExprLoc(), diag::err_omp_typecheck_section_not_integer) << 1 << Length->getSourceRange()); Length = Res.get(); if (Length->getType()->isSpecificBuiltinType(BuiltinType::Char_S) || Length->getType()->isSpecificBuiltinType(BuiltinType::Char_U)) Diag(Length->getExprLoc(), diag::warn_omp_section_is_char) << 1 << Length->getSourceRange(); } if (Stride) { ExprResult Res = PerformOpenMPImplicitIntegerConversion(Stride->getExprLoc(), Stride); if (Res.isInvalid()) return ExprError(Diag(Stride->getExprLoc(), diag::err_omp_typecheck_section_not_integer) << 1 << Stride->getSourceRange()); Stride = Res.get(); if (Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_S) || Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_U)) Diag(Stride->getExprLoc(), diag::warn_omp_section_is_char) << 1 << Stride->getSourceRange(); } // C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly, // C++ [expr.sub]p1: The type "T" shall be a completely-defined object // type. Note that functions are not objects, and that (in C99 parlance) // incomplete types are not object types. if (ResultTy->isFunctionType()) { Diag(Base->getExprLoc(), diag::err_omp_section_function_type) << ResultTy << Base->getSourceRange(); return ExprError(); } if (RequireCompleteType(Base->getExprLoc(), ResultTy, diag::err_omp_section_incomplete_type, Base)) return ExprError(); if (LowerBound && !OriginalTy->isAnyPointerType()) { Expr::EvalResult Result; if (LowerBound->EvaluateAsInt(Result, Context)) { // OpenMP 5.0, [2.1.5 Array Sections] // The array section must be a subset of the original array. llvm::APSInt LowerBoundValue = Result.Val.getInt(); if (LowerBoundValue.isNegative()) { Diag(LowerBound->getExprLoc(), diag::err_omp_section_not_subset_of_array) << LowerBound->getSourceRange(); return ExprError(); } } } if (Length) { Expr::EvalResult Result; if (Length->EvaluateAsInt(Result, Context)) { // OpenMP 5.0, [2.1.5 Array Sections] // The length must evaluate to non-negative integers. 
llvm::APSInt LengthValue = Result.Val.getInt(); if (LengthValue.isNegative()) { Diag(Length->getExprLoc(), diag::err_omp_section_length_negative) << LengthValue.toString(/*Radix=*/10, /*Signed=*/true) << Length->getSourceRange(); return ExprError(); } } } else if (ColonLocFirst.isValid() && (OriginalTy.isNull() || (!OriginalTy->isConstantArrayType() && !OriginalTy->isVariableArrayType()))) { // OpenMP 5.0, [2.1.5 Array Sections] // When the size of the array dimension is not known, the length must be // specified explicitly. Diag(ColonLocFirst, diag::err_omp_section_length_undefined) << (!OriginalTy.isNull() && OriginalTy->isArrayType()); return ExprError(); } if (Stride) { Expr::EvalResult Result; if (Stride->EvaluateAsInt(Result, Context)) { // OpenMP 5.0, [2.1.5 Array Sections] // The stride must evaluate to a positive integer. llvm::APSInt StrideValue = Result.Val.getInt(); if (!StrideValue.isStrictlyPositive()) { Diag(Stride->getExprLoc(), diag::err_omp_section_stride_non_positive) << StrideValue.toString(/*Radix=*/10, /*Signed=*/true) << Stride->getSourceRange(); return ExprError(); } } } if (!Base->getType()->isSpecificPlaceholderType( BuiltinType::OMPArraySection)) { ExprResult Result = DefaultFunctionArrayLvalueConversion(Base); if (Result.isInvalid()) return ExprError(); Base = Result.get(); } return new (Context) OMPArraySectionExpr( Base, LowerBound, Length, Stride, Context.OMPArraySectionTy, VK_LValue, OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc); } ExprResult Sema::ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef Dims, ArrayRef Brackets) { if (Base->getType()->isPlaceholderType()) { ExprResult Result = CheckPlaceholderExpr(Base); if (Result.isInvalid()) return ExprError(); Result = DefaultLvalueConversion(Result.get()); if (Result.isInvalid()) return ExprError(); Base = Result.get(); } QualType BaseTy = Base->getType(); // Delay analysis of the types/expressions if instantiation/specialization is // required. if (!BaseTy->isPointerType() && Base->isTypeDependent()) return OMPArrayShapingExpr::Create(Context, Context.DependentTy, Base, LParenLoc, RParenLoc, Dims, Brackets); if (!BaseTy->isPointerType() || (!Base->isTypeDependent() && BaseTy->getPointeeType()->isIncompleteType())) return ExprError(Diag(Base->getExprLoc(), diag::err_omp_non_pointer_type_array_shaping_base) << Base->getSourceRange()); SmallVector NewDims; bool ErrorFound = false; for (Expr *Dim : Dims) { if (Dim->getType()->isPlaceholderType()) { ExprResult Result = CheckPlaceholderExpr(Dim); if (Result.isInvalid()) { ErrorFound = true; continue; } Result = DefaultLvalueConversion(Result.get()); if (Result.isInvalid()) { ErrorFound = true; continue; } Dim = Result.get(); } if (!Dim->isTypeDependent()) { ExprResult Result = PerformOpenMPImplicitIntegerConversion(Dim->getExprLoc(), Dim); if (Result.isInvalid()) { ErrorFound = true; Diag(Dim->getExprLoc(), diag::err_omp_typecheck_shaping_not_integer) << Dim->getSourceRange(); continue; } Dim = Result.get(); Expr::EvalResult EvResult; if (!Dim->isValueDependent() && Dim->EvaluateAsInt(EvResult, Context)) { // OpenMP 5.0, [2.1.4 Array Shaping] // Each si is an integral type expression that must evaluate to a // positive integer. 
llvm::APSInt Value = EvResult.Val.getInt(); if (!Value.isStrictlyPositive()) { Diag(Dim->getExprLoc(), diag::err_omp_shaping_dimension_not_positive) << Value.toString(/*Radix=*/10, /*Signed=*/true) << Dim->getSourceRange(); ErrorFound = true; continue; } } } NewDims.push_back(Dim); } if (ErrorFound) return ExprError(); return OMPArrayShapingExpr::Create(Context, Context.OMPArrayShapingTy, Base, LParenLoc, RParenLoc, NewDims, Brackets); } ExprResult Sema::ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef Data) { SmallVector ID; bool IsCorrect = true; for (const OMPIteratorData &D : Data) { TypeSourceInfo *TInfo = nullptr; SourceLocation StartLoc; QualType DeclTy; if (!D.Type.getAsOpaquePtr()) { // OpenMP 5.0, 2.1.6 Iterators // In an iterator-specifier, if the iterator-type is not specified then // the type of that iterator is of int type. DeclTy = Context.IntTy; StartLoc = D.DeclIdentLoc; } else { DeclTy = GetTypeFromParser(D.Type, &TInfo); StartLoc = TInfo->getTypeLoc().getBeginLoc(); } bool IsDeclTyDependent = DeclTy->isDependentType() || DeclTy->containsUnexpandedParameterPack() || DeclTy->isInstantiationDependentType(); if (!IsDeclTyDependent) { if (!DeclTy->isIntegralType(Context) && !DeclTy->isAnyPointerType()) { // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++ // The iterator-type must be an integral or pointer type. Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer) << DeclTy; IsCorrect = false; continue; } if (DeclTy.isConstant(Context)) { // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++ // The iterator-type must not be const qualified. Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer) << DeclTy; IsCorrect = false; continue; } } // Iterator declaration. assert(D.DeclIdent && "Identifier expected."); // Always try to create iterator declarator to avoid extra error messages // about unknown declarations use. auto *VD = VarDecl::Create(Context, CurContext, StartLoc, D.DeclIdentLoc, D.DeclIdent, DeclTy, TInfo, SC_None); VD->setImplicit(); if (S) { // Check for conflicting previous declaration. 
DeclarationNameInfo NameInfo(VD->getDeclName(), D.DeclIdentLoc); LookupResult Previous(*this, NameInfo, LookupOrdinaryName, ForVisibleRedeclaration); Previous.suppressDiagnostics(); LookupName(Previous, S); FilterLookupForScope(Previous, CurContext, S, /*ConsiderLinkage=*/false, /*AllowInlineNamespace=*/false); if (!Previous.empty()) { NamedDecl *Old = Previous.getRepresentativeDecl(); Diag(D.DeclIdentLoc, diag::err_redefinition) << VD->getDeclName(); Diag(Old->getLocation(), diag::note_previous_definition); } else { PushOnScopeChains(VD, S); } } else { CurContext->addDecl(VD); } Expr *Begin = D.Range.Begin; if (!IsDeclTyDependent && Begin && !Begin->isTypeDependent()) { ExprResult BeginRes = PerformImplicitConversion(Begin, DeclTy, AA_Converting); Begin = BeginRes.get(); } Expr *End = D.Range.End; if (!IsDeclTyDependent && End && !End->isTypeDependent()) { ExprResult EndRes = PerformImplicitConversion(End, DeclTy, AA_Converting); End = EndRes.get(); } Expr *Step = D.Range.Step; if (!IsDeclTyDependent && Step && !Step->isTypeDependent()) { if (!Step->getType()->isIntegralType(Context)) { Diag(Step->getExprLoc(), diag::err_omp_iterator_step_not_integral) << Step << Step->getSourceRange(); IsCorrect = false; continue; } llvm::APSInt Result; bool IsConstant = Step->isIntegerConstantExpr(Result, Context); // OpenMP 5.0, 2.1.6 Iterators, Restrictions // If the step expression of a range-specification equals zero, the // behavior is unspecified. if (IsConstant && Result.isNullValue()) { Diag(Step->getExprLoc(), diag::err_omp_iterator_step_constant_zero) << Step << Step->getSourceRange(); IsCorrect = false; continue; } } if (!Begin || !End || !IsCorrect) { IsCorrect = false; continue; } OMPIteratorExpr::IteratorDefinition &IDElem = ID.emplace_back(); IDElem.IteratorDecl = VD; IDElem.AssignmentLoc = D.AssignLoc; IDElem.Range.Begin = Begin; IDElem.Range.End = End; IDElem.Range.Step = Step; IDElem.ColonLoc = D.ColonLoc; IDElem.SecondColonLoc = D.SecColonLoc; } if (!IsCorrect) { // Invalidate all created iterator declarations if error is found. for (const OMPIteratorExpr::IteratorDefinition &D : ID) { if (Decl *ID = D.IteratorDecl) ID->setInvalidDecl(); } return ExprError(); } SmallVector Helpers; if (!CurContext->isDependentContext()) { // Build number of iterations for each iteration range. // Ni = ((Stepi > 0) ?
((Endi + Stepi -1 - Begini)/Stepi) : // ((Begini-Stepi-1-Endi) / -Stepi); for (OMPIteratorExpr::IteratorDefinition &D : ID) { // (Endi - Begini) ExprResult Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, D.Range.End, D.Range.Begin); if(!Res.isUsable()) { IsCorrect = false; continue; } ExprResult St, St1; if (D.Range.Step) { St = D.Range.Step; // (Endi - Begini) + Stepi Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res.get(), St.get()); if (!Res.isUsable()) { IsCorrect = false; continue; } // (Endi - Begini) + Stepi - 1 Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, Res.get(), ActOnIntegerConstant(D.AssignmentLoc, 1).get()); if (!Res.isUsable()) { IsCorrect = false; continue; } // ((Endi - Begini) + Stepi - 1) / Stepi Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res.get(), St.get()); if (!Res.isUsable()) { IsCorrect = false; continue; } St1 = CreateBuiltinUnaryOp(D.AssignmentLoc, UO_Minus, D.Range.Step); // (Begini - Endi) ExprResult Res1 = CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, D.Range.Begin, D.Range.End); if (!Res1.isUsable()) { IsCorrect = false; continue; } // (Begini - Endi) - Stepi Res1 = CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res1.get(), St1.get()); if (!Res1.isUsable()) { IsCorrect = false; continue; } // (Begini - Endi) - Stepi - 1 Res1 = CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, Res1.get(), ActOnIntegerConstant(D.AssignmentLoc, 1).get()); if (!Res1.isUsable()) { IsCorrect = false; continue; } // ((Begini - Endi) - Stepi - 1) / (-Stepi) Res1 = CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res1.get(), St1.get()); if (!Res1.isUsable()) { IsCorrect = false; continue; } // Stepi > 0. ExprResult CmpRes = CreateBuiltinBinOp(D.AssignmentLoc, BO_GT, D.Range.Step, ActOnIntegerConstant(D.AssignmentLoc, 0).get()); if (!CmpRes.isUsable()) { IsCorrect = false; continue; } Res = ActOnConditionalOp(D.AssignmentLoc, D.AssignmentLoc, CmpRes.get(), Res.get(), Res1.get()); if (!Res.isUsable()) { IsCorrect = false; continue; } } Res = ActOnFinishFullExpr(Res.get(), /*DiscardedValue=*/false); if (!Res.isUsable()) { IsCorrect = false; continue; } // Build counter update. // Build counter. auto *CounterVD = VarDecl::Create(Context, CurContext, D.IteratorDecl->getBeginLoc(), D.IteratorDecl->getBeginLoc(), nullptr, Res.get()->getType(), nullptr, SC_None); CounterVD->setImplicit(); ExprResult RefRes = BuildDeclRefExpr(CounterVD, CounterVD->getType(), VK_LValue, D.IteratorDecl->getBeginLoc()); // Build counter update. 
// I = Begini + counter * Stepi; ExprResult UpdateRes; if (D.Range.Step) { UpdateRes = CreateBuiltinBinOp( D.AssignmentLoc, BO_Mul, DefaultLvalueConversion(RefRes.get()).get(), St.get()); } else { UpdateRes = DefaultLvalueConversion(RefRes.get()); } if (!UpdateRes.isUsable()) { IsCorrect = false; continue; } UpdateRes = CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, D.Range.Begin, UpdateRes.get()); if (!UpdateRes.isUsable()) { IsCorrect = false; continue; } ExprResult VDRes = BuildDeclRefExpr(cast(D.IteratorDecl), cast(D.IteratorDecl)->getType(), VK_LValue, D.IteratorDecl->getBeginLoc()); UpdateRes = CreateBuiltinBinOp(D.AssignmentLoc, BO_Assign, VDRes.get(), UpdateRes.get()); if (!UpdateRes.isUsable()) { IsCorrect = false; continue; } UpdateRes = ActOnFinishFullExpr(UpdateRes.get(), /*DiscardedValue=*/true); if (!UpdateRes.isUsable()) { IsCorrect = false; continue; } ExprResult CounterUpdateRes = CreateBuiltinUnaryOp(D.AssignmentLoc, UO_PreInc, RefRes.get()); if (!CounterUpdateRes.isUsable()) { IsCorrect = false; continue; } CounterUpdateRes = ActOnFinishFullExpr(CounterUpdateRes.get(), /*DiscardedValue=*/true); if (!CounterUpdateRes.isUsable()) { IsCorrect = false; continue; } OMPIteratorHelperData &HD = Helpers.emplace_back(); HD.CounterVD = CounterVD; HD.Upper = Res.get(); HD.Update = UpdateRes.get(); HD.CounterUpdate = CounterUpdateRes.get(); } } else { Helpers.assign(ID.size(), {}); } if (!IsCorrect) { // Invalidate all created iterator declarations if error is found. for (const OMPIteratorExpr::IteratorDefinition &D : ID) { if (Decl *ID = D.IteratorDecl) ID->setInvalidDecl(); } return ExprError(); } return OMPIteratorExpr::Create(Context, Context.OMPIteratorTy, IteratorKwLoc, LLoc, RLoc, ID, Helpers); } ExprResult Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc) { Expr *LHSExp = Base; Expr *RHSExp = Idx; ExprValueKind VK = VK_LValue; ExprObjectKind OK = OK_Ordinary; // Per C++ core issue 1213, the result is an xvalue if either operand is // a non-lvalue array, and an lvalue otherwise. if (getLangOpts().CPlusPlus11) { for (auto *Op : {LHSExp, RHSExp}) { Op = Op->IgnoreImplicit(); if (Op->getType()->isArrayType() && !Op->isLValue()) VK = VK_XValue; } } // Perform default conversions. if (!LHSExp->getType()->getAs()) { ExprResult Result = DefaultFunctionArrayLvalueConversion(LHSExp); if (Result.isInvalid()) return ExprError(); LHSExp = Result.get(); } ExprResult Result = DefaultFunctionArrayLvalueConversion(RHSExp); if (Result.isInvalid()) return ExprError(); RHSExp = Result.get(); QualType LHSTy = LHSExp->getType(), RHSTy = RHSExp->getType(); // C99 6.5.2.1p2: the expression e1[e2] is by definition precisely equivalent // to the expression *((e1)+(e2)). This means the array "Base" may actually be // in the subscript position. As a result, we need to derive the array base // and index from the expression types. Expr *BaseExpr, *IndexExpr; QualType ResultType; if (LHSTy->isDependentType() || RHSTy->isDependentType()) { BaseExpr = LHSExp; IndexExpr = RHSExp; ResultType = Context.DependentTy; } else if (const PointerType *PTy = LHSTy->getAs()) { BaseExpr = LHSExp; IndexExpr = RHSExp; ResultType = PTy->getPointeeType(); } else if (const ObjCObjectPointerType *PTy = LHSTy->getAs()) { BaseExpr = LHSExp; IndexExpr = RHSExp; // Use custom logic if this should be the pseudo-object subscript // expression. 
if (!LangOpts.isSubscriptPointerArithmetic()) return BuildObjCSubscriptExpression(RLoc, BaseExpr, IndexExpr, nullptr, nullptr); ResultType = PTy->getPointeeType(); } else if (const PointerType *PTy = RHSTy->getAs()) { // Handle the uncommon case of "123[Ptr]". BaseExpr = RHSExp; IndexExpr = LHSExp; ResultType = PTy->getPointeeType(); } else if (const ObjCObjectPointerType *PTy = RHSTy->getAs()) { // Handle the uncommon case of "123[Ptr]". BaseExpr = RHSExp; IndexExpr = LHSExp; ResultType = PTy->getPointeeType(); if (!LangOpts.isSubscriptPointerArithmetic()) { Diag(LLoc, diag::err_subscript_nonfragile_interface) << ResultType << BaseExpr->getSourceRange(); return ExprError(); } } else if (const VectorType *VTy = LHSTy->getAs()) { BaseExpr = LHSExp; // vectors: V[123] IndexExpr = RHSExp; // We apply C++ DR1213 to vector subscripting too. if (getLangOpts().CPlusPlus11 && LHSExp->getValueKind() == VK_RValue) { ExprResult Materialized = TemporaryMaterializationConversion(LHSExp); if (Materialized.isInvalid()) return ExprError(); LHSExp = Materialized.get(); } VK = LHSExp->getValueKind(); if (VK != VK_RValue) OK = OK_VectorComponent; ResultType = VTy->getElementType(); QualType BaseType = BaseExpr->getType(); Qualifiers BaseQuals = BaseType.getQualifiers(); Qualifiers MemberQuals = ResultType.getQualifiers(); Qualifiers Combined = BaseQuals + MemberQuals; if (Combined != MemberQuals) ResultType = Context.getQualifiedType(ResultType, Combined); } else if (LHSTy->isArrayType()) { // If we see an array that wasn't promoted by // DefaultFunctionArrayLvalueConversion, it must be an array that // wasn't promoted because of the C90 rule that doesn't // allow promoting non-lvalue arrays. Warn, then // force the promotion here. Diag(LHSExp->getBeginLoc(), diag::ext_subscript_non_lvalue) << LHSExp->getSourceRange(); LHSExp = ImpCastExprToType(LHSExp, Context.getArrayDecayedType(LHSTy), CK_ArrayToPointerDecay).get(); LHSTy = LHSExp->getType(); BaseExpr = LHSExp; IndexExpr = RHSExp; ResultType = LHSTy->getAs()->getPointeeType(); } else if (RHSTy->isArrayType()) { // Same as previous, except for 123[f().a] case Diag(RHSExp->getBeginLoc(), diag::ext_subscript_non_lvalue) << RHSExp->getSourceRange(); RHSExp = ImpCastExprToType(RHSExp, Context.getArrayDecayedType(RHSTy), CK_ArrayToPointerDecay).get(); RHSTy = RHSExp->getType(); BaseExpr = RHSExp; IndexExpr = LHSExp; ResultType = RHSTy->getAs()->getPointeeType(); } else { return ExprError(Diag(LLoc, diag::err_typecheck_subscript_value) << LHSExp->getSourceRange() << RHSExp->getSourceRange()); } // C99 6.5.2.1p1 if (!IndexExpr->getType()->isIntegerType() && !IndexExpr->isTypeDependent()) return ExprError(Diag(LLoc, diag::err_typecheck_subscript_not_integer) << IndexExpr->getSourceRange()); if ((IndexExpr->getType()->isSpecificBuiltinType(BuiltinType::Char_S) || IndexExpr->getType()->isSpecificBuiltinType(BuiltinType::Char_U)) && !IndexExpr->isTypeDependent()) Diag(LLoc, diag::warn_subscript_is_char) << IndexExpr->getSourceRange(); // C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly, // C++ [expr.sub]p1: The type "T" shall be a completely-defined object // type. Note that Functions are not objects, and that (in C99 parlance) // incomplete types are not object types. 
if (ResultType->isFunctionType()) { Diag(BaseExpr->getBeginLoc(), diag::err_subscript_function_type) << ResultType << BaseExpr->getSourceRange(); return ExprError(); } if (ResultType->isVoidType() && !getLangOpts().CPlusPlus) { // GNU extension: subscripting on pointer to void Diag(LLoc, diag::ext_gnu_subscript_void_type) << BaseExpr->getSourceRange(); // C forbids expressions of unqualified void type from being l-values. // See IsCForbiddenLValueType. if (!ResultType.hasQualifiers()) VK = VK_RValue; } else if (!ResultType->isDependentType() && RequireCompleteSizedType( LLoc, ResultType, diag::err_subscript_incomplete_or_sizeless_type, BaseExpr)) return ExprError(); assert(VK == VK_RValue || LangOpts.CPlusPlus || !ResultType.isCForbiddenLValueType()); if (LHSExp->IgnoreParenImpCasts()->getType()->isVariablyModifiedType() && FunctionScopes.size() > 1) { if (auto *TT = LHSExp->IgnoreParenImpCasts()->getType()->getAs()) { for (auto I = FunctionScopes.rbegin(), E = std::prev(FunctionScopes.rend()); I != E; ++I) { auto *CSI = dyn_cast(*I); if (CSI == nullptr) break; DeclContext *DC = nullptr; if (auto *LSI = dyn_cast(CSI)) DC = LSI->CallOperator; else if (auto *CRSI = dyn_cast(CSI)) DC = CRSI->TheCapturedDecl; else if (auto *BSI = dyn_cast(CSI)) DC = BSI->TheDecl; if (DC) { if (DC->containsDecl(TT->getDecl())) break; captureVariablyModifiedType( Context, LHSExp->IgnoreParenImpCasts()->getType(), CSI); } } } } return new (Context) ArraySubscriptExpr(LHSExp, RHSExp, ResultType, VK, OK, RLoc); } bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param) { if (Param->hasUnparsedDefaultArg()) { // If we've already cleared out the location for the default argument, // that means we're parsing it right now. if (!UnparsedDefaultArgLocs.count(Param)) { Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD; Diag(CallLoc, diag::note_recursive_default_argument_used_here); Param->setInvalidDecl(); return true; } Diag(CallLoc, diag::err_use_of_default_argument_to_function_declared_later) << FD << cast(FD->getDeclContext())->getDeclName(); Diag(UnparsedDefaultArgLocs[Param], diag::note_default_argument_declared_here); return true; } if (Param->hasUninstantiatedDefaultArg() && InstantiateDefaultArgument(CallLoc, FD, Param)) return true; assert(Param->hasInit() && "default argument but no initializer?"); // If the default expression creates temporaries, we need to // push them to the current stack of expression temporaries so they'll // be properly destroyed. // FIXME: We should really be rebuilding the default argument with new // bound temporaries; see the comment in PR5810. // We don't need to do that with block decls, though, because // blocks in default argument expression can never capture anything. if (auto Init = dyn_cast(Param->getInit())) { // Set the "needs cleanups" bit regardless of whether there are // any explicit objects. Cleanup.setExprNeedsCleanups(Init->cleanupsHaveSideEffects()); // Append all the objects to the cleanup list. Right now, this // should always be a no-op, because blocks in default argument // expressions should never be able to capture anything. assert(!Init->getNumObjects() && "default argument expression has capturing blocks?"); } // We already type-checked the argument, so we know it works. // Just mark all of the declarations in this potentially-evaluated expression // as being "referenced". 
EnterExpressionEvaluationContext EvalContext( *this, ExpressionEvaluationContext::PotentiallyEvaluated, Param); MarkDeclarationsReferencedInExpr(Param->getDefaultArg(), /*SkipLocalVariables=*/true); return false; } ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param) { assert(Param->hasDefaultArg() && "can't build nonexistent default arg"); if (CheckCXXDefaultArgExpr(CallLoc, FD, Param)) return ExprError(); return CXXDefaultArgExpr::Create(Context, CallLoc, Param, CurContext); } Sema::VariadicCallType Sema::getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn) { if (Proto && Proto->isVariadic()) { if (dyn_cast_or_null(FDecl)) return VariadicConstructor; else if (Fn && Fn->getType()->isBlockPointerType()) return VariadicBlock; else if (FDecl) { if (CXXMethodDecl *Method = dyn_cast_or_null(FDecl)) if (Method->isInstance()) return VariadicMethod; } else if (Fn && Fn->getType() == Context.BoundMemberTy) return VariadicMethod; return VariadicFunction; } return VariadicDoesNotApply; } namespace { class FunctionCallCCC final : public FunctionCallFilterCCC { public: FunctionCallCCC(Sema &SemaRef, const IdentifierInfo *FuncName, unsigned NumArgs, MemberExpr *ME) : FunctionCallFilterCCC(SemaRef, NumArgs, false, ME), FunctionName(FuncName) {} bool ValidateCandidate(const TypoCorrection &candidate) override { if (!candidate.getCorrectionSpecifier() || candidate.getCorrectionAsIdentifierInfo() != FunctionName) { return false; } return FunctionCallFilterCCC::ValidateCandidate(candidate); } std::unique_ptr clone() override { return std::make_unique(*this); } private: const IdentifierInfo *const FunctionName; }; } static TypoCorrection TryTypoCorrectionForCall(Sema &S, Expr *Fn, FunctionDecl *FDecl, ArrayRef Args) { MemberExpr *ME = dyn_cast(Fn); DeclarationName FuncName = FDecl->getDeclName(); SourceLocation NameLoc = ME ? ME->getMemberLoc() : Fn->getBeginLoc(); FunctionCallCCC CCC(S, FuncName.getAsIdentifierInfo(), Args.size(), ME); if (TypoCorrection Corrected = S.CorrectTypo( DeclarationNameInfo(FuncName, NameLoc), Sema::LookupOrdinaryName, S.getScopeForContext(S.CurContext), nullptr, CCC, Sema::CTK_ErrorRecovery)) { if (NamedDecl *ND = Corrected.getFoundDecl()) { if (Corrected.isOverloaded()) { OverloadCandidateSet OCS(NameLoc, OverloadCandidateSet::CSK_Normal); OverloadCandidateSet::iterator Best; for (NamedDecl *CD : Corrected) { if (FunctionDecl *FD = dyn_cast(CD)) S.AddOverloadCandidate(FD, DeclAccessPair::make(FD, AS_none), Args, OCS); } switch (OCS.BestViableFunction(S, NameLoc, Best)) { case OR_Success: ND = Best->FoundDecl; Corrected.setCorrectionDecl(ND); break; default: break; } } ND = ND->getUnderlyingDecl(); if (isa(ND) || isa(ND)) return Corrected; } } return TypoCorrection(); } /// ConvertArgumentsForCall - Converts the arguments specified in /// Args/NumArgs to the parameter types of the function FDecl with /// function prototype Proto. Call is the call expression itself, and /// Fn is the function expression. For a C++ member function, this /// routine does not attempt to convert the object argument. Returns /// true if the call is ill-formed. bool Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef Args, SourceLocation RParenLoc, bool IsExecConfig) { // Bail out early if calling a builtin with custom typechecking. 
if (FDecl) if (unsigned ID = FDecl->getBuiltinID()) if (Context.BuiltinInfo.hasCustomTypechecking(ID)) return false; // C99 6.5.2.2p7 - the arguments are implicitly converted, as if by // assignment, to the types of the corresponding parameter, ... unsigned NumParams = Proto->getNumParams(); bool Invalid = false; unsigned MinArgs = FDecl ? FDecl->getMinRequiredArguments() : NumParams; unsigned FnKind = Fn->getType()->isBlockPointerType() ? 1 /* block */ : (IsExecConfig ? 3 /* kernel function (exec config) */ : 0 /* function */); // If too few arguments are available (and we don't have default // arguments for the remaining parameters), don't make the call. if (Args.size() < NumParams) { if (Args.size() < MinArgs) { TypoCorrection TC; if (FDecl && (TC = TryTypoCorrectionForCall(*this, Fn, FDecl, Args))) { unsigned diag_id = MinArgs == NumParams && !Proto->isVariadic() ? diag::err_typecheck_call_too_few_args_suggest : diag::err_typecheck_call_too_few_args_at_least_suggest; diagnoseTypo(TC, PDiag(diag_id) << FnKind << MinArgs << static_cast(Args.size()) << TC.getCorrectionRange()); } else if (MinArgs == 1 && FDecl && FDecl->getParamDecl(0)->getDeclName()) Diag(RParenLoc, MinArgs == NumParams && !Proto->isVariadic() ? diag::err_typecheck_call_too_few_args_one : diag::err_typecheck_call_too_few_args_at_least_one) << FnKind << FDecl->getParamDecl(0) << Fn->getSourceRange(); else Diag(RParenLoc, MinArgs == NumParams && !Proto->isVariadic() ? diag::err_typecheck_call_too_few_args : diag::err_typecheck_call_too_few_args_at_least) << FnKind << MinArgs << static_cast(Args.size()) << Fn->getSourceRange(); // Emit the location of the prototype. if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig) Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl; return true; } // We reserve space for the default arguments when we create // the call expression, before calling ConvertArgumentsForCall. assert((Call->getNumArgs() == NumParams) && "We should have reserved space for the default arguments before!"); } // If too many are passed and not variadic, error on the extras and drop // them. if (Args.size() > NumParams) { if (!Proto->isVariadic()) { TypoCorrection TC; if (FDecl && (TC = TryTypoCorrectionForCall(*this, Fn, FDecl, Args))) { unsigned diag_id = MinArgs == NumParams && !Proto->isVariadic() ? diag::err_typecheck_call_too_many_args_suggest : diag::err_typecheck_call_too_many_args_at_most_suggest; diagnoseTypo(TC, PDiag(diag_id) << FnKind << NumParams << static_cast(Args.size()) << TC.getCorrectionRange()); } else if (NumParams == 1 && FDecl && FDecl->getParamDecl(0)->getDeclName()) Diag(Args[NumParams]->getBeginLoc(), MinArgs == NumParams ? diag::err_typecheck_call_too_many_args_one : diag::err_typecheck_call_too_many_args_at_most_one) << FnKind << FDecl->getParamDecl(0) << static_cast(Args.size()) << Fn->getSourceRange() << SourceRange(Args[NumParams]->getBeginLoc(), Args.back()->getEndLoc()); else Diag(Args[NumParams]->getBeginLoc(), MinArgs == NumParams ? diag::err_typecheck_call_too_many_args : diag::err_typecheck_call_too_many_args_at_most) << FnKind << NumParams << static_cast(Args.size()) << Fn->getSourceRange() << SourceRange(Args[NumParams]->getBeginLoc(), Args.back()->getEndLoc()); // Emit the location of the prototype. if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig) Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl; // This deletes the extra arguments. 
Call->shrinkNumArgs(NumParams); return true; } } SmallVector AllArgs; VariadicCallType CallType = getVariadicCallType(FDecl, Proto, Fn); Invalid = GatherArgumentsForCall(Call->getBeginLoc(), FDecl, Proto, 0, Args, AllArgs, CallType); if (Invalid) return true; unsigned TotalNumArgs = AllArgs.size(); for (unsigned i = 0; i < TotalNumArgs; ++i) Call->setArg(i, AllArgs[i]); return false; } bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef Args, SmallVectorImpl &AllArgs, VariadicCallType CallType, bool AllowExplicit, bool IsListInitialization) { unsigned NumParams = Proto->getNumParams(); bool Invalid = false; size_t ArgIx = 0; // Continue to check argument types (even if we have too few/many args). for (unsigned i = FirstParam; i < NumParams; i++) { QualType ProtoArgType = Proto->getParamType(i); Expr *Arg; ParmVarDecl *Param = FDecl ? FDecl->getParamDecl(i) : nullptr; if (ArgIx < Args.size()) { Arg = Args[ArgIx++]; if (RequireCompleteType(Arg->getBeginLoc(), ProtoArgType, diag::err_call_incomplete_argument, Arg)) return true; // Strip the unbridged-cast placeholder expression off, if applicable. bool CFAudited = false; if (Arg->getType() == Context.ARCUnbridgedCastTy && FDecl && FDecl->hasAttr() && (!Param || !Param->hasAttr())) Arg = stripARCUnbridgedCast(Arg); else if (getLangOpts().ObjCAutoRefCount && FDecl && FDecl->hasAttr() && (!Param || !Param->hasAttr())) CFAudited = true; if (Proto->getExtParameterInfo(i).isNoEscape()) if (auto *BE = dyn_cast(Arg->IgnoreParenNoopCasts(Context))) BE->getBlockDecl()->setDoesNotEscape(); InitializedEntity Entity = Param ? InitializedEntity::InitializeParameter(Context, Param, ProtoArgType) : InitializedEntity::InitializeParameter( Context, ProtoArgType, Proto->isParamConsumed(i)); // Remember that parameter belongs to a CF audited API. if (CFAudited) Entity.setParameterCFAudited(); ExprResult ArgE = PerformCopyInitialization( Entity, SourceLocation(), Arg, IsListInitialization, AllowExplicit); if (ArgE.isInvalid()) return true; Arg = ArgE.getAs(); } else { assert(Param && "can't use default arguments without a known callee"); ExprResult ArgExpr = BuildCXXDefaultArgExpr(CallLoc, FDecl, Param); if (ArgExpr.isInvalid()) return true; Arg = ArgExpr.getAs(); } // Check for array bounds violations for each argument to the call. This // check only triggers warnings when the argument isn't a more complex Expr // with its own checking, such as a BinaryOperator. CheckArrayAccess(Arg); // Check for violations of C99 static array rules (C99 6.7.5.3p7). CheckStaticArrayArgument(CallLoc, Param, Arg); AllArgs.push_back(Arg); } // If this is a variadic call, handle args passed through "...". if (CallType != VariadicDoesNotApply) { // Assume that extern "C" functions with variadic arguments that // return __unknown_anytype aren't *really* variadic. if (Proto->getReturnType() == Context.UnknownAnyTy && FDecl && FDecl->isExternC()) { for (Expr *A : Args.slice(ArgIx)) { QualType paramType; // ignored ExprResult arg = checkUnknownAnyArg(CallLoc, A, paramType); Invalid |= arg.isInvalid(); AllArgs.push_back(arg.get()); } // Otherwise do argument promotion, (C99 6.5.2.2p7). } else { for (Expr *A : Args.slice(ArgIx)) { ExprResult Arg = DefaultVariadicArgumentPromotion(A, CallType, FDecl); Invalid |= Arg.isInvalid(); AllArgs.push_back(Arg.get()); } } // Check for array bounds violations. 
for (Expr *A : Args.slice(ArgIx)) CheckArrayAccess(A); } return Invalid; } static void DiagnoseCalleeStaticArrayParam(Sema &S, ParmVarDecl *PVD) { TypeLoc TL = PVD->getTypeSourceInfo()->getTypeLoc(); if (DecayedTypeLoc DTL = TL.getAs()) TL = DTL.getOriginalLoc(); if (ArrayTypeLoc ATL = TL.getAs()) S.Diag(PVD->getLocation(), diag::note_callee_static_array) << ATL.getLocalSourceRange(); } /// CheckStaticArrayArgument - If the given argument corresponds to a static /// array parameter, check that it is non-null, and that if it is formed by /// array-to-pointer decay, the underlying array is sufficiently large. /// /// C99 6.7.5.3p7: If the keyword static also appears within the [ and ] of the /// array type derivation, then for each call to the function, the value of the /// corresponding actual argument shall provide access to the first element of /// an array with at least as many elements as specified by the size expression. void Sema::CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr) { // Static array parameters are not supported in C++. if (!Param || getLangOpts().CPlusPlus) return; QualType OrigTy = Param->getOriginalType(); const ArrayType *AT = Context.getAsArrayType(OrigTy); if (!AT || AT->getSizeModifier() != ArrayType::Static) return; if (ArgExpr->isNullPointerConstant(Context, Expr::NPC_NeverValueDependent)) { Diag(CallLoc, diag::warn_null_arg) << ArgExpr->getSourceRange(); DiagnoseCalleeStaticArrayParam(*this, Param); return; } const ConstantArrayType *CAT = dyn_cast(AT); if (!CAT) return; const ConstantArrayType *ArgCAT = Context.getAsConstantArrayType(ArgExpr->IgnoreParenCasts()->getType()); if (!ArgCAT) return; if (getASTContext().hasSameUnqualifiedType(CAT->getElementType(), ArgCAT->getElementType())) { if (ArgCAT->getSize().ult(CAT->getSize())) { Diag(CallLoc, diag::warn_static_array_too_small) << ArgExpr->getSourceRange() << (unsigned)ArgCAT->getSize().getZExtValue() << (unsigned)CAT->getSize().getZExtValue() << 0; DiagnoseCalleeStaticArrayParam(*this, Param); } return; } Optional ArgSize = getASTContext().getTypeSizeInCharsIfKnown(ArgCAT); Optional ParmSize = getASTContext().getTypeSizeInCharsIfKnown(CAT); if (ArgSize && ParmSize && *ArgSize < *ParmSize) { Diag(CallLoc, diag::warn_static_array_too_small) << ArgExpr->getSourceRange() << (unsigned)ArgSize->getQuantity() << (unsigned)ParmSize->getQuantity() << 1; DiagnoseCalleeStaticArrayParam(*this, Param); } } /// Given a function expression of unknown-any type, try to rebuild it /// to have a function type. static ExprResult rebuildUnknownAnyFunction(Sema &S, Expr *fn); /// Is the given type a placeholder that we need to lower out /// immediately during argument processing? static bool isPlaceholderToRemoveAsArg(QualType type) { // Placeholders are never sugared. const BuiltinType *placeholder = dyn_cast(type); if (!placeholder) return false; switch (placeholder->getKind()) { // Ignore all the non-placeholder types. #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ case BuiltinType::Id: #include "clang/Basic/OpenCLImageTypes.def" #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ case BuiltinType::Id: #include "clang/Basic/OpenCLExtensionTypes.def" // In practice we'll never use this, since all SVE types are sugared // via TypedefTypes rather than exposed directly as BuiltinTypes. 
#define SVE_TYPE(Name, Id, SingletonId) \ case BuiltinType::Id: #include "clang/Basic/AArch64SVEACLETypes.def" #define PLACEHOLDER_TYPE(ID, SINGLETON_ID) #define BUILTIN_TYPE(ID, SINGLETON_ID) case BuiltinType::ID: #include "clang/AST/BuiltinTypes.def" return false; // We cannot lower out overload sets; they might validly be resolved // by the call machinery. case BuiltinType::Overload: return false; // Unbridged casts in ARC can be handled in some call positions and // should be left in place. case BuiltinType::ARCUnbridgedCast: return false; // Pseudo-objects should be converted as soon as possible. case BuiltinType::PseudoObject: return true; // The debugger mode could theoretically but currently does not try // to resolve unknown-typed arguments based on known parameter types. case BuiltinType::UnknownAny: return true; // These are always invalid as call arguments and should be reported. case BuiltinType::BoundMember: case BuiltinType::BuiltinFn: case BuiltinType::IncompleteMatrixIdx: case BuiltinType::OMPArraySection: case BuiltinType::OMPArrayShaping: case BuiltinType::OMPIterator: return true; } llvm_unreachable("bad builtin type kind"); } /// Check an argument list for placeholders that we won't try to /// handle later. static bool checkArgsForPlaceholders(Sema &S, MultiExprArg args) { // Apply this processing to all the arguments at once instead of // dying at the first failure. bool hasInvalid = false; for (size_t i = 0, e = args.size(); i != e; i++) { if (isPlaceholderToRemoveAsArg(args[i]->getType())) { ExprResult result = S.CheckPlaceholderExpr(args[i]); if (result.isInvalid()) hasInvalid = true; else args[i] = result.get(); } else if (hasInvalid) { (void)S.CorrectDelayedTyposInExpr(args[i]); } } return hasInvalid; } /// If a builtin function has a pointer argument with no explicit address /// space, then it should be able to accept a pointer to any address /// space as input. In order to do this, we need to replace the /// standard builtin declaration with one that uses the same address space /// as the call. /// /// \returns nullptr If this builtin is not a candidate for a rewrite i.e. /// it does not contain any pointer arguments without /// an address space qualifier. Otherwise the rewritten /// FunctionDecl is returned. /// TODO: Handle pointer return types. static FunctionDecl *rewriteBuiltinFunctionDecl(Sema *Sema, ASTContext &Context, FunctionDecl *FDecl, MultiExprArg ArgExprs) { QualType DeclType = FDecl->getType(); const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(DeclType); if (!Context.BuiltinInfo.hasPtrArgsOrResult(FDecl->getBuiltinID()) || !FT || ArgExprs.size() < FT->getNumParams()) return nullptr; bool NeedsNewDecl = false; unsigned i = 0; SmallVector OverloadParams; for (QualType ParamType : FT->param_types()) { // Convert array arguments to pointer to simplify type lookup.
ExprResult ArgRes = Sema->DefaultFunctionArrayLvalueConversion(ArgExprs[i++]); if (ArgRes.isInvalid()) return nullptr; Expr *Arg = ArgRes.get(); QualType ArgType = Arg->getType(); if (!ParamType->isPointerType() || ParamType.hasAddressSpace() || !ArgType->isPointerType() || !ArgType->getPointeeType().hasAddressSpace()) { OverloadParams.push_back(ParamType); continue; } QualType PointeeType = ParamType->getPointeeType(); if (PointeeType.hasAddressSpace()) continue; NeedsNewDecl = true; LangAS AS = ArgType->getPointeeType().getAddressSpace(); PointeeType = Context.getAddrSpaceQualType(PointeeType, AS); OverloadParams.push_back(Context.getPointerType(PointeeType)); } if (!NeedsNewDecl) return nullptr; FunctionProtoType::ExtProtoInfo EPI; EPI.Variadic = FT->isVariadic(); QualType OverloadTy = Context.getFunctionType(FT->getReturnType(), OverloadParams, EPI); DeclContext *Parent = FDecl->getParent(); FunctionDecl *OverloadDecl = FunctionDecl::Create(Context, Parent, FDecl->getLocation(), FDecl->getLocation(), FDecl->getIdentifier(), OverloadTy, /*TInfo=*/nullptr, SC_Extern, false, /*hasPrototype=*/true); SmallVector Params; FT = cast<FunctionProtoType>(OverloadTy); for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) { QualType ParamType = FT->getParamType(i); ParmVarDecl *Parm = ParmVarDecl::Create(Context, OverloadDecl, SourceLocation(), SourceLocation(), nullptr, ParamType, /*TInfo=*/nullptr, SC_None, nullptr); Parm->setScopeInfo(0, i); Params.push_back(Parm); } OverloadDecl->setParams(Params); Sema->mergeDeclAttributes(OverloadDecl, FDecl); return OverloadDecl; } static void checkDirectCallValidity(Sema &S, const Expr *Fn, FunctionDecl *Callee, MultiExprArg ArgExprs) { // `Callee` (when called with ArgExprs) may be ill-formed. enable_if (and // similar attributes) really don't like it when functions are called with an // invalid number of args. if (S.TooManyArguments(Callee->getNumParams(), ArgExprs.size(), /*PartialOverloading=*/false) && !Callee->isVariadic()) return; if (Callee->getMinRequiredArguments() > ArgExprs.size()) return; if (const EnableIfAttr *Attr = S.CheckEnableIf(Callee, Fn->getBeginLoc(), ArgExprs, true)) { S.Diag(Fn->getBeginLoc(), isa<CXXMethodDecl>(Callee) ? diag::err_ovl_no_viable_member_function_in_call : diag::err_ovl_no_viable_function_in_call) << Callee << Callee->getSourceRange(); S.Diag(Callee->getLocation(), diag::note_ovl_candidate_disabled_by_function_cond_attr) << Attr->getCond()->getSourceRange() << Attr->getMessage(); return; } } static bool enclosingClassIsRelatedToClassInWhichMembersWereFound( const UnresolvedMemberExpr *const UME, Sema &S) { const auto GetFunctionLevelDCIfCXXClass = [](Sema &S) -> const CXXRecordDecl * { const DeclContext *const DC = S.getFunctionLevelDeclContext(); if (!DC || !DC->getParent()) return nullptr; // If the call to some member function was made from within a member // function body 'M', return 'M's parent. if (const auto *MD = dyn_cast<CXXMethodDecl>(DC)) return MD->getParent()->getCanonicalDecl(); // else the call was made from within a default member initializer of a // class, so return the class. if (const auto *RD = dyn_cast<CXXRecordDecl>(DC)) return RD->getCanonicalDecl(); return nullptr; }; // If our DeclContext is neither a member function nor a class (in the // case of a lambda in a default member initializer), we can't have an // enclosing 'this'. const CXXRecordDecl *const CurParentClass = GetFunctionLevelDCIfCXXClass(S); if (!CurParentClass) return false; // The naming class for an implicit member function call is the class in which // name lookup starts.
const CXXRecordDecl *const NamingClass = UME->getNamingClass()->getCanonicalDecl(); assert(NamingClass && "Must have naming class even for implicit access"); // If the unresolved member functions were found in a 'naming class' that is // related (either the same or derived from) to the class that contains the // member function that itself contained the implicit member access. return CurParentClass == NamingClass || CurParentClass->isDerivedFrom(NamingClass); } static void tryImplicitlyCaptureThisIfImplicitMemberFunctionAccessWithDependentArgs( Sema &S, const UnresolvedMemberExpr *const UME, SourceLocation CallLoc) { if (!UME) return; LambdaScopeInfo *const CurLSI = S.getCurLambda(); // Only try and implicitly capture 'this' within a C++ Lambda if it hasn't // already been captured, or if this is an implicit member function call (if // it isn't, an attempt to capture 'this' should already have been made). if (!CurLSI || CurLSI->ImpCaptureStyle == CurLSI->ImpCap_None || !UME->isImplicitAccess() || CurLSI->isCXXThisCaptured()) return; // Check if the naming class in which the unresolved members were found is // related (same as or is a base of) to the enclosing class. if (!enclosingClassIsRelatedToClassInWhichMembersWereFound(UME, S)) return; DeclContext *EnclosingFunctionCtx = S.CurContext->getParent()->getParent(); // If the enclosing function is not dependent, then this lambda is // capture ready, so if we can capture this, do so. if (!EnclosingFunctionCtx->isDependentContext()) { // If the current lambda and all enclosing lambdas can capture 'this' - // then go ahead and capture 'this' (since our unresolved overload set // contains at least one non-static member function). if (!S.CheckCXXThisCapture(CallLoc, /*Explcit*/ false, /*Diagnose*/ false)) S.CheckCXXThisCapture(CallLoc); } else if (S.CurContext->isDependentContext()) { // ... since this is an implicit member reference, that might potentially // involve a 'this' capture, mark 'this' for potential capture in // enclosing lambdas. if (CurLSI->ImpCaptureStyle != CurLSI->ImpCap_None) CurLSI->addPotentialThisCapture(CallLoc); } } ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig) { ExprResult Call = BuildCallExpr(Scope, Fn, LParenLoc, ArgExprs, RParenLoc, ExecConfig); if (Call.isInvalid()) return Call; // Diagnose uses of the C++20 "ADL-only template-id call" feature in earlier // language modes. if (auto *ULE = dyn_cast(Fn)) { if (ULE->hasExplicitTemplateArgs() && ULE->decls_begin() == ULE->decls_end()) { Diag(Fn->getExprLoc(), getLangOpts().CPlusPlus20 ? diag::warn_cxx17_compat_adl_only_template_id : diag::ext_adl_only_template_id) << ULE->getName(); } } if (LangOpts.OpenMP) Call = ActOnOpenMPCall(Call, Scope, LParenLoc, ArgExprs, RParenLoc, ExecConfig); return Call; } /// BuildCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig, bool IsExecConfig) { // Since this might be a postfix expression, get rid of ParenListExprs. 
ExprResult Result = MaybeConvertParenListExprToParenExpr(Scope, Fn); if (Result.isInvalid()) return ExprError(); Fn = Result.get(); if (checkArgsForPlaceholders(*this, ArgExprs)) return ExprError(); if (getLangOpts().CPlusPlus) { // If this is a pseudo-destructor expression, build the call immediately. if (isa(Fn)) { if (!ArgExprs.empty()) { // Pseudo-destructor calls should not have any arguments. Diag(Fn->getBeginLoc(), diag::err_pseudo_dtor_call_with_args) << FixItHint::CreateRemoval( SourceRange(ArgExprs.front()->getBeginLoc(), ArgExprs.back()->getEndLoc())); } return CallExpr::Create(Context, Fn, /*Args=*/{}, Context.VoidTy, VK_RValue, RParenLoc); } if (Fn->getType() == Context.PseudoObjectTy) { ExprResult result = CheckPlaceholderExpr(Fn); if (result.isInvalid()) return ExprError(); Fn = result.get(); } // Determine whether this is a dependent call inside a C++ template, // in which case we won't do any semantic analysis now. if (Fn->isTypeDependent() || Expr::hasAnyTypeDependentArguments(ArgExprs)) { if (ExecConfig) { return CUDAKernelCallExpr::Create( Context, Fn, cast(ExecConfig), ArgExprs, Context.DependentTy, VK_RValue, RParenLoc); } else { tryImplicitlyCaptureThisIfImplicitMemberFunctionAccessWithDependentArgs( *this, dyn_cast(Fn->IgnoreParens()), Fn->getBeginLoc()); return CallExpr::Create(Context, Fn, ArgExprs, Context.DependentTy, VK_RValue, RParenLoc); } } // Determine whether this is a call to an object (C++ [over.call.object]). if (Fn->getType()->isRecordType()) return BuildCallToObjectOfClassType(Scope, Fn, LParenLoc, ArgExprs, RParenLoc); if (Fn->getType() == Context.UnknownAnyTy) { ExprResult result = rebuildUnknownAnyFunction(*this, Fn); if (result.isInvalid()) return ExprError(); Fn = result.get(); } if (Fn->getType() == Context.BoundMemberTy) { return BuildCallToMemberFunction(Scope, Fn, LParenLoc, ArgExprs, RParenLoc); } } // Check for overloaded calls. This can happen even in C due to extensions. if (Fn->getType() == Context.OverloadTy) { OverloadExpr::FindResult find = OverloadExpr::find(Fn); // We aren't supposed to apply this logic if there's an '&' involved. if (!find.HasFormOfMemberPointer) { if (Expr::hasAnyTypeDependentArguments(ArgExprs)) return CallExpr::Create(Context, Fn, ArgExprs, Context.DependentTy, VK_RValue, RParenLoc); OverloadExpr *ovl = find.Expression; if (UnresolvedLookupExpr *ULE = dyn_cast(ovl)) return BuildOverloadedCallExpr( Scope, Fn, ULE, LParenLoc, ArgExprs, RParenLoc, ExecConfig, /*AllowTypoCorrection=*/true, find.IsAddressOfOperand); return BuildCallToMemberFunction(Scope, Fn, LParenLoc, ArgExprs, RParenLoc); } } // If we're directly calling a function, get the appropriate declaration. if (Fn->getType() == Context.UnknownAnyTy) { ExprResult result = rebuildUnknownAnyFunction(*this, Fn); if (result.isInvalid()) return ExprError(); Fn = result.get(); } Expr *NakedFn = Fn->IgnoreParens(); bool CallingNDeclIndirectly = false; NamedDecl *NDecl = nullptr; if (UnaryOperator *UnOp = dyn_cast(NakedFn)) { if (UnOp->getOpcode() == UO_AddrOf) { CallingNDeclIndirectly = true; NakedFn = UnOp->getSubExpr()->IgnoreParens(); } } if (auto *DRE = dyn_cast(NakedFn)) { NDecl = DRE->getDecl(); FunctionDecl *FDecl = dyn_cast(NDecl); if (FDecl && FDecl->getBuiltinID()) { // Rewrite the function decl for this builtin by replacing parameters // with no explicit address space with the address space of the arguments // in ArgExprs. 
if ((FDecl = rewriteBuiltinFunctionDecl(this, Context, FDecl, ArgExprs))) { NDecl = FDecl; Fn = DeclRefExpr::Create( Context, FDecl->getQualifierLoc(), SourceLocation(), FDecl, false, SourceLocation(), FDecl->getType(), Fn->getValueKind(), FDecl, nullptr, DRE->isNonOdrUse()); } } } else if (isa(NakedFn)) NDecl = cast(NakedFn)->getMemberDecl(); if (FunctionDecl *FD = dyn_cast_or_null(NDecl)) { if (CallingNDeclIndirectly && !checkAddressOfFunctionIsAvailable( FD, /*Complain=*/true, Fn->getBeginLoc())) return ExprError(); if (getLangOpts().OpenCL && checkOpenCLDisabledDecl(*FD, *Fn)) return ExprError(); checkDirectCallValidity(*this, Fn, FD, ArgExprs); } return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, ArgExprs, RParenLoc, ExecConfig, IsExecConfig); } /// ActOnAsTypeExpr - create a new asType (bitcast) from the arguments. /// /// __builtin_astype( value, dst type ) /// ExprResult Sema::ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc) { ExprValueKind VK = VK_RValue; ExprObjectKind OK = OK_Ordinary; QualType DstTy = GetTypeFromParser(ParsedDestTy); QualType SrcTy = E->getType(); if (Context.getTypeSize(DstTy) != Context.getTypeSize(SrcTy)) return ExprError(Diag(BuiltinLoc, diag::err_invalid_astype_of_different_size) << DstTy << SrcTy << E->getSourceRange()); return new (Context) AsTypeExpr(E, DstTy, VK, OK, BuiltinLoc, RParenLoc); } /// ActOnConvertVectorExpr - create a new convert-vector expression from the /// provided arguments. /// /// __builtin_convertvector( value, dst type ) /// ExprResult Sema::ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc) { TypeSourceInfo *TInfo; GetTypeFromParser(ParsedDestTy, &TInfo); return SemaConvertVectorExpr(E, TInfo, BuiltinLoc, RParenLoc); } /// BuildResolvedCallExpr - Build a call to a resolved expression, /// i.e. an expression not of \p OverloadTy. The expression should /// unary-convert to an expression of function-pointer or /// block-pointer type. /// /// \param NDecl the declaration being called, if available ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef Args, SourceLocation RParenLoc, Expr *Config, bool IsExecConfig, ADLCallKind UsesADL) { FunctionDecl *FDecl = dyn_cast_or_null(NDecl); unsigned BuiltinID = (FDecl ? FDecl->getBuiltinID() : 0); // Functions with 'interrupt' attribute cannot be called directly. if (FDecl && FDecl->hasAttr()) { Diag(Fn->getExprLoc(), diag::err_anyx86_interrupt_called); return ExprError(); } // Interrupt handlers don't save off the VFP regs automatically on ARM, // so there's some risk when calling out to non-interrupt handler functions // that the callee might not preserve them. This is easy to diagnose here, // but can be very challenging to debug. if (auto *Caller = getCurFunctionDecl()) if (Caller->hasAttr()) { bool VFP = Context.getTargetInfo().hasFeature("vfp"); if (VFP && (!FDecl || !FDecl->hasAttr())) Diag(Fn->getExprLoc(), diag::warn_arm_interrupt_calling_convention); } // Promote the function operand. // We special-case function promotion here because we only allow promoting // builtin functions to function pointers in the callee of a call. ExprResult Result; QualType ResultTy; if (BuiltinID && Fn->getType()->isSpecificBuiltinType(BuiltinType::BuiltinFn)) { // Extract the return type from the (builtin) function pointer type. // FIXME Several builtins still have setType in // Sema::CheckBuiltinFunctionCall. 
One should review their definitions in // Builtins.def to ensure they are correct before removing setType calls. QualType FnPtrTy = Context.getPointerType(FDecl->getType()); Result = ImpCastExprToType(Fn, FnPtrTy, CK_BuiltinFnToFnPtr).get(); ResultTy = FDecl->getCallResultType(); } else { Result = CallExprUnaryConversions(Fn); ResultTy = Context.BoolTy; } if (Result.isInvalid()) return ExprError(); Fn = Result.get(); // Check for a valid function type, but only if it is not a builtin which // requires custom type checking. These will be handled by // CheckBuiltinFunctionCall below just after creation of the call expression. const FunctionType *FuncT = nullptr; if (!BuiltinID || !Context.BuiltinInfo.hasCustomTypechecking(BuiltinID)) { retry: if (const PointerType *PT = Fn->getType()->getAs<PointerType>()) { // C99 6.5.2.2p1 - "The expression that denotes the called function shall // have type pointer to function". FuncT = PT->getPointeeType()->getAs<FunctionType>(); if (!FuncT) return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function) << Fn->getType() << Fn->getSourceRange()); } else if (const BlockPointerType *BPT = Fn->getType()->getAs<BlockPointerType>()) { FuncT = BPT->getPointeeType()->castAs<FunctionType>(); } else { // Handle calls to expressions of unknown-any type. if (Fn->getType() == Context.UnknownAnyTy) { ExprResult rewrite = rebuildUnknownAnyFunction(*this, Fn); if (rewrite.isInvalid()) return ExprError(); Fn = rewrite.get(); goto retry; } return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function) << Fn->getType() << Fn->getSourceRange()); } } // Get the number of parameters in the function prototype, if any. // We will allocate space for max(Args.size(), NumParams) arguments // in the call expression. const auto *Proto = dyn_cast_or_null<FunctionProtoType>(FuncT); unsigned NumParams = Proto ? Proto->getNumParams() : 0; CallExpr *TheCall; if (Config) { assert(UsesADL == ADLCallKind::NotADL && "CUDAKernelCallExpr should not use ADL"); TheCall = CUDAKernelCallExpr::Create(Context, Fn, cast<CallExpr>(Config), Args, ResultTy, VK_RValue, RParenLoc, NumParams); } else { TheCall = CallExpr::Create(Context, Fn, Args, ResultTy, VK_RValue, RParenLoc, NumParams, UsesADL); } if (!getLangOpts().CPlusPlus) { // Forget about the nulled arguments since typo correction // does not handle them well. TheCall->shrinkNumArgs(Args.size()); // C cannot always handle TypoExpr nodes in builtin calls and direct // function calls as their argument checking doesn't necessarily handle // dependent types properly, so make sure any TypoExprs have been // dealt with. ExprResult Result = CorrectDelayedTyposInExpr(TheCall); if (!Result.isUsable()) return ExprError(); CallExpr *TheOldCall = TheCall; TheCall = dyn_cast<CallExpr>(Result.get()); bool CorrectedTypos = TheCall != TheOldCall; if (!TheCall) return Result; Args = llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()); // A new call expression node was created if some typos were corrected. // However it may not have been constructed with enough storage. In this // case, rebuild the node with enough storage. The waste of space is // immaterial since this only happens when some typos were corrected. if (CorrectedTypos && Args.size() < NumParams) { if (Config) TheCall = CUDAKernelCallExpr::Create( Context, Fn, cast<CallExpr>(Config), Args, ResultTy, VK_RValue, RParenLoc, NumParams); else TheCall = CallExpr::Create(Context, Fn, Args, ResultTy, VK_RValue, RParenLoc, NumParams, UsesADL); } // We can now handle the nulled arguments for the default arguments.
TheCall->setNumArgsUnsafe(std::max(Args.size(), NumParams)); } // Bail out early if calling a builtin with custom type checking. if (BuiltinID && Context.BuiltinInfo.hasCustomTypechecking(BuiltinID)) return CheckBuiltinFunctionCall(FDecl, BuiltinID, TheCall); if (getLangOpts().CUDA) { if (Config) { // CUDA: Kernel calls must be to global functions if (FDecl && !FDecl->hasAttr<CUDAGlobalAttr>()) return ExprError(Diag(LParenLoc,diag::err_kern_call_not_global_function) << FDecl << Fn->getSourceRange()); // CUDA: Kernel function must have 'void' return type if (!FuncT->getReturnType()->isVoidType() && !FuncT->getReturnType()->getAs<AutoType>() && !FuncT->getReturnType()->isInstantiationDependentType()) return ExprError(Diag(LParenLoc, diag::err_kern_type_not_void_return) << Fn->getType() << Fn->getSourceRange()); } else { // CUDA: Calls to global functions must be configured if (FDecl && FDecl->hasAttr<CUDAGlobalAttr>()) return ExprError(Diag(LParenLoc, diag::err_global_call_not_config) << FDecl << Fn->getSourceRange()); } } // Check for a valid return type if (CheckCallReturnType(FuncT->getReturnType(), Fn->getBeginLoc(), TheCall, FDecl)) return ExprError(); // We know the result type of the call, set it. TheCall->setType(FuncT->getCallResultType(Context)); TheCall->setValueKind(Expr::getValueKindForType(FuncT->getReturnType())); if (Proto) { if (ConvertArgumentsForCall(TheCall, Fn, FDecl, Proto, Args, RParenLoc, IsExecConfig)) return ExprError(); } else { assert(isa<FunctionNoProtoType>(FuncT) && "Unknown FunctionType!"); if (FDecl) { // Check if we have too few/too many template arguments, based // on our knowledge of the function definition. const FunctionDecl *Def = nullptr; if (FDecl->hasBody(Def) && Args.size() != Def->param_size()) { Proto = Def->getType()->getAs<FunctionProtoType>(); if (!Proto || !(Proto->isVariadic() && Args.size() >= Def->param_size())) Diag(RParenLoc, diag::warn_call_wrong_number_of_arguments) << (Args.size() > Def->param_size()) << FDecl << Fn->getSourceRange(); } // If the function we're calling isn't a function prototype, but we have // a function prototype from a prior declaration, use that prototype. if (!FDecl->hasPrototype()) Proto = FDecl->getType()->getAs<FunctionProtoType>(); } // Promote the arguments (C99 6.5.2.2p6). for (unsigned i = 0, e = Args.size(); i != e; i++) { Expr *Arg = Args[i]; if (Proto && i < Proto->getNumParams()) { InitializedEntity Entity = InitializedEntity::InitializeParameter( Context, Proto->getParamType(i), Proto->isParamConsumed(i)); ExprResult ArgE = PerformCopyInitialization(Entity, SourceLocation(), Arg); if (ArgE.isInvalid()) return true; Arg = ArgE.getAs<Expr>(); } else { ExprResult ArgE = DefaultArgumentPromotion(Arg); if (ArgE.isInvalid()) return true; Arg = ArgE.getAs<Expr>(); } if (RequireCompleteType(Arg->getBeginLoc(), Arg->getType(), diag::err_call_incomplete_argument, Arg)) return ExprError(); TheCall->setArg(i, Arg); } } if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(FDecl)) if (!Method->isStatic()) return ExprError(Diag(LParenLoc, diag::err_member_call_without_object) << Fn->getSourceRange()); // Check for sentinels if (NDecl) DiagnoseSentinelCalls(NDecl, LParenLoc, Args); // Warn for unions passing across security boundary (CMSE). if (FuncT != nullptr && FuncT->getCmseNSCallAttr()) { for (unsigned i = 0, e = Args.size(); i != e; i++) { if (const auto *RT = dyn_cast<RecordType>(Args[i]->getType().getCanonicalType())) { if (RT->getDecl()->isOrContainsUnion()) Diag(Args[i]->getBeginLoc(), diag::warn_cmse_nonsecure_union) << 0 << i; } } } // Do special checking on direct calls to functions.
if (FDecl) { if (CheckFunctionCall(FDecl, TheCall, Proto)) return ExprError(); checkFortifiedBuiltinMemoryFunction(FDecl, TheCall); if (BuiltinID) return CheckBuiltinFunctionCall(FDecl, BuiltinID, TheCall); } else if (NDecl) { if (CheckPointerCall(NDecl, TheCall, Proto)) return ExprError(); } else { if (CheckOtherCall(TheCall, Proto)) return ExprError(); } return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall), FDecl); } ExprResult Sema::ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr) { assert(Ty && "ActOnCompoundLiteral(): missing type"); assert(InitExpr && "ActOnCompoundLiteral(): missing expression"); TypeSourceInfo *TInfo; QualType literalType = GetTypeFromParser(Ty, &TInfo); if (!TInfo) TInfo = Context.getTrivialTypeSourceInfo(literalType); return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, InitExpr); } ExprResult Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr) { QualType literalType = TInfo->getType(); if (literalType->isArrayType()) { if (RequireCompleteSizedType( LParenLoc, Context.getBaseElementType(literalType), diag::err_array_incomplete_or_sizeless_type, SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd()))) return ExprError(); if (literalType->isVariableArrayType()) return ExprError(Diag(LParenLoc, diag::err_variable_object_no_init) << SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd())); } else if (!literalType->isDependentType() && RequireCompleteType(LParenLoc, literalType, diag::err_typecheck_decl_incomplete_type, SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd()))) return ExprError(); InitializedEntity Entity = InitializedEntity::InitializeCompoundLiteralInit(TInfo); InitializationKind Kind = InitializationKind::CreateCStyleCast(LParenLoc, SourceRange(LParenLoc, RParenLoc), /*InitList=*/true); InitializationSequence InitSeq(*this, Entity, Kind, LiteralExpr); ExprResult Result = InitSeq.Perform(*this, Entity, Kind, LiteralExpr, &literalType); if (Result.isInvalid()) return ExprError(); LiteralExpr = Result.get(); bool isFileScope = !CurContext->isFunctionOrMethod(); // In C, compound literals are l-values for some reason. // For GCC compatibility, in C++, file-scope array compound literals with // constant initializers are also l-values, and compound literals are // otherwise prvalues. // // (GCC also treats C++ list-initialized file-scope array prvalues with // constant initializers as l-values, but that's non-conforming, so we don't // follow it there.) // // FIXME: It would be better to handle the lvalue cases as materializing and // lifetime-extending a temporary object, but our materialized temporaries // representation only supports lifetime extension from a variable, not "out // of thin air". // FIXME: For C++, we might want to instead lifetime-extend only if a pointer // is bound to the result of applying array-to-pointer decay to the compound // literal. // FIXME: GCC supports compound literals of reference type, which should // obviously have a value kind derived from the kind of reference involved. ExprValueKind VK = (getLangOpts().CPlusPlus && !(isFileScope && literalType->isArrayType())) ? 
VK_RValue : VK_LValue; if (isFileScope) if (auto ILE = dyn_cast(LiteralExpr)) for (unsigned i = 0, j = ILE->getNumInits(); i != j; i++) { Expr *Init = ILE->getInit(i); ILE->setInit(i, ConstantExpr::Create(Context, Init)); } auto *E = new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType, VK, LiteralExpr, isFileScope); if (isFileScope) { if (!LiteralExpr->isTypeDependent() && !LiteralExpr->isValueDependent() && !literalType->isDependentType()) // C99 6.5.2.5p3 if (CheckForConstantInitializer(LiteralExpr, literalType)) return ExprError(); } else if (literalType.getAddressSpace() != LangAS::opencl_private && literalType.getAddressSpace() != LangAS::Default) { // Embedded-C extensions to C99 6.5.2.5: // "If the compound literal occurs inside the body of a function, the // type name shall not be qualified by an address-space qualifier." Diag(LParenLoc, diag::err_compound_literal_with_address_space) << SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd()); return ExprError(); } if (!isFileScope && !getLangOpts().CPlusPlus) { // Compound literals that have automatic storage duration are destroyed at // the end of the scope in C; in C++, they're just temporaries. // Emit diagnostics if it is or contains a C union type that is non-trivial // to destruct. if (E->getType().hasNonTrivialToPrimitiveDestructCUnion()) checkNonTrivialCUnion(E->getType(), E->getExprLoc(), NTCUC_CompoundLiteral, NTCUK_Destruct); // Diagnose jumps that enter or exit the lifetime of the compound literal. if (literalType.isDestructedType()) { Cleanup.setExprNeedsCleanups(true); ExprCleanupObjects.push_back(E); getCurFunction()->setHasBranchProtectedScope(); } } if (E->getType().hasNonTrivialToPrimitiveDefaultInitializeCUnion() || E->getType().hasNonTrivialToPrimitiveCopyCUnion()) checkNonTrivialCUnionInInitializer(E->getInitializer(), E->getInitializer()->getExprLoc()); return MaybeBindToTemporary(E); } ExprResult Sema::ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc) { // Only produce each kind of designated initialization diagnostic once. SourceLocation FirstDesignator; bool DiagnosedArrayDesignator = false; bool DiagnosedNestedDesignator = false; bool DiagnosedMixedDesignator = false; // Check that any designated initializers are syntactically valid in the // current language mode. 
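  // Illustrative forms (not in the original source) of what the loop below
  // diagnoses in C++ mode:
  //   { [1] = 2 }    -> ext_designated_init_array   (array designator)
  //   { .a.b = 3 }   -> ext_designated_init_nested  (nested designator)
  //   { .x = 1, 2 }  -> ext_designated_init_mixed   (designated and
  //                                                  non-designated entries)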
  for (unsigned I = 0, E = InitArgList.size(); I != E; ++I) {
    if (auto *DIE = dyn_cast<DesignatedInitExpr>(InitArgList[I])) {
      if (FirstDesignator.isInvalid())
        FirstDesignator = DIE->getBeginLoc();

      if (!getLangOpts().CPlusPlus)
        break;

      if (!DiagnosedNestedDesignator && DIE->size() > 1) {
        DiagnosedNestedDesignator = true;
        Diag(DIE->getBeginLoc(), diag::ext_designated_init_nested)
            << DIE->getDesignatorsSourceRange();
      }

      for (auto &Desig : DIE->designators()) {
        if (!Desig.isFieldDesignator() && !DiagnosedArrayDesignator) {
          DiagnosedArrayDesignator = true;
          Diag(Desig.getBeginLoc(), diag::ext_designated_init_array)
              << Desig.getSourceRange();
        }
      }

      if (!DiagnosedMixedDesignator &&
          !isa<DesignatedInitExpr>(InitArgList[0])) {
        DiagnosedMixedDesignator = true;
        Diag(DIE->getBeginLoc(), diag::ext_designated_init_mixed)
            << DIE->getSourceRange();
        Diag(InitArgList[0]->getBeginLoc(), diag::note_designated_init_mixed)
            << InitArgList[0]->getSourceRange();
      }
    } else if (getLangOpts().CPlusPlus && !DiagnosedMixedDesignator &&
               isa<DesignatedInitExpr>(InitArgList[0])) {
      DiagnosedMixedDesignator = true;
      auto *DIE = cast<DesignatedInitExpr>(InitArgList[0]);
      Diag(DIE->getBeginLoc(), diag::ext_designated_init_mixed)
          << DIE->getSourceRange();
      Diag(InitArgList[I]->getBeginLoc(), diag::note_designated_init_mixed)
          << InitArgList[I]->getSourceRange();
    }
  }

  if (FirstDesignator.isValid()) {
    // Only diagnose designated initialization as a C++20 extension if we
    // didn't already diagnose use of (non-C++20) C99 designator syntax.
    if (getLangOpts().CPlusPlus && !DiagnosedArrayDesignator &&
        !DiagnosedNestedDesignator && !DiagnosedMixedDesignator) {
      Diag(FirstDesignator, getLangOpts().CPlusPlus20
                                ? diag::warn_cxx17_compat_designated_init
                                : diag::ext_cxx_designated_init);
    } else if (!getLangOpts().CPlusPlus && !getLangOpts().C99) {
      Diag(FirstDesignator, diag::ext_designated_init);
    }
  }

  return BuildInitList(LBraceLoc, InitArgList, RBraceLoc);
}

ExprResult Sema::BuildInitList(SourceLocation LBraceLoc,
                               MultiExprArg InitArgList,
                               SourceLocation RBraceLoc) {
  // Semantic analysis for initializers is done by ActOnDeclarator() and
  // CheckInitializer() - it requires knowledge of the object being
  // initialized.

  // Immediately handle non-overload placeholders. Overloads can be
  // resolved contextually, but everything else here can't.
  for (unsigned I = 0, E = InitArgList.size(); I != E; ++I) {
    if (InitArgList[I]->getType()->isNonOverloadPlaceholderType()) {
      ExprResult result = CheckPlaceholderExpr(InitArgList[I]);

      // Ignore failures; dropping the entire initializer list because
      // of one failure would be terrible for indexing/etc.
      if (result.isInvalid()) continue;

      InitArgList[I] = result.get();
    }
  }

  InitListExpr *E = new (Context) InitListExpr(Context, LBraceLoc, InitArgList,
                                               RBraceLoc);
  E->setType(Context.VoidTy); // FIXME: just a place holder for now.
  return E;
}

/// Do an explicit extend of the given block pointer if we're in ARC.
void Sema::maybeExtendBlockObject(ExprResult &E) {
  assert(E.get()->getType()->isBlockPointerType());
  assert(E.get()->isRValue());

  // Only do this in an r-value context.
  if (!getLangOpts().ObjCAutoRefCount) return;

  E = ImplicitCastExpr::Create(Context, E.get()->getType(),
                               CK_ARCExtendBlockObject, E.get(),
                               /*base path*/ nullptr, VK_RValue);
  Cleanup.setExprNeedsCleanups(true);
}

/// Prepare a conversion of the given expression to an ObjC object
/// pointer type.
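/// (Illustrative note, not in the original source: a block pointer operand is
/// first extended via maybeExtendBlockObject() under ARC and then converted
/// with CK_BlockPointerToObjCPointerCast; a plain C pointer uses
/// CK_CPointerToObjCPointerCast.)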
CastKind Sema::PrepareCastToObjCObjectPointer(ExprResult &E) { QualType type = E.get()->getType(); if (type->isObjCObjectPointerType()) { return CK_BitCast; } else if (type->isBlockPointerType()) { maybeExtendBlockObject(E); return CK_BlockPointerToObjCPointerCast; } else { assert(type->isPointerType()); return CK_CPointerToObjCPointerCast; } } /// Prepares for a scalar cast, performing all the necessary stages /// except the final cast and returning the kind required. CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) { // Both Src and Dest are scalar types, i.e. arithmetic or pointer. // Also, callers should have filtered out the invalid cases with // pointers. Everything else should be possible. QualType SrcTy = Src.get()->getType(); if (Context.hasSameUnqualifiedType(SrcTy, DestTy)) return CK_NoOp; switch (Type::ScalarTypeKind SrcKind = SrcTy->getScalarTypeKind()) { case Type::STK_MemberPointer: llvm_unreachable("member pointer type in C"); case Type::STK_CPointer: case Type::STK_BlockPointer: case Type::STK_ObjCObjectPointer: switch (DestTy->getScalarTypeKind()) { case Type::STK_CPointer: { LangAS SrcAS = SrcTy->getPointeeType().getAddressSpace(); LangAS DestAS = DestTy->getPointeeType().getAddressSpace(); if (SrcAS != DestAS) return CK_AddressSpaceConversion; if (Context.hasCvrSimilarType(SrcTy, DestTy)) return CK_NoOp; return CK_BitCast; } case Type::STK_BlockPointer: return (SrcKind == Type::STK_BlockPointer ? CK_BitCast : CK_AnyPointerToBlockPointerCast); case Type::STK_ObjCObjectPointer: if (SrcKind == Type::STK_ObjCObjectPointer) return CK_BitCast; if (SrcKind == Type::STK_CPointer) return CK_CPointerToObjCPointerCast; maybeExtendBlockObject(Src); return CK_BlockPointerToObjCPointerCast; case Type::STK_Bool: return CK_PointerToBoolean; case Type::STK_Integral: return CK_PointerToIntegral; case Type::STK_Floating: case Type::STK_FloatingComplex: case Type::STK_IntegralComplex: case Type::STK_MemberPointer: case Type::STK_FixedPoint: llvm_unreachable("illegal cast from pointer"); } llvm_unreachable("Should have returned before this"); case Type::STK_FixedPoint: switch (DestTy->getScalarTypeKind()) { case Type::STK_FixedPoint: return CK_FixedPointCast; case Type::STK_Bool: return CK_FixedPointToBoolean; case Type::STK_Integral: return CK_FixedPointToIntegral; case Type::STK_Floating: case Type::STK_IntegralComplex: case Type::STK_FloatingComplex: Diag(Src.get()->getExprLoc(), diag::err_unimplemented_conversion_with_fixed_point_type) << DestTy; return CK_IntegralCast; case Type::STK_CPointer: case Type::STK_ObjCObjectPointer: case Type::STK_BlockPointer: case Type::STK_MemberPointer: llvm_unreachable("illegal cast to pointer type"); } llvm_unreachable("Should have returned before this"); case Type::STK_Bool: // casting from bool is like casting from an integer case Type::STK_Integral: switch (DestTy->getScalarTypeKind()) { case Type::STK_CPointer: case Type::STK_ObjCObjectPointer: case Type::STK_BlockPointer: if (Src.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) return CK_NullToPointer; return CK_IntegralToPointer; case Type::STK_Bool: return CK_IntegralToBoolean; case Type::STK_Integral: return CK_IntegralCast; case Type::STK_Floating: return CK_IntegralToFloating; case Type::STK_IntegralComplex: Src = ImpCastExprToType(Src.get(), DestTy->castAs()->getElementType(), CK_IntegralCast); return CK_IntegralRealToComplex; case Type::STK_FloatingComplex: Src = ImpCastExprToType(Src.get(), DestTy->castAs()->getElementType(), CK_IntegralToFloating); 
return CK_FloatingRealToComplex; case Type::STK_MemberPointer: llvm_unreachable("member pointer type in C"); case Type::STK_FixedPoint: return CK_IntegralToFixedPoint; } llvm_unreachable("Should have returned before this"); case Type::STK_Floating: switch (DestTy->getScalarTypeKind()) { case Type::STK_Floating: return CK_FloatingCast; case Type::STK_Bool: return CK_FloatingToBoolean; case Type::STK_Integral: return CK_FloatingToIntegral; case Type::STK_FloatingComplex: Src = ImpCastExprToType(Src.get(), DestTy->castAs()->getElementType(), CK_FloatingCast); return CK_FloatingRealToComplex; case Type::STK_IntegralComplex: Src = ImpCastExprToType(Src.get(), DestTy->castAs()->getElementType(), CK_FloatingToIntegral); return CK_IntegralRealToComplex; case Type::STK_CPointer: case Type::STK_ObjCObjectPointer: case Type::STK_BlockPointer: llvm_unreachable("valid float->pointer cast?"); case Type::STK_MemberPointer: llvm_unreachable("member pointer type in C"); case Type::STK_FixedPoint: Diag(Src.get()->getExprLoc(), diag::err_unimplemented_conversion_with_fixed_point_type) << SrcTy; return CK_IntegralCast; } llvm_unreachable("Should have returned before this"); case Type::STK_FloatingComplex: switch (DestTy->getScalarTypeKind()) { case Type::STK_FloatingComplex: return CK_FloatingComplexCast; case Type::STK_IntegralComplex: return CK_FloatingComplexToIntegralComplex; case Type::STK_Floating: { QualType ET = SrcTy->castAs()->getElementType(); if (Context.hasSameType(ET, DestTy)) return CK_FloatingComplexToReal; Src = ImpCastExprToType(Src.get(), ET, CK_FloatingComplexToReal); return CK_FloatingCast; } case Type::STK_Bool: return CK_FloatingComplexToBoolean; case Type::STK_Integral: Src = ImpCastExprToType(Src.get(), SrcTy->castAs()->getElementType(), CK_FloatingComplexToReal); return CK_FloatingToIntegral; case Type::STK_CPointer: case Type::STK_ObjCObjectPointer: case Type::STK_BlockPointer: llvm_unreachable("valid complex float->pointer cast?"); case Type::STK_MemberPointer: llvm_unreachable("member pointer type in C"); case Type::STK_FixedPoint: Diag(Src.get()->getExprLoc(), diag::err_unimplemented_conversion_with_fixed_point_type) << SrcTy; return CK_IntegralCast; } llvm_unreachable("Should have returned before this"); case Type::STK_IntegralComplex: switch (DestTy->getScalarTypeKind()) { case Type::STK_FloatingComplex: return CK_IntegralComplexToFloatingComplex; case Type::STK_IntegralComplex: return CK_IntegralComplexCast; case Type::STK_Integral: { QualType ET = SrcTy->castAs()->getElementType(); if (Context.hasSameType(ET, DestTy)) return CK_IntegralComplexToReal; Src = ImpCastExprToType(Src.get(), ET, CK_IntegralComplexToReal); return CK_IntegralCast; } case Type::STK_Bool: return CK_IntegralComplexToBoolean; case Type::STK_Floating: Src = ImpCastExprToType(Src.get(), SrcTy->castAs()->getElementType(), CK_IntegralComplexToReal); return CK_IntegralToFloating; case Type::STK_CPointer: case Type::STK_ObjCObjectPointer: case Type::STK_BlockPointer: llvm_unreachable("valid complex int->pointer cast?"); case Type::STK_MemberPointer: llvm_unreachable("member pointer type in C"); case Type::STK_FixedPoint: Diag(Src.get()->getExprLoc(), diag::err_unimplemented_conversion_with_fixed_point_type) << SrcTy; return CK_IntegralCast; } llvm_unreachable("Should have returned before this"); } llvm_unreachable("Unhandled scalar cast"); } static bool breakDownVectorType(QualType type, uint64_t &len, QualType &eltType) { // Vectors are simple. 
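  // Illustrative note (not in the original source): a non-vector real scalar
  // is treated as a one-element "vector" below, which is what allows the
  // lax-compatibility check to compare a scalar against a vector by total
  // storage size.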
if (const VectorType *vecType = type->getAs()) { len = vecType->getNumElements(); eltType = vecType->getElementType(); assert(eltType->isScalarType()); return true; } // We allow lax conversion to and from non-vector types, but only if // they're real types (i.e. non-complex, non-pointer scalar types). if (!type->isRealType()) return false; len = 1; eltType = type; return true; } /// Are the two types lax-compatible vector types? That is, given /// that one of them is a vector, do they have equal storage sizes, /// where the storage size is the number of elements times the element /// size? /// /// This will also return false if either of the types is neither a /// vector nor a real type. bool Sema::areLaxCompatibleVectorTypes(QualType srcTy, QualType destTy) { assert(destTy->isVectorType() || srcTy->isVectorType()); // Disallow lax conversions between scalars and ExtVectors (these // conversions are allowed for other vector types because common headers // depend on them). Most scalar OP ExtVector cases are handled by the // splat path anyway, which does what we want (convert, not bitcast). // What this rules out for ExtVectors is crazy things like char4*float. if (srcTy->isScalarType() && destTy->isExtVectorType()) return false; if (destTy->isScalarType() && srcTy->isExtVectorType()) return false; uint64_t srcLen, destLen; QualType srcEltTy, destEltTy; if (!breakDownVectorType(srcTy, srcLen, srcEltTy)) return false; if (!breakDownVectorType(destTy, destLen, destEltTy)) return false; // ASTContext::getTypeSize will return the size rounded up to a // power of 2, so instead of using that, we need to use the raw // element size multiplied by the element count. uint64_t srcEltSize = Context.getTypeSize(srcEltTy); uint64_t destEltSize = Context.getTypeSize(destEltTy); return (srcLen * srcEltSize == destLen * destEltSize); } /// Is this a legal conversion between two types, one of which is /// known to be a vector type? bool Sema::isLaxVectorConversion(QualType srcTy, QualType destTy) { assert(destTy->isVectorType() || srcTy->isVectorType()); switch (Context.getLangOpts().getLaxVectorConversions()) { case LangOptions::LaxVectorConversionKind::None: return false; case LangOptions::LaxVectorConversionKind::Integer: if (!srcTy->isIntegralOrEnumerationType()) { auto *Vec = srcTy->getAs(); if (!Vec || !Vec->getElementType()->isIntegralOrEnumerationType()) return false; } if (!destTy->isIntegralOrEnumerationType()) { auto *Vec = destTy->getAs(); if (!Vec || !Vec->getElementType()->isIntegralOrEnumerationType()) return false; } // OK, integer (vector) -> integer (vector) bitcast. break; case LangOptions::LaxVectorConversionKind::All: break; } return areLaxCompatibleVectorTypes(srcTy, destTy); } bool Sema::CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind) { assert(VectorTy->isVectorType() && "Not a vector type!"); if (Ty->isVectorType() || Ty->isIntegralType(Context)) { if (!areLaxCompatibleVectorTypes(Ty, VectorTy)) return Diag(R.getBegin(), Ty->isVectorType() ? 
diag::err_invalid_conversion_between_vectors : diag::err_invalid_conversion_between_vector_and_integer) << VectorTy << Ty << R; } else return Diag(R.getBegin(), diag::err_invalid_conversion_between_vector_and_scalar) << VectorTy << Ty << R; Kind = CK_BitCast; return false; } ExprResult Sema::prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr) { QualType DestElemTy = VectorTy->castAs()->getElementType(); if (DestElemTy == SplattedExpr->getType()) return SplattedExpr; assert(DestElemTy->isFloatingType() || DestElemTy->isIntegralOrEnumerationType()); CastKind CK; if (VectorTy->isExtVectorType() && SplattedExpr->getType()->isBooleanType()) { // OpenCL requires that we convert `true` boolean expressions to -1, but // only when splatting vectors. if (DestElemTy->isFloatingType()) { // To avoid having to have a CK_BooleanToSignedFloating cast kind, we cast // in two steps: boolean to signed integral, then to floating. ExprResult CastExprRes = ImpCastExprToType(SplattedExpr, Context.IntTy, CK_BooleanToSignedIntegral); SplattedExpr = CastExprRes.get(); CK = CK_IntegralToFloating; } else { CK = CK_BooleanToSignedIntegral; } } else { ExprResult CastExprRes = SplattedExpr; CK = PrepareScalarCast(CastExprRes, DestElemTy); if (CastExprRes.isInvalid()) return ExprError(); SplattedExpr = CastExprRes.get(); } return ImpCastExprToType(SplattedExpr, DestElemTy, CK); } ExprResult Sema::CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind) { assert(DestTy->isExtVectorType() && "Not an extended vector type!"); QualType SrcTy = CastExpr->getType(); // If SrcTy is a VectorType, the total size must match to explicitly cast to // an ExtVectorType. // In OpenCL, casts between vectors of different types are not allowed. // (See OpenCL 6.2). if (SrcTy->isVectorType()) { if (!areLaxCompatibleVectorTypes(SrcTy, DestTy) || (getLangOpts().OpenCL && !Context.hasSameUnqualifiedType(DestTy, SrcTy))) { Diag(R.getBegin(),diag::err_invalid_conversion_between_ext_vectors) << DestTy << SrcTy << R; return ExprError(); } Kind = CK_BitCast; return CastExpr; } // All non-pointer scalars can be cast to ExtVector type. The appropriate // conversion will take place first from scalar to elt type, and then // splat from elt type to vector. if (SrcTy->isPointerType()) return Diag(R.getBegin(), diag::err_invalid_conversion_between_vector_and_scalar) << DestTy << SrcTy << R; Kind = CK_VectorSplat; return prepareVectorSplat(DestTy, CastExpr); } ExprResult Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr) { assert(!D.isInvalidType() && (CastExpr != nullptr) && "ActOnCastExpr(): missing type or expr"); TypeSourceInfo *castTInfo = GetTypeForDeclaratorCast(D, CastExpr->getType()); if (D.isInvalidType()) return ExprError(); if (getLangOpts().CPlusPlus) { // Check that there are no default arguments (C++ only). CheckExtraCXXDefaultArguments(D); } else { // Make sure any TypoExprs have been dealt with. ExprResult Res = CorrectDelayedTyposInExpr(CastExpr); if (!Res.isUsable()) return ExprError(); CastExpr = Res.get(); } checkUnusedDeclAttributes(D); QualType castType = castTInfo->getType(); Ty = CreateParsedType(castType, castTInfo); bool isVectorLiteral = false; // Check for an altivec or OpenCL literal, // i.e. all the elements are integer constants. 
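  // Illustrative note (not in the original source): this matches the
  // AltiVec/OpenCL vector-literal cast syntax, e.g. "(vector int)(1, 2, 3, 4)"
  // or the single-element form "(vector int)(5)", which is splatted.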
ParenExpr *PE = dyn_cast(CastExpr); ParenListExpr *PLE = dyn_cast(CastExpr); if ((getLangOpts().AltiVec || getLangOpts().ZVector || getLangOpts().OpenCL) && castType->isVectorType() && (PE || PLE)) { if (PLE && PLE->getNumExprs() == 0) { Diag(PLE->getExprLoc(), diag::err_altivec_empty_initializer); return ExprError(); } if (PE || PLE->getNumExprs() == 1) { Expr *E = (PE ? PE->getSubExpr() : PLE->getExpr(0)); if (!E->isTypeDependent() && !E->getType()->isVectorType()) isVectorLiteral = true; } else isVectorLiteral = true; } // If this is a vector initializer, '(' type ')' '(' init, ..., init ')' // then handle it as such. if (isVectorLiteral) return BuildVectorLiteral(LParenLoc, RParenLoc, CastExpr, castTInfo); // If the Expr being casted is a ParenListExpr, handle it specially. // This is not an AltiVec-style cast, so turn the ParenListExpr into a // sequence of BinOp comma operators. if (isa(CastExpr)) { ExprResult Result = MaybeConvertParenListExprToParenExpr(S, CastExpr); if (Result.isInvalid()) return ExprError(); CastExpr = Result.get(); } if (getLangOpts().CPlusPlus && !castType->isVoidType() && !getSourceManager().isInSystemMacro(LParenLoc)) Diag(LParenLoc, diag::warn_old_style_cast) << CastExpr->getSourceRange(); CheckTollFreeBridgeCast(castType, CastExpr); CheckObjCBridgeRelatedCast(castType, CastExpr); DiscardMisalignedMemberAddress(castType.getTypePtr(), CastExpr); return BuildCStyleCastExpr(LParenLoc, castTInfo, RParenLoc, CastExpr); } ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo) { assert((isa(E) || isa(E)) && "Expected paren or paren list expression"); Expr **exprs; unsigned numExprs; Expr *subExpr; SourceLocation LiteralLParenLoc, LiteralRParenLoc; if (ParenListExpr *PE = dyn_cast(E)) { LiteralLParenLoc = PE->getLParenLoc(); LiteralRParenLoc = PE->getRParenLoc(); exprs = PE->getExprs(); numExprs = PE->getNumExprs(); } else { // isa by assertion at function entrance LiteralLParenLoc = cast(E)->getLParen(); LiteralRParenLoc = cast(E)->getRParen(); subExpr = cast(E)->getSubExpr(); exprs = &subExpr; numExprs = 1; } QualType Ty = TInfo->getType(); assert(Ty->isVectorType() && "Expected vector type"); SmallVector initExprs; const VectorType *VTy = Ty->castAs(); unsigned numElems = VTy->getNumElements(); // '(...)' form of vector initialization in AltiVec: the number of // initializers must be one or must match the size of the vector. // If a single value is specified in the initializer then it will be // replicated to all the components of the vector if (VTy->getVectorKind() == VectorType::AltiVecVector) { // The number of initializers must be one or must match the size of the // vector. If a single value is specified in the initializer then it will // be replicated to all the components of the vector if (numExprs == 1) { QualType ElemTy = VTy->getElementType(); ExprResult Literal = DefaultLvalueConversion(exprs[0]); if (Literal.isInvalid()) return ExprError(); Literal = ImpCastExprToType(Literal.get(), ElemTy, PrepareScalarCast(Literal, ElemTy)); return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.get()); } else if (numExprs < numElems) { Diag(E->getExprLoc(), diag::err_incorrect_number_of_vector_initializers); return ExprError(); } else initExprs.append(exprs, exprs + numExprs); } else { // For OpenCL, when the number of initializers is a single value, // it will be replicated to all components of the vector. 
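    // Illustrative example (not in the original source): in OpenCL,
    // "(float4)(1.0f)" replicates 1.0f into all four components.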
if (getLangOpts().OpenCL && VTy->getVectorKind() == VectorType::GenericVector && numExprs == 1) { QualType ElemTy = VTy->getElementType(); ExprResult Literal = DefaultLvalueConversion(exprs[0]); if (Literal.isInvalid()) return ExprError(); Literal = ImpCastExprToType(Literal.get(), ElemTy, PrepareScalarCast(Literal, ElemTy)); return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.get()); } initExprs.append(exprs, exprs + numExprs); } // FIXME: This means that pretty-printing the final AST will produce curly // braces instead of the original commas. InitListExpr *initE = new (Context) InitListExpr(Context, LiteralLParenLoc, initExprs, LiteralRParenLoc); initE->setType(Ty); return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, initE); } /// This is not an AltiVec-style cast or or C++ direct-initialization, so turn /// the ParenListExpr into a sequence of comma binary operators. ExprResult Sema::MaybeConvertParenListExprToParenExpr(Scope *S, Expr *OrigExpr) { ParenListExpr *E = dyn_cast(OrigExpr); if (!E) return OrigExpr; ExprResult Result(E->getExpr(0)); for (unsigned i = 1, e = E->getNumExprs(); i != e && !Result.isInvalid(); ++i) Result = ActOnBinOp(S, E->getExprLoc(), tok::comma, Result.get(), E->getExpr(i)); if (Result.isInvalid()) return ExprError(); return ActOnParenExpr(E->getLParenLoc(), E->getRParenLoc(), Result.get()); } ExprResult Sema::ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val) { return ParenListExpr::Create(Context, L, Val, R); } /// Emit a specialized diagnostic when one expression is a null pointer /// constant and the other is not a pointer. Returns true if a diagnostic is /// emitted. bool Sema::DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc) { Expr *NullExpr = LHSExpr; Expr *NonPointerExpr = RHSExpr; Expr::NullPointerConstantKind NullKind = NullExpr->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull); if (NullKind == Expr::NPCK_NotNull) { NullExpr = RHSExpr; NonPointerExpr = LHSExpr; NullKind = NullExpr->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull); } if (NullKind == Expr::NPCK_NotNull) return false; if (NullKind == Expr::NPCK_ZeroExpression) return false; if (NullKind == Expr::NPCK_ZeroLiteral) { // In this case, check to make sure that we got here from a "NULL" // string in the source code. NullExpr = NullExpr->IgnoreParenImpCasts(); SourceLocation loc = NullExpr->getExprLoc(); if (!findMacroSpelling(loc, "NULL")) return false; } int DiagType = (NullKind == Expr::NPCK_CXX11_nullptr); Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands_null) << NonPointerExpr->getType() << DiagType << NonPointerExpr->getSourceRange(); return true; } /// Return false if the condition expression is valid, true otherwise. static bool checkCondition(Sema &S, Expr *Cond, SourceLocation QuestionLoc) { QualType CondTy = Cond->getType(); // OpenCL v1.1 s6.3.i says the condition cannot be a floating point type. if (S.getLangOpts().OpenCL && CondTy->isFloatingType()) { S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_nonfloat) << CondTy << Cond->getSourceRange(); return true; } // C99 6.5.15p2 if (CondTy->isScalarType()) return false; S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_scalar) << CondTy << Cond->getSourceRange(); return true; } /// Handle when one or both operands are void type. 
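/// (Illustrative note, not in the original source: when only one arm has void
/// type, ext_typecheck_cond_one_void is emitted as an extension and both arms
/// are cast to void, so the whole conditional expression has type void.)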
static QualType checkConditionalVoidType(Sema &S, ExprResult &LHS, ExprResult &RHS) { Expr *LHSExpr = LHS.get(); Expr *RHSExpr = RHS.get(); if (!LHSExpr->getType()->isVoidType()) S.Diag(RHSExpr->getBeginLoc(), diag::ext_typecheck_cond_one_void) << RHSExpr->getSourceRange(); if (!RHSExpr->getType()->isVoidType()) S.Diag(LHSExpr->getBeginLoc(), diag::ext_typecheck_cond_one_void) << LHSExpr->getSourceRange(); LHS = S.ImpCastExprToType(LHS.get(), S.Context.VoidTy, CK_ToVoid); RHS = S.ImpCastExprToType(RHS.get(), S.Context.VoidTy, CK_ToVoid); return S.Context.VoidTy; } /// Return false if the NullExpr can be promoted to PointerTy, /// true otherwise. static bool checkConditionalNullPointer(Sema &S, ExprResult &NullExpr, QualType PointerTy) { if ((!PointerTy->isAnyPointerType() && !PointerTy->isBlockPointerType()) || !NullExpr.get()->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNull)) return true; NullExpr = S.ImpCastExprToType(NullExpr.get(), PointerTy, CK_NullToPointer); return false; } /// Checks compatibility between two pointers and return the resulting /// type. static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc) { QualType LHSTy = LHS.get()->getType(); QualType RHSTy = RHS.get()->getType(); if (S.Context.hasSameType(LHSTy, RHSTy)) { // Two identical pointers types are always compatible. return LHSTy; } QualType lhptee, rhptee; // Get the pointee types. bool IsBlockPointer = false; if (const BlockPointerType *LHSBTy = LHSTy->getAs()) { lhptee = LHSBTy->getPointeeType(); rhptee = RHSTy->castAs()->getPointeeType(); IsBlockPointer = true; } else { lhptee = LHSTy->castAs()->getPointeeType(); rhptee = RHSTy->castAs()->getPointeeType(); } // C99 6.5.15p6: If both operands are pointers to compatible types or to // differently qualified versions of compatible types, the result type is // a pointer to an appropriately qualified version of the composite // type. // Only CVR-qualifiers exist in the standard, and the differently-qualified // clause doesn't make sense for our extensions. E.g. address space 2 should // be incompatible with address space 3: they may live on different devices or // anything. Qualifiers lhQual = lhptee.getQualifiers(); Qualifiers rhQual = rhptee.getQualifiers(); LangAS ResultAddrSpace = LangAS::Default; LangAS LAddrSpace = lhQual.getAddressSpace(); LangAS RAddrSpace = rhQual.getAddressSpace(); // OpenCL v1.1 s6.5 - Conversion between pointers to distinct address // spaces is disallowed. if (lhQual.isAddressSpaceSupersetOf(rhQual)) ResultAddrSpace = LAddrSpace; else if (rhQual.isAddressSpaceSupersetOf(lhQual)) ResultAddrSpace = RAddrSpace; else { S.Diag(Loc, diag::err_typecheck_op_on_nonoverlapping_address_space_pointers) << LHSTy << RHSTy << 2 << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } unsigned MergedCVRQual = lhQual.getCVRQualifiers() | rhQual.getCVRQualifiers(); auto LHSCastKind = CK_BitCast, RHSCastKind = CK_BitCast; lhQual.removeCVRQualifiers(); rhQual.removeCVRQualifiers(); // OpenCL v2.0 specification doesn't extend compatibility of type qualifiers // (C99 6.7.3) for address spaces. 
We assume that the check should behave in // the same manner as it's defined for CVR qualifiers, so for OpenCL two // qual types are compatible iff // * corresponded types are compatible // * CVR qualifiers are equal // * address spaces are equal // Thus for conditional operator we merge CVR and address space unqualified // pointees and if there is a composite type we return a pointer to it with // merged qualifiers. LHSCastKind = LAddrSpace == ResultAddrSpace ? CK_BitCast : CK_AddressSpaceConversion; RHSCastKind = RAddrSpace == ResultAddrSpace ? CK_BitCast : CK_AddressSpaceConversion; lhQual.removeAddressSpace(); rhQual.removeAddressSpace(); lhptee = S.Context.getQualifiedType(lhptee.getUnqualifiedType(), lhQual); rhptee = S.Context.getQualifiedType(rhptee.getUnqualifiedType(), rhQual); QualType CompositeTy = S.Context.mergeTypes(lhptee, rhptee); if (CompositeTy.isNull()) { // In this situation, we assume void* type. No especially good // reason, but this is what gcc does, and we do have to pick // to get a consistent AST. QualType incompatTy; incompatTy = S.Context.getPointerType( S.Context.getAddrSpaceQualType(S.Context.VoidTy, ResultAddrSpace)); LHS = S.ImpCastExprToType(LHS.get(), incompatTy, LHSCastKind); RHS = S.ImpCastExprToType(RHS.get(), incompatTy, RHSCastKind); // FIXME: For OpenCL the warning emission and cast to void* leaves a room // for casts between types with incompatible address space qualifiers. // For the following code the compiler produces casts between global and // local address spaces of the corresponded innermost pointees: // local int *global *a; // global int *global *b; // a = (0 ? a : b); // see C99 6.5.16.1.p1. S.Diag(Loc, diag::ext_typecheck_cond_incompatible_pointers) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return incompatTy; } // The pointer types are compatible. // In case of OpenCL ResultTy should have the address space qualifier // which is a superset of address spaces of both the 2nd and the 3rd // operands of the conditional operator. QualType ResultTy = [&, ResultAddrSpace]() { if (S.getLangOpts().OpenCL) { Qualifiers CompositeQuals = CompositeTy.getQualifiers(); CompositeQuals.setAddressSpace(ResultAddrSpace); return S.Context .getQualifiedType(CompositeTy.getUnqualifiedType(), CompositeQuals) .withCVRQualifiers(MergedCVRQual); } return CompositeTy.withCVRQualifiers(MergedCVRQual); }(); if (IsBlockPointer) ResultTy = S.Context.getBlockPointerType(ResultTy); else ResultTy = S.Context.getPointerType(ResultTy); LHS = S.ImpCastExprToType(LHS.get(), ResultTy, LHSCastKind); RHS = S.ImpCastExprToType(RHS.get(), ResultTy, RHSCastKind); return ResultTy; } /// Return the resulting type when the operands are both block pointers. static QualType checkConditionalBlockPointerCompatibility(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc) { QualType LHSTy = LHS.get()->getType(); QualType RHSTy = RHS.get()->getType(); if (!LHSTy->isBlockPointerType() || !RHSTy->isBlockPointerType()) { if (LHSTy->isVoidPointerType() || RHSTy->isVoidPointerType()) { QualType destType = S.Context.getPointerType(S.Context.VoidTy); LHS = S.ImpCastExprToType(LHS.get(), destType, CK_BitCast); RHS = S.ImpCastExprToType(RHS.get(), destType, CK_BitCast); return destType; } S.Diag(Loc, diag::err_typecheck_cond_incompatible_operands) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } // We have 2 block pointer types. 
return checkConditionalPointerCompatibility(S, LHS, RHS, Loc); } /// Return the resulting type when the operands are both pointers. static QualType checkConditionalObjectPointersCompatibility(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc) { // get the pointer types QualType LHSTy = LHS.get()->getType(); QualType RHSTy = RHS.get()->getType(); // get the "pointed to" types QualType lhptee = LHSTy->castAs()->getPointeeType(); QualType rhptee = RHSTy->castAs()->getPointeeType(); // ignore qualifiers on void (C99 6.5.15p3, clause 6) if (lhptee->isVoidType() && rhptee->isIncompleteOrObjectType()) { // Figure out necessary qualifiers (C99 6.5.15p6) QualType destPointee = S.Context.getQualifiedType(lhptee, rhptee.getQualifiers()); QualType destType = S.Context.getPointerType(destPointee); // Add qualifiers if necessary. LHS = S.ImpCastExprToType(LHS.get(), destType, CK_NoOp); // Promote to void*. RHS = S.ImpCastExprToType(RHS.get(), destType, CK_BitCast); return destType; } if (rhptee->isVoidType() && lhptee->isIncompleteOrObjectType()) { QualType destPointee = S.Context.getQualifiedType(rhptee, lhptee.getQualifiers()); QualType destType = S.Context.getPointerType(destPointee); // Add qualifiers if necessary. RHS = S.ImpCastExprToType(RHS.get(), destType, CK_NoOp); // Promote to void*. LHS = S.ImpCastExprToType(LHS.get(), destType, CK_BitCast); return destType; } return checkConditionalPointerCompatibility(S, LHS, RHS, Loc); } /// Return false if the first expression is not an integer and the second /// expression is not a pointer, true otherwise. static bool checkPointerIntegerMismatch(Sema &S, ExprResult &Int, Expr* PointerExpr, SourceLocation Loc, bool IsIntFirstExpr) { if (!PointerExpr->getType()->isPointerType() || !Int.get()->getType()->isIntegerType()) return false; Expr *Expr1 = IsIntFirstExpr ? Int.get() : PointerExpr; Expr *Expr2 = IsIntFirstExpr ? PointerExpr : Int.get(); S.Diag(Loc, diag::ext_typecheck_cond_pointer_integer_mismatch) << Expr1->getType() << Expr2->getType() << Expr1->getSourceRange() << Expr2->getSourceRange(); Int = S.ImpCastExprToType(Int.get(), PointerExpr->getType(), CK_IntegralToPointer); return true; } /// Simple conversion between integer and floating point types. /// /// Used when handling the OpenCL conditional operator where the /// condition is a vector while the other operands are scalar. /// /// OpenCL v1.1 s6.3.i and s6.11.6 together require that the scalar /// types are either integer or floating type. Between the two /// operands, the type with the higher rank is defined as the "result /// type". The other operand needs to be promoted to the same type. No /// other type promotion is allowed. We cannot use /// UsualArithmeticConversions() for this purpose, since it always /// promotes promotable types. static QualType OpenCLArithmeticConversions(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc) { LHS = S.DefaultFunctionArrayLvalueConversion(LHS.get()); if (LHS.isInvalid()) return QualType(); RHS = S.DefaultFunctionArrayLvalueConversion(RHS.get()); if (RHS.isInvalid()) return QualType(); // For conversion purposes, we ignore any qualifiers. // For example, "const float" and "float" are equivalent. 
QualType LHSType = S.Context.getCanonicalType(LHS.get()->getType()).getUnqualifiedType(); QualType RHSType = S.Context.getCanonicalType(RHS.get()->getType()).getUnqualifiedType(); if (!LHSType->isIntegerType() && !LHSType->isRealFloatingType()) { S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_int_float) << LHSType << LHS.get()->getSourceRange(); return QualType(); } if (!RHSType->isIntegerType() && !RHSType->isRealFloatingType()) { S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_int_float) << RHSType << RHS.get()->getSourceRange(); return QualType(); } // If both types are identical, no conversion is needed. if (LHSType == RHSType) return LHSType; // Now handle "real" floating types (i.e. float, double, long double). if (LHSType->isRealFloatingType() || RHSType->isRealFloatingType()) return handleFloatConversion(S, LHS, RHS, LHSType, RHSType, /*IsCompAssign = */ false); // Finally, we have two differing integer types. return handleIntegerConversion (S, LHS, RHS, LHSType, RHSType, /*IsCompAssign = */ false); } /// Convert scalar operands to a vector that matches the /// condition in length. /// /// Used when handling the OpenCL conditional operator where the /// condition is a vector while the other operands are scalar. /// /// We first compute the "result type" for the scalar operands /// according to OpenCL v1.1 s6.3.i. Both operands are then converted /// into a vector of that type where the length matches the condition /// vector type. s6.11.6 requires that the element types of the result /// and the condition must have the same number of bits. static QualType OpenCLConvertScalarsToVectors(Sema &S, ExprResult &LHS, ExprResult &RHS, QualType CondTy, SourceLocation QuestionLoc) { QualType ResTy = OpenCLArithmeticConversions(S, LHS, RHS, QuestionLoc); if (ResTy.isNull()) return QualType(); const VectorType *CV = CondTy->getAs(); assert(CV); // Determine the vector result type unsigned NumElements = CV->getNumElements(); QualType VectorTy = S.Context.getExtVectorType(ResTy, NumElements); // Ensure that all types have the same number of bits if (S.Context.getTypeSize(CV->getElementType()) != S.Context.getTypeSize(ResTy)) { // Since VectorTy is created internally, it does not pretty print // with an OpenCL name. Instead, we just print a description. std::string EleTyName = ResTy.getUnqualifiedType().getAsString(); SmallString<64> Str; llvm::raw_svector_ostream OS(Str); OS << "(vector of " << NumElements << " '" << EleTyName << "' values)"; S.Diag(QuestionLoc, diag::err_conditional_vector_element_size) << CondTy << OS.str(); return QualType(); } // Convert operands to the vector result type LHS = S.ImpCastExprToType(LHS.get(), VectorTy, CK_VectorSplat); RHS = S.ImpCastExprToType(RHS.get(), VectorTy, CK_VectorSplat); return VectorTy; } /// Return false if this is a valid OpenCL condition vector static bool checkOpenCLConditionVector(Sema &S, Expr *Cond, SourceLocation QuestionLoc) { // OpenCL v1.1 s6.11.6 says the elements of the vector must be of // integral type. const VectorType *CondTy = Cond->getType()->getAs(); assert(CondTy); QualType EleTy = CondTy->getElementType(); if (EleTy->isIntegerType()) return false; S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_nonfloat) << Cond->getType() << Cond->getSourceRange(); return true; } /// Return false if the vector condition type and the vector /// result type are compatible. /// /// OpenCL v1.1 s6.11.6 requires that both vector types have the same /// number of elements, and their element types have the same number /// of bits. 
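/// (Illustrative example, not in the original source: an int4 condition with
/// a float4 result is accepted, assuming 32-bit int and float, while an int4
/// condition with an int8 or double4 result is rejected with
/// err_conditional_vector_size or err_conditional_vector_element_size.)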
static bool checkVectorResult(Sema &S, QualType CondTy, QualType VecResTy, SourceLocation QuestionLoc) { const VectorType *CV = CondTy->getAs(); const VectorType *RV = VecResTy->getAs(); assert(CV && RV); if (CV->getNumElements() != RV->getNumElements()) { S.Diag(QuestionLoc, diag::err_conditional_vector_size) << CondTy << VecResTy; return true; } QualType CVE = CV->getElementType(); QualType RVE = RV->getElementType(); if (S.Context.getTypeSize(CVE) != S.Context.getTypeSize(RVE)) { S.Diag(QuestionLoc, diag::err_conditional_vector_element_size) << CondTy << VecResTy; return true; } return false; } /// Return the resulting type for the conditional operator in /// OpenCL (aka "ternary selection operator", OpenCL v1.1 /// s6.3.i) when the condition is a vector type. static QualType OpenCLCheckVectorConditional(Sema &S, ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc) { Cond = S.DefaultFunctionArrayLvalueConversion(Cond.get()); if (Cond.isInvalid()) return QualType(); QualType CondTy = Cond.get()->getType(); if (checkOpenCLConditionVector(S, Cond.get(), QuestionLoc)) return QualType(); // If either operand is a vector then find the vector type of the // result as specified in OpenCL v1.1 s6.3.i. if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { QualType VecResTy = S.CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false, /*AllowBothBool*/true, /*AllowBoolConversions*/false); if (VecResTy.isNull()) return QualType(); // The result type must match the condition type as specified in // OpenCL v1.1 s6.11.6. if (checkVectorResult(S, CondTy, VecResTy, QuestionLoc)) return QualType(); return VecResTy; } // Both operands are scalar. return OpenCLConvertScalarsToVectors(S, LHS, RHS, CondTy, QuestionLoc); } /// Return true if the Expr is block type static bool checkBlockType(Sema &S, const Expr *E) { if (const CallExpr *CE = dyn_cast(E)) { QualType Ty = CE->getCallee()->getType(); if (Ty->isBlockPointerType()) { S.Diag(E->getExprLoc(), diag::err_opencl_ternary_with_block); return true; } } return false; } /// Note that LHS is not null here, even if this is the gnu "x ?: y" extension. /// In that case, LHS = cond. /// C99 6.5.15 QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc) { ExprResult LHSResult = CheckPlaceholderExpr(LHS.get()); if (!LHSResult.isUsable()) return QualType(); LHS = LHSResult; ExprResult RHSResult = CheckPlaceholderExpr(RHS.get()); if (!RHSResult.isUsable()) return QualType(); RHS = RHSResult; // C++ is sufficiently different to merit its own checker. if (getLangOpts().CPlusPlus) return CXXCheckConditionalOperands(Cond, LHS, RHS, VK, OK, QuestionLoc); VK = VK_RValue; OK = OK_Ordinary; // The OpenCL operator with a vector condition is sufficiently // different to merit its own checker. if ((getLangOpts().OpenCL && Cond.get()->getType()->isVectorType()) || Cond.get()->getType()->isExtVectorType()) return OpenCLCheckVectorConditional(*this, Cond, LHS, RHS, QuestionLoc); // First, check the condition. Cond = UsualUnaryConversions(Cond.get()); if (Cond.isInvalid()) return QualType(); if (checkCondition(*this, Cond.get(), QuestionLoc)) return QualType(); // Now check the two expressions. 
if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false, /*AllowBothBool*/true, /*AllowBoolConversions*/false); QualType ResTy = UsualArithmeticConversions(LHS, RHS, QuestionLoc, ACK_Conditional); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); QualType LHSTy = LHS.get()->getType(); QualType RHSTy = RHS.get()->getType(); // Diagnose attempts to convert between __float128 and long double where // such conversions currently can't be handled. if (unsupportedTypeConversion(*this, LHSTy, RHSTy)) { Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } // OpenCL v2.0 s6.12.5 - Blocks cannot be used as expressions of the ternary // selection operator (?:). if (getLangOpts().OpenCL && (checkBlockType(*this, LHS.get()) | checkBlockType(*this, RHS.get()))) { return QualType(); } // If both operands have arithmetic type, do the usual arithmetic conversions // to find a common type: C99 6.5.15p3,5. if (LHSTy->isArithmeticType() && RHSTy->isArithmeticType()) { LHS = ImpCastExprToType(LHS.get(), ResTy, PrepareScalarCast(LHS, ResTy)); RHS = ImpCastExprToType(RHS.get(), ResTy, PrepareScalarCast(RHS, ResTy)); return ResTy; } // And if they're both bfloat (which isn't arithmetic), that's fine too. if (LHSTy->isBFloat16Type() && RHSTy->isBFloat16Type()) { return LHSTy; } // If both operands are the same structure or union type, the result is that // type. if (const RecordType *LHSRT = LHSTy->getAs()) { // C99 6.5.15p3 if (const RecordType *RHSRT = RHSTy->getAs()) if (LHSRT->getDecl() == RHSRT->getDecl()) // "If both the operands have structure or union type, the result has // that type." This implies that CV qualifiers are dropped. return LHSTy.getUnqualifiedType(); // FIXME: Type of conditional expression must be complete in C mode. } // C99 6.5.15p5: "If both operands have void type, the result has void type." // The following || allows only one side to be void (a GCC-ism). if (LHSTy->isVoidType() || RHSTy->isVoidType()) { return checkConditionalVoidType(*this, LHS, RHS); } // C99 6.5.15p6 - "if one operand is a null pointer constant, the result has // the type of the other operand." if (!checkConditionalNullPointer(*this, RHS, LHSTy)) return LHSTy; if (!checkConditionalNullPointer(*this, LHS, RHSTy)) return RHSTy; // All objective-c pointer type analysis is done here. QualType compositeType = FindCompositeObjCPointerType(LHS, RHS, QuestionLoc); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); if (!compositeType.isNull()) return compositeType; // Handle block pointer types. if (LHSTy->isBlockPointerType() || RHSTy->isBlockPointerType()) return checkConditionalBlockPointerCompatibility(*this, LHS, RHS, QuestionLoc); // Check constraints for C object pointers types (C99 6.5.15p3,6). if (LHSTy->isPointerType() && RHSTy->isPointerType()) return checkConditionalObjectPointersCompatibility(*this, LHS, RHS, QuestionLoc); // GCC compatibility: soften pointer/integer mismatch. Note that // null pointers have been filtered out by this point. if (checkPointerIntegerMismatch(*this, LHS, RHS.get(), QuestionLoc, /*IsIntFirstExpr=*/true)) return RHSTy; if (checkPointerIntegerMismatch(*this, RHS, LHS.get(), QuestionLoc, /*IsIntFirstExpr=*/false)) return LHSTy; // Allow ?: operations in which both operands have the same // built-in sizeless type. 
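  // Illustrative note (not in the original source): "built-in sizeless types"
  // here are target-specific types whose size is unknown at compile time,
  // such as the ARM SVE vector types (e.g. svint32_t).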
if (LHSTy->isSizelessBuiltinType() && LHSTy == RHSTy) return LHSTy; // Emit a better diagnostic if one of the expressions is a null pointer // constant and the other is not a pointer type. In this case, the user most // likely forgot to take the address of the other expression. if (DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc)) return QualType(); // Otherwise, the operands are not compatible. Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } /// FindCompositeObjCPointerType - Helper method to find composite type of /// two objective-c pointer types of the two input expressions. QualType Sema::FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc) { QualType LHSTy = LHS.get()->getType(); QualType RHSTy = RHS.get()->getType(); // Handle things like Class and struct objc_class*. Here we case the result // to the pseudo-builtin, because that will be implicitly cast back to the // redefinition type if an attempt is made to access its fields. if (LHSTy->isObjCClassType() && (Context.hasSameType(RHSTy, Context.getObjCClassRedefinitionType()))) { RHS = ImpCastExprToType(RHS.get(), LHSTy, CK_CPointerToObjCPointerCast); return LHSTy; } if (RHSTy->isObjCClassType() && (Context.hasSameType(LHSTy, Context.getObjCClassRedefinitionType()))) { LHS = ImpCastExprToType(LHS.get(), RHSTy, CK_CPointerToObjCPointerCast); return RHSTy; } // And the same for struct objc_object* / id if (LHSTy->isObjCIdType() && (Context.hasSameType(RHSTy, Context.getObjCIdRedefinitionType()))) { RHS = ImpCastExprToType(RHS.get(), LHSTy, CK_CPointerToObjCPointerCast); return LHSTy; } if (RHSTy->isObjCIdType() && (Context.hasSameType(LHSTy, Context.getObjCIdRedefinitionType()))) { LHS = ImpCastExprToType(LHS.get(), RHSTy, CK_CPointerToObjCPointerCast); return RHSTy; } // And the same for struct objc_selector* / SEL if (Context.isObjCSelType(LHSTy) && (Context.hasSameType(RHSTy, Context.getObjCSelRedefinitionType()))) { RHS = ImpCastExprToType(RHS.get(), LHSTy, CK_BitCast); return LHSTy; } if (Context.isObjCSelType(RHSTy) && (Context.hasSameType(LHSTy, Context.getObjCSelRedefinitionType()))) { LHS = ImpCastExprToType(LHS.get(), RHSTy, CK_BitCast); return RHSTy; } // Check constraints for Objective-C object pointers types. if (LHSTy->isObjCObjectPointerType() && RHSTy->isObjCObjectPointerType()) { if (Context.getCanonicalType(LHSTy) == Context.getCanonicalType(RHSTy)) { // Two identical object pointer types are always compatible. return LHSTy; } const ObjCObjectPointerType *LHSOPT = LHSTy->castAs(); const ObjCObjectPointerType *RHSOPT = RHSTy->castAs(); QualType compositeType = LHSTy; // If both operands are interfaces and either operand can be // assigned to the other, use that type as the composite // type. This allows // xxx ? (A*) a : (B*) b // where B is a subclass of A. // // Additionally, as for assignment, if either type is 'id' // allow silent coercion. Finally, if the types are // incompatible then make sure to use 'id' as the composite // type so the result is acceptable for sending messages to. // FIXME: Consider unifying with 'areComparableObjCPointerTypes'. // It could return the composite type. if (!(compositeType = Context.areCommonBaseCompatible(LHSOPT, RHSOPT)).isNull()) { // Nothing more to do. } else if (Context.canAssignObjCInterfaces(LHSOPT, RHSOPT)) { compositeType = RHSOPT->isObjCBuiltinType() ? 
RHSTy : LHSTy; } else if (Context.canAssignObjCInterfaces(RHSOPT, LHSOPT)) { compositeType = LHSOPT->isObjCBuiltinType() ? LHSTy : RHSTy; } else if ((LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) && Context.ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, true)) { // Need to handle "id" explicitly. // GCC allows qualified id and any Objective-C type to devolve to // id. Currently localizing to here until clear this should be // part of ObjCQualifiedIdTypesAreCompatible. compositeType = Context.getObjCIdType(); } else if (LHSTy->isObjCIdType() || RHSTy->isObjCIdType()) { compositeType = Context.getObjCIdType(); } else { Diag(QuestionLoc, diag::ext_typecheck_cond_incompatible_operands) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); QualType incompatTy = Context.getObjCIdType(); LHS = ImpCastExprToType(LHS.get(), incompatTy, CK_BitCast); RHS = ImpCastExprToType(RHS.get(), incompatTy, CK_BitCast); return incompatTy; } // The object pointer types are compatible. LHS = ImpCastExprToType(LHS.get(), compositeType, CK_BitCast); RHS = ImpCastExprToType(RHS.get(), compositeType, CK_BitCast); return compositeType; } // Check Objective-C object pointer types and 'void *' if (LHSTy->isVoidPointerType() && RHSTy->isObjCObjectPointerType()) { if (getLangOpts().ObjCAutoRefCount) { // ARC forbids the implicit conversion of object pointers to 'void *', // so these types are not compatible. Diag(QuestionLoc, diag::err_cond_voidptr_arc) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); LHS = RHS = true; return QualType(); } QualType lhptee = LHSTy->castAs()->getPointeeType(); QualType rhptee = RHSTy->castAs()->getPointeeType(); QualType destPointee = Context.getQualifiedType(lhptee, rhptee.getQualifiers()); QualType destType = Context.getPointerType(destPointee); // Add qualifiers if necessary. LHS = ImpCastExprToType(LHS.get(), destType, CK_NoOp); // Promote to void*. RHS = ImpCastExprToType(RHS.get(), destType, CK_BitCast); return destType; } if (LHSTy->isObjCObjectPointerType() && RHSTy->isVoidPointerType()) { if (getLangOpts().ObjCAutoRefCount) { // ARC forbids the implicit conversion of object pointers to 'void *', // so these types are not compatible. Diag(QuestionLoc, diag::err_cond_voidptr_arc) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); LHS = RHS = true; return QualType(); } QualType lhptee = LHSTy->castAs()->getPointeeType(); QualType rhptee = RHSTy->castAs()->getPointeeType(); QualType destPointee = Context.getQualifiedType(rhptee, lhptee.getQualifiers()); QualType destType = Context.getPointerType(destPointee); // Add qualifiers if necessary. RHS = ImpCastExprToType(RHS.get(), destType, CK_NoOp); // Promote to void*. LHS = ImpCastExprToType(LHS.get(), destType, CK_BitCast); return destType; } return QualType(); } /// SuggestParentheses - Emit a note with a fixit hint that wraps /// ParenRange in parentheses. static void SuggestParentheses(Sema &Self, SourceLocation Loc, const PartialDiagnostic &Note, SourceRange ParenRange) { SourceLocation EndLoc = Self.getLocForEndOfToken(ParenRange.getEnd()); if (ParenRange.getBegin().isFileID() && ParenRange.getEnd().isFileID() && EndLoc.isValid()) { Self.Diag(Loc, Note) << FixItHint::CreateInsertion(ParenRange.getBegin(), "(") << FixItHint::CreateInsertion(EndLoc, ")"); } else { // We can't display the parentheses, so just show the bare note. 
Self.Diag(Loc, Note) << ParenRange; } } static bool IsArithmeticOp(BinaryOperatorKind Opc) { return BinaryOperator::isAdditiveOp(Opc) || BinaryOperator::isMultiplicativeOp(Opc) || BinaryOperator::isShiftOp(Opc) || Opc == BO_And || Opc == BO_Or; // This only checks for bitwise-or and bitwise-and, but not bitwise-xor and // not any of the logical operators. Bitwise-xor is commonly used as a // logical-xor because there is no logical-xor operator. The logical // operators, including uses of xor, have a high false positive rate for // precedence warnings. } /// IsArithmeticBinaryExpr - Returns true if E is an arithmetic binary /// expression, either using a built-in or overloaded operator, /// and sets *OpCode to the opcode and *RHSExprs to the right-hand side /// expression. static bool IsArithmeticBinaryExpr(Expr *E, BinaryOperatorKind *Opcode, Expr **RHSExprs) { // Don't strip parenthesis: we should not warn if E is in parenthesis. E = E->IgnoreImpCasts(); E = E->IgnoreConversionOperator(); E = E->IgnoreImpCasts(); if (auto *MTE = dyn_cast(E)) { E = MTE->getSubExpr(); E = E->IgnoreImpCasts(); } // Built-in binary operator. if (BinaryOperator *OP = dyn_cast(E)) { if (IsArithmeticOp(OP->getOpcode())) { *Opcode = OP->getOpcode(); *RHSExprs = OP->getRHS(); return true; } } // Overloaded operator. if (CXXOperatorCallExpr *Call = dyn_cast(E)) { if (Call->getNumArgs() != 2) return false; // Make sure this is really a binary operator that is safe to pass into // BinaryOperator::getOverloadedOpcode(), e.g. it's not a subscript op. OverloadedOperatorKind OO = Call->getOperator(); if (OO < OO_Plus || OO > OO_Arrow || OO == OO_PlusPlus || OO == OO_MinusMinus) return false; BinaryOperatorKind OpKind = BinaryOperator::getOverloadedOpcode(OO); if (IsArithmeticOp(OpKind)) { *Opcode = OpKind; *RHSExprs = Call->getArg(1); return true; } } return false; } /// ExprLooksBoolean - Returns true if E looks boolean, i.e. it has boolean type /// or is a logical expression such as (x==y) which has int type, but is /// commonly interpreted as boolean. static bool ExprLooksBoolean(Expr *E) { E = E->IgnoreParenImpCasts(); if (E->getType()->isBooleanType()) return true; if (BinaryOperator *OP = dyn_cast(E)) return OP->isComparisonOp() || OP->isLogicalOp(); if (UnaryOperator *OP = dyn_cast(E)) return OP->getOpcode() == UO_LNot; if (E->getType()->isPointerType()) return true; // FIXME: What about overloaded operator calls returning "unspecified boolean // type"s (commonly pointer-to-members)? return false; } /// DiagnoseConditionalPrecedence - Emit a warning when a conditional operator /// and binary operator are mixed in a way that suggests the programmer assumed /// the conditional operator has higher precedence, for example: /// "int x = a + someBinaryCondition ? 1 : 2". static void DiagnoseConditionalPrecedence(Sema &Self, SourceLocation OpLoc, Expr *Condition, Expr *LHSExpr, Expr *RHSExpr) { BinaryOperatorKind CondOpcode; Expr *CondRHS; if (!IsArithmeticBinaryExpr(Condition, &CondOpcode, &CondRHS)) return; if (!ExprLooksBoolean(CondRHS)) return; // The condition is an arithmetic binary expression, with a right- // hand side that looks boolean, so warn. unsigned DiagID = BinaryOperator::isBitwiseOp(CondOpcode) ? 
diag::warn_precedence_bitwise_conditional : diag::warn_precedence_conditional; Self.Diag(OpLoc, DiagID) << Condition->getSourceRange() << BinaryOperator::getOpcodeStr(CondOpcode); SuggestParentheses( Self, OpLoc, Self.PDiag(diag::note_precedence_silence) << BinaryOperator::getOpcodeStr(CondOpcode), SourceRange(Condition->getBeginLoc(), Condition->getEndLoc())); SuggestParentheses(Self, OpLoc, Self.PDiag(diag::note_precedence_conditional_first), SourceRange(CondRHS->getBeginLoc(), RHSExpr->getEndLoc())); } /// Compute the nullability of a conditional expression. static QualType computeConditionalNullability(QualType ResTy, bool IsBin, QualType LHSTy, QualType RHSTy, ASTContext &Ctx) { if (!ResTy->isAnyPointerType()) return ResTy; auto GetNullability = [&Ctx](QualType Ty) { Optional Kind = Ty->getNullability(Ctx); if (Kind) return *Kind; return NullabilityKind::Unspecified; }; auto LHSKind = GetNullability(LHSTy), RHSKind = GetNullability(RHSTy); NullabilityKind MergedKind; // Compute nullability of a binary conditional expression. if (IsBin) { if (LHSKind == NullabilityKind::NonNull) MergedKind = NullabilityKind::NonNull; else MergedKind = RHSKind; // Compute nullability of a normal conditional expression. } else { if (LHSKind == NullabilityKind::Nullable || RHSKind == NullabilityKind::Nullable) MergedKind = NullabilityKind::Nullable; else if (LHSKind == NullabilityKind::NonNull) MergedKind = RHSKind; else if (RHSKind == NullabilityKind::NonNull) MergedKind = LHSKind; else MergedKind = NullabilityKind::Unspecified; } // Return if ResTy already has the correct nullability. if (GetNullability(ResTy) == MergedKind) return ResTy; // Strip all nullability from ResTy. while (ResTy->getNullability(Ctx)) ResTy = ResTy.getSingleStepDesugaredType(Ctx); // Create a new AttributedType with the new nullability kind. auto NewAttr = AttributedType::getNullabilityAttrKind(MergedKind); return Ctx.getAttributedType(NewAttr, ResTy, ResTy); } /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr) { if (!getLangOpts().CPlusPlus) { // C cannot handle TypoExpr nodes in the condition because it // doesn't handle dependent types properly, so make sure any TypoExprs have // been dealt with before checking the operands. ExprResult CondResult = CorrectDelayedTyposInExpr(CondExpr); ExprResult LHSResult = CorrectDelayedTyposInExpr(LHSExpr); ExprResult RHSResult = CorrectDelayedTyposInExpr(RHSExpr); if (!CondResult.isUsable()) return ExprError(); if (LHSExpr) { if (!LHSResult.isUsable()) return ExprError(); } if (!RHSResult.isUsable()) return ExprError(); CondExpr = CondResult.get(); LHSExpr = LHSResult.get(); RHSExpr = RHSResult.get(); } // If this is the gnu "x ?: y" extension, analyze the types as though the LHS // was the condition. OpaqueValueExpr *opaqueValue = nullptr; Expr *commonExpr = nullptr; if (!LHSExpr) { commonExpr = CondExpr; // Lower out placeholder types first. This is important so that we don't // try to capture a placeholder. This happens in few cases in C++; such // as Objective-C++'s dictionary subscripting syntax. 
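    // Illustrative sketch of that situation (hypothetical Objective-C++ user
    // code, not taken from a test case):
    //
    //   NSMutableDictionary *cache = ...;
    //   id value = cache[key] ?: [self defaultValue];
    //
    // The shared operand 'cache[key]' is a pseudo-object (placeholder)
    // expression, so it has to be lowered here before it is wrapped in an
    // OpaqueValueExpr below.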
if (commonExpr->hasPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(commonExpr); if (!result.isUsable()) return ExprError(); commonExpr = result.get(); } // We usually want to apply unary conversions *before* saving, except // in the special case of a C++ l-value conditional. if (!(getLangOpts().CPlusPlus && !commonExpr->isTypeDependent() && commonExpr->getValueKind() == RHSExpr->getValueKind() && commonExpr->isGLValue() && commonExpr->isOrdinaryOrBitFieldObject() && RHSExpr->isOrdinaryOrBitFieldObject() && Context.hasSameType(commonExpr->getType(), RHSExpr->getType()))) { ExprResult commonRes = UsualUnaryConversions(commonExpr); if (commonRes.isInvalid()) return ExprError(); commonExpr = commonRes.get(); } // If the common expression is a class or array prvalue, materialize it // so that we can safely refer to it multiple times. if (commonExpr->isRValue() && (commonExpr->getType()->isRecordType() || commonExpr->getType()->isArrayType())) { ExprResult MatExpr = TemporaryMaterializationConversion(commonExpr); if (MatExpr.isInvalid()) return ExprError(); commonExpr = MatExpr.get(); } opaqueValue = new (Context) OpaqueValueExpr(commonExpr->getExprLoc(), commonExpr->getType(), commonExpr->getValueKind(), commonExpr->getObjectKind(), commonExpr); LHSExpr = CondExpr = opaqueValue; } QualType LHSTy = LHSExpr->getType(), RHSTy = RHSExpr->getType(); ExprValueKind VK = VK_RValue; ExprObjectKind OK = OK_Ordinary; ExprResult Cond = CondExpr, LHS = LHSExpr, RHS = RHSExpr; QualType result = CheckConditionalOperands(Cond, LHS, RHS, VK, OK, QuestionLoc); if (result.isNull() || Cond.isInvalid() || LHS.isInvalid() || RHS.isInvalid()) return ExprError(); DiagnoseConditionalPrecedence(*this, QuestionLoc, Cond.get(), LHS.get(), RHS.get()); CheckBoolLikeConversion(Cond.get(), QuestionLoc); result = computeConditionalNullability(result, commonExpr, LHSTy, RHSTy, Context); if (!commonExpr) return new (Context) ConditionalOperator(Cond.get(), QuestionLoc, LHS.get(), ColonLoc, RHS.get(), result, VK, OK); return new (Context) BinaryConditionalOperator( commonExpr, opaqueValue, Cond.get(), LHS.get(), RHS.get(), QuestionLoc, ColonLoc, result, VK, OK); } // Check if we have a conversion between incompatible cmse function pointer // types, that is, a conversion between a function pointer with the // cmse_nonsecure_call attribute and one without. static bool IsInvalidCmseNSCallConversion(Sema &S, QualType FromType, QualType ToType) { if (const auto *ToFn = dyn_cast(S.Context.getCanonicalType(ToType))) { if (const auto *FromFn = dyn_cast(S.Context.getCanonicalType(FromType))) { FunctionType::ExtInfo ToEInfo = ToFn->getExtInfo(); FunctionType::ExtInfo FromEInfo = FromFn->getExtInfo(); return ToEInfo.getCmseNSCall() != FromEInfo.getCmseNSCall(); } } return false; } // checkPointerTypesForAssignment - This is a very tricky routine (despite // being closely modeled after the C99 spec:-). The odd characteristic of this // routine is it effectively iqnores the qualifiers on the top level pointee. // This circumvents the usual type rules specified in 6.2.7p1 & 6.7.5.[1-3]. // FIXME: add a couple examples in this comment. 
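// A couple of illustrative cases of what "ignoring the top-level pointee
// qualifiers" means in practice (plain C, hypothetical user code):
//
//   char *p;
//   const char *cp;
//   cp = p;   // fine: 'const char' compatibly includes 'char'.
//   p = cp;   // not flatly incompatible here; the dropped 'const' is peeled
//             // off and reported separately (CompatiblePointerDiscardsQualifiers),
//             // which surfaces as a discards-qualifiers warning in C.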
static Sema::AssignConvertType checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) { assert(LHSType.isCanonical() && "LHS not canonicalized!"); assert(RHSType.isCanonical() && "RHS not canonicalized!"); // get the "pointed to" type (ignoring qualifiers at the top level) const Type *lhptee, *rhptee; Qualifiers lhq, rhq; std::tie(lhptee, lhq) = cast(LHSType)->getPointeeType().split().asPair(); std::tie(rhptee, rhq) = cast(RHSType)->getPointeeType().split().asPair(); Sema::AssignConvertType ConvTy = Sema::Compatible; // C99 6.5.16.1p1: This following citation is common to constraints // 3 & 4 (below). ...and the type *pointed to* by the left has all the // qualifiers of the type *pointed to* by the right; // As a special case, 'non-__weak A *' -> 'non-__weak const *' is okay. if (lhq.getObjCLifetime() != rhq.getObjCLifetime() && lhq.compatiblyIncludesObjCLifetime(rhq)) { // Ignore lifetime for further calculation. lhq.removeObjCLifetime(); rhq.removeObjCLifetime(); } if (!lhq.compatiblyIncludes(rhq)) { // Treat address-space mismatches as fatal. if (!lhq.isAddressSpaceSupersetOf(rhq)) return Sema::IncompatiblePointerDiscardsQualifiers; // It's okay to add or remove GC or lifetime qualifiers when converting to // and from void*. else if (lhq.withoutObjCGCAttr().withoutObjCLifetime() .compatiblyIncludes( rhq.withoutObjCGCAttr().withoutObjCLifetime()) && (lhptee->isVoidType() || rhptee->isVoidType())) ; // keep old // Treat lifetime mismatches as fatal. else if (lhq.getObjCLifetime() != rhq.getObjCLifetime()) ConvTy = Sema::IncompatiblePointerDiscardsQualifiers; // For GCC/MS compatibility, other qualifier mismatches are treated // as still compatible in C. else ConvTy = Sema::CompatiblePointerDiscardsQualifiers; } // C99 6.5.16.1p1 (constraint 4): If one operand is a pointer to an object or // incomplete type and the other is a pointer to a qualified or unqualified // version of void... if (lhptee->isVoidType()) { if (rhptee->isIncompleteOrObjectType()) return ConvTy; // As an extension, we allow cast to/from void* to function pointer. assert(rhptee->isFunctionType()); return Sema::FunctionVoidPointer; } if (rhptee->isVoidType()) { if (lhptee->isIncompleteOrObjectType()) return ConvTy; // As an extension, we allow cast to/from void* to function pointer. assert(lhptee->isFunctionType()); return Sema::FunctionVoidPointer; } // C99 6.5.16.1p1 (constraint 3): both operands are pointers to qualified or // unqualified versions of compatible types, ... QualType ltrans = QualType(lhptee, 0), rtrans = QualType(rhptee, 0); if (!S.Context.typesAreCompatible(ltrans, rtrans)) { // Check if the pointee types are compatible ignoring the sign. // We explicitly check for char so that we catch "char" vs // "unsigned char" on systems where "char" is unsigned. if (lhptee->isCharType()) ltrans = S.Context.UnsignedCharTy; else if (lhptee->hasSignedIntegerRepresentation()) ltrans = S.Context.getCorrespondingUnsignedType(ltrans); if (rhptee->isCharType()) rtrans = S.Context.UnsignedCharTy; else if (rhptee->hasSignedIntegerRepresentation()) rtrans = S.Context.getCorrespondingUnsignedType(rtrans); if (ltrans == rtrans) { // Types are compatible ignoring the sign. Qualifier incompatibility // takes priority over sign incompatibility because the sign // warning can be disabled. if (ConvTy != Sema::Compatible) return ConvTy; return Sema::IncompatiblePointerSign; } // If we are a multi-level pointer, it's possible that our issue is simply // one of qualification - e.g. 
char ** -> const char ** is not allowed. If // the eventual target type is the same and the pointers have the same // level of indirection, this must be the issue. if (isa(lhptee) && isa(rhptee)) { do { std::tie(lhptee, lhq) = cast(lhptee)->getPointeeType().split().asPair(); std::tie(rhptee, rhq) = cast(rhptee)->getPointeeType().split().asPair(); // Inconsistent address spaces at this point is invalid, even if the // address spaces would be compatible. // FIXME: This doesn't catch address space mismatches for pointers of // different nesting levels, like: // __local int *** a; // int ** b = a; // It's not clear how to actually determine when such pointers are // invalidly incompatible. if (lhq.getAddressSpace() != rhq.getAddressSpace()) return Sema::IncompatibleNestedPointerAddressSpaceMismatch; } while (isa(lhptee) && isa(rhptee)); if (lhptee == rhptee) return Sema::IncompatibleNestedPointerQualifiers; } // General pointer incompatibility takes priority over qualifiers. if (RHSType->isFunctionPointerType() && LHSType->isFunctionPointerType()) return Sema::IncompatibleFunctionPointer; return Sema::IncompatiblePointer; } if (!S.getLangOpts().CPlusPlus && S.IsFunctionConversion(ltrans, rtrans, ltrans)) return Sema::IncompatibleFunctionPointer; if (IsInvalidCmseNSCallConversion(S, ltrans, rtrans)) return Sema::IncompatibleFunctionPointer; return ConvTy; } /// checkBlockPointerTypesForAssignment - This routine determines whether two /// block pointer types are compatible or whether a block and normal pointer /// are compatible. It is more restrict than comparing two function pointer // types. static Sema::AssignConvertType checkBlockPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) { assert(LHSType.isCanonical() && "LHS not canonicalized!"); assert(RHSType.isCanonical() && "RHS not canonicalized!"); QualType lhptee, rhptee; // get the "pointed to" type (ignoring qualifiers at the top level) lhptee = cast(LHSType)->getPointeeType(); rhptee = cast(RHSType)->getPointeeType(); // In C++, the types have to match exactly. if (S.getLangOpts().CPlusPlus) return Sema::IncompatibleBlockPointer; Sema::AssignConvertType ConvTy = Sema::Compatible; // For blocks we enforce that qualifiers are identical. Qualifiers LQuals = lhptee.getLocalQualifiers(); Qualifiers RQuals = rhptee.getLocalQualifiers(); if (S.getLangOpts().OpenCL) { LQuals.removeAddressSpace(); RQuals.removeAddressSpace(); } if (LQuals != RQuals) ConvTy = Sema::CompatiblePointerDiscardsQualifiers; // FIXME: OpenCL doesn't define the exact compile time semantics for a block // assignment. // The current behavior is similar to C++ lambdas. A block might be // assigned to a variable iff its return type and parameters are compatible // (C99 6.2.7) with the corresponding return type and parameters of the LHS of // an assignment. Presumably it should behave in way that a function pointer // assignment does in C, so for each parameter and return type: // * CVR and address space of LHS should be a superset of CVR and address // space of RHS. // * unqualified types should be compatible. 
  if (S.getLangOpts().OpenCL) {
    if (!S.Context.typesAreBlockPointerCompatible(
            S.Context.getQualifiedType(LHSType.getUnqualifiedType(), LQuals),
            S.Context.getQualifiedType(RHSType.getUnqualifiedType(), RQuals)))
      return Sema::IncompatibleBlockPointer;
  } else if (!S.Context.typesAreBlockPointerCompatible(LHSType, RHSType))
    return Sema::IncompatibleBlockPointer;

  return ConvTy;
}

/// checkObjCPointerTypesForAssignment - Compares two objective-c pointer types
/// for assignment compatibility.
static Sema::AssignConvertType
checkObjCPointerTypesForAssignment(Sema &S, QualType LHSType,
                                   QualType RHSType) {
  assert(LHSType.isCanonical() && "LHS was not canonicalized!");
  assert(RHSType.isCanonical() && "RHS was not canonicalized!");

  if (LHSType->isObjCBuiltinType()) {
    // Class is not compatible with ObjC object pointers.
    if (LHSType->isObjCClassType() && !RHSType->isObjCBuiltinType() &&
        !RHSType->isObjCQualifiedClassType())
      return Sema::IncompatiblePointer;
    return Sema::Compatible;
  }
  if (RHSType->isObjCBuiltinType()) {
    if (RHSType->isObjCClassType() && !LHSType->isObjCBuiltinType() &&
        !LHSType->isObjCQualifiedClassType())
      return Sema::IncompatiblePointer;
    return Sema::Compatible;
  }
  QualType lhptee = LHSType->castAs<ObjCObjectPointerType>()->getPointeeType();
  QualType rhptee = RHSType->castAs<ObjCObjectPointerType>()->getPointeeType();

  if (!lhptee.isAtLeastAsQualifiedAs(rhptee) &&
      // make an exception for id<P...>

!LHSType->isObjCQualifiedIdType()) return Sema::CompatiblePointerDiscardsQualifiers; if (S.Context.typesAreCompatible(LHSType, RHSType)) return Sema::Compatible; if (LHSType->isObjCQualifiedIdType() || RHSType->isObjCQualifiedIdType()) return Sema::IncompatibleObjCQualifiedId; return Sema::IncompatiblePointer; } Sema::AssignConvertType Sema::CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType) { // Fake up an opaque expression. We don't actually care about what // cast operations are required, so if CheckAssignmentConstraints // adds casts to this they'll be wasted, but fortunately that doesn't // usually happen on valid code. OpaqueValueExpr RHSExpr(Loc, RHSType, VK_RValue); ExprResult RHSPtr = &RHSExpr; CastKind K; return CheckAssignmentConstraints(LHSType, RHSPtr, K, /*ConvertRHS=*/false); } /// This helper function returns true if QT is a vector type that has element /// type ElementType. static bool isVector(QualType QT, QualType ElementType) { if (const VectorType *VT = QT->getAs()) return VT->getElementType().getCanonicalType() == ElementType; return false; } /// CheckAssignmentConstraints (C99 6.5.16) - This routine currently /// has code to accommodate several GCC extensions when type checking /// pointers. Here are some objectionable examples that GCC considers warnings: /// /// int a, *pint; /// short *pshort; /// struct foo *pfoo; /// /// pint = pshort; // warning: assignment from incompatible pointer type /// a = pint; // warning: assignment makes integer from pointer without a cast /// pint = a; // warning: assignment makes pointer from integer without a cast /// pint = pfoo; // warning: assignment from incompatible pointer type /// /// As a result, the code for dealing with pointers is more complex than the /// C99 spec dictates. /// /// Sets 'Kind' for any result kind except Incompatible. Sema::AssignConvertType Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS) { QualType RHSType = RHS.get()->getType(); QualType OrigLHSType = LHSType; // Get canonical types. We're not formatting these types, just comparing // them. LHSType = Context.getCanonicalType(LHSType).getUnqualifiedType(); RHSType = Context.getCanonicalType(RHSType).getUnqualifiedType(); // Common case: no conversion required. if (LHSType == RHSType) { Kind = CK_NoOp; return Compatible; } // If we have an atomic type, try a non-atomic assignment, then just add an // atomic qualification step. if (const AtomicType *AtomicTy = dyn_cast(LHSType)) { Sema::AssignConvertType result = CheckAssignmentConstraints(AtomicTy->getValueType(), RHS, Kind); if (result != Compatible) return result; if (Kind != CK_NoOp && ConvertRHS) RHS = ImpCastExprToType(RHS.get(), AtomicTy->getValueType(), Kind); Kind = CK_NonAtomicToAtomic; return Compatible; } // If the left-hand side is a reference type, then we are in a // (rare!) case where we've allowed the use of references in C, // e.g., as a parameter type in a built-in function. In this case, // just make sure that the type referenced is compatible with the // right-hand side type. The caller is responsible for adjusting // LHSType so that the resulting expression does not have reference // type. if (const ReferenceType *LHSTypeRef = LHSType->getAs()) { if (Context.typesAreCompatible(LHSTypeRef->getPointeeType(), RHSType)) { Kind = CK_LValueBitCast; return Compatible; } return Incompatible; } // Allow scalar to ExtVector assignments, and assignments of an ExtVector type // to the same ExtVector type. 
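  // For example (sketch, using the ext_vector_type extension):
  //
  //   typedef float float4 __attribute__((ext_vector_type(4)));
  //   float4 v;
  //   v = 2.0f;   // the scalar is cast to the element type and then splatted
  //               // across all four lanes (CK_VectorSplat).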
if (LHSType->isExtVectorType()) { if (RHSType->isExtVectorType()) return Incompatible; if (RHSType->isArithmeticType()) { // CK_VectorSplat does T -> vector T, so first cast to the element type. if (ConvertRHS) RHS = prepareVectorSplat(LHSType, RHS.get()); Kind = CK_VectorSplat; return Compatible; } } // Conversions to or from vector type. if (LHSType->isVectorType() || RHSType->isVectorType()) { if (LHSType->isVectorType() && RHSType->isVectorType()) { // Allow assignments of an AltiVec vector type to an equivalent GCC // vector type and vice versa if (Context.areCompatibleVectorTypes(LHSType, RHSType)) { Kind = CK_BitCast; return Compatible; } // If we are allowing lax vector conversions, and LHS and RHS are both // vectors, the total size only needs to be the same. This is a bitcast; // no bits are changed but the result type is different. if (isLaxVectorConversion(RHSType, LHSType)) { Kind = CK_BitCast; return IncompatibleVectors; } } // When the RHS comes from another lax conversion (e.g. binops between // scalars and vectors) the result is canonicalized as a vector. When the // LHS is also a vector, the lax is allowed by the condition above. Handle // the case where LHS is a scalar. if (LHSType->isScalarType()) { const VectorType *VecType = RHSType->getAs(); if (VecType && VecType->getNumElements() == 1 && isLaxVectorConversion(RHSType, LHSType)) { ExprResult *VecExpr = &RHS; *VecExpr = ImpCastExprToType(VecExpr->get(), LHSType, CK_BitCast); Kind = CK_BitCast; return Compatible; } } return Incompatible; } // Diagnose attempts to convert between __float128 and long double where // such conversions currently can't be handled. if (unsupportedTypeConversion(*this, LHSType, RHSType)) return Incompatible; // Disallow assigning a _Complex to a real type in C++ mode since it simply // discards the imaginary part. if (getLangOpts().CPlusPlus && RHSType->getAs() && !LHSType->getAs()) return Incompatible; // Arithmetic conversions. if (LHSType->isArithmeticType() && RHSType->isArithmeticType() && !(getLangOpts().CPlusPlus && LHSType->isEnumeralType())) { if (ConvertRHS) Kind = PrepareScalarCast(RHS, LHSType); return Compatible; } // Conversions to normal pointers. if (const PointerType *LHSPointer = dyn_cast(LHSType)) { // U* -> T* if (isa(RHSType)) { LangAS AddrSpaceL = LHSPointer->getPointeeType().getAddressSpace(); LangAS AddrSpaceR = RHSType->getPointeeType().getAddressSpace(); if (AddrSpaceL != AddrSpaceR) Kind = CK_AddressSpaceConversion; else if (Context.hasCvrSimilarType(RHSType, LHSType)) Kind = CK_NoOp; else Kind = CK_BitCast; return checkPointerTypesForAssignment(*this, LHSType, RHSType); } // int -> T* if (RHSType->isIntegerType()) { Kind = CK_IntegralToPointer; // FIXME: null? return IntToPointer; } // C pointers are not compatible with ObjC object pointers, // with two exceptions: if (isa(RHSType)) { // - conversions to void* if (LHSPointer->getPointeeType()->isVoidType()) { Kind = CK_BitCast; return Compatible; } // - conversions from 'Class' to the redefinition type if (RHSType->isObjCClassType() && Context.hasSameType(LHSType, Context.getObjCClassRedefinitionType())) { Kind = CK_BitCast; return Compatible; } Kind = CK_BitCast; return IncompatiblePointer; } // U^ -> void* if (RHSType->getAs()) { if (LHSPointer->getPointeeType()->isVoidType()) { LangAS AddrSpaceL = LHSPointer->getPointeeType().getAddressSpace(); LangAS AddrSpaceR = RHSType->getAs() ->getPointeeType() .getAddressSpace(); Kind = AddrSpaceL != AddrSpaceR ? 
CK_AddressSpaceConversion : CK_BitCast; return Compatible; } } return Incompatible; } // Conversions to block pointers. if (isa(LHSType)) { // U^ -> T^ if (RHSType->isBlockPointerType()) { LangAS AddrSpaceL = LHSType->getAs() ->getPointeeType() .getAddressSpace(); LangAS AddrSpaceR = RHSType->getAs() ->getPointeeType() .getAddressSpace(); Kind = AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast; return checkBlockPointerTypesForAssignment(*this, LHSType, RHSType); } // int or null -> T^ if (RHSType->isIntegerType()) { Kind = CK_IntegralToPointer; // FIXME: null return IntToBlockPointer; } // id -> T^ if (getLangOpts().ObjC && RHSType->isObjCIdType()) { Kind = CK_AnyPointerToBlockPointerCast; return Compatible; } // void* -> T^ if (const PointerType *RHSPT = RHSType->getAs()) if (RHSPT->getPointeeType()->isVoidType()) { Kind = CK_AnyPointerToBlockPointerCast; return Compatible; } return Incompatible; } // Conversions to Objective-C pointers. if (isa(LHSType)) { // A* -> B* if (RHSType->isObjCObjectPointerType()) { Kind = CK_BitCast; Sema::AssignConvertType result = checkObjCPointerTypesForAssignment(*this, LHSType, RHSType); if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() && result == Compatible && !CheckObjCARCUnavailableWeakConversion(OrigLHSType, RHSType)) result = IncompatibleObjCWeakRef; return result; } // int or null -> A* if (RHSType->isIntegerType()) { Kind = CK_IntegralToPointer; // FIXME: null return IntToPointer; } // In general, C pointers are not compatible with ObjC object pointers, // with two exceptions: if (isa(RHSType)) { Kind = CK_CPointerToObjCPointerCast; // - conversions from 'void*' if (RHSType->isVoidPointerType()) { return Compatible; } // - conversions to 'Class' from its redefinition type if (LHSType->isObjCClassType() && Context.hasSameType(RHSType, Context.getObjCClassRedefinitionType())) { return Compatible; } return IncompatiblePointer; } // Only under strict condition T^ is compatible with an Objective-C pointer. if (RHSType->isBlockPointerType() && LHSType->isBlockCompatibleObjCPointerType(Context)) { if (ConvertRHS) maybeExtendBlockObject(RHS); Kind = CK_BlockPointerToObjCPointerCast; return Compatible; } return Incompatible; } // Conversions from pointers that are not covered by the above. if (isa(RHSType)) { // T* -> _Bool if (LHSType == Context.BoolTy) { Kind = CK_PointerToBoolean; return Compatible; } // T* -> int if (LHSType->isIntegerType()) { Kind = CK_PointerToIntegral; return PointerToInt; } return Incompatible; } // Conversions from Objective-C pointers that are not covered by the above. if (isa(RHSType)) { // T* -> _Bool if (LHSType == Context.BoolTy) { Kind = CK_PointerToBoolean; return Compatible; } // T* -> int if (LHSType->isIntegerType()) { Kind = CK_PointerToIntegral; return PointerToInt; } return Incompatible; } // struct A -> struct B if (isa(LHSType) && isa(RHSType)) { if (Context.typesAreCompatible(LHSType, RHSType)) { Kind = CK_NoOp; return Compatible; } } if (LHSType->isSamplerT() && RHSType->isIntegerType()) { Kind = CK_IntToOCLSampler; return Compatible; } return Incompatible; } /// Constructs a transparent union from an expression that is /// used to initialize the transparent union. static void ConstructTransparentUnion(Sema &S, ASTContext &C, ExprResult &EResult, QualType UnionType, FieldDecl *Field) { // Build an initializer list that designates the appropriate member // of the transparent union. 
Expr *E = EResult.get(); InitListExpr *Initializer = new (C) InitListExpr(C, SourceLocation(), E, SourceLocation()); Initializer->setType(UnionType); Initializer->setInitializedFieldInUnion(Field); // Build a compound literal constructing a value of the transparent // union type from this initializer list. TypeSourceInfo *unionTInfo = C.getTrivialTypeSourceInfo(UnionType); EResult = new (C) CompoundLiteralExpr(SourceLocation(), unionTInfo, UnionType, VK_RValue, Initializer, false); } Sema::AssignConvertType Sema::CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS) { QualType RHSType = RHS.get()->getType(); // If the ArgType is a Union type, we want to handle a potential // transparent_union GCC extension. const RecordType *UT = ArgType->getAsUnionType(); if (!UT || !UT->getDecl()->hasAttr()) return Incompatible; // The field to initialize within the transparent union. RecordDecl *UD = UT->getDecl(); FieldDecl *InitField = nullptr; // It's compatible if the expression matches any of the fields. for (auto *it : UD->fields()) { if (it->getType()->isPointerType()) { // If the transparent union contains a pointer type, we allow: // 1) void pointer // 2) null pointer constant if (RHSType->isPointerType()) if (RHSType->castAs()->getPointeeType()->isVoidType()) { RHS = ImpCastExprToType(RHS.get(), it->getType(), CK_BitCast); InitField = it; break; } if (RHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) { RHS = ImpCastExprToType(RHS.get(), it->getType(), CK_NullToPointer); InitField = it; break; } } CastKind Kind; if (CheckAssignmentConstraints(it->getType(), RHS, Kind) == Compatible) { RHS = ImpCastExprToType(RHS.get(), it->getType(), Kind); InitField = it; break; } } if (!InitField) return Incompatible; ConstructTransparentUnion(*this, Context, RHS, ArgType, InitField); return Compatible; } Sema::AssignConvertType Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS, bool Diagnose, bool DiagnoseCFAudited, bool ConvertRHS) { // We need to be able to tell the caller whether we diagnosed a problem, if // they ask us to issue diagnostics. assert((ConvertRHS || !Diagnose) && "can't indicate whether we diagnosed"); // If ConvertRHS is false, we want to leave the caller's RHS untouched. Sadly, // we can't avoid *all* modifications at the moment, so we need some somewhere // to put the updated value. ExprResult LocalRHS = CallerRHS; ExprResult &RHS = ConvertRHS ? CallerRHS : LocalRHS; if (const auto *LHSPtrType = LHSType->getAs()) { if (const auto *RHSPtrType = RHS.get()->getType()->getAs()) { if (RHSPtrType->getPointeeType()->hasAttr(attr::NoDeref) && !LHSPtrType->getPointeeType()->hasAttr(attr::NoDeref)) { Diag(RHS.get()->getExprLoc(), diag::warn_noderef_to_dereferenceable_pointer) << RHS.get()->getSourceRange(); } } } if (getLangOpts().CPlusPlus) { if (!LHSType->isRecordType() && !LHSType->isAtomicType()) { // C++ 5.17p3: If the left operand is not of class type, the // expression is implicitly converted (C++ 4) to the // cv-unqualified type of the left operand. 
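      // e.g. for 'volatile int x; x = 3.5;' the right operand is converted to
      // 'int' (the cv-unqualified type of the left operand), not to
      // 'volatile int'.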
QualType RHSType = RHS.get()->getType(); if (Diagnose) { RHS = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(), AA_Assigning); } else { ImplicitConversionSequence ICS = TryImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(), /*SuppressUserConversions=*/false, AllowedExplicit::None, /*InOverloadResolution=*/false, /*CStyle=*/false, /*AllowObjCWritebackConversion=*/false); if (ICS.isFailure()) return Incompatible; RHS = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(), ICS, AA_Assigning); } if (RHS.isInvalid()) return Incompatible; Sema::AssignConvertType result = Compatible; if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() && !CheckObjCARCUnavailableWeakConversion(LHSType, RHSType)) result = IncompatibleObjCWeakRef; return result; } // FIXME: Currently, we fall through and treat C++ classes like C // structures. // FIXME: We also fall through for atomics; not sure what should // happen there, though. } else if (RHS.get()->getType() == Context.OverloadTy) { // As a set of extensions to C, we support overloading on functions. These // functions need to be resolved here. DeclAccessPair DAP; if (FunctionDecl *FD = ResolveAddressOfOverloadedFunction( RHS.get(), LHSType, /*Complain=*/false, DAP)) RHS = FixOverloadedFunctionReference(RHS.get(), DAP, FD); else return Incompatible; } // C99 6.5.16.1p1: the left operand is a pointer and the right is // a null pointer constant. if ((LHSType->isPointerType() || LHSType->isObjCObjectPointerType() || LHSType->isBlockPointerType()) && RHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) { if (Diagnose || ConvertRHS) { CastKind Kind; CXXCastPath Path; CheckPointerConversion(RHS.get(), LHSType, Kind, Path, /*IgnoreBaseAccess=*/false, Diagnose); if (ConvertRHS) RHS = ImpCastExprToType(RHS.get(), LHSType, Kind, VK_RValue, &Path); } return Compatible; } // OpenCL queue_t type assignment. if (LHSType->isQueueT() && RHS.get()->isNullPointerConstant( Context, Expr::NPC_ValueDependentIsNull)) { RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer); return Compatible; } // This check seems unnatural, however it is necessary to ensure the proper // conversion of functions/arrays. If the conversion were done for all // DeclExpr's (created by ActOnIdExpression), it would mess up the unary // expressions that suppress this implicit conversion (&, sizeof). // // Suppress this for references: C++ 8.5.3p5. if (!LHSType->isReferenceType()) { // FIXME: We potentially allocate here even if ConvertRHS is false. RHS = DefaultFunctionArrayLvalueConversion(RHS.get(), Diagnose); if (RHS.isInvalid()) return Incompatible; } CastKind Kind; Sema::AssignConvertType result = CheckAssignmentConstraints(LHSType, RHS, Kind, ConvertRHS); // C99 6.5.16.1p2: The value of the right operand is converted to the // type of the assignment expression. // CheckAssignmentConstraints allows the left-hand side to be a reference, // so that we can use references in built-in functions even in C. // The getNonReferenceType() call makes sure that the resulting expression // does not have reference type. if (result != Incompatible && RHS.get()->getType() != LHSType) { QualType Ty = LHSType.getNonLValueExprType(Context); Expr *E = RHS.get(); // Check for various Objective-C errors. If we are not reporting // diagnostics and just checking for errors, e.g., during overload // resolution, return Incompatible to indicate the failure. 
if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() && CheckObjCConversion(SourceRange(), Ty, E, CCK_ImplicitConversion, Diagnose, DiagnoseCFAudited) != ACR_okay) { if (!Diagnose) return Incompatible; } if (getLangOpts().ObjC && (CheckObjCBridgeRelatedConversions(E->getBeginLoc(), LHSType, E->getType(), E, Diagnose) || CheckConversionToObjCLiteral(LHSType, E, Diagnose))) { if (!Diagnose) return Incompatible; // Replace the expression with a corrected version and continue so we // can find further errors. RHS = E; return Compatible; } if (ConvertRHS) RHS = ImpCastExprToType(E, Ty, Kind); } return result; } namespace { /// The original operand to an operator, prior to the application of the usual /// arithmetic conversions and converting the arguments of a builtin operator /// candidate. struct OriginalOperand { explicit OriginalOperand(Expr *Op) : Orig(Op), Conversion(nullptr) { if (auto *MTE = dyn_cast(Op)) Op = MTE->getSubExpr(); if (auto *BTE = dyn_cast(Op)) Op = BTE->getSubExpr(); if (auto *ICE = dyn_cast(Op)) { Orig = ICE->getSubExprAsWritten(); Conversion = ICE->getConversionFunction(); } } QualType getType() const { return Orig->getType(); } Expr *Orig; NamedDecl *Conversion; }; } QualType Sema::InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS) { OriginalOperand OrigLHS(LHS.get()), OrigRHS(RHS.get()); Diag(Loc, diag::err_typecheck_invalid_operands) << OrigLHS.getType() << OrigRHS.getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); // If a user-defined conversion was applied to either of the operands prior // to applying the built-in operator rules, tell the user about it. if (OrigLHS.Conversion) { Diag(OrigLHS.Conversion->getLocation(), diag::note_typecheck_invalid_operands_converted) << 0 << LHS.get()->getType(); } if (OrigRHS.Conversion) { Diag(OrigRHS.Conversion->getLocation(), diag::note_typecheck_invalid_operands_converted) << 1 << RHS.get()->getType(); } return QualType(); } // Diagnose cases where a scalar was implicitly converted to a vector and // diagnose the underlying types. Otherwise, diagnose the error // as invalid vector logical operands for non-C++ cases. QualType Sema::InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS) { QualType LHSType = LHS.get()->IgnoreImpCasts()->getType(); QualType RHSType = RHS.get()->IgnoreImpCasts()->getType(); bool LHSNatVec = LHSType->isVectorType(); bool RHSNatVec = RHSType->isVectorType(); if (!(LHSNatVec && RHSNatVec)) { Expr *Vector = LHSNatVec ? LHS.get() : RHS.get(); Expr *NonVector = !LHSNatVec ? LHS.get() : RHS.get(); Diag(Loc, diag::err_typecheck_logical_vector_expr_gnu_cpp_restrict) << 0 << Vector->getType() << NonVector->IgnoreImpCasts()->getType() << Vector->getSourceRange(); return QualType(); } Diag(Loc, diag::err_typecheck_logical_vector_expr_gnu_cpp_restrict) << 1 << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } /// Try to convert a value of non-vector type to a vector type by converting /// the type to the element type of the vector and then performing a splat. /// If the language is OpenCL, we only use conversions that promote scalar /// rank; for C, Obj-C, and C++ we allow any real scalar conversion except /// for float->int. /// /// OpenCL V2.0 6.2.6.p2: /// An error shall occur if any scalar operand type has greater rank /// than the type of the vector element. 
/// /// \param scalar - if non-null, actually perform the conversions /// \return true if the operation fails (but without diagnosing the failure) static bool tryVectorConvertAndSplat(Sema &S, ExprResult *scalar, QualType scalarTy, QualType vectorEltTy, QualType vectorTy, unsigned &DiagID) { // The conversion to apply to the scalar before splatting it, // if necessary. CastKind scalarCast = CK_NoOp; if (vectorEltTy->isIntegralType(S.Context)) { if (S.getLangOpts().OpenCL && (scalarTy->isRealFloatingType() || (scalarTy->isIntegerType() && S.Context.getIntegerTypeOrder(vectorEltTy, scalarTy) < 0))) { DiagID = diag::err_opencl_scalar_type_rank_greater_than_vector_type; return true; } if (!scalarTy->isIntegralType(S.Context)) return true; scalarCast = CK_IntegralCast; } else if (vectorEltTy->isRealFloatingType()) { if (scalarTy->isRealFloatingType()) { if (S.getLangOpts().OpenCL && S.Context.getFloatingTypeOrder(vectorEltTy, scalarTy) < 0) { DiagID = diag::err_opencl_scalar_type_rank_greater_than_vector_type; return true; } scalarCast = CK_FloatingCast; } else if (scalarTy->isIntegralType(S.Context)) scalarCast = CK_IntegralToFloating; else return true; } else { return true; } // Adjust scalar if desired. if (scalar) { if (scalarCast != CK_NoOp) *scalar = S.ImpCastExprToType(scalar->get(), vectorEltTy, scalarCast); *scalar = S.ImpCastExprToType(scalar->get(), vectorTy, CK_VectorSplat); } return false; } /// Convert vector E to a vector with the same number of elements but different /// element type. static ExprResult convertVector(Expr *E, QualType ElementType, Sema &S) { const auto *VecTy = E->getType()->getAs(); assert(VecTy && "Expression E must be a vector"); QualType NewVecTy = S.Context.getVectorType(ElementType, VecTy->getNumElements(), VecTy->getVectorKind()); // Look through the implicit cast. Return the subexpression if its type is // NewVecTy. if (auto *ICE = dyn_cast(E)) if (ICE->getSubExpr()->getType() == NewVecTy) return ICE->getSubExpr(); auto Cast = ElementType->isIntegerType() ? CK_IntegralCast : CK_FloatingCast; return S.ImpCastExprToType(E, NewVecTy, Cast); } /// Test if a (constant) integer Int can be casted to another integer type /// IntTy without losing precision. static bool canConvertIntToOtherIntTy(Sema &S, ExprResult *Int, QualType OtherIntTy) { QualType IntTy = Int->get()->getType().getUnqualifiedType(); // Reject cases where the value of the Int is unknown as that would // possibly cause truncation, but accept cases where the scalar can be // demoted without loss of precision. Expr::EvalResult EVResult; bool CstInt = Int->get()->EvaluateAsInt(EVResult, S.Context); int Order = S.Context.getIntegerTypeOrder(OtherIntTy, IntTy); bool IntSigned = IntTy->hasSignedIntegerRepresentation(); bool OtherIntSigned = OtherIntTy->hasSignedIntegerRepresentation(); if (CstInt) { // If the scalar is constant and is of a higher order and has more active // bits that the vector element type, reject it. llvm::APSInt Result = EVResult.Val.getInt(); unsigned NumBits = IntSigned ? (Result.isNegative() ? Result.getMinSignedBits() : Result.getActiveBits()) : Result.getActiveBits(); if (Order < 0 && S.Context.getIntWidth(OtherIntTy) < NumBits) return true; // If the signedness of the scalar type and the vector element type // differs and the number of bits is greater than that of the vector // element reject it. 
return (IntSigned != OtherIntSigned && NumBits > S.Context.getIntWidth(OtherIntTy)); } // Reject cases where the value of the scalar is not constant and it's // order is greater than that of the vector element type. return (Order < 0); } /// Test if a (constant) integer Int can be casted to floating point type /// FloatTy without losing precision. static bool canConvertIntTyToFloatTy(Sema &S, ExprResult *Int, QualType FloatTy) { QualType IntTy = Int->get()->getType().getUnqualifiedType(); // Determine if the integer constant can be expressed as a floating point // number of the appropriate type. Expr::EvalResult EVResult; bool CstInt = Int->get()->EvaluateAsInt(EVResult, S.Context); uint64_t Bits = 0; if (CstInt) { // Reject constants that would be truncated if they were converted to // the floating point type. Test by simple to/from conversion. // FIXME: Ideally the conversion to an APFloat and from an APFloat // could be avoided if there was a convertFromAPInt method // which could signal back if implicit truncation occurred. llvm::APSInt Result = EVResult.Val.getInt(); llvm::APFloat Float(S.Context.getFloatTypeSemantics(FloatTy)); Float.convertFromAPInt(Result, IntTy->hasSignedIntegerRepresentation(), llvm::APFloat::rmTowardZero); llvm::APSInt ConvertBack(S.Context.getIntWidth(IntTy), !IntTy->hasSignedIntegerRepresentation()); bool Ignored = false; Float.convertToInteger(ConvertBack, llvm::APFloat::rmNearestTiesToEven, &Ignored); if (Result != ConvertBack) return true; } else { // Reject types that cannot be fully encoded into the mantissa of // the float. Bits = S.Context.getTypeSize(IntTy); unsigned FloatPrec = llvm::APFloat::semanticsPrecision( S.Context.getFloatTypeSemantics(FloatTy)); if (Bits > FloatPrec) return true; } return false; } /// Attempt to convert and splat Scalar into a vector whose types matches /// Vector following GCC conversion rules. The rule is that implicit /// conversion can occur when Scalar can be casted to match Vector's element /// type without causing truncation of Scalar. static bool tryGCCVectorConvertAndSplat(Sema &S, ExprResult *Scalar, ExprResult *Vector) { QualType ScalarTy = Scalar->get()->getType().getUnqualifiedType(); QualType VectorTy = Vector->get()->getType().getUnqualifiedType(); const VectorType *VT = VectorTy->getAs(); assert(!isa(VT) && "ExtVectorTypes should not be handled here!"); QualType VectorEltTy = VT->getElementType(); // Reject cases where the vector element type or the scalar element type are // not integral or floating point types. if (!VectorEltTy->isArithmeticType() || !ScalarTy->isArithmeticType()) return true; // The conversion to apply to the scalar before splatting it, // if necessary. CastKind ScalarCast = CK_NoOp; // Accept cases where the vector elements are integers and the scalar is // an integer. // FIXME: Notionally if the scalar was a floating point value with a precise // integral representation, we could cast it to an appropriate integer // type and then perform the rest of the checks here. GCC will perform // this conversion in some cases as determined by the input language. // We should accept it on a language independent basis. 
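  // For illustration (GCC-style vectors, hypothetical user code):
  //
  //   typedef int v4si __attribute__((vector_size(16)));
  //   v4si v;
  //   v = v + 1;             // OK: 1 is representable as 'int', so it is
  //                          // converted and splatted across the lanes.
  //   v = v + 5000000000LL;  // rejected: the constant does not fit in 'int',
  //                          // so splatting it would silently truncate.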
if (VectorEltTy->isIntegralType(S.Context) && ScalarTy->isIntegralType(S.Context) && S.Context.getIntegerTypeOrder(VectorEltTy, ScalarTy)) { if (canConvertIntToOtherIntTy(S, Scalar, VectorEltTy)) return true; ScalarCast = CK_IntegralCast; } else if (VectorEltTy->isIntegralType(S.Context) && ScalarTy->isRealFloatingType()) { if (S.Context.getTypeSize(VectorEltTy) == S.Context.getTypeSize(ScalarTy)) ScalarCast = CK_FloatingToIntegral; else return true; } else if (VectorEltTy->isRealFloatingType()) { if (ScalarTy->isRealFloatingType()) { // Reject cases where the scalar type is not a constant and has a higher // Order than the vector element type. llvm::APFloat Result(0.0); // Determine whether this is a constant scalar. In the event that the // value is dependent (and thus cannot be evaluated by the constant // evaluator), skip the evaluation. This will then diagnose once the // expression is instantiated. bool CstScalar = Scalar->get()->isValueDependent() || Scalar->get()->EvaluateAsFloat(Result, S.Context); int Order = S.Context.getFloatingTypeOrder(VectorEltTy, ScalarTy); if (!CstScalar && Order < 0) return true; // If the scalar cannot be safely casted to the vector element type, // reject it. if (CstScalar) { bool Truncated = false; Result.convert(S.Context.getFloatTypeSemantics(VectorEltTy), llvm::APFloat::rmNearestTiesToEven, &Truncated); if (Truncated) return true; } ScalarCast = CK_FloatingCast; } else if (ScalarTy->isIntegralType(S.Context)) { if (canConvertIntTyToFloatTy(S, Scalar, VectorEltTy)) return true; ScalarCast = CK_IntegralToFloating; } else return true; } else if (ScalarTy->isEnumeralType()) return true; // Adjust scalar if desired. if (Scalar) { if (ScalarCast != CK_NoOp) *Scalar = S.ImpCastExprToType(Scalar->get(), VectorEltTy, ScalarCast); *Scalar = S.ImpCastExprToType(Scalar->get(), VectorTy, CK_VectorSplat); } return false; } QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversions) { if (!IsCompAssign) { LHS = DefaultFunctionArrayLvalueConversion(LHS.get()); if (LHS.isInvalid()) return QualType(); } RHS = DefaultFunctionArrayLvalueConversion(RHS.get()); if (RHS.isInvalid()) return QualType(); // For conversion purposes, we ignore any qualifiers. // For example, "const float" and "float" are equivalent. QualType LHSType = LHS.get()->getType().getUnqualifiedType(); QualType RHSType = RHS.get()->getType().getUnqualifiedType(); const VectorType *LHSVecType = LHSType->getAs(); const VectorType *RHSVecType = RHSType->getAs(); assert(LHSVecType || RHSVecType); // AltiVec-style "vector bool op vector bool" combinations are allowed // for some operators but not others. if (!AllowBothBool && LHSVecType && LHSVecType->getVectorKind() == VectorType::AltiVecBool && RHSVecType && RHSVecType->getVectorKind() == VectorType::AltiVecBool) return InvalidOperands(Loc, LHS, RHS); // If the vector types are identical, return. if (Context.hasSameType(LHSType, RHSType)) return LHSType; // If we have compatible AltiVec and GCC vector types, use the AltiVec type. if (LHSVecType && RHSVecType && Context.areCompatibleVectorTypes(LHSType, RHSType)) { if (isa(LHSVecType)) { RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast); return LHSType; } if (!IsCompAssign) LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast); return RHSType; } // AllowBoolConversions says that bool and non-bool AltiVec vectors // can be mixed, with the result being the non-bool type. 
The non-bool // operand must have integer element type. if (AllowBoolConversions && LHSVecType && RHSVecType && LHSVecType->getNumElements() == RHSVecType->getNumElements() && (Context.getTypeSize(LHSVecType->getElementType()) == Context.getTypeSize(RHSVecType->getElementType()))) { if (LHSVecType->getVectorKind() == VectorType::AltiVecVector && LHSVecType->getElementType()->isIntegerType() && RHSVecType->getVectorKind() == VectorType::AltiVecBool) { RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast); return LHSType; } if (!IsCompAssign && LHSVecType->getVectorKind() == VectorType::AltiVecBool && RHSVecType->getVectorKind() == VectorType::AltiVecVector && RHSVecType->getElementType()->isIntegerType()) { LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast); return RHSType; } } // If there's a vector type and a scalar, try to convert the scalar to // the vector element type and splat. unsigned DiagID = diag::err_typecheck_vector_not_convertable; if (!RHSVecType) { if (isa(LHSVecType)) { if (!tryVectorConvertAndSplat(*this, &RHS, RHSType, LHSVecType->getElementType(), LHSType, DiagID)) return LHSType; } else { if (!tryGCCVectorConvertAndSplat(*this, &RHS, &LHS)) return LHSType; } } if (!LHSVecType) { if (isa(RHSVecType)) { if (!tryVectorConvertAndSplat(*this, (IsCompAssign ? nullptr : &LHS), LHSType, RHSVecType->getElementType(), RHSType, DiagID)) return RHSType; } else { if (LHS.get()->getValueKind() == VK_LValue || !tryGCCVectorConvertAndSplat(*this, &LHS, &RHS)) return RHSType; } } // FIXME: The code below also handles conversion between vectors and // non-scalars, we should break this down into fine grained specific checks // and emit proper diagnostics. QualType VecType = LHSVecType ? LHSType : RHSType; const VectorType *VT = LHSVecType ? LHSVecType : RHSVecType; QualType OtherType = LHSVecType ? RHSType : LHSType; ExprResult *OtherExpr = LHSVecType ? &RHS : &LHS; if (isLaxVectorConversion(OtherType, VecType)) { // If we're allowing lax vector conversions, only the total (data) size // needs to be the same. For non compound assignment, if one of the types is // scalar, the result is always the vector type. if (!IsCompAssign) { *OtherExpr = ImpCastExprToType(OtherExpr->get(), VecType, CK_BitCast); return VecType; // In a compound assignment, lhs += rhs, 'lhs' is a lvalue src, forbidding // any implicit cast. Here, the 'rhs' should be implicit casted to 'lhs' // type. Note that this is already done by non-compound assignments in // CheckAssignmentConstraints. If it's a scalar type, only bitcast for // <1 x T> -> T. The result is also a vector type. } else if (OtherType->isExtVectorType() || OtherType->isVectorType() || (OtherType->isScalarType() && VT->getNumElements() == 1)) { ExprResult *RHSExpr = &RHS; *RHSExpr = ImpCastExprToType(RHSExpr->get(), LHSType, CK_BitCast); return VecType; } } // Okay, the expression is invalid. // If there's a non-vector, non-real operand, diagnose that. if ((!RHSVecType && !RHSType->isRealType()) || (!LHSVecType && !LHSType->isRealType())) { Diag(Loc, diag::err_typecheck_vector_not_convertable_non_scalar) << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } // OpenCL V1.1 6.2.6.p1: // If the operands are of more than one vector type, then an error shall // occur. Implicit conversions between vector types are not permitted, per // section 6.2.1. 
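  // e.g. in OpenCL (illustrative):
  //
  //   float4 f; int4 i;
  //   f = f + i;   // error: no implicit conversion between distinct vector
  //                // types (err_opencl_implicit_vector_conversion).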
if (getLangOpts().OpenCL && RHSVecType && isa(RHSVecType) && LHSVecType && isa(LHSVecType)) { Diag(Loc, diag::err_opencl_implicit_vector_conversion) << LHSType << RHSType; return QualType(); } // If there is a vector type that is not a ExtVector and a scalar, we reach // this point if scalar could not be converted to the vector's element type // without truncation. if ((RHSVecType && !isa(RHSVecType)) || (LHSVecType && !isa(LHSVecType))) { QualType Scalar = LHSVecType ? RHSType : LHSType; QualType Vector = LHSVecType ? LHSType : RHSType; unsigned ScalarOrVector = LHSVecType && RHSVecType ? 1 : 0; Diag(Loc, diag::err_typecheck_vector_not_convertable_implict_truncation) << ScalarOrVector << Scalar << Vector; return QualType(); } // Otherwise, use the generic diagnostic. Diag(Loc, DiagID) << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } // checkArithmeticNull - Detect when a NULL constant is used improperly in an // expression. These are mainly cases where the null pointer is used as an // integer instead of a pointer. static void checkArithmeticNull(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompare) { // The canonical way to check for a GNU null is with isNullPointerConstant, // but we use a bit of a hack here for speed; this is a relatively // hot path, and isNullPointerConstant is slow. bool LHSNull = isa(LHS.get()->IgnoreParenImpCasts()); bool RHSNull = isa(RHS.get()->IgnoreParenImpCasts()); QualType NonNullType = LHSNull ? RHS.get()->getType() : LHS.get()->getType(); // Avoid analyzing cases where the result will either be invalid (and // diagnosed as such) or entirely valid and not something to warn about. if ((!LHSNull && !RHSNull) || NonNullType->isBlockPointerType() || NonNullType->isMemberPointerType() || NonNullType->isFunctionType()) return; // Comparison operations would not make sense with a null pointer no matter // what the other expression is. if (!IsCompare) { S.Diag(Loc, diag::warn_null_in_arithmetic_operation) << (LHSNull ? LHS.get()->getSourceRange() : SourceRange()) << (RHSNull ? RHS.get()->getSourceRange() : SourceRange()); return; } // The rest of the operations only make sense with a null pointer // if the other expression is a pointer. 
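  // Illustrative triggers (hypothetical user code):
  //
  //   int n = 1 + NULL;       // NULL used as an integer in arithmetic.
  //   if (some_int > NULL) {} // comparison against NULL where the other
  //                           // operand is not (and cannot decay to) a
  //                           // pointer.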
if (LHSNull == RHSNull || NonNullType->isAnyPointerType() || NonNullType->canDecayToPointerType()) return; S.Diag(Loc, diag::warn_null_in_comparison_operation) << LHSNull /* LHS is NULL */ << NonNullType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } static void DiagnoseDivisionSizeofPointerOrArray(Sema &S, Expr *LHS, Expr *RHS, SourceLocation Loc) { const auto *LUE = dyn_cast(LHS); const auto *RUE = dyn_cast(RHS); if (!LUE || !RUE) return; if (LUE->getKind() != UETT_SizeOf || LUE->isArgumentType() || RUE->getKind() != UETT_SizeOf) return; const Expr *LHSArg = LUE->getArgumentExpr()->IgnoreParens(); QualType LHSTy = LHSArg->getType(); QualType RHSTy; if (RUE->isArgumentType()) RHSTy = RUE->getArgumentType(); else RHSTy = RUE->getArgumentExpr()->IgnoreParens()->getType(); if (LHSTy->isPointerType() && !RHSTy->isPointerType()) { if (!S.Context.hasSameUnqualifiedType(LHSTy->getPointeeType(), RHSTy)) return; S.Diag(Loc, diag::warn_division_sizeof_ptr) << LHS << LHS->getSourceRange(); if (const auto *DRE = dyn_cast(LHSArg)) { if (const ValueDecl *LHSArgDecl = DRE->getDecl()) S.Diag(LHSArgDecl->getLocation(), diag::note_pointer_declared_here) << LHSArgDecl; } } else if (const auto *ArrayTy = S.Context.getAsArrayType(LHSTy)) { QualType ArrayElemTy = ArrayTy->getElementType(); if (ArrayElemTy != S.Context.getBaseElementType(ArrayTy) || ArrayElemTy->isDependentType() || RHSTy->isDependentType() || ArrayElemTy->isCharType() || S.Context.getTypeSize(ArrayElemTy) == S.Context.getTypeSize(RHSTy)) return; S.Diag(Loc, diag::warn_division_sizeof_array) << LHSArg->getSourceRange() << ArrayElemTy << RHSTy; if (const auto *DRE = dyn_cast(LHSArg)) { if (const ValueDecl *LHSArgDecl = DRE->getDecl()) S.Diag(LHSArgDecl->getLocation(), diag::note_array_declared_here) << LHSArgDecl; } S.Diag(Loc, diag::note_precedence_silence) << RHS; } } static void DiagnoseBadDivideOrRemainderValues(Sema& S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsDiv) { // Check for division/remainder by zero. Expr::EvalResult RHSValue; if (!RHS.get()->isValueDependent() && RHS.get()->EvaluateAsInt(RHSValue, S.Context) && RHSValue.Val.getInt() == 0) S.DiagRuntimeBehavior(Loc, RHS.get(), S.PDiag(diag::warn_remainder_division_by_zero) << IsDiv << RHS.get()->getSourceRange()); } QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDiv) { checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false); if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign, /*AllowBothBool*/getLangOpts().AltiVec, /*AllowBoolConversions*/false); if (!IsDiv && (LHS.get()->getType()->isConstantMatrixType() || RHS.get()->getType()->isConstantMatrixType())) return CheckMatrixMultiplyOperands(LHS, RHS, Loc, IsCompAssign); QualType compType = UsualArithmeticConversions( LHS, RHS, Loc, IsCompAssign ? 
ACK_CompAssign : ACK_Arithmetic); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); if (compType.isNull() || !compType->isArithmeticType()) return InvalidOperands(Loc, LHS, RHS); if (IsDiv) { DiagnoseBadDivideOrRemainderValues(*this, LHS, RHS, Loc, IsDiv); DiagnoseDivisionSizeofPointerOrArray(*this, LHS.get(), RHS.get(), Loc); } return compType; } QualType Sema::CheckRemainderOperands( ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) { checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false); if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { if (LHS.get()->getType()->hasIntegerRepresentation() && RHS.get()->getType()->hasIntegerRepresentation()) return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign, /*AllowBothBool*/getLangOpts().AltiVec, /*AllowBoolConversions*/false); return InvalidOperands(Loc, LHS, RHS); } QualType compType = UsualArithmeticConversions( LHS, RHS, Loc, IsCompAssign ? ACK_CompAssign : ACK_Arithmetic); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); if (compType.isNull() || !compType->isIntegerType()) return InvalidOperands(Loc, LHS, RHS); DiagnoseBadDivideOrRemainderValues(*this, LHS, RHS, Loc, false /* IsDiv */); return compType; } /// Diagnose invalid arithmetic on two void pointers. static void diagnoseArithmeticOnTwoVoidPointers(Sema &S, SourceLocation Loc, Expr *LHSExpr, Expr *RHSExpr) { S.Diag(Loc, S.getLangOpts().CPlusPlus ? diag::err_typecheck_pointer_arith_void_type : diag::ext_gnu_void_ptr) << 1 /* two pointers */ << LHSExpr->getSourceRange() << RHSExpr->getSourceRange(); } /// Diagnose invalid arithmetic on a void pointer. static void diagnoseArithmeticOnVoidPointer(Sema &S, SourceLocation Loc, Expr *Pointer) { S.Diag(Loc, S.getLangOpts().CPlusPlus ? diag::err_typecheck_pointer_arith_void_type : diag::ext_gnu_void_ptr) << 0 /* one pointer */ << Pointer->getSourceRange(); } /// Diagnose invalid arithmetic on a null pointer. /// /// If \p IsGNUIdiom is true, the operation is using the 'p = (i8*)nullptr + n' /// idiom, which we recognize as a GNU extension. /// static void diagnoseArithmeticOnNullPointer(Sema &S, SourceLocation Loc, Expr *Pointer, bool IsGNUIdiom) { if (IsGNUIdiom) S.Diag(Loc, diag::warn_gnu_null_ptr_arith) << Pointer->getSourceRange(); else S.Diag(Loc, diag::warn_pointer_arith_null_ptr) << S.getLangOpts().CPlusPlus << Pointer->getSourceRange(); } /// Diagnose invalid arithmetic on two function pointers. static void diagnoseArithmeticOnTwoFunctionPointers(Sema &S, SourceLocation Loc, Expr *LHS, Expr *RHS) { assert(LHS->getType()->isAnyPointerType()); assert(RHS->getType()->isAnyPointerType()); S.Diag(Loc, S.getLangOpts().CPlusPlus ? diag::err_typecheck_pointer_arith_function_type : diag::ext_gnu_ptr_func_arith) << 1 /* two pointers */ << LHS->getType()->getPointeeType() // We only show the second type if it differs from the first. << (unsigned)!S.Context.hasSameUnqualifiedType(LHS->getType(), RHS->getType()) << RHS->getType()->getPointeeType() << LHS->getSourceRange() << RHS->getSourceRange(); } /// Diagnose invalid arithmetic on a function pointer. static void diagnoseArithmeticOnFunctionPointer(Sema &S, SourceLocation Loc, Expr *Pointer) { assert(Pointer->getType()->isAnyPointerType()); S.Diag(Loc, S.getLangOpts().CPlusPlus ? 
                  diag::err_typecheck_pointer_arith_function_type
                : diag::ext_gnu_ptr_func_arith)
      << 0 /* one pointer */ << Pointer->getType()->getPointeeType()
      << 0 /* one pointer, so only one type */
      << Pointer->getSourceRange();
}

/// Emit error if Operand is incomplete pointer type
///
/// \returns True if pointer has incomplete type
static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc,
                                                 Expr *Operand) {
  QualType ResType = Operand->getType();
  if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>())
    ResType = ResAtomicType->getValueType();

  assert(ResType->isAnyPointerType() && !ResType->isDependentType());
  QualType PointeeTy = ResType->getPointeeType();
  return S.RequireCompleteSizedType(
      Loc, PointeeTy,
      diag::err_typecheck_arithmetic_incomplete_or_sizeless_type,
      Operand->getSourceRange());
}

/// Check the validity of an arithmetic pointer operand.
///
/// If the operand has pointer type, this code will check for pointer types
/// which are invalid in arithmetic operations. These will be diagnosed
/// appropriately, including whether or not the use is supported as an
/// extension.
///
/// \returns True when the operand is valid to use (even if as an extension).
static bool checkArithmeticOpPointerOperand(Sema &S, SourceLocation Loc,
                                            Expr *Operand) {
  QualType ResType = Operand->getType();
  if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>())
    ResType = ResAtomicType->getValueType();

  if (!ResType->isAnyPointerType()) return true;

  QualType PointeeTy = ResType->getPointeeType();
  if (PointeeTy->isVoidType()) {
    diagnoseArithmeticOnVoidPointer(S, Loc, Operand);
    return !S.getLangOpts().CPlusPlus;
  }
  if (PointeeTy->isFunctionType()) {
    diagnoseArithmeticOnFunctionPointer(S, Loc, Operand);
    return !S.getLangOpts().CPlusPlus;
  }

  if (checkArithmeticIncompletePointerType(S, Loc, Operand)) return false;

  return true;
}

/// Check the validity of a binary arithmetic operation w.r.t. pointer
/// operands.
///
/// This routine will diagnose any invalid arithmetic on pointer operands much
/// like \see checkArithmeticOpPointerOperand. However, it has special logic
/// for emitting a single diagnostic even for operations where both LHS and RHS
/// are (potentially problematic) pointers.
///
/// \returns True when the operand is valid to use (even if as an extension).
static bool checkArithmeticBinOpPointerOperands(Sema &S, SourceLocation Loc,
                                                Expr *LHSExpr, Expr *RHSExpr) {
  bool isLHSPointer = LHSExpr->getType()->isAnyPointerType();
  bool isRHSPointer = RHSExpr->getType()->isAnyPointerType();
  if (!isLHSPointer && !isRHSPointer) return true;

  QualType LHSPointeeTy, RHSPointeeTy;
  if (isLHSPointer) LHSPointeeTy = LHSExpr->getType()->getPointeeType();
  if (isRHSPointer) RHSPointeeTy = RHSExpr->getType()->getPointeeType();

  // if both are pointers check if operation is valid wrt address spaces
  if (isLHSPointer && isRHSPointer) {
    if (!LHSPointeeTy.isAddressSpaceOverlapping(RHSPointeeTy)) {
      S.Diag(Loc,
             diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
          << LHSExpr->getType() << RHSExpr->getType() << 1 /*arithmetic op*/
          << LHSExpr->getSourceRange() << RHSExpr->getSourceRange();
      return false;
    }
  }

  // Check for arithmetic on pointers to incomplete types.
bool isLHSVoidPtr = isLHSPointer && LHSPointeeTy->isVoidType(); bool isRHSVoidPtr = isRHSPointer && RHSPointeeTy->isVoidType(); if (isLHSVoidPtr || isRHSVoidPtr) { if (!isRHSVoidPtr) diagnoseArithmeticOnVoidPointer(S, Loc, LHSExpr); else if (!isLHSVoidPtr) diagnoseArithmeticOnVoidPointer(S, Loc, RHSExpr); else diagnoseArithmeticOnTwoVoidPointers(S, Loc, LHSExpr, RHSExpr); return !S.getLangOpts().CPlusPlus; } bool isLHSFuncPtr = isLHSPointer && LHSPointeeTy->isFunctionType(); bool isRHSFuncPtr = isRHSPointer && RHSPointeeTy->isFunctionType(); if (isLHSFuncPtr || isRHSFuncPtr) { if (!isRHSFuncPtr) diagnoseArithmeticOnFunctionPointer(S, Loc, LHSExpr); else if (!isLHSFuncPtr) diagnoseArithmeticOnFunctionPointer(S, Loc, RHSExpr); else diagnoseArithmeticOnTwoFunctionPointers(S, Loc, LHSExpr, RHSExpr); return !S.getLangOpts().CPlusPlus; } if (isLHSPointer && checkArithmeticIncompletePointerType(S, Loc, LHSExpr)) return false; if (isRHSPointer && checkArithmeticIncompletePointerType(S, Loc, RHSExpr)) return false; return true; } /// diagnoseStringPlusInt - Emit a warning when adding an integer to a string /// literal. static void diagnoseStringPlusInt(Sema &Self, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr) { StringLiteral* StrExpr = dyn_cast(LHSExpr->IgnoreImpCasts()); Expr* IndexExpr = RHSExpr; if (!StrExpr) { StrExpr = dyn_cast(RHSExpr->IgnoreImpCasts()); IndexExpr = LHSExpr; } bool IsStringPlusInt = StrExpr && IndexExpr->getType()->isIntegralOrUnscopedEnumerationType(); if (!IsStringPlusInt || IndexExpr->isValueDependent()) return; SourceRange DiagRange(LHSExpr->getBeginLoc(), RHSExpr->getEndLoc()); Self.Diag(OpLoc, diag::warn_string_plus_int) << DiagRange << IndexExpr->IgnoreImpCasts()->getType(); // Only print a fixit for "str" + int, not for int + "str". if (IndexExpr == RHSExpr) { SourceLocation EndLoc = Self.getLocForEndOfToken(RHSExpr->getEndLoc()); Self.Diag(OpLoc, diag::note_string_plus_scalar_silence) << FixItHint::CreateInsertion(LHSExpr->getBeginLoc(), "&") << FixItHint::CreateReplacement(SourceRange(OpLoc), "[") << FixItHint::CreateInsertion(EndLoc, "]"); } else Self.Diag(OpLoc, diag::note_string_plus_scalar_silence); } /// Emit a warning when adding a char literal to a string. static void diagnoseStringPlusChar(Sema &Self, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr) { const Expr *StringRefExpr = LHSExpr; const CharacterLiteral *CharExpr = dyn_cast(RHSExpr->IgnoreImpCasts()); if (!CharExpr) { CharExpr = dyn_cast(LHSExpr->IgnoreImpCasts()); StringRefExpr = RHSExpr; } if (!CharExpr || !StringRefExpr) return; const QualType StringType = StringRefExpr->getType(); // Return if not a PointerType. if (!StringType->isAnyPointerType()) return; // Return if not a CharacterType. if (!StringType->getPointeeType()->isAnyCharacterType()) return; ASTContext &Ctx = Self.getASTContext(); SourceRange DiagRange(LHSExpr->getBeginLoc(), RHSExpr->getEndLoc()); const QualType CharType = CharExpr->getType(); if (!CharType->isAnyCharacterType() && CharType->isIntegerType() && llvm::isUIntN(Ctx.getCharWidth(), CharExpr->getValue())) { Self.Diag(OpLoc, diag::warn_string_plus_char) << DiagRange << Ctx.CharTy; } else { Self.Diag(OpLoc, diag::warn_string_plus_char) << DiagRange << CharExpr->getType(); } // Only print a fixit for str + char, not for char + str. 
if (isa(RHSExpr->IgnoreImpCasts())) { SourceLocation EndLoc = Self.getLocForEndOfToken(RHSExpr->getEndLoc()); Self.Diag(OpLoc, diag::note_string_plus_scalar_silence) << FixItHint::CreateInsertion(LHSExpr->getBeginLoc(), "&") << FixItHint::CreateReplacement(SourceRange(OpLoc), "[") << FixItHint::CreateInsertion(EndLoc, "]"); } else { Self.Diag(OpLoc, diag::note_string_plus_scalar_silence); } } /// Emit error when two pointers are incompatible. static void diagnosePointerIncompatibility(Sema &S, SourceLocation Loc, Expr *LHSExpr, Expr *RHSExpr) { assert(LHSExpr->getType()->isAnyPointerType()); assert(RHSExpr->getType()->isAnyPointerType()); S.Diag(Loc, diag::err_typecheck_sub_ptr_compatible) << LHSExpr->getType() << RHSExpr->getType() << LHSExpr->getSourceRange() << RHSExpr->getSourceRange(); } // C99 6.5.6 QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy) { checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false); if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { QualType compType = CheckVectorOperands( LHS, RHS, Loc, CompLHSTy, /*AllowBothBool*/getLangOpts().AltiVec, /*AllowBoolConversions*/getLangOpts().ZVector); if (CompLHSTy) *CompLHSTy = compType; return compType; } if (LHS.get()->getType()->isConstantMatrixType() || RHS.get()->getType()->isConstantMatrixType()) { return CheckMatrixElementwiseOperands(LHS, RHS, Loc, CompLHSTy); } QualType compType = UsualArithmeticConversions( LHS, RHS, Loc, CompLHSTy ? ACK_CompAssign : ACK_Arithmetic); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); // Diagnose "string literal" '+' int and string '+' "char literal". if (Opc == BO_Add) { diagnoseStringPlusInt(*this, Loc, LHS.get(), RHS.get()); diagnoseStringPlusChar(*this, Loc, LHS.get(), RHS.get()); } // handle the common case first (both operands are arithmetic). if (!compType.isNull() && compType->isArithmeticType()) { if (CompLHSTy) *CompLHSTy = compType; return compType; } // Type-checking. Ultimately the pointer's going to be in PExp; // note that we bias towards the LHS being the pointer. Expr *PExp = LHS.get(), *IExp = RHS.get(); bool isObjCPointer; if (PExp->getType()->isPointerType()) { isObjCPointer = false; } else if (PExp->getType()->isObjCObjectPointerType()) { isObjCPointer = true; } else { std::swap(PExp, IExp); if (PExp->getType()->isPointerType()) { isObjCPointer = false; } else if (PExp->getType()->isObjCObjectPointerType()) { isObjCPointer = true; } else { return InvalidOperands(Loc, LHS, RHS); } } assert(PExp->getType()->isAnyPointerType()); if (!IExp->getType()->isIntegerType()) return InvalidOperands(Loc, LHS, RHS); // Adding to a null pointer results in undefined behavior. if (PExp->IgnoreParenCasts()->isNullPointerConstant( Context, Expr::NPC_ValueDependentIsNotNull)) { // In C++ adding zero to a null pointer is defined. Expr::EvalResult KnownVal; if (!getLangOpts().CPlusPlus || (!IExp->isValueDependent() && (!IExp->EvaluateAsInt(KnownVal, Context) || KnownVal.Val.getInt() != 0))) { // Check the conditions to see if this is the 'p = nullptr + n' idiom. 
bool IsGNUIdiom = BinaryOperator::isNullPointerArithmeticExtension( Context, BO_Add, PExp, IExp); diagnoseArithmeticOnNullPointer(*this, Loc, PExp, IsGNUIdiom); } } if (!checkArithmeticOpPointerOperand(*this, Loc, PExp)) return QualType(); if (isObjCPointer && checkArithmeticOnObjCPointer(*this, Loc, PExp)) return QualType(); // Check array bounds for pointer arithemtic CheckArrayAccess(PExp, IExp); if (CompLHSTy) { QualType LHSTy = Context.isPromotableBitField(LHS.get()); if (LHSTy.isNull()) { LHSTy = LHS.get()->getType(); if (LHSTy->isPromotableIntegerType()) LHSTy = Context.getPromotedIntegerType(LHSTy); } *CompLHSTy = LHSTy; } return PExp->getType(); } // C99 6.5.6 QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy) { checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false); if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { QualType compType = CheckVectorOperands( LHS, RHS, Loc, CompLHSTy, /*AllowBothBool*/getLangOpts().AltiVec, /*AllowBoolConversions*/getLangOpts().ZVector); if (CompLHSTy) *CompLHSTy = compType; return compType; } if (LHS.get()->getType()->isConstantMatrixType() || RHS.get()->getType()->isConstantMatrixType()) { return CheckMatrixElementwiseOperands(LHS, RHS, Loc, CompLHSTy); } QualType compType = UsualArithmeticConversions( LHS, RHS, Loc, CompLHSTy ? ACK_CompAssign : ACK_Arithmetic); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); // Enforce type constraints: C99 6.5.6p3. // Handle the common case first (both operands are arithmetic). if (!compType.isNull() && compType->isArithmeticType()) { if (CompLHSTy) *CompLHSTy = compType; return compType; } // Either ptr - int or ptr - ptr. if (LHS.get()->getType()->isAnyPointerType()) { QualType lpointee = LHS.get()->getType()->getPointeeType(); // Diagnose bad cases where we step over interface counts. if (LHS.get()->getType()->isObjCObjectPointerType() && checkArithmeticOnObjCPointer(*this, Loc, LHS.get())) return QualType(); // The result type of a pointer-int computation is the pointer type. if (RHS.get()->getType()->isIntegerType()) { // Subtracting from a null pointer should produce a warning. // The last argument to the diagnose call says this doesn't match the // GNU int-to-pointer idiom. if (LHS.get()->IgnoreParenCasts()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull)) { // In C++ adding zero to a null pointer is defined. Expr::EvalResult KnownVal; if (!getLangOpts().CPlusPlus || (!RHS.get()->isValueDependent() && (!RHS.get()->EvaluateAsInt(KnownVal, Context) || KnownVal.Val.getInt() != 0))) { diagnoseArithmeticOnNullPointer(*this, Loc, LHS.get(), false); } } if (!checkArithmeticOpPointerOperand(*this, Loc, LHS.get())) return QualType(); // Check array bounds for pointer arithemtic CheckArrayAccess(LHS.get(), RHS.get(), /*ArraySubscriptExpr*/nullptr, /*AllowOnePastEnd*/true, /*IndexNegated*/true); if (CompLHSTy) *CompLHSTy = LHS.get()->getType(); return LHS.get()->getType(); } // Handle pointer-pointer subtractions. 
if (const PointerType *RHSPTy = RHS.get()->getType()->getAs()) { QualType rpointee = RHSPTy->getPointeeType(); if (getLangOpts().CPlusPlus) { // Pointee types must be the same: C++ [expr.add] if (!Context.hasSameUnqualifiedType(lpointee, rpointee)) { diagnosePointerIncompatibility(*this, Loc, LHS.get(), RHS.get()); } } else { // Pointee types must be compatible C99 6.5.6p3 if (!Context.typesAreCompatible( Context.getCanonicalType(lpointee).getUnqualifiedType(), Context.getCanonicalType(rpointee).getUnqualifiedType())) { diagnosePointerIncompatibility(*this, Loc, LHS.get(), RHS.get()); return QualType(); } } if (!checkArithmeticBinOpPointerOperands(*this, Loc, LHS.get(), RHS.get())) return QualType(); // FIXME: Add warnings for nullptr - ptr. // The pointee type may have zero size. As an extension, a structure or // union may have zero size or an array may have zero length. In this // case subtraction does not make sense. if (!rpointee->isVoidType() && !rpointee->isFunctionType()) { CharUnits ElementSize = Context.getTypeSizeInChars(rpointee); if (ElementSize.isZero()) { Diag(Loc,diag::warn_sub_ptr_zero_size_types) << rpointee.getUnqualifiedType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } } if (CompLHSTy) *CompLHSTy = LHS.get()->getType(); return Context.getPointerDiffType(); } } return InvalidOperands(Loc, LHS, RHS); } static bool isScopedEnumerationType(QualType T) { if (const EnumType *ET = T->getAs()) return ET->getDecl()->isScoped(); return false; } static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType LHSType) { // OpenCL 6.3j: shift values are effectively % word size of LHS (more defined), // so skip remaining warnings as we don't want to modify values within Sema. if (S.getLangOpts().OpenCL) return; // Check right/shifter operand Expr::EvalResult RHSResult; if (RHS.get()->isValueDependent() || !RHS.get()->EvaluateAsInt(RHSResult, S.Context)) return; llvm::APSInt Right = RHSResult.Val.getInt(); if (Right.isNegative()) { S.DiagRuntimeBehavior(Loc, RHS.get(), S.PDiag(diag::warn_shift_negative) << RHS.get()->getSourceRange()); return; } QualType LHSExprType = LHS.get()->getType(); uint64_t LeftSize = LHSExprType->isExtIntType() ? S.Context.getIntWidth(LHSExprType) : S.Context.getTypeSize(LHSExprType); llvm::APInt LeftBits(Right.getBitWidth(), LeftSize); if (Right.uge(LeftBits)) { S.DiagRuntimeBehavior(Loc, RHS.get(), S.PDiag(diag::warn_shift_gt_typewidth) << RHS.get()->getSourceRange()); return; } if (Opc != BO_Shl) return; // When left shifting an ICE which is signed, we can check for overflow which // according to C++ standards prior to C++2a has undefined behavior // ([expr.shift] 5.8/2). Unsigned integers have defined behavior modulo one // more than the maximum value representable in the result type, so never // warn for those. (FIXME: Unsigned left-shift overflow in a constant // expression is still probably a bug.) Expr::EvalResult LHSResult; if (LHS.get()->isValueDependent() || LHSType->hasUnsignedIntegerRepresentation() || !LHS.get()->EvaluateAsInt(LHSResult, S.Context)) return; llvm::APSInt Left = LHSResult.Val.getInt(); // If LHS does not have a signed type and non-negative value // then, the behavior is undefined before C++2a. Warn about it. 
if (Left.isNegative() && !S.getLangOpts().isSignedOverflowDefined() && !S.getLangOpts().CPlusPlus20) { S.DiagRuntimeBehavior(Loc, LHS.get(), S.PDiag(diag::warn_shift_lhs_negative) << LHS.get()->getSourceRange()); return; } llvm::APInt ResultBits = static_cast(Right) + Left.getMinSignedBits(); if (LeftBits.uge(ResultBits)) return; llvm::APSInt Result = Left.extend(ResultBits.getLimitedValue()); Result = Result.shl(Right); // Print the bit representation of the signed integer as an unsigned // hexadecimal number. SmallString<40> HexResult; Result.toString(HexResult, 16, /*Signed =*/false, /*Literal =*/true); // If we are only missing a sign bit, this is less likely to result in actual // bugs -- if the result is cast back to an unsigned type, it will have the // expected value. Thus we place this behind a different warning that can be // turned off separately if needed. if (LeftBits == ResultBits - 1) { S.Diag(Loc, diag::warn_shift_result_sets_sign_bit) << HexResult << LHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return; } S.Diag(Loc, diag::warn_shift_result_gt_typewidth) << HexResult.str() << Result.getMinSignedBits() << LHSType << Left.getBitWidth() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } /// Return the resulting type when a vector is shifted /// by a scalar or vector shift amount. static QualType checkVectorShift(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) { // OpenCL v1.1 s6.3.j says RHS can be a vector only if LHS is a vector. if ((S.LangOpts.OpenCL || S.LangOpts.ZVector) && !LHS.get()->getType()->isVectorType()) { S.Diag(Loc, diag::err_shift_rhs_only_vector) << RHS.get()->getType() << LHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } if (!IsCompAssign) { LHS = S.UsualUnaryConversions(LHS.get()); if (LHS.isInvalid()) return QualType(); } RHS = S.UsualUnaryConversions(RHS.get()); if (RHS.isInvalid()) return QualType(); QualType LHSType = LHS.get()->getType(); // Note that LHS might be a scalar because the routine calls not only in // OpenCL case. const VectorType *LHSVecTy = LHSType->getAs(); QualType LHSEleType = LHSVecTy ? LHSVecTy->getElementType() : LHSType; // Note that RHS might not be a vector. QualType RHSType = RHS.get()->getType(); const VectorType *RHSVecTy = RHSType->getAs(); QualType RHSEleType = RHSVecTy ? RHSVecTy->getElementType() : RHSType; // The operands need to be integers. if (!LHSEleType->isIntegerType()) { S.Diag(Loc, diag::err_typecheck_expect_int) << LHS.get()->getType() << LHS.get()->getSourceRange(); return QualType(); } if (!RHSEleType->isIntegerType()) { S.Diag(Loc, diag::err_typecheck_expect_int) << RHS.get()->getType() << RHS.get()->getSourceRange(); return QualType(); } if (!LHSVecTy) { assert(RHSVecTy); if (IsCompAssign) return RHSType; if (LHSEleType != RHSEleType) { LHS = S.ImpCastExprToType(LHS.get(),RHSEleType, CK_IntegralCast); LHSEleType = RHSEleType; } QualType VecTy = S.Context.getExtVectorType(LHSEleType, RHSVecTy->getNumElements()); LHS = S.ImpCastExprToType(LHS.get(), VecTy, CK_VectorSplat); LHSType = VecTy; } else if (RHSVecTy) { // OpenCL v1.1 s6.3.j says that for vector types, the operators // are applied component-wise. So if RHS is a vector, then ensure // that the number of elements is the same as LHS... 
if (RHSVecTy->getNumElements() != LHSVecTy->getNumElements()) { S.Diag(Loc, diag::err_typecheck_vector_lengths_not_equal) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } if (!S.LangOpts.OpenCL && !S.LangOpts.ZVector) { const BuiltinType *LHSBT = LHSEleType->getAs(); const BuiltinType *RHSBT = RHSEleType->getAs(); if (LHSBT != RHSBT && S.Context.getTypeSize(LHSBT) != S.Context.getTypeSize(RHSBT)) { S.Diag(Loc, diag::warn_typecheck_vector_element_sizes_not_equal) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } } } else { // ...else expand RHS to match the number of elements in LHS. QualType VecTy = S.Context.getExtVectorType(RHSEleType, LHSVecTy->getNumElements()); RHS = S.ImpCastExprToType(RHS.get(), VecTy, CK_VectorSplat); } return LHSType; } // C99 6.5.7 QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign) { checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false); // Vector shifts promote their scalar inputs to vector type. if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { if (LangOpts.ZVector) { // The shift operators for the z vector extensions work basically // like general shifts, except that neither the LHS nor the RHS is // allowed to be a "vector bool". if (auto LHSVecType = LHS.get()->getType()->getAs()) if (LHSVecType->getVectorKind() == VectorType::AltiVecBool) return InvalidOperands(Loc, LHS, RHS); if (auto RHSVecType = RHS.get()->getType()->getAs()) if (RHSVecType->getVectorKind() == VectorType::AltiVecBool) return InvalidOperands(Loc, LHS, RHS); } return checkVectorShift(*this, LHS, RHS, Loc, IsCompAssign); } // Shifts don't perform usual arithmetic conversions, they just do integer // promotions on each operand. C99 6.5.7p3 // For the LHS, do usual unary conversions, but then reset them away // if this is a compound assignment. ExprResult OldLHS = LHS; LHS = UsualUnaryConversions(LHS.get()); if (LHS.isInvalid()) return QualType(); QualType LHSType = LHS.get()->getType(); if (IsCompAssign) LHS = OldLHS; // The RHS is simpler. RHS = UsualUnaryConversions(RHS.get()); if (RHS.isInvalid()) return QualType(); QualType RHSType = RHS.get()->getType(); // C99 6.5.7p2: Each of the operands shall have integer type. if (!LHSType->hasIntegerRepresentation() || !RHSType->hasIntegerRepresentation()) return InvalidOperands(Loc, LHS, RHS); // C++0x: Don't allow scoped enums. FIXME: Use something better than // hasIntegerRepresentation() above instead of this. if (isScopedEnumerationType(LHSType) || isScopedEnumerationType(RHSType)) { return InvalidOperands(Loc, LHS, RHS); } // Sanity-check shift operands DiagnoseBadShiftValues(*this, LHS, RHS, Loc, Opc, LHSType); // "The type of the result is that of the promoted left operand." return LHSType; } /// Diagnose bad pointer comparisons. static void diagnoseDistinctPointerComparison(Sema &S, SourceLocation Loc, ExprResult &LHS, ExprResult &RHS, bool IsError) { S.Diag(Loc, IsError ? diag::err_typecheck_comparison_of_distinct_pointers : diag::ext_typecheck_comparison_of_distinct_pointers) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } /// Returns false if the pointers are converted to a composite type, /// true otherwise. 
static bool convertPointersToCompositeType(Sema &S, SourceLocation Loc,
                                           ExprResult &LHS, ExprResult &RHS) {
  // C++ [expr.rel]p2:
  //   [...] Pointer conversions (4.10) and qualification
  //   conversions (4.4) are performed on pointer operands (or on
  //   a pointer operand and a null pointer constant) to bring
  //   them to their composite pointer type. [...]
  //
  // C++ [expr.eq]p1 uses the same notion for (in)equality
  // comparisons of pointers.

  QualType LHSType = LHS.get()->getType();
  QualType RHSType = RHS.get()->getType();
  assert(LHSType->isPointerType() || RHSType->isPointerType() ||
         LHSType->isMemberPointerType() || RHSType->isMemberPointerType());

  QualType T = S.FindCompositePointerType(Loc, LHS, RHS);
  if (T.isNull()) {
    if ((LHSType->isAnyPointerType() || LHSType->isMemberPointerType()) &&
        (RHSType->isAnyPointerType() || RHSType->isMemberPointerType()))
      diagnoseDistinctPointerComparison(S, Loc, LHS, RHS, /*isError*/true);
    else
      S.InvalidOperands(Loc, LHS, RHS);
    return true;
  }

  return false;
}

static void diagnoseFunctionPointerToVoidComparison(Sema &S, SourceLocation Loc,
                                                    ExprResult &LHS,
                                                    ExprResult &RHS,
                                                    bool IsError) {
  S.Diag(Loc, IsError ? diag::err_typecheck_comparison_of_fptr_to_void
                      : diag::ext_typecheck_comparison_of_fptr_to_void)
      << LHS.get()->getType() << RHS.get()->getType()
      << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
}

static bool isObjCObjectLiteral(ExprResult &E) {
  switch (E.get()->IgnoreParenImpCasts()->getStmtClass()) {
  case Stmt::ObjCArrayLiteralClass:
  case Stmt::ObjCDictionaryLiteralClass:
  case Stmt::ObjCStringLiteralClass:
  case Stmt::ObjCBoxedExprClass:
    return true;
  default:
    // Note that ObjCBoolLiteral is NOT an object literal!
    return false;
  }
}

static bool hasIsEqualMethod(Sema &S, const Expr *LHS, const Expr *RHS) {
  const ObjCObjectPointerType *Type =
      LHS->getType()->getAs<ObjCObjectPointerType>();

  // If this is not actually an Objective-C object, bail out.
  if (!Type)
    return false;

  // Get the LHS object's interface type.
  QualType InterfaceType = Type->getPointeeType();

  // If the RHS isn't an Objective-C object, bail out.
  if (!RHS->getType()->isObjCObjectPointerType())
    return false;

  // Try to find the -isEqual: method.
  Selector IsEqualSel = S.NSAPIObj->getIsEqualSelector();
  ObjCMethodDecl *Method = S.LookupMethodInObjectType(IsEqualSel,
                                                      InterfaceType,
                                                      /*IsInstance=*/true);
  if (!Method) {
    if (Type->isObjCIdType()) {
      // For 'id', just check the global pool.
      Method = S.LookupInstanceMethodInGlobalPool(IsEqualSel, SourceRange(),
                                                  /*receiverId=*/true);
    } else {
      // Check protocols.
Method = S.LookupMethodInQualifiedType(IsEqualSel, Type, /*IsInstance=*/true); } } if (!Method) return false; QualType T = Method->parameters()[0]->getType(); if (!T->isObjCObjectPointerType()) return false; QualType R = Method->getReturnType(); if (!R->isScalarType()) return false; return true; } Sema::ObjCLiteralKind Sema::CheckLiteralKind(Expr *FromE) { FromE = FromE->IgnoreParenImpCasts(); switch (FromE->getStmtClass()) { default: break; case Stmt::ObjCStringLiteralClass: // "string literal" return LK_String; case Stmt::ObjCArrayLiteralClass: // "array literal" return LK_Array; case Stmt::ObjCDictionaryLiteralClass: // "dictionary literal" return LK_Dictionary; case Stmt::BlockExprClass: return LK_Block; case Stmt::ObjCBoxedExprClass: { Expr *Inner = cast(FromE)->getSubExpr()->IgnoreParens(); switch (Inner->getStmtClass()) { case Stmt::IntegerLiteralClass: case Stmt::FloatingLiteralClass: case Stmt::CharacterLiteralClass: case Stmt::ObjCBoolLiteralExprClass: case Stmt::CXXBoolLiteralExprClass: // "numeric literal" return LK_Numeric; case Stmt::ImplicitCastExprClass: { CastKind CK = cast(Inner)->getCastKind(); // Boolean literals can be represented by implicit casts. if (CK == CK_IntegralToBoolean || CK == CK_IntegralCast) return LK_Numeric; break; } default: break; } return LK_Boxed; } } return LK_None; } static void diagnoseObjCLiteralComparison(Sema &S, SourceLocation Loc, ExprResult &LHS, ExprResult &RHS, BinaryOperator::Opcode Opc){ Expr *Literal; Expr *Other; if (isObjCObjectLiteral(LHS)) { Literal = LHS.get(); Other = RHS.get(); } else { Literal = RHS.get(); Other = LHS.get(); } // Don't warn on comparisons against nil. Other = Other->IgnoreParenCasts(); if (Other->isNullPointerConstant(S.getASTContext(), Expr::NPC_ValueDependentIsNotNull)) return; // This should be kept in sync with warn_objc_literal_comparison. // LK_String should always be after the other literals, since it has its own // warning flag. Sema::ObjCLiteralKind LiteralKind = S.CheckLiteralKind(Literal); assert(LiteralKind != Sema::LK_Block); if (LiteralKind == Sema::LK_None) { llvm_unreachable("Unknown Objective-C object literal kind"); } if (LiteralKind == Sema::LK_String) S.Diag(Loc, diag::warn_objc_string_literal_comparison) << Literal->getSourceRange(); else S.Diag(Loc, diag::warn_objc_literal_comparison) << LiteralKind << Literal->getSourceRange(); if (BinaryOperator::isEqualityOp(Opc) && hasIsEqualMethod(S, LHS.get(), RHS.get())) { SourceLocation Start = LHS.get()->getBeginLoc(); SourceLocation End = S.getLocForEndOfToken(RHS.get()->getEndLoc()); CharSourceRange OpRange = CharSourceRange::getCharRange(Loc, S.getLocForEndOfToken(Loc)); S.Diag(Loc, diag::note_objc_literal_comparison_isequal) << FixItHint::CreateInsertion(Start, Opc == BO_EQ ? "[" : "![") << FixItHint::CreateReplacement(OpRange, " isEqual:") << FixItHint::CreateInsertion(End, "]"); } } /// Warns on !x < y, !x & y where !(x < y), !(x & y) was probably intended. static void diagnoseLogicalNotOnLHSofCheck(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc) { // Check that left hand side is !something. UnaryOperator *UO = dyn_cast(LHS.get()->IgnoreImpCasts()); if (!UO || UO->getOpcode() != UO_LNot) return; // Only check if the right hand side is non-bool arithmetic type. if (RHS.get()->isKnownToHaveBooleanValue()) return; // Make sure that the something in !something is not bool. Expr *SubExpr = UO->getSubExpr()->IgnoreImpCasts(); if (SubExpr->isKnownToHaveBooleanValue()) return; // Emit warning. 
bool IsBitwiseOp = Opc == BO_And || Opc == BO_Or || Opc == BO_Xor; S.Diag(UO->getOperatorLoc(), diag::warn_logical_not_on_lhs_of_check) << Loc << IsBitwiseOp; // First note suggest !(x < y) SourceLocation FirstOpen = SubExpr->getBeginLoc(); SourceLocation FirstClose = RHS.get()->getEndLoc(); FirstClose = S.getLocForEndOfToken(FirstClose); if (FirstClose.isInvalid()) FirstOpen = SourceLocation(); S.Diag(UO->getOperatorLoc(), diag::note_logical_not_fix) << IsBitwiseOp << FixItHint::CreateInsertion(FirstOpen, "(") << FixItHint::CreateInsertion(FirstClose, ")"); // Second note suggests (!x) < y SourceLocation SecondOpen = LHS.get()->getBeginLoc(); SourceLocation SecondClose = LHS.get()->getEndLoc(); SecondClose = S.getLocForEndOfToken(SecondClose); if (SecondClose.isInvalid()) SecondOpen = SourceLocation(); S.Diag(UO->getOperatorLoc(), diag::note_logical_not_silence_with_parens) << FixItHint::CreateInsertion(SecondOpen, "(") << FixItHint::CreateInsertion(SecondClose, ")"); } // Returns true if E refers to a non-weak array. static bool checkForArray(const Expr *E) { const ValueDecl *D = nullptr; if (const DeclRefExpr *DR = dyn_cast(E)) { D = DR->getDecl(); } else if (const MemberExpr *Mem = dyn_cast(E)) { if (Mem->isImplicitAccess()) D = Mem->getMemberDecl(); } if (!D) return false; return D->getType()->isArrayType() && !D->isWeak(); } /// Diagnose some forms of syntactically-obvious tautological comparison. static void diagnoseTautologicalComparison(Sema &S, SourceLocation Loc, Expr *LHS, Expr *RHS, BinaryOperatorKind Opc) { Expr *LHSStripped = LHS->IgnoreParenImpCasts(); Expr *RHSStripped = RHS->IgnoreParenImpCasts(); QualType LHSType = LHS->getType(); QualType RHSType = RHS->getType(); if (LHSType->hasFloatingRepresentation() || (LHSType->isBlockPointerType() && !BinaryOperator::isEqualityOp(Opc)) || S.inTemplateInstantiation()) return; // Comparisons between two array types are ill-formed for operator<=>, so // we shouldn't emit any additional warnings about it. if (Opc == BO_Cmp && LHSType->isArrayType() && RHSType->isArrayType()) return; // For non-floating point types, check for self-comparisons of the form // x == x, x != x, x < x, etc. These always evaluate to a constant, and // often indicate logic errors in the program. // // NOTE: Don't warn about comparison expressions resulting from macro // expansion. Also don't warn about comparisons which are only self // comparisons within a template instantiation. The warnings should catch // obvious cases in the definition of the template anyways. The idea is to // warn when the typed comparison operator will always evaluate to the same // result. // Used for indexing into %select in warn_comparison_always enum { AlwaysConstant, AlwaysTrue, AlwaysFalse, AlwaysEqual, // std::strong_ordering::equal from operator<=> }; // C++2a [depr.array.comp]: // Equality and relational comparisons ([expr.eq], [expr.rel]) between two // operands of array type are deprecated. if (S.getLangOpts().CPlusPlus20 && LHSStripped->getType()->isArrayType() && RHSStripped->getType()->isArrayType()) { S.Diag(Loc, diag::warn_depr_array_comparison) << LHS->getSourceRange() << RHS->getSourceRange() << LHSStripped->getType() << RHSStripped->getType(); // Carry on to produce the tautological comparison warning, if this // expression is potentially-evaluated, we can resolve the array to a // non-weak declaration, and so on. 
} if (!LHS->getBeginLoc().isMacroID() && !RHS->getBeginLoc().isMacroID()) { if (Expr::isSameComparisonOperand(LHS, RHS)) { unsigned Result; switch (Opc) { case BO_EQ: case BO_LE: case BO_GE: Result = AlwaysTrue; break; case BO_NE: case BO_LT: case BO_GT: Result = AlwaysFalse; break; case BO_Cmp: Result = AlwaysEqual; break; default: Result = AlwaysConstant; break; } S.DiagRuntimeBehavior(Loc, nullptr, S.PDiag(diag::warn_comparison_always) << 0 /*self-comparison*/ << Result); } else if (checkForArray(LHSStripped) && checkForArray(RHSStripped)) { // What is it always going to evaluate to? unsigned Result; switch (Opc) { case BO_EQ: // e.g. array1 == array2 Result = AlwaysFalse; break; case BO_NE: // e.g. array1 != array2 Result = AlwaysTrue; break; default: // e.g. array1 <= array2 // The best we can say is 'a constant' Result = AlwaysConstant; break; } S.DiagRuntimeBehavior(Loc, nullptr, S.PDiag(diag::warn_comparison_always) << 1 /*array comparison*/ << Result); } } if (isa(LHSStripped)) LHSStripped = LHSStripped->IgnoreParenCasts(); if (isa(RHSStripped)) RHSStripped = RHSStripped->IgnoreParenCasts(); // Warn about comparisons against a string constant (unless the other // operand is null); the user probably wants string comparison function. Expr *LiteralString = nullptr; Expr *LiteralStringStripped = nullptr; if ((isa(LHSStripped) || isa(LHSStripped)) && !RHSStripped->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNull)) { LiteralString = LHS; LiteralStringStripped = LHSStripped; } else if ((isa(RHSStripped) || isa(RHSStripped)) && !LHSStripped->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNull)) { LiteralString = RHS; LiteralStringStripped = RHSStripped; } if (LiteralString) { S.DiagRuntimeBehavior(Loc, nullptr, S.PDiag(diag::warn_stringcompare) << isa(LiteralStringStripped) << LiteralString->getSourceRange()); } } static ImplicitConversionKind castKindToImplicitConversionKind(CastKind CK) { switch (CK) { default: { #ifndef NDEBUG llvm::errs() << "unhandled cast kind: " << CastExpr::getCastKindName(CK) << "\n"; #endif llvm_unreachable("unhandled cast kind"); } case CK_UserDefinedConversion: return ICK_Identity; case CK_LValueToRValue: return ICK_Lvalue_To_Rvalue; case CK_ArrayToPointerDecay: return ICK_Array_To_Pointer; case CK_FunctionToPointerDecay: return ICK_Function_To_Pointer; case CK_IntegralCast: return ICK_Integral_Conversion; case CK_FloatingCast: return ICK_Floating_Conversion; case CK_IntegralToFloating: case CK_FloatingToIntegral: return ICK_Floating_Integral; case CK_IntegralComplexCast: case CK_FloatingComplexCast: case CK_FloatingComplexToIntegralComplex: case CK_IntegralComplexToFloatingComplex: return ICK_Complex_Conversion; case CK_FloatingComplexToReal: case CK_FloatingRealToComplex: case CK_IntegralComplexToReal: case CK_IntegralRealToComplex: return ICK_Complex_Real; } } static bool checkThreeWayNarrowingConversion(Sema &S, QualType ToType, Expr *E, QualType FromType, SourceLocation Loc) { // Check for a narrowing implicit conversion. 
StandardConversionSequence SCS; SCS.setAsIdentityConversion(); SCS.setToType(0, FromType); SCS.setToType(1, ToType); if (const auto *ICE = dyn_cast(E)) SCS.Second = castKindToImplicitConversionKind(ICE->getCastKind()); APValue PreNarrowingValue; QualType PreNarrowingType; switch (SCS.getNarrowingKind(S.Context, E, PreNarrowingValue, PreNarrowingType, /*IgnoreFloatToIntegralConversion*/ true)) { case NK_Dependent_Narrowing: // Implicit conversion to a narrower type, but the expression is // value-dependent so we can't tell whether it's actually narrowing. case NK_Not_Narrowing: return false; case NK_Constant_Narrowing: // Implicit conversion to a narrower type, and the value is not a constant // expression. S.Diag(E->getBeginLoc(), diag::err_spaceship_argument_narrowing) << /*Constant*/ 1 << PreNarrowingValue.getAsString(S.Context, PreNarrowingType) << ToType; return true; case NK_Variable_Narrowing: // Implicit conversion to a narrower type, and the value is not a constant // expression. case NK_Type_Narrowing: S.Diag(E->getBeginLoc(), diag::err_spaceship_argument_narrowing) << /*Constant*/ 0 << FromType << ToType; // TODO: It's not a constant expression, but what if the user intended it // to be? Can we produce notes to help them figure out why it isn't? return true; } llvm_unreachable("unhandled case in switch"); } static QualType checkArithmeticOrEnumeralThreeWayCompare(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc) { QualType LHSType = LHS.get()->getType(); QualType RHSType = RHS.get()->getType(); // Dig out the original argument type and expression before implicit casts // were applied. These are the types/expressions we need to check the // [expr.spaceship] requirements against. ExprResult LHSStripped = LHS.get()->IgnoreParenImpCasts(); ExprResult RHSStripped = RHS.get()->IgnoreParenImpCasts(); QualType LHSStrippedType = LHSStripped.get()->getType(); QualType RHSStrippedType = RHSStripped.get()->getType(); // C++2a [expr.spaceship]p3: If one of the operands is of type bool and the // other is not, the program is ill-formed. if (LHSStrippedType->isBooleanType() != RHSStrippedType->isBooleanType()) { S.InvalidOperands(Loc, LHSStripped, RHSStripped); return QualType(); } // FIXME: Consider combining this with checkEnumArithmeticConversions. int NumEnumArgs = (int)LHSStrippedType->isEnumeralType() + RHSStrippedType->isEnumeralType(); if (NumEnumArgs == 1) { bool LHSIsEnum = LHSStrippedType->isEnumeralType(); QualType OtherTy = LHSIsEnum ? RHSStrippedType : LHSStrippedType; if (OtherTy->hasFloatingRepresentation()) { S.InvalidOperands(Loc, LHSStripped, RHSStripped); return QualType(); } } if (NumEnumArgs == 2) { // C++2a [expr.spaceship]p5: If both operands have the same enumeration // type E, the operator yields the result of converting the operands // to the underlying type of E and applying <=> to the converted operands. if (!S.Context.hasSameUnqualifiedType(LHSStrippedType, RHSStrippedType)) { S.InvalidOperands(Loc, LHS, RHS); return QualType(); } QualType IntType = LHSStrippedType->castAs()->getDecl()->getIntegerType(); assert(IntType->isArithmeticType()); // We can't use `CK_IntegralCast` when the underlying type is 'bool', so we // promote the boolean type, and all other promotable integer types, to // avoid this. 
if (IntType->isPromotableIntegerType()) IntType = S.Context.getPromotedIntegerType(IntType); LHS = S.ImpCastExprToType(LHS.get(), IntType, CK_IntegralCast); RHS = S.ImpCastExprToType(RHS.get(), IntType, CK_IntegralCast); LHSType = RHSType = IntType; } // C++2a [expr.spaceship]p4: If both operands have arithmetic types, the // usual arithmetic conversions are applied to the operands. QualType Type = S.UsualArithmeticConversions(LHS, RHS, Loc, Sema::ACK_Comparison); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); if (Type.isNull()) return S.InvalidOperands(Loc, LHS, RHS); Optional CCT = getComparisonCategoryForBuiltinCmp(Type); if (!CCT) return S.InvalidOperands(Loc, LHS, RHS); bool HasNarrowing = checkThreeWayNarrowingConversion( S, Type, LHS.get(), LHSType, LHS.get()->getBeginLoc()); HasNarrowing |= checkThreeWayNarrowingConversion(S, Type, RHS.get(), RHSType, RHS.get()->getBeginLoc()); if (HasNarrowing) return QualType(); assert(!Type.isNull() && "composite type for <=> has not been set"); return S.CheckComparisonCategoryType( *CCT, Loc, Sema::ComparisonCategoryUsage::OperatorInExpression); } static QualType checkArithmeticOrEnumeralCompare(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc) { if (Opc == BO_Cmp) return checkArithmeticOrEnumeralThreeWayCompare(S, LHS, RHS, Loc); // C99 6.5.8p3 / C99 6.5.9p4 QualType Type = S.UsualArithmeticConversions(LHS, RHS, Loc, Sema::ACK_Comparison); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); if (Type.isNull()) return S.InvalidOperands(Loc, LHS, RHS); assert(Type->isArithmeticType() || Type->isEnumeralType()); if (Type->isAnyComplexType() && BinaryOperator::isRelationalOp(Opc)) return S.InvalidOperands(Loc, LHS, RHS); // Check for comparisons of floating point operands using != and ==. if (Type->hasFloatingRepresentation() && BinaryOperator::isEqualityOp(Opc)) S.CheckFloatComparison(Loc, LHS.get(), RHS.get()); // The result of comparisons is 'bool' in C++, 'int' in C. return S.Context.getLogicalOperationType(); } void Sema::CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE) { if (!NullE.get()->getType()->isAnyPointerType()) return; int NullValue = PP.isMacroDefined("NULL") ? 0 : 1; if (!E.get()->getType()->isAnyPointerType() && E.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull) == Expr::NPCK_ZeroExpression) { if (const auto *CL = dyn_cast(E.get())) { if (CL->getValue() == 0) Diag(E.get()->getExprLoc(), diag::warn_pointer_compare) << NullValue << FixItHint::CreateReplacement(E.get()->getExprLoc(), NullValue ? "NULL" : "(void *)0"); } else if (const auto *CE = dyn_cast(E.get())) { TypeSourceInfo *TI = CE->getTypeInfoAsWritten(); QualType T = Context.getCanonicalType(TI->getType()).getUnqualifiedType(); if (T == Context.CharTy) Diag(E.get()->getExprLoc(), diag::warn_pointer_compare) << NullValue << FixItHint::CreateReplacement(E.get()->getExprLoc(), NullValue ? 
"NULL" : "(void *)0"); } } } // C99 6.5.8, C++ [expr.rel] QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc) { bool IsRelational = BinaryOperator::isRelationalOp(Opc); bool IsThreeWay = Opc == BO_Cmp; bool IsOrdered = IsRelational || IsThreeWay; auto IsAnyPointerType = [](ExprResult E) { QualType Ty = E.get()->getType(); return Ty->isPointerType() || Ty->isMemberPointerType(); }; // C++2a [expr.spaceship]p6: If at least one of the operands is of pointer // type, array-to-pointer, ..., conversions are performed on both operands to // bring them to their composite type. // Otherwise, all comparisons expect an rvalue, so convert to rvalue before // any type-related checks. if (!IsThreeWay || IsAnyPointerType(LHS) || IsAnyPointerType(RHS)) { LHS = DefaultFunctionArrayLvalueConversion(LHS.get()); if (LHS.isInvalid()) return QualType(); RHS = DefaultFunctionArrayLvalueConversion(RHS.get()); if (RHS.isInvalid()) return QualType(); } else { LHS = DefaultLvalueConversion(LHS.get()); if (LHS.isInvalid()) return QualType(); RHS = DefaultLvalueConversion(RHS.get()); if (RHS.isInvalid()) return QualType(); } checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/true); if (!getLangOpts().CPlusPlus && BinaryOperator::isEqualityOp(Opc)) { CheckPtrComparisonWithNullChar(LHS, RHS); CheckPtrComparisonWithNullChar(RHS, LHS); } // Handle vector comparisons separately. if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) return CheckVectorCompareOperands(LHS, RHS, Loc, Opc); diagnoseLogicalNotOnLHSofCheck(*this, LHS, RHS, Loc, Opc); diagnoseTautologicalComparison(*this, Loc, LHS.get(), RHS.get(), Opc); QualType LHSType = LHS.get()->getType(); QualType RHSType = RHS.get()->getType(); if ((LHSType->isArithmeticType() || LHSType->isEnumeralType()) && (RHSType->isArithmeticType() || RHSType->isEnumeralType())) return checkArithmeticOrEnumeralCompare(*this, LHS, RHS, Loc, Opc); const Expr::NullPointerConstantKind LHSNullKind = LHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull); const Expr::NullPointerConstantKind RHSNullKind = RHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull); bool LHSIsNull = LHSNullKind != Expr::NPCK_NotNull; bool RHSIsNull = RHSNullKind != Expr::NPCK_NotNull; auto computeResultTy = [&]() { if (Opc != BO_Cmp) return Context.getLogicalOperationType(); assert(getLangOpts().CPlusPlus); assert(Context.hasSameType(LHS.get()->getType(), RHS.get()->getType())); QualType CompositeTy = LHS.get()->getType(); assert(!CompositeTy->isReferenceType()); Optional CCT = getComparisonCategoryForBuiltinCmp(CompositeTy); if (!CCT) return InvalidOperands(Loc, LHS, RHS); if (CompositeTy->isPointerType() && LHSIsNull != RHSIsNull) { // P0946R0: Comparisons between a null pointer constant and an object // pointer result in std::strong_equality, which is ill-formed under // P1959R0. Diag(Loc, diag::err_typecheck_three_way_comparison_of_pointer_and_zero) << (LHSIsNull ? 
LHS.get()->getSourceRange() : RHS.get()->getSourceRange()); return QualType(); } return CheckComparisonCategoryType( *CCT, Loc, ComparisonCategoryUsage::OperatorInExpression); }; if (!IsOrdered && LHSIsNull != RHSIsNull) { bool IsEquality = Opc == BO_EQ; if (RHSIsNull) DiagnoseAlwaysNonNullPointer(LHS.get(), RHSNullKind, IsEquality, RHS.get()->getSourceRange()); else DiagnoseAlwaysNonNullPointer(RHS.get(), LHSNullKind, IsEquality, LHS.get()->getSourceRange()); } if ((LHSType->isIntegerType() && !LHSIsNull) || (RHSType->isIntegerType() && !RHSIsNull)) { // Skip normal pointer conversion checks in this case; we have better // diagnostics for this below. } else if (getLangOpts().CPlusPlus) { // Equality comparison of a function pointer to a void pointer is invalid, // but we allow it as an extension. // FIXME: If we really want to allow this, should it be part of composite // pointer type computation so it works in conditionals too? if (!IsOrdered && ((LHSType->isFunctionPointerType() && RHSType->isVoidPointerType()) || (RHSType->isFunctionPointerType() && LHSType->isVoidPointerType()))) { // This is a gcc extension compatibility comparison. // In a SFINAE context, we treat this as a hard error to maintain // conformance with the C++ standard. diagnoseFunctionPointerToVoidComparison( *this, Loc, LHS, RHS, /*isError*/ (bool)isSFINAEContext()); if (isSFINAEContext()) return QualType(); RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast); return computeResultTy(); } // C++ [expr.eq]p2: // If at least one operand is a pointer [...] bring them to their // composite pointer type. // C++ [expr.spaceship]p6 // If at least one of the operands is of pointer type, [...] bring them // to their composite pointer type. // C++ [expr.rel]p2: // If both operands are pointers, [...] bring them to their composite // pointer type. // For <=>, the only valid non-pointer types are arrays and functions, and // we already decayed those, so this is really the same as the relational // comparison rule. if ((int)LHSType->isPointerType() + (int)RHSType->isPointerType() >= (IsOrdered ? 2 : 1) && (!LangOpts.ObjCAutoRefCount || !(LHSType->isObjCObjectPointerType() || RHSType->isObjCObjectPointerType()))) { if (convertPointersToCompositeType(*this, Loc, LHS, RHS)) return QualType(); return computeResultTy(); } } else if (LHSType->isPointerType() && RHSType->isPointerType()) { // C99 6.5.8p2 // All of the following pointer-related warnings are GCC extensions, except // when handling null pointer constants. 
QualType LCanPointeeTy = LHSType->castAs()->getPointeeType().getCanonicalType(); QualType RCanPointeeTy = RHSType->castAs()->getPointeeType().getCanonicalType(); // C99 6.5.9p2 and C99 6.5.8p2 if (Context.typesAreCompatible(LCanPointeeTy.getUnqualifiedType(), RCanPointeeTy.getUnqualifiedType())) { if (IsRelational) { // Pointers both need to point to complete or incomplete types if ((LCanPointeeTy->isIncompleteType() != RCanPointeeTy->isIncompleteType()) && !getLangOpts().C11) { Diag(Loc, diag::ext_typecheck_compare_complete_incomplete_pointers) << LHS.get()->getSourceRange() << RHS.get()->getSourceRange() << LHSType << RHSType << LCanPointeeTy->isIncompleteType() << RCanPointeeTy->isIncompleteType(); } if (LCanPointeeTy->isFunctionType()) { // Valid unless a relational comparison of function pointers Diag(Loc, diag::ext_typecheck_ordered_comparison_of_function_pointers) << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } } } else if (!IsRelational && (LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) { // Valid unless comparison between non-null pointer and function pointer if ((LCanPointeeTy->isFunctionType() || RCanPointeeTy->isFunctionType()) && !LHSIsNull && !RHSIsNull) diagnoseFunctionPointerToVoidComparison(*this, Loc, LHS, RHS, /*isError*/false); } else { // Invalid diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS, /*isError*/false); } if (LCanPointeeTy != RCanPointeeTy) { // Treat NULL constant as a special case in OpenCL. if (getLangOpts().OpenCL && !LHSIsNull && !RHSIsNull) { if (!LCanPointeeTy.isAddressSpaceOverlapping(RCanPointeeTy)) { Diag(Loc, diag::err_typecheck_op_on_nonoverlapping_address_space_pointers) << LHSType << RHSType << 0 /* comparison */ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } } LangAS AddrSpaceL = LCanPointeeTy.getAddressSpace(); LangAS AddrSpaceR = RCanPointeeTy.getAddressSpace(); CastKind Kind = AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast; if (LHSIsNull && !RHSIsNull) LHS = ImpCastExprToType(LHS.get(), RHSType, Kind); else RHS = ImpCastExprToType(RHS.get(), LHSType, Kind); } return computeResultTy(); } if (getLangOpts().CPlusPlus) { // C++ [expr.eq]p4: // Two operands of type std::nullptr_t or one operand of type // std::nullptr_t and the other a null pointer constant compare equal. if (!IsOrdered && LHSIsNull && RHSIsNull) { if (LHSType->isNullPtrType()) { RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer); return computeResultTy(); } if (RHSType->isNullPtrType()) { LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer); return computeResultTy(); } } // Comparison of Objective-C pointers and block pointers against nullptr_t. // These aren't covered by the composite pointer type rules. if (!IsOrdered && RHSType->isNullPtrType() && (LHSType->isObjCObjectPointerType() || LHSType->isBlockPointerType())) { RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer); return computeResultTy(); } if (!IsOrdered && LHSType->isNullPtrType() && (RHSType->isObjCObjectPointerType() || RHSType->isBlockPointerType())) { LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer); return computeResultTy(); } if (IsRelational && ((LHSType->isNullPtrType() && RHSType->isPointerType()) || (RHSType->isNullPtrType() && LHSType->isPointerType()))) { // HACK: Relational comparison of nullptr_t against a pointer type is // invalid per DR583, but we allow it within std::less<> and friends, // since otherwise common uses of it break. 
// FIXME: Consider removing this hack once LWG fixes std::less<> and // friends to have std::nullptr_t overload candidates. DeclContext *DC = CurContext; if (isa(DC)) DC = DC->getParent(); if (auto *CTSD = dyn_cast(DC)) { if (CTSD->isInStdNamespace() && llvm::StringSwitch(CTSD->getName()) .Cases("less", "less_equal", "greater", "greater_equal", true) .Default(false)) { if (RHSType->isNullPtrType()) RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer); else LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer); return computeResultTy(); } } } // C++ [expr.eq]p2: // If at least one operand is a pointer to member, [...] bring them to // their composite pointer type. if (!IsOrdered && (LHSType->isMemberPointerType() || RHSType->isMemberPointerType())) { if (convertPointersToCompositeType(*this, Loc, LHS, RHS)) return QualType(); else return computeResultTy(); } } // Handle block pointer types. if (!IsOrdered && LHSType->isBlockPointerType() && RHSType->isBlockPointerType()) { QualType lpointee = LHSType->castAs()->getPointeeType(); QualType rpointee = RHSType->castAs()->getPointeeType(); if (!LHSIsNull && !RHSIsNull && !Context.typesAreCompatible(lpointee, rpointee)) { Diag(Loc, diag::err_typecheck_comparison_of_distinct_blocks) << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast); return computeResultTy(); } // Allow block pointers to be compared with null pointer constants. if (!IsOrdered && ((LHSType->isBlockPointerType() && RHSType->isPointerType()) || (LHSType->isPointerType() && RHSType->isBlockPointerType()))) { if (!LHSIsNull && !RHSIsNull) { if (!((RHSType->isPointerType() && RHSType->castAs() ->getPointeeType()->isVoidType()) || (LHSType->isPointerType() && LHSType->castAs() ->getPointeeType()->isVoidType()))) Diag(Loc, diag::err_typecheck_comparison_of_distinct_blocks) << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } if (LHSIsNull && !RHSIsNull) LHS = ImpCastExprToType(LHS.get(), RHSType, RHSType->isPointerType() ? CK_BitCast : CK_AnyPointerToBlockPointerCast); else RHS = ImpCastExprToType(RHS.get(), LHSType, LHSType->isPointerType() ? CK_BitCast : CK_AnyPointerToBlockPointerCast); return computeResultTy(); } if (LHSType->isObjCObjectPointerType() || RHSType->isObjCObjectPointerType()) { const PointerType *LPT = LHSType->getAs(); const PointerType *RPT = RHSType->getAs(); if (LPT || RPT) { bool LPtrToVoid = LPT ? LPT->getPointeeType()->isVoidType() : false; bool RPtrToVoid = RPT ? RPT->getPointeeType()->isVoidType() : false; if (!LPtrToVoid && !RPtrToVoid && !Context.typesAreCompatible(LHSType, RHSType)) { diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS, /*isError*/false); } // FIXME: If LPtrToVoid, we should presumably convert the LHS rather than // the RHS, but we have test coverage for this behavior. // FIXME: Consider using convertPointersToCompositeType in C++. if (LHSIsNull && !RHSIsNull) { Expr *E = LHS.get(); if (getLangOpts().ObjCAutoRefCount) CheckObjCConversion(SourceRange(), RHSType, E, CCK_ImplicitConversion); LHS = ImpCastExprToType(E, RHSType, RPT ? CK_BitCast :CK_CPointerToObjCPointerCast); } else { Expr *E = RHS.get(); if (getLangOpts().ObjCAutoRefCount) CheckObjCConversion(SourceRange(), LHSType, E, CCK_ImplicitConversion, /*Diagnose=*/true, /*DiagnoseCFAudited=*/false, Opc); RHS = ImpCastExprToType(E, LHSType, LPT ? 
CK_BitCast :CK_CPointerToObjCPointerCast); } return computeResultTy(); } if (LHSType->isObjCObjectPointerType() && RHSType->isObjCObjectPointerType()) { if (!Context.areComparableObjCPointerTypes(LHSType, RHSType)) diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS, /*isError*/false); if (isObjCObjectLiteral(LHS) || isObjCObjectLiteral(RHS)) diagnoseObjCLiteralComparison(*this, Loc, LHS, RHS, Opc); if (LHSIsNull && !RHSIsNull) LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast); else RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast); return computeResultTy(); } if (!IsOrdered && LHSType->isBlockPointerType() && RHSType->isBlockCompatibleObjCPointerType(Context)) { LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BlockPointerToObjCPointerCast); return computeResultTy(); } else if (!IsOrdered && LHSType->isBlockCompatibleObjCPointerType(Context) && RHSType->isBlockPointerType()) { RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BlockPointerToObjCPointerCast); return computeResultTy(); } } if ((LHSType->isAnyPointerType() && RHSType->isIntegerType()) || (LHSType->isIntegerType() && RHSType->isAnyPointerType())) { unsigned DiagID = 0; bool isError = false; if (LangOpts.DebuggerSupport) { // Under a debugger, allow the comparison of pointers to integers, // since users tend to want to compare addresses. } else if ((LHSIsNull && LHSType->isIntegerType()) || (RHSIsNull && RHSType->isIntegerType())) { if (IsOrdered) { isError = getLangOpts().CPlusPlus; DiagID = isError ? diag::err_typecheck_ordered_comparison_of_pointer_and_zero : diag::ext_typecheck_ordered_comparison_of_pointer_and_zero; } } else if (getLangOpts().CPlusPlus) { DiagID = diag::err_typecheck_comparison_of_pointer_integer; isError = true; } else if (IsOrdered) DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_integer; else DiagID = diag::ext_typecheck_comparison_of_pointer_integer; if (DiagID) { Diag(Loc, DiagID) << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); if (isError) return QualType(); } if (LHSType->isIntegerType()) LHS = ImpCastExprToType(LHS.get(), RHSType, LHSIsNull ? CK_NullToPointer : CK_IntegralToPointer); else RHS = ImpCastExprToType(RHS.get(), LHSType, RHSIsNull ? CK_NullToPointer : CK_IntegralToPointer); return computeResultTy(); } // Handle block pointers. if (!IsOrdered && RHSIsNull && LHSType->isBlockPointerType() && RHSType->isIntegerType()) { RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer); return computeResultTy(); } if (!IsOrdered && LHSIsNull && LHSType->isIntegerType() && RHSType->isBlockPointerType()) { LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer); return computeResultTy(); } if (getLangOpts().OpenCLVersion >= 200 || getLangOpts().OpenCLCPlusPlus) { if (LHSType->isClkEventT() && RHSType->isClkEventT()) { return computeResultTy(); } if (LHSType->isQueueT() && RHSType->isQueueT()) { return computeResultTy(); } if (LHSIsNull && RHSType->isQueueT()) { LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer); return computeResultTy(); } if (LHSType->isQueueT() && RHSIsNull) { RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer); return computeResultTy(); } } return InvalidOperands(Loc, LHS, RHS); } // Return a signed ext_vector_type that is of identical size and number of // elements. For floating point vectors, return an integer type of identical // size and number of elements. 
In the non ext_vector_type case, search from // the largest type to the smallest type to avoid cases where long long == long, // where long gets picked over long long. QualType Sema::GetSignedVectorType(QualType V) { const VectorType *VTy = V->castAs(); unsigned TypeSize = Context.getTypeSize(VTy->getElementType()); if (isa(VTy)) { if (TypeSize == Context.getTypeSize(Context.CharTy)) return Context.getExtVectorType(Context.CharTy, VTy->getNumElements()); else if (TypeSize == Context.getTypeSize(Context.ShortTy)) return Context.getExtVectorType(Context.ShortTy, VTy->getNumElements()); else if (TypeSize == Context.getTypeSize(Context.IntTy)) return Context.getExtVectorType(Context.IntTy, VTy->getNumElements()); else if (TypeSize == Context.getTypeSize(Context.LongTy)) return Context.getExtVectorType(Context.LongTy, VTy->getNumElements()); assert(TypeSize == Context.getTypeSize(Context.LongLongTy) && "Unhandled vector element size in vector compare"); return Context.getExtVectorType(Context.LongLongTy, VTy->getNumElements()); } if (TypeSize == Context.getTypeSize(Context.LongLongTy)) return Context.getVectorType(Context.LongLongTy, VTy->getNumElements(), VectorType::GenericVector); else if (TypeSize == Context.getTypeSize(Context.LongTy)) return Context.getVectorType(Context.LongTy, VTy->getNumElements(), VectorType::GenericVector); else if (TypeSize == Context.getTypeSize(Context.IntTy)) return Context.getVectorType(Context.IntTy, VTy->getNumElements(), VectorType::GenericVector); else if (TypeSize == Context.getTypeSize(Context.ShortTy)) return Context.getVectorType(Context.ShortTy, VTy->getNumElements(), VectorType::GenericVector); assert(TypeSize == Context.getTypeSize(Context.CharTy) && "Unhandled vector element size in vector compare"); return Context.getVectorType(Context.CharTy, VTy->getNumElements(), VectorType::GenericVector); } /// CheckVectorCompareOperands - vector comparisons are a clang extension that /// operates on extended vector types. Instead of producing an IntTy result, /// like a scalar comparison, a vector comparison produces a vector of integer /// types. QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc) { if (Opc == BO_Cmp) { Diag(Loc, diag::err_three_way_vector_comparison); return QualType(); } // Check to make sure we're operating on vectors of the same type and width, // Allowing one side to be a scalar of element type. QualType vType = CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/false, /*AllowBothBool*/true, /*AllowBoolConversions*/getLangOpts().ZVector); if (vType.isNull()) return vType; QualType LHSType = LHS.get()->getType(); // If AltiVec, the comparison results in a numeric type, i.e. // bool for C++, int for C if (getLangOpts().AltiVec && vType->castAs()->getVectorKind() == VectorType::AltiVecVector) return Context.getLogicalOperationType(); // For non-floating point types, check for self-comparisons of the form // x == x, x != x, x < x, etc. These always evaluate to a constant, and // often indicate logic errors in the program. diagnoseTautologicalComparison(*this, Loc, LHS.get(), RHS.get(), Opc); // Check for comparisons of floating point operands using != and ==. if (BinaryOperator::isEqualityOp(Opc) && LHSType->hasFloatingRepresentation()) { assert(RHS.get()->getType()->hasFloatingRepresentation()); CheckFloatComparison(Loc, LHS.get(), RHS.get()); } // Return a signed type for the vector. 
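  // Illustrative example (not from the original source): for an OpenCL-style
  // ext_vector_type comparison such as
  //   typedef float float4 __attribute__((ext_vector_type(4)));
  //   float4 a, b;  ...  a < b ...
  // the checks above produce a vector of four signed 32-bit integers (each
  // lane -1 for true, 0 for false) rather than a scalar bool, which is why a
  // signed integer vector of matching width is returned here.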
return GetSignedVectorType(vType); } static void diagnoseXorMisusedAsPow(Sema &S, const ExprResult &XorLHS, const ExprResult &XorRHS, const SourceLocation Loc) { // Do not diagnose macros. if (Loc.isMacroID()) return; bool Negative = false; bool ExplicitPlus = false; const auto *LHSInt = dyn_cast(XorLHS.get()); const auto *RHSInt = dyn_cast(XorRHS.get()); if (!LHSInt) return; if (!RHSInt) { // Check negative literals. if (const auto *UO = dyn_cast(XorRHS.get())) { UnaryOperatorKind Opc = UO->getOpcode(); if (Opc != UO_Minus && Opc != UO_Plus) return; RHSInt = dyn_cast(UO->getSubExpr()); if (!RHSInt) return; Negative = (Opc == UO_Minus); ExplicitPlus = !Negative; } else { return; } } const llvm::APInt &LeftSideValue = LHSInt->getValue(); llvm::APInt RightSideValue = RHSInt->getValue(); if (LeftSideValue != 2 && LeftSideValue != 10) return; if (LeftSideValue.getBitWidth() != RightSideValue.getBitWidth()) return; CharSourceRange ExprRange = CharSourceRange::getCharRange( LHSInt->getBeginLoc(), S.getLocForEndOfToken(RHSInt->getLocation())); llvm::StringRef ExprStr = Lexer::getSourceText(ExprRange, S.getSourceManager(), S.getLangOpts()); CharSourceRange XorRange = CharSourceRange::getCharRange(Loc, S.getLocForEndOfToken(Loc)); llvm::StringRef XorStr = Lexer::getSourceText(XorRange, S.getSourceManager(), S.getLangOpts()); // Do not diagnose if xor keyword/macro is used. if (XorStr == "xor") return; std::string LHSStr = std::string(Lexer::getSourceText( CharSourceRange::getTokenRange(LHSInt->getSourceRange()), S.getSourceManager(), S.getLangOpts())); std::string RHSStr = std::string(Lexer::getSourceText( CharSourceRange::getTokenRange(RHSInt->getSourceRange()), S.getSourceManager(), S.getLangOpts())); if (Negative) { RightSideValue = -RightSideValue; RHSStr = "-" + RHSStr; } else if (ExplicitPlus) { RHSStr = "+" + RHSStr; } StringRef LHSStrRef = LHSStr; StringRef RHSStrRef = RHSStr; // Do not diagnose literals with digit separators, binary, hexadecimal, octal // literals. if (LHSStrRef.startswith("0b") || LHSStrRef.startswith("0B") || RHSStrRef.startswith("0b") || RHSStrRef.startswith("0B") || LHSStrRef.startswith("0x") || LHSStrRef.startswith("0X") || RHSStrRef.startswith("0x") || RHSStrRef.startswith("0X") || (LHSStrRef.size() > 1 && LHSStrRef.startswith("0")) || (RHSStrRef.size() > 1 && RHSStrRef.startswith("0")) || LHSStrRef.find('\'') != StringRef::npos || RHSStrRef.find('\'') != StringRef::npos) return; bool SuggestXor = S.getLangOpts().CPlusPlus || S.getPreprocessor().isMacroDefined("xor"); const llvm::APInt XorValue = LeftSideValue ^ RightSideValue; int64_t RightSideIntValue = RightSideValue.getSExtValue(); if (LeftSideValue == 2 && RightSideIntValue >= 0) { std::string SuggestedExpr = "1 << " + RHSStr; bool Overflow = false; llvm::APInt One = (LeftSideValue - 1); llvm::APInt PowValue = One.sshl_ov(RightSideValue, Overflow); if (Overflow) { if (RightSideIntValue < 64) S.Diag(Loc, diag::warn_xor_used_as_pow_base) << ExprStr << XorValue.toString(10, true) << ("1LL << " + RHSStr) << FixItHint::CreateReplacement(ExprRange, "1LL << " + RHSStr); else if (RightSideIntValue == 64) S.Diag(Loc, diag::warn_xor_used_as_pow) << ExprStr << XorValue.toString(10, true); else return; } else { S.Diag(Loc, diag::warn_xor_used_as_pow_base_extra) << ExprStr << XorValue.toString(10, true) << SuggestedExpr << PowValue.toString(10, true) << FixItHint::CreateReplacement( ExprRange, (RightSideIntValue == 0) ? 
"1" : SuggestedExpr); } S.Diag(Loc, diag::note_xor_used_as_pow_silence) << ("0x2 ^ " + RHSStr) << SuggestXor; } else if (LeftSideValue == 10) { std::string SuggestedValue = "1e" + std::to_string(RightSideIntValue); S.Diag(Loc, diag::warn_xor_used_as_pow_base) << ExprStr << XorValue.toString(10, true) << SuggestedValue << FixItHint::CreateReplacement(ExprRange, SuggestedValue); S.Diag(Loc, diag::note_xor_used_as_pow_silence) << ("0xA ^ " + RHSStr) << SuggestXor; } } QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc) { // Ensure that either both operands are of the same vector type, or // one operand is of a vector type and the other is of its element type. QualType vType = CheckVectorOperands(LHS, RHS, Loc, false, /*AllowBothBool*/true, /*AllowBoolConversions*/false); if (vType.isNull()) return InvalidOperands(Loc, LHS, RHS); if (getLangOpts().OpenCL && getLangOpts().OpenCLVersion < 120 && !getLangOpts().OpenCLCPlusPlus && vType->hasFloatingRepresentation()) return InvalidOperands(Loc, LHS, RHS); // FIXME: The check for C++ here is for GCC compatibility. GCC rejects the // usage of the logical operators && and || with vectors in C. This // check could be notionally dropped. if (!getLangOpts().CPlusPlus && !(isa(vType->getAs()))) return InvalidLogicalVectorOperands(Loc, LHS, RHS); return GetSignedVectorType(LHS.get()->getType()); } QualType Sema::CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) { if (!IsCompAssign) { LHS = DefaultFunctionArrayLvalueConversion(LHS.get()); if (LHS.isInvalid()) return QualType(); } RHS = DefaultFunctionArrayLvalueConversion(RHS.get()); if (RHS.isInvalid()) return QualType(); // For conversion purposes, we ignore any qualifiers. // For example, "const float" and "float" are equivalent. QualType LHSType = LHS.get()->getType().getUnqualifiedType(); QualType RHSType = RHS.get()->getType().getUnqualifiedType(); const MatrixType *LHSMatType = LHSType->getAs(); const MatrixType *RHSMatType = RHSType->getAs(); assert((LHSMatType || RHSMatType) && "At least one operand must be a matrix"); if (Context.hasSameType(LHSType, RHSType)) return LHSType; // Type conversion may change LHS/RHS. Keep copies to the original results, in // case we have to return InvalidOperands. 
ExprResult OriginalLHS = LHS; ExprResult OriginalRHS = RHS; if (LHSMatType && !RHSMatType) { RHS = tryConvertExprToType(RHS.get(), LHSMatType->getElementType()); if (!RHS.isInvalid()) return LHSType; return InvalidOperands(Loc, OriginalLHS, OriginalRHS); } if (!LHSMatType && RHSMatType) { LHS = tryConvertExprToType(LHS.get(), RHSMatType->getElementType()); if (!LHS.isInvalid()) return RHSType; return InvalidOperands(Loc, OriginalLHS, OriginalRHS); } return InvalidOperands(Loc, LHS, RHS); } QualType Sema::CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) { if (!IsCompAssign) { LHS = DefaultFunctionArrayLvalueConversion(LHS.get()); if (LHS.isInvalid()) return QualType(); } RHS = DefaultFunctionArrayLvalueConversion(RHS.get()); if (RHS.isInvalid()) return QualType(); auto *LHSMatType = LHS.get()->getType()->getAs(); auto *RHSMatType = RHS.get()->getType()->getAs(); assert((LHSMatType || RHSMatType) && "At least one operand must be a matrix"); if (LHSMatType && RHSMatType) { if (LHSMatType->getNumColumns() != RHSMatType->getNumRows()) return InvalidOperands(Loc, LHS, RHS); if (!Context.hasSameType(LHSMatType->getElementType(), RHSMatType->getElementType())) return InvalidOperands(Loc, LHS, RHS); return Context.getConstantMatrixType(LHSMatType->getElementType(), LHSMatType->getNumRows(), RHSMatType->getNumColumns()); } return CheckMatrixElementwiseOperands(LHS, RHS, Loc, IsCompAssign); } inline QualType Sema::CheckBitwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc) { checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false); bool IsCompAssign = Opc == BO_AndAssign || Opc == BO_OrAssign || Opc == BO_XorAssign; if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { if (LHS.get()->getType()->hasIntegerRepresentation() && RHS.get()->getType()->hasIntegerRepresentation()) return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign, /*AllowBothBool*/true, /*AllowBoolConversions*/getLangOpts().ZVector); return InvalidOperands(Loc, LHS, RHS); } if (Opc == BO_And) diagnoseLogicalNotOnLHSofCheck(*this, LHS, RHS, Loc, Opc); if (LHS.get()->getType()->hasFloatingRepresentation() || RHS.get()->getType()->hasFloatingRepresentation()) return InvalidOperands(Loc, LHS, RHS); ExprResult LHSResult = LHS, RHSResult = RHS; QualType compType = UsualArithmeticConversions( LHSResult, RHSResult, Loc, IsCompAssign ? ACK_CompAssign : ACK_BitwiseOp); if (LHSResult.isInvalid() || RHSResult.isInvalid()) return QualType(); LHS = LHSResult.get(); RHS = RHSResult.get(); if (Opc == BO_Xor) diagnoseXorMisusedAsPow(*this, LHS, RHS, Loc); if (!compType.isNull() && compType->isIntegralOrUnscopedEnumerationType()) return compType; return InvalidOperands(Loc, LHS, RHS); } // C99 6.5.[13,14] inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc) { // Check vector operands differently. 
if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) return CheckVectorLogicalOperands(LHS, RHS, Loc); bool EnumConstantInBoolContext = false; for (const ExprResult &HS : {LHS, RHS}) { if (const auto *DREHS = dyn_cast(HS.get())) { const auto *ECDHS = dyn_cast(DREHS->getDecl()); if (ECDHS && ECDHS->getInitVal() != 0 && ECDHS->getInitVal() != 1) EnumConstantInBoolContext = true; } } if (EnumConstantInBoolContext) Diag(Loc, diag::warn_enum_constant_in_bool_context); // Diagnose cases where the user write a logical and/or but probably meant a // bitwise one. We do this when the LHS is a non-bool integer and the RHS // is a constant. if (!EnumConstantInBoolContext && LHS.get()->getType()->isIntegerType() && !LHS.get()->getType()->isBooleanType() && RHS.get()->getType()->isIntegerType() && !RHS.get()->isValueDependent() && // Don't warn in macros or template instantiations. !Loc.isMacroID() && !inTemplateInstantiation()) { // If the RHS can be constant folded, and if it constant folds to something // that isn't 0 or 1 (which indicate a potential logical operation that // happened to fold to true/false) then warn. // Parens on the RHS are ignored. Expr::EvalResult EVResult; if (RHS.get()->EvaluateAsInt(EVResult, Context)) { llvm::APSInt Result = EVResult.Val.getInt(); if ((getLangOpts().Bool && !RHS.get()->getType()->isBooleanType() && !RHS.get()->getExprLoc().isMacroID()) || (Result != 0 && Result != 1)) { Diag(Loc, diag::warn_logical_instead_of_bitwise) << RHS.get()->getSourceRange() << (Opc == BO_LAnd ? "&&" : "||"); // Suggest replacing the logical operator with the bitwise version Diag(Loc, diag::note_logical_instead_of_bitwise_change_operator) << (Opc == BO_LAnd ? "&" : "|") << FixItHint::CreateReplacement(SourceRange( Loc, getLocForEndOfToken(Loc)), Opc == BO_LAnd ? "&" : "|"); if (Opc == BO_LAnd) // Suggest replacing "Foo() && kNonZero" with "Foo()" Diag(Loc, diag::note_logical_instead_of_bitwise_remove_constant) << FixItHint::CreateRemoval( SourceRange(getLocForEndOfToken(LHS.get()->getEndLoc()), RHS.get()->getEndLoc())); } } } if (!Context.getLangOpts().CPlusPlus) { // OpenCL v1.1 s6.3.g: The logical operators and (&&), or (||) do // not operate on the built-in scalar and vector float types. if (Context.getLangOpts().OpenCL && Context.getLangOpts().OpenCLVersion < 120) { if (LHS.get()->getType()->isFloatingType() || RHS.get()->getType()->isFloatingType()) return InvalidOperands(Loc, LHS, RHS); } LHS = UsualUnaryConversions(LHS.get()); if (LHS.isInvalid()) return QualType(); RHS = UsualUnaryConversions(RHS.get()); if (RHS.isInvalid()) return QualType(); if (!LHS.get()->getType()->isScalarType() || !RHS.get()->getType()->isScalarType()) return InvalidOperands(Loc, LHS, RHS); return Context.IntTy; } // The following is safe because we only use this method for // non-overloadable operands. // C++ [expr.log.and]p1 // C++ [expr.log.or]p1 // The operands are both contextually converted to type bool. ExprResult LHSRes = PerformContextuallyConvertToBool(LHS.get()); if (LHSRes.isInvalid()) return InvalidOperands(Loc, LHS, RHS); LHS = LHSRes; ExprResult RHSRes = PerformContextuallyConvertToBool(RHS.get()); if (RHSRes.isInvalid()) return InvalidOperands(Loc, LHS, RHS); RHS = RHSRes; // C++ [expr.log.and]p2 // C++ [expr.log.or]p2 // The result is a bool. 
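  // Illustrative example (not from the original source): this is where C and
  // C++ diverge for && and ||.  The C path above returns Context.IntTy (so
  // sizeof(1 && 1) == sizeof(int)), while in C++ both operands are
  // contextually converted to bool and the whole expression has type bool,
  // as returned below.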
return Context.BoolTy; } static bool IsReadonlyMessage(Expr *E, Sema &S) { const MemberExpr *ME = dyn_cast(E); if (!ME) return false; if (!isa(ME->getMemberDecl())) return false; ObjCMessageExpr *Base = dyn_cast( ME->getBase()->IgnoreImplicit()->IgnoreParenImpCasts()); if (!Base) return false; return Base->getMethodDecl() != nullptr; } /// Is the given expression (which must be 'const') a reference to a /// variable which was originally non-const, but which has become /// 'const' due to being captured within a block? enum NonConstCaptureKind { NCCK_None, NCCK_Block, NCCK_Lambda }; static NonConstCaptureKind isReferenceToNonConstCapture(Sema &S, Expr *E) { assert(E->isLValue() && E->getType().isConstQualified()); E = E->IgnoreParens(); // Must be a reference to a declaration from an enclosing scope. DeclRefExpr *DRE = dyn_cast(E); if (!DRE) return NCCK_None; if (!DRE->refersToEnclosingVariableOrCapture()) return NCCK_None; // The declaration must be a variable which is not declared 'const'. VarDecl *var = dyn_cast(DRE->getDecl()); if (!var) return NCCK_None; if (var->getType().isConstQualified()) return NCCK_None; assert(var->hasLocalStorage() && "capture added 'const' to non-local?"); // Decide whether the first capture was for a block or a lambda. DeclContext *DC = S.CurContext, *Prev = nullptr; // Decide whether the first capture was for a block or a lambda. while (DC) { // For init-capture, it is possible that the variable belongs to the // template pattern of the current context. if (auto *FD = dyn_cast(DC)) if (var->isInitCapture() && FD->getTemplateInstantiationPattern() == var->getDeclContext()) break; if (DC == var->getDeclContext()) break; Prev = DC; DC = DC->getParent(); } // Unless we have an init-capture, we've gone one step too far. if (!var->isInitCapture()) DC = Prev; return (isa(DC) ? NCCK_Block : NCCK_Lambda); } static bool IsTypeModifiable(QualType Ty, bool IsDereference) { Ty = Ty.getNonReferenceType(); if (IsDereference && Ty->isPointerType()) Ty = Ty->getPointeeType(); return !Ty.isConstQualified(); } // Update err_typecheck_assign_const and note_typecheck_assign_const // when this enum is changed. enum { ConstFunction, ConstVariable, ConstMember, ConstMethod, NestedConstMember, ConstUnknown, // Keep as last element }; /// Emit the "read-only variable not assignable" error and print notes to give /// more information about why the variable is not assignable, such as pointing /// to the declaration of a const variable, showing that a method is const, or /// that the function is returning a const reference. static void DiagnoseConstAssignment(Sema &S, const Expr *E, SourceLocation Loc) { SourceRange ExprRange = E->getSourceRange(); // Only emit one error on the first const found. All other consts will emit // a note to the error. bool DiagnosticEmitted = false; // Track if the current expression is the result of a dereference, and if the // next checked expression is the result of a dereference. bool IsDereference = false; bool NextIsDereference = false; // Loop to process MemberExpr chains. while (true) { IsDereference = NextIsDereference; E = E->IgnoreImplicit()->IgnoreParenImpCasts(); if (const MemberExpr *ME = dyn_cast(E)) { NextIsDereference = ME->isArrow(); const ValueDecl *VD = ME->getMemberDecl(); if (const FieldDecl *Field = dyn_cast(VD)) { // Mutable fields can be modified even if the class is const. 
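        // Illustrative example (not from the original source):
        //   struct S { mutable int Counter; };
        //   const S s;
        //   s.Counter = 1;   // fine, no const-assignment diagnostic
        // so reaching a mutable field ends the search for the offending
        // const.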
if (Field->isMutable()) { assert(DiagnosticEmitted && "Expected diagnostic not emitted."); break; } if (!IsTypeModifiable(Field->getType(), IsDereference)) { if (!DiagnosticEmitted) { S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstMember << false /*static*/ << Field << Field->getType(); DiagnosticEmitted = true; } S.Diag(VD->getLocation(), diag::note_typecheck_assign_const) << ConstMember << false /*static*/ << Field << Field->getType() << Field->getSourceRange(); } E = ME->getBase(); continue; } else if (const VarDecl *VDecl = dyn_cast(VD)) { if (VDecl->getType().isConstQualified()) { if (!DiagnosticEmitted) { S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstMember << true /*static*/ << VDecl << VDecl->getType(); DiagnosticEmitted = true; } S.Diag(VD->getLocation(), diag::note_typecheck_assign_const) << ConstMember << true /*static*/ << VDecl << VDecl->getType() << VDecl->getSourceRange(); } // Static fields do not inherit constness from parents. break; } break; // End MemberExpr } else if (const ArraySubscriptExpr *ASE = dyn_cast(E)) { E = ASE->getBase()->IgnoreParenImpCasts(); continue; } else if (const ExtVectorElementExpr *EVE = dyn_cast(E)) { E = EVE->getBase()->IgnoreParenImpCasts(); continue; } break; } if (const CallExpr *CE = dyn_cast(E)) { // Function calls const FunctionDecl *FD = CE->getDirectCallee(); if (FD && !IsTypeModifiable(FD->getReturnType(), IsDereference)) { if (!DiagnosticEmitted) { S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstFunction << FD; DiagnosticEmitted = true; } S.Diag(FD->getReturnTypeSourceRange().getBegin(), diag::note_typecheck_assign_const) << ConstFunction << FD << FD->getReturnType() << FD->getReturnTypeSourceRange(); } } else if (const DeclRefExpr *DRE = dyn_cast(E)) { // Point to variable declaration. if (const ValueDecl *VD = DRE->getDecl()) { if (!IsTypeModifiable(VD->getType(), IsDereference)) { if (!DiagnosticEmitted) { S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstVariable << VD << VD->getType(); DiagnosticEmitted = true; } S.Diag(VD->getLocation(), diag::note_typecheck_assign_const) << ConstVariable << VD << VD->getType() << VD->getSourceRange(); } } } else if (isa(E)) { if (const DeclContext *DC = S.getFunctionLevelDeclContext()) { if (const CXXMethodDecl *MD = dyn_cast(DC)) { if (MD->isConst()) { if (!DiagnosticEmitted) { S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstMethod << MD; DiagnosticEmitted = true; } S.Diag(MD->getLocation(), diag::note_typecheck_assign_const) << ConstMethod << MD << MD->getSourceRange(); } } } } if (DiagnosticEmitted) return; // Can't determine a more specific message, so display the generic error. S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstUnknown; } enum OriginalExprKind { OEK_Variable, OEK_Member, OEK_LValue }; static void DiagnoseRecursiveConstFields(Sema &S, const ValueDecl *VD, const RecordType *Ty, SourceLocation Loc, SourceRange Range, OriginalExprKind OEK, bool &DiagnosticEmitted) { std::vector RecordTypeList; RecordTypeList.push_back(Ty); unsigned NextToCheckIndex = 0; // We walk the record hierarchy breadth-first to ensure that we print // diagnostics in field nesting order. while (RecordTypeList.size() > NextToCheckIndex) { bool IsNested = NextToCheckIndex > 0; for (const FieldDecl *Field : RecordTypeList[NextToCheckIndex]->getDecl()->fields()) { // First, check every field for constness. 
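      // Illustrative example (not from the original source): this
      // breadth-first walk is what produces the chain of notes for code such
      // as
      //   struct Inner { const int i; };
      //   struct Outer { struct Inner in; };
      //   struct Outer a, b;
      //   a = b;   // error: 'a' is not assignable because 'i' is const
      // where the const member may sit arbitrarily deep in the record.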
QualType FieldTy = Field->getType(); if (FieldTy.isConstQualified()) { if (!DiagnosticEmitted) { S.Diag(Loc, diag::err_typecheck_assign_const) << Range << NestedConstMember << OEK << VD << IsNested << Field; DiagnosticEmitted = true; } S.Diag(Field->getLocation(), diag::note_typecheck_assign_const) << NestedConstMember << IsNested << Field << FieldTy << Field->getSourceRange(); } // Then we append it to the list to check next in order. FieldTy = FieldTy.getCanonicalType(); if (const auto *FieldRecTy = FieldTy->getAs()) { if (llvm::find(RecordTypeList, FieldRecTy) == RecordTypeList.end()) RecordTypeList.push_back(FieldRecTy); } } ++NextToCheckIndex; } } /// Emit an error for the case where a record we are trying to assign to has a /// const-qualified field somewhere in its hierarchy. static void DiagnoseRecursiveConstFields(Sema &S, const Expr *E, SourceLocation Loc) { QualType Ty = E->getType(); assert(Ty->isRecordType() && "lvalue was not record?"); SourceRange Range = E->getSourceRange(); const RecordType *RTy = Ty.getCanonicalType()->getAs(); bool DiagEmitted = false; if (const MemberExpr *ME = dyn_cast(E)) DiagnoseRecursiveConstFields(S, ME->getMemberDecl(), RTy, Loc, Range, OEK_Member, DiagEmitted); else if (const DeclRefExpr *DRE = dyn_cast(E)) DiagnoseRecursiveConstFields(S, DRE->getDecl(), RTy, Loc, Range, OEK_Variable, DiagEmitted); else DiagnoseRecursiveConstFields(S, nullptr, RTy, Loc, Range, OEK_LValue, DiagEmitted); if (!DiagEmitted) DiagnoseConstAssignment(S, E, Loc); } /// CheckForModifiableLvalue - Verify that E is a modifiable lvalue. If not, /// emit an error and return true. If so, return false. static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) { assert(!E->hasPlaceholderType(BuiltinType::PseudoObject)); S.CheckShadowingDeclModification(E, Loc); SourceLocation OrigLoc = Loc; Expr::isModifiableLvalueResult IsLV = E->isModifiableLvalue(S.Context, &Loc); if (IsLV == Expr::MLV_ClassTemporary && IsReadonlyMessage(E, S)) IsLV = Expr::MLV_InvalidMessageExpression; if (IsLV == Expr::MLV_Valid) return false; unsigned DiagID = 0; bool NeedType = false; switch (IsLV) { // C99 6.5.16p2 case Expr::MLV_ConstQualified: // Use a specialized diagnostic when we're assigning to an object // from an enclosing function or block. if (NonConstCaptureKind NCCK = isReferenceToNonConstCapture(S, E)) { if (NCCK == NCCK_Block) DiagID = diag::err_block_decl_ref_not_modifiable_lvalue; else DiagID = diag::err_lambda_decl_ref_not_modifiable_lvalue; break; } // In ARC, use some specialized diagnostics for occasions where we // infer 'const'. These are always pseudo-strong variables. if (S.getLangOpts().ObjCAutoRefCount) { DeclRefExpr *declRef = dyn_cast(E->IgnoreParenCasts()); if (declRef && isa(declRef->getDecl())) { VarDecl *var = cast(declRef->getDecl()); // Use the normal diagnostic if it's pseudo-__strong but the // user actually wrote 'const'. if (var->isARCPseudoStrong() && (!var->getTypeSourceInfo() || !var->getTypeSourceInfo()->getType().isConstQualified())) { // There are three pseudo-strong cases: // - self ObjCMethodDecl *method = S.getCurMethodDecl(); if (method && var == method->getSelfDecl()) { DiagID = method->isClassMethod() ? diag::err_typecheck_arc_assign_self_class_method : diag::err_typecheck_arc_assign_self; // - Objective-C externally_retained attribute. 
} else if (var->hasAttr() || isa(var)) { DiagID = diag::err_typecheck_arc_assign_externally_retained; // - fast enumeration variables } else { DiagID = diag::err_typecheck_arr_assign_enumeration; } SourceRange Assign; if (Loc != OrigLoc) Assign = SourceRange(OrigLoc, OrigLoc); S.Diag(Loc, DiagID) << E->getSourceRange() << Assign; // We need to preserve the AST regardless, so migration tool // can do its job. return false; } } } // If none of the special cases above are triggered, then this is a // simple const assignment. if (DiagID == 0) { DiagnoseConstAssignment(S, E, Loc); return true; } break; case Expr::MLV_ConstAddrSpace: DiagnoseConstAssignment(S, E, Loc); return true; case Expr::MLV_ConstQualifiedField: DiagnoseRecursiveConstFields(S, E, Loc); return true; case Expr::MLV_ArrayType: case Expr::MLV_ArrayTemporary: DiagID = diag::err_typecheck_array_not_modifiable_lvalue; NeedType = true; break; case Expr::MLV_NotObjectType: DiagID = diag::err_typecheck_non_object_not_modifiable_lvalue; NeedType = true; break; case Expr::MLV_LValueCast: DiagID = diag::err_typecheck_lvalue_casts_not_supported; break; case Expr::MLV_Valid: llvm_unreachable("did not take early return for MLV_Valid"); case Expr::MLV_InvalidExpression: case Expr::MLV_MemberFunction: case Expr::MLV_ClassTemporary: DiagID = diag::err_typecheck_expression_not_modifiable_lvalue; break; case Expr::MLV_IncompleteType: case Expr::MLV_IncompleteVoidType: return S.RequireCompleteType(Loc, E->getType(), diag::err_typecheck_incomplete_type_not_modifiable_lvalue, E); case Expr::MLV_DuplicateVectorComponents: DiagID = diag::err_typecheck_duplicate_vector_components_not_mlvalue; break; case Expr::MLV_NoSetterProperty: llvm_unreachable("readonly properties should be processed differently"); case Expr::MLV_InvalidMessageExpression: DiagID = diag::err_readonly_message_assignment; break; case Expr::MLV_SubObjCPropertySetting: DiagID = diag::err_no_subobject_property_setting; break; } SourceRange Assign; if (Loc != OrigLoc) Assign = SourceRange(OrigLoc, OrigLoc); if (NeedType) S.Diag(Loc, DiagID) << E->getType() << E->getSourceRange() << Assign; else S.Diag(Loc, DiagID) << E->getSourceRange() << Assign; return true; } static void CheckIdentityFieldAssignment(Expr *LHSExpr, Expr *RHSExpr, SourceLocation Loc, Sema &Sema) { if (Sema.inTemplateInstantiation()) return; if (Sema.isUnevaluatedContext()) return; if (Loc.isInvalid() || Loc.isMacroID()) return; if (LHSExpr->getExprLoc().isMacroID() || RHSExpr->getExprLoc().isMacroID()) return; // C / C++ fields MemberExpr *ML = dyn_cast(LHSExpr); MemberExpr *MR = dyn_cast(RHSExpr); if (ML && MR) { if (!(isa(ML->getBase()) && isa(MR->getBase()))) return; const ValueDecl *LHSDecl = cast(ML->getMemberDecl()->getCanonicalDecl()); const ValueDecl *RHSDecl = cast(MR->getMemberDecl()->getCanonicalDecl()); if (LHSDecl != RHSDecl) return; if (LHSDecl->getType().isVolatileQualified()) return; if (const ReferenceType *RefTy = LHSDecl->getType()->getAs()) if (RefTy->getPointeeType().isVolatileQualified()) return; Sema.Diag(Loc, diag::warn_identity_field_assign) << 0; } // Objective-C instance variables ObjCIvarRefExpr *OL = dyn_cast(LHSExpr); ObjCIvarRefExpr *OR = dyn_cast(RHSExpr); if (OL && OR && OL->getDecl() == OR->getDecl()) { DeclRefExpr *RL = dyn_cast(OL->getBase()->IgnoreImpCasts()); DeclRefExpr *RR = dyn_cast(OR->getBase()->IgnoreImpCasts()); if (RL && RR && RL->getDecl() == RR->getDecl()) Sema.Diag(Loc, diag::warn_identity_field_assign) << 1; } } // C99 6.5.16.1 QualType 
Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType) { assert(!LHSExpr->hasPlaceholderType(BuiltinType::PseudoObject)); // Verify that LHS is a modifiable lvalue, and emit error if not. if (CheckForModifiableLvalue(LHSExpr, Loc, *this)) return QualType(); QualType LHSType = LHSExpr->getType(); QualType RHSType = CompoundType.isNull() ? RHS.get()->getType() : CompoundType; // OpenCL v1.2 s6.1.1.1 p2: // The half data type can only be used to declare a pointer to a buffer that // contains half values if (getLangOpts().OpenCL && !getOpenCLOptions().isEnabled("cl_khr_fp16") && LHSType->isHalfType()) { Diag(Loc, diag::err_opencl_half_load_store) << 1 << LHSType.getUnqualifiedType(); return QualType(); } AssignConvertType ConvTy; if (CompoundType.isNull()) { Expr *RHSCheck = RHS.get(); CheckIdentityFieldAssignment(LHSExpr, RHSCheck, Loc, *this); QualType LHSTy(LHSType); ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); if (RHS.isInvalid()) return QualType(); // Special case of NSObject attributes on c-style pointer types. if (ConvTy == IncompatiblePointer && ((Context.isObjCNSObjectType(LHSType) && RHSType->isObjCObjectPointerType()) || (Context.isObjCNSObjectType(RHSType) && LHSType->isObjCObjectPointerType()))) ConvTy = Compatible; if (ConvTy == Compatible && LHSType->isObjCObjectType()) Diag(Loc, diag::err_objc_object_assignment) << LHSType; // If the RHS is a unary plus or minus, check to see if they = and + are // right next to each other. If so, the user may have typo'd "x =+ 4" // instead of "x += 4". if (ImplicitCastExpr *ICE = dyn_cast(RHSCheck)) RHSCheck = ICE->getSubExpr(); if (UnaryOperator *UO = dyn_cast(RHSCheck)) { if ((UO->getOpcode() == UO_Plus || UO->getOpcode() == UO_Minus) && Loc.isFileID() && UO->getOperatorLoc().isFileID() && // Only if the two operators are exactly adjacent. Loc.getLocWithOffset(1) == UO->getOperatorLoc() && // And there is a space or other character before the subexpr of the // unary +/-. We don't want to warn on "x=-1". Loc.getLocWithOffset(2) != UO->getSubExpr()->getBeginLoc() && UO->getSubExpr()->getBeginLoc().isFileID()) { Diag(Loc, diag::warn_not_compound_assign) << (UO->getOpcode() == UO_Plus ? "+" : "-") << SourceRange(UO->getOperatorLoc(), UO->getOperatorLoc()); } } if (ConvTy == Compatible) { if (LHSType.getObjCLifetime() == Qualifiers::OCL_Strong) { // Warn about retain cycles where a block captures the LHS, but // not if the LHS is a simple variable into which the block is // being stored...unless that variable can be captured by reference! const Expr *InnerLHS = LHSExpr->IgnoreParenCasts(); const DeclRefExpr *DRE = dyn_cast(InnerLHS); if (!DRE || DRE->getDecl()->hasAttr()) checkRetainCycles(LHSExpr, RHS.get()); } if (LHSType.getObjCLifetime() == Qualifiers::OCL_Strong || LHSType.isNonWeakInMRRWithObjCWeak(Context)) { // It is safe to assign a weak reference into a strong variable. // Although this code can still have problems: // id x = self.weakProp; // id y = self.weakProp; // we do not warn to warn spuriously when 'x' and 'y' are on separate // paths through the function. This should be revisited if // -Wrepeated-use-of-weak is made flow-sensitive. // For ObjCWeak only, we do not warn if the assign is to a non-weak // variable, which will be valid for the current autorelease scope. 
if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, RHS.get()->getBeginLoc())) getCurFunction()->markSafeWeakUse(RHS.get()); } else if (getLangOpts().ObjCAutoRefCount || getLangOpts().ObjCWeak) { checkUnsafeExprAssigns(Loc, LHSExpr, RHS.get()); } } } else { // Compound assignment "x += y" ConvTy = CheckAssignmentConstraints(Loc, LHSType, RHSType); } if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSType, RHS.get(), AA_Assigning)) return QualType(); CheckForNullPointerDereference(*this, LHSExpr); if (getLangOpts().CPlusPlus20 && LHSType.isVolatileQualified()) { if (CompoundType.isNull()) { // C++2a [expr.ass]p5: // A simple-assignment whose left operand is of a volatile-qualified // type is deprecated unless the assignment is either a discarded-value // expression or an unevaluated operand ExprEvalContexts.back().VolatileAssignmentLHSs.push_back(LHSExpr); } else { // C++2a [expr.ass]p6: // [Compound-assignment] expressions are deprecated if E1 has // volatile-qualified type Diag(Loc, diag::warn_deprecated_compound_assign_volatile) << LHSType; } } // C99 6.5.16p3: The type of an assignment expression is the type of the // left operand unless the left operand has qualified type, in which case // it is the unqualified version of the type of the left operand. // C99 6.5.16.1p2: In simple assignment, the value of the right operand // is converted to the type of the assignment expression (above). // C++ 5.17p1: the type of the assignment expression is that of its left // operand. return (getLangOpts().CPlusPlus ? LHSType : LHSType.getUnqualifiedType()); } // Only ignore explicit casts to void. static bool IgnoreCommaOperand(const Expr *E) { E = E->IgnoreParens(); if (const CastExpr *CE = dyn_cast(E)) { if (CE->getCastKind() == CK_ToVoid) { return true; } // static_cast on a dependent type will not show up as CK_ToVoid. if (CE->getCastKind() == CK_Dependent && E->getType()->isVoidType() && CE->getSubExpr()->getType()->isDependentType()) { return true; } } return false; } // Look for instances where it is likely the comma operator is confused with // another operator. There is an explicit list of acceptable expressions for // the left hand side of the comma operator, otherwise emit a warning. void Sema::DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc) { // No warnings in macros if (Loc.isMacroID()) return; // Don't warn in template instantiations. if (inTemplateInstantiation()) return; // Scope isn't fine-grained enough to explicitly list the specific cases, so // instead, skip more than needed, then call back into here with the // CommaVisitor in SemaStmt.cpp. // The listed locations are the initialization and increment portions // of a for loop. The additional checks are on the condition of // if statements, do/while loops, and for loops. // Differences in scope flags for C89 mode requires the extra logic. const unsigned ForIncrementFlags = getLangOpts().C99 || getLangOpts().CPlusPlus ? Scope::ControlScope | Scope::ContinueScope | Scope::BreakScope : Scope::ContinueScope | Scope::BreakScope; const unsigned ForInitFlags = Scope::ControlScope | Scope::DeclScope; const unsigned ScopeFlags = getCurScope()->getFlags(); if ((ScopeFlags & ForIncrementFlags) == ForIncrementFlags || (ScopeFlags & ForInitFlags) == ForInitFlags) return; // If there are multiple comma operators used together, get the RHS of the // of the comma operator as the LHS. 
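  // Illustrative example (not from the original source): for an expression
  // such as
  //   x = (a(), b(), c);
  // the loop below walks the left-hand side down to 'b()', so the -Wcomma
  // warning and its cast-to-void fixit point at the last discarded operand
  // rather than at the whole comma chain.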
while (const BinaryOperator *BO = dyn_cast(LHS)) { if (BO->getOpcode() != BO_Comma) break; LHS = BO->getRHS(); } // Only allow some expressions on LHS to not warn. if (IgnoreCommaOperand(LHS)) return; Diag(Loc, diag::warn_comma_operator); Diag(LHS->getBeginLoc(), diag::note_cast_to_void) << LHS->getSourceRange() << FixItHint::CreateInsertion(LHS->getBeginLoc(), LangOpts.CPlusPlus ? "static_cast(" : "(void)(") << FixItHint::CreateInsertion(PP.getLocForEndOfToken(LHS->getEndLoc()), ")"); } // C99 6.5.17 static QualType CheckCommaOperands(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc) { LHS = S.CheckPlaceholderExpr(LHS.get()); RHS = S.CheckPlaceholderExpr(RHS.get()); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); // C's comma performs lvalue conversion (C99 6.3.2.1) on both its // operands, but not unary promotions. // C++'s comma does not do any conversions at all (C++ [expr.comma]p1). // So we treat the LHS as a ignored value, and in C++ we allow the // containing site to determine what should be done with the RHS. LHS = S.IgnoredValueConversions(LHS.get()); if (LHS.isInvalid()) return QualType(); S.DiagnoseUnusedExprResult(LHS.get()); if (!S.getLangOpts().CPlusPlus) { RHS = S.DefaultFunctionArrayLvalueConversion(RHS.get()); if (RHS.isInvalid()) return QualType(); if (!RHS.get()->getType()->isVoidType()) S.RequireCompleteType(Loc, RHS.get()->getType(), diag::err_incomplete_type); } if (!S.getDiagnostics().isIgnored(diag::warn_comma_operator, Loc)) S.DiagnoseCommaOperator(LHS.get(), Loc); return RHS.get()->getType(); } /// CheckIncrementDecrementOperand - unlike most "Check" methods, this routine /// doesn't need to call UsualUnaryConversions or UsualArithmeticConversions. static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation OpLoc, bool IsInc, bool IsPrefix) { if (Op->isTypeDependent()) return S.Context.DependentTy; QualType ResType = Op->getType(); // Atomic types can be used for increment / decrement where the non-atomic // versions can, so ignore the _Atomic() specifier for the purpose of // checking. if (const AtomicType *ResAtomicType = ResType->getAs()) ResType = ResAtomicType->getValueType(); assert(!ResType.isNull() && "no type for increment/decrement expression"); if (S.getLangOpts().CPlusPlus && ResType->isBooleanType()) { // Decrement of bool is not allowed. if (!IsInc) { S.Diag(OpLoc, diag::err_decrement_bool) << Op->getSourceRange(); return QualType(); } // Increment of bool sets it to true, but is deprecated. S.Diag(OpLoc, S.getLangOpts().CPlusPlus17 ? diag::ext_increment_bool : diag::warn_increment_bool) << Op->getSourceRange(); } else if (S.getLangOpts().CPlusPlus && ResType->isEnumeralType()) { // Error on enum increments and decrements in C++ mode S.Diag(OpLoc, diag::err_increment_decrement_enum) << IsInc << ResType; return QualType(); } else if (ResType->isRealType()) { // OK! } else if (ResType->isPointerType()) { // C99 6.5.2.4p2, 6.5.6p2 if (!checkArithmeticOpPointerOperand(S, OpLoc, Op)) return QualType(); } else if (ResType->isObjCObjectPointerType()) { // On modern runtimes, ObjC pointer arithmetic is forbidden. // Otherwise, we just need a complete type. if (checkArithmeticIncompletePointerType(S, OpLoc, Op) || checkArithmeticOnObjCPointer(S, OpLoc, Op)) return QualType(); } else if (ResType->isAnyComplexType()) { // C99 does not support ++/-- on complex types, we allow as an extension. 
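    // Illustrative example (not from the original source):
    //   _Complex double c = 0;
    //   ++c;   // accepted, but flagged with the extension warning below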
S.Diag(OpLoc, diag::ext_integer_increment_complex) << ResType << Op->getSourceRange(); } else if (ResType->isPlaceholderType()) { ExprResult PR = S.CheckPlaceholderExpr(Op); if (PR.isInvalid()) return QualType(); return CheckIncrementDecrementOperand(S, PR.get(), VK, OK, OpLoc, IsInc, IsPrefix); } else if (S.getLangOpts().AltiVec && ResType->isVectorType()) { // OK! ( C/C++ Language Extensions for CBEA(Version 2.6) 10.3 ) } else if (S.getLangOpts().ZVector && ResType->isVectorType() && (ResType->castAs()->getVectorKind() != VectorType::AltiVecBool)) { // The z vector extensions allow ++ and -- for non-bool vectors. } else if(S.getLangOpts().OpenCL && ResType->isVectorType() && ResType->castAs()->getElementType()->isIntegerType()) { // OpenCL V1.2 6.3 says dec/inc ops operate on integer vector types. } else { S.Diag(OpLoc, diag::err_typecheck_illegal_increment_decrement) << ResType << int(IsInc) << Op->getSourceRange(); return QualType(); } // At this point, we know we have a real, complex or pointer type. // Now make sure the operand is a modifiable lvalue. if (CheckForModifiableLvalue(Op, OpLoc, S)) return QualType(); if (S.getLangOpts().CPlusPlus20 && ResType.isVolatileQualified()) { // C++2a [expr.pre.inc]p1, [expr.post.inc]p1: // An operand with volatile-qualified type is deprecated S.Diag(OpLoc, diag::warn_deprecated_increment_decrement_volatile) << IsInc << ResType; } // In C++, a prefix increment is the same type as the operand. Otherwise // (in C or with postfix), the increment is the unqualified type of the // operand. if (IsPrefix && S.getLangOpts().CPlusPlus) { VK = VK_LValue; OK = Op->getObjectKind(); return ResType; } else { VK = VK_RValue; return ResType.getUnqualifiedType(); } } /// getPrimaryDecl - Helper function for CheckAddressOfOperand(). /// This routine allows us to typecheck complex/recursive expressions /// where the declaration is needed for type checking. We only need to /// handle cases when the expression references a function designator /// or is an lvalue. Here are some examples: /// - &(x) => x /// - &*****f => f for f a function designator. /// - &s.xx => s /// - &s.zz[1].yy -> s, if zz is an array /// - *(x + 1) -> x, if x is an array /// - &"123"[2] -> 0 /// - & __real__ x -> x /// /// FIXME: We don't recurse to the RHS of a comma, nor handle pointers to /// members. static ValueDecl *getPrimaryDecl(Expr *E) { switch (E->getStmtClass()) { case Stmt::DeclRefExprClass: return cast(E)->getDecl(); case Stmt::MemberExprClass: // If this is an arrow operator, the address is an offset from // the base's value, so the object the base refers to is // irrelevant. if (cast(E)->isArrow()) return nullptr; // Otherwise, the expression refers to a part of the base return getPrimaryDecl(cast(E)->getBase()); case Stmt::ArraySubscriptExprClass: { // FIXME: This code shouldn't be necessary! We should catch the implicit // promotion of register arrays earlier. 
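    // Illustrative example (not from the original source): this case matters
    // for C code such as
    //   register int arr[4];
    //   int *p = &arr[1];   // error in C: address of register variable
    // where walking through the subscript is needed to find the register
    // variable whose address is ultimately being taken.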
Expr* Base = cast(E)->getBase(); if (ImplicitCastExpr* ICE = dyn_cast(Base)) { if (ICE->getSubExpr()->getType()->isArrayType()) return getPrimaryDecl(ICE->getSubExpr()); } return nullptr; } case Stmt::UnaryOperatorClass: { UnaryOperator *UO = cast(E); switch(UO->getOpcode()) { case UO_Real: case UO_Imag: case UO_Extension: return getPrimaryDecl(UO->getSubExpr()); default: return nullptr; } } case Stmt::ParenExprClass: return getPrimaryDecl(cast(E)->getSubExpr()); case Stmt::ImplicitCastExprClass: // If the result of an implicit cast is an l-value, we care about // the sub-expression; otherwise, the result here doesn't matter. return getPrimaryDecl(cast(E)->getSubExpr()); case Stmt::CXXUuidofExprClass: return cast(E)->getGuidDecl(); default: return nullptr; } } namespace { enum { AO_Bit_Field = 0, AO_Vector_Element = 1, AO_Property_Expansion = 2, AO_Register_Variable = 3, AO_Matrix_Element = 4, AO_No_Error = 5 }; } /// Diagnose invalid operand for address of operations. /// /// \param Type The type of operand which cannot have its address taken. static void diagnoseAddressOfInvalidType(Sema &S, SourceLocation Loc, Expr *E, unsigned Type) { S.Diag(Loc, diag::err_typecheck_address_of) << Type << E->getSourceRange(); } /// CheckAddressOfOperand - The operand of & must be either a function /// designator or an lvalue designating an object. If it is an lvalue, the /// object cannot be declared with storage class register or be a bit field. /// Note: The usual conversions are *not* applied to the operand of the & /// operator (C99 6.3.2.1p[2-4]), and its result is never an lvalue. /// In C++, the operand might be an overloaded function name, in which case /// we allow the '&' but retain the overloaded-function type. QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) { if (const BuiltinType *PTy = OrigOp.get()->getType()->getAsPlaceholderType()){ if (PTy->getKind() == BuiltinType::Overload) { Expr *E = OrigOp.get()->IgnoreParens(); if (!isa(E)) { assert(cast(E)->getOpcode() == UO_AddrOf); Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof_addrof_function) << OrigOp.get()->getSourceRange(); return QualType(); } OverloadExpr *Ovl = cast(E); if (isa(Ovl)) if (!ResolveSingleFunctionTemplateSpecialization(Ovl)) { Diag(OpLoc, diag::err_invalid_form_pointer_member_function) << OrigOp.get()->getSourceRange(); return QualType(); } return Context.OverloadTy; } if (PTy->getKind() == BuiltinType::UnknownAny) return Context.UnknownAnyTy; if (PTy->getKind() == BuiltinType::BoundMember) { Diag(OpLoc, diag::err_invalid_form_pointer_member_function) << OrigOp.get()->getSourceRange(); return QualType(); } OrigOp = CheckPlaceholderExpr(OrigOp.get()); if (OrigOp.isInvalid()) return QualType(); } if (OrigOp.get()->isTypeDependent()) return Context.DependentTy; assert(!OrigOp.get()->getType()->isPlaceholderType()); // Make sure to ignore parentheses in subsequent checks Expr *op = OrigOp.get()->IgnoreParens(); // In OpenCL captures for blocks called as lambda functions // are located in the private address space. Blocks used in // enqueue_kernel can be located in a different address space // depending on a vendor implementation. Thus preventing // taking an address of the capture to avoid invalid AS casts. if (LangOpts.OpenCL) { auto* VarRef = dyn_cast(op); if (VarRef && VarRef->refersToEnclosingVariableOrCapture()) { Diag(op->getExprLoc(), diag::err_opencl_taking_address_capture); return QualType(); } } if (getLangOpts().C99) { // Implement C99-only parts of addressof rules. 
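    // Illustrative example (not from the original source): per C99 6.5.3.2p3,
    // '&*E' behaves as if both operators were omitted, so
    //   int *p;
    //   int *q = &*p;   // valid even though p points nowhere; *p is never
    //                   // evaluated
    // which is why the dereference case below simply yields the
    // sub-expression's type.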
if (UnaryOperator* uOp = dyn_cast(op)) { if (uOp->getOpcode() == UO_Deref) // Per C99 6.5.3.2, the address of a deref always returns a valid result // (assuming the deref expression is valid). return uOp->getSubExpr()->getType(); } // Technically, there should be a check for array subscript // expressions here, but the result of one is always an lvalue anyway. } ValueDecl *dcl = getPrimaryDecl(op); if (auto *FD = dyn_cast_or_null(dcl)) if (!checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, op->getBeginLoc())) return QualType(); Expr::LValueClassification lval = op->ClassifyLValue(Context); unsigned AddressOfError = AO_No_Error; if (lval == Expr::LV_ClassTemporary || lval == Expr::LV_ArrayTemporary) { bool sfinae = (bool)isSFINAEContext(); Diag(OpLoc, isSFINAEContext() ? diag::err_typecheck_addrof_temporary : diag::ext_typecheck_addrof_temporary) << op->getType() << op->getSourceRange(); if (sfinae) return QualType(); // Materialize the temporary as an lvalue so that we can take its address. OrigOp = op = CreateMaterializeTemporaryExpr(op->getType(), OrigOp.get(), true); } else if (isa(op)) { return Context.getPointerType(op->getType()); } else if (lval == Expr::LV_MemberFunction) { // If it's an instance method, make a member pointer. // The expression must have exactly the form &A::foo. // If the underlying expression isn't a decl ref, give up. if (!isa(op)) { Diag(OpLoc, diag::err_invalid_form_pointer_member_function) << OrigOp.get()->getSourceRange(); return QualType(); } DeclRefExpr *DRE = cast(op); CXXMethodDecl *MD = cast(DRE->getDecl()); // The id-expression was parenthesized. if (OrigOp.get() != DRE) { Diag(OpLoc, diag::err_parens_pointer_member_function) << OrigOp.get()->getSourceRange(); // The method was named without a qualifier. } else if (!DRE->getQualifier()) { if (MD->getParent()->getName().empty()) Diag(OpLoc, diag::err_unqualified_pointer_member_function) << op->getSourceRange(); else { SmallString<32> Str; StringRef Qual = (MD->getParent()->getName() + "::").toStringRef(Str); Diag(OpLoc, diag::err_unqualified_pointer_member_function) << op->getSourceRange() << FixItHint::CreateInsertion(op->getSourceRange().getBegin(), Qual); } } // Taking the address of a dtor is illegal per C++ [class.dtor]p2. if (isa(MD)) Diag(OpLoc, diag::err_typecheck_addrof_dtor) << op->getSourceRange(); QualType MPTy = Context.getMemberPointerType( op->getType(), Context.getTypeDeclType(MD->getParent()).getTypePtr()); // Under the MS ABI, lock down the inheritance model now. if (Context.getTargetInfo().getCXXABI().isMicrosoft()) (void)isCompleteType(OpLoc, MPTy); return MPTy; } else if (lval != Expr::LV_Valid && lval != Expr::LV_IncompleteVoidType) { // C99 6.5.3.2p1 // The operand must be either an l-value or a function designator if (!op->getType()->isFunctionType()) { // Use a special diagnostic for loads from property references. if (isa(op)) { AddressOfError = AO_Property_Expansion; } else { Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof) << op->getType() << op->getSourceRange(); return QualType(); } } } else if (op->getObjectKind() == OK_BitField) { // C99 6.5.3.2p1 // The operand cannot be a bit-field AddressOfError = AO_Bit_Field; } else if (op->getObjectKind() == OK_VectorComponent) { // The operand cannot be an element of a vector AddressOfError = AO_Vector_Element; } else if (op->getObjectKind() == OK_MatrixComponent) { // The operand cannot be an element of a matrix. AddressOfError = AO_Matrix_Element; } else if (dcl) { // C99 6.5.3.2p1 // We have an lvalue with a decl. 
Make sure the decl is not declared // with the register storage-class specifier. if (const VarDecl *vd = dyn_cast(dcl)) { // in C++ it is not error to take address of a register // variable (c++03 7.1.1P3) if (vd->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus) { AddressOfError = AO_Register_Variable; } } else if (isa(dcl)) { AddressOfError = AO_Property_Expansion; } else if (isa(dcl)) { return Context.OverloadTy; } else if (isa(dcl) || isa(dcl)) { // Okay: we can take the address of a field. // Could be a pointer to member, though, if there is an explicit // scope qualifier for the class. if (isa(op) && cast(op)->getQualifier()) { DeclContext *Ctx = dcl->getDeclContext(); if (Ctx && Ctx->isRecord()) { if (dcl->getType()->isReferenceType()) { Diag(OpLoc, diag::err_cannot_form_pointer_to_member_of_reference_type) << dcl->getDeclName() << dcl->getType(); return QualType(); } while (cast(Ctx)->isAnonymousStructOrUnion()) Ctx = Ctx->getParent(); QualType MPTy = Context.getMemberPointerType( op->getType(), Context.getTypeDeclType(cast(Ctx)).getTypePtr()); // Under the MS ABI, lock down the inheritance model now. if (Context.getTargetInfo().getCXXABI().isMicrosoft()) (void)isCompleteType(OpLoc, MPTy); return MPTy; } } } else if (!isa(dcl) && !isa(dcl) && !isa(dcl) && !isa(dcl)) llvm_unreachable("Unknown/unexpected decl type"); } if (AddressOfError != AO_No_Error) { diagnoseAddressOfInvalidType(*this, OpLoc, op, AddressOfError); return QualType(); } if (lval == Expr::LV_IncompleteVoidType) { // Taking the address of a void variable is technically illegal, but we // allow it in cases which are otherwise valid. // Example: "extern void x; void* y = &x;". Diag(OpLoc, diag::ext_typecheck_addrof_void) << op->getSourceRange(); } // If the operand has type "type", the result has type "pointer to type". if (op->getType()->isObjCObjectType()) return Context.getObjCObjectPointerType(op->getType()); CheckAddressOfPackedMember(op); return Context.getPointerType(op->getType()); } static void RecordModifiableNonNullParam(Sema &S, const Expr *Exp) { const DeclRefExpr *DRE = dyn_cast(Exp); if (!DRE) return; const Decl *D = DRE->getDecl(); if (!D) return; const ParmVarDecl *Param = dyn_cast(D); if (!Param) return; if (const FunctionDecl* FD = dyn_cast(Param->getDeclContext())) if (!FD->hasAttr() && !Param->hasAttr()) return; if (FunctionScopeInfo *FD = S.getCurFunction()) if (!FD->ModifiedNonNullParams.count(Param)) FD->ModifiedNonNullParams.insert(Param); } /// CheckIndirectionOperand - Type check unary indirection (prefix '*'). 
static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
                                        SourceLocation OpLoc) {
  if (Op->isTypeDependent())
    return S.Context.DependentTy;

  ExprResult ConvResult = S.UsualUnaryConversions(Op);
  if (ConvResult.isInvalid())
    return QualType();
  Op = ConvResult.get();
  QualType OpTy = Op->getType();
  QualType Result;

  if (isa<CXXReinterpretCastExpr>(Op)) {
    QualType OpOrigType = Op->IgnoreParenCasts()->getType();
    S.CheckCompatibleReinterpretCast(OpOrigType, OpTy, /*IsDereference*/true,
                                     Op->getSourceRange());
  }

  if (const PointerType *PT = OpTy->getAs<PointerType>()) {
    Result = PT->getPointeeType();
  } else if (const ObjCObjectPointerType *OPT =
                 OpTy->getAs<ObjCObjectPointerType>())
    Result = OPT->getPointeeType();
  else {
    ExprResult PR = S.CheckPlaceholderExpr(Op);
    if (PR.isInvalid()) return QualType();
    if (PR.get() != Op)
      return CheckIndirectionOperand(S, PR.get(), VK, OpLoc);
  }

  if (Result.isNull()) {
    S.Diag(OpLoc, diag::err_typecheck_indirection_requires_pointer)
        << OpTy << Op->getSourceRange();
    return QualType();
  }

  // Note that per both C89 and C99, indirection is always legal, even if Result
  // is an incomplete type or void.  It would be possible to warn about
  // dereferencing a void pointer, but it's completely well-defined, and such a
  // warning is unlikely to catch any mistakes. In C++, indirection is not valid
  // for pointers to 'void' but is fine for any other pointer type:
  //
  //   C++ [expr.unary.op]p1:
  //     [...] the expression to which [the unary * operator] is applied shall
  //     be a pointer to an object type, or a pointer to a function type
  if (S.getLangOpts().CPlusPlus && Result->isVoidType())
    S.Diag(OpLoc, diag::ext_typecheck_indirection_through_void_pointer)
        << OpTy << Op->getSourceRange();

  // Dereferences are usually l-values...
  VK = VK_LValue;

  // ...except that certain expressions are never l-values in C.
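  // Illustrative example (not from the original source): in C the result of
  // '*p' is not an lvalue when the pointee type is unqualified void or a
  // function type, e.g.
  //   void (*fp)(void);
  //   (*fp)();   // '*fp' is a function designator, not an lvalue
  // which is the case isCForbiddenLValueType() tests for below.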
if (!S.getLangOpts().CPlusPlus && Result.isCForbiddenLValueType()) VK = VK_RValue; return Result; } BinaryOperatorKind Sema::ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind) { BinaryOperatorKind Opc; switch (Kind) { default: llvm_unreachable("Unknown binop!"); case tok::periodstar: Opc = BO_PtrMemD; break; case tok::arrowstar: Opc = BO_PtrMemI; break; case tok::star: Opc = BO_Mul; break; case tok::slash: Opc = BO_Div; break; case tok::percent: Opc = BO_Rem; break; case tok::plus: Opc = BO_Add; break; case tok::minus: Opc = BO_Sub; break; case tok::lessless: Opc = BO_Shl; break; case tok::greatergreater: Opc = BO_Shr; break; case tok::lessequal: Opc = BO_LE; break; case tok::less: Opc = BO_LT; break; case tok::greaterequal: Opc = BO_GE; break; case tok::greater: Opc = BO_GT; break; case tok::exclaimequal: Opc = BO_NE; break; case tok::equalequal: Opc = BO_EQ; break; case tok::spaceship: Opc = BO_Cmp; break; case tok::amp: Opc = BO_And; break; case tok::caret: Opc = BO_Xor; break; case tok::pipe: Opc = BO_Or; break; case tok::ampamp: Opc = BO_LAnd; break; case tok::pipepipe: Opc = BO_LOr; break; case tok::equal: Opc = BO_Assign; break; case tok::starequal: Opc = BO_MulAssign; break; case tok::slashequal: Opc = BO_DivAssign; break; case tok::percentequal: Opc = BO_RemAssign; break; case tok::plusequal: Opc = BO_AddAssign; break; case tok::minusequal: Opc = BO_SubAssign; break; case tok::lesslessequal: Opc = BO_ShlAssign; break; case tok::greatergreaterequal: Opc = BO_ShrAssign; break; case tok::ampequal: Opc = BO_AndAssign; break; case tok::caretequal: Opc = BO_XorAssign; break; case tok::pipeequal: Opc = BO_OrAssign; break; case tok::comma: Opc = BO_Comma; break; } return Opc; } static inline UnaryOperatorKind ConvertTokenKindToUnaryOpcode( tok::TokenKind Kind) { UnaryOperatorKind Opc; switch (Kind) { default: llvm_unreachable("Unknown unary op!"); case tok::plusplus: Opc = UO_PreInc; break; case tok::minusminus: Opc = UO_PreDec; break; case tok::amp: Opc = UO_AddrOf; break; case tok::star: Opc = UO_Deref; break; case tok::plus: Opc = UO_Plus; break; case tok::minus: Opc = UO_Minus; break; case tok::tilde: Opc = UO_Not; break; case tok::exclaim: Opc = UO_LNot; break; case tok::kw___real: Opc = UO_Real; break; case tok::kw___imag: Opc = UO_Imag; break; case tok::kw___extension__: Opc = UO_Extension; break; } return Opc; } /// DiagnoseSelfAssignment - Emits a warning if a value is assigned to itself. /// This warning suppressed in the event of macro expansions. static void DiagnoseSelfAssignment(Sema &S, Expr *LHSExpr, Expr *RHSExpr, SourceLocation OpLoc, bool IsBuiltin) { if (S.inTemplateInstantiation()) return; if (S.isUnevaluatedContext()) return; if (OpLoc.isInvalid() || OpLoc.isMacroID()) return; LHSExpr = LHSExpr->IgnoreParenImpCasts(); RHSExpr = RHSExpr->IgnoreParenImpCasts(); const DeclRefExpr *LHSDeclRef = dyn_cast(LHSExpr); const DeclRefExpr *RHSDeclRef = dyn_cast(RHSExpr); if (!LHSDeclRef || !RHSDeclRef || LHSDeclRef->getLocation().isMacroID() || RHSDeclRef->getLocation().isMacroID()) return; const ValueDecl *LHSDecl = cast(LHSDeclRef->getDecl()->getCanonicalDecl()); const ValueDecl *RHSDecl = cast(RHSDeclRef->getDecl()->getCanonicalDecl()); if (LHSDecl != RHSDecl) return; if (LHSDecl->getType().isVolatileQualified()) return; if (const ReferenceType *RefTy = LHSDecl->getType()->getAs()) if (RefTy->getPointeeType().isVolatileQualified()) return; S.Diag(OpLoc, IsBuiltin ? 
diag::warn_self_assignment_builtin : diag::warn_self_assignment_overloaded) << LHSDeclRef->getType() << LHSExpr->getSourceRange() << RHSExpr->getSourceRange(); } /// Check if a bitwise-& is performed on an Objective-C pointer. This /// is usually indicative of introspection within the Objective-C pointer. static void checkObjCPointerIntrospection(Sema &S, ExprResult &L, ExprResult &R, SourceLocation OpLoc) { if (!S.getLangOpts().ObjC) return; const Expr *ObjCPointerExpr = nullptr, *OtherExpr = nullptr; const Expr *LHS = L.get(); const Expr *RHS = R.get(); if (LHS->IgnoreParenCasts()->getType()->isObjCObjectPointerType()) { ObjCPointerExpr = LHS; OtherExpr = RHS; } else if (RHS->IgnoreParenCasts()->getType()->isObjCObjectPointerType()) { ObjCPointerExpr = RHS; OtherExpr = LHS; } // This warning is deliberately made very specific to reduce false // positives with logic that uses '&' for hashing. This logic mainly // looks for code trying to introspect into tagged pointers, which // code should generally never do. if (ObjCPointerExpr && isa(OtherExpr->IgnoreParenCasts())) { unsigned Diag = diag::warn_objc_pointer_masking; // Determine if we are introspecting the result of performSelectorXXX. const Expr *Ex = ObjCPointerExpr->IgnoreParenCasts(); // Special case messages to -performSelector and friends, which // can return non-pointer values boxed in a pointer value. // Some clients may wish to silence warnings in this subcase. if (const ObjCMessageExpr *ME = dyn_cast(Ex)) { Selector S = ME->getSelector(); StringRef SelArg0 = S.getNameForSlot(0); if (SelArg0.startswith("performSelector")) Diag = diag::warn_objc_pointer_masking_performSelector; } S.Diag(OpLoc, Diag) << ObjCPointerExpr->getSourceRange(); } } static NamedDecl *getDeclFromExpr(Expr *E) { if (!E) return nullptr; if (auto *DRE = dyn_cast(E)) return DRE->getDecl(); if (auto *ME = dyn_cast(E)) return ME->getMemberDecl(); if (auto *IRE = dyn_cast(E)) return IRE->getDecl(); return nullptr; } // This helper function promotes a binary operator's operands (which are of a // half vector type) to a vector of floats and then truncates the result to // a vector of either half or short. static ExprResult convertHalfVecBinOp(Sema &S, ExprResult LHS, ExprResult RHS, BinaryOperatorKind Opc, QualType ResultTy, ExprValueKind VK, ExprObjectKind OK, bool IsCompAssign, SourceLocation OpLoc, FPOptionsOverride FPFeatures) { auto &Context = S.getASTContext(); assert((isVector(ResultTy, Context.HalfTy) || isVector(ResultTy, Context.ShortTy)) && "Result must be a vector of half or short"); assert(isVector(LHS.get()->getType(), Context.HalfTy) && isVector(RHS.get()->getType(), Context.HalfTy) && "both operands expected to be a half vector"); RHS = convertVector(RHS.get(), Context.FloatTy, S); QualType BinOpResTy = RHS.get()->getType(); // If Opc is a comparison, ResultType is a vector of shorts. In that case, // change BinOpResTy to a vector of ints. 
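// (Illustrative sketch, not from the original source: given
//    typedef __fp16 half4 __attribute__((ext_vector_type(4)));
//    half4 a, b;
//  a comparison such as 'a < b' is evaluated here on float vectors, producing
//  an int-vector value that is then truncated to the short-vector result
//  type.)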
if (isVector(ResultTy, Context.ShortTy)) BinOpResTy = S.GetSignedVectorType(BinOpResTy); if (IsCompAssign) return CompoundAssignOperator::Create(Context, LHS.get(), RHS.get(), Opc, ResultTy, VK, OK, OpLoc, FPFeatures, BinOpResTy, BinOpResTy); LHS = convertVector(LHS.get(), Context.FloatTy, S); auto *BO = BinaryOperator::Create(Context, LHS.get(), RHS.get(), Opc, BinOpResTy, VK, OK, OpLoc, FPFeatures); return convertVector(BO, ResultTy->castAs()->getElementType(), S); } static std::pair CorrectDelayedTyposInBinOp(Sema &S, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr) { ExprResult LHS = LHSExpr, RHS = RHSExpr; if (!S.getLangOpts().CPlusPlus) { // C cannot handle TypoExpr nodes on either side of a binop because it // doesn't handle dependent types properly, so make sure any TypoExprs have // been dealt with before checking the operands. LHS = S.CorrectDelayedTyposInExpr(LHS); RHS = S.CorrectDelayedTyposInExpr( RHS, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false, [Opc, LHS](Expr *E) { if (Opc != BO_Assign) return ExprResult(E); // Avoid correcting the RHS to the same Expr as the LHS. Decl *D = getDeclFromExpr(E); return (D && D == getDeclFromExpr(LHS.get())) ? ExprError() : E; }); } return std::make_pair(LHS, RHS); } /// Returns true if conversion between vectors of halfs and vectors of floats /// is needed. static bool needsConversionOfHalfVec(bool OpRequiresConversion, ASTContext &Ctx, Expr *E0, Expr *E1 = nullptr) { if (!OpRequiresConversion || Ctx.getLangOpts().NativeHalfType || Ctx.getTargetInfo().useFP16ConversionIntrinsics()) return false; auto HasVectorOfHalfType = [&Ctx](Expr *E) { QualType Ty = E->IgnoreImplicit()->getType(); // Don't promote half precision neon vectors like float16x4_t in arm_neon.h // to vectors of floats. Although the element type of the vectors is __fp16, // the vectors shouldn't be treated as storage-only types. See the // discussion here: https://reviews.llvm.org/rG825235c140e7 if (const VectorType *VT = Ty->getAs()) { if (VT->getVectorKind() == VectorType::NeonVector) return false; return VT->getElementType().getCanonicalType() == Ctx.HalfTy; } return false; }; return HasVectorOfHalfType(E0) && (!E1 || HasVectorOfHalfType(E1)); } /// CreateBuiltinBinOp - Creates a new built-in binary operation with /// operator @p Opc at location @c TokLoc. This routine only supports /// built-in operations; ActOnBinOp handles overloaded operators. ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr) { if (getLangOpts().CPlusPlus11 && isa(RHSExpr)) { // The syntax only allows initializer lists on the RHS of assignment, // so we don't need to worry about accepting invalid code for // non-assignment operators. // C++11 5.17p9: // The meaning of x = {v} [...] is that of x = T(v) [...]. The meaning // of x = {} is x = T(). InitializationKind Kind = InitializationKind::CreateDirectList( RHSExpr->getBeginLoc(), RHSExpr->getBeginLoc(), RHSExpr->getEndLoc()); InitializedEntity Entity = InitializedEntity::InitializeTemporary(LHSExpr->getType()); InitializationSequence InitSeq(*this, Entity, Kind, RHSExpr); ExprResult Init = InitSeq.Perform(*this, Entity, Kind, RHSExpr); if (Init.isInvalid()) return Init; RHSExpr = Init.get(); } ExprResult LHS = LHSExpr, RHS = RHSExpr; QualType ResultTy; // Result type of the binary operator. 
// The following two variables are used for compound assignment operators QualType CompLHSTy; // Type of LHS after promotions for computation QualType CompResultTy; // Type of computation result ExprValueKind VK = VK_RValue; ExprObjectKind OK = OK_Ordinary; bool ConvertHalfVec = false; std::tie(LHS, RHS) = CorrectDelayedTyposInBinOp(*this, Opc, LHSExpr, RHSExpr); if (!LHS.isUsable() || !RHS.isUsable()) return ExprError(); if (getLangOpts().OpenCL) { QualType LHSTy = LHSExpr->getType(); QualType RHSTy = RHSExpr->getType(); // OpenCLC v2.0 s6.13.11.1 allows atomic variables to be initialized by // the ATOMIC_VAR_INIT macro. if (LHSTy->isAtomicType() || RHSTy->isAtomicType()) { SourceRange SR(LHSExpr->getBeginLoc(), RHSExpr->getEndLoc()); if (BO_Assign == Opc) Diag(OpLoc, diag::err_opencl_atomic_init) << 0 << SR; else ResultTy = InvalidOperands(OpLoc, LHS, RHS); return ExprError(); } // OpenCL special types - image, sampler, pipe, and blocks are to be used // only with a builtin functions and therefore should be disallowed here. if (LHSTy->isImageType() || RHSTy->isImageType() || LHSTy->isSamplerT() || RHSTy->isSamplerT() || LHSTy->isPipeType() || RHSTy->isPipeType() || LHSTy->isBlockPointerType() || RHSTy->isBlockPointerType()) { ResultTy = InvalidOperands(OpLoc, LHS, RHS); return ExprError(); } } switch (Opc) { case BO_Assign: ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType()); if (getLangOpts().CPlusPlus && LHS.get()->getObjectKind() != OK_ObjCProperty) { VK = LHS.get()->getValueKind(); OK = LHS.get()->getObjectKind(); } if (!ResultTy.isNull()) { DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc, true); DiagnoseSelfMove(LHS.get(), RHS.get(), OpLoc); // Avoid copying a block to the heap if the block is assigned to a local // auto variable that is declared in the same scope as the block. This // optimization is unsafe if the local variable is declared in an outer // scope. For example: // // BlockTy b; // { // b = ^{...}; // } // // It is unsafe to invoke the block here if it wasn't copied to the // // heap. 
// b(); if (auto *BE = dyn_cast(RHS.get()->IgnoreParens())) if (auto *DRE = dyn_cast(LHS.get()->IgnoreParens())) if (auto *VD = dyn_cast(DRE->getDecl())) if (VD->hasLocalStorage() && getCurScope()->isDeclScope(VD)) BE->getBlockDecl()->setCanAvoidCopyToHeap(); if (LHS.get()->getType().hasNonTrivialToPrimitiveCopyCUnion()) checkNonTrivialCUnion(LHS.get()->getType(), LHS.get()->getExprLoc(), NTCUC_Assignment, NTCUK_Copy); } RecordModifiableNonNullParam(*this, LHS.get()); break; case BO_PtrMemD: case BO_PtrMemI: ResultTy = CheckPointerToMemberOperands(LHS, RHS, VK, OpLoc, Opc == BO_PtrMemI); break; case BO_Mul: case BO_Div: ConvertHalfVec = true; ResultTy = CheckMultiplyDivideOperands(LHS, RHS, OpLoc, false, Opc == BO_Div); break; case BO_Rem: ResultTy = CheckRemainderOperands(LHS, RHS, OpLoc); break; case BO_Add: ConvertHalfVec = true; ResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, Opc); break; case BO_Sub: ConvertHalfVec = true; ResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc); break; case BO_Shl: case BO_Shr: ResultTy = CheckShiftOperands(LHS, RHS, OpLoc, Opc); break; case BO_LE: case BO_LT: case BO_GE: case BO_GT: ConvertHalfVec = true; ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc); break; case BO_EQ: case BO_NE: ConvertHalfVec = true; ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc); break; case BO_Cmp: ConvertHalfVec = true; ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc); assert(ResultTy.isNull() || ResultTy->getAsCXXRecordDecl()); break; case BO_And: checkObjCPointerIntrospection(*this, LHS, RHS, OpLoc); LLVM_FALLTHROUGH; case BO_Xor: case BO_Or: ResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, Opc); break; case BO_LAnd: case BO_LOr: ConvertHalfVec = true; ResultTy = CheckLogicalOperands(LHS, RHS, OpLoc, Opc); break; case BO_MulAssign: case BO_DivAssign: ConvertHalfVec = true; CompResultTy = CheckMultiplyDivideOperands(LHS, RHS, OpLoc, true, Opc == BO_DivAssign); CompLHSTy = CompResultTy; if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid()) ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy); break; case BO_RemAssign: CompResultTy = CheckRemainderOperands(LHS, RHS, OpLoc, true); CompLHSTy = CompResultTy; if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid()) ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy); break; case BO_AddAssign: ConvertHalfVec = true; CompResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, Opc, &CompLHSTy); if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid()) ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy); break; case BO_SubAssign: ConvertHalfVec = true; CompResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc, &CompLHSTy); if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid()) ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy); break; case BO_ShlAssign: case BO_ShrAssign: CompResultTy = CheckShiftOperands(LHS, RHS, OpLoc, Opc, true); CompLHSTy = CompResultTy; if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid()) ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy); break; case BO_AndAssign: case BO_OrAssign: // fallthrough DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc, true); LLVM_FALLTHROUGH; case BO_XorAssign: CompResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, Opc); CompLHSTy = CompResultTy; if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid()) ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy); break; case BO_Comma: 
ResultTy = CheckCommaOperands(*this, LHS, RHS, OpLoc); if (getLangOpts().CPlusPlus && !RHS.isInvalid()) { VK = RHS.get()->getValueKind(); OK = RHS.get()->getObjectKind(); } break; } if (ResultTy.isNull() || LHS.isInvalid() || RHS.isInvalid()) return ExprError(); // Some of the binary operations require promoting operands of half vector to // float vectors and truncating the result back to half vector. For now, we do // this only when HalfArgsAndReturn is set (that is, when the target is arm or // arm64). assert(isVector(RHS.get()->getType(), Context.HalfTy) == isVector(LHS.get()->getType(), Context.HalfTy) && "both sides are half vectors or neither sides are"); ConvertHalfVec = needsConversionOfHalfVec(ConvertHalfVec, Context, LHS.get(), RHS.get()); // Check for array bounds violations for both sides of the BinaryOperator CheckArrayAccess(LHS.get()); CheckArrayAccess(RHS.get()); if (const ObjCIsaExpr *OISA = dyn_cast(LHS.get()->IgnoreParenCasts())) { NamedDecl *ObjectSetClass = LookupSingleName(TUScope, &Context.Idents.get("object_setClass"), SourceLocation(), LookupOrdinaryName); if (ObjectSetClass && isa(LHS.get())) { SourceLocation RHSLocEnd = getLocForEndOfToken(RHS.get()->getEndLoc()); Diag(LHS.get()->getExprLoc(), diag::warn_objc_isa_assign) << FixItHint::CreateInsertion(LHS.get()->getBeginLoc(), "object_setClass(") << FixItHint::CreateReplacement(SourceRange(OISA->getOpLoc(), OpLoc), ",") << FixItHint::CreateInsertion(RHSLocEnd, ")"); } else Diag(LHS.get()->getExprLoc(), diag::warn_objc_isa_assign); } else if (const ObjCIvarRefExpr *OIRE = dyn_cast(LHS.get()->IgnoreParenCasts())) DiagnoseDirectIsaAccess(*this, OIRE, OpLoc, RHS.get()); // Opc is not a compound assignment if CompResultTy is null. if (CompResultTy.isNull()) { if (ConvertHalfVec) return convertHalfVecBinOp(*this, LHS, RHS, Opc, ResultTy, VK, OK, false, OpLoc, CurFPFeatureOverrides()); return BinaryOperator::Create(Context, LHS.get(), RHS.get(), Opc, ResultTy, VK, OK, OpLoc, CurFPFeatureOverrides()); } // Handle compound assignments. if (getLangOpts().CPlusPlus && LHS.get()->getObjectKind() != OK_ObjCProperty) { VK = VK_LValue; OK = LHS.get()->getObjectKind(); } // The LHS is not converted to the result type for fixed-point compound // assignment as the common type is computed on demand. Reset the CompLHSTy // to the LHS type we would have gotten after unary conversions. if (CompResultTy->isFixedPointType()) CompLHSTy = UsualUnaryConversions(LHS.get()).get()->getType(); if (ConvertHalfVec) return convertHalfVecBinOp(*this, LHS, RHS, Opc, ResultTy, VK, OK, true, OpLoc, CurFPFeatureOverrides()); return CompoundAssignOperator::Create( Context, LHS.get(), RHS.get(), Opc, ResultTy, VK, OK, OpLoc, CurFPFeatureOverrides(), CompLHSTy, CompResultTy); } /// DiagnoseBitwisePrecedence - Emit a warning when bitwise and comparison /// operators are mixed in a way that suggests that the programmer forgot that /// comparison operators have higher precedence. The most typical example of /// such code is "flags & 0x0020 != 0", which is equivalent to "flags & 1". static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr) { BinaryOperator *LHSBO = dyn_cast(LHSExpr); BinaryOperator *RHSBO = dyn_cast(RHSExpr); // Check that one of the sides is a comparison operator and the other isn't. 
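// (For the "flags & 0x0020 != 0" example above, the operands of the '&' are
// 'flags' and the comparison '0x0020 != 0', so only isRightComp is set.)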
bool isLeftComp = LHSBO && LHSBO->isComparisonOp(); bool isRightComp = RHSBO && RHSBO->isComparisonOp(); if (isLeftComp == isRightComp) return; // Bitwise operations are sometimes used as eager logical ops. // Don't diagnose this. bool isLeftBitwise = LHSBO && LHSBO->isBitwiseOp(); bool isRightBitwise = RHSBO && RHSBO->isBitwiseOp(); if (isLeftBitwise || isRightBitwise) return; SourceRange DiagRange = isLeftComp ? SourceRange(LHSExpr->getBeginLoc(), OpLoc) : SourceRange(OpLoc, RHSExpr->getEndLoc()); StringRef OpStr = isLeftComp ? LHSBO->getOpcodeStr() : RHSBO->getOpcodeStr(); SourceRange ParensRange = isLeftComp ? SourceRange(LHSBO->getRHS()->getBeginLoc(), RHSExpr->getEndLoc()) : SourceRange(LHSExpr->getBeginLoc(), RHSBO->getLHS()->getEndLoc()); Self.Diag(OpLoc, diag::warn_precedence_bitwise_rel) << DiagRange << BinaryOperator::getOpcodeStr(Opc) << OpStr; SuggestParentheses(Self, OpLoc, Self.PDiag(diag::note_precedence_silence) << OpStr, (isLeftComp ? LHSExpr : RHSExpr)->getSourceRange()); SuggestParentheses(Self, OpLoc, Self.PDiag(diag::note_precedence_bitwise_first) << BinaryOperator::getOpcodeStr(Opc), ParensRange); } /// It accepts a '&&' expr that is inside a '||' one. /// Emit a diagnostic together with a fixit hint that wraps the '&&' expression /// in parentheses. static void EmitDiagnosticForLogicalAndInLogicalOr(Sema &Self, SourceLocation OpLoc, BinaryOperator *Bop) { assert(Bop->getOpcode() == BO_LAnd); Self.Diag(Bop->getOperatorLoc(), diag::warn_logical_and_in_logical_or) << Bop->getSourceRange() << OpLoc; SuggestParentheses(Self, Bop->getOperatorLoc(), Self.PDiag(diag::note_precedence_silence) << Bop->getOpcodeStr(), Bop->getSourceRange()); } /// Returns true if the given expression can be evaluated as a constant /// 'true'. static bool EvaluatesAsTrue(Sema &S, Expr *E) { bool Res; return !E->isValueDependent() && E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && Res; } /// Returns true if the given expression can be evaluated as a constant /// 'false'. static bool EvaluatesAsFalse(Sema &S, Expr *E) { bool Res; return !E->isValueDependent() && E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && !Res; } /// Look for '&&' in the left hand of a '||' expr. static void DiagnoseLogicalAndInLogicalOrLHS(Sema &S, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr) { if (BinaryOperator *Bop = dyn_cast(LHSExpr)) { if (Bop->getOpcode() == BO_LAnd) { // If it's "a && b || 0" don't warn since the precedence doesn't matter. if (EvaluatesAsFalse(S, RHSExpr)) return; // If it's "1 && a || b" don't warn since the precedence doesn't matter. if (!EvaluatesAsTrue(S, Bop->getLHS())) return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, Bop); } else if (Bop->getOpcode() == BO_LOr) { if (BinaryOperator *RBop = dyn_cast(Bop->getRHS())) { // If it's "a || b && 1 || c" we didn't warn earlier for // "a || b && 1", but warn now. if (RBop->getOpcode() == BO_LAnd && EvaluatesAsTrue(S, RBop->getRHS())) return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, RBop); } } } } /// Look for '&&' in the right hand of a '||' expr. static void DiagnoseLogicalAndInLogicalOrRHS(Sema &S, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr) { if (BinaryOperator *Bop = dyn_cast(RHSExpr)) { if (Bop->getOpcode() == BO_LAnd) { // If it's "0 || a && b" don't warn since the precedence doesn't matter. if (EvaluatesAsFalse(S, LHSExpr)) return; // If it's "a || b && 1" don't warn since the precedence doesn't matter. 
if (!EvaluatesAsTrue(S, Bop->getRHS())) return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, Bop); } } } /// Look for bitwise op in the left or right hand of a bitwise op with /// lower precedence and emit a diagnostic together with a fixit hint that wraps /// the '&' expression in parentheses. static void DiagnoseBitwiseOpInBitwiseOp(Sema &S, BinaryOperatorKind Opc, SourceLocation OpLoc, Expr *SubExpr) { if (BinaryOperator *Bop = dyn_cast(SubExpr)) { if (Bop->isBitwiseOp() && Bop->getOpcode() < Opc) { S.Diag(Bop->getOperatorLoc(), diag::warn_bitwise_op_in_bitwise_op) << Bop->getOpcodeStr() << BinaryOperator::getOpcodeStr(Opc) << Bop->getSourceRange() << OpLoc; SuggestParentheses(S, Bop->getOperatorLoc(), S.PDiag(diag::note_precedence_silence) << Bop->getOpcodeStr(), Bop->getSourceRange()); } } } static void DiagnoseAdditionInShift(Sema &S, SourceLocation OpLoc, Expr *SubExpr, StringRef Shift) { if (BinaryOperator *Bop = dyn_cast(SubExpr)) { if (Bop->getOpcode() == BO_Add || Bop->getOpcode() == BO_Sub) { StringRef Op = Bop->getOpcodeStr(); S.Diag(Bop->getOperatorLoc(), diag::warn_addition_in_bitshift) << Bop->getSourceRange() << OpLoc << Shift << Op; SuggestParentheses(S, Bop->getOperatorLoc(), S.PDiag(diag::note_precedence_silence) << Op, Bop->getSourceRange()); } } } static void DiagnoseShiftCompare(Sema &S, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr) { CXXOperatorCallExpr *OCE = dyn_cast(LHSExpr); if (!OCE) return; FunctionDecl *FD = OCE->getDirectCallee(); if (!FD || !FD->isOverloadedOperator()) return; OverloadedOperatorKind Kind = FD->getOverloadedOperator(); if (Kind != OO_LessLess && Kind != OO_GreaterGreater) return; S.Diag(OpLoc, diag::warn_overloaded_shift_in_comparison) << LHSExpr->getSourceRange() << RHSExpr->getSourceRange() << (Kind == OO_LessLess); SuggestParentheses(S, OCE->getOperatorLoc(), S.PDiag(diag::note_precedence_silence) << (Kind == OO_LessLess ? "<<" : ">>"), OCE->getSourceRange()); SuggestParentheses( S, OpLoc, S.PDiag(diag::note_evaluate_comparison_first), SourceRange(OCE->getArg(1)->getBeginLoc(), RHSExpr->getEndLoc())); } /// DiagnoseBinOpPrecedence - Emit warnings for expressions with tricky /// precedence. static void DiagnoseBinOpPrecedence(Sema &Self, BinaryOperatorKind Opc, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr){ // Diagnose "arg1 'bitwise' arg2 'eq' arg3". if (BinaryOperator::isBitwiseOp(Opc)) DiagnoseBitwisePrecedence(Self, Opc, OpLoc, LHSExpr, RHSExpr); // Diagnose "arg1 & arg2 | arg3" if ((Opc == BO_Or || Opc == BO_Xor) && !OpLoc.isMacroID()/* Don't warn in macros. */) { DiagnoseBitwiseOpInBitwiseOp(Self, Opc, OpLoc, LHSExpr); DiagnoseBitwiseOpInBitwiseOp(Self, Opc, OpLoc, RHSExpr); } // Warn about arg1 || arg2 && arg3, as GCC 4.3+ does. // We don't warn for 'assert(a || b && "bad")' since this is safe. if (Opc == BO_LOr && !OpLoc.isMacroID()/* Don't warn in macros. */) { DiagnoseLogicalAndInLogicalOrLHS(Self, OpLoc, LHSExpr, RHSExpr); DiagnoseLogicalAndInLogicalOrRHS(Self, OpLoc, LHSExpr, RHSExpr); } if ((Opc == BO_Shl && LHSExpr->getType()->isIntegralType(Self.getASTContext())) || Opc == BO_Shr) { StringRef Shift = BinaryOperator::getOpcodeStr(Opc); DiagnoseAdditionInShift(Self, OpLoc, LHSExpr, Shift); DiagnoseAdditionInShift(Self, OpLoc, RHSExpr, Shift); } // Warn on overloaded shift operators and comparisons, such as: // cout << 5 == 4; if (BinaryOperator::isComparisonOp(Opc)) DiagnoseShiftCompare(Self, OpLoc, LHSExpr, RHSExpr); } // Binary Operators. 'Tok' is the token for the operator. 
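// (Illustrative: for the source 'a += b' the parser passes tok::plusequal,
// which ConvertTokenKindToBinaryOpcode maps to BO_AddAssign before BuildBinOp
// is invoked below.)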
ExprResult Sema::ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr) { BinaryOperatorKind Opc = ConvertTokenKindToBinaryOpcode(Kind); assert(LHSExpr && "ActOnBinOp(): missing left expression"); assert(RHSExpr && "ActOnBinOp(): missing right expression"); // Emit warnings for tricky precedence issues, e.g. "bitfield & 0x4 == 0" DiagnoseBinOpPrecedence(*this, Opc, TokLoc, LHSExpr, RHSExpr); return BuildBinOp(S, TokLoc, Opc, LHSExpr, RHSExpr); } /// Build an overloaded binary operator expression in the given scope. static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHS, Expr *RHS) { switch (Opc) { case BO_Assign: case BO_DivAssign: case BO_RemAssign: case BO_SubAssign: case BO_AndAssign: case BO_OrAssign: case BO_XorAssign: DiagnoseSelfAssignment(S, LHS, RHS, OpLoc, false); CheckIdentityFieldAssignment(LHS, RHS, OpLoc, S); break; default: break; } // Find all of the overloaded operators visible from this // point. We perform both an operator-name lookup from the local // scope and an argument-dependent lookup based on the types of // the arguments. UnresolvedSet<16> Functions; OverloadedOperatorKind OverOp = BinaryOperator::getOverloadedOperator(Opc); if (Sc && OverOp != OO_None && OverOp != OO_Equal) S.LookupOverloadedOperatorName(OverOp, Sc, LHS->getType(), RHS->getType(), Functions); // In C++20 onwards, we may have a second operator to look up. if (S.getLangOpts().CPlusPlus20) { if (OverloadedOperatorKind ExtraOp = getRewrittenOverloadedOperator(OverOp)) S.LookupOverloadedOperatorName(ExtraOp, Sc, LHS->getType(), RHS->getType(), Functions); } // Build the (potentially-overloaded, potentially-dependent) // binary operation. return S.CreateOverloadedBinOp(OpLoc, Opc, Functions, LHS, RHS); } ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr) { ExprResult LHS, RHS; std::tie(LHS, RHS) = CorrectDelayedTyposInBinOp(*this, Opc, LHSExpr, RHSExpr); if (!LHS.isUsable() || !RHS.isUsable()) return ExprError(); LHSExpr = LHS.get(); RHSExpr = RHS.get(); // We want to end up calling one of checkPseudoObjectAssignment // (if the LHS is a pseudo-object), BuildOverloadedBinOp (if // both expressions are overloadable or either is type-dependent), // or CreateBuiltinBinOp (in any other case). We also want to get // any placeholder types out of the way. // Handle pseudo-objects in the LHS. if (const BuiltinType *pty = LHSExpr->getType()->getAsPlaceholderType()) { // Assignments with a pseudo-object l-value need special analysis. if (pty->getKind() == BuiltinType::PseudoObject && BinaryOperator::isAssignmentOp(Opc)) return checkPseudoObjectAssignment(S, OpLoc, Opc, LHSExpr, RHSExpr); // Don't resolve overloads if the other type is overloadable. if (getLangOpts().CPlusPlus && pty->getKind() == BuiltinType::Overload) { // We can't actually test that if we still have a placeholder, // though. Fortunately, none of the exceptions we see in that // code below are valid when the LHS is an overload set. Note // that an overload set can be dependently-typed, but it never // instantiates to having an overloadable type. 
ExprResult resolvedRHS = CheckPlaceholderExpr(RHSExpr); if (resolvedRHS.isInvalid()) return ExprError(); RHSExpr = resolvedRHS.get(); if (RHSExpr->isTypeDependent() || RHSExpr->getType()->isOverloadableType()) return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr); } // If we're instantiating "a.x < b" or "A::x < b" and 'x' names a function // template, diagnose the missing 'template' keyword instead of diagnosing // an invalid use of a bound member function. // // Note that "A::x < b" might be valid if 'b' has an overloadable type due // to C++1z [over.over]/1.4, but we already checked for that case above. if (Opc == BO_LT && inTemplateInstantiation() && (pty->getKind() == BuiltinType::BoundMember || pty->getKind() == BuiltinType::Overload)) { auto *OE = dyn_cast(LHSExpr); if (OE && !OE->hasTemplateKeyword() && !OE->hasExplicitTemplateArgs() && std::any_of(OE->decls_begin(), OE->decls_end(), [](NamedDecl *ND) { return isa(ND); })) { Diag(OE->getQualifier() ? OE->getQualifierLoc().getBeginLoc() : OE->getNameLoc(), diag::err_template_kw_missing) << OE->getName().getAsString() << ""; return ExprError(); } } ExprResult LHS = CheckPlaceholderExpr(LHSExpr); if (LHS.isInvalid()) return ExprError(); LHSExpr = LHS.get(); } // Handle pseudo-objects in the RHS. if (const BuiltinType *pty = RHSExpr->getType()->getAsPlaceholderType()) { // An overload in the RHS can potentially be resolved by the type // being assigned to. if (Opc == BO_Assign && pty->getKind() == BuiltinType::Overload) { if (getLangOpts().CPlusPlus && (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent() || LHSExpr->getType()->isOverloadableType())) return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr); return CreateBuiltinBinOp(OpLoc, Opc, LHSExpr, RHSExpr); } // Don't resolve overloads if the other type is overloadable. if (getLangOpts().CPlusPlus && pty->getKind() == BuiltinType::Overload && LHSExpr->getType()->isOverloadableType()) return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr); ExprResult resolvedRHS = CheckPlaceholderExpr(RHSExpr); if (!resolvedRHS.isUsable()) return ExprError(); RHSExpr = resolvedRHS.get(); } if (getLangOpts().CPlusPlus) { // If either expression is type-dependent, always build an // overloaded op. if (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent()) return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr); // Otherwise, build an overloaded op if either expression has an // overloadable type. if (LHSExpr->getType()->isOverloadableType() || RHSExpr->getType()->isOverloadableType()) return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr); } // Build a built-in binary operation. return CreateBuiltinBinOp(OpLoc, Opc, LHSExpr, RHSExpr); } static bool isOverflowingIntegerType(ASTContext &Ctx, QualType T) { if (T.isNull() || T->isDependentType()) return false; if (!T->isPromotableIntegerType()) return true; return Ctx.getIntWidth(T) >= Ctx.getIntWidth(Ctx.IntTy); } ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr) { ExprResult Input = InputExpr; ExprValueKind VK = VK_RValue; ExprObjectKind OK = OK_Ordinary; QualType resultType; bool CanOverflow = false; bool ConvertHalfVec = false; if (getLangOpts().OpenCL) { QualType Ty = InputExpr->getType(); // The only legal unary operation for atomics is '&'. 
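// (Illustrative OpenCL example: given 'atomic_int a;', taking the address
// '&a' is accepted, while a unary operation such as '-a' is rejected by the
// check below.)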
if ((Opc != UO_AddrOf && Ty->isAtomicType()) || // OpenCL special types - image, sampler, pipe, and blocks are to be used // only with a builtin functions and therefore should be disallowed here. (Ty->isImageType() || Ty->isSamplerT() || Ty->isPipeType() || Ty->isBlockPointerType())) { return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << InputExpr->getType() << Input.get()->getSourceRange()); } } switch (Opc) { case UO_PreInc: case UO_PreDec: case UO_PostInc: case UO_PostDec: resultType = CheckIncrementDecrementOperand(*this, Input.get(), VK, OK, OpLoc, Opc == UO_PreInc || Opc == UO_PostInc, Opc == UO_PreInc || Opc == UO_PreDec); CanOverflow = isOverflowingIntegerType(Context, resultType); break; case UO_AddrOf: resultType = CheckAddressOfOperand(Input, OpLoc); CheckAddressOfNoDeref(InputExpr); RecordModifiableNonNullParam(*this, InputExpr); break; case UO_Deref: { Input = DefaultFunctionArrayLvalueConversion(Input.get()); if (Input.isInvalid()) return ExprError(); resultType = CheckIndirectionOperand(*this, Input.get(), VK, OpLoc); break; } case UO_Plus: case UO_Minus: CanOverflow = Opc == UO_Minus && isOverflowingIntegerType(Context, Input.get()->getType()); Input = UsualUnaryConversions(Input.get()); if (Input.isInvalid()) return ExprError(); // Unary plus and minus require promoting an operand of half vector to a // float vector and truncating the result back to a half vector. For now, we // do this only when HalfArgsAndReturns is set (that is, when the target is // arm or arm64). ConvertHalfVec = needsConversionOfHalfVec(true, Context, Input.get()); // If the operand is a half vector, promote it to a float vector. if (ConvertHalfVec) Input = convertVector(Input.get(), Context.FloatTy, *this); resultType = Input.get()->getType(); if (resultType->isDependentType()) break; if (resultType->isArithmeticType()) // C99 6.5.3.3p1 break; else if (resultType->isVectorType() && // The z vector extensions don't allow + or - with bool vectors. (!Context.getLangOpts().ZVector || resultType->castAs()->getVectorKind() != VectorType::AltiVecBool)) break; else if (getLangOpts().CPlusPlus && // C++ [expr.unary.op]p6 Opc == UO_Plus && resultType->isPointerType()) break; return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << resultType << Input.get()->getSourceRange()); case UO_Not: // bitwise complement Input = UsualUnaryConversions(Input.get()); if (Input.isInvalid()) return ExprError(); resultType = Input.get()->getType(); if (resultType->isDependentType()) break; // C99 6.5.3.3p1. We allow complex int and float as a GCC extension. if (resultType->isComplexType() || resultType->isComplexIntegerType()) // C99 does not support '~' for complex conjugation. Diag(OpLoc, diag::ext_integer_complement_complex) << resultType << Input.get()->getSourceRange(); else if (resultType->hasIntegerRepresentation()) break; else if (resultType->isExtVectorType() && Context.getLangOpts().OpenCL) { // OpenCL v1.1 s6.3.f: The bitwise operator not (~) does not operate // on vector float types. QualType T = resultType->castAs()->getElementType(); if (!T->isIntegerType()) return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << resultType << Input.get()->getSourceRange()); } else { return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << resultType << Input.get()->getSourceRange()); } break; case UO_LNot: // logical negation // Unlike +/-/~, integer promotions aren't done here (C99 6.5.3.3p5). 
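// (E.g. '!p' on a 'char *p' is valid in both languages; the result has type
// 'int' in C and, after the contextual conversion below, type 'bool' in C++.)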
Input = DefaultFunctionArrayLvalueConversion(Input.get()); if (Input.isInvalid()) return ExprError(); resultType = Input.get()->getType(); // Though we still have to promote half FP to float... if (resultType->isHalfType() && !Context.getLangOpts().NativeHalfType) { Input = ImpCastExprToType(Input.get(), Context.FloatTy, CK_FloatingCast).get(); resultType = Context.FloatTy; } if (resultType->isDependentType()) break; if (resultType->isScalarType() && !isScopedEnumerationType(resultType)) { // C99 6.5.3.3p1: ok, fallthrough; if (Context.getLangOpts().CPlusPlus) { // C++03 [expr.unary.op]p8, C++0x [expr.unary.op]p9: // operand contextually converted to bool. Input = ImpCastExprToType(Input.get(), Context.BoolTy, ScalarTypeToBooleanCastKind(resultType)); } else if (Context.getLangOpts().OpenCL && Context.getLangOpts().OpenCLVersion < 120) { // OpenCL v1.1 6.3.h: The logical operator not (!) does not // operate on scalar float types. if (!resultType->isIntegerType() && !resultType->isPointerType()) return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << resultType << Input.get()->getSourceRange()); } } else if (resultType->isExtVectorType()) { if (Context.getLangOpts().OpenCL && Context.getLangOpts().OpenCLVersion < 120 && !Context.getLangOpts().OpenCLCPlusPlus) { // OpenCL v1.1 6.3.h: The logical operator not (!) does not // operate on vector float types. QualType T = resultType->castAs()->getElementType(); if (!T->isIntegerType()) return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << resultType << Input.get()->getSourceRange()); } // Vector logical not returns the signed variant of the operand type. resultType = GetSignedVectorType(resultType); break; } else if (Context.getLangOpts().CPlusPlus && resultType->isVectorType()) { const VectorType *VTy = resultType->castAs(); if (VTy->getVectorKind() != VectorType::GenericVector) return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << resultType << Input.get()->getSourceRange()); // Vector logical not returns the signed variant of the operand type. resultType = GetSignedVectorType(resultType); break; } else { return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << resultType << Input.get()->getSourceRange()); } // LNot always has type int. C99 6.5.3.3p5. // In C++, it's bool. C++ 5.3.1p8 resultType = Context.getLogicalOperationType(); break; case UO_Real: case UO_Imag: resultType = CheckRealImagOperand(*this, Input, OpLoc, Opc == UO_Real); // _Real maps ordinary l-values into ordinary l-values. _Imag maps ordinary // complex l-values to ordinary l-values and all other values to r-values. if (Input.isInvalid()) return ExprError(); if (Opc == UO_Real || Input.get()->getType()->isAnyComplexType()) { if (Input.get()->getValueKind() != VK_RValue && Input.get()->getObjectKind() == OK_Ordinary) VK = Input.get()->getValueKind(); } else if (!getLangOpts().CPlusPlus) { // In C, a volatile scalar is read by __imag. In C++, it is not. Input = DefaultLvalueConversion(Input.get()); } break; case UO_Extension: resultType = Input.get()->getType(); VK = Input.get()->getValueKind(); OK = Input.get()->getObjectKind(); break; case UO_Coawait: // It's unnecessary to represent the pass-through operator co_await in the // AST; just return the input expression instead. 
assert(!Input.get()->getType()->isDependentType() && "the co_await expression must be non-dependant before " "building operator co_await"); return Input; } if (resultType.isNull() || Input.isInvalid()) return ExprError(); // Check for array bounds violations in the operand of the UnaryOperator, // except for the '*' and '&' operators that have to be handled specially // by CheckArrayAccess (as there are special cases like &array[arraysize] // that are explicitly defined as valid by the standard). if (Opc != UO_AddrOf && Opc != UO_Deref) CheckArrayAccess(Input.get()); auto *UO = UnaryOperator::Create(Context, Input.get(), Opc, resultType, VK, OK, OpLoc, CanOverflow, CurFPFeatureOverrides()); if (Opc == UO_Deref && UO->getType()->hasAttr(attr::NoDeref) && !isa(UO->getType().getDesugaredType(Context))) ExprEvalContexts.back().PossibleDerefs.insert(UO); // Convert the result back to a half vector. if (ConvertHalfVec) return convertVector(UO, Context.HalfTy, *this); return UO; } /// Determine whether the given expression is a qualified member /// access expression, of a form that could be turned into a pointer to member /// with the address-of operator. bool Sema::isQualifiedMemberAccess(Expr *E) { if (DeclRefExpr *DRE = dyn_cast(E)) { if (!DRE->getQualifier()) return false; ValueDecl *VD = DRE->getDecl(); if (!VD->isCXXClassMember()) return false; if (isa(VD) || isa(VD)) return true; if (CXXMethodDecl *Method = dyn_cast(VD)) return Method->isInstance(); return false; } if (UnresolvedLookupExpr *ULE = dyn_cast(E)) { if (!ULE->getQualifier()) return false; for (NamedDecl *D : ULE->decls()) { if (CXXMethodDecl *Method = dyn_cast(D)) { if (Method->isInstance()) return true; } else { // Overload set does not contain methods. break; } } return false; } return false; } ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input) { // First things first: handle placeholders so that the // overloaded-operator check considers the right type. if (const BuiltinType *pty = Input->getType()->getAsPlaceholderType()) { // Increment and decrement of pseudo-object references. if (pty->getKind() == BuiltinType::PseudoObject && UnaryOperator::isIncrementDecrementOp(Opc)) return checkPseudoObjectIncDec(S, OpLoc, Opc, Input); // extension is always a builtin operator. if (Opc == UO_Extension) return CreateBuiltinUnaryOp(OpLoc, Opc, Input); // & gets special logic for several kinds of placeholder. // The builtin code knows what to do. if (Opc == UO_AddrOf && (pty->getKind() == BuiltinType::Overload || pty->getKind() == BuiltinType::UnknownAny || pty->getKind() == BuiltinType::BoundMember)) return CreateBuiltinUnaryOp(OpLoc, Opc, Input); // Anything else needs to be handled now. ExprResult Result = CheckPlaceholderExpr(Input); if (Result.isInvalid()) return ExprError(); Input = Result.get(); } if (getLangOpts().CPlusPlus && Input->getType()->isOverloadableType() && UnaryOperator::getOverloadedOperator(Opc) != OO_None && !(Opc == UO_AddrOf && isQualifiedMemberAccess(Input))) { // Find all of the overloaded operators visible from this // point. We perform both an operator-name lookup from the local // scope and an argument-dependent lookup based on the types of // the arguments. 
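// (Illustrative: for '!x' where 'x' has class type, OverOp is OO_Exclaim and
// the lookup below collects the visible 'operator!' declarations;
// CreateOverloadedUnaryOp then performs overload resolution, including
// argument-dependent lookup.)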
UnresolvedSet<16> Functions; OverloadedOperatorKind OverOp = UnaryOperator::getOverloadedOperator(Opc); if (S && OverOp != OO_None) LookupOverloadedOperatorName(OverOp, S, Input->getType(), QualType(), Functions); return CreateOverloadedUnaryOp(OpLoc, Opc, Functions, Input); } return CreateBuiltinUnaryOp(OpLoc, Opc, Input); } // Unary Operators. 'Tok' is the token for the operator. ExprResult Sema::ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input) { return BuildUnaryOp(S, OpLoc, ConvertTokenKindToUnaryOpcode(Op), Input); } /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult Sema::ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl) { TheDecl->markUsed(Context); // Create the AST node. The address of a label always has type 'void*'. return new (Context) AddrLabelExpr(OpLoc, LabLoc, TheDecl, Context.getPointerType(Context.VoidTy)); } void Sema::ActOnStartStmtExpr() { PushExpressionEvaluationContext(ExprEvalContexts.back().Context); } void Sema::ActOnStmtExprError() { // Note that function is also called by TreeTransform when leaving a // StmtExpr scope without rebuilding anything. DiscardCleanupsInEvaluationContext(); PopExpressionEvaluationContext(); } ExprResult Sema::ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc) { return BuildStmtExpr(LPLoc, SubStmt, RPLoc, getTemplateDepth(S)); } ExprResult Sema::BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc, unsigned TemplateDepth) { assert(SubStmt && isa(SubStmt) && "Invalid action invocation!"); CompoundStmt *Compound = cast(SubStmt); if (hasAnyUnrecoverableErrorsInThisFunction()) DiscardCleanupsInEvaluationContext(); assert(!Cleanup.exprNeedsCleanups() && "cleanups within StmtExpr not correctly bound!"); PopExpressionEvaluationContext(); // FIXME: there are a variety of strange constraints to enforce here, for // example, it is not possible to goto into a stmt expression apparently. // More semantic analysis is needed. // If there are sub-stmts in the compound stmt, take the type of the last one // as the type of the stmtexpr. QualType Ty = Context.VoidTy; bool StmtExprMayBindToTemp = false; if (!Compound->body_empty()) { // For GCC compatibility we get the last Stmt excluding trailing NullStmts. if (const auto *LastStmt = dyn_cast(Compound->getStmtExprResult())) { if (const Expr *Value = LastStmt->getExprStmt()) { StmtExprMayBindToTemp = true; Ty = Value->getType(); } } } // FIXME: Check that expression type is complete/non-abstract; statement // expressions are not lvalues. Expr *ResStmtExpr = new (Context) StmtExpr(Compound, Ty, LPLoc, RPLoc, TemplateDepth); if (StmtExprMayBindToTemp) return MaybeBindToTemporary(ResStmtExpr); return ResStmtExpr; } ExprResult Sema::ActOnStmtExprResult(ExprResult ER) { if (ER.isInvalid()) return ExprError(); // Do function/array conversion on the last expression, but not // lvalue-to-rvalue. However, initialize an unqualified type. ER = DefaultFunctionArrayConversion(ER.get()); if (ER.isInvalid()) return ExprError(); Expr *E = ER.get(); if (E->isTypeDependent()) return E; // In ARC, if the final expression ends in a consume, splice // the consume out and bind it later. In the alternate case // (when dealing with a retainable type), the result // initialization will create a produce. In both cases the // result will be +1, and we'll need to balance that out with // a bind. 
auto *Cast = dyn_cast(E); if (Cast && Cast->getCastKind() == CK_ARCConsumeObject) return Cast->getSubExpr(); // FIXME: Provide a better location for the initialization. return PerformCopyInitialization( InitializedEntity::InitializeStmtExprResult( E->getBeginLoc(), E->getType().getUnqualifiedType()), SourceLocation(), E); } ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef Components, SourceLocation RParenLoc) { QualType ArgTy = TInfo->getType(); bool Dependent = ArgTy->isDependentType(); SourceRange TypeRange = TInfo->getTypeLoc().getLocalSourceRange(); // We must have at least one component that refers to the type, and the first // one is known to be a field designator. Verify that the ArgTy represents // a struct/union/class. if (!Dependent && !ArgTy->isRecordType()) return ExprError(Diag(BuiltinLoc, diag::err_offsetof_record_type) << ArgTy << TypeRange); // Type must be complete per C99 7.17p3 because a declaring a variable // with an incomplete type would be ill-formed. if (!Dependent && RequireCompleteType(BuiltinLoc, ArgTy, diag::err_offsetof_incomplete_type, TypeRange)) return ExprError(); bool DidWarnAboutNonPOD = false; QualType CurrentType = ArgTy; SmallVector Comps; SmallVector Exprs; for (const OffsetOfComponent &OC : Components) { if (OC.isBrackets) { // Offset of an array sub-field. TODO: Should we allow vector elements? if (!CurrentType->isDependentType()) { const ArrayType *AT = Context.getAsArrayType(CurrentType); if(!AT) return ExprError(Diag(OC.LocEnd, diag::err_offsetof_array_type) << CurrentType); CurrentType = AT->getElementType(); } else CurrentType = Context.DependentTy; ExprResult IdxRval = DefaultLvalueConversion(static_cast(OC.U.E)); if (IdxRval.isInvalid()) return ExprError(); Expr *Idx = IdxRval.get(); // The expression must be an integral expression. // FIXME: An integral constant expression? if (!Idx->isTypeDependent() && !Idx->isValueDependent() && !Idx->getType()->isIntegerType()) return ExprError( Diag(Idx->getBeginLoc(), diag::err_typecheck_subscript_not_integer) << Idx->getSourceRange()); // Record this array index. Comps.push_back(OffsetOfNode(OC.LocStart, Exprs.size(), OC.LocEnd)); Exprs.push_back(Idx); continue; } // Offset of a field. if (CurrentType->isDependentType()) { // We have the offset of a field, but we can't look into the dependent // type. Just record the identifier of the field. Comps.push_back(OffsetOfNode(OC.LocStart, OC.U.IdentInfo, OC.LocEnd)); CurrentType = Context.DependentTy; continue; } // We need to have a complete type to look into. if (RequireCompleteType(OC.LocStart, CurrentType, diag::err_offsetof_incomplete_type)) return ExprError(); // Look for the designated field. const RecordType *RC = CurrentType->getAs(); if (!RC) return ExprError(Diag(OC.LocEnd, diag::err_offsetof_record_type) << CurrentType); RecordDecl *RD = RC->getDecl(); // C++ [lib.support.types]p5: // The macro offsetof accepts a restricted set of type arguments in this // International Standard. type shall be a POD structure or a POD union // (clause 9). // C++11 [support.types]p4: // If type is not a standard-layout class (Clause 9), the results are // undefined. if (CXXRecordDecl *CRD = dyn_cast(RD)) { bool IsSafe = LangOpts.CPlusPlus11? CRD->isStandardLayout() : CRD->isPOD(); unsigned DiagID = LangOpts.CPlusPlus11? 
diag::ext_offsetof_non_standardlayout_type : diag::ext_offsetof_non_pod_type; if (!IsSafe && !DidWarnAboutNonPOD && DiagRuntimeBehavior(BuiltinLoc, nullptr, PDiag(DiagID) << SourceRange(Components[0].LocStart, OC.LocEnd) << CurrentType)) DidWarnAboutNonPOD = true; } // Look for the field. LookupResult R(*this, OC.U.IdentInfo, OC.LocStart, LookupMemberName); LookupQualifiedName(R, RD); FieldDecl *MemberDecl = R.getAsSingle(); IndirectFieldDecl *IndirectMemberDecl = nullptr; if (!MemberDecl) { if ((IndirectMemberDecl = R.getAsSingle())) MemberDecl = IndirectMemberDecl->getAnonField(); } if (!MemberDecl) return ExprError(Diag(BuiltinLoc, diag::err_no_member) << OC.U.IdentInfo << RD << SourceRange(OC.LocStart, OC.LocEnd)); // C99 7.17p3: // (If the specified member is a bit-field, the behavior is undefined.) // // We diagnose this as an error. if (MemberDecl->isBitField()) { Diag(OC.LocEnd, diag::err_offsetof_bitfield) << MemberDecl->getDeclName() << SourceRange(BuiltinLoc, RParenLoc); Diag(MemberDecl->getLocation(), diag::note_bitfield_decl); return ExprError(); } RecordDecl *Parent = MemberDecl->getParent(); if (IndirectMemberDecl) Parent = cast(IndirectMemberDecl->getDeclContext()); // If the member was found in a base class, introduce OffsetOfNodes for // the base class indirections. CXXBasePaths Paths; if (IsDerivedFrom(OC.LocStart, CurrentType, Context.getTypeDeclType(Parent), Paths)) { if (Paths.getDetectedVirtual()) { Diag(OC.LocEnd, diag::err_offsetof_field_of_virtual_base) << MemberDecl->getDeclName() << SourceRange(BuiltinLoc, RParenLoc); return ExprError(); } CXXBasePath &Path = Paths.front(); for (const CXXBasePathElement &B : Path) Comps.push_back(OffsetOfNode(B.Base)); } if (IndirectMemberDecl) { for (auto *FI : IndirectMemberDecl->chain()) { assert(isa(FI)); Comps.push_back(OffsetOfNode(OC.LocStart, cast(FI), OC.LocEnd)); } } else Comps.push_back(OffsetOfNode(OC.LocStart, MemberDecl, OC.LocEnd)); CurrentType = MemberDecl->getType().getNonReferenceType(); } return OffsetOfExpr::Create(Context, Context.getSizeType(), BuiltinLoc, TInfo, Comps, Exprs, RParenLoc); } ExprResult Sema::ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef Components, SourceLocation RParenLoc) { TypeSourceInfo *ArgTInfo; QualType ArgTy = GetTypeFromParser(ParsedArgTy, &ArgTInfo); if (ArgTy.isNull()) return ExprError(); if (!ArgTInfo) ArgTInfo = Context.getTrivialTypeSourceInfo(ArgTy, TypeLoc); return BuildBuiltinOffsetOf(BuiltinLoc, ArgTInfo, Components, RParenLoc); } ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc) { assert((CondExpr && LHSExpr && RHSExpr) && "Missing type argument(s)"); ExprValueKind VK = VK_RValue; ExprObjectKind OK = OK_Ordinary; QualType resType; bool CondIsTrue = false; if (CondExpr->isTypeDependent() || CondExpr->isValueDependent()) { resType = Context.DependentTy; } else { // The conditional expression is required to be a constant expression. llvm::APSInt condEval(32); ExprResult CondICE = VerifyIntegerConstantExpression(CondExpr, &condEval, diag::err_typecheck_choose_expr_requires_constant, false); if (CondICE.isInvalid()) return ExprError(); CondExpr = CondICE.get(); CondIsTrue = condEval.getZExtValue(); // If the condition is > zero, then the AST type is the same as the LHSExpr. Expr *ActiveExpr = CondIsTrue ? 
LHSExpr : RHSExpr; resType = ActiveExpr->getType(); VK = ActiveExpr->getValueKind(); OK = ActiveExpr->getObjectKind(); } return new (Context) ChooseExpr(BuiltinLoc, CondExpr, LHSExpr, RHSExpr, resType, VK, OK, RPLoc, CondIsTrue); } //===----------------------------------------------------------------------===// // Clang Extensions. //===----------------------------------------------------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is started. void Sema::ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope) { BlockDecl *Block = BlockDecl::Create(Context, CurContext, CaretLoc); if (LangOpts.CPlusPlus) { MangleNumberingContext *MCtx; Decl *ManglingContextDecl; std::tie(MCtx, ManglingContextDecl) = getCurrentMangleNumberContext(Block->getDeclContext()); if (MCtx) { unsigned ManglingNumber = MCtx->getManglingNumber(Block); Block->setBlockMangling(ManglingNumber, ManglingContextDecl); } } PushBlockScope(CurScope, Block); CurContext->addDecl(Block); if (CurScope) PushDeclContext(CurScope, Block); else CurContext = Block; getCurBlock()->HasImplicitReturnType = true; // Enter a new evaluation context to insulate the block from any // cleanups from the enclosing full-expression. PushExpressionEvaluationContext( ExpressionEvaluationContext::PotentiallyEvaluated); } void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope) { assert(ParamInfo.getIdentifier() == nullptr && "block-id should have no identifier!"); assert(ParamInfo.getContext() == DeclaratorContext::BlockLiteralContext); BlockScopeInfo *CurBlock = getCurBlock(); TypeSourceInfo *Sig = GetTypeForDeclarator(ParamInfo, CurScope); QualType T = Sig->getType(); // FIXME: We should allow unexpanded parameter packs here, but that would, // in turn, make the block expression contain unexpanded parameter packs. if (DiagnoseUnexpandedParameterPack(CaretLoc, Sig, UPPC_Block)) { // Drop the parameters. FunctionProtoType::ExtProtoInfo EPI; EPI.HasTrailingReturn = false; EPI.TypeQuals.addConst(); T = Context.getFunctionType(Context.DependentTy, None, EPI); Sig = Context.getTrivialTypeSourceInfo(T); } // GetTypeForDeclarator always produces a function type for a block // literal signature. Furthermore, it is always a FunctionProtoType // unless the function was written with a typedef. assert(T->isFunctionType() && "GetTypeForDeclarator made a non-function block signature"); // Look for an explicit signature in that function type. FunctionProtoTypeLoc ExplicitSignature; if ((ExplicitSignature = Sig->getTypeLoc() .getAsAdjusted())) { // Check whether that explicit signature was synthesized by // GetTypeForDeclarator. If so, don't save that as part of the // written signature. if (ExplicitSignature.getLocalRangeBegin() == ExplicitSignature.getLocalRangeEnd()) { // This would be much cheaper if we stored TypeLocs instead of // TypeSourceInfos. TypeLoc Result = ExplicitSignature.getReturnLoc(); unsigned Size = Result.getFullDataSize(); Sig = Context.CreateTypeSourceInfo(Result.getType(), Size); Sig->getTypeLoc().initializeFullCopy(Result, Size); ExplicitSignature = FunctionProtoTypeLoc(); } } CurBlock->TheDecl->setSignatureAsWritten(Sig); CurBlock->FunctionType = T; const FunctionType *Fn = T->getAs(); QualType RetTy = Fn->getReturnType(); bool isVariadic = (isa(Fn) && cast(Fn)->isVariadic()); CurBlock->TheDecl->setIsVariadic(isVariadic); // Context.DependentTy is used as a placeholder for a missing block // return type. 
TODO: what should we do with declarators like: // ^ * { ... } // If the answer is "apply template argument deduction".... if (RetTy != Context.DependentTy) { CurBlock->ReturnType = RetTy; CurBlock->TheDecl->setBlockMissingReturnType(false); CurBlock->HasImplicitReturnType = false; } // Push block parameters from the declarator if we had them. SmallVector Params; if (ExplicitSignature) { for (unsigned I = 0, E = ExplicitSignature.getNumParams(); I != E; ++I) { ParmVarDecl *Param = ExplicitSignature.getParam(I); if (Param->getIdentifier() == nullptr && !Param->isImplicit() && !Param->isInvalidDecl() && !getLangOpts().CPlusPlus) { // Diagnose this as an extension in C17 and earlier. if (!getLangOpts().C2x) Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); } Params.push_back(Param); } // Fake up parameter variables if we have a typedef, like // ^ fntype { ... } } else if (const FunctionProtoType *Fn = T->getAs()) { for (const auto &I : Fn->param_types()) { ParmVarDecl *Param = BuildParmVarDeclForTypedef( CurBlock->TheDecl, ParamInfo.getBeginLoc(), I); Params.push_back(Param); } } // Set the parameters on the block decl. if (!Params.empty()) { CurBlock->TheDecl->setParams(Params); CheckParmsForFunctionDef(CurBlock->TheDecl->parameters(), /*CheckParameterNames=*/false); } // Finally we can process decl attributes. ProcessDeclAttributes(CurScope, CurBlock->TheDecl, ParamInfo); // Put the parameter variables in scope. for (auto AI : CurBlock->TheDecl->parameters()) { AI->setOwningFunction(CurBlock->TheDecl); // If this has an identifier, add it to the scope stack. if (AI->getIdentifier()) { CheckShadow(CurBlock->TheScope, AI); PushOnScopeChains(AI, CurBlock->TheScope); } } } /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void Sema::ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope) { // Leave the expression-evaluation context. DiscardCleanupsInEvaluationContext(); PopExpressionEvaluationContext(); // Pop off CurBlock, handle nested blocks. PopDeclContext(); PopFunctionScopeInfo(); } /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope) { // If blocks are disabled, emit an error. if (!LangOpts.Blocks) Diag(CaretLoc, diag::err_blocks_disable) << LangOpts.OpenCL; // Leave the expression-evaluation context. if (hasAnyUnrecoverableErrorsInThisFunction()) DiscardCleanupsInEvaluationContext(); assert(!Cleanup.exprNeedsCleanups() && "cleanups within block not correctly bound!"); PopExpressionEvaluationContext(); BlockScopeInfo *BSI = cast(FunctionScopes.back()); BlockDecl *BD = BSI->TheDecl; if (BSI->HasImplicitReturnType) deduceClosureReturnType(*BSI); QualType RetTy = Context.VoidTy; if (!BSI->ReturnType.isNull()) RetTy = BSI->ReturnType; bool NoReturn = BD->hasAttr(); QualType BlockTy; // If the user wrote a function type in some form, try to use that. if (!BSI->FunctionType.isNull()) { const FunctionType *FTy = BSI->FunctionType->castAs(); FunctionType::ExtInfo Ext = FTy->getExtInfo(); if (NoReturn && !Ext.getNoReturn()) Ext = Ext.withNoReturn(true); // Turn protoless block types into nullary block types. 
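// (Illustrative sketch: in C, a block written against a protoless typedef
// such as 'typedef void fn(); ... ^fn { ... }' is given a prototyped,
// zero-argument block type rather than keeping the protoless function type.)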
if (isa(FTy)) { FunctionProtoType::ExtProtoInfo EPI; EPI.ExtInfo = Ext; BlockTy = Context.getFunctionType(RetTy, None, EPI); // Otherwise, if we don't need to change anything about the function type, // preserve its sugar structure. } else if (FTy->getReturnType() == RetTy && (!NoReturn || FTy->getNoReturnAttr())) { BlockTy = BSI->FunctionType; // Otherwise, make the minimal modifications to the function type. } else { const FunctionProtoType *FPT = cast(FTy); FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); EPI.TypeQuals = Qualifiers(); EPI.ExtInfo = Ext; BlockTy = Context.getFunctionType(RetTy, FPT->getParamTypes(), EPI); } // If we don't have a function type, just build one from nothing. } else { FunctionProtoType::ExtProtoInfo EPI; EPI.ExtInfo = FunctionType::ExtInfo().withNoReturn(NoReturn); BlockTy = Context.getFunctionType(RetTy, None, EPI); } DiagnoseUnusedParameters(BD->parameters()); BlockTy = Context.getBlockPointerType(BlockTy); // If needed, diagnose invalid gotos and switches in the block. if (getCurFunction()->NeedsScopeChecking() && !PP.isCodeCompletionEnabled()) DiagnoseInvalidJumps(cast(Body)); BD->setBody(cast(Body)); if (Body && getCurFunction()->HasPotentialAvailabilityViolations) DiagnoseUnguardedAvailabilityViolations(BD); // Try to apply the named return value optimization. We have to check again // if we can do this, though, because blocks keep return statements around // to deduce an implicit return type. if (getLangOpts().CPlusPlus && RetTy->isRecordType() && !BD->isDependentContext()) computeNRVO(Body, BSI); if (RetTy.hasNonTrivialToPrimitiveDestructCUnion() || RetTy.hasNonTrivialToPrimitiveCopyCUnion()) checkNonTrivialCUnion(RetTy, BD->getCaretLocation(), NTCUC_FunctionReturn, NTCUK_Destruct|NTCUK_Copy); PopDeclContext(); // Pop the block scope now but keep it alive to the end of this function. AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy(); PoppedFunctionScopePtr ScopeRAII = PopFunctionScopeInfo(&WP, BD, BlockTy); // Set the captured variables on the block. SmallVector Captures; for (Capture &Cap : BSI->Captures) { if (Cap.isInvalid() || Cap.isThisCapture()) continue; VarDecl *Var = Cap.getVariable(); Expr *CopyExpr = nullptr; if (getLangOpts().CPlusPlus && Cap.isCopyCapture()) { if (const RecordType *Record = Cap.getCaptureType()->getAs()) { // The capture logic needs the destructor, so make sure we mark it. // Usually this is unnecessary because most local variables have // their destructors marked at declaration time, but parameters are // an exception because it's technically only the call site that // actually requires the destructor. if (isa(Var)) FinalizeVarWithDestructor(Var, Record); // Enter a separate potentially-evaluated context while building block // initializers to isolate their cleanups from those of the block // itself. // FIXME: Is this appropriate even when the block itself occurs in an // unevaluated operand? EnterExpressionEvaluationContext EvalContext( *this, ExpressionEvaluationContext::PotentiallyEvaluated); SourceLocation Loc = Cap.getLocation(); ExprResult Result = BuildDeclarationNameExpr( CXXScopeSpec(), DeclarationNameInfo(Var->getDeclName(), Loc), Var); // According to the blocks spec, the capture of a variable from // the stack requires a const copy constructor. This is not true // of the copy/move done to move a __block variable to the heap. 
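// (Illustrative, with placeholder names: for 'Widget w; ... ^{ w.use(); }',
// the copy of 'w' captured by the block is initialized from a 'const Widget'
// l-value, so Widget must provide a copy constructor callable with a const
// argument.)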
if (!Result.isInvalid() && !Result.get()->getType().isConstQualified()) { Result = ImpCastExprToType(Result.get(), Result.get()->getType().withConst(), CK_NoOp, VK_LValue); } if (!Result.isInvalid()) { Result = PerformCopyInitialization( InitializedEntity::InitializeBlock(Var->getLocation(), Cap.getCaptureType(), false), Loc, Result.get()); } // Build a full-expression copy expression if initialization // succeeded and used a non-trivial constructor. Recover from // errors by pretending that the copy isn't necessary. if (!Result.isInvalid() && !cast(Result.get())->getConstructor() ->isTrivial()) { Result = MaybeCreateExprWithCleanups(Result); CopyExpr = Result.get(); } } } BlockDecl::Capture NewCap(Var, Cap.isBlockCapture(), Cap.isNested(), CopyExpr); Captures.push_back(NewCap); } BD->setCaptures(Context, Captures, BSI->CXXThisCaptureIndex != 0); BlockExpr *Result = new (Context) BlockExpr(BD, BlockTy); // If the block isn't obviously global, i.e. it captures anything at // all, then we need to do a few things in the surrounding context: if (Result->getBlockDecl()->hasCaptures()) { // First, this expression has a new cleanup object. ExprCleanupObjects.push_back(Result->getBlockDecl()); Cleanup.setExprNeedsCleanups(true); // It also gets a branch-protected scope if any of the captured // variables needs destruction. for (const auto &CI : Result->getBlockDecl()->captures()) { const VarDecl *var = CI.getVariable(); if (var->getType().isDestructedType() != QualType::DK_none) { setFunctionHasBranchProtectedScope(); break; } } } if (getCurFunction()) getCurFunction()->addBlock(BD); return Result; } ExprResult Sema::ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc) { TypeSourceInfo *TInfo; GetTypeFromParser(Ty, &TInfo); return BuildVAArgExpr(BuiltinLoc, E, TInfo, RPLoc); } ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc) { Expr *OrigExpr = E; bool IsMS = false; // CUDA device code does not support varargs. if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { if (const FunctionDecl *F = dyn_cast(CurContext)) { CUDAFunctionTarget T = IdentifyCUDATarget(F); if (T == CFT_Global || T == CFT_Device || T == CFT_HostDevice) return ExprError(Diag(E->getBeginLoc(), diag::err_va_arg_in_device)); } } // NVPTX does not support va_arg expression. if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && Context.getTargetInfo().getTriple().isNVPTX()) targetDiag(E->getBeginLoc(), diag::err_va_arg_in_device); // It might be a __builtin_ms_va_list. (But don't ever mark a va_arg() // as Microsoft ABI on an actual Microsoft platform, where // __builtin_ms_va_list and __builtin_va_list are the same.) if (!E->isTypeDependent() && Context.getTargetInfo().hasBuiltinMSVaList() && Context.getTargetInfo().getBuiltinVaListKind() != TargetInfo::CharPtrBuiltinVaList) { QualType MSVaListType = Context.getBuiltinMSVaListType(); if (Context.hasSameType(MSVaListType, E->getType())) { if (CheckForModifiableLvalue(E, BuiltinLoc, *this)) return ExprError(); IsMS = true; } } // Get the va_list type QualType VaListType = Context.getBuiltinVaListType(); if (!IsMS) { if (VaListType->isArrayType()) { // Deal with implicit array decay; for example, on x86-64, // va_list is an array, but it's supposed to decay to // a pointer for va_arg. VaListType = Context.getArrayDecayedType(VaListType); // Make sure the input expression also decays appropriately. 
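// For example, on x86-64 '__builtin_va_list' is 'struct __va_list_tag [1]',
// so both the declared va_list type and the argument expression must decay
// to the corresponding pointer type before they are compared below.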
ExprResult Result = UsualUnaryConversions(E); if (Result.isInvalid()) return ExprError(); E = Result.get(); } else if (VaListType->isRecordType() && getLangOpts().CPlusPlus) { // If va_list is a record type and we are compiling in C++ mode, // check the argument using reference binding. InitializedEntity Entity = InitializedEntity::InitializeParameter( Context, Context.getLValueReferenceType(VaListType), false); ExprResult Init = PerformCopyInitialization(Entity, SourceLocation(), E); if (Init.isInvalid()) return ExprError(); E = Init.getAs(); } else { // Otherwise, the va_list argument must be an l-value because // it is modified by va_arg. if (!E->isTypeDependent() && CheckForModifiableLvalue(E, BuiltinLoc, *this)) return ExprError(); } } if (!IsMS && !E->isTypeDependent() && !Context.hasSameType(VaListType, E->getType())) return ExprError( Diag(E->getBeginLoc(), diag::err_first_argument_to_va_arg_not_of_type_va_list) << OrigExpr->getType() << E->getSourceRange()); if (!TInfo->getType()->isDependentType()) { if (RequireCompleteType(TInfo->getTypeLoc().getBeginLoc(), TInfo->getType(), diag::err_second_parameter_to_va_arg_incomplete, TInfo->getTypeLoc())) return ExprError(); if (RequireNonAbstractType(TInfo->getTypeLoc().getBeginLoc(), TInfo->getType(), diag::err_second_parameter_to_va_arg_abstract, TInfo->getTypeLoc())) return ExprError(); if (!TInfo->getType().isPODType(Context)) { Diag(TInfo->getTypeLoc().getBeginLoc(), TInfo->getType()->isObjCLifetimeType() ? diag::warn_second_parameter_to_va_arg_ownership_qualified : diag::warn_second_parameter_to_va_arg_not_pod) << TInfo->getType() << TInfo->getTypeLoc().getSourceRange(); } // Check for va_arg where arguments of the given type will be promoted // (i.e. this va_arg is guaranteed to have undefined behavior). QualType PromoteType; if (TInfo->getType()->isPromotableIntegerType()) { PromoteType = Context.getPromotedIntegerType(TInfo->getType()); if (Context.typesAreCompatible(PromoteType, TInfo->getType())) PromoteType = QualType(); } if (TInfo->getType()->isSpecificBuiltinType(BuiltinType::Float)) PromoteType = Context.DoubleTy; if (!PromoteType.isNull()) DiagRuntimeBehavior(TInfo->getTypeLoc().getBeginLoc(), E, PDiag(diag::warn_second_parameter_to_va_arg_never_compatible) << TInfo->getType() << PromoteType << TInfo->getTypeLoc().getSourceRange()); } QualType T = TInfo->getType().getNonLValueExprType(Context); return new (Context) VAArgExpr(BuiltinLoc, E, TInfo, RPLoc, T, IsMS); } ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) { // The type of __null will be int or long, depending on the size of // pointers on the target. 
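// Pick the first of int, long, and long long whose width matches the
// target's pointer width; __null is an integer constant of pointer size.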
QualType Ty; unsigned pw = Context.getTargetInfo().getPointerWidth(0); if (pw == Context.getTargetInfo().getIntWidth()) Ty = Context.IntTy; else if (pw == Context.getTargetInfo().getLongWidth()) Ty = Context.LongTy; else if (pw == Context.getTargetInfo().getLongLongWidth()) Ty = Context.LongLongTy; else { llvm_unreachable("I don't know size of pointer!"); } return new (Context) GNUNullExpr(Ty, TokenLoc); } ExprResult Sema::ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc) { return BuildSourceLocExpr(Kind, BuiltinLoc, RPLoc, CurContext); } ExprResult Sema::BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext) { return new (Context) SourceLocExpr(Context, Kind, BuiltinLoc, RPLoc, ParentContext); } bool Sema::CheckConversionToObjCLiteral(QualType DstType, Expr *&Exp, bool Diagnose) { if (!getLangOpts().ObjC) return false; const ObjCObjectPointerType *PT = DstType->getAs(); if (!PT) return false; const ObjCInterfaceDecl *ID = PT->getInterfaceDecl(); // Ignore any parens, implicit casts (should only be // array-to-pointer decays), and not-so-opaque values. The last is // important for making this trigger for property assignments. Expr *SrcExpr = Exp->IgnoreParenImpCasts(); if (OpaqueValueExpr *OV = dyn_cast(SrcExpr)) if (OV->getSourceExpr()) SrcExpr = OV->getSourceExpr()->IgnoreParenImpCasts(); if (auto *SL = dyn_cast(SrcExpr)) { if (!PT->isObjCIdType() && !(ID && ID->getIdentifier()->isStr("NSString"))) return false; if (!SL->isAscii()) return false; if (Diagnose) { Diag(SL->getBeginLoc(), diag::err_missing_atsign_prefix) << /*string*/0 << FixItHint::CreateInsertion(SL->getBeginLoc(), "@"); Exp = BuildObjCStringLiteral(SL->getBeginLoc(), SL).get(); } return true; } if ((isa(SrcExpr) || isa(SrcExpr) || isa(SrcExpr) || isa(SrcExpr) || isa(SrcExpr)) && !SrcExpr->isNullPointerConstant( getASTContext(), Expr::NPC_NeverValueDependent)) { if (!ID || !ID->getIdentifier()->isStr("NSNumber")) return false; if (Diagnose) { Diag(SrcExpr->getBeginLoc(), diag::err_missing_atsign_prefix) << /*number*/1 << FixItHint::CreateInsertion(SrcExpr->getBeginLoc(), "@"); Expr *NumLit = BuildObjCNumericLiteral(SrcExpr->getBeginLoc(), SrcExpr).get(); if (NumLit) Exp = NumLit; } return true; } return false; } static bool maybeDiagnoseAssignmentToFunction(Sema &S, QualType DstType, const Expr *SrcExpr) { if (!DstType->isFunctionPointerType() || !SrcExpr->getType()->isFunctionType()) return false; auto *DRE = dyn_cast(SrcExpr->IgnoreParenImpCasts()); if (!DRE) return false; auto *FD = dyn_cast(DRE->getDecl()); if (!FD) return false; return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, SrcExpr->getBeginLoc()); } bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained) { if (Complained) *Complained = false; // Decode the result (notice that AST's are still created for extensions). 
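// Map the conversion result to a diagnostic. Most cases are hard errors in
// C++ but only extensions (warnings) in C; where a cast could repair the
// conversion, fix-it hints are computed as well.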
bool CheckInferredResultType = false; bool isInvalid = false; unsigned DiagKind = 0; FixItHint Hint; ConversionFixItGenerator ConvHints; bool MayHaveConvFixit = false; bool MayHaveFunctionDiff = false; const ObjCInterfaceDecl *IFace = nullptr; const ObjCProtocolDecl *PDecl = nullptr; switch (ConvTy) { case Compatible: DiagnoseAssignmentEnum(DstType, SrcType, SrcExpr); return false; case PointerToInt: if (getLangOpts().CPlusPlus) { DiagKind = diag::err_typecheck_convert_pointer_int; isInvalid = true; } else { DiagKind = diag::ext_typecheck_convert_pointer_int; } ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this); MayHaveConvFixit = true; break; case IntToPointer: if (getLangOpts().CPlusPlus) { DiagKind = diag::err_typecheck_convert_int_pointer; isInvalid = true; } else { DiagKind = diag::ext_typecheck_convert_int_pointer; } ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this); MayHaveConvFixit = true; break; case IncompatibleFunctionPointer: if (getLangOpts().CPlusPlus) { DiagKind = diag::err_typecheck_convert_incompatible_function_pointer; isInvalid = true; } else { DiagKind = diag::ext_typecheck_convert_incompatible_function_pointer; } ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this); MayHaveConvFixit = true; break; case IncompatiblePointer: if (Action == AA_Passing_CFAudited) { DiagKind = diag::err_arc_typecheck_convert_incompatible_pointer; } else if (getLangOpts().CPlusPlus) { DiagKind = diag::err_typecheck_convert_incompatible_pointer; isInvalid = true; } else { DiagKind = diag::ext_typecheck_convert_incompatible_pointer; } CheckInferredResultType = DstType->isObjCObjectPointerType() && SrcType->isObjCObjectPointerType(); if (Hint.isNull() && !CheckInferredResultType) { ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this); } else if (CheckInferredResultType) { SrcType = SrcType.getUnqualifiedType(); DstType = DstType.getUnqualifiedType(); } MayHaveConvFixit = true; break; case IncompatiblePointerSign: if (getLangOpts().CPlusPlus) { DiagKind = diag::err_typecheck_convert_incompatible_pointer_sign; isInvalid = true; } else { DiagKind = diag::ext_typecheck_convert_incompatible_pointer_sign; } break; case FunctionVoidPointer: if (getLangOpts().CPlusPlus) { DiagKind = diag::err_typecheck_convert_pointer_void_func; isInvalid = true; } else { DiagKind = diag::ext_typecheck_convert_pointer_void_func; } break; case IncompatiblePointerDiscardsQualifiers: { // Perform array-to-pointer decay if necessary. if (SrcType->isArrayType()) SrcType = Context.getArrayDecayedType(SrcType); isInvalid = true; Qualifiers lhq = SrcType->getPointeeType().getQualifiers(); Qualifiers rhq = DstType->getPointeeType().getQualifiers(); if (lhq.getAddressSpace() != rhq.getAddressSpace()) { DiagKind = diag::err_typecheck_incompatible_address_space; break; } else if (lhq.getObjCLifetime() != rhq.getObjCLifetime()) { DiagKind = diag::err_typecheck_incompatible_ownership; break; } llvm_unreachable("unknown error case for discarding qualifiers!"); // fallthrough } case CompatiblePointerDiscardsQualifiers: // If the qualifiers lost were because we were applying the // (deprecated) C++ conversion from a string literal to a char* // (or wchar_t*), then there was no error (C++ 4.2p2). FIXME: // Ideally, this check would be performed in // checkPointerTypesForAssignment. 
However, that would require a // bit of refactoring (so that the second argument is an // expression, rather than a type), which should be done as part // of a larger effort to fix checkPointerTypesForAssignment for // C++ semantics. if (getLangOpts().CPlusPlus && IsStringLiteralToNonConstPointerConversion(SrcExpr, DstType)) return false; if (getLangOpts().CPlusPlus) { DiagKind = diag::err_typecheck_convert_discards_qualifiers; isInvalid = true; } else { DiagKind = diag::ext_typecheck_convert_discards_qualifiers; } break; case IncompatibleNestedPointerQualifiers: if (getLangOpts().CPlusPlus) { isInvalid = true; DiagKind = diag::err_nested_pointer_qualifier_mismatch; } else { DiagKind = diag::ext_nested_pointer_qualifier_mismatch; } break; case IncompatibleNestedPointerAddressSpaceMismatch: DiagKind = diag::err_typecheck_incompatible_nested_address_space; isInvalid = true; break; case IntToBlockPointer: DiagKind = diag::err_int_to_block_pointer; isInvalid = true; break; case IncompatibleBlockPointer: DiagKind = diag::err_typecheck_convert_incompatible_block_pointer; isInvalid = true; break; case IncompatibleObjCQualifiedId: { if (SrcType->isObjCQualifiedIdType()) { const ObjCObjectPointerType *srcOPT = SrcType->castAs(); for (auto *srcProto : srcOPT->quals()) { PDecl = srcProto; break; } if (const ObjCInterfaceType *IFaceT = DstType->castAs()->getInterfaceType()) IFace = IFaceT->getDecl(); } else if (DstType->isObjCQualifiedIdType()) { const ObjCObjectPointerType *dstOPT = DstType->castAs(); for (auto *dstProto : dstOPT->quals()) { PDecl = dstProto; break; } if (const ObjCInterfaceType *IFaceT = SrcType->castAs()->getInterfaceType()) IFace = IFaceT->getDecl(); } if (getLangOpts().CPlusPlus) { DiagKind = diag::err_incompatible_qualified_id; isInvalid = true; } else { DiagKind = diag::warn_incompatible_qualified_id; } break; } case IncompatibleVectors: if (getLangOpts().CPlusPlus) { DiagKind = diag::err_incompatible_vectors; isInvalid = true; } else { DiagKind = diag::warn_incompatible_vectors; } break; case IncompatibleObjCWeakRef: DiagKind = diag::err_arc_weak_unavailable_assign; isInvalid = true; break; case Incompatible: if (maybeDiagnoseAssignmentToFunction(*this, DstType, SrcExpr)) { if (Complained) *Complained = true; return true; } DiagKind = diag::err_typecheck_convert_incompatible; ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this); MayHaveConvFixit = true; isInvalid = true; MayHaveFunctionDiff = true; break; } QualType FirstType, SecondType; switch (Action) { case AA_Assigning: case AA_Initializing: // The destination type comes first. FirstType = DstType; SecondType = SrcType; break; case AA_Returning: case AA_Passing: case AA_Passing_CFAudited: case AA_Converting: case AA_Sending: case AA_Casting: // The source type comes first. FirstType = SrcType; SecondType = DstType; break; } PartialDiagnostic FDiag = PDiag(DiagKind); if (Action == AA_Passing_CFAudited) FDiag << FirstType << SecondType << AA_Passing << SrcExpr->getSourceRange(); else FDiag << FirstType << SecondType << Action << SrcExpr->getSourceRange(); // If we can fix the conversion, suggest the FixIts. 
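// Only one source of fix-its should be populated: either the single
// pre-computed Hint or the hints produced by the conversion fix-it
// generator.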
assert(ConvHints.isNull() || Hint.isNull()); if (!ConvHints.isNull()) { for (FixItHint &H : ConvHints.Hints) FDiag << H; } else { FDiag << Hint; } if (MayHaveConvFixit) { FDiag << (unsigned) (ConvHints.Kind); } if (MayHaveFunctionDiff) HandleFunctionTypeMismatch(FDiag, SecondType, FirstType); Diag(Loc, FDiag); if ((DiagKind == diag::warn_incompatible_qualified_id || DiagKind == diag::err_incompatible_qualified_id) && PDecl && IFace && !IFace->hasDefinition()) Diag(IFace->getLocation(), diag::note_incomplete_class_and_qualified_id) << IFace << PDecl; if (SecondType == Context.OverloadTy) NoteAllOverloadCandidates(OverloadExpr::find(SrcExpr).Expression, FirstType, /*TakingAddress=*/true); if (CheckInferredResultType) EmitRelatedResultTypeNote(SrcExpr); if (Action == AA_Returning && ConvTy == IncompatiblePointer) EmitRelatedResultTypeNoteForReturn(DstType); if (Complained) *Complained = true; return isInvalid; } ExprResult Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result) { class SimpleICEDiagnoser : public VerifyICEDiagnoser { public: void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override { S.Diag(Loc, diag::err_expr_not_ice) << S.LangOpts.CPlusPlus << SR; } } Diagnoser; return VerifyIntegerConstantExpression(E, Result, Diagnoser); } ExprResult Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold) { class IDDiagnoser : public VerifyICEDiagnoser { unsigned DiagID; public: IDDiagnoser(unsigned DiagID) : VerifyICEDiagnoser(DiagID == 0), DiagID(DiagID) { } void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override { S.Diag(Loc, DiagID) << SR; } } Diagnoser(DiagID); return VerifyIntegerConstantExpression(E, Result, Diagnoser, AllowFold); } void Sema::VerifyICEDiagnoser::diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR) { S.Diag(Loc, diag::ext_expr_not_ice) << SR << S.LangOpts.CPlusPlus; } ExprResult Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold) { SourceLocation DiagLoc = E->getBeginLoc(); if (getLangOpts().CPlusPlus11) { // C++11 [expr.const]p5: // If an expression of literal class type is used in a context where an // integral constant expression is required, then that class type shall // have a single non-explicit conversion function to an integral or // unscoped enumeration type ExprResult Converted; class CXX11ConvertDiagnoser : public ICEConvertDiagnoser { public: CXX11ConvertDiagnoser(bool Silent) : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, Silent, true) {} SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_ice_not_integral) << T; } SemaDiagnosticBuilder diagnoseIncomplete( Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_ice_incomplete_type) << T; } SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { return S.Diag(Loc, diag::err_ice_explicit_conversion) << T << ConvTy; } SemaDiagnosticBuilder noteExplicitConv( Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return S.Diag(Conv->getLocation(), diag::note_ice_conversion_here) << ConvTy->isEnumeralType() << ConvTy; } SemaDiagnosticBuilder diagnoseAmbiguous( Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_ice_ambiguous_conversion) << T; } SemaDiagnosticBuilder noteAmbiguous( Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return 
S.Diag(Conv->getLocation(), diag::note_ice_conversion_here) << ConvTy->isEnumeralType() << ConvTy; } SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { llvm_unreachable("conversion functions are permitted"); } } ConvertDiagnoser(Diagnoser.Suppress); Converted = PerformContextualImplicitConversion(DiagLoc, E, ConvertDiagnoser); if (Converted.isInvalid()) return Converted; E = Converted.get(); if (!E->getType()->isIntegralOrUnscopedEnumerationType()) return ExprError(); } else if (!E->getType()->isIntegralOrUnscopedEnumerationType()) { // An ICE must be of integral or unscoped enumeration type. if (!Diagnoser.Suppress) Diagnoser.diagnoseNotICE(*this, DiagLoc, E->getSourceRange()); return ExprError(); } ExprResult RValueExpr = DefaultLvalueConversion(E); if (RValueExpr.isInvalid()) return ExprError(); E = RValueExpr.get(); // Circumvent ICE checking in C++11 to avoid evaluating the expression twice // in the non-ICE case. if (!getLangOpts().CPlusPlus11 && E->isIntegerConstantExpr(Context)) { if (Result) *Result = E->EvaluateKnownConstIntCheckOverflow(Context); if (!isa(E)) E = ConstantExpr::Create(Context, E); return E; } Expr::EvalResult EvalResult; SmallVector Notes; EvalResult.Diag = &Notes; // Try to evaluate the expression, and produce diagnostics explaining why it's // not a constant expression as a side-effect. bool Folded = E->EvaluateAsRValue(EvalResult, Context, /*isConstantContext*/ true) && EvalResult.Val.isInt() && !EvalResult.HasSideEffects; if (!isa(E)) E = ConstantExpr::Create(Context, E, EvalResult.Val); // In C++11, we can rely on diagnostics being produced for any expression // which is not a constant expression. If no diagnostics were produced, then // this is a constant expression. if (Folded && getLangOpts().CPlusPlus11 && Notes.empty()) { if (Result) *Result = EvalResult.Val.getInt(); return E; } // If our only note is the usual "invalid subexpression" note, just point // the caret at its location rather than producing an essentially // redundant note. if (Notes.size() == 1 && Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr) { DiagLoc = Notes[0].first; Notes.clear(); } if (!Folded || !AllowFold) { if (!Diagnoser.Suppress) { Diagnoser.diagnoseNotICE(*this, DiagLoc, E->getSourceRange()); for (const PartialDiagnosticAt &Note : Notes) Diag(Note.first, Note.second); } return ExprError(); } Diagnoser.diagnoseFold(*this, DiagLoc, E->getSourceRange()); for (const PartialDiagnosticAt &Note : Notes) Diag(Note.first, Note.second); if (Result) *Result = EvalResult.Val.getInt(); return E; } namespace { // Handle the case where we conclude a expression which we speculatively // considered to be unevaluated is actually evaluated. class TransformToPE : public TreeTransform { typedef TreeTransform BaseTransform; public: TransformToPE(Sema &SemaRef) : BaseTransform(SemaRef) { } // Make sure we redo semantic analysis bool AlwaysRebuild() { return true; } bool ReplacingOriginal() { return true; } // We need to special-case DeclRefExprs referring to FieldDecls which // are not part of a member pointer formation; normal TreeTransforming // doesn't catch this case because of the way we represent them in the AST. // FIXME: This is a bit ugly; is it really the best way to handle this // case? // // Error on DeclRefExprs referring to FieldDecls. 
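// A bare reference to a FieldDecl that is not part of forming a member
// pointer (i.e. not '&Class::field') cannot be evaluated without an object,
// so reject it here rather than rebuilding it.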
ExprResult TransformDeclRefExpr(DeclRefExpr *E) { if (isa(E->getDecl()) && !SemaRef.isUnevaluatedContext()) return SemaRef.Diag(E->getLocation(), diag::err_invalid_non_static_member_use) << E->getDecl() << E->getSourceRange(); return BaseTransform::TransformDeclRefExpr(E); } // Exception: filter out member pointer formation ExprResult TransformUnaryOperator(UnaryOperator *E) { if (E->getOpcode() == UO_AddrOf && E->getType()->isMemberPointerType()) return E; return BaseTransform::TransformUnaryOperator(E); } // The body of a lambda-expression is in a separate expression evaluation // context so never needs to be transformed. // FIXME: Ideally we wouldn't transform the closure type either, and would // just recreate the capture expressions and lambda expression. StmtResult TransformLambdaBody(LambdaExpr *E, Stmt *Body) { return SkipLambdaBody(E, Body); } }; } ExprResult Sema::TransformToPotentiallyEvaluated(Expr *E) { assert(isUnevaluatedContext() && "Should only transform unevaluated expressions"); ExprEvalContexts.back().Context = ExprEvalContexts[ExprEvalContexts.size()-2].Context; if (isUnevaluatedContext()) return E; return TransformToPE(*this).TransformExpr(E); } void Sema::PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl, ExpressionEvaluationContextRecord::ExpressionKind ExprContext) { ExprEvalContexts.emplace_back(NewContext, ExprCleanupObjects.size(), Cleanup, LambdaContextDecl, ExprContext); Cleanup.reset(); if (!MaybeODRUseExprs.empty()) std::swap(MaybeODRUseExprs, ExprEvalContexts.back().SavedMaybeODRUseExprs); } void Sema::PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind ExprContext) { Decl *ClosureContextDecl = ExprEvalContexts.back().ManglingContextDecl; PushExpressionEvaluationContext(NewContext, ClosureContextDecl, ExprContext); } namespace { const DeclRefExpr *CheckPossibleDeref(Sema &S, const Expr *PossibleDeref) { PossibleDeref = PossibleDeref->IgnoreParenImpCasts(); if (const auto *E = dyn_cast(PossibleDeref)) { if (E->getOpcode() == UO_Deref) return CheckPossibleDeref(S, E->getSubExpr()); } else if (const auto *E = dyn_cast(PossibleDeref)) { return CheckPossibleDeref(S, E->getBase()); } else if (const auto *E = dyn_cast(PossibleDeref)) { return CheckPossibleDeref(S, E->getBase()); } else if (const auto E = dyn_cast(PossibleDeref)) { QualType Inner; QualType Ty = E->getType(); if (const auto *Ptr = Ty->getAs()) Inner = Ptr->getPointeeType(); else if (const auto *Arr = S.Context.getAsArrayType(Ty)) Inner = Arr->getElementType(); else return nullptr; if (Inner->hasAttr(attr::NoDeref)) return E; } return nullptr; } } // namespace void Sema::WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec) { for (const Expr *E : Rec.PossibleDerefs) { const DeclRefExpr *DeclRef = CheckPossibleDeref(*this, E); if (DeclRef) { const ValueDecl *Decl = DeclRef->getDecl(); Diag(E->getExprLoc(), diag::warn_dereference_of_noderef_type) << Decl->getName() << E->getSourceRange(); Diag(Decl->getLocation(), diag::note_previous_decl) << Decl->getName(); } else { Diag(E->getExprLoc(), diag::warn_dereference_of_noderef_type_no_decl) << E->getSourceRange(); } } Rec.PossibleDerefs.clear(); } /// Check whether E, which is either a discarded-value expression or an /// unevaluated operand, is a simple-assignment to a volatlie-qualified lvalue, /// and if so, remove it from the list of volatile-qualified assignments that /// we are going to warn are 
deprecated. void Sema::CheckUnusedVolatileAssignment(Expr *E) { if (!E->getType().isVolatileQualified() || !getLangOpts().CPlusPlus20) return; // Note: ignoring parens here is not justified by the standard rules, but // ignoring parentheses seems like a more reasonable approach, and this only // drives a deprecation warning so doesn't affect conformance. if (auto *BO = dyn_cast(E->IgnoreParenImpCasts())) { if (BO->getOpcode() == BO_Assign) { auto &LHSs = ExprEvalContexts.back().VolatileAssignmentLHSs; LHSs.erase(std::remove(LHSs.begin(), LHSs.end(), BO->getLHS()), LHSs.end()); } } } ExprResult Sema::CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl) { if (!E.isUsable() || !Decl || !Decl->isConsteval() || isConstantEvaluated() || RebuildingImmediateInvocation) return E; /// Opportunistically remove the callee from ReferencesToConsteval if we can. /// It's OK if this fails; we'll also remove this in /// HandleImmediateInvocations, but catching it here allows us to avoid /// walking the AST looking for it in simple cases. if (auto *Call = dyn_cast(E.get()->IgnoreImplicit())) if (auto *DeclRef = dyn_cast(Call->getCallee()->IgnoreImplicit())) ExprEvalContexts.back().ReferenceToConsteval.erase(DeclRef); E = MaybeCreateExprWithCleanups(E); ConstantExpr *Res = ConstantExpr::Create( getASTContext(), E.get(), ConstantExpr::getStorageKind(Decl->getReturnType().getTypePtr(), getASTContext()), /*IsImmediateInvocation*/ true); ExprEvalContexts.back().ImmediateInvocationCandidates.emplace_back(Res, 0); return Res; } static void EvaluateAndDiagnoseImmediateInvocation( Sema &SemaRef, Sema::ImmediateInvocationCandidate Candidate) { llvm::SmallVector Notes; Expr::EvalResult Eval; Eval.Diag = &Notes; ConstantExpr *CE = Candidate.getPointer(); bool Result = CE->EvaluateAsConstantExpr(Eval, Expr::EvaluateForCodeGen, SemaRef.getASTContext(), true); if (!Result || !Notes.empty()) { Expr *InnerExpr = CE->getSubExpr()->IgnoreImplicit(); if (auto *FunctionalCast = dyn_cast(InnerExpr)) InnerExpr = FunctionalCast->getSubExpr(); FunctionDecl *FD = nullptr; if (auto *Call = dyn_cast(InnerExpr)) FD = cast(Call->getCalleeDecl()); else if (auto *Call = dyn_cast(InnerExpr)) FD = Call->getConstructor(); else llvm_unreachable("unhandled decl kind"); assert(FD->isConsteval()); SemaRef.Diag(CE->getBeginLoc(), diag::err_invalid_consteval_call) << FD; for (auto &Note : Notes) SemaRef.Diag(Note.first, Note.second); return; } CE->MoveIntoResult(Eval.Val, SemaRef.getASTContext()); } static void RemoveNestedImmediateInvocation( Sema &SemaRef, Sema::ExpressionEvaluationContextRecord &Rec, SmallVector::reverse_iterator It) { struct ComplexRemove : TreeTransform { using Base = TreeTransform; llvm::SmallPtrSetImpl &DRSet; SmallVector &IISet; SmallVector::reverse_iterator CurrentII; ComplexRemove(Sema &SemaRef, llvm::SmallPtrSetImpl &DR, SmallVector &II, SmallVector::reverse_iterator Current) : Base(SemaRef), DRSet(DR), IISet(II), CurrentII(Current) {} void RemoveImmediateInvocation(ConstantExpr* E) { auto It = std::find_if(CurrentII, IISet.rend(), [E](Sema::ImmediateInvocationCandidate Elem) { return Elem.getPointer() == E; }); assert(It != IISet.rend() && "ConstantExpr marked IsImmediateInvocation should " "be present"); It->setInt(1); // Mark as deleted } ExprResult TransformConstantExpr(ConstantExpr *E) { if (!E->isImmediateInvocation()) return Base::TransformConstantExpr(E); RemoveImmediateInvocation(E); return Base::TransformExpr(E->getSubExpr()); } /// Base::TransfromCXXOperatorCallExpr doesn't traverse the callee so /// 
we need to remove its DeclRefExpr from the DRSet. ExprResult TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) { DRSet.erase(cast(E->getCallee()->IgnoreImplicit())); return Base::TransformCXXOperatorCallExpr(E); } /// Base::TransformInitializer skip ConstantExpr so we need to visit them /// here. ExprResult TransformInitializer(Expr *Init, bool NotCopyInit) { if (!Init) return Init; /// ConstantExpr are the first layer of implicit node to be removed so if /// Init isn't a ConstantExpr, no ConstantExpr will be skipped. if (auto *CE = dyn_cast(Init)) if (CE->isImmediateInvocation()) RemoveImmediateInvocation(CE); return Base::TransformInitializer(Init, NotCopyInit); } ExprResult TransformDeclRefExpr(DeclRefExpr *E) { DRSet.erase(E); return E; } bool AlwaysRebuild() { return false; } bool ReplacingOriginal() { return true; } bool AllowSkippingCXXConstructExpr() { bool Res = AllowSkippingFirstCXXConstructExpr; AllowSkippingFirstCXXConstructExpr = true; return Res; } bool AllowSkippingFirstCXXConstructExpr = true; } Transformer(SemaRef, Rec.ReferenceToConsteval, Rec.ImmediateInvocationCandidates, It); /// CXXConstructExpr with a single argument are getting skipped by /// TreeTransform in some situtation because they could be implicit. This /// can only occur for the top-level CXXConstructExpr because it is used /// nowhere in the expression being transformed therefore will not be rebuilt. /// Setting AllowSkippingFirstCXXConstructExpr to false will prevent from /// skipping the first CXXConstructExpr. if (isa(It->getPointer()->IgnoreImplicit())) Transformer.AllowSkippingFirstCXXConstructExpr = false; ExprResult Res = Transformer.TransformExpr(It->getPointer()->getSubExpr()); assert(Res.isUsable()); Res = SemaRef.MaybeCreateExprWithCleanups(Res); It->getPointer()->setSubExpr(Res.get()); } static void HandleImmediateInvocations(Sema &SemaRef, Sema::ExpressionEvaluationContextRecord &Rec) { if ((Rec.ImmediateInvocationCandidates.size() == 0 && Rec.ReferenceToConsteval.size() == 0) || SemaRef.RebuildingImmediateInvocation) return; /// When we have more then 1 ImmediateInvocationCandidates we need to check /// for nested ImmediateInvocationCandidates. when we have only 1 we only /// need to remove ReferenceToConsteval in the immediate invocation. if (Rec.ImmediateInvocationCandidates.size() > 1) { /// Prevent sema calls during the tree transform from adding pointers that /// are already in the sets. 
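// For example, given 'consteval int sqr(int n) { return n * n; }', the call
// 'sqr(sqr(2))' only needs the outermost call to remain an
// immediate-invocation candidate; nested candidates are unwrapped and
// folded into the outer evaluation.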
llvm::SaveAndRestore DisableIITracking( SemaRef.RebuildingImmediateInvocation, true); /// Prevent diagnostic during tree transfrom as they are duplicates Sema::TentativeAnalysisScope DisableDiag(SemaRef); for (auto It = Rec.ImmediateInvocationCandidates.rbegin(); It != Rec.ImmediateInvocationCandidates.rend(); It++) if (!It->getInt()) RemoveNestedImmediateInvocation(SemaRef, Rec, It); } else if (Rec.ImmediateInvocationCandidates.size() == 1 && Rec.ReferenceToConsteval.size()) { struct SimpleRemove : RecursiveASTVisitor { llvm::SmallPtrSetImpl &DRSet; SimpleRemove(llvm::SmallPtrSetImpl &S) : DRSet(S) {} bool VisitDeclRefExpr(DeclRefExpr *E) { DRSet.erase(E); return DRSet.size(); } } Visitor(Rec.ReferenceToConsteval); Visitor.TraverseStmt( Rec.ImmediateInvocationCandidates.front().getPointer()->getSubExpr()); } for (auto CE : Rec.ImmediateInvocationCandidates) if (!CE.getInt()) EvaluateAndDiagnoseImmediateInvocation(SemaRef, CE); for (auto DR : Rec.ReferenceToConsteval) { auto *FD = cast(DR->getDecl()); SemaRef.Diag(DR->getBeginLoc(), diag::err_invalid_consteval_take_address) << FD; SemaRef.Diag(FD->getLocation(), diag::note_declared_at); } } void Sema::PopExpressionEvaluationContext() { ExpressionEvaluationContextRecord& Rec = ExprEvalContexts.back(); unsigned NumTypos = Rec.NumTypos; if (!Rec.Lambdas.empty()) { using ExpressionKind = ExpressionEvaluationContextRecord::ExpressionKind; if (Rec.ExprContext == ExpressionKind::EK_TemplateArgument || Rec.isUnevaluated() || (Rec.isConstantEvaluated() && !getLangOpts().CPlusPlus17)) { unsigned D; if (Rec.isUnevaluated()) { // C++11 [expr.prim.lambda]p2: // A lambda-expression shall not appear in an unevaluated operand // (Clause 5). D = diag::err_lambda_unevaluated_operand; } else if (Rec.isConstantEvaluated() && !getLangOpts().CPlusPlus17) { // C++1y [expr.const]p2: // A conditional-expression e is a core constant expression unless the // evaluation of e, following the rules of the abstract machine, would // evaluate [...] a lambda-expression. D = diag::err_lambda_in_constant_expression; } else if (Rec.ExprContext == ExpressionKind::EK_TemplateArgument) { // C++17 [expr.prim.lamda]p2: // A lambda-expression shall not appear [...] in a template-argument. D = diag::err_lambda_in_invalid_context; } else llvm_unreachable("Couldn't infer lambda error message."); for (const auto *L : Rec.Lambdas) Diag(L->getBeginLoc(), D); } } WarnOnPendingNoDerefs(Rec); HandleImmediateInvocations(*this, Rec); // Warn on any volatile-qualified simple-assignments that are not discarded- // value expressions nor unevaluated operands (those cases get removed from // this list by CheckUnusedVolatileAssignment). for (auto *BO : Rec.VolatileAssignmentLHSs) Diag(BO->getBeginLoc(), diag::warn_deprecated_simple_assign_volatile) << BO->getType(); // When are coming out of an unevaluated context, clear out any // temporaries that we may have created as part of the evaluation of // the expression in that context: they aren't relevant because they // will never be constructed. if (Rec.isUnevaluated() || Rec.isConstantEvaluated()) { ExprCleanupObjects.erase(ExprCleanupObjects.begin() + Rec.NumCleanupObjects, ExprCleanupObjects.end()); Cleanup = Rec.ParentCleanup; CleanupVarDeclMarking(); std::swap(MaybeODRUseExprs, Rec.SavedMaybeODRUseExprs); // Otherwise, merge the contexts together. 
} else { Cleanup.mergeFrom(Rec.ParentCleanup); MaybeODRUseExprs.insert(Rec.SavedMaybeODRUseExprs.begin(), Rec.SavedMaybeODRUseExprs.end()); } // Pop the current expression evaluation context off the stack. ExprEvalContexts.pop_back(); // The global expression evaluation context record is never popped. ExprEvalContexts.back().NumTypos += NumTypos; } void Sema::DiscardCleanupsInEvaluationContext() { ExprCleanupObjects.erase( ExprCleanupObjects.begin() + ExprEvalContexts.back().NumCleanupObjects, ExprCleanupObjects.end()); Cleanup.reset(); MaybeODRUseExprs.clear(); } ExprResult Sema::HandleExprEvaluationContextForTypeof(Expr *E) { ExprResult Result = CheckPlaceholderExpr(E); if (Result.isInvalid()) return ExprError(); E = Result.get(); if (!E->getType()->isVariablyModifiedType()) return E; return TransformToPotentiallyEvaluated(E); } /// Are we in a context that is potentially constant evaluated per C++20 /// [expr.const]p12? static bool isPotentiallyConstantEvaluatedContext(Sema &SemaRef) { /// C++2a [expr.const]p12: // An expression or conversion is potentially constant evaluated if it is switch (SemaRef.ExprEvalContexts.back().Context) { case Sema::ExpressionEvaluationContext::ConstantEvaluated: // -- a manifestly constant-evaluated expression, case Sema::ExpressionEvaluationContext::PotentiallyEvaluated: case Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed: case Sema::ExpressionEvaluationContext::DiscardedStatement: // -- a potentially-evaluated expression, case Sema::ExpressionEvaluationContext::UnevaluatedList: // -- an immediate subexpression of a braced-init-list, // -- [FIXME] an expression of the form & cast-expression that occurs // within a templated entity // -- a subexpression of one of the above that is not a subexpression of // a nested unevaluated operand. return true; case Sema::ExpressionEvaluationContext::Unevaluated: case Sema::ExpressionEvaluationContext::UnevaluatedAbstract: // Expressions in this context are never evaluated. return false; } llvm_unreachable("Invalid context"); } /// Return true if this function has a calling convention that requires mangling /// in the size of the parameter pack. static bool funcHasParameterSizeMangling(Sema &S, FunctionDecl *FD) { // These manglings don't do anything on non-Windows or non-x86 platforms, so // we don't need parameter type sizes. const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); if (!TT.isOSWindows() || !TT.isX86()) return false; // If this is C++ and this isn't an extern "C" function, parameters do not // need to be complete. In this case, C++ mangling will apply, which doesn't // use the size of the parameters. if (S.getLangOpts().CPlusPlus && !FD->isExternC()) return false; // Stdcall, fastcall, and vectorcall need this special treatment. CallingConv CC = FD->getType()->castAs()->getCallConv(); switch (CC) { case CC_X86StdCall: case CC_X86FastCall: case CC_X86VectorCall: return true; default: break; } return false; } /// Require that all of the parameter types of function be complete. Normally, /// parameter types are only required to be complete when a function is called /// or defined, but to mangle functions with certain calling conventions, the /// mangler needs to know the size of the parameter list. In this situation, /// MSVC doesn't emit an error or instantiate templates. Instead, MSVC mangles /// the function as _foo@0, i.e. zero bytes of parameters, which will usually /// result in a linker error. 
Clang doesn't implement this behavior, and instead /// attempts to error at compile time. static void CheckCompleteParameterTypesForMangler(Sema &S, FunctionDecl *FD, SourceLocation Loc) { class ParamIncompleteTypeDiagnoser : public Sema::TypeDiagnoser { FunctionDecl *FD; ParmVarDecl *Param; public: ParamIncompleteTypeDiagnoser(FunctionDecl *FD, ParmVarDecl *Param) : FD(FD), Param(Param) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { CallingConv CC = FD->getType()->castAs()->getCallConv(); StringRef CCName; switch (CC) { case CC_X86StdCall: CCName = "stdcall"; break; case CC_X86FastCall: CCName = "fastcall"; break; case CC_X86VectorCall: CCName = "vectorcall"; break; default: llvm_unreachable("CC does not need mangling"); } S.Diag(Loc, diag::err_cconv_incomplete_param_type) << Param->getDeclName() << FD->getDeclName() << CCName; } }; for (ParmVarDecl *Param : FD->parameters()) { ParamIncompleteTypeDiagnoser Diagnoser(FD, Param); S.RequireCompleteType(Loc, Param->getType(), Diagnoser); } } namespace { enum class OdrUseContext { /// Declarations in this context are not odr-used. None, /// Declarations in this context are formally odr-used, but this is a /// dependent context. Dependent, /// Declarations in this context are odr-used but not actually used (yet). FormallyOdrUsed, /// Declarations in this context are used. Used }; } /// Are we within a context in which references to resolved functions or to /// variables result in odr-use? static OdrUseContext isOdrUseContext(Sema &SemaRef) { OdrUseContext Result; switch (SemaRef.ExprEvalContexts.back().Context) { case Sema::ExpressionEvaluationContext::Unevaluated: case Sema::ExpressionEvaluationContext::UnevaluatedList: case Sema::ExpressionEvaluationContext::UnevaluatedAbstract: return OdrUseContext::None; case Sema::ExpressionEvaluationContext::ConstantEvaluated: case Sema::ExpressionEvaluationContext::PotentiallyEvaluated: Result = OdrUseContext::Used; break; case Sema::ExpressionEvaluationContext::DiscardedStatement: Result = OdrUseContext::FormallyOdrUsed; break; case Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed: // A default argument formally results in odr-use, but doesn't actually // result in a use in any real sense until it itself is used. Result = OdrUseContext::FormallyOdrUsed; break; } if (SemaRef.CurContext->isDependentContext()) return OdrUseContext::Dependent; return Result; } static bool isImplicitlyDefinableConstexprFunction(FunctionDecl *Func) { return Func->isConstexpr() && (Func->isImplicitlyInstantiable() || !Func->isUserProvided()); } /// Mark a function referenced, and check whether it is odr-used /// (C++ [basic.def.odr]p2, C99 6.9p3) void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse) { assert(Func && "No function?"); Func->setReferenced(); // Recursive functions aren't really used until they're used from some other // context. bool IsRecursiveCall = CurContext == Func; // C++11 [basic.def.odr]p3: // A function whose name appears as a potentially-evaluated expression is // odr-used if it is the unique lookup result or the selected member of a // set of overloaded functions [...]. // // We (incorrectly) mark overload resolution as an unevaluated context, so we // can just check that here. OdrUseContext OdrUse = MightBeOdrUse ? isOdrUseContext(*this) : OdrUseContext::None; if (IsRecursiveCall && OdrUse == OdrUseContext::Used) OdrUse = OdrUseContext::FormallyOdrUsed; // Trivial default constructors and destructors are never actually used. 
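// Demote trivial default constructors and destructors from Used to
// FormallyOdrUsed: no definition ever needs to be emitted for them.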
// FIXME: What about other special members? if (Func->isTrivial() && !Func->hasAttr() && OdrUse == OdrUseContext::Used) { if (auto *Constructor = dyn_cast(Func)) if (Constructor->isDefaultConstructor()) OdrUse = OdrUseContext::FormallyOdrUsed; if (isa(Func)) OdrUse = OdrUseContext::FormallyOdrUsed; } // C++20 [expr.const]p12: // A function [...] is needed for constant evaluation if it is [...] a // constexpr function that is named by an expression that is potentially // constant evaluated bool NeededForConstantEvaluation = isPotentiallyConstantEvaluatedContext(*this) && isImplicitlyDefinableConstexprFunction(Func); // Determine whether we require a function definition to exist, per // C++11 [temp.inst]p3: // Unless a function template specialization has been explicitly // instantiated or explicitly specialized, the function template // specialization is implicitly instantiated when the specialization is // referenced in a context that requires a function definition to exist. // C++20 [temp.inst]p7: // The existence of a definition of a [...] function is considered to // affect the semantics of the program if the [...] function is needed for // constant evaluation by an expression // C++20 [basic.def.odr]p10: // Every program shall contain exactly one definition of every non-inline // function or variable that is odr-used in that program outside of a // discarded statement // C++20 [special]p1: // The implementation will implicitly define [defaulted special members] // if they are odr-used or needed for constant evaluation. // // Note that we skip the implicit instantiation of templates that are only // used in unused default arguments or by recursive calls to themselves. // This is formally non-conforming, but seems reasonable in practice. bool NeedDefinition = !IsRecursiveCall && (OdrUse == OdrUseContext::Used || NeededForConstantEvaluation); // C++14 [temp.expl.spec]p6: // If a template [...] is explicitly specialized then that specialization // shall be declared before the first use of that specialization that would // cause an implicit instantiation to take place, in every translation unit // in which such a use occurs if (NeedDefinition && (Func->getTemplateSpecializationKind() != TSK_Undeclared || Func->getMemberSpecializationInfo())) checkSpecializationVisibility(Loc, Func); if (getLangOpts().CUDA) CheckCUDACall(Loc, Func); if (getLangOpts().SYCLIsDevice) checkSYCLDeviceFunction(Loc, Func); // If we need a definition, try to create one. 
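// This may synthesize the body of a defaulted special member or defaulted
// comparison, or trigger (possibly deferred) implicit instantiation of a
// function template specialization.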
if (NeedDefinition && !Func->getBody()) { runWithSufficientStackSpace(Loc, [&] { if (CXXConstructorDecl *Constructor = dyn_cast(Func)) { Constructor = cast(Constructor->getFirstDecl()); if (Constructor->isDefaulted() && !Constructor->isDeleted()) { if (Constructor->isDefaultConstructor()) { if (Constructor->isTrivial() && !Constructor->hasAttr()) return; DefineImplicitDefaultConstructor(Loc, Constructor); } else if (Constructor->isCopyConstructor()) { DefineImplicitCopyConstructor(Loc, Constructor); } else if (Constructor->isMoveConstructor()) { DefineImplicitMoveConstructor(Loc, Constructor); } } else if (Constructor->getInheritedConstructor()) { DefineInheritingConstructor(Loc, Constructor); } } else if (CXXDestructorDecl *Destructor = dyn_cast(Func)) { Destructor = cast(Destructor->getFirstDecl()); if (Destructor->isDefaulted() && !Destructor->isDeleted()) { if (Destructor->isTrivial() && !Destructor->hasAttr()) return; DefineImplicitDestructor(Loc, Destructor); } if (Destructor->isVirtual() && getLangOpts().AppleKext) MarkVTableUsed(Loc, Destructor->getParent()); } else if (CXXMethodDecl *MethodDecl = dyn_cast(Func)) { if (MethodDecl->isOverloadedOperator() && MethodDecl->getOverloadedOperator() == OO_Equal) { MethodDecl = cast(MethodDecl->getFirstDecl()); if (MethodDecl->isDefaulted() && !MethodDecl->isDeleted()) { if (MethodDecl->isCopyAssignmentOperator()) DefineImplicitCopyAssignment(Loc, MethodDecl); else if (MethodDecl->isMoveAssignmentOperator()) DefineImplicitMoveAssignment(Loc, MethodDecl); } } else if (isa(MethodDecl) && MethodDecl->getParent()->isLambda()) { CXXConversionDecl *Conversion = cast(MethodDecl->getFirstDecl()); if (Conversion->isLambdaToBlockPointerConversion()) DefineImplicitLambdaToBlockPointerConversion(Loc, Conversion); else DefineImplicitLambdaToFunctionPointerConversion(Loc, Conversion); } else if (MethodDecl->isVirtual() && getLangOpts().AppleKext) MarkVTableUsed(Loc, MethodDecl->getParent()); } if (Func->isDefaulted() && !Func->isDeleted()) { DefaultedComparisonKind DCK = getDefaultedComparisonKind(Func); if (DCK != DefaultedComparisonKind::None) DefineDefaultedComparison(Loc, Func, DCK); } // Implicit instantiation of function templates and member functions of // class templates. if (Func->isImplicitlyInstantiable()) { TemplateSpecializationKind TSK = Func->getTemplateSpecializationKindForInstantiation(); SourceLocation PointOfInstantiation = Func->getPointOfInstantiation(); bool FirstInstantiation = PointOfInstantiation.isInvalid(); if (FirstInstantiation) { PointOfInstantiation = Loc; Func->setTemplateSpecializationKind(TSK, PointOfInstantiation); } else if (TSK != TSK_ImplicitInstantiation) { // Use the point of use as the point of instantiation, instead of the // point of explicit instantiation (which we track as the actual point // of instantiation). This gives better backtraces in diagnostics. PointOfInstantiation = Loc; } if (FirstInstantiation || TSK != TSK_ImplicitInstantiation || Func->isConstexpr()) { if (isa(Func->getDeclContext()) && cast(Func->getDeclContext())->isLocalClass() && CodeSynthesisContexts.size()) PendingLocalImplicitInstantiations.push_back( std::make_pair(Func, PointOfInstantiation)); else if (Func->isConstexpr()) // Do not defer instantiations of constexpr functions, to avoid the // expression evaluator needing to call back into Sema if it sees a // call to such a function. 
InstantiateFunctionDefinition(PointOfInstantiation, Func); else { Func->setInstantiationIsPending(true); PendingInstantiations.push_back( std::make_pair(Func, PointOfInstantiation)); // Notify the consumer that a function was implicitly instantiated. Consumer.HandleCXXImplicitFunctionInstantiation(Func); } } } else { // Walk redefinitions, as some of them may be instantiable. for (auto i : Func->redecls()) { if (!i->isUsed(false) && i->isImplicitlyInstantiable()) MarkFunctionReferenced(Loc, i, MightBeOdrUse); } } }); } // C++14 [except.spec]p17: // An exception-specification is considered to be needed when: // - the function is odr-used or, if it appears in an unevaluated operand, // would be odr-used if the expression were potentially-evaluated; // // Note, we do this even if MightBeOdrUse is false. That indicates that the // function is a pure virtual function we're calling, and in that case the // function was selected by overload resolution and we need to resolve its // exception specification for a different reason. const FunctionProtoType *FPT = Func->getType()->getAs(); if (FPT && isUnresolvedExceptionSpec(FPT->getExceptionSpecType())) ResolveExceptionSpec(Loc, FPT); // If this is the first "real" use, act on that. if (OdrUse == OdrUseContext::Used && !Func->isUsed(/*CheckUsedAttr=*/false)) { // Keep track of used but undefined functions. if (!Func->isDefined()) { if (mightHaveNonExternalLinkage(Func)) UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc)); else if (Func->getMostRecentDecl()->isInlined() && !LangOpts.GNUInline && !Func->getMostRecentDecl()->hasAttr()) UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc)); else if (isExternalWithNoLinkageType(Func)) UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc)); } // Some x86 Windows calling conventions mangle the size of the parameter // pack into the name. Computing the size of the parameters requires the // parameter types to be complete. Check that now. if (funcHasParameterSizeMangling(*this, Func)) CheckCompleteParameterTypesForMangler(*this, Func, Loc); // In the MS C++ ABI, the compiler emits destructor variants where they are // used. If the destructor is used here but defined elsewhere, mark the // virtual base destructors referenced. If those virtual base destructors // are inline, this will ensure they are defined when emitting the complete // destructor variant. This checking may be redundant if the destructor is // provided later in this TU. if (Context.getTargetInfo().getCXXABI().isMicrosoft()) { if (auto *Dtor = dyn_cast(Func)) { CXXRecordDecl *Parent = Dtor->getParent(); if (Parent->getNumVBases() > 0 && !Dtor->getBody()) CheckCompleteDestructorVariant(Loc, Dtor); } } Func->markUsed(Context); } } /// Directly mark a variable odr-used. Given a choice, prefer to use /// MarkVariableReferenced since it does additional checks and then /// calls MarkVarDeclODRUsed. /// If the variable must be captured: /// - if FunctionScopeIndexToStopAt is null, capture it in the CurContext /// - else capture it in the DeclContext that maps to the /// *FunctionScopeIndexToStopAt on the FunctionScopeInfo stack. static void MarkVarDeclODRUsed(VarDecl *Var, SourceLocation Loc, Sema &SemaRef, const unsigned *const FunctionScopeIndexToStopAt = nullptr) { // Keep track of used but undefined variables. // FIXME: We shouldn't suppress this warning for static data members. 
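// Record the first use of a variable that is only declared here and must be
// defined in this translation unit (internal linkage, inline, or no-linkage
// type), so it can be diagnosed if no definition ever appears.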
if (Var->hasDefinition(SemaRef.Context) == VarDecl::DeclarationOnly && (!Var->isExternallyVisible() || Var->isInline() || SemaRef.isExternalWithNoLinkageType(Var)) && !(Var->isStaticDataMember() && Var->hasInit())) { SourceLocation &old = SemaRef.UndefinedButUsed[Var->getCanonicalDecl()]; if (old.isInvalid()) old = Loc; } QualType CaptureType, DeclRefType; if (SemaRef.LangOpts.OpenMP) SemaRef.tryCaptureOpenMPLambdas(Var); SemaRef.tryCaptureVariable(Var, Loc, Sema::TryCapture_Implicit, /*EllipsisLoc*/ SourceLocation(), /*BuildAndDiagnose*/ true, CaptureType, DeclRefType, FunctionScopeIndexToStopAt); Var->markUsed(SemaRef.Context); } void Sema::MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex) { MarkVarDeclODRUsed(Capture, Loc, *this, &CapturingScopeIndex); } static void diagnoseUncapturableValueReference(Sema &S, SourceLocation loc, ValueDecl *var, DeclContext *DC) { DeclContext *VarDC = var->getDeclContext(); // If the parameter still belongs to the translation unit, then // we're actually just using one parameter in the declaration of // the next. if (isa(var) && isa(VarDC)) return; // For C code, don't diagnose about capture if we're not actually in code // right now; it's impossible to write a non-constant expression outside of // function context, so we'll get other (more useful) diagnostics later. // // For C++, things get a bit more nasty... it would be nice to suppress this // diagnostic for certain cases like using a local variable in an array bound // for a member of a local class, but the correct predicate is not obvious. if (!S.getLangOpts().CPlusPlus && !S.CurContext->isFunctionOrMethod()) return; unsigned ValueKind = isa(var) ? 1 : 0; unsigned ContextKind = 3; // unknown if (isa(VarDC) && cast(VarDC->getParent())->isLambda()) { ContextKind = 2; } else if (isa(VarDC)) { ContextKind = 0; } else if (isa(VarDC)) { ContextKind = 1; } S.Diag(loc, diag::err_reference_to_local_in_enclosing_context) << var << ValueKind << ContextKind << VarDC; S.Diag(var->getLocation(), diag::note_entity_declared_at) << var; // FIXME: Add additional diagnostic info about class etc. which prevents // capture. } static bool isVariableAlreadyCapturedInScopeInfo(CapturingScopeInfo *CSI, VarDecl *Var, bool &SubCapturesAreNested, QualType &CaptureType, QualType &DeclRefType) { // Check whether we've already captured it. if (CSI->CaptureMap.count(Var)) { // If we found a capture, any subcaptures are nested. SubCapturesAreNested = true; // Retrieve the capture type for this variable. CaptureType = CSI->getCapture(Var).getCaptureType(); // Compute the type of an expression that refers to this variable. DeclRefType = CaptureType.getNonReferenceType(); // Similarly to mutable captures in lambda, all the OpenMP captures by copy // are mutable in the sense that user can change their value - they are // private instances of the captured declarations. const Capture &Cap = CSI->getCapture(Var); if (Cap.isCopyCapture() && !(isa(CSI) && cast(CSI)->Mutable) && !(isa(CSI) && cast(CSI)->CapRegionKind == CR_OpenMP)) DeclRefType.addConst(); return true; } return false; } // Only block literals, captured statements, and lambda expressions can // capture; other scopes don't work. 
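// If DC is a capturing context (block, captured statement, or lambda call
// operator), return its lambda-aware parent so the search can continue
// outward; otherwise return null, diagnosing if the variable has local
// storage and diagnostics were requested.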
static DeclContext *getParentOfCapturingContextOrNull(DeclContext *DC, VarDecl *Var, SourceLocation Loc, const bool Diagnose, Sema &S) { if (isa(DC) || isa(DC) || isLambdaCallOperator(DC)) return getLambdaAwareParentOfDeclContext(DC); else if (Var->hasLocalStorage()) { if (Diagnose) diagnoseUncapturableValueReference(S, Loc, Var, DC); } return nullptr; } // Certain capturing entities (lambdas, blocks etc.) are not allowed to capture // certain types of variables (unnamed, variably modified types etc.) // so check for eligibility. static bool isVariableCapturable(CapturingScopeInfo *CSI, VarDecl *Var, SourceLocation Loc, const bool Diagnose, Sema &S) { bool IsBlock = isa(CSI); bool IsLambda = isa(CSI); // Lambdas are not allowed to capture unnamed variables // (e.g. anonymous unions). // FIXME: The C++11 rule don't actually state this explicitly, but I'm // assuming that's the intent. if (IsLambda && !Var->getDeclName()) { if (Diagnose) { S.Diag(Loc, diag::err_lambda_capture_anonymous_var); S.Diag(Var->getLocation(), diag::note_declared_at); } return false; } // Prohibit variably-modified types in blocks; they're difficult to deal with. if (Var->getType()->isVariablyModifiedType() && IsBlock) { if (Diagnose) { S.Diag(Loc, diag::err_ref_vm_type); S.Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); } return false; } // Prohibit structs with flexible array members too. // We cannot capture what is in the tail end of the struct. if (const RecordType *VTTy = Var->getType()->getAs()) { if (VTTy->getDecl()->hasFlexibleArrayMember()) { if (Diagnose) { if (IsBlock) S.Diag(Loc, diag::err_ref_flexarray_type); else S.Diag(Loc, diag::err_lambda_capture_flexarray_type) << Var->getDeclName(); S.Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); } return false; } } const bool HasBlocksAttr = Var->hasAttr(); // Lambdas and captured statements are not allowed to capture __block // variables; they don't support the expected semantics. if (HasBlocksAttr && (IsLambda || isa(CSI))) { if (Diagnose) { S.Diag(Loc, diag::err_capture_block_variable) << Var->getDeclName() << !IsLambda; S.Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); } return false; } // OpenCL v2.0 s6.12.5: Blocks cannot reference/capture other blocks if (S.getLangOpts().OpenCL && IsBlock && Var->getType()->isBlockPointerType()) { if (Diagnose) S.Diag(Loc, diag::err_opencl_block_ref_block); return false; } return true; } // Returns true if the capture by block was successful. static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var, SourceLocation Loc, const bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const bool Nested, Sema &S, bool Invalid) { bool ByRef = false; // Blocks are not allowed to capture arrays, excepting OpenCL. // OpenCL v2.0 s1.12.5 (revision 40): arrays are captured by reference // (decayed to pointers). if (!Invalid && !S.getLangOpts().OpenCL && CaptureType->isArrayType()) { if (BuildAndDiagnose) { S.Diag(Loc, diag::err_ref_array_type); S.Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); Invalid = true; } else { return false; } } // Forbid the block-capture of autoreleasing variables. 
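// ARC does not allow a block to capture an __autoreleasing variable: the
// capture copy would not retain the object, so it could be left dangling
// once the enclosing autorelease pool drains.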
if (!Invalid && CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) { if (BuildAndDiagnose) { S.Diag(Loc, diag::err_arc_autoreleasing_capture) << /*block*/ 0; S.Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); Invalid = true; } else { return false; } } // Warn about implicitly autoreleasing indirect parameters captured by blocks. if (const auto *PT = CaptureType->getAs()) { QualType PointeeTy = PT->getPointeeType(); if (!Invalid && PointeeTy->getAs() && PointeeTy.getObjCLifetime() == Qualifiers::OCL_Autoreleasing && !S.Context.hasDirectOwnershipQualifier(PointeeTy)) { if (BuildAndDiagnose) { SourceLocation VarLoc = Var->getLocation(); S.Diag(Loc, diag::warn_block_capture_autoreleasing); S.Diag(VarLoc, diag::note_declare_parameter_strong); } } } const bool HasBlocksAttr = Var->hasAttr(); if (HasBlocksAttr || CaptureType->isReferenceType() || (S.getLangOpts().OpenMP && S.isOpenMPCapturedDecl(Var))) { // Block capture by reference does not change the capture or // declaration reference types. ByRef = true; } else { // Block capture by copy introduces 'const'. CaptureType = CaptureType.getNonReferenceType().withConst(); DeclRefType = CaptureType; } // Actually capture the variable. if (BuildAndDiagnose) BSI->addCapture(Var, HasBlocksAttr, ByRef, Nested, Loc, SourceLocation(), CaptureType, Invalid); return !Invalid; } /// Capture the given variable in the captured region. static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI, VarDecl *Var, SourceLocation Loc, const bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const bool RefersToCapturedVariable, Sema &S, bool Invalid) { // By default, capture variables by reference. bool ByRef = true; // Using an LValue reference type is consistent with Lambdas (see below). if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP) { if (S.isOpenMPCapturedDecl(Var)) { bool HasConst = DeclRefType.isConstQualified(); DeclRefType = DeclRefType.getUnqualifiedType(); // Don't lose diagnostics about assignments to const. if (HasConst) DeclRefType.addConst(); } // Do not capture firstprivates in tasks. if (S.isOpenMPPrivateDecl(Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel) != OMPC_unknown) return true; ByRef = S.isOpenMPCapturedByRef(Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel); } if (ByRef) CaptureType = S.Context.getLValueReferenceType(DeclRefType); else CaptureType = DeclRefType; // Actually capture the variable. if (BuildAndDiagnose) RSI->addCapture(Var, /*isBlock*/ false, ByRef, RefersToCapturedVariable, Loc, SourceLocation(), CaptureType, Invalid); return !Invalid; } /// Capture the given variable in the lambda. static bool captureInLambda(LambdaScopeInfo *LSI, VarDecl *Var, SourceLocation Loc, const bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const bool RefersToCapturedVariable, const Sema::TryCaptureKind Kind, SourceLocation EllipsisLoc, const bool IsTopScope, Sema &S, bool Invalid) { // Determine whether we are capturing by reference or by value. bool ByRef = false; if (IsTopScope && Kind != Sema::TryCapture_Implicit) { ByRef = (Kind == Sema::TryCapture_ExplicitByRef); } else { ByRef = (LSI->ImpCaptureStyle == LambdaScopeInfo::ImpCap_LambdaByref); } // Compute the type of the field that will capture this variable. if (ByRef) { // C++11 [expr.prim.lambda]p15: // An entity is captured by reference if it is implicitly or // explicitly captured but not captured by copy. 
It is // unspecified whether additional unnamed non-static data // members are declared in the closure type for entities // captured by reference. // // FIXME: It is not clear whether we want to build an lvalue reference // to the DeclRefType or to CaptureType.getNonReferenceType(). GCC appears // to do the former, while EDG does the latter. Core issue 1249 will // clarify, but for now we follow GCC because it's a more permissive and // easily defensible position. CaptureType = S.Context.getLValueReferenceType(DeclRefType); } else { // C++11 [expr.prim.lambda]p14: // For each entity captured by copy, an unnamed non-static // data member is declared in the closure type. The // declaration order of these members is unspecified. The type // of such a data member is the type of the corresponding // captured entity if the entity is not a reference to an // object, or the referenced type otherwise. [Note: If the // captured entity is a reference to a function, the // corresponding data member is also a reference to a // function. - end note ] if (const ReferenceType *RefType = CaptureType->getAs()){ if (!RefType->getPointeeType()->isFunctionType()) CaptureType = RefType->getPointeeType(); } // Forbid the lambda copy-capture of autoreleasing variables. if (!Invalid && CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) { if (BuildAndDiagnose) { S.Diag(Loc, diag::err_arc_autoreleasing_capture) << /*lambda*/ 1; S.Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); Invalid = true; } else { return false; } } // Make sure that by-copy captures are of a complete and non-abstract type. if (!Invalid && BuildAndDiagnose) { if (!CaptureType->isDependentType() && S.RequireCompleteSizedType( Loc, CaptureType, diag::err_capture_of_incomplete_or_sizeless_type, Var->getDeclName())) Invalid = true; else if (S.RequireNonAbstractType(Loc, CaptureType, diag::err_capture_of_abstract_type)) Invalid = true; } } // Compute the type of a reference to this captured variable. if (ByRef) DeclRefType = CaptureType.getNonReferenceType(); else { // C++ [expr.prim.lambda]p5: // The closure type for a lambda-expression has a public inline // function call operator [...]. This function call operator is // declared const (9.3.1) if and only if the lambda-expression's // parameter-declaration-clause is not followed by mutable. DeclRefType = CaptureType.getNonReferenceType(); if (!LSI->Mutable && !CaptureType->isReferenceType()) DeclRefType.addConst(); } // Add the capture. if (BuildAndDiagnose) LSI->addCapture(Var, /*isBlock=*/false, ByRef, RefersToCapturedVariable, Loc, EllipsisLoc, CaptureType, Invalid); return !Invalid; } bool Sema::tryCaptureVariable( VarDecl *Var, SourceLocation ExprLoc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt) { // An init-capture is notionally from the context surrounding its // declaration, but its parent DC is the lambda class. DeclContext *VarDC = Var->getDeclContext(); if (Var->isInitCapture()) VarDC = VarDC->getParent(); DeclContext *DC = CurContext; const unsigned MaxFunctionScopesIndex = FunctionScopeIndexToStopAt ? 
*FunctionScopeIndexToStopAt : FunctionScopes.size() - 1; // We need to sync up the Declaration Context with the // FunctionScopeIndexToStopAt if (FunctionScopeIndexToStopAt) { unsigned FSIndex = FunctionScopes.size() - 1; while (FSIndex != MaxFunctionScopesIndex) { DC = getLambdaAwareParentOfDeclContext(DC); --FSIndex; } } // If the variable is declared in the current context, there is no need to // capture it. if (VarDC == DC) return true; // Capture global variables if it is required to use private copy of this // variable. bool IsGlobal = !Var->hasLocalStorage(); if (IsGlobal && !(LangOpts.OpenMP && isOpenMPCapturedDecl(Var, /*CheckScopeInfo=*/true, MaxFunctionScopesIndex))) return true; Var = Var->getCanonicalDecl(); // Walk up the stack to determine whether we can capture the variable, // performing the "simple" checks that don't depend on type. We stop when // we've either hit the declared scope of the variable or find an existing // capture of that variable. We start from the innermost capturing-entity // (the DC) and ensure that all intervening capturing-entities // (blocks/lambdas etc.) between the innermost capturer and the variable`s // declcontext can either capture the variable or have already captured // the variable. CaptureType = Var->getType(); DeclRefType = CaptureType.getNonReferenceType(); bool Nested = false; bool Explicit = (Kind != TryCapture_Implicit); unsigned FunctionScopesIndex = MaxFunctionScopesIndex; do { // Only block literals, captured statements, and lambda expressions can // capture; other scopes don't work. DeclContext *ParentDC = getParentOfCapturingContextOrNull(DC, Var, ExprLoc, BuildAndDiagnose, *this); // We need to check for the parent *first* because, if we *have* // private-captured a global variable, we need to recursively capture it in // intermediate blocks, lambdas, etc. if (!ParentDC) { if (IsGlobal) { FunctionScopesIndex = MaxFunctionScopesIndex - 1; break; } return true; } FunctionScopeInfo *FSI = FunctionScopes[FunctionScopesIndex]; CapturingScopeInfo *CSI = cast(FSI); // Check whether we've already captured it. if (isVariableAlreadyCapturedInScopeInfo(CSI, Var, Nested, CaptureType, DeclRefType)) { CSI->getCapture(Var).markUsed(BuildAndDiagnose); break; } // If we are instantiating a generic lambda call operator body, // we do not want to capture new variables. What was captured // during either a lambdas transformation or initial parsing // should be used. if (isGenericLambdaCallOperatorSpecialization(DC)) { if (BuildAndDiagnose) { LambdaScopeInfo *LSI = cast(CSI); if (LSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_None) { Diag(ExprLoc, diag::err_lambda_impcap) << Var->getDeclName(); Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); Diag(LSI->Lambda->getBeginLoc(), diag::note_lambda_decl); } else diagnoseUncapturableValueReference(*this, ExprLoc, Var, DC); } return true; } // Try to capture variable-length arrays types. if (Var->getType()->isVariablyModifiedType()) { // We're going to walk down into the type and look for VLA // expressions. QualType QTy = Var->getType(); if (ParmVarDecl *PVD = dyn_cast_or_null(Var)) QTy = PVD->getOriginalType(); captureVariablyModifiedType(Context, QTy, CSI); } if (getLangOpts().OpenMP) { if (auto *RSI = dyn_cast(CSI)) { // OpenMP private variables should not be captured in outer scope, so // just break here. Similarly, global variables that are captured in a // target region should not be captured outside the scope of the region. 
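// For example (an illustrative OpenMP sketch):
//
//   int x = 0;
//   #pragma omp parallel private(x)
//   { x = 1; }   // the region operates on its own private 'x'; no capture
//                // needs to be propagated to any enclosing scope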
if (RSI->CapRegionKind == CR_OpenMP) { OpenMPClauseKind IsOpenMPPrivateDecl = isOpenMPPrivateDecl( Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel); // If the variable is private (i.e. not captured) and has variably // modified type, we still need to capture the type for correct // codegen in all regions, associated with the construct. Currently, // it is captured in the innermost captured region only. if (IsOpenMPPrivateDecl != OMPC_unknown && Var->getType()->isVariablyModifiedType()) { QualType QTy = Var->getType(); if (ParmVarDecl *PVD = dyn_cast_or_null(Var)) QTy = PVD->getOriginalType(); for (int I = 1, E = getNumberOfConstructScopes(RSI->OpenMPLevel); I < E; ++I) { auto *OuterRSI = cast( FunctionScopes[FunctionScopesIndex - I]); assert(RSI->OpenMPLevel == OuterRSI->OpenMPLevel && "Wrong number of captured regions associated with the " "OpenMP construct."); captureVariablyModifiedType(Context, QTy, OuterRSI); } } bool IsTargetCap = IsOpenMPPrivateDecl != OMPC_private && isOpenMPTargetCapturedDecl(Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel); // Do not capture global if it is not privatized in outer regions. bool IsGlobalCap = IsGlobal && isOpenMPGlobalCapturedDecl(Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel); // When we detect target captures we are looking from inside the // target region, therefore we need to propagate the capture from the // enclosing region. Therefore, the capture is not initially nested. if (IsTargetCap) adjustOpenMPTargetScopeIndex(FunctionScopesIndex, RSI->OpenMPLevel); if (IsTargetCap || IsOpenMPPrivateDecl == OMPC_private || (IsGlobal && !IsGlobalCap)) { Nested = !IsTargetCap; DeclRefType = DeclRefType.getUnqualifiedType(); CaptureType = Context.getLValueReferenceType(DeclRefType); break; } } } } if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_None && !Explicit) { // No capture-default, and this is not an explicit capture // so cannot capture this variable. if (BuildAndDiagnose) { Diag(ExprLoc, diag::err_lambda_impcap) << Var->getDeclName(); Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); if (cast(CSI)->Lambda) Diag(cast(CSI)->Lambda->getBeginLoc(), diag::note_lambda_decl); // FIXME: If we error out because an outer lambda can not implicitly // capture a variable that an inner lambda explicitly captures, we // should have the inner lambda do the explicit capture - because // it makes for cleaner diagnostics later. This would purely be done // so that the diagnostic does not misleadingly claim that a variable // can not be captured by a lambda implicitly even though it is captured // explicitly. Suggestion: // - create const bool VariableCaptureWasInitiallyExplicit = Explicit // at the function head // - cache the StartingDeclContext - this must be a lambda // - captureInLambda in the innermost lambda the variable. } return true; } FunctionScopesIndex--; DC = ParentDC; Explicit = false; } while (!VarDC->Equals(DC)); // Walk back down the scope stack, (e.g. from outer lambda to inner lambda) // computing the type of the capture at each step, checking type-specific // requirements, and adding captures if requested. // If the variable had already been captured previously, we start capturing // at the lambda nested within that one. bool Invalid = false; for (unsigned I = ++FunctionScopesIndex, N = MaxFunctionScopesIndex + 1; I != N; ++I) { CapturingScopeInfo *CSI = cast(FunctionScopes[I]); // Certain capturing entities (lambdas, blocks etc.) 
are not allowed to capture // certain types of variables (unnamed, variably modified types etc.) // so check for eligibility. if (!Invalid) Invalid = !isVariableCapturable(CSI, Var, ExprLoc, BuildAndDiagnose, *this); // After encountering an error, if we're actually supposed to capture, keep // capturing in nested contexts to suppress any follow-on diagnostics. if (Invalid && !BuildAndDiagnose) return true; if (BlockScopeInfo *BSI = dyn_cast(CSI)) { Invalid = !captureInBlock(BSI, Var, ExprLoc, BuildAndDiagnose, CaptureType, DeclRefType, Nested, *this, Invalid); Nested = true; } else if (CapturedRegionScopeInfo *RSI = dyn_cast(CSI)) { Invalid = !captureInCapturedRegion(RSI, Var, ExprLoc, BuildAndDiagnose, CaptureType, DeclRefType, Nested, *this, Invalid); Nested = true; } else { LambdaScopeInfo *LSI = cast(CSI); Invalid = !captureInLambda(LSI, Var, ExprLoc, BuildAndDiagnose, CaptureType, DeclRefType, Nested, Kind, EllipsisLoc, /*IsTopScope*/ I == N - 1, *this, Invalid); Nested = true; } if (Invalid && !BuildAndDiagnose) return true; } return Invalid; } bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc) { QualType CaptureType; QualType DeclRefType; return tryCaptureVariable(Var, Loc, Kind, EllipsisLoc, /*BuildAndDiagnose=*/true, CaptureType, DeclRefType, nullptr); } bool Sema::NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc) { QualType CaptureType; QualType DeclRefType; return !tryCaptureVariable(Var, Loc, TryCapture_Implicit, SourceLocation(), /*BuildAndDiagnose=*/false, CaptureType, DeclRefType, nullptr); } QualType Sema::getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc) { QualType CaptureType; QualType DeclRefType; // Determine whether we can capture this variable. if (tryCaptureVariable(Var, Loc, TryCapture_Implicit, SourceLocation(), /*BuildAndDiagnose=*/false, CaptureType, DeclRefType, nullptr)) return QualType(); return DeclRefType; } namespace { // Helper to copy the template arguments from a DeclRefExpr or MemberExpr. // The produced TemplateArgumentListInfo* points to data stored within this // object, so should only be used in contexts where the pointer will not be // used after the CopiedTemplateArgs object is destroyed. class CopiedTemplateArgs { bool HasArgs; TemplateArgumentListInfo TemplateArgStorage; public: template CopiedTemplateArgs(RefExpr *E) : HasArgs(E->hasExplicitTemplateArgs()) { if (HasArgs) E->copyTemplateArgumentsInto(TemplateArgStorage); } operator TemplateArgumentListInfo*() #ifdef __has_cpp_attribute #if __has_cpp_attribute(clang::lifetimebound) [[clang::lifetimebound]] #endif #endif { return HasArgs ? &TemplateArgStorage : nullptr; } }; } /// Walk the set of potential results of an expression and mark them all as /// non-odr-uses if they satisfy the side-conditions of the NonOdrUseReason. /// /// \return A new expression if we found any potential results, ExprEmpty() if /// not, and ExprError() if we diagnosed an error. static ExprResult rebuildPotentialResultsAsNonOdrUsed(Sema &S, Expr *E, NonOdrUseReason NOUR) { // Per C++11 [basic.def.odr], a variable is odr-used "unless it is // an object that satisfies the requirements for appearing in a // constant expression (5.19) and the lvalue-to-rvalue conversion (4.1) // is immediately applied." This function handles the lvalue-to-rvalue // conversion part. // // If we encounter a node that claims to be an odr-use but shouldn't be, we // transform it into the relevant kind of non-odr-use node and rebuild the // tree of nodes leading to it. 
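// For example (illustrative): a variable that is usable in constant
// expressions and only subject to the lvalue-to-rvalue conversion is not
// odr-used, so a lambda does not need to capture it:
//
//   constexpr int N = 4;
//   auto f = [] { return N; };   // OK even without a capture of N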
// // This is a mini-TreeTransform that only transforms a restricted subset of // nodes (and only certain operands of them). // Rebuild a subexpression. auto Rebuild = [&](Expr *Sub) { return rebuildPotentialResultsAsNonOdrUsed(S, Sub, NOUR); }; // Check whether a potential result satisfies the requirements of NOUR. auto IsPotentialResultOdrUsed = [&](NamedDecl *D) { // Any entity other than a VarDecl is always odr-used whenever it's named // in a potentially-evaluated expression. auto *VD = dyn_cast(D); if (!VD) return true; // C++2a [basic.def.odr]p4: // A variable x whose name appears as a potentially-evalauted expression // e is odr-used by e unless // -- x is a reference that is usable in constant expressions, or // -- x is a variable of non-reference type that is usable in constant // expressions and has no mutable subobjects, and e is an element of // the set of potential results of an expression of // non-volatile-qualified non-class type to which the lvalue-to-rvalue // conversion is applied, or // -- x is a variable of non-reference type, and e is an element of the // set of potential results of a discarded-value expression to which // the lvalue-to-rvalue conversion is not applied // // We check the first bullet and the "potentially-evaluated" condition in // BuildDeclRefExpr. We check the type requirements in the second bullet // in CheckLValueToRValueConversionOperand below. switch (NOUR) { case NOUR_None: case NOUR_Unevaluated: llvm_unreachable("unexpected non-odr-use-reason"); case NOUR_Constant: // Constant references were handled when they were built. if (VD->getType()->isReferenceType()) return true; if (auto *RD = VD->getType()->getAsCXXRecordDecl()) if (RD->hasMutableFields()) return true; if (!VD->isUsableInConstantExpressions(S.Context)) return true; break; case NOUR_Discarded: if (VD->getType()->isReferenceType()) return true; break; } return false; }; // Mark that this expression does not constitute an odr-use. auto MarkNotOdrUsed = [&] { S.MaybeODRUseExprs.remove(E); if (LambdaScopeInfo *LSI = S.getCurLambda()) LSI->markVariableExprAsNonODRUsed(E); }; // C++2a [basic.def.odr]p2: // The set of potential results of an expression e is defined as follows: switch (E->getStmtClass()) { // -- If e is an id-expression, ... case Expr::DeclRefExprClass: { auto *DRE = cast(E); if (DRE->isNonOdrUse() || IsPotentialResultOdrUsed(DRE->getDecl())) break; // Rebuild as a non-odr-use DeclRefExpr. MarkNotOdrUsed(); return DeclRefExpr::Create( S.Context, DRE->getQualifierLoc(), DRE->getTemplateKeywordLoc(), DRE->getDecl(), DRE->refersToEnclosingVariableOrCapture(), DRE->getNameInfo(), DRE->getType(), DRE->getValueKind(), DRE->getFoundDecl(), CopiedTemplateArgs(DRE), NOUR); } case Expr::FunctionParmPackExprClass: { auto *FPPE = cast(E); // If any of the declarations in the pack is odr-used, then the expression // as a whole constitutes an odr-use. for (VarDecl *D : *FPPE) if (IsPotentialResultOdrUsed(D)) return ExprEmpty(); // FIXME: Rebuild as a non-odr-use FunctionParmPackExpr? In practice, // nothing cares about whether we marked this as an odr-use, but it might // be useful for non-compiler tools. MarkNotOdrUsed(); break; } // -- If e is a subscripting operation with an array operand... case Expr::ArraySubscriptExprClass: { auto *ASE = cast(E); Expr *OldBase = ASE->getBase()->IgnoreImplicit(); if (!OldBase->getType()->isArrayType()) break; ExprResult Base = Rebuild(OldBase); if (!Base.isUsable()) return Base; Expr *LHS = ASE->getBase() == ASE->getLHS() ? 
Base.get() : ASE->getLHS(); Expr *RHS = ASE->getBase() == ASE->getRHS() ? Base.get() : ASE->getRHS(); SourceLocation LBracketLoc = ASE->getBeginLoc(); // FIXME: Not stored. return S.ActOnArraySubscriptExpr(nullptr, LHS, LBracketLoc, RHS, ASE->getRBracketLoc()); } case Expr::MemberExprClass: { auto *ME = cast(E); // -- If e is a class member access expression [...] naming a non-static // data member... if (isa(ME->getMemberDecl())) { ExprResult Base = Rebuild(ME->getBase()); if (!Base.isUsable()) return Base; return MemberExpr::Create( S.Context, Base.get(), ME->isArrow(), ME->getOperatorLoc(), ME->getQualifierLoc(), ME->getTemplateKeywordLoc(), ME->getMemberDecl(), ME->getFoundDecl(), ME->getMemberNameInfo(), CopiedTemplateArgs(ME), ME->getType(), ME->getValueKind(), ME->getObjectKind(), ME->isNonOdrUse()); } if (ME->getMemberDecl()->isCXXInstanceMember()) break; // -- If e is a class member access expression naming a static data member, // ... if (ME->isNonOdrUse() || IsPotentialResultOdrUsed(ME->getMemberDecl())) break; // Rebuild as a non-odr-use MemberExpr. MarkNotOdrUsed(); return MemberExpr::Create( S.Context, ME->getBase(), ME->isArrow(), ME->getOperatorLoc(), ME->getQualifierLoc(), ME->getTemplateKeywordLoc(), ME->getMemberDecl(), ME->getFoundDecl(), ME->getMemberNameInfo(), CopiedTemplateArgs(ME), ME->getType(), ME->getValueKind(), ME->getObjectKind(), NOUR); return ExprEmpty(); } case Expr::BinaryOperatorClass: { auto *BO = cast(E); Expr *LHS = BO->getLHS(); Expr *RHS = BO->getRHS(); // -- If e is a pointer-to-member expression of the form e1 .* e2 ... if (BO->getOpcode() == BO_PtrMemD) { ExprResult Sub = Rebuild(LHS); if (!Sub.isUsable()) return Sub; LHS = Sub.get(); // -- If e is a comma expression, ... } else if (BO->getOpcode() == BO_Comma) { ExprResult Sub = Rebuild(RHS); if (!Sub.isUsable()) return Sub; RHS = Sub.get(); } else { break; } return S.BuildBinOp(nullptr, BO->getOperatorLoc(), BO->getOpcode(), LHS, RHS); } // -- If e has the form (e1)... case Expr::ParenExprClass: { auto *PE = cast(E); ExprResult Sub = Rebuild(PE->getSubExpr()); if (!Sub.isUsable()) return Sub; return S.ActOnParenExpr(PE->getLParen(), PE->getRParen(), Sub.get()); } // -- If e is a glvalue conditional expression, ... // We don't apply this to a binary conditional operator. FIXME: Should we? case Expr::ConditionalOperatorClass: { auto *CO = cast(E); ExprResult LHS = Rebuild(CO->getLHS()); if (LHS.isInvalid()) return ExprError(); ExprResult RHS = Rebuild(CO->getRHS()); if (RHS.isInvalid()) return ExprError(); if (!LHS.isUsable() && !RHS.isUsable()) return ExprEmpty(); if (!LHS.isUsable()) LHS = CO->getLHS(); if (!RHS.isUsable()) RHS = CO->getRHS(); return S.ActOnConditionalOp(CO->getQuestionLoc(), CO->getColonLoc(), CO->getCond(), LHS.get(), RHS.get()); } // [Clang extension] // -- If e has the form __extension__ e1... case Expr::UnaryOperatorClass: { auto *UO = cast(E); if (UO->getOpcode() != UO_Extension) break; ExprResult Sub = Rebuild(UO->getSubExpr()); if (!Sub.isUsable()) return Sub; return S.BuildUnaryOp(nullptr, UO->getOperatorLoc(), UO_Extension, Sub.get()); } // [Clang extension] // -- If e has the form _Generic(...), the set of potential results is the // union of the sets of potential results of the associated expressions. 
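// For example (illustrative, with 'a' and 'b' standing for arbitrary
// variables):
//
//   _Generic(0, int: a, default: b)
//
// has both 'a' and 'b' among its potential results, so every associated
// expression is rebuilt below.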
case Expr::GenericSelectionExprClass: { auto *GSE = cast(E); SmallVector AssocExprs; bool AnyChanged = false; for (Expr *OrigAssocExpr : GSE->getAssocExprs()) { ExprResult AssocExpr = Rebuild(OrigAssocExpr); if (AssocExpr.isInvalid()) return ExprError(); if (AssocExpr.isUsable()) { AssocExprs.push_back(AssocExpr.get()); AnyChanged = true; } else { AssocExprs.push_back(OrigAssocExpr); } } return AnyChanged ? S.CreateGenericSelectionExpr( GSE->getGenericLoc(), GSE->getDefaultLoc(), GSE->getRParenLoc(), GSE->getControllingExpr(), GSE->getAssocTypeSourceInfos(), AssocExprs) : ExprEmpty(); } // [Clang extension] // -- If e has the form __builtin_choose_expr(...), the set of potential // results is the union of the sets of potential results of the // second and third subexpressions. case Expr::ChooseExprClass: { auto *CE = cast(E); ExprResult LHS = Rebuild(CE->getLHS()); if (LHS.isInvalid()) return ExprError(); ExprResult RHS = Rebuild(CE->getLHS()); if (RHS.isInvalid()) return ExprError(); if (!LHS.get() && !RHS.get()) return ExprEmpty(); if (!LHS.isUsable()) LHS = CE->getLHS(); if (!RHS.isUsable()) RHS = CE->getRHS(); return S.ActOnChooseExpr(CE->getBuiltinLoc(), CE->getCond(), LHS.get(), RHS.get(), CE->getRParenLoc()); } // Step through non-syntactic nodes. case Expr::ConstantExprClass: { auto *CE = cast(E); ExprResult Sub = Rebuild(CE->getSubExpr()); if (!Sub.isUsable()) return Sub; return ConstantExpr::Create(S.Context, Sub.get()); } // We could mostly rely on the recursive rebuilding to rebuild implicit // casts, but not at the top level, so rebuild them here. case Expr::ImplicitCastExprClass: { auto *ICE = cast(E); // Only step through the narrow set of cast kinds we expect to encounter. // Anything else suggests we've left the region in which potential results // can be found. switch (ICE->getCastKind()) { case CK_NoOp: case CK_DerivedToBase: case CK_UncheckedDerivedToBase: { ExprResult Sub = Rebuild(ICE->getSubExpr()); if (!Sub.isUsable()) return Sub; CXXCastPath Path(ICE->path()); return S.ImpCastExprToType(Sub.get(), ICE->getType(), ICE->getCastKind(), ICE->getValueKind(), &Path); } default: break; } break; } default: break; } // Can't traverse through this node. Nothing to do. return ExprEmpty(); } ExprResult Sema::CheckLValueToRValueConversionOperand(Expr *E) { // Check whether the operand is or contains an object of non-trivial C union // type. if (E->getType().isVolatileQualified() && (E->getType().hasNonTrivialToPrimitiveDestructCUnion() || E->getType().hasNonTrivialToPrimitiveCopyCUnion())) checkNonTrivialCUnion(E->getType(), E->getExprLoc(), Sema::NTCUC_LValueToRValueVolatile, NTCUK_Destruct|NTCUK_Copy); // C++2a [basic.def.odr]p4: // [...] an expression of non-volatile-qualified non-class type to which // the lvalue-to-rvalue conversion is applied [...] if (E->getType().isVolatileQualified() || E->getType()->getAs()) return E; ExprResult Result = rebuildPotentialResultsAsNonOdrUsed(*this, E, NOUR_Constant); if (Result.isInvalid()) return ExprError(); return Result.get() ? Result : E; } ExprResult Sema::ActOnConstantExpression(ExprResult Res) { Res = CorrectDelayedTyposInExpr(Res); if (!Res.isUsable()) return Res; // If a constant-expression is a reference to a variable where we delay // deciding whether it is an odr-use, just assume we will apply the // lvalue-to-rvalue conversion. In the one case where this doesn't happen // (a non-type template argument), we have special handling anyway. 
return CheckLValueToRValueConversionOperand(Res.get()); } void Sema::CleanupVarDeclMarking() { // Iterate through a local copy in case MarkVarDeclODRUsed makes a recursive // call. MaybeODRUseExprSet LocalMaybeODRUseExprs; std::swap(LocalMaybeODRUseExprs, MaybeODRUseExprs); for (Expr *E : LocalMaybeODRUseExprs) { if (auto *DRE = dyn_cast(E)) { MarkVarDeclODRUsed(cast(DRE->getDecl()), DRE->getLocation(), *this); } else if (auto *ME = dyn_cast(E)) { MarkVarDeclODRUsed(cast(ME->getMemberDecl()), ME->getMemberLoc(), *this); } else if (auto *FP = dyn_cast(E)) { for (VarDecl *VD : *FP) MarkVarDeclODRUsed(VD, FP->getParameterPackLocation(), *this); } else { llvm_unreachable("Unexpected expression"); } } assert(MaybeODRUseExprs.empty() && "MarkVarDeclODRUsed failed to cleanup MaybeODRUseExprs?"); } static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc, VarDecl *Var, Expr *E) { assert((!E || isa(E) || isa(E) || isa(E)) && "Invalid Expr argument to DoMarkVarDeclReferenced"); Var->setReferenced(); if (Var->isInvalidDecl()) return; auto *MSI = Var->getMemberSpecializationInfo(); TemplateSpecializationKind TSK = MSI ? MSI->getTemplateSpecializationKind() : Var->getTemplateSpecializationKind(); OdrUseContext OdrUse = isOdrUseContext(SemaRef); bool UsableInConstantExpr = Var->mightBeUsableInConstantExpressions(SemaRef.Context); // C++20 [expr.const]p12: // A variable [...] is needed for constant evaluation if it is [...] a // variable whose name appears as a potentially constant evaluated // expression that is either a contexpr variable or is of non-volatile // const-qualified integral type or of reference type bool NeededForConstantEvaluation = isPotentiallyConstantEvaluatedContext(SemaRef) && UsableInConstantExpr; bool NeedDefinition = OdrUse == OdrUseContext::Used || NeededForConstantEvaluation; VarTemplateSpecializationDecl *VarSpec = dyn_cast(Var); assert(!isa(Var) && "Can't instantiate a partial template specialization."); // If this might be a member specialization of a static data member, check // the specialization is visible. We already did the checks for variable // template specializations when we created them. if (NeedDefinition && TSK != TSK_Undeclared && !isa(Var)) SemaRef.checkSpecializationVisibility(Loc, Var); // Perform implicit instantiation of static data members, static data member // templates of class templates, and variable template specializations. Delay // instantiations of variable templates, except for those that could be used // in a constant expression. if (NeedDefinition && isTemplateInstantiation(TSK)) { // Per C++17 [temp.explicit]p10, we may instantiate despite an explicit // instantiation declaration if a variable is usable in a constant // expression (among other cases). bool TryInstantiating = TSK == TSK_ImplicitInstantiation || (TSK == TSK_ExplicitInstantiationDeclaration && UsableInConstantExpr); if (TryInstantiating) { SourceLocation PointOfInstantiation = MSI ? MSI->getPointOfInstantiation() : Var->getPointOfInstantiation(); bool FirstInstantiation = PointOfInstantiation.isInvalid(); if (FirstInstantiation) { PointOfInstantiation = Loc; if (MSI) MSI->setPointOfInstantiation(PointOfInstantiation); else Var->setTemplateSpecializationKind(TSK, PointOfInstantiation); } bool InstantiationDependent = false; bool IsNonDependent = VarSpec ? !TemplateSpecializationType::anyDependentTemplateArguments( VarSpec->getTemplateArgsInfo(), InstantiationDependent) : true; // Do not instantiate specializations that are still type-dependent. 
if (IsNonDependent) { if (UsableInConstantExpr) { // Do not defer instantiations of variables that could be used in a // constant expression. SemaRef.runWithSufficientStackSpace(PointOfInstantiation, [&] { SemaRef.InstantiateVariableDefinition(PointOfInstantiation, Var); }); } else if (FirstInstantiation || isa(Var)) { // FIXME: For a specialization of a variable template, we don't // distinguish between "declaration and type implicitly instantiated" // and "implicit instantiation of definition requested", so we have // no direct way to avoid enqueueing the pending instantiation // multiple times. SemaRef.PendingInstantiations .push_back(std::make_pair(Var, PointOfInstantiation)); } } } } // C++2a [basic.def.odr]p4: // A variable x whose name appears as a potentially-evaluated expression e // is odr-used by e unless // -- x is a reference that is usable in constant expressions // -- x is a variable of non-reference type that is usable in constant // expressions and has no mutable subobjects [FIXME], and e is an // element of the set of potential results of an expression of // non-volatile-qualified non-class type to which the lvalue-to-rvalue // conversion is applied // -- x is a variable of non-reference type, and e is an element of the set // of potential results of a discarded-value expression to which the // lvalue-to-rvalue conversion is not applied [FIXME] // // We check the first part of the second bullet here, and // Sema::CheckLValueToRValueConversionOperand deals with the second part. // FIXME: To get the third bullet right, we need to delay this even for // variables that are not usable in constant expressions. // If we already know this isn't an odr-use, there's nothing more to do. if (DeclRefExpr *DRE = dyn_cast_or_null(E)) if (DRE->isNonOdrUse()) return; if (MemberExpr *ME = dyn_cast_or_null(E)) if (ME->isNonOdrUse()) return; switch (OdrUse) { case OdrUseContext::None: assert((!E || isa(E)) && "missing non-odr-use marking for unevaluated decl ref"); break; case OdrUseContext::FormallyOdrUsed: // FIXME: Ignoring formal odr-uses results in incorrect lambda capture // behavior. break; case OdrUseContext::Used: // If we might later find that this expression isn't actually an odr-use, // delay the marking. if (E && Var->isUsableInConstantExpressions(SemaRef.Context)) SemaRef.MaybeODRUseExprs.insert(E); else MarkVarDeclODRUsed(Var, Loc, SemaRef); break; case OdrUseContext::Dependent: // If this is a dependent context, we don't need to mark variables as // odr-used, but we may still need to track them for lambda capture. // FIXME: Do we also need to do this inside dependent typeid expressions // (which are modeled as unevaluated at this point)? const bool RefersToEnclosingScope = (SemaRef.CurContext != Var->getDeclContext() && Var->getDeclContext()->isFunctionOrMethod() && Var->hasLocalStorage()); if (RefersToEnclosingScope) { LambdaScopeInfo *const LSI = SemaRef.getCurLambda(/*IgnoreNonLambdaCapturingScope=*/true); if (LSI && (!LSI->CallOperator || !LSI->CallOperator->Encloses(Var->getDeclContext()))) { // If a variable could potentially be odr-used, defer marking it so // until we finish analyzing the full expression for any // lvalue-to-rvalue // or discarded value conversions that would obviate odr-use. // Add it to the list of potential captures that will be analyzed // later (ActOnFinishFullExpr) for eventual capture and odr-use marking // unless the variable is a reference that was initialized by a constant // expression (this will never need to be captured or odr-used). 
// // FIXME: We can simplify this a lot after implementing P0588R1. assert(E && "Capture variable should be used in an expression."); if (!Var->getType()->isReferenceType() || !Var->isUsableInConstantExpressions(SemaRef.Context)) LSI->addPotentialCapture(E->IgnoreParens()); } } break; } } /// Mark a variable referenced, and check whether it is odr-used /// (C++ [basic.def.odr]p2, C99 6.9p3). Note that this should not be /// used directly for normal expressions referring to VarDecl. void Sema::MarkVariableReferenced(SourceLocation Loc, VarDecl *Var) { DoMarkVarDeclReferenced(*this, Loc, Var, nullptr); } static void MarkExprReferenced(Sema &SemaRef, SourceLocation Loc, Decl *D, Expr *E, bool MightBeOdrUse) { if (SemaRef.isInOpenMPDeclareTargetContext()) SemaRef.checkDeclIsAllowedInOpenMPTarget(E, D); if (VarDecl *Var = dyn_cast(D)) { DoMarkVarDeclReferenced(SemaRef, Loc, Var, E); return; } SemaRef.MarkAnyDeclReferenced(Loc, D, MightBeOdrUse); // If this is a call to a method via a cast, also mark the method in the // derived class used in case codegen can devirtualize the call. const MemberExpr *ME = dyn_cast(E); if (!ME) return; CXXMethodDecl *MD = dyn_cast(ME->getMemberDecl()); if (!MD) return; // Only attempt to devirtualize if this is truly a virtual call. bool IsVirtualCall = MD->isVirtual() && ME->performsVirtualDispatch(SemaRef.getLangOpts()); if (!IsVirtualCall) return; // If it's possible to devirtualize the call, mark the called function // referenced. CXXMethodDecl *DM = MD->getDevirtualizedMethod( ME->getBase(), SemaRef.getLangOpts().AppleKext); if (DM) SemaRef.MarkAnyDeclReferenced(Loc, DM, MightBeOdrUse); } /// Perform reference-marking and odr-use handling for a DeclRefExpr. void Sema::MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base) { // TODO: update this with DR# once a defect report is filed. // C++11 defect. The address of a pure member should not be an ODR use, even // if it's a qualified reference. bool OdrUse = true; if (const CXXMethodDecl *Method = dyn_cast(E->getDecl())) if (Method->isVirtual() && !Method->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) OdrUse = false; if (auto *FD = dyn_cast(E->getDecl())) if (!isConstantEvaluated() && FD->isConsteval() && !RebuildingImmediateInvocation) ExprEvalContexts.back().ReferenceToConsteval.insert(E); MarkExprReferenced(*this, E->getLocation(), E->getDecl(), E, OdrUse); } /// Perform reference-marking and odr-use handling for a MemberExpr. void Sema::MarkMemberReferenced(MemberExpr *E) { // C++11 [basic.def.odr]p2: // A non-overloaded function whose name appears as a potentially-evaluated // expression or a member of a set of candidate functions, if selected by // overload resolution when referred to from a potentially-evaluated // expression, is odr-used, unless it is a pure virtual function and its // name is not explicitly qualified. bool MightBeOdrUse = true; if (E->performsVirtualDispatch(getLangOpts())) { if (CXXMethodDecl *Method = dyn_cast(E->getMemberDecl())) if (Method->isPure()) MightBeOdrUse = false; } SourceLocation Loc = E->getMemberLoc().isValid() ? E->getMemberLoc() : E->getBeginLoc(); MarkExprReferenced(*this, Loc, E->getMemberDecl(), E, MightBeOdrUse); } /// Perform reference-marking and odr-use handling for a FunctionParmPackExpr. void Sema::MarkFunctionParmPackReferenced(FunctionParmPackExpr *E) { for (VarDecl *VD : *E) MarkExprReferenced(*this, E->getParameterPackLocation(), VD, E, true); } /// Perform marking for a reference to an arbitrary declaration. 
It /// marks the declaration referenced, and performs odr-use checking for /// functions and variables. This method should not be used when building a /// normal expression which refers to a variable. void Sema::MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse) { if (MightBeOdrUse) { if (auto *VD = dyn_cast(D)) { MarkVariableReferenced(Loc, VD); return; } } if (auto *FD = dyn_cast(D)) { MarkFunctionReferenced(Loc, FD, MightBeOdrUse); return; } D->setReferenced(); } namespace { // Mark all of the declarations used by a type as referenced. // FIXME: Not fully implemented yet! We need to have a better understanding // of when we're entering a context we should not recurse into. // FIXME: This is and EvaluatedExprMarker are more-or-less equivalent to // TreeTransforms rebuilding the type in a new context. Rather than // duplicating the TreeTransform logic, we should consider reusing it here. // Currently that causes problems when rebuilding LambdaExprs. class MarkReferencedDecls : public RecursiveASTVisitor { Sema &S; SourceLocation Loc; public: typedef RecursiveASTVisitor Inherited; MarkReferencedDecls(Sema &S, SourceLocation Loc) : S(S), Loc(Loc) { } bool TraverseTemplateArgument(const TemplateArgument &Arg); }; } bool MarkReferencedDecls::TraverseTemplateArgument( const TemplateArgument &Arg) { { // A non-type template argument is a constant-evaluated context. EnterExpressionEvaluationContext Evaluated( S, Sema::ExpressionEvaluationContext::ConstantEvaluated); if (Arg.getKind() == TemplateArgument::Declaration) { if (Decl *D = Arg.getAsDecl()) S.MarkAnyDeclReferenced(Loc, D, true); } else if (Arg.getKind() == TemplateArgument::Expression) { S.MarkDeclarationsReferencedInExpr(Arg.getAsExpr(), false); } } return Inherited::TraverseTemplateArgument(Arg); } void Sema::MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T) { MarkReferencedDecls Marker(*this, Loc); Marker.TraverseType(T); } namespace { /// Helper class that marks all of the declarations referenced by /// potentially-evaluated subexpressions as "referenced". class EvaluatedExprMarker : public UsedDeclVisitor { public: typedef UsedDeclVisitor Inherited; bool SkipLocalVariables; EvaluatedExprMarker(Sema &S, bool SkipLocalVariables) : Inherited(S), SkipLocalVariables(SkipLocalVariables) {} void visitUsedDecl(SourceLocation Loc, Decl *D) { S.MarkFunctionReferenced(Loc, cast(D)); } void VisitDeclRefExpr(DeclRefExpr *E) { // If we were asked not to visit local variables, don't. if (SkipLocalVariables) { if (VarDecl *VD = dyn_cast(E->getDecl())) if (VD->hasLocalStorage()) return; } S.MarkDeclRefReferenced(E); } void VisitMemberExpr(MemberExpr *E) { S.MarkMemberReferenced(E); Visit(E->getBase()); } }; } // namespace /// Mark any declarations that appear within this expression or any /// potentially-evaluated subexpressions as "referenced". /// /// \param SkipLocalVariables If true, don't mark local variables as /// 'referenced'. void Sema::MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables) { EvaluatedExprMarker(*this, SkipLocalVariables).Visit(E); } /// Emit a diagnostic that describes an effect on the run-time behavior /// of the program being compiled. /// /// This routine emits the given diagnostic when the code currently being /// type-checked is "potentially evaluated", meaning that there is a /// possibility that the code will actually be executable. Code in sizeof() /// expressions, code used only during overload resolution, etc., are not /// potentially evaluated. 
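/// For example (illustrative): passing a non-trivial object through an
/// ellipsis inside 'sizeof(g(obj))' can never execute, so a runtime-behavior
/// diagnostic for it should be suppressed.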
This routine will suppress such diagnostics or, /// in the absolutely nutty case of potentially potentially evaluated /// expressions (C++ typeid), queue the diagnostic to potentially emit it /// later. /// /// This routine should be used for all diagnostics that describe the run-time /// behavior of a program, such as passing a non-POD value through an ellipsis. /// Failure to do so will likely result in spurious diagnostics or failures /// during overload resolution or within sizeof/alignof/typeof/typeid. bool Sema::DiagRuntimeBehavior(SourceLocation Loc, ArrayRef Stmts, const PartialDiagnostic &PD) { switch (ExprEvalContexts.back().Context) { case ExpressionEvaluationContext::Unevaluated: case ExpressionEvaluationContext::UnevaluatedList: case ExpressionEvaluationContext::UnevaluatedAbstract: case ExpressionEvaluationContext::DiscardedStatement: // The argument will never be evaluated, so don't complain. break; case ExpressionEvaluationContext::ConstantEvaluated: // Relevant diagnostics should be produced by constant evaluation. break; case ExpressionEvaluationContext::PotentiallyEvaluated: case ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed: if (!Stmts.empty() && getCurFunctionOrMethodDecl()) { FunctionScopes.back()->PossiblyUnreachableDiags. push_back(sema::PossiblyUnreachableDiag(PD, Loc, Stmts)); return true; } // The initializer of a constexpr variable or of the first declaration of a // static data member is not syntactically a constant evaluated constant, // but nonetheless is always required to be a constant expression, so we // can skip diagnosing. // FIXME: Using the mangling context here is a hack. if (auto *VD = dyn_cast_or_null( ExprEvalContexts.back().ManglingContextDecl)) { if (VD->isConstexpr() || (VD->isStaticDataMember() && VD->isFirstDecl() && !VD->isInline())) break; // FIXME: For any other kind of variable, we should build a CFG for its // initializer and check whether the context in question is reachable. } Diag(Loc, PD); return true; } return false; } bool Sema::DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD) { return DiagRuntimeBehavior( Loc, Statement ? llvm::makeArrayRef(Statement) : llvm::None, PD); } bool Sema::CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD) { if (ReturnType->isVoidType() || !ReturnType->isIncompleteType()) return false; // If we're inside a decltype's expression, don't check for a valid return // type or construct temporaries until we know whether this is the last call. if (ExprEvalContexts.back().ExprContext == ExpressionEvaluationContextRecord::EK_Decltype) { ExprEvalContexts.back().DelayedDecltypeCalls.push_back(CE); return false; } class CallReturnIncompleteDiagnoser : public TypeDiagnoser { FunctionDecl *FD; CallExpr *CE; public: CallReturnIncompleteDiagnoser(FunctionDecl *FD, CallExpr *CE) : FD(FD), CE(CE) { } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (!FD) { S.Diag(Loc, diag::err_call_incomplete_return) << T << CE->getSourceRange(); return; } S.Diag(Loc, diag::err_call_function_incomplete_return) << CE->getSourceRange() << FD->getDeclName() << T; S.Diag(FD->getLocation(), diag::note_entity_declared_at) << FD->getDeclName(); } } Diagnoser(FD, CE); if (RequireCompleteType(Loc, ReturnType, Diagnoser)) return true; return false; } // Diagnose the s/=/==/ and s/\|=/!=/ typos. Note that adding parentheses // will prevent this condition from triggering, which is what we want. 
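// For example (illustrative):
//
//   if (x = 0) { ... }   // warning: using the result of an assignment as a
//                        // condition; 'if ((x = 0))' or 'if (x == 0)'
//                        // silences it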
void Sema::DiagnoseAssignmentAsCondition(Expr *E) { SourceLocation Loc; unsigned diagnostic = diag::warn_condition_is_assignment; bool IsOrAssign = false; if (BinaryOperator *Op = dyn_cast(E)) { if (Op->getOpcode() != BO_Assign && Op->getOpcode() != BO_OrAssign) return; IsOrAssign = Op->getOpcode() == BO_OrAssign; // Greylist some idioms by putting them into a warning subcategory. if (ObjCMessageExpr *ME = dyn_cast(Op->getRHS()->IgnoreParenCasts())) { Selector Sel = ME->getSelector(); // self = [ init...] if (isSelfExpr(Op->getLHS()) && ME->getMethodFamily() == OMF_init) diagnostic = diag::warn_condition_is_idiomatic_assignment; // = [ nextObject] else if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "nextObject") diagnostic = diag::warn_condition_is_idiomatic_assignment; } Loc = Op->getOperatorLoc(); } else if (CXXOperatorCallExpr *Op = dyn_cast(E)) { if (Op->getOperator() != OO_Equal && Op->getOperator() != OO_PipeEqual) return; IsOrAssign = Op->getOperator() == OO_PipeEqual; Loc = Op->getOperatorLoc(); } else if (PseudoObjectExpr *POE = dyn_cast(E)) return DiagnoseAssignmentAsCondition(POE->getSyntacticForm()); else { // Not an assignment. return; } Diag(Loc, diagnostic) << E->getSourceRange(); SourceLocation Open = E->getBeginLoc(); SourceLocation Close = getLocForEndOfToken(E->getSourceRange().getEnd()); Diag(Loc, diag::note_condition_assign_silence) << FixItHint::CreateInsertion(Open, "(") << FixItHint::CreateInsertion(Close, ")"); if (IsOrAssign) Diag(Loc, diag::note_condition_or_assign_to_comparison) << FixItHint::CreateReplacement(Loc, "!="); else Diag(Loc, diag::note_condition_assign_to_comparison) << FixItHint::CreateReplacement(Loc, "=="); } /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void Sema::DiagnoseEqualityWithExtraParens(ParenExpr *ParenE) { // Don't warn if the parens came from a macro. SourceLocation parenLoc = ParenE->getBeginLoc(); if (parenLoc.isInvalid() || parenLoc.isMacroID()) return; // Don't warn for dependent expressions. 
if (ParenE->isTypeDependent()) return; Expr *E = ParenE->IgnoreParens(); if (BinaryOperator *opE = dyn_cast(E)) if (opE->getOpcode() == BO_EQ && opE->getLHS()->IgnoreParenImpCasts()->isModifiableLvalue(Context) == Expr::MLV_Valid) { SourceLocation Loc = opE->getOperatorLoc(); Diag(Loc, diag::warn_equality_with_extra_parens) << E->getSourceRange(); SourceRange ParenERange = ParenE->getSourceRange(); Diag(Loc, diag::note_equality_comparison_silence) << FixItHint::CreateRemoval(ParenERange.getBegin()) << FixItHint::CreateRemoval(ParenERange.getEnd()); Diag(Loc, diag::note_equality_comparison_to_assign) << FixItHint::CreateReplacement(Loc, "="); } } ExprResult Sema::CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr) { DiagnoseAssignmentAsCondition(E); if (ParenExpr *parenE = dyn_cast(E)) DiagnoseEqualityWithExtraParens(parenE); ExprResult result = CheckPlaceholderExpr(E); if (result.isInvalid()) return ExprError(); E = result.get(); if (!E->isTypeDependent()) { if (getLangOpts().CPlusPlus) return CheckCXXBooleanCondition(E, IsConstexpr); // C++ 6.4p4 ExprResult ERes = DefaultFunctionArrayLvalueConversion(E); if (ERes.isInvalid()) return ExprError(); E = ERes.get(); QualType T = E->getType(); if (!T->isScalarType()) { // C99 6.8.4.1p1 Diag(Loc, diag::err_typecheck_statement_requires_scalar) << T << E->getSourceRange(); return ExprError(); } CheckBoolLikeConversion(E, Loc); } return E; } Sema::ConditionResult Sema::ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK) { // Empty conditions are valid in for-statements. if (!SubExpr) return ConditionResult(); ExprResult Cond; switch (CK) { case ConditionKind::Boolean: Cond = CheckBooleanCondition(Loc, SubExpr); break; case ConditionKind::ConstexprIf: Cond = CheckBooleanCondition(Loc, SubExpr, true); break; case ConditionKind::Switch: Cond = CheckSwitchCondition(Loc, SubExpr); break; } if (Cond.isInvalid()) return ConditionError(); // FIXME: FullExprArg doesn't have an invalid bit, so check nullness instead. FullExprArg FullExpr = MakeFullExpr(Cond.get(), Loc); if (!FullExpr.get()) return ConditionError(); return ConditionResult(*this, nullptr, FullExpr, CK == ConditionKind::ConstexprIf); } namespace { /// A visitor for rebuilding a call to an __unknown_any expression /// to have an appropriate type. struct RebuildUnknownAnyFunction : StmtVisitor { Sema &S; RebuildUnknownAnyFunction(Sema &S) : S(S) {} ExprResult VisitStmt(Stmt *S) { llvm_unreachable("unexpected statement!"); } ExprResult VisitExpr(Expr *E) { S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_call) << E->getSourceRange(); return ExprError(); } /// Rebuild an expression which simply semantically wraps another /// expression which it shares the type and value kind of. 
template ExprResult rebuildSugarExpr(T *E) { ExprResult SubResult = Visit(E->getSubExpr()); if (SubResult.isInvalid()) return ExprError(); Expr *SubExpr = SubResult.get(); E->setSubExpr(SubExpr); E->setType(SubExpr->getType()); E->setValueKind(SubExpr->getValueKind()); assert(E->getObjectKind() == OK_Ordinary); return E; } ExprResult VisitParenExpr(ParenExpr *E) { return rebuildSugarExpr(E); } ExprResult VisitUnaryExtension(UnaryOperator *E) { return rebuildSugarExpr(E); } ExprResult VisitUnaryAddrOf(UnaryOperator *E) { ExprResult SubResult = Visit(E->getSubExpr()); if (SubResult.isInvalid()) return ExprError(); Expr *SubExpr = SubResult.get(); E->setSubExpr(SubExpr); E->setType(S.Context.getPointerType(SubExpr->getType())); assert(E->getValueKind() == VK_RValue); assert(E->getObjectKind() == OK_Ordinary); return E; } ExprResult resolveDecl(Expr *E, ValueDecl *VD) { if (!isa(VD)) return VisitExpr(E); E->setType(VD->getType()); assert(E->getValueKind() == VK_RValue); if (S.getLangOpts().CPlusPlus && !(isa(VD) && cast(VD)->isInstance())) E->setValueKind(VK_LValue); return E; } ExprResult VisitMemberExpr(MemberExpr *E) { return resolveDecl(E, E->getMemberDecl()); } ExprResult VisitDeclRefExpr(DeclRefExpr *E) { return resolveDecl(E, E->getDecl()); } }; } /// Given a function expression of unknown-any type, try to rebuild it /// to have a function type. static ExprResult rebuildUnknownAnyFunction(Sema &S, Expr *FunctionExpr) { ExprResult Result = RebuildUnknownAnyFunction(S).Visit(FunctionExpr); if (Result.isInvalid()) return ExprError(); return S.DefaultFunctionArrayConversion(Result.get()); } namespace { /// A visitor for rebuilding an expression of type __unknown_anytype /// into one which resolves the type directly on the referring /// expression. Strict preservation of the original source /// structure is not a goal. struct RebuildUnknownAnyExpr : StmtVisitor { Sema &S; /// The current destination type. QualType DestType; RebuildUnknownAnyExpr(Sema &S, QualType CastType) : S(S), DestType(CastType) {} ExprResult VisitStmt(Stmt *S) { llvm_unreachable("unexpected statement!"); } ExprResult VisitExpr(Expr *E) { S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_expr) << E->getSourceRange(); return ExprError(); } ExprResult VisitCallExpr(CallExpr *E); ExprResult VisitObjCMessageExpr(ObjCMessageExpr *E); /// Rebuild an expression which simply semantically wraps another /// expression which it shares the type and value kind of. template ExprResult rebuildSugarExpr(T *E) { ExprResult SubResult = Visit(E->getSubExpr()); if (SubResult.isInvalid()) return ExprError(); Expr *SubExpr = SubResult.get(); E->setSubExpr(SubExpr); E->setType(SubExpr->getType()); E->setValueKind(SubExpr->getValueKind()); assert(E->getObjectKind() == OK_Ordinary); return E; } ExprResult VisitParenExpr(ParenExpr *E) { return rebuildSugarExpr(E); } ExprResult VisitUnaryExtension(UnaryOperator *E) { return rebuildSugarExpr(E); } ExprResult VisitUnaryAddrOf(UnaryOperator *E) { const PointerType *Ptr = DestType->getAs(); if (!Ptr) { S.Diag(E->getOperatorLoc(), diag::err_unknown_any_addrof) << E->getSourceRange(); return ExprError(); } if (isa(E->getSubExpr())) { S.Diag(E->getOperatorLoc(), diag::err_unknown_any_addrof_call) << E->getSourceRange(); return ExprError(); } assert(E->getValueKind() == VK_RValue); assert(E->getObjectKind() == OK_Ordinary); E->setType(DestType); // Build the sub-expression as if it were an object of the pointee type. 
DestType = Ptr->getPointeeType(); ExprResult SubResult = Visit(E->getSubExpr()); if (SubResult.isInvalid()) return ExprError(); E->setSubExpr(SubResult.get()); return E; } ExprResult VisitImplicitCastExpr(ImplicitCastExpr *E); ExprResult resolveDecl(Expr *E, ValueDecl *VD); ExprResult VisitMemberExpr(MemberExpr *E) { return resolveDecl(E, E->getMemberDecl()); } ExprResult VisitDeclRefExpr(DeclRefExpr *E) { return resolveDecl(E, E->getDecl()); } }; } /// Rebuilds a call expression which yielded __unknown_anytype. ExprResult RebuildUnknownAnyExpr::VisitCallExpr(CallExpr *E) { Expr *CalleeExpr = E->getCallee(); enum FnKind { FK_MemberFunction, FK_FunctionPointer, FK_BlockPointer }; FnKind Kind; QualType CalleeType = CalleeExpr->getType(); if (CalleeType == S.Context.BoundMemberTy) { assert(isa(E) || isa(E)); Kind = FK_MemberFunction; CalleeType = Expr::findBoundMemberType(CalleeExpr); } else if (const PointerType *Ptr = CalleeType->getAs()) { CalleeType = Ptr->getPointeeType(); Kind = FK_FunctionPointer; } else { CalleeType = CalleeType->castAs()->getPointeeType(); Kind = FK_BlockPointer; } const FunctionType *FnType = CalleeType->castAs(); // Verify that this is a legal result type of a function. if (DestType->isArrayType() || DestType->isFunctionType()) { unsigned diagID = diag::err_func_returning_array_function; if (Kind == FK_BlockPointer) diagID = diag::err_block_returning_array_function; S.Diag(E->getExprLoc(), diagID) << DestType->isFunctionType() << DestType; return ExprError(); } // Otherwise, go ahead and set DestType as the call's result. E->setType(DestType.getNonLValueExprType(S.Context)); E->setValueKind(Expr::getValueKindForType(DestType)); assert(E->getObjectKind() == OK_Ordinary); // Rebuild the function type, replacing the result type with DestType. const FunctionProtoType *Proto = dyn_cast(FnType); if (Proto) { // __unknown_anytype(...) is a special case used by the debugger when // it has no idea what a function's signature is. // // We want to build this call essentially under the K&R // unprototyped rules, but making a FunctionNoProtoType in C++ // would foul up all sorts of assumptions. However, we cannot // simply pass all arguments as variadic arguments, nor can we // portably just call the function under a non-variadic type; see // the comment on IR-gen's TargetInfo::isNoProtoCallVariadic. // However, it turns out that in practice it is generally safe to // call a function declared as "A foo(B,C,D);" under the prototype // "A foo(B,C,D,...);". The only known exception is with the // Windows ABI, where any variadic function is implicitly cdecl // regardless of its normal CC. Therefore we change the parameter // types to match the types of the arguments. // // This is a hack, but it is far superior to moving the // corresponding target-specific code from IR-gen to Sema/AST. 
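// For example (illustrative): given a debugger-provided declaration
//
//   __unknown_anytype foo(...);
//
// the user expression '(int)foo(1, 2.0)' is rebuilt here as if the callee had
// been declared 'int foo(int, double)'.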
ArrayRef ParamTypes = Proto->getParamTypes(); SmallVector ArgTypes; if (ParamTypes.empty() && Proto->isVariadic()) { // the special case ArgTypes.reserve(E->getNumArgs()); for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { Expr *Arg = E->getArg(i); QualType ArgType = Arg->getType(); if (E->isLValue()) { ArgType = S.Context.getLValueReferenceType(ArgType); } else if (E->isXValue()) { ArgType = S.Context.getRValueReferenceType(ArgType); } ArgTypes.push_back(ArgType); } ParamTypes = ArgTypes; } DestType = S.Context.getFunctionType(DestType, ParamTypes, Proto->getExtProtoInfo()); } else { DestType = S.Context.getFunctionNoProtoType(DestType, FnType->getExtInfo()); } // Rebuild the appropriate pointer-to-function type. switch (Kind) { case FK_MemberFunction: // Nothing to do. break; case FK_FunctionPointer: DestType = S.Context.getPointerType(DestType); break; case FK_BlockPointer: DestType = S.Context.getBlockPointerType(DestType); break; } // Finally, we can recurse. ExprResult CalleeResult = Visit(CalleeExpr); if (!CalleeResult.isUsable()) return ExprError(); E->setCallee(CalleeResult.get()); // Bind a temporary if necessary. return S.MaybeBindToTemporary(E); } ExprResult RebuildUnknownAnyExpr::VisitObjCMessageExpr(ObjCMessageExpr *E) { // Verify that this is a legal result type of a call. if (DestType->isArrayType() || DestType->isFunctionType()) { S.Diag(E->getExprLoc(), diag::err_func_returning_array_function) << DestType->isFunctionType() << DestType; return ExprError(); } // Rewrite the method result type if available. if (ObjCMethodDecl *Method = E->getMethodDecl()) { assert(Method->getReturnType() == S.Context.UnknownAnyTy); Method->setReturnType(DestType); } // Change the type of the message. E->setType(DestType.getNonReferenceType()); E->setValueKind(Expr::getValueKindForType(DestType)); return S.MaybeBindToTemporary(E); } ExprResult RebuildUnknownAnyExpr::VisitImplicitCastExpr(ImplicitCastExpr *E) { // The only case we should ever see here is a function-to-pointer decay. if (E->getCastKind() == CK_FunctionToPointerDecay) { assert(E->getValueKind() == VK_RValue); assert(E->getObjectKind() == OK_Ordinary); E->setType(DestType); // Rebuild the sub-expression as the pointee (function) type. DestType = DestType->castAs()->getPointeeType(); ExprResult Result = Visit(E->getSubExpr()); if (!Result.isUsable()) return ExprError(); E->setSubExpr(Result.get()); return E; } else if (E->getCastKind() == CK_LValueToRValue) { assert(E->getValueKind() == VK_RValue); assert(E->getObjectKind() == OK_Ordinary); assert(isa(E->getType())); E->setType(DestType); // The sub-expression has to be a lvalue reference, so rebuild it as such. 
DestType = S.Context.getLValueReferenceType(DestType); ExprResult Result = Visit(E->getSubExpr()); if (!Result.isUsable()) return ExprError(); E->setSubExpr(Result.get()); return E; } else { llvm_unreachable("Unhandled cast type!"); } } ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) { ExprValueKind ValueKind = VK_LValue; QualType Type = DestType; // We know how to make this work for certain kinds of decls: // - functions if (FunctionDecl *FD = dyn_cast(VD)) { if (const PointerType *Ptr = Type->getAs()) { DestType = Ptr->getPointeeType(); ExprResult Result = resolveDecl(E, VD); if (Result.isInvalid()) return ExprError(); return S.ImpCastExprToType(Result.get(), Type, CK_FunctionToPointerDecay, VK_RValue); } if (!Type->isFunctionType()) { S.Diag(E->getExprLoc(), diag::err_unknown_any_function) << VD << E->getSourceRange(); return ExprError(); } if (const FunctionProtoType *FT = Type->getAs()) { // We must match the FunctionDecl's type to the hack introduced in // RebuildUnknownAnyExpr::VisitCallExpr to vararg functions of unknown // type. See the lengthy commentary in that routine. QualType FDT = FD->getType(); const FunctionType *FnType = FDT->castAs(); const FunctionProtoType *Proto = dyn_cast_or_null(FnType); DeclRefExpr *DRE = dyn_cast(E); if (DRE && Proto && Proto->getParamTypes().empty() && Proto->isVariadic()) { SourceLocation Loc = FD->getLocation(); FunctionDecl *NewFD = FunctionDecl::Create( S.Context, FD->getDeclContext(), Loc, Loc, FD->getNameInfo().getName(), DestType, FD->getTypeSourceInfo(), SC_None, false /*isInlineSpecified*/, FD->hasPrototype(), /*ConstexprKind*/ CSK_unspecified); if (FD->getQualifier()) NewFD->setQualifierInfo(FD->getQualifierLoc()); SmallVector Params; for (const auto &AI : FT->param_types()) { ParmVarDecl *Param = S.BuildParmVarDeclForTypedef(FD, Loc, AI); Param->setScopeInfo(0, Params.size()); Params.push_back(Param); } NewFD->setParams(Params); DRE->setDecl(NewFD); VD = DRE->getDecl(); } } if (CXXMethodDecl *MD = dyn_cast(FD)) if (MD->isInstance()) { ValueKind = VK_RValue; Type = S.Context.BoundMemberTy; } // Function references aren't l-values in C. if (!S.getLangOpts().CPlusPlus) ValueKind = VK_RValue; // - variables } else if (isa(VD)) { if (const ReferenceType *RefTy = Type->getAs()) { Type = RefTy->getPointeeType(); } else if (Type->isFunctionType()) { S.Diag(E->getExprLoc(), diag::err_unknown_any_var_function_type) << VD << E->getSourceRange(); return ExprError(); } // - nothing else } else { S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_decl) << VD << E->getSourceRange(); return ExprError(); } // Modifying the declaration like this is friendly to IR-gen but // also really dangerous. VD->setType(DestType); E->setType(Type); E->setValueKind(ValueKind); return E; } /// Check a cast of an unknown-any type. We intentionally only /// trigger this for C-style casts. ExprResult Sema::checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path) { // The type we're casting to must be either void or complete. if (!CastType->isVoidType() && RequireCompleteType(TypeRange.getBegin(), CastType, diag::err_typecheck_cast_to_incomplete)) return ExprError(); // Rewrite the casted expression from scratch. 
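// Illustrative sketch (hypothetical declaration; 'target_var' is made up):
// given a debugger-style declaration
//   extern __unknown_anytype target_var;
// a C-style cast such as
//   (float)target_var
// reaches this point, and the rebuild below pushes 'float' through the cast
// operand, ultimately rewriting the declaration's own type in resolveDecl().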
  ExprResult result = RebuildUnknownAnyExpr(*this, CastType).Visit(CastExpr);
  if (!result.isUsable()) return ExprError();
  CastExpr = result.get();
  VK = CastExpr->getValueKind();
  CastKind = CK_NoOp;
  return CastExpr;
}

ExprResult Sema::forceUnknownAnyToType(Expr *E, QualType ToType) {
  return RebuildUnknownAnyExpr(*this, ToType).Visit(E);
}

ExprResult Sema::checkUnknownAnyArg(SourceLocation callLoc, Expr *arg,
                                    QualType &paramType) {
  // If the syntactic form of the argument is not an explicit cast of
  // any sort, just do default argument promotion.
  ExplicitCastExpr *castArg = dyn_cast<ExplicitCastExpr>(arg->IgnoreParens());
  if (!castArg) {
    ExprResult result = DefaultArgumentPromotion(arg);
    if (result.isInvalid()) return ExprError();
    paramType = result.get()->getType();
    return result;
  }

  // Otherwise, use the type that was written in the explicit cast.
  assert(!arg->hasPlaceholderType());
  paramType = castArg->getTypeAsWritten();

  // Copy-initialize a parameter of that type.
  InitializedEntity entity =
      InitializedEntity::InitializeParameter(Context, paramType,
                                             /*consumed*/ false);
  return PerformCopyInitialization(entity, callLoc, arg);
}

static ExprResult diagnoseUnknownAnyExpr(Sema &S, Expr *E) {
  Expr *orig = E;
  unsigned diagID = diag::err_uncasted_use_of_unknown_any;
  while (true) {
    E = E->IgnoreParenImpCasts();
    if (CallExpr *call = dyn_cast<CallExpr>(E)) {
      E = call->getCallee();
      diagID = diag::err_uncasted_call_of_unknown_any;
    } else {
      break;
    }
  }

  SourceLocation loc;
  NamedDecl *d;
  if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(E)) {
    loc = ref->getLocation();
    d = ref->getDecl();
  } else if (MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    loc = mem->getMemberLoc();
    d = mem->getMemberDecl();
  } else if (ObjCMessageExpr *msg = dyn_cast<ObjCMessageExpr>(E)) {
    diagID = diag::err_uncasted_call_of_unknown_any;
    loc = msg->getSelectorStartLoc();
    d = msg->getMethodDecl();
    if (!d) {
      S.Diag(loc, diag::err_uncasted_send_to_unknown_any_method)
          << static_cast<unsigned>(msg->isClassMessage()) << msg->getSelector()
          << orig->getSourceRange();
      return ExprError();
    }
  } else {
    S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_expr)
        << E->getSourceRange();
    return ExprError();
  }

  S.Diag(loc, diagID) << d << orig->getSourceRange();

  // Never recoverable.
  return ExprError();
}

/// Check for operands with placeholder types and complain if found.
/// Returns ExprError() if there was an error and no recovery was possible.
ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
  if (!getLangOpts().CPlusPlus) {
    // C cannot handle TypoExpr nodes on either side of a binop because it
    // doesn't handle dependent types properly, so make sure any TypoExprs have
    // been dealt with before checking the operands.
    ExprResult Result = CorrectDelayedTyposInExpr(E);
    if (!Result.isUsable()) return ExprError();
    E = Result.get();
  }

  const BuiltinType *placeholderType = E->getType()->getAsPlaceholderType();
  if (!placeholderType) return E;

  switch (placeholderType->getKind()) {

  // Overloaded expressions.
  case BuiltinType::Overload: {
    // Try to resolve a single function template specialization.
    // This is obligatory.
    ExprResult Result = E;
    if (ResolveAndFixSingleFunctionTemplateSpecialization(Result, false))
      return Result;

    // No guarantees that ResolveAndFixSingleFunctionTemplateSpecialization
    // leaves Result unchanged on failure.
    Result = E;
    if (resolveAndFixAddressOfSingleOverloadCandidate(Result))
      return Result;

    // If that failed, try to recover with a call.
    tryToRecoverWithCall(Result, PDiag(diag::err_ovl_unresolvable),
                         /*complain*/ true);
    return Result;
  }

  // Bound member functions.
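  // Illustrative example (hypothetical types): naming a member function
  // without calling it, e.g.
  //   struct S { void f(); };
  //   void g(S s) { s.f; }
  // yields a BoundMember placeholder; the recovery below suggests adding '()',
  // with a special-cased message when the name is a destructor (e.g. 's.~S').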
case BuiltinType::BoundMember: { ExprResult result = E; const Expr *BME = E->IgnoreParens(); PartialDiagnostic PD = PDiag(diag::err_bound_member_function); // Try to give a nicer diagnostic if it is a bound member that we recognize. if (isa(BME)) { PD = PDiag(diag::err_dtor_expr_without_call) << /*pseudo-destructor*/ 1; } else if (const auto *ME = dyn_cast(BME)) { if (ME->getMemberNameInfo().getName().getNameKind() == DeclarationName::CXXDestructorName) PD = PDiag(diag::err_dtor_expr_without_call) << /*destructor*/ 0; } tryToRecoverWithCall(result, PD, /*complain*/ true); return result; } // ARC unbridged casts. case BuiltinType::ARCUnbridgedCast: { Expr *realCast = stripARCUnbridgedCast(E); diagnoseARCUnbridgedCast(realCast); return realCast; } // Expressions of unknown type. case BuiltinType::UnknownAny: return diagnoseUnknownAnyExpr(*this, E); // Pseudo-objects. case BuiltinType::PseudoObject: return checkPseudoObjectRValue(E); case BuiltinType::BuiltinFn: { // Accept __noop without parens by implicitly converting it to a call expr. auto *DRE = dyn_cast(E->IgnoreParenImpCasts()); if (DRE) { auto *FD = cast(DRE->getDecl()); if (FD->getBuiltinID() == Builtin::BI__noop) { E = ImpCastExprToType(E, Context.getPointerType(FD->getType()), CK_BuiltinFnToFnPtr) .get(); return CallExpr::Create(Context, E, /*Args=*/{}, Context.IntTy, VK_RValue, SourceLocation()); } } Diag(E->getBeginLoc(), diag::err_builtin_fn_use); return ExprError(); } case BuiltinType::IncompleteMatrixIdx: Diag(cast(E->IgnoreParens()) ->getRowIdx() ->getBeginLoc(), diag::err_matrix_incomplete_index); return ExprError(); // Expressions of unknown type. case BuiltinType::OMPArraySection: Diag(E->getBeginLoc(), diag::err_omp_array_section_use); return ExprError(); // Expressions of unknown type. case BuiltinType::OMPArrayShaping: return ExprError(Diag(E->getBeginLoc(), diag::err_omp_array_shaping_use)); case BuiltinType::OMPIterator: return ExprError(Diag(E->getBeginLoc(), diag::err_omp_iterator_use)); // Everything else should be impossible. #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ case BuiltinType::Id: #include "clang/Basic/OpenCLImageTypes.def" #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ case BuiltinType::Id: #include "clang/Basic/OpenCLExtensionTypes.def" #define SVE_TYPE(Name, Id, SingletonId) \ case BuiltinType::Id: #include "clang/Basic/AArch64SVEACLETypes.def" #define BUILTIN_TYPE(Id, SingletonId) case BuiltinType::Id: #define PLACEHOLDER_TYPE(Id, SingletonId) #include "clang/AST/BuiltinTypes.def" break; } llvm_unreachable("invalid placeholder type!"); } bool Sema::CheckCaseExpression(Expr *E) { if (E->isTypeDependent()) return true; if (E->isValueDependent() || E->isIntegerConstantExpr(Context)) return E->getType()->isIntegralOrEnumerationType(); return false; } /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. 
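/// For example, '__objc_yes' becomes an ObjCBoolLiteralExpr typed with the
/// user-visible BOOL typedef when one is found by the lookup below, and with
/// the builtin Objective-C bool type otherwise.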
ExprResult Sema::ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) { assert((Kind == tok::kw___objc_yes || Kind == tok::kw___objc_no) && "Unknown Objective-C Boolean value!"); QualType BoolT = Context.ObjCBuiltinBoolTy; if (!Context.getBOOLDecl()) { LookupResult Result(*this, &Context.Idents.get("BOOL"), OpLoc, Sema::LookupOrdinaryName); if (LookupName(Result, getCurScope()) && Result.isSingleResult()) { NamedDecl *ND = Result.getFoundDecl(); if (TypedefDecl *TD = dyn_cast(ND)) Context.setBOOLDecl(TD); } } if (Context.getBOOLDecl()) BoolT = Context.getBOOLType(); return new (Context) ObjCBoolLiteralExpr(Kind == tok::kw___objc_yes, BoolT, OpLoc); } ExprResult Sema::ActOnObjCAvailabilityCheckExpr( llvm::ArrayRef AvailSpecs, SourceLocation AtLoc, SourceLocation RParen) { StringRef Platform = getASTContext().getTargetInfo().getPlatformName(); auto Spec = llvm::find_if(AvailSpecs, [&](const AvailabilitySpec &Spec) { return Spec.getPlatform() == Platform; }); VersionTuple Version; if (Spec != AvailSpecs.end()) Version = Spec->getVersion(); // The use of `@available` in the enclosing function should be analyzed to // warn when it's used inappropriately (i.e. not if(@available)). if (getCurFunctionOrMethodDecl()) getEnclosingFunction()->HasPotentialAvailabilityViolations = true; else if (getCurBlock() || getCurLambda()) getCurFunction()->HasPotentialAvailabilityViolations = true; return new (Context) ObjCAvailabilityCheckExpr(Version, AtLoc, RParen, Context.BoolTy); } ExprResult Sema::CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef SubExprs, QualType T) { if (!Context.getLangOpts().RecoveryAST) return ExprError(); if (isSFINAEContext()) return ExprError(); if (T.isNull() || !Context.getLangOpts().RecoveryASTType) // We don't know the concrete type, fallback to dependent type. T = Context.DependentTy; return RecoveryExpr::Create(Context, T, Begin, End, SubExprs); } diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp index 657ed13f207a..dc74f6e2f7dc 100644 --- a/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp +++ b/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp @@ -1,1940 +1,1941 @@ //===--- SemaLambda.cpp - Semantic Analysis for C++11 Lambdas -------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for C++ lambda expressions. // //===----------------------------------------------------------------------===// #include "clang/Sema/DeclSpec.h" #include "TypeLocBuilder.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/ExprCXX.h" #include "clang/Basic/TargetInfo.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/SemaLambda.h" #include "llvm/ADT/STLExtras.h" using namespace clang; using namespace sema; /// Examines the FunctionScopeInfo stack to determine the nearest /// enclosing lambda (to the current lambda) that is 'capture-ready' for /// the variable referenced in the current lambda (i.e. \p VarToCapture). /// If successful, returns the index into Sema's FunctionScopeInfo stack /// of the capture-ready lambda's LambdaScopeInfo. 
/// /// Climbs down the stack of lambdas (deepest nested lambda - i.e. current /// lambda - is on top) to determine the index of the nearest enclosing/outer /// lambda that is ready to capture the \p VarToCapture being referenced in /// the current lambda. /// As we climb down the stack, we want the index of the first such lambda - /// that is the lambda with the highest index that is 'capture-ready'. /// /// A lambda 'L' is capture-ready for 'V' (var or this) if: /// - its enclosing context is non-dependent /// - and if the chain of lambdas between L and the lambda in which /// V is potentially used (i.e. the lambda at the top of the scope info /// stack), can all capture or have already captured V. /// If \p VarToCapture is 'null' then we are trying to capture 'this'. /// /// Note that a lambda that is deemed 'capture-ready' still needs to be checked /// for whether it is 'capture-capable' (see /// getStackIndexOfNearestEnclosingCaptureCapableLambda), before it can truly /// capture. /// /// \param FunctionScopes - Sema's stack of nested FunctionScopeInfo's (which a /// LambdaScopeInfo inherits from). The current/deepest/innermost lambda /// is at the top of the stack and has the highest index. /// \param VarToCapture - the variable to capture. If NULL, capture 'this'. /// /// \returns An Optional Index that if evaluates to 'true' contains /// the index (into Sema's FunctionScopeInfo stack) of the innermost lambda /// which is capture-ready. If the return value evaluates to 'false' then /// no lambda is capture-ready for \p VarToCapture. static inline Optional getStackIndexOfNearestEnclosingCaptureReadyLambda( ArrayRef FunctionScopes, VarDecl *VarToCapture) { // Label failure to capture. const Optional NoLambdaIsCaptureReady; // Ignore all inner captured regions. unsigned CurScopeIndex = FunctionScopes.size() - 1; while (CurScopeIndex > 0 && isa( FunctionScopes[CurScopeIndex])) --CurScopeIndex; assert( isa(FunctionScopes[CurScopeIndex]) && "The function on the top of sema's function-info stack must be a lambda"); // If VarToCapture is null, we are attempting to capture 'this'. const bool IsCapturingThis = !VarToCapture; const bool IsCapturingVariable = !IsCapturingThis; // Start with the current lambda at the top of the stack (highest index). DeclContext *EnclosingDC = cast(FunctionScopes[CurScopeIndex])->CallOperator; do { const clang::sema::LambdaScopeInfo *LSI = cast(FunctionScopes[CurScopeIndex]); // IF we have climbed down to an intervening enclosing lambda that contains // the variable declaration - it obviously can/must not capture the // variable. // Since its enclosing DC is dependent, all the lambdas between it and the // innermost nested lambda are dependent (otherwise we wouldn't have // arrived here) - so we don't yet have a lambda that can capture the // variable. if (IsCapturingVariable && VarToCapture->getDeclContext()->Equals(EnclosingDC)) return NoLambdaIsCaptureReady; // For an enclosing lambda to be capture ready for an entity, all // intervening lambda's have to be able to capture that entity. If even // one of the intervening lambda's is not capable of capturing the entity // then no enclosing lambda can ever capture that entity. // For e.g. 
// const int x = 10; // [=](auto a) { #1 // [](auto b) { #2 <-- an intervening lambda that can never capture 'x' // [=](auto c) { #3 // f(x, c); <-- can not lead to x's speculative capture by #1 or #2 // }; }; }; // If they do not have a default implicit capture, check to see // if the entity has already been explicitly captured. // If even a single dependent enclosing lambda lacks the capability // to ever capture this variable, there is no further enclosing // non-dependent lambda that can capture this variable. if (LSI->ImpCaptureStyle == sema::LambdaScopeInfo::ImpCap_None) { if (IsCapturingVariable && !LSI->isCaptured(VarToCapture)) return NoLambdaIsCaptureReady; if (IsCapturingThis && !LSI->isCXXThisCaptured()) return NoLambdaIsCaptureReady; } EnclosingDC = getLambdaAwareParentOfDeclContext(EnclosingDC); assert(CurScopeIndex); --CurScopeIndex; } while (!EnclosingDC->isTranslationUnit() && EnclosingDC->isDependentContext() && isLambdaCallOperator(EnclosingDC)); assert(CurScopeIndex < (FunctionScopes.size() - 1)); // If the enclosingDC is not dependent, then the immediately nested lambda // (one index above) is capture-ready. if (!EnclosingDC->isDependentContext()) return CurScopeIndex + 1; return NoLambdaIsCaptureReady; } /// Examines the FunctionScopeInfo stack to determine the nearest /// enclosing lambda (to the current lambda) that is 'capture-capable' for /// the variable referenced in the current lambda (i.e. \p VarToCapture). /// If successful, returns the index into Sema's FunctionScopeInfo stack /// of the capture-capable lambda's LambdaScopeInfo. /// /// Given the current stack of lambdas being processed by Sema and /// the variable of interest, to identify the nearest enclosing lambda (to the /// current lambda at the top of the stack) that can truly capture /// a variable, it has to have the following two properties: /// a) 'capture-ready' - be the innermost lambda that is 'capture-ready': /// - climb down the stack (i.e. starting from the innermost and examining /// each outer lambda step by step) checking if each enclosing /// lambda can either implicitly or explicitly capture the variable. /// Record the first such lambda that is enclosed in a non-dependent /// context. If no such lambda currently exists return failure. /// b) 'capture-capable' - make sure the 'capture-ready' lambda can truly /// capture the variable by checking all its enclosing lambdas: /// - check if all outer lambdas enclosing the 'capture-ready' lambda /// identified above in 'a' can also capture the variable (this is done /// via tryCaptureVariable for variables and CheckCXXThisCapture for /// 'this' by passing in the index of the Lambda identified in step 'a') /// /// \param FunctionScopes - Sema's stack of nested FunctionScopeInfo's (which a /// LambdaScopeInfo inherits from). The current/deepest/innermost lambda /// is at the top of the stack. /// /// \param VarToCapture - the variable to capture. If NULL, capture 'this'. /// /// /// \returns An Optional Index that if evaluates to 'true' contains /// the index (into Sema's FunctionScopeInfo stack) of the innermost lambda /// which is capture-capable. If the return value evaluates to 'false' then /// no lambda is capture-capable for \p VarToCapture. 
Optional clang::getStackIndexOfNearestEnclosingCaptureCapableLambda( ArrayRef FunctionScopes, VarDecl *VarToCapture, Sema &S) { const Optional NoLambdaIsCaptureCapable; const Optional OptionalStackIndex = getStackIndexOfNearestEnclosingCaptureReadyLambda(FunctionScopes, VarToCapture); if (!OptionalStackIndex) return NoLambdaIsCaptureCapable; const unsigned IndexOfCaptureReadyLambda = OptionalStackIndex.getValue(); assert(((IndexOfCaptureReadyLambda != (FunctionScopes.size() - 1)) || S.getCurGenericLambda()) && "The capture ready lambda for a potential capture can only be the " "current lambda if it is a generic lambda"); const sema::LambdaScopeInfo *const CaptureReadyLambdaLSI = cast(FunctionScopes[IndexOfCaptureReadyLambda]); // If VarToCapture is null, we are attempting to capture 'this' const bool IsCapturingThis = !VarToCapture; const bool IsCapturingVariable = !IsCapturingThis; if (IsCapturingVariable) { // Check if the capture-ready lambda can truly capture the variable, by // checking whether all enclosing lambdas of the capture-ready lambda allow // the capture - i.e. make sure it is capture-capable. QualType CaptureType, DeclRefType; const bool CanCaptureVariable = !S.tryCaptureVariable(VarToCapture, /*ExprVarIsUsedInLoc*/ SourceLocation(), clang::Sema::TryCapture_Implicit, /*EllipsisLoc*/ SourceLocation(), /*BuildAndDiagnose*/ false, CaptureType, DeclRefType, &IndexOfCaptureReadyLambda); if (!CanCaptureVariable) return NoLambdaIsCaptureCapable; } else { // Check if the capture-ready lambda can truly capture 'this' by checking // whether all enclosing lambdas of the capture-ready lambda can capture // 'this'. const bool CanCaptureThis = !S.CheckCXXThisCapture( CaptureReadyLambdaLSI->PotentialThisCaptureLocation, /*Explicit*/ false, /*BuildAndDiagnose*/ false, &IndexOfCaptureReadyLambda); if (!CanCaptureThis) return NoLambdaIsCaptureCapable; } return IndexOfCaptureReadyLambda; } static inline TemplateParameterList * getGenericLambdaTemplateParameterList(LambdaScopeInfo *LSI, Sema &SemaRef) { if (!LSI->GLTemplateParameterList && !LSI->TemplateParams.empty()) { LSI->GLTemplateParameterList = TemplateParameterList::Create( SemaRef.Context, /*Template kw loc*/ SourceLocation(), /*L angle loc*/ LSI->ExplicitTemplateParamsRange.getBegin(), LSI->TemplateParams, /*R angle loc*/LSI->ExplicitTemplateParamsRange.getEnd(), nullptr); } return LSI->GLTemplateParameterList; } CXXRecordDecl *Sema::createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault) { DeclContext *DC = CurContext; while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext())) DC = DC->getParent(); bool IsGenericLambda = getGenericLambdaTemplateParameterList(getCurLambda(), *this); // Start constructing the lambda class. CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(Context, DC, Info, IntroducerRange.getBegin(), KnownDependent, IsGenericLambda, CaptureDefault); DC->addDecl(Class); return Class; } /// Determine whether the given context is or is enclosed in an inline /// function. static bool isInInlineFunction(const DeclContext *DC) { while (!DC->isFileContext()) { if (const FunctionDecl *FD = dyn_cast(DC)) if (FD->isInlined()) return true; DC = DC->getLexicalParent(); } return false; } std::tuple Sema::getCurrentMangleNumberContext(const DeclContext *DC) { // Compute the context for allocating mangling numbers in the current // expression, if the ABI requires them. 
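  // Illustrative example (hypothetical class): in
  //   struct S { int n = []{ return 1; }(); };
  // the closure type appears in a non-static data member initializer, so every
  // translation unit defining S must give the lambda the same mangling number;
  // the DataMember case below routes such lambdas to a numbering context keyed
  // on that member declaration.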
Decl *ManglingContextDecl = ExprEvalContexts.back().ManglingContextDecl; enum ContextKind { Normal, DefaultArgument, DataMember, StaticDataMember, InlineVariable, VariableTemplate } Kind = Normal; // Default arguments of member function parameters that appear in a class // definition, as well as the initializers of data members, receive special // treatment. Identify them. if (ManglingContextDecl) { if (ParmVarDecl *Param = dyn_cast(ManglingContextDecl)) { if (const DeclContext *LexicalDC = Param->getDeclContext()->getLexicalParent()) if (LexicalDC->isRecord()) Kind = DefaultArgument; } else if (VarDecl *Var = dyn_cast(ManglingContextDecl)) { if (Var->getDeclContext()->isRecord()) Kind = StaticDataMember; else if (Var->getMostRecentDecl()->isInline()) Kind = InlineVariable; else if (Var->getDescribedVarTemplate()) Kind = VariableTemplate; else if (auto *VTS = dyn_cast(Var)) { if (!VTS->isExplicitSpecialization()) Kind = VariableTemplate; } } else if (isa(ManglingContextDecl)) { Kind = DataMember; } } // Itanium ABI [5.1.7]: // In the following contexts [...] the one-definition rule requires closure // types in different translation units to "correspond": bool IsInNonspecializedTemplate = inTemplateInstantiation() || CurContext->isDependentContext(); switch (Kind) { case Normal: { // -- the bodies of non-exported nonspecialized template functions // -- the bodies of inline functions if ((IsInNonspecializedTemplate && !(ManglingContextDecl && isa(ManglingContextDecl))) || isInInlineFunction(CurContext)) { while (auto *CD = dyn_cast(DC)) DC = CD->getParent(); return std::make_tuple(&Context.getManglingNumberContext(DC), nullptr); } return std::make_tuple(nullptr, nullptr); } case StaticDataMember: // -- the initializers of nonspecialized static members of template classes if (!IsInNonspecializedTemplate) return std::make_tuple(nullptr, ManglingContextDecl); // Fall through to get the current context. LLVM_FALLTHROUGH; case DataMember: // -- the in-class initializers of class members case DefaultArgument: // -- default arguments appearing in class definitions case InlineVariable: // -- the initializers of inline variables case VariableTemplate: // -- the initializers of templated variables return std::make_tuple( &Context.getManglingNumberContext(ASTContext::NeedExtraManglingDecl, ManglingContextDecl), ManglingContextDecl); } llvm_unreachable("unexpected context"); } CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodTypeInfo, SourceLocation EndLoc, ArrayRef Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause) { QualType MethodType = MethodTypeInfo->getType(); TemplateParameterList *TemplateParams = getGenericLambdaTemplateParameterList(getCurLambda(), *this); // If a lambda appears in a dependent context or is a generic lambda (has // template parameters) and has an 'auto' return type, deduce it to a // dependent type. 
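  // Illustrative example: for
  //   auto L = [](auto x) { return x + 1; };
  // the call operator's 'auto' return type cannot be deduced until the call
  // operator is instantiated, so it is replaced with a dependent placeholder
  // type here.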
if (Class->isDependentContext() || TemplateParams) { const FunctionProtoType *FPT = MethodType->castAs(); QualType Result = FPT->getReturnType(); if (Result->isUndeducedType()) { Result = SubstAutoType(Result, Context.DependentTy); MethodType = Context.getFunctionType(Result, FPT->getParamTypes(), FPT->getExtProtoInfo()); } } // C++11 [expr.prim.lambda]p5: // The closure type for a lambda-expression has a public inline function // call operator (13.5.4) whose parameters and return type are described by // the lambda-expression's parameter-declaration-clause and // trailing-return-type respectively. DeclarationName MethodName = Context.DeclarationNames.getCXXOperatorName(OO_Call); DeclarationNameLoc MethodNameLoc; MethodNameLoc.CXXOperatorName.BeginOpNameLoc = IntroducerRange.getBegin().getRawEncoding(); MethodNameLoc.CXXOperatorName.EndOpNameLoc = IntroducerRange.getEnd().getRawEncoding(); CXXMethodDecl *Method = CXXMethodDecl::Create( Context, Class, EndLoc, DeclarationNameInfo(MethodName, IntroducerRange.getBegin(), MethodNameLoc), MethodType, MethodTypeInfo, SC_None, /*isInline=*/true, ConstexprKind, EndLoc, TrailingRequiresClause); Method->setAccess(AS_public); if (!TemplateParams) Class->addDecl(Method); // Temporarily set the lexical declaration context to the current // context, so that the Scope stack matches the lexical nesting. Method->setLexicalDeclContext(CurContext); // Create a function template if we have a template parameter list FunctionTemplateDecl *const TemplateMethod = TemplateParams ? FunctionTemplateDecl::Create(Context, Class, Method->getLocation(), MethodName, TemplateParams, Method) : nullptr; if (TemplateMethod) { TemplateMethod->setAccess(AS_public); Method->setDescribedFunctionTemplate(TemplateMethod); Class->addDecl(TemplateMethod); TemplateMethod->setLexicalDeclContext(CurContext); } // Add parameters. if (!Params.empty()) { Method->setParams(Params); CheckParmsForFunctionDef(Params, /*CheckParameterNames=*/false); for (auto P : Method->parameters()) P->setOwningFunction(Method); } return Method; } void Sema::handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional> Mangling) { if (Mangling) { unsigned ManglingNumber; bool HasKnownInternalLinkage; Decl *ManglingContextDecl; std::tie(ManglingNumber, HasKnownInternalLinkage, ManglingContextDecl) = Mangling.getValue(); Class->setLambdaMangling(ManglingNumber, ManglingContextDecl, HasKnownInternalLinkage); return; } auto getMangleNumberingContext = [this](CXXRecordDecl *Class, Decl *ManglingContextDecl) -> MangleNumberingContext * { // Get mangle numbering context if there's any extra decl context. if (ManglingContextDecl) return &Context.getManglingNumberContext( ASTContext::NeedExtraManglingDecl, ManglingContextDecl); // Otherwise, from that lambda's decl context. auto DC = Class->getDeclContext(); while (auto *CD = dyn_cast(DC)) DC = CD->getParent(); return &Context.getManglingNumberContext(DC); }; MangleNumberingContext *MCtx; Decl *ManglingContextDecl; std::tie(MCtx, ManglingContextDecl) = getCurrentMangleNumberContext(Class->getDeclContext()); bool HasKnownInternalLinkage = false; if (!MCtx && getLangOpts().CUDA) { // Force lambda numbering in CUDA/HIP as we need to name lambdas following // ODR. Both device- and host-compilation need to have a consistent naming // on kernel functions. As lambdas are potential part of these `__global__` // function names, they needs numbering following ODR. 
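    // Illustrative example (hypothetical kernel): a template such as
    //   template <class F> __global__ void kernel(F f) { f(); }
    // instantiated with a lambda embeds the closure type's mangled name in the
    // kernel's mangled name, so the host and device compilations must assign
    // the lambda the same number.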
MCtx = getMangleNumberingContext(Class, ManglingContextDecl); assert(MCtx && "Retrieving mangle numbering context failed!"); HasKnownInternalLinkage = true; } if (MCtx) { unsigned ManglingNumber = MCtx->getManglingNumber(Method); Class->setLambdaMangling(ManglingNumber, ManglingContextDecl, HasKnownInternalLinkage); } } void Sema::buildLambdaScope(LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable) { LSI->CallOperator = CallOperator; CXXRecordDecl *LambdaClass = CallOperator->getParent(); LSI->Lambda = LambdaClass; if (CaptureDefault == LCD_ByCopy) LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByval; else if (CaptureDefault == LCD_ByRef) LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByref; LSI->CaptureDefaultLoc = CaptureDefaultLoc; LSI->IntroducerRange = IntroducerRange; LSI->ExplicitParams = ExplicitParams; LSI->Mutable = Mutable; if (ExplicitResultType) { LSI->ReturnType = CallOperator->getReturnType(); if (!LSI->ReturnType->isDependentType() && !LSI->ReturnType->isVoidType()) { if (RequireCompleteType(CallOperator->getBeginLoc(), LSI->ReturnType, diag::err_lambda_incomplete_result)) { // Do nothing. } } } else { LSI->HasImplicitReturnType = true; } } void Sema::finishLambdaExplicitCaptures(LambdaScopeInfo *LSI) { LSI->finishedExplicitCaptures(); } void Sema::ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef TParams, SourceLocation RAngleLoc) { LambdaScopeInfo *LSI = getCurLambda(); assert(LSI && "Expected a lambda scope"); assert(LSI->NumExplicitTemplateParams == 0 && "Already acted on explicit template parameters"); assert(LSI->TemplateParams.empty() && "Explicit template parameters should come " "before invented (auto) ones"); assert(!TParams.empty() && "No template parameters to act on"); LSI->TemplateParams.append(TParams.begin(), TParams.end()); LSI->NumExplicitTemplateParams = TParams.size(); LSI->ExplicitTemplateParamsRange = {LAngleLoc, RAngleLoc}; } void Sema::addLambdaParameters( ArrayRef Captures, CXXMethodDecl *CallOperator, Scope *CurScope) { // Introduce our parameters into the function scope for (unsigned p = 0, NumParams = CallOperator->getNumParams(); p < NumParams; ++p) { ParmVarDecl *Param = CallOperator->getParamDecl(p); // If this has an identifier, add it to the scope stack. if (CurScope && Param->getIdentifier()) { bool Error = false; // Resolution of CWG 2211 in C++17 renders shadowing ill-formed, but we // retroactively apply it. for (const auto &Capture : Captures) { if (Capture.Id == Param->getIdentifier()) { Error = true; Diag(Param->getLocation(), diag::err_parameter_shadow_capture); Diag(Capture.Loc, diag::note_var_explicitly_captured_here) << Capture.Id << true; } } if (!Error) CheckShadow(CurScope, Param); PushOnScopeChains(Param, CurScope); } } } /// If this expression is an enumerator-like expression of some type /// T, return the type T; otherwise, return null. /// /// Pointer comparisons on the result here should always work because /// it's derived from either the parent of an EnumConstantDecl /// (i.e. the definition) or the declaration returned by /// EnumType::getDecl() (i.e. the definition). 
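/// For example (Foundation-style enumerators, shown only for illustration), a
/// block whose return statements are
///   return NSOrderedAscending;
/// and
///   return NSOrderedSame;
/// has returns that are all enumerator-like for NSComparisonResult, so that
/// enum is inferred as the block's return type.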
static EnumDecl *findEnumForBlockReturn(Expr *E) { // An expression is an enumerator-like expression of type T if, // ignoring parens and parens-like expressions: E = E->IgnoreParens(); // - it is an enumerator whose enum type is T or if (DeclRefExpr *DRE = dyn_cast(E)) { if (EnumConstantDecl *D = dyn_cast(DRE->getDecl())) { return cast(D->getDeclContext()); } return nullptr; } // - it is a comma expression whose RHS is an enumerator-like // expression of type T or if (BinaryOperator *BO = dyn_cast(E)) { if (BO->getOpcode() == BO_Comma) return findEnumForBlockReturn(BO->getRHS()); return nullptr; } // - it is a statement-expression whose value expression is an // enumerator-like expression of type T or if (StmtExpr *SE = dyn_cast(E)) { if (Expr *last = dyn_cast_or_null(SE->getSubStmt()->body_back())) return findEnumForBlockReturn(last); return nullptr; } // - it is a ternary conditional operator (not the GNU ?: // extension) whose second and third operands are // enumerator-like expressions of type T or if (ConditionalOperator *CO = dyn_cast(E)) { if (EnumDecl *ED = findEnumForBlockReturn(CO->getTrueExpr())) if (ED == findEnumForBlockReturn(CO->getFalseExpr())) return ED; return nullptr; } // (implicitly:) // - it is an implicit integral conversion applied to an // enumerator-like expression of type T or if (ImplicitCastExpr *ICE = dyn_cast(E)) { // We can sometimes see integral conversions in valid // enumerator-like expressions. if (ICE->getCastKind() == CK_IntegralCast) return findEnumForBlockReturn(ICE->getSubExpr()); // Otherwise, just rely on the type. } // - it is an expression of that formal enum type. if (const EnumType *ET = E->getType()->getAs()) { return ET->getDecl(); } // Otherwise, nope. return nullptr; } /// Attempt to find a type T for which the returned expression of the /// given statement is an enumerator-like expression of that type. static EnumDecl *findEnumForBlockReturn(ReturnStmt *ret) { if (Expr *retValue = ret->getRetValue()) return findEnumForBlockReturn(retValue); return nullptr; } /// Attempt to find a common type T for which all of the returned /// expressions in a block are enumerator-like expressions of that /// type. static EnumDecl *findCommonEnumForBlockReturns(ArrayRef returns) { ArrayRef::iterator i = returns.begin(), e = returns.end(); // Try to find one for the first return. EnumDecl *ED = findEnumForBlockReturn(*i); if (!ED) return nullptr; // Check that the rest of the returns have the same enum. for (++i; i != e; ++i) { if (findEnumForBlockReturn(*i) != ED) return nullptr; } // Never infer an anonymous enum type. if (!ED->hasNameForLinkage()) return nullptr; return ED; } /// Adjust the given return statements so that they formally return /// the given type. It should require, at most, an IntegralCast. static void adjustBlockReturnsToEnum(Sema &S, ArrayRef returns, QualType returnType) { for (ArrayRef::iterator i = returns.begin(), e = returns.end(); i != e; ++i) { ReturnStmt *ret = *i; Expr *retValue = ret->getRetValue(); if (S.Context.hasSameType(retValue->getType(), returnType)) continue; // Right now we only support integral fixup casts. assert(returnType->isIntegralOrUnscopedEnumerationType()); assert(retValue->getType()->isIntegralOrUnscopedEnumerationType()); ExprWithCleanups *cleanups = dyn_cast(retValue); Expr *E = (cleanups ? 
cleanups->getSubExpr() : retValue); E = ImplicitCastExpr::Create(S.Context, returnType, CK_IntegralCast, E, /*base path*/ nullptr, VK_RValue); if (cleanups) { cleanups->setSubExpr(E); } else { ret->setRetValue(E); } } } void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) { assert(CSI.HasImplicitReturnType); // If it was ever a placeholder, it had to been deduced to DependentTy. assert(CSI.ReturnType.isNull() || !CSI.ReturnType->isUndeducedType()); assert((!isa(CSI) || !getLangOpts().CPlusPlus14) && "lambda expressions use auto deduction in C++14 onwards"); // C++ core issue 975: // If a lambda-expression does not include a trailing-return-type, // it is as if the trailing-return-type denotes the following type: // - if there are no return statements in the compound-statement, // or all return statements return either an expression of type // void or no expression or braced-init-list, the type void; // - otherwise, if all return statements return an expression // and the types of the returned expressions after // lvalue-to-rvalue conversion (4.1 [conv.lval]), // array-to-pointer conversion (4.2 [conv.array]), and // function-to-pointer conversion (4.3 [conv.func]) are the // same, that common type; // - otherwise, the program is ill-formed. // // C++ core issue 1048 additionally removes top-level cv-qualifiers // from the types of returned expressions to match the C++14 auto // deduction rules. // // In addition, in blocks in non-C++ modes, if all of the return // statements are enumerator-like expressions of some type T, where // T has a name for linkage, then we infer the return type of the // block to be that type. // First case: no return statements, implicit void return type. ASTContext &Ctx = getASTContext(); if (CSI.Returns.empty()) { // It's possible there were simply no /valid/ return statements. // In this case, the first one we found may have at least given us a type. if (CSI.ReturnType.isNull()) CSI.ReturnType = Ctx.VoidTy; return; } // Second case: at least one return statement has dependent type. // Delay type checking until instantiation. assert(!CSI.ReturnType.isNull() && "We should have a tentative return type."); if (CSI.ReturnType->isDependentType()) return; // Try to apply the enum-fuzz rule. if (!getLangOpts().CPlusPlus) { assert(isa(CSI)); const EnumDecl *ED = findCommonEnumForBlockReturns(CSI.Returns); if (ED) { CSI.ReturnType = Context.getTypeDeclType(ED); adjustBlockReturnsToEnum(*this, CSI.Returns, CSI.ReturnType); return; } } // Third case: only one return statement. Don't bother doing extra work! if (CSI.Returns.size() == 1) return; // General case: many return statements. // Check that they all have compatible return types. // We require the return types to strictly match here. // Note that we've already done the required promotions as part of // processing the return statement. for (const ReturnStmt *RS : CSI.Returns) { const Expr *RetE = RS->getRetValue(); QualType ReturnType = (RetE ? RetE->getType() : Context.VoidTy).getUnqualifiedType(); if (Context.getCanonicalFunctionResultType(ReturnType) == Context.getCanonicalFunctionResultType(CSI.ReturnType)) { // Use the return type with the strictest possible nullability annotation. 
auto RetTyNullability = ReturnType->getNullability(Ctx); auto BlockNullability = CSI.ReturnType->getNullability(Ctx); if (BlockNullability && (!RetTyNullability || hasWeakerNullability(*RetTyNullability, *BlockNullability))) CSI.ReturnType = ReturnType; continue; } // FIXME: This is a poor diagnostic for ReturnStmts without expressions. // TODO: It's possible that the *first* return is the divergent one. Diag(RS->getBeginLoc(), diag::err_typecheck_missing_return_type_incompatible) << ReturnType << CSI.ReturnType << isa(CSI); // Continue iterating so that we keep emitting diagnostics. } } QualType Sema::buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional NumExpansions, IdentifierInfo *Id, bool IsDirectInit, Expr *&Init) { // Create an 'auto' or 'auto&' TypeSourceInfo that we can use to // deduce against. QualType DeductType = Context.getAutoDeductType(); TypeLocBuilder TLB; AutoTypeLoc TL = TLB.push(DeductType); TL.setNameLoc(Loc); if (ByRef) { DeductType = BuildReferenceType(DeductType, true, Loc, Id); assert(!DeductType.isNull() && "can't build reference to auto"); TLB.push(DeductType).setSigilLoc(Loc); } if (EllipsisLoc.isValid()) { if (Init->containsUnexpandedParameterPack()) { Diag(EllipsisLoc, getLangOpts().CPlusPlus20 ? diag::warn_cxx17_compat_init_capture_pack : diag::ext_init_capture_pack); - DeductType = Context.getPackExpansionType(DeductType, NumExpansions); + DeductType = Context.getPackExpansionType(DeductType, NumExpansions, + /*ExpectPackInType=*/false); TLB.push(DeductType).setEllipsisLoc(EllipsisLoc); } else { // Just ignore the ellipsis for now and form a non-pack variable. We'll // diagnose this later when we try to capture it. } } TypeSourceInfo *TSI = TLB.getTypeSourceInfo(Context, DeductType); // Deduce the type of the init capture. QualType DeducedType = deduceVarTypeFromInitializer( /*VarDecl*/nullptr, DeclarationName(Id), DeductType, TSI, SourceRange(Loc, Loc), IsDirectInit, Init); if (DeducedType.isNull()) return QualType(); // Are we a non-list direct initialization? ParenListExpr *CXXDirectInit = dyn_cast(Init); // Perform initialization analysis and ensure any implicit conversions // (such as lvalue-to-rvalue) are enforced. InitializedEntity Entity = InitializedEntity::InitializeLambdaCapture(Id, DeducedType, Loc); InitializationKind Kind = IsDirectInit ? (CXXDirectInit ? InitializationKind::CreateDirect( Loc, Init->getBeginLoc(), Init->getEndLoc()) : InitializationKind::CreateDirectList(Loc)) : InitializationKind::CreateCopy(Loc, Init->getBeginLoc()); MultiExprArg Args = Init; if (CXXDirectInit) Args = MultiExprArg(CXXDirectInit->getExprs(), CXXDirectInit->getNumExprs()); QualType DclT; InitializationSequence InitSeq(*this, Entity, Kind, Args); ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT); if (Result.isInvalid()) return QualType(); Init = Result.getAs(); return DeducedType; } VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init) { // FIXME: Retain the TypeSourceInfo from buildLambdaInitCaptureInitialization // rather than reconstructing it here. TypeSourceInfo *TSI = Context.getTrivialTypeSourceInfo(InitCaptureType, Loc); if (auto PETL = TSI->getTypeLoc().getAs()) PETL.setEllipsisLoc(EllipsisLoc); // Create a dummy variable representing the init-capture. This is not actually // used as a variable, and only exists as a way to name and refer to the // init-capture. 
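// Illustrative examples: the init-captures in
//   [n = 0]{ }, [&r = x]{ }, and (C++20) [...vs = pack]{ }
// are each represented by one of these dummy variables; for the pack form the
// variable's type is a pack expansion, formed above in
// buildLambdaInitCaptureInitialization before deduction.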
// FIXME: Pass in separate source locations for '&' and identifier. VarDecl *NewVD = VarDecl::Create(Context, CurContext, Loc, Loc, Id, InitCaptureType, TSI, SC_Auto); NewVD->setInitCapture(true); NewVD->setReferenced(true); // FIXME: Pass in a VarDecl::InitializationStyle. NewVD->setInitStyle(static_cast(InitStyle)); NewVD->markUsed(Context); NewVD->setInit(Init); if (NewVD->isParameterPack()) getCurLambda()->LocalPacks.push_back(NewVD); return NewVD; } void Sema::addInitCapture(LambdaScopeInfo *LSI, VarDecl *Var) { assert(Var->isInitCapture() && "init capture flag should be set"); LSI->addCapture(Var, /*isBlock*/false, Var->getType()->isReferenceType(), /*isNested*/false, Var->getLocation(), SourceLocation(), Var->getType(), /*Invalid*/false); } void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope) { LambdaScopeInfo *const LSI = getCurLambda(); assert(LSI && "LambdaScopeInfo should be on stack!"); // Determine if we're within a context where we know that the lambda will // be dependent, because there are template parameters in scope. bool KnownDependent; if (LSI->NumExplicitTemplateParams > 0) { auto *TemplateParamScope = CurScope->getTemplateParamParent(); assert(TemplateParamScope && "Lambda with explicit template param list should establish a " "template param scope"); assert(TemplateParamScope->getParent()); KnownDependent = TemplateParamScope->getParent() ->getTemplateParamParent() != nullptr; } else { KnownDependent = CurScope->getTemplateParamParent() != nullptr; } // Determine the signature of the call operator. TypeSourceInfo *MethodTyInfo; bool ExplicitParams = true; bool ExplicitResultType = true; bool ContainsUnexpandedParameterPack = false; SourceLocation EndLoc; SmallVector Params; if (ParamInfo.getNumTypeObjects() == 0) { // C++11 [expr.prim.lambda]p4: // If a lambda-expression does not include a lambda-declarator, it is as // if the lambda-declarator were (). FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention( /*IsVariadic=*/false, /*IsCXXMethod=*/true)); EPI.HasTrailingReturn = true; EPI.TypeQuals.addConst(); LangAS AS = getDefaultCXXMethodAddrSpace(); if (AS != LangAS::Default) EPI.TypeQuals.addAddressSpace(AS); // C++1y [expr.prim.lambda]: // The lambda return type is 'auto', which is replaced by the // trailing-return type if provided and/or deduced from 'return' // statements // We don't do this before C++1y, because we don't support deduced return // types there. QualType DefaultTypeForNoTrailingReturn = getLangOpts().CPlusPlus14 ? Context.getAutoDeductType() : Context.DependentTy; QualType MethodTy = Context.getFunctionType(DefaultTypeForNoTrailingReturn, None, EPI); MethodTyInfo = Context.getTrivialTypeSourceInfo(MethodTy); ExplicitParams = false; ExplicitResultType = false; EndLoc = Intro.Range.getEnd(); } else { assert(ParamInfo.isFunctionDeclarator() && "lambda-declarator is a function"); DeclaratorChunk::FunctionTypeInfo &FTI = ParamInfo.getFunctionTypeInfo(); // C++11 [expr.prim.lambda]p5: // This function call operator is declared const (9.3.1) if and only if // the lambda-expression's parameter-declaration-clause is not followed // by mutable. It is neither virtual nor declared volatile. [...] 
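    // Illustrative example:
    //   [x]() { return x; }        // operator() is implicitly const
    //   [x]() mutable { ++x; }     // 'mutable' suppresses the const added below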
if (!FTI.hasMutableQualifier()) { FTI.getOrCreateMethodQualifiers().SetTypeQual(DeclSpec::TQ_const, SourceLocation()); } MethodTyInfo = GetTypeForDeclarator(ParamInfo, CurScope); assert(MethodTyInfo && "no type from lambda-declarator"); EndLoc = ParamInfo.getSourceRange().getEnd(); ExplicitResultType = FTI.hasTrailingReturnType(); if (FTIHasNonVoidParameters(FTI)) { Params.reserve(FTI.NumParams); for (unsigned i = 0, e = FTI.NumParams; i != e; ++i) Params.push_back(cast(FTI.Params[i].Param)); } // Check for unexpanded parameter packs in the method type. if (MethodTyInfo->getType()->containsUnexpandedParameterPack()) DiagnoseUnexpandedParameterPack(Intro.Range.getBegin(), MethodTyInfo, UPPC_DeclarationType); } CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, MethodTyInfo, KnownDependent, Intro.Default); CXXMethodDecl *Method = startLambdaDefinition(Class, Intro.Range, MethodTyInfo, EndLoc, Params, ParamInfo.getDeclSpec().getConstexprSpecifier(), ParamInfo.getTrailingRequiresClause()); if (ExplicitParams) CheckCXXDefaultArguments(Method); // This represents the function body for the lambda function, check if we // have to apply optnone due to a pragma. AddRangeBasedOptnone(Method); // code_seg attribute on lambda apply to the method. if (Attr *A = getImplicitCodeSegOrSectionAttrForFunction(Method, /*IsDefinition=*/true)) Method->addAttr(A); // Attributes on the lambda apply to the method. ProcessDeclAttributes(CurScope, Method, ParamInfo); // CUDA lambdas get implicit host and device attributes. if (getLangOpts().CUDA) CUDASetLambdaAttrs(Method); // Number the lambda for linkage purposes if necessary. handleLambdaNumbering(Class, Method); // Introduce the function call operator as the current declaration context. PushDeclContext(CurScope, Method); // Build the lambda scope. buildLambdaScope(LSI, Method, Intro.Range, Intro.Default, Intro.DefaultLoc, ExplicitParams, ExplicitResultType, !Method->isConst()); // C++11 [expr.prim.lambda]p9: // A lambda-expression whose smallest enclosing scope is a block scope is a // local lambda expression; any other lambda expression shall not have a // capture-default or simple-capture in its lambda-introducer. // // For simple-captures, this is covered by the check below that any named // entity is a variable that can be captured. // // For DR1632, we also allow a capture-default in any context where we can // odr-use 'this' (in particular, in a default initializer for a non-static // data member). if (Intro.Default != LCD_None && !Class->getParent()->isFunctionOrMethod() && (getCurrentThisType().isNull() || CheckCXXThisCapture(SourceLocation(), /*Explicit*/true, /*BuildAndDiagnose*/false))) Diag(Intro.DefaultLoc, diag::err_capture_default_non_local); // Distinct capture names, for diagnostics. llvm::SmallSet CaptureNames; // Handle explicit captures. SourceLocation PrevCaptureLoc = Intro.Default == LCD_None? Intro.Range.getBegin() : Intro.DefaultLoc; for (auto C = Intro.Captures.begin(), E = Intro.Captures.end(); C != E; PrevCaptureLoc = C->Loc, ++C) { if (C->Kind == LCK_This || C->Kind == LCK_StarThis) { if (C->Kind == LCK_StarThis) Diag(C->Loc, !getLangOpts().CPlusPlus17 ? diag::ext_star_this_lambda_capture_cxx17 : diag::warn_cxx14_compat_star_this_lambda_capture); // C++11 [expr.prim.lambda]p8: // An identifier or this shall not appear more than once in a // lambda-capture. 
if (LSI->isCXXThisCaptured()) { Diag(C->Loc, diag::err_capture_more_than_once) << "'this'" << SourceRange(LSI->getCXXThisCapture().getLocation()) << FixItHint::CreateRemoval( SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc)); continue; } // C++2a [expr.prim.lambda]p8: // If a lambda-capture includes a capture-default that is =, // each simple-capture of that lambda-capture shall be of the form // "&identifier", "this", or "* this". [ Note: The form [&,this] is // redundant but accepted for compatibility with ISO C++14. --end note ] if (Intro.Default == LCD_ByCopy && C->Kind != LCK_StarThis) Diag(C->Loc, !getLangOpts().CPlusPlus20 ? diag::ext_equals_this_lambda_capture_cxx20 : diag::warn_cxx17_compat_equals_this_lambda_capture); // C++11 [expr.prim.lambda]p12: // If this is captured by a local lambda expression, its nearest // enclosing function shall be a non-static member function. QualType ThisCaptureType = getCurrentThisType(); if (ThisCaptureType.isNull()) { Diag(C->Loc, diag::err_this_capture) << true; continue; } CheckCXXThisCapture(C->Loc, /*Explicit=*/true, /*BuildAndDiagnose*/ true, /*FunctionScopeIndexToStopAtPtr*/ nullptr, C->Kind == LCK_StarThis); if (!LSI->Captures.empty()) LSI->ExplicitCaptureRanges[LSI->Captures.size() - 1] = C->ExplicitRange; continue; } assert(C->Id && "missing identifier for capture"); if (C->Init.isInvalid()) continue; VarDecl *Var = nullptr; if (C->Init.isUsable()) { Diag(C->Loc, getLangOpts().CPlusPlus14 ? diag::warn_cxx11_compat_init_capture : diag::ext_init_capture); // If the initializer expression is usable, but the InitCaptureType // is not, then an error has occurred - so ignore the capture for now. // for e.g., [n{0}] { }; <-- if no is included. // FIXME: we should create the init capture variable and mark it invalid // in this case. if (C->InitCaptureType.get().isNull()) continue; if (C->Init.get()->containsUnexpandedParameterPack() && !C->InitCaptureType.get()->getAs()) DiagnoseUnexpandedParameterPack(C->Init.get(), UPPC_Initializer); unsigned InitStyle; switch (C->InitKind) { case LambdaCaptureInitKind::NoInit: llvm_unreachable("not an init-capture?"); case LambdaCaptureInitKind::CopyInit: InitStyle = VarDecl::CInit; break; case LambdaCaptureInitKind::DirectInit: InitStyle = VarDecl::CallInit; break; case LambdaCaptureInitKind::ListInit: InitStyle = VarDecl::ListInit; break; } Var = createLambdaInitCaptureVarDecl(C->Loc, C->InitCaptureType.get(), C->EllipsisLoc, C->Id, InitStyle, C->Init.get()); // C++1y [expr.prim.lambda]p11: // An init-capture behaves as if it declares and explicitly // captures a variable [...] whose declarative region is the // lambda-expression's compound-statement if (Var) PushOnScopeChains(Var, CurScope, false); } else { assert(C->InitKind == LambdaCaptureInitKind::NoInit && "init capture has valid but null init?"); // C++11 [expr.prim.lambda]p8: // If a lambda-capture includes a capture-default that is &, the // identifiers in the lambda-capture shall not be preceded by &. // If a lambda-capture includes a capture-default that is =, [...] // each identifier it contains shall be preceded by &. 
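      // Illustrative examples of the redundant forms diagnosed just below:
      //   [&, &x]{ }   // '&x' repeats the '&' capture-default
      //   [=, x]{ }    // 'x' repeats the '=' capture-default
      // whereas [&, x]{ } and [=, &x]{ } are well-formed.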
if (C->Kind == LCK_ByRef && Intro.Default == LCD_ByRef) { Diag(C->Loc, diag::err_reference_capture_with_reference_default) << FixItHint::CreateRemoval( SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc)); continue; } else if (C->Kind == LCK_ByCopy && Intro.Default == LCD_ByCopy) { Diag(C->Loc, diag::err_copy_capture_with_copy_default) << FixItHint::CreateRemoval( SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc)); continue; } // C++11 [expr.prim.lambda]p10: // The identifiers in a capture-list are looked up using the usual // rules for unqualified name lookup (3.4.1) DeclarationNameInfo Name(C->Id, C->Loc); LookupResult R(*this, Name, LookupOrdinaryName); LookupName(R, CurScope); if (R.isAmbiguous()) continue; if (R.empty()) { // FIXME: Disable corrections that would add qualification? CXXScopeSpec ScopeSpec; DeclFilterCCC Validator{}; if (DiagnoseEmptyLookup(CurScope, ScopeSpec, R, Validator)) continue; } Var = R.getAsSingle(); if (Var && DiagnoseUseOfDecl(Var, C->Loc)) continue; } // C++11 [expr.prim.lambda]p8: // An identifier or this shall not appear more than once in a // lambda-capture. if (!CaptureNames.insert(C->Id).second) { if (Var && LSI->isCaptured(Var)) { Diag(C->Loc, diag::err_capture_more_than_once) << C->Id << SourceRange(LSI->getCapture(Var).getLocation()) << FixItHint::CreateRemoval( SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc)); } else // Previous capture captured something different (one or both was // an init-cpature): no fixit. Diag(C->Loc, diag::err_capture_more_than_once) << C->Id; continue; } // C++11 [expr.prim.lambda]p10: // [...] each such lookup shall find a variable with automatic storage // duration declared in the reaching scope of the local lambda expression. // Note that the 'reaching scope' check happens in tryCaptureVariable(). if (!Var) { Diag(C->Loc, diag::err_capture_does_not_name_variable) << C->Id; continue; } // Ignore invalid decls; they'll just confuse the code later. if (Var->isInvalidDecl()) continue; if (!Var->hasLocalStorage()) { Diag(C->Loc, diag::err_capture_non_automatic_variable) << C->Id; Diag(Var->getLocation(), diag::note_previous_decl) << C->Id; continue; } // C++11 [expr.prim.lambda]p23: // A capture followed by an ellipsis is a pack expansion (14.5.3). SourceLocation EllipsisLoc; if (C->EllipsisLoc.isValid()) { if (Var->isParameterPack()) { EllipsisLoc = C->EllipsisLoc; } else { Diag(C->EllipsisLoc, diag::err_pack_expansion_without_parameter_packs) << (C->Init.isUsable() ? C->Init.get()->getSourceRange() : SourceRange(C->Loc)); // Just ignore the ellipsis. } } else if (Var->isParameterPack()) { ContainsUnexpandedParameterPack = true; } if (C->Init.isUsable()) { addInitCapture(LSI, Var); } else { TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef : TryCapture_ExplicitByVal; tryCaptureVariable(Var, C->Loc, Kind, EllipsisLoc); } if (!LSI->Captures.empty()) LSI->ExplicitCaptureRanges[LSI->Captures.size() - 1] = C->ExplicitRange; } finishLambdaExplicitCaptures(LSI); LSI->ContainsUnexpandedParameterPack |= ContainsUnexpandedParameterPack; // Add lambda parameters into scope. addLambdaParameters(Intro.Captures, Method, CurScope); // Enter a new evaluation context to insulate the lambda from any // cleanups from the enclosing full-expression. PushExpressionEvaluationContext( LSI->CallOperator->isConsteval() ? 
ExpressionEvaluationContext::ConstantEvaluated : ExpressionEvaluationContext::PotentiallyEvaluated); } void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation) { LambdaScopeInfo *LSI = cast(FunctionScopes.back()); // Leave the expression-evaluation context. DiscardCleanupsInEvaluationContext(); PopExpressionEvaluationContext(); // Leave the context of the lambda. if (!IsInstantiation) PopDeclContext(); // Finalize the lambda. CXXRecordDecl *Class = LSI->Lambda; Class->setInvalidDecl(); SmallVector Fields(Class->fields()); ActOnFields(nullptr, Class->getLocation(), Class, Fields, SourceLocation(), SourceLocation(), ParsedAttributesView()); CheckCompletedCXXClass(nullptr, Class); PopFunctionScopeInfo(); } QualType Sema::getLambdaConversionFunctionResultType( const FunctionProtoType *CallOpProto) { // The function type inside the pointer type is the same as the call // operator with some tweaks. The calling convention is the default free // function convention, and the type qualifications are lost. const FunctionProtoType::ExtProtoInfo CallOpExtInfo = CallOpProto->getExtProtoInfo(); FunctionProtoType::ExtProtoInfo InvokerExtInfo = CallOpExtInfo; CallingConv CC = Context.getDefaultCallingConvention( CallOpProto->isVariadic(), /*IsCXXMethod=*/false); InvokerExtInfo.ExtInfo = InvokerExtInfo.ExtInfo.withCallingConv(CC); InvokerExtInfo.TypeQuals = Qualifiers(); assert(InvokerExtInfo.RefQualifier == RQ_None && "Lambda's call operator should not have a reference qualifier"); return Context.getFunctionType(CallOpProto->getReturnType(), CallOpProto->getParamTypes(), InvokerExtInfo); } /// Add a lambda's conversion to function pointer, as described in /// C++11 [expr.prim.lambda]p6. static void addFunctionPointerConversion(Sema &S, SourceRange IntroducerRange, CXXRecordDecl *Class, CXXMethodDecl *CallOperator) { // This conversion is explicitly disabled if the lambda's function has // pass_object_size attributes on any of its parameters. auto HasPassObjectSizeAttr = [](const ParmVarDecl *P) { return P->hasAttr(); }; if (llvm::any_of(CallOperator->parameters(), HasPassObjectSizeAttr)) return; // Add the conversion to function pointer. QualType InvokerFunctionTy = S.getLambdaConversionFunctionResultType( CallOperator->getType()->castAs()); QualType PtrToFunctionTy = S.Context.getPointerType(InvokerFunctionTy); // Create the type of the conversion function. FunctionProtoType::ExtProtoInfo ConvExtInfo( S.Context.getDefaultCallingConvention( /*IsVariadic=*/false, /*IsCXXMethod=*/true)); // The conversion function is always const and noexcept. ConvExtInfo.TypeQuals = Qualifiers(); ConvExtInfo.TypeQuals.addConst(); ConvExtInfo.ExceptionSpec.Type = EST_BasicNoexcept; QualType ConvTy = S.Context.getFunctionType(PtrToFunctionTy, None, ConvExtInfo); SourceLocation Loc = IntroducerRange.getBegin(); DeclarationName ConversionName = S.Context.DeclarationNames.getCXXConversionFunctionName( S.Context.getCanonicalType(PtrToFunctionTy)); DeclarationNameLoc ConvNameLoc; // Construct a TypeSourceInfo for the conversion function, and wire // all the parameters appropriately for the FunctionProtoTypeLoc // so that everything works during transformation/instantiation of // generic lambdas. 
// The main reason for wiring up the parameters of the conversion
  // function with that of the call operator is so that constructs
  // like the following work:
  // auto L = [](auto b) {                <-- 1
  //   return [](auto a) -> decltype(a) { <-- 2
  //     return a;
  //   };
  // };
  // int (*fp)(int) = L(5);
  // Because the trailing return type can contain DeclRefExprs that refer
  // to the original call operator's variables, we hijack the call
  // operator's ParmVarDecls below.
  TypeSourceInfo *ConvNamePtrToFunctionTSI =
      S.Context.getTrivialTypeSourceInfo(PtrToFunctionTy, Loc);
  ConvNameLoc.NamedType.TInfo = ConvNamePtrToFunctionTSI;

  // The conversion function is a conversion to a pointer-to-function.
  TypeSourceInfo *ConvTSI = S.Context.getTrivialTypeSourceInfo(ConvTy, Loc);
  FunctionProtoTypeLoc ConvTL =
      ConvTSI->getTypeLoc().getAs<FunctionProtoTypeLoc>();
  // Get the result of the conversion function which is a pointer-to-function.
  PointerTypeLoc PtrToFunctionTL =
      ConvTL.getReturnLoc().getAs<PointerTypeLoc>();
  // Do the same for the TypeSourceInfo that is used to name the conversion
  // operator.
  PointerTypeLoc ConvNamePtrToFunctionTL =
      ConvNamePtrToFunctionTSI->getTypeLoc().getAs<PointerTypeLoc>();

  // Get the underlying function types that the conversion function will
  // be converting to (should match the type of the call operator).
  FunctionProtoTypeLoc CallOpConvTL =
      PtrToFunctionTL.getPointeeLoc().getAs<FunctionProtoTypeLoc>();
  FunctionProtoTypeLoc CallOpConvNameTL =
      ConvNamePtrToFunctionTL.getPointeeLoc().getAs<FunctionProtoTypeLoc>();

  // Wire up the FunctionProtoTypeLocs with the call operator's parameters.
  // These parameters are essentially used to transform the name and
  // the type of the conversion operator. By using the same parameters
  // as the call operator's, we don't have to fix any back references that
  // the trailing return type of the call operator uses (such as
  //  decltype(some_type::type{} + decltype(a){}) etc.)
  // - we can simply use the return type of the call operator, and
  // everything should work.
  SmallVector<ParmVarDecl *, 4> InvokerParams;
  for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) {
    ParmVarDecl *From = CallOperator->getParamDecl(I);

    InvokerParams.push_back(ParmVarDecl::Create(
        S.Context,
        // Temporarily add to the TU. This is set to the invoker below.
        S.Context.getTranslationUnitDecl(), From->getBeginLoc(),
        From->getLocation(), From->getIdentifier(), From->getType(),
        From->getTypeSourceInfo(), From->getStorageClass(),
        /*DefArg=*/nullptr));
    CallOpConvTL.setParam(I, From);
    CallOpConvNameTL.setParam(I, From);
  }

  CXXConversionDecl *Conversion = CXXConversionDecl::Create(
      S.Context, Class, Loc,
      DeclarationNameInfo(ConversionName, Loc, ConvNameLoc), ConvTy, ConvTSI,
      /*isInline=*/true, ExplicitSpecifier(),
      S.getLangOpts().CPlusPlus17 ? CSK_constexpr : CSK_unspecified,
      CallOperator->getBody()->getEndLoc());
  Conversion->setAccess(AS_public);
  Conversion->setImplicit(true);

  if (Class->isGenericLambda()) {
    // Create a template version of the conversion operator, using the template
    // parameter list of the function call operator.
    FunctionTemplateDecl *TemplateCallOperator =
        CallOperator->getDescribedFunctionTemplate();
    FunctionTemplateDecl *ConversionTemplate =
        FunctionTemplateDecl::Create(
            S.Context, Class, Loc, ConversionName,
            TemplateCallOperator->getTemplateParameters(), Conversion);
    ConversionTemplate->setAccess(AS_public);
    ConversionTemplate->setImplicit(true);
    Conversion->setDescribedFunctionTemplate(ConversionTemplate);
    Class->addDecl(ConversionTemplate);
  } else
    Class->addDecl(Conversion);

  // Add a non-static member function that will be the result of
  // the conversion with a certain unique ID.
  DeclarationName InvokerName =
      &S.Context.Idents.get(getLambdaStaticInvokerName());
  // FIXME: Instead of passing in the CallOperator->getTypeSourceInfo()
  // we should get a prebuilt TrivialTypeSourceInfo from Context
  // using FunctionTy & Loc and get its TypeLoc as a FunctionProtoTypeLoc
  // then rewire the parameters accordingly, by hoisting up the InvokeParams
  // loop below and then use its Params to set Invoke->setParams(...) below.
  // This would avoid the 'const' qualifier of the call operator from
  // contaminating the type of the invoker, which is currently adjusted
  // in SemaTemplateDeduction.cpp:DeduceTemplateArguments. Fixing the
  // trailing return type of the invoker would require a visitor to rebuild
  // the trailing return type and adjusting all back DeclRefExpr's to refer
  // to the new static invoker parameters - not the call operator's.
  CXXMethodDecl *Invoke = CXXMethodDecl::Create(
      S.Context, Class, Loc, DeclarationNameInfo(InvokerName, Loc),
      InvokerFunctionTy, CallOperator->getTypeSourceInfo(), SC_Static,
      /*isInline=*/true, CSK_unspecified,
      CallOperator->getBody()->getEndLoc());
  for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I)
    InvokerParams[I]->setOwningFunction(Invoke);
  Invoke->setParams(InvokerParams);
  Invoke->setAccess(AS_private);
  Invoke->setImplicit(true);
  if (Class->isGenericLambda()) {
    FunctionTemplateDecl *TemplateCallOperator =
        CallOperator->getDescribedFunctionTemplate();
    FunctionTemplateDecl *StaticInvokerTemplate =
        FunctionTemplateDecl::Create(
            S.Context, Class, Loc, InvokerName,
            TemplateCallOperator->getTemplateParameters(), Invoke);
    StaticInvokerTemplate->setAccess(AS_private);
    StaticInvokerTemplate->setImplicit(true);
    Invoke->setDescribedFunctionTemplate(StaticInvokerTemplate);
    Class->addDecl(StaticInvokerTemplate);
  } else
    Class->addDecl(Invoke);
}

/// Add a lambda's conversion to block pointer.
static void addBlockPointerConversion(Sema &S, SourceRange IntroducerRange, CXXRecordDecl *Class, CXXMethodDecl *CallOperator) { QualType FunctionTy = S.getLambdaConversionFunctionResultType( CallOperator->getType()->castAs()); QualType BlockPtrTy = S.Context.getBlockPointerType(FunctionTy); FunctionProtoType::ExtProtoInfo ConversionEPI( S.Context.getDefaultCallingConvention( /*IsVariadic=*/false, /*IsCXXMethod=*/true)); ConversionEPI.TypeQuals = Qualifiers(); ConversionEPI.TypeQuals.addConst(); QualType ConvTy = S.Context.getFunctionType(BlockPtrTy, None, ConversionEPI); SourceLocation Loc = IntroducerRange.getBegin(); DeclarationName Name = S.Context.DeclarationNames.getCXXConversionFunctionName( S.Context.getCanonicalType(BlockPtrTy)); DeclarationNameLoc NameLoc; NameLoc.NamedType.TInfo = S.Context.getTrivialTypeSourceInfo(BlockPtrTy, Loc); CXXConversionDecl *Conversion = CXXConversionDecl::Create( S.Context, Class, Loc, DeclarationNameInfo(Name, Loc, NameLoc), ConvTy, S.Context.getTrivialTypeSourceInfo(ConvTy, Loc), /*isInline=*/true, ExplicitSpecifier(), CSK_unspecified, CallOperator->getBody()->getEndLoc()); Conversion->setAccess(AS_public); Conversion->setImplicit(true); Class->addDecl(Conversion); } ExprResult Sema::BuildCaptureInit(const Capture &Cap, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping) { // VLA captures don't have a stored initialization expression. if (Cap.isVLATypeCapture()) return ExprResult(); // An init-capture is initialized directly from its stored initializer. if (Cap.isInitCapture()) return Cap.getVariable()->getInit(); // For anything else, build an initialization expression. For an implicit // capture, the capture notionally happens at the capture-default, so use // that location here. SourceLocation Loc = ImplicitCaptureLoc.isValid() ? ImplicitCaptureLoc : Cap.getLocation(); // C++11 [expr.prim.lambda]p21: // When the lambda-expression is evaluated, the entities that // are captured by copy are used to direct-initialize each // corresponding non-static data member of the resulting closure // object. (For array members, the array elements are // direct-initialized in increasing subscript order.) These // initializations are performed in the (unspecified) order in // which the non-static data members are declared. // C++ [expr.prim.lambda]p12: // An entity captured by a lambda-expression is odr-used (3.2) in // the scope containing the lambda-expression. ExprResult Init; IdentifierInfo *Name = nullptr; if (Cap.isThisCapture()) { QualType ThisTy = getCurrentThisType(); Expr *This = BuildCXXThisExpr(Loc, ThisTy, ImplicitCaptureLoc.isValid()); if (Cap.isCopyCapture()) Init = CreateBuiltinUnaryOp(Loc, UO_Deref, This); else Init = This; } else { assert(Cap.isVariableCapture() && "unknown kind of capture"); VarDecl *Var = Cap.getVariable(); Name = Var->getIdentifier(); Init = BuildDeclarationNameExpr( CXXScopeSpec(), DeclarationNameInfo(Var->getDeclName(), Loc), Var); } // In OpenMP, the capture kind doesn't actually describe how to capture: // variables are "mapped" onto the device in a process that does not formally // make a copy, even for a "copy capture". 
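// Editor's illustration (not in the original source): a sketch of the
// direct-initialization quoted above. For a by-copy capture such as
//
//   std::string s = "hello";                // assuming <string> is included
//   auto f = [s] { return s.size(); };      // closure member built from 's'
//
// the InitializationSequence formed below direct-initializes the closure's
// non-static data member from 's'; an OpenMP mapping (handled next) bypasses
// that sequence, since mapped variables are not formally copied.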
if (IsOpenMPMapping) return Init; if (Init.isInvalid()) return ExprError(); Expr *InitExpr = Init.get(); InitializedEntity Entity = InitializedEntity::InitializeLambdaCapture( Name, Cap.getCaptureType(), Loc); InitializationKind InitKind = InitializationKind::CreateDirect(Loc, Loc, Loc); InitializationSequence InitSeq(*this, Entity, InitKind, InitExpr); return InitSeq.Perform(*this, Entity, InitKind, InitExpr); } ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope) { LambdaScopeInfo LSI = *cast(FunctionScopes.back()); ActOnFinishFunctionBody(LSI.CallOperator, Body); return BuildLambdaExpr(StartLoc, Body->getEndLoc(), &LSI); } static LambdaCaptureDefault mapImplicitCaptureStyle(CapturingScopeInfo::ImplicitCaptureStyle ICS) { switch (ICS) { case CapturingScopeInfo::ImpCap_None: return LCD_None; case CapturingScopeInfo::ImpCap_LambdaByval: return LCD_ByCopy; case CapturingScopeInfo::ImpCap_CapturedRegion: case CapturingScopeInfo::ImpCap_LambdaByref: return LCD_ByRef; case CapturingScopeInfo::ImpCap_Block: llvm_unreachable("block capture in lambda"); } llvm_unreachable("Unknown implicit capture style"); } bool Sema::CaptureHasSideEffects(const Capture &From) { if (From.isInitCapture()) { Expr *Init = From.getVariable()->getInit(); if (Init && Init->HasSideEffects(Context)) return true; } if (!From.isCopyCapture()) return false; const QualType T = From.isThisCapture() ? getCurrentThisType()->getPointeeType() : From.getCaptureType(); if (T.isVolatileQualified()) return true; const Type *BaseT = T->getBaseElementTypeUnsafe(); if (const CXXRecordDecl *RD = BaseT->getAsCXXRecordDecl()) return !RD->isCompleteDefinition() || !RD->hasTrivialCopyConstructor() || !RD->hasTrivialDestructor(); return false; } bool Sema::DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const Capture &From) { if (CaptureHasSideEffects(From)) return false; if (From.isVLATypeCapture()) return false; auto diag = Diag(From.getLocation(), diag::warn_unused_lambda_capture); if (From.isThisCapture()) diag << "'this'"; else diag << From.getVariable(); diag << From.isNonODRUsed(); diag << FixItHint::CreateRemoval(CaptureRange); return true; } /// Create a field within the lambda class or captured statement record for the /// given capture. FieldDecl *Sema::BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture) { SourceLocation Loc = Capture.getLocation(); QualType FieldType = Capture.getCaptureType(); TypeSourceInfo *TSI = nullptr; if (Capture.isVariableCapture()) { auto *Var = Capture.getVariable(); if (Var->isInitCapture()) TSI = Capture.getVariable()->getTypeSourceInfo(); } // FIXME: Should we really be doing this? A null TypeSourceInfo seems more // appropriate, at least for an implicit capture. if (!TSI) TSI = Context.getTrivialTypeSourceInfo(FieldType, Loc); // Build the non-static data member. FieldDecl *Field = FieldDecl::Create(Context, RD, Loc, Loc, nullptr, FieldType, TSI, nullptr, false, ICIS_NoInit); // If the variable being captured has an invalid type, mark the class as // invalid as well. 
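// Editor's illustration (not in the original source): for a capture like
//
//   double d = 1.0;
//   auto f = [d] { return d; };
//
// BuildCaptureField adds an implicit private FieldDecl of type 'double' to
// the closure type; the checks below reject captures whose type turns out to
// be incomplete, sizeless, or otherwise invalid.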
if (!FieldType->isDependentType()) { if (RequireCompleteSizedType(Loc, FieldType, diag::err_field_incomplete_or_sizeless)) { RD->setInvalidDecl(); Field->setInvalidDecl(); } else { NamedDecl *Def; FieldType->isIncompleteType(&Def); if (Def && Def->isInvalidDecl()) { RD->setInvalidDecl(); Field->setInvalidDecl(); } } } Field->setImplicit(true); Field->setAccess(AS_private); RD->addDecl(Field); if (Capture.isVLATypeCapture()) Field->setCapturedVLAType(Capture.getCapturedVLAType()); return Field; } ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, LambdaScopeInfo *LSI) { // Collect information from the lambda scope. SmallVector Captures; SmallVector CaptureInits; SourceLocation CaptureDefaultLoc = LSI->CaptureDefaultLoc; LambdaCaptureDefault CaptureDefault = mapImplicitCaptureStyle(LSI->ImpCaptureStyle); CXXRecordDecl *Class; CXXMethodDecl *CallOperator; SourceRange IntroducerRange; bool ExplicitParams; bool ExplicitResultType; CleanupInfo LambdaCleanup; bool ContainsUnexpandedParameterPack; bool IsGenericLambda; { CallOperator = LSI->CallOperator; Class = LSI->Lambda; IntroducerRange = LSI->IntroducerRange; ExplicitParams = LSI->ExplicitParams; ExplicitResultType = !LSI->HasImplicitReturnType; LambdaCleanup = LSI->Cleanup; ContainsUnexpandedParameterPack = LSI->ContainsUnexpandedParameterPack; IsGenericLambda = Class->isGenericLambda(); CallOperator->setLexicalDeclContext(Class); Decl *TemplateOrNonTemplateCallOperatorDecl = CallOperator->getDescribedFunctionTemplate() ? CallOperator->getDescribedFunctionTemplate() : cast(CallOperator); // FIXME: Is this really the best choice? Keeping the lexical decl context // set as CurContext seems more faithful to the source. TemplateOrNonTemplateCallOperatorDecl->setLexicalDeclContext(Class); PopExpressionEvaluationContext(); // True if the current capture has a used capture or default before it. bool CurHasPreviousCapture = CaptureDefault != LCD_None; SourceLocation PrevCaptureLoc = CurHasPreviousCapture ? CaptureDefaultLoc : IntroducerRange.getBegin(); for (unsigned I = 0, N = LSI->Captures.size(); I != N; ++I) { const Capture &From = LSI->Captures[I]; if (From.isInvalid()) return ExprError(); assert(!From.isBlockCapture() && "Cannot capture __block variables"); bool IsImplicit = I >= LSI->NumExplicitCaptures; SourceLocation ImplicitCaptureLoc = IsImplicit ? CaptureDefaultLoc : SourceLocation(); // Use source ranges of explicit captures for fixits where available. SourceRange CaptureRange = LSI->ExplicitCaptureRanges[I]; // Warn about unused explicit captures. bool IsCaptureUsed = true; if (!CurContext->isDependentContext() && !IsImplicit && !From.isODRUsed()) { // Initialized captures that are non-ODR used may not be eliminated. // FIXME: Where did the IsGenericLambda here come from? bool NonODRUsedInitCapture = IsGenericLambda && From.isNonODRUsed() && From.isInitCapture(); if (!NonODRUsedInitCapture) { bool IsLast = (I + 1) == LSI->NumExplicitCaptures; SourceRange FixItRange; if (CaptureRange.isValid()) { if (!CurHasPreviousCapture && !IsLast) { // If there are no captures preceding this capture, remove the // following comma. FixItRange = SourceRange(CaptureRange.getBegin(), getLocForEndOfToken(CaptureRange.getEnd())); } else { // Otherwise, remove the comma since the last used capture. 
FixItRange = SourceRange(getLocForEndOfToken(PrevCaptureLoc), CaptureRange.getEnd()); } } IsCaptureUsed = !DiagnoseUnusedLambdaCapture(FixItRange, From); } } if (CaptureRange.isValid()) { CurHasPreviousCapture |= IsCaptureUsed; PrevCaptureLoc = CaptureRange.getEnd(); } // Map the capture to our AST representation. LambdaCapture Capture = [&] { if (From.isThisCapture()) { // Capturing 'this' implicitly with a default of '[=]' is deprecated, // because it results in a reference capture. Don't warn prior to // C++2a; there's nothing that can be done about it before then. if (getLangOpts().CPlusPlus20 && IsImplicit && CaptureDefault == LCD_ByCopy) { Diag(From.getLocation(), diag::warn_deprecated_this_capture); Diag(CaptureDefaultLoc, diag::note_deprecated_this_capture) << FixItHint::CreateInsertion( getLocForEndOfToken(CaptureDefaultLoc), ", this"); } return LambdaCapture(From.getLocation(), IsImplicit, From.isCopyCapture() ? LCK_StarThis : LCK_This); } else if (From.isVLATypeCapture()) { return LambdaCapture(From.getLocation(), IsImplicit, LCK_VLAType); } else { assert(From.isVariableCapture() && "unknown kind of capture"); VarDecl *Var = From.getVariable(); LambdaCaptureKind Kind = From.isCopyCapture() ? LCK_ByCopy : LCK_ByRef; return LambdaCapture(From.getLocation(), IsImplicit, Kind, Var, From.getEllipsisLoc()); } }(); // Form the initializer for the capture field. ExprResult Init = BuildCaptureInit(From, ImplicitCaptureLoc); // FIXME: Skip this capture if the capture is not used, the initializer // has no side-effects, the type of the capture is trivial, and the // lambda is not externally visible. // Add a FieldDecl for the capture and form its initializer. BuildCaptureField(Class, From); Captures.push_back(Capture); CaptureInits.push_back(Init.get()); if (LangOpts.CUDA) CUDACheckLambdaCapture(CallOperator, From); } Class->setCaptures(Captures); // C++11 [expr.prim.lambda]p6: // The closure type for a lambda-expression with no lambda-capture // has a public non-virtual non-explicit const conversion function // to pointer to function having the same parameter and return // types as the closure type's function call operator. if (Captures.empty() && CaptureDefault == LCD_None) addFunctionPointerConversion(*this, IntroducerRange, Class, CallOperator); // Objective-C++: // The closure type for a lambda-expression has a public non-virtual // non-explicit const conversion function to a block pointer having the // same parameter and return types as the closure type's function call // operator. // FIXME: Fix generic lambda to block conversions. if (getLangOpts().Blocks && getLangOpts().ObjC && !IsGenericLambda) addBlockPointerConversion(*this, IntroducerRange, Class, CallOperator); // Finalize the lambda class. SmallVector Fields(Class->fields()); ActOnFields(nullptr, Class->getLocation(), Class, Fields, SourceLocation(), SourceLocation(), ParsedAttributesView()); CheckCompletedCXXClass(nullptr, Class); } Cleanup.mergeFrom(LambdaCleanup); LambdaExpr *Lambda = LambdaExpr::Create(Context, Class, IntroducerRange, CaptureDefault, CaptureDefaultLoc, ExplicitParams, ExplicitResultType, CaptureInits, EndLoc, ContainsUnexpandedParameterPack); // If the lambda expression's call operator is not explicitly marked constexpr // and we are not in a dependent context, analyze the call operator to infer // its constexpr-ness, suppressing diagnostics while doing so. 
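// Editor's illustration (not in the original source): the C++17 inference
// performed below is what makes the following well-formed without writing
// 'constexpr' on the lambda:
//
//   auto add = [](int a, int b) { return a + b; };
//   static_assert(add(1, 2) == 3);   // OK: call operator's constexpr-ness is inferred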
if (getLangOpts().CPlusPlus17 && !CallOperator->isInvalidDecl() && !CallOperator->isConstexpr() && !isa(CallOperator->getBody()) && !Class->getDeclContext()->isDependentContext()) { CallOperator->setConstexprKind( CheckConstexprFunctionDefinition(CallOperator, CheckConstexprKind::CheckValid) ? CSK_constexpr : CSK_unspecified); } // Emit delayed shadowing warnings now that the full capture list is known. DiagnoseShadowingLambdaDecls(LSI); if (!CurContext->isDependentContext()) { switch (ExprEvalContexts.back().Context) { // C++11 [expr.prim.lambda]p2: // A lambda-expression shall not appear in an unevaluated operand // (Clause 5). case ExpressionEvaluationContext::Unevaluated: case ExpressionEvaluationContext::UnevaluatedList: case ExpressionEvaluationContext::UnevaluatedAbstract: // C++1y [expr.const]p2: // A conditional-expression e is a core constant expression unless the // evaluation of e, following the rules of the abstract machine, would // evaluate [...] a lambda-expression. // // This is technically incorrect, there are some constant evaluated contexts // where this should be allowed. We should probably fix this when DR1607 is // ratified, it lays out the exact set of conditions where we shouldn't // allow a lambda-expression. case ExpressionEvaluationContext::ConstantEvaluated: // We don't actually diagnose this case immediately, because we // could be within a context where we might find out later that // the expression is potentially evaluated (e.g., for typeid). ExprEvalContexts.back().Lambdas.push_back(Lambda); break; case ExpressionEvaluationContext::DiscardedStatement: case ExpressionEvaluationContext::PotentiallyEvaluated: case ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed: break; } } return MaybeBindToTemporary(Lambda); } ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src) { // Make sure that the lambda call operator is marked used. CXXRecordDecl *Lambda = Conv->getParent(); CXXMethodDecl *CallOperator = cast( Lambda->lookup( Context.DeclarationNames.getCXXOperatorName(OO_Call)).front()); CallOperator->setReferenced(); CallOperator->markUsed(Context); ExprResult Init = PerformCopyInitialization( InitializedEntity::InitializeLambdaToBlock(ConvLocation, Src->getType(), /*NRVO=*/false), CurrentLocation, Src); if (!Init.isInvalid()) Init = ActOnFinishFullExpr(Init.get(), /*DiscardedValue*/ false); if (Init.isInvalid()) return ExprError(); // Create the new block to be returned. BlockDecl *Block = BlockDecl::Create(Context, CurContext, ConvLocation); // Set the type information. Block->setSignatureAsWritten(CallOperator->getTypeSourceInfo()); Block->setIsVariadic(CallOperator->isVariadic()); Block->setBlockMissingReturnType(false); // Add parameters. SmallVector BlockParams; for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) { ParmVarDecl *From = CallOperator->getParamDecl(I); BlockParams.push_back(ParmVarDecl::Create( Context, Block, From->getBeginLoc(), From->getLocation(), From->getIdentifier(), From->getType(), From->getTypeSourceInfo(), From->getStorageClass(), /*DefArg=*/nullptr)); } Block->setParams(BlockParams); Block->setIsConversionFromLambda(true); // Add capture. The capture uses a fake variable, which doesn't correspond // to any actual memory location. However, the initializer copy-initializes // the lambda object. 
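// Editor's illustration (not in the original source): in Objective-C++ with
// blocks enabled, the conversion implemented here lets a lambda initialize a
// block pointer, e.g.
//
//   void (^blk)(int) = [](int x) { (void)x; };
//
// The block built below captures a fake variable whose initializer
// copy-initializes the closure object, as described above.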
TypeSourceInfo *CapVarTSI = Context.getTrivialTypeSourceInfo(Src->getType()); VarDecl *CapVar = VarDecl::Create(Context, Block, ConvLocation, ConvLocation, nullptr, Src->getType(), CapVarTSI, SC_None); BlockDecl::Capture Capture(/*variable=*/CapVar, /*byRef=*/false, /*nested=*/false, /*copy=*/Init.get()); Block->setCaptures(Context, Capture, /*CapturesCXXThis=*/false); // Add a fake function body to the block. IR generation is responsible // for filling in the actual body, which cannot be expressed as an AST. Block->setBody(new (Context) CompoundStmt(ConvLocation)); // Create the block literal expression. Expr *BuildBlock = new (Context) BlockExpr(Block, Conv->getConversionType()); ExprCleanupObjects.push_back(Block); Cleanup.setExprNeedsCleanups(true); return BuildBlock; } diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp index 5392be57a3aa..eb8677d0e481 100644 --- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp +++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp @@ -1,6157 +1,6164 @@ //===- SemaTemplateDeduction.cpp - Template Argument Deduction ------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements C++ template argument deduction. // //===----------------------------------------------------------------------===// #include "clang/Sema/TemplateDeduction.h" #include "TreeTransform.h" #include "TypeLocBuilder.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclAccessPair.h" #include "clang/AST/DeclBase.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/UnresolvedSet.h" #include "clang/Basic/AddressSpaces.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/Specifiers.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Sema.h" #include "clang/Sema/Template.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include #include #include #include namespace clang { /// Various flags that control template argument deduction. /// /// These flags can be bitwise-OR'd together. enum TemplateDeductionFlags { /// No template argument deduction flags, which indicates the /// strictest results for template argument deduction (as used for, e.g., /// matching class template partial specializations). 
TDF_None = 0, /// Within template argument deduction from a function call, we are /// matching with a parameter type for which the original parameter was /// a reference. TDF_ParamWithReferenceType = 0x1, /// Within template argument deduction from a function call, we /// are matching in a case where we ignore cv-qualifiers. TDF_IgnoreQualifiers = 0x02, /// Within template argument deduction from a function call, /// we are matching in a case where we can perform template argument /// deduction from a template-id of a derived class of the argument type. TDF_DerivedClass = 0x04, /// Allow non-dependent types to differ, e.g., when performing /// template argument deduction from a function call where conversions /// may apply. TDF_SkipNonDependent = 0x08, /// Whether we are performing template argument deduction for /// parameters and arguments in a top-level template argument TDF_TopLevelParameterTypeList = 0x10, /// Within template argument deduction from overload resolution per /// C++ [over.over] allow matching function types that are compatible in /// terms of noreturn and default calling convention adjustments, or /// similarly matching a declared template specialization against a /// possible template, per C++ [temp.deduct.decl]. In either case, permit /// deduction where the parameter is a function type that can be converted /// to the argument type. TDF_AllowCompatibleFunctionType = 0x20, /// Within template argument deduction for a conversion function, we are /// matching with an argument type for which the original argument was /// a reference. TDF_ArgWithReferenceType = 0x40, }; } using namespace clang; using namespace sema; /// Compare two APSInts, extending and switching the sign as /// necessary to compare their values regardless of underlying type. static bool hasSameExtendedValue(llvm::APSInt X, llvm::APSInt Y) { if (Y.getBitWidth() > X.getBitWidth()) X = X.extend(Y.getBitWidth()); else if (Y.getBitWidth() < X.getBitWidth()) Y = Y.extend(X.getBitWidth()); // If there is a signedness mismatch, correct it. if (X.isSigned() != Y.isSigned()) { // If the signed value is negative, then the values cannot be the same. if ((Y.isSigned() && Y.isNegative()) || (X.isSigned() && X.isNegative())) return false; Y.setIsSigned(true); X.setIsSigned(true); } return X == Y; } static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, const TemplateArgument &Param, TemplateArgument Arg, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced); static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(Sema &S, TemplateParameterList *TemplateParams, QualType Param, QualType Arg, TemplateDeductionInfo &Info, SmallVectorImpl & Deduced, unsigned TDF, bool PartialOrdering = false, bool DeducedFromArrayBound = false); static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, ArrayRef Params, ArrayRef Args, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced, bool NumberOfArgumentsMustMatch); static void MarkUsedTemplateParameters(ASTContext &Ctx, const TemplateArgument &TemplateArg, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); static void MarkUsedTemplateParameters(ASTContext &Ctx, QualType T, bool OnlyDeduced, unsigned Level, llvm::SmallBitVector &Deduced); /// If the given expression is of a form that permits the deduction /// of a non-type template parameter, return the declaration of that /// non-type template parameter. 
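// Editor's illustration (not in the original source): the unwrapping performed
// below is what lets deduction see the parameter through implicit casts and
// prior substitutions, e.g.
//
//   template <int N> struct X {};
//   template <int N> void g(X<N>);   // 'N' appears as a DeclRefExpr, possibly
//                                    // wrapped in ImplicitCastExpr or
//                                    // SubstNonTypeTemplateParmExpr
//   void use() { g(X<3>{}); }        // deduces N = 3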
static NonTypeTemplateParmDecl * getDeducedParameterFromExpr(TemplateDeductionInfo &Info, Expr *E) { // If we are within an alias template, the expression may have undergone // any number of parameter substitutions already. while (true) { if (ImplicitCastExpr *IC = dyn_cast(E)) E = IC->getSubExpr(); else if (ConstantExpr *CE = dyn_cast(E)) E = CE->getSubExpr(); else if (SubstNonTypeTemplateParmExpr *Subst = dyn_cast(E)) E = Subst->getReplacement(); else break; } if (DeclRefExpr *DRE = dyn_cast(E)) if (auto *NTTP = dyn_cast(DRE->getDecl())) if (NTTP->getDepth() == Info.getDeducedDepth()) return NTTP; return nullptr; } /// Determine whether two declaration pointers refer to the same /// declaration. static bool isSameDeclaration(Decl *X, Decl *Y) { if (NamedDecl *NX = dyn_cast(X)) X = NX->getUnderlyingDecl(); if (NamedDecl *NY = dyn_cast(Y)) Y = NY->getUnderlyingDecl(); return X->getCanonicalDecl() == Y->getCanonicalDecl(); } /// Verify that the given, deduced template arguments are compatible. /// /// \returns The deduced template argument, or a NULL template argument if /// the deduced template arguments were incompatible. static DeducedTemplateArgument checkDeducedTemplateArguments(ASTContext &Context, const DeducedTemplateArgument &X, const DeducedTemplateArgument &Y) { // We have no deduction for one or both of the arguments; they're compatible. if (X.isNull()) return Y; if (Y.isNull()) return X; // If we have two non-type template argument values deduced for the same // parameter, they must both match the type of the parameter, and thus must // match each other's type. As we're only keeping one of them, we must check // for that now. The exception is that if either was deduced from an array // bound, the type is permitted to differ. if (!X.wasDeducedFromArrayBound() && !Y.wasDeducedFromArrayBound()) { QualType XType = X.getNonTypeTemplateArgumentType(); if (!XType.isNull()) { QualType YType = Y.getNonTypeTemplateArgumentType(); if (YType.isNull() || !Context.hasSameType(XType, YType)) return DeducedTemplateArgument(); } } switch (X.getKind()) { case TemplateArgument::Null: llvm_unreachable("Non-deduced template arguments handled above"); case TemplateArgument::Type: // If two template type arguments have the same type, they're compatible. if (Y.getKind() == TemplateArgument::Type && Context.hasSameType(X.getAsType(), Y.getAsType())) return X; // If one of the two arguments was deduced from an array bound, the other // supersedes it. if (X.wasDeducedFromArrayBound() != Y.wasDeducedFromArrayBound()) return X.wasDeducedFromArrayBound() ? Y : X; // The arguments are not compatible. return DeducedTemplateArgument(); case TemplateArgument::Integral: // If we deduced a constant in one case and either a dependent expression or // declaration in another case, keep the integral constant. // If both are integral constants with the same value, keep that value. if (Y.getKind() == TemplateArgument::Expression || Y.getKind() == TemplateArgument::Declaration || (Y.getKind() == TemplateArgument::Integral && hasSameExtendedValue(X.getAsIntegral(), Y.getAsIntegral()))) return X.wasDeducedFromArrayBound() ? Y : X; // All other combinations are incompatible. return DeducedTemplateArgument(); case TemplateArgument::Template: if (Y.getKind() == TemplateArgument::Template && Context.hasSameTemplateName(X.getAsTemplate(), Y.getAsTemplate())) return X; // All other combinations are incompatible. 
return DeducedTemplateArgument(); case TemplateArgument::TemplateExpansion: if (Y.getKind() == TemplateArgument::TemplateExpansion && Context.hasSameTemplateName(X.getAsTemplateOrTemplatePattern(), Y.getAsTemplateOrTemplatePattern())) return X; // All other combinations are incompatible. return DeducedTemplateArgument(); case TemplateArgument::Expression: { if (Y.getKind() != TemplateArgument::Expression) return checkDeducedTemplateArguments(Context, Y, X); // Compare the expressions for equality llvm::FoldingSetNodeID ID1, ID2; X.getAsExpr()->Profile(ID1, Context, true); Y.getAsExpr()->Profile(ID2, Context, true); if (ID1 == ID2) return X.wasDeducedFromArrayBound() ? Y : X; // Differing dependent expressions are incompatible. return DeducedTemplateArgument(); } case TemplateArgument::Declaration: assert(!X.wasDeducedFromArrayBound()); // If we deduced a declaration and a dependent expression, keep the // declaration. if (Y.getKind() == TemplateArgument::Expression) return X; // If we deduced a declaration and an integral constant, keep the // integral constant and whichever type did not come from an array // bound. if (Y.getKind() == TemplateArgument::Integral) { if (Y.wasDeducedFromArrayBound()) return TemplateArgument(Context, Y.getAsIntegral(), X.getParamTypeForDecl()); return Y; } // If we deduced two declarations, make sure that they refer to the // same declaration. if (Y.getKind() == TemplateArgument::Declaration && isSameDeclaration(X.getAsDecl(), Y.getAsDecl())) return X; // All other combinations are incompatible. return DeducedTemplateArgument(); case TemplateArgument::NullPtr: // If we deduced a null pointer and a dependent expression, keep the // null pointer. if (Y.getKind() == TemplateArgument::Expression) return X; // If we deduced a null pointer and an integral constant, keep the // integral constant. if (Y.getKind() == TemplateArgument::Integral) return Y; // If we deduced two null pointers, they are the same. if (Y.getKind() == TemplateArgument::NullPtr) return X; // All other combinations are incompatible. return DeducedTemplateArgument(); case TemplateArgument::Pack: { if (Y.getKind() != TemplateArgument::Pack || X.pack_size() != Y.pack_size()) return DeducedTemplateArgument(); llvm::SmallVector NewPack; for (TemplateArgument::pack_iterator XA = X.pack_begin(), XAEnd = X.pack_end(), YA = Y.pack_begin(); XA != XAEnd; ++XA, ++YA) { TemplateArgument Merged = checkDeducedTemplateArguments( Context, DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()), DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound())); if (Merged.isNull() && !(XA->isNull() && YA->isNull())) return DeducedTemplateArgument(); NewPack.push_back(Merged); } return DeducedTemplateArgument( TemplateArgument::CreatePackCopy(Context, NewPack), X.wasDeducedFromArrayBound() && Y.wasDeducedFromArrayBound()); } } llvm_unreachable("Invalid TemplateArgument Kind!"); } /// Deduce the value of the given non-type template parameter /// as the given deduced template argument. All non-type template parameter /// deduction is funneled through here. 
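// Editor's illustration (not in the original source): when two deductions for
// the same parameter disagree, checkDeducedTemplateArguments (above) yields a
// null argument and deduction fails as inconsistent, e.g.
//
//   template <class T> void h(T, T);
//   void use() { h(1, 2.0); }   // error: T deduced as 'int' and as 'double'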
static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument( Sema &S, TemplateParameterList *TemplateParams, NonTypeTemplateParmDecl *NTTP, const DeducedTemplateArgument &NewDeduced, QualType ValueType, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced) { assert(NTTP->getDepth() == Info.getDeducedDepth() && "deducing non-type template argument with wrong depth"); DeducedTemplateArgument Result = checkDeducedTemplateArguments( S.Context, Deduced[NTTP->getIndex()], NewDeduced); if (Result.isNull()) { Info.Param = NTTP; Info.FirstArg = Deduced[NTTP->getIndex()]; Info.SecondArg = NewDeduced; return Sema::TDK_Inconsistent; } Deduced[NTTP->getIndex()] = Result; if (!S.getLangOpts().CPlusPlus17) return Sema::TDK_Success; if (NTTP->isExpandedParameterPack()) // FIXME: We may still need to deduce parts of the type here! But we // don't have any way to find which slice of the type to use, and the // type stored on the NTTP itself is nonsense. Perhaps the type of an // expanded NTTP should be a pack expansion type? return Sema::TDK_Success; // Get the type of the parameter for deduction. If it's a (dependent) array // or function type, we will not have decayed it yet, so do that now. QualType ParamType = S.Context.getAdjustedParameterType(NTTP->getType()); if (auto *Expansion = dyn_cast(ParamType)) ParamType = Expansion->getPattern(); // FIXME: It's not clear how deduction of a parameter of reference // type from an argument (of non-reference type) should be performed. // For now, we just remove reference types from both sides and let // the final check for matching types sort out the mess. return DeduceTemplateArgumentsByTypeMatch( S, TemplateParams, ParamType.getNonReferenceType(), ValueType.getNonReferenceType(), Info, Deduced, TDF_SkipNonDependent, /*PartialOrdering=*/false, /*ArrayBound=*/NewDeduced.wasDeducedFromArrayBound()); } /// Deduce the value of the given non-type template parameter /// from the given integral constant. static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument( Sema &S, TemplateParameterList *TemplateParams, NonTypeTemplateParmDecl *NTTP, const llvm::APSInt &Value, QualType ValueType, bool DeducedFromArrayBound, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced) { return DeduceNonTypeTemplateArgument( S, TemplateParams, NTTP, DeducedTemplateArgument(S.Context, Value, ValueType, DeducedFromArrayBound), ValueType, Info, Deduced); } /// Deduce the value of the given non-type template parameter /// from the given null pointer template argument type. static Sema::TemplateDeductionResult DeduceNullPtrTemplateArgument( Sema &S, TemplateParameterList *TemplateParams, NonTypeTemplateParmDecl *NTTP, QualType NullPtrType, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced) { Expr *Value = S.ImpCastExprToType(new (S.Context) CXXNullPtrLiteralExpr( S.Context.NullPtrTy, NTTP->getLocation()), NullPtrType, CK_NullToPointer) .get(); return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, DeducedTemplateArgument(Value), Value->getType(), Info, Deduced); } /// Deduce the value of the given non-type template parameter /// from the given type- or value-dependent expression. /// /// \returns true if deduction succeeded, false otherwise. 
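// Editor's illustration (not in the original source): the overloads above all
// funnel into the first DeduceNonTypeTemplateArgument, e.g. when a value is
// deduced from an array bound:
//
//   template <int N> void f(char (&)[N]);
//   void use() { char buf[8]; f(buf); }   // N = 8, deduced from the array bound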
static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument( Sema &S, TemplateParameterList *TemplateParams, NonTypeTemplateParmDecl *NTTP, Expr *Value, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced) { return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, DeducedTemplateArgument(Value), Value->getType(), Info, Deduced); } /// Deduce the value of the given non-type template parameter /// from the given declaration. /// /// \returns true if deduction succeeded, false otherwise. static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument( Sema &S, TemplateParameterList *TemplateParams, NonTypeTemplateParmDecl *NTTP, ValueDecl *D, QualType T, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced) { D = D ? cast(D->getCanonicalDecl()) : nullptr; TemplateArgument New(D, T); return DeduceNonTypeTemplateArgument( S, TemplateParams, NTTP, DeducedTemplateArgument(New), T, Info, Deduced); } static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, TemplateName Param, TemplateName Arg, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced) { TemplateDecl *ParamDecl = Param.getAsTemplateDecl(); if (!ParamDecl) { // The parameter type is dependent and is not a template template parameter, // so there is nothing that we can deduce. return Sema::TDK_Success; } if (TemplateTemplateParmDecl *TempParam = dyn_cast(ParamDecl)) { // If we're not deducing at this depth, there's nothing to deduce. if (TempParam->getDepth() != Info.getDeducedDepth()) return Sema::TDK_Success; DeducedTemplateArgument NewDeduced(S.Context.getCanonicalTemplateName(Arg)); DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context, Deduced[TempParam->getIndex()], NewDeduced); if (Result.isNull()) { Info.Param = TempParam; Info.FirstArg = Deduced[TempParam->getIndex()]; Info.SecondArg = NewDeduced; return Sema::TDK_Inconsistent; } Deduced[TempParam->getIndex()] = Result; return Sema::TDK_Success; } // Verify that the two template names are equivalent. if (S.Context.hasSameTemplateName(Param, Arg)) return Sema::TDK_Success; // Mismatch of non-dependent template parameter to argument. Info.FirstArg = TemplateArgument(Param); Info.SecondArg = TemplateArgument(Arg); return Sema::TDK_NonDeducedMismatch; } /// Deduce the template arguments by comparing the template parameter /// type (which is a template-id) with the template argument type. /// /// \param S the Sema /// /// \param TemplateParams the template parameters that we are deducing /// /// \param Param the parameter type /// /// \param Arg the argument type /// /// \param Info information about the template argument deduction itself /// /// \param Deduced the deduced template arguments /// /// \returns the result of template argument deduction so far. Note that a /// "success" result means that template argument deduction has not yet failed, /// but it may still fail, later, for other reasons. static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, const TemplateSpecializationType *Param, QualType Arg, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced) { assert(Arg.isCanonical() && "Argument type must be canonical"); // Treat an injected-class-name as its underlying template-id. if (auto *Injected = dyn_cast(Arg)) Arg = Injected->getInjectedSpecializationType(); // Check whether the template argument is a dependent template-id. 
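// Editor's illustration (not in the original source): a case this function
// handles, deducing both the template name and its arguments from a class
// template specialization:
//
//   template <class T> struct Box {};
//   template <template <class> class TT, class T> void k(TT<T>);
//   void use() { k(Box<int>{}); }   // TT deduced as 'Box', T as 'int'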
if (const TemplateSpecializationType *SpecArg = dyn_cast(Arg)) { // Perform template argument deduction for the template name. if (Sema::TemplateDeductionResult Result = DeduceTemplateArguments(S, TemplateParams, Param->getTemplateName(), SpecArg->getTemplateName(), Info, Deduced)) return Result; // Perform template argument deduction on each template // argument. Ignore any missing/extra arguments, since they could be // filled in by default arguments. return DeduceTemplateArguments(S, TemplateParams, Param->template_arguments(), SpecArg->template_arguments(), Info, Deduced, /*NumberOfArgumentsMustMatch=*/false); } // If the argument type is a class template specialization, we // perform template argument deduction using its template // arguments. const RecordType *RecordArg = dyn_cast(Arg); if (!RecordArg) { Info.FirstArg = TemplateArgument(QualType(Param, 0)); Info.SecondArg = TemplateArgument(Arg); return Sema::TDK_NonDeducedMismatch; } ClassTemplateSpecializationDecl *SpecArg = dyn_cast(RecordArg->getDecl()); if (!SpecArg) { Info.FirstArg = TemplateArgument(QualType(Param, 0)); Info.SecondArg = TemplateArgument(Arg); return Sema::TDK_NonDeducedMismatch; } // Perform template argument deduction for the template name. if (Sema::TemplateDeductionResult Result = DeduceTemplateArguments(S, TemplateParams, Param->getTemplateName(), TemplateName(SpecArg->getSpecializedTemplate()), Info, Deduced)) return Result; // Perform template argument deduction for the template arguments. return DeduceTemplateArguments(S, TemplateParams, Param->template_arguments(), SpecArg->getTemplateArgs().asArray(), Info, Deduced, /*NumberOfArgumentsMustMatch=*/true); } /// Determines whether the given type is an opaque type that /// might be more qualified when instantiated. static bool IsPossiblyOpaquelyQualifiedType(QualType T) { switch (T->getTypeClass()) { case Type::TypeOfExpr: case Type::TypeOf: case Type::DependentName: case Type::Decltype: case Type::UnresolvedUsing: case Type::TemplateTypeParm: return true; case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: case Type::DependentSizedArray: return IsPossiblyOpaquelyQualifiedType( cast(T)->getElementType()); default: return false; } } /// Helper function to build a TemplateParameter when we don't /// know its type statically. static TemplateParameter makeTemplateParameter(Decl *D) { if (TemplateTypeParmDecl *TTP = dyn_cast(D)) return TemplateParameter(TTP); if (NonTypeTemplateParmDecl *NTTP = dyn_cast(D)) return TemplateParameter(NTTP); return TemplateParameter(cast(D)); } /// If \p Param is an expanded parameter pack, get the number of expansions. static Optional getExpandedPackSize(NamedDecl *Param) { if (auto *TTP = dyn_cast(Param)) if (TTP->isExpandedParameterPack()) return TTP->getNumExpansionParameters(); if (auto *NTTP = dyn_cast(Param)) if (NTTP->isExpandedParameterPack()) return NTTP->getNumExpansionTypes(); if (auto *TTP = dyn_cast(Param)) if (TTP->isExpandedParameterPack()) return TTP->getNumExpansionTemplateParameters(); return None; } /// A pack that we're currently deducing. struct clang::DeducedPack { // The index of the pack. unsigned Index; // The old value of the pack before we started deducing it. DeducedTemplateArgument Saved; // A deferred value of this pack from an inner deduction, that couldn't be // deduced because this deduction hadn't happened yet. DeducedTemplateArgument DeferredDeduction; // The new value of the pack. SmallVector New; // The outer deduction for this pack, if any. 
DeducedPack *Outer = nullptr; DeducedPack(unsigned Index) : Index(Index) {} }; namespace { /// A scope in which we're performing pack deduction. class PackDeductionScope { public: /// Prepare to deduce the packs named within Pattern. PackDeductionScope(Sema &S, TemplateParameterList *TemplateParams, SmallVectorImpl &Deduced, TemplateDeductionInfo &Info, TemplateArgument Pattern) : S(S), TemplateParams(TemplateParams), Deduced(Deduced), Info(Info) { unsigned NumNamedPacks = addPacks(Pattern); finishConstruction(NumNamedPacks); } /// Prepare to directly deduce arguments of the parameter with index \p Index. PackDeductionScope(Sema &S, TemplateParameterList *TemplateParams, SmallVectorImpl &Deduced, TemplateDeductionInfo &Info, unsigned Index) : S(S), TemplateParams(TemplateParams), Deduced(Deduced), Info(Info) { addPack(Index); finishConstruction(1); } private: void addPack(unsigned Index) { // Save the deduced template argument for the parameter pack expanded // by this pack expansion, then clear out the deduction. DeducedPack Pack(Index); Pack.Saved = Deduced[Index]; Deduced[Index] = TemplateArgument(); // FIXME: What if we encounter multiple packs with different numbers of // pre-expanded expansions? (This should already have been diagnosed // during substitution.) if (Optional ExpandedPackExpansions = getExpandedPackSize(TemplateParams->getParam(Index))) FixedNumExpansions = ExpandedPackExpansions; Packs.push_back(Pack); } unsigned addPacks(TemplateArgument Pattern) { // Compute the set of template parameter indices that correspond to // parameter packs expanded by the pack expansion. llvm::SmallBitVector SawIndices(TemplateParams->size()); llvm::SmallVector ExtraDeductions; auto AddPack = [&](unsigned Index) { if (SawIndices[Index]) return; SawIndices[Index] = true; addPack(Index); // Deducing a parameter pack that is a pack expansion also constrains the // packs appearing in that parameter to have the same deduced arity. Also, // in C++17 onwards, deducing a non-type template parameter deduces its // type, so we need to collect the pending deduced values for those packs. if (auto *NTTP = dyn_cast( TemplateParams->getParam(Index))) { if (!NTTP->isExpandedParameterPack()) if (auto *Expansion = dyn_cast(NTTP->getType())) ExtraDeductions.push_back(Expansion->getPattern()); } // FIXME: Also collect the unexpanded packs in any type and template // parameter packs that are pack expansions. }; auto Collect = [&](TemplateArgument Pattern) { SmallVector Unexpanded; S.collectUnexpandedParameterPacks(Pattern, Unexpanded); for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) { unsigned Depth, Index; std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]); if (Depth == Info.getDeducedDepth()) AddPack(Index); } }; // Look for unexpanded packs in the pattern. Collect(Pattern); assert(!Packs.empty() && "Pack expansion without unexpanded packs?"); unsigned NumNamedPacks = Packs.size(); // Also look for unexpanded packs that are indirectly deduced by deducing // the sizes of the packs in this pattern. while (!ExtraDeductions.empty()) Collect(ExtraDeductions.pop_back_val()); return NumNamedPacks; } void finishConstruction(unsigned NumNamedPacks) { // Dig out the partially-substituted pack, if there is one. 
const TemplateArgument *PartialPackArgs = nullptr; unsigned NumPartialPackArgs = 0; std::pair PartialPackDepthIndex(-1u, -1u); if (auto *Scope = S.CurrentInstantiationScope) if (auto *Partial = Scope->getPartiallySubstitutedPack( &PartialPackArgs, &NumPartialPackArgs)) PartialPackDepthIndex = getDepthAndIndex(Partial); // This pack expansion will have been partially or fully expanded if // it only names explicitly-specified parameter packs (including the // partially-substituted one, if any). bool IsExpanded = true; for (unsigned I = 0; I != NumNamedPacks; ++I) { if (Packs[I].Index >= Info.getNumExplicitArgs()) { IsExpanded = false; IsPartiallyExpanded = false; break; } if (PartialPackDepthIndex == std::make_pair(Info.getDeducedDepth(), Packs[I].Index)) { IsPartiallyExpanded = true; } } // Skip over the pack elements that were expanded into separate arguments. // If we partially expanded, this is the number of partial arguments. if (IsPartiallyExpanded) PackElements += NumPartialPackArgs; else if (IsExpanded) PackElements += *FixedNumExpansions; for (auto &Pack : Packs) { if (Info.PendingDeducedPacks.size() > Pack.Index) Pack.Outer = Info.PendingDeducedPacks[Pack.Index]; else Info.PendingDeducedPacks.resize(Pack.Index + 1); Info.PendingDeducedPacks[Pack.Index] = &Pack; if (PartialPackDepthIndex == std::make_pair(Info.getDeducedDepth(), Pack.Index)) { Pack.New.append(PartialPackArgs, PartialPackArgs + NumPartialPackArgs); // We pre-populate the deduced value of the partially-substituted // pack with the specified value. This is not entirely correct: the // value is supposed to have been substituted, not deduced, but the // cases where this is observable require an exact type match anyway. // // FIXME: If we could represent a "depth i, index j, pack elem k" // parameter, we could substitute the partially-substituted pack // everywhere and avoid this. if (!IsPartiallyExpanded) Deduced[Pack.Index] = Pack.New[PackElements]; } } } public: ~PackDeductionScope() { for (auto &Pack : Packs) Info.PendingDeducedPacks[Pack.Index] = Pack.Outer; } /// Determine whether this pack has already been partially expanded into a /// sequence of (prior) function parameters / template arguments. bool isPartiallyExpanded() { return IsPartiallyExpanded; } /// Determine whether this pack expansion scope has a known, fixed arity. /// This happens if it involves a pack from an outer template that has /// (notionally) already been expanded. bool hasFixedArity() { return FixedNumExpansions.hasValue(); } /// Determine whether the next element of the argument is still part of this /// pack. This is the case unless the pack is already expanded to a fixed /// length. bool hasNextElement() { return !FixedNumExpansions || *FixedNumExpansions > PackElements; } /// Move to deducing the next element in each pack that is being deduced. void nextPackElement() { // Capture the deduced template arguments for each parameter pack expanded // by this pack expansion, add them to the list of arguments we've deduced // for that pack, then clear out the deduced argument. for (auto &Pack : Packs) { DeducedTemplateArgument &DeducedArg = Deduced[Pack.Index]; if (!Pack.New.empty() || !DeducedArg.isNull()) { while (Pack.New.size() < PackElements) Pack.New.push_back(DeducedTemplateArgument()); if (Pack.New.size() == PackElements) Pack.New.push_back(DeducedArg); else Pack.New[PackElements] = DeducedArg; DeducedArg = Pack.New.size() > PackElements + 1 ? 
Pack.New[PackElements + 1] : DeducedTemplateArgument(); } } ++PackElements; } /// Finish template argument deduction for a set of argument packs, /// producing the argument packs and checking for consistency with prior /// deductions. Sema::TemplateDeductionResult finish() { // Build argument packs for each of the parameter packs expanded by this // pack expansion. for (auto &Pack : Packs) { // Put back the old value for this pack. Deduced[Pack.Index] = Pack.Saved; // Always make sure the size of this pack is correct, even if we didn't // deduce any values for it. // // FIXME: This isn't required by the normative wording, but substitution // and post-substitution checking will always fail if the arity of any // pack is not equal to the number of elements we processed. (Either that // or something else has gone *very* wrong.) We're permitted to skip any // hard errors from those follow-on steps by the intent (but not the // wording) of C++ [temp.inst]p8: // // If the function selected by overload resolution can be determined // without instantiating a class template definition, it is unspecified // whether that instantiation actually takes place Pack.New.resize(PackElements); // Build or find a new value for this pack. DeducedTemplateArgument NewPack; if (Pack.New.empty()) { // If we deduced an empty argument pack, create it now. NewPack = DeducedTemplateArgument(TemplateArgument::getEmptyPack()); } else { TemplateArgument *ArgumentPack = new (S.Context) TemplateArgument[Pack.New.size()]; std::copy(Pack.New.begin(), Pack.New.end(), ArgumentPack); NewPack = DeducedTemplateArgument( TemplateArgument(llvm::makeArrayRef(ArgumentPack, Pack.New.size())), // FIXME: This is wrong, it's possible that some pack elements are // deduced from an array bound and others are not: // template void g(const T (&...p)[V]); // g({1, 2, 3}, {{}, {}}); // ... should deduce T = {int, size_t (from array bound)}. Pack.New[0].wasDeducedFromArrayBound()); } // Pick where we're going to put the merged pack. DeducedTemplateArgument *Loc; if (Pack.Outer) { if (Pack.Outer->DeferredDeduction.isNull()) { // Defer checking this pack until we have a complete pack to compare // it against. Pack.Outer->DeferredDeduction = NewPack; continue; } Loc = &Pack.Outer->DeferredDeduction; } else { Loc = &Deduced[Pack.Index]; } // Check the new pack matches any previous value. DeducedTemplateArgument OldPack = *Loc; DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context, OldPack, NewPack); // If we deferred a deduction of this pack, check that one now too. if (!Result.isNull() && !Pack.DeferredDeduction.isNull()) { OldPack = Result; NewPack = Pack.DeferredDeduction; Result = checkDeducedTemplateArguments(S.Context, OldPack, NewPack); } NamedDecl *Param = TemplateParams->getParam(Pack.Index); if (Result.isNull()) { Info.Param = makeTemplateParameter(Param); Info.FirstArg = OldPack; Info.SecondArg = NewPack; return Sema::TDK_Inconsistent; } // If we have a pre-expanded pack and we didn't deduce enough elements // for it, fail deduction. if (Optional Expansions = getExpandedPackSize(Param)) { if (*Expansions != PackElements) { Info.Param = makeTemplateParameter(Param); Info.FirstArg = Result; return Sema::TDK_IncompletePack; } } *Loc = Result; } return Sema::TDK_Success; } private: Sema &S; TemplateParameterList *TemplateParams; SmallVectorImpl &Deduced; TemplateDeductionInfo &Info; unsigned PackElements = 0; bool IsPartiallyExpanded = false; /// The number of expansions, if we have a fully-expanded pack in this scope. 
Optional FixedNumExpansions; SmallVector Packs; }; } // namespace /// Deduce the template arguments by comparing the list of parameter /// types to the list of argument types, as in the parameter-type-lists of /// function types (C++ [temp.deduct.type]p10). /// /// \param S The semantic analysis object within which we are deducing /// /// \param TemplateParams The template parameters that we are deducing /// /// \param Params The list of parameter types /// /// \param NumParams The number of types in \c Params /// /// \param Args The list of argument types /// /// \param NumArgs The number of types in \c Args /// /// \param Info information about the template argument deduction itself /// /// \param Deduced the deduced template arguments /// /// \param TDF bitwise OR of the TemplateDeductionFlags bits that describe /// how template argument deduction is performed. /// /// \param PartialOrdering If true, we are performing template argument /// deduction for during partial ordering for a call /// (C++0x [temp.deduct.partial]). /// /// \returns the result of template argument deduction so far. Note that a /// "success" result means that template argument deduction has not yet failed, /// but it may still fail, later, for other reasons. static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, const QualType *Params, unsigned NumParams, const QualType *Args, unsigned NumArgs, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced, unsigned TDF, bool PartialOrdering = false) { // C++0x [temp.deduct.type]p10: // Similarly, if P has a form that contains (T), then each parameter type // Pi of the respective parameter-type- list of P is compared with the // corresponding parameter type Ai of the corresponding parameter-type-list // of A. [...] unsigned ArgIdx = 0, ParamIdx = 0; for (; ParamIdx != NumParams; ++ParamIdx) { // Check argument types. const PackExpansionType *Expansion = dyn_cast(Params[ParamIdx]); if (!Expansion) { // Simple case: compare the parameter and argument types at this point. // Make sure we have an argument. if (ArgIdx >= NumArgs) return Sema::TDK_MiscellaneousDeductionFailure; if (isa(Args[ArgIdx])) { // C++0x [temp.deduct.type]p22: // If the original function parameter associated with A is a function // parameter pack and the function parameter associated with P is not // a function parameter pack, then template argument deduction fails. return Sema::TDK_MiscellaneousDeductionFailure; } if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Params[ParamIdx], Args[ArgIdx], Info, Deduced, TDF, PartialOrdering)) return Result; ++ArgIdx; continue; } // C++0x [temp.deduct.type]p10: // If the parameter-declaration corresponding to Pi is a function // parameter pack, then the type of its declarator- id is compared with // each remaining parameter type in the parameter-type-list of A. Each // comparison deduces template arguments for subsequent positions in the // template parameter packs expanded by the function parameter pack. QualType Pattern = Expansion->getPattern(); PackDeductionScope PackScope(S, TemplateParams, Deduced, Info, Pattern); // A pack scope with fixed arity is not really a pack any more, so is not // a non-deduced context. if (ParamIdx + 1 == NumParams || PackScope.hasFixedArity()) { for (; ArgIdx < NumArgs && PackScope.hasNextElement(); ++ArgIdx) { // Deduce template arguments from the pattern. 
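// Editor's illustration (not in the original source): for a trailing pack in a
// parameter-type-list, each remaining argument type is matched against the
// pattern here, one pack element per iteration, e.g.
//
//   template <class... Ts> void g(void (*)(Ts...));
//   void h(int, double, char);
//   void use() { g(&h); }   // Ts deduced as {int, double, char}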
if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Pattern, Args[ArgIdx], Info, Deduced, TDF, PartialOrdering)) return Result; PackScope.nextPackElement(); } } else { // C++0x [temp.deduct.type]p5: // The non-deduced contexts are: // - A function parameter pack that does not occur at the end of the // parameter-declaration-clause. // // FIXME: There is no wording to say what we should do in this case. We // choose to resolve this by applying the same rule that is applied for a // function call: that is, deduce all contained packs to their // explicitly-specified values (or to <> if there is no such value). // // This is seemingly-arbitrarily different from the case of a template-id // with a non-trailing pack-expansion in its arguments, which renders the // entire template-argument-list a non-deduced context. // If the parameter type contains an explicitly-specified pack that we // could not expand, skip the number of parameters notionally created // by the expansion. Optional NumExpansions = Expansion->getNumExpansions(); if (NumExpansions && !PackScope.isPartiallyExpanded()) { for (unsigned I = 0; I != *NumExpansions && ArgIdx < NumArgs; ++I, ++ArgIdx) PackScope.nextPackElement(); } } // Build argument packs for each of the parameter packs expanded by this // pack expansion. if (auto Result = PackScope.finish()) return Result; } // Make sure we don't have any extra arguments. if (ArgIdx < NumArgs) return Sema::TDK_MiscellaneousDeductionFailure; return Sema::TDK_Success; } /// Determine whether the parameter has qualifiers that the argument /// lacks. Put another way, determine whether there is no way to add /// a deduced set of qualifiers to the ParamType that would result in /// its qualifiers matching those of the ArgType. static bool hasInconsistentOrSupersetQualifiersOf(QualType ParamType, QualType ArgType) { Qualifiers ParamQs = ParamType.getQualifiers(); Qualifiers ArgQs = ArgType.getQualifiers(); if (ParamQs == ArgQs) return false; // Mismatched (but not missing) Objective-C GC attributes. if (ParamQs.getObjCGCAttr() != ArgQs.getObjCGCAttr() && ParamQs.hasObjCGCAttr()) return true; // Mismatched (but not missing) address spaces. if (ParamQs.getAddressSpace() != ArgQs.getAddressSpace() && ParamQs.hasAddressSpace()) return true; // Mismatched (but not missing) Objective-C lifetime qualifiers. if (ParamQs.getObjCLifetime() != ArgQs.getObjCLifetime() && ParamQs.hasObjCLifetime()) return true; // CVR qualifiers inconsistent or a superset. return (ParamQs.getCVRQualifiers() & ~ArgQs.getCVRQualifiers()) != 0; } /// Compare types for equality with respect to possibly compatible /// function types (noreturn adjustment, implicit calling conventions). If any /// of parameter and argument is not a function, just perform type comparison. /// /// \param Param the template parameter type. /// /// \param Arg the argument type. bool Sema::isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg) { const FunctionType *ParamFunction = Param->getAs(), *ArgFunction = Arg->getAs(); // Just compare if not functions. if (!ParamFunction || !ArgFunction) return Param == Arg; // Noreturn and noexcept adjustment. QualType AdjustedParam; if (IsFunctionConversion(Param, Arg, AdjustedParam)) return Arg == Context.getCanonicalType(AdjustedParam); // FIXME: Compatible calling conventions. return Param == Arg; } /// Get the index of the first template parameter that was originally from the /// innermost template-parameter-list. 
This is 0 except when we concatenate /// the template parameter lists of a class template and a constructor template /// when forming an implicit deduction guide. static unsigned getFirstInnerIndex(FunctionTemplateDecl *FTD) { auto *Guide = dyn_cast(FTD->getTemplatedDecl()); if (!Guide || !Guide->isImplicit()) return 0; return Guide->getDeducedTemplate()->getTemplateParameters()->size(); } /// Determine whether a type denotes a forwarding reference. static bool isForwardingReference(QualType Param, unsigned FirstInnerIndex) { // C++1z [temp.deduct.call]p3: // A forwarding reference is an rvalue reference to a cv-unqualified // template parameter that does not represent a template parameter of a // class template. if (auto *ParamRef = Param->getAs()) { if (ParamRef->getPointeeType().getQualifiers()) return false; auto *TypeParm = ParamRef->getPointeeType()->getAs(); return TypeParm && TypeParm->getIndex() >= FirstInnerIndex; } return false; } /// Deduce the template arguments by comparing the parameter type and /// the argument type (C++ [temp.deduct.type]). /// /// \param S the semantic analysis object within which we are deducing /// /// \param TemplateParams the template parameters that we are deducing /// /// \param ParamIn the parameter type /// /// \param ArgIn the argument type /// /// \param Info information about the template argument deduction itself /// /// \param Deduced the deduced template arguments /// /// \param TDF bitwise OR of the TemplateDeductionFlags bits that describe /// how template argument deduction is performed. /// /// \param PartialOrdering Whether we're performing template argument deduction /// in the context of partial ordering (C++0x [temp.deduct.partial]). /// /// \returns the result of template argument deduction so far. Note that a /// "success" result means that template argument deduction has not yet failed, /// but it may still fail, later, for other reasons. static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(Sema &S, TemplateParameterList *TemplateParams, QualType ParamIn, QualType ArgIn, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced, unsigned TDF, bool PartialOrdering, bool DeducedFromArrayBound) { // We only want to look at the canonical types, since typedefs and // sugar are not part of template argument deduction. QualType Param = S.Context.getCanonicalType(ParamIn); QualType Arg = S.Context.getCanonicalType(ArgIn); // If the argument type is a pack expansion, look at its pattern. // This isn't explicitly called out if (const PackExpansionType *ArgExpansion = dyn_cast(Arg)) Arg = ArgExpansion->getPattern(); if (PartialOrdering) { // C++11 [temp.deduct.partial]p5: // Before the partial ordering is done, certain transformations are // performed on the types used for partial ordering: // - If P is a reference type, P is replaced by the type referred to. const ReferenceType *ParamRef = Param->getAs(); if (ParamRef) Param = ParamRef->getPointeeType(); // - If A is a reference type, A is replaced by the type referred to. 
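// A hypothetical sketch of the forwarding-reference rule checked by
// isForwardingReference() above:
//
//   template <typename T> void fwd(T &&p);
//   int n = 0;
//   fwd(n);    // lvalue argument: T deduced as int&, p has type int&
//   fwd(42);   // rvalue argument: T deduced as int,  p has type int&&
//
// For an implicit deduction guide, the class template's own parameters do
// not count as forwarding references, which is what getFirstInnerIndex()
// accounts for.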
const ReferenceType *ArgRef = Arg->getAs(); if (ArgRef) Arg = ArgRef->getPointeeType(); if (ParamRef && ArgRef && S.Context.hasSameUnqualifiedType(Param, Arg)) { // C++11 [temp.deduct.partial]p9: // If, for a given type, deduction succeeds in both directions (i.e., // the types are identical after the transformations above) and both // P and A were reference types [...]: // - if [one type] was an lvalue reference and [the other type] was // not, [the other type] is not considered to be at least as // specialized as [the first type] // - if [one type] is more cv-qualified than [the other type], // [the other type] is not considered to be at least as specialized // as [the first type] // Objective-C ARC adds: // - [one type] has non-trivial lifetime, [the other type] has // __unsafe_unretained lifetime, and the types are otherwise // identical // // A is "considered to be at least as specialized" as P iff deduction // succeeds, so we model this as a deduction failure. Note that // [the first type] is P and [the other type] is A here; the standard // gets this backwards. Qualifiers ParamQuals = Param.getQualifiers(); Qualifiers ArgQuals = Arg.getQualifiers(); if ((ParamRef->isLValueReferenceType() && !ArgRef->isLValueReferenceType()) || ParamQuals.isStrictSupersetOf(ArgQuals) || (ParamQuals.hasNonTrivialObjCLifetime() && ArgQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone && ParamQuals.withoutObjCLifetime() == ArgQuals.withoutObjCLifetime())) { Info.FirstArg = TemplateArgument(ParamIn); Info.SecondArg = TemplateArgument(ArgIn); return Sema::TDK_NonDeducedMismatch; } } // C++11 [temp.deduct.partial]p7: // Remove any top-level cv-qualifiers: // - If P is a cv-qualified type, P is replaced by the cv-unqualified // version of P. Param = Param.getUnqualifiedType(); // - If A is a cv-qualified type, A is replaced by the cv-unqualified // version of A. Arg = Arg.getUnqualifiedType(); } else { // C++0x [temp.deduct.call]p4 bullet 1: // - If the original P is a reference type, the deduced A (i.e., the type // referred to by the reference) can be more cv-qualified than the // transformed A. if (TDF & TDF_ParamWithReferenceType) { Qualifiers Quals; QualType UnqualParam = S.Context.getUnqualifiedArrayType(Param, Quals); Quals.setCVRQualifiers(Quals.getCVRQualifiers() & Arg.getCVRQualifiers()); Param = S.Context.getQualifiedType(UnqualParam, Quals); } if ((TDF & TDF_TopLevelParameterTypeList) && !Param->isFunctionType()) { // C++0x [temp.deduct.type]p10: // If P and A are function types that originated from deduction when // taking the address of a function template (14.8.2.2) or when deducing // template arguments from a function declaration (14.8.2.6) and Pi and // Ai are parameters of the top-level parameter-type-list of P and A, // respectively, Pi is adjusted if it is a forwarding reference and Ai // is an lvalue reference, in // which case the type of Pi is changed to be the template parameter // type (i.e., T&& is changed to simply T). [ Note: As a result, when // Pi is T&& and Ai is X&, the adjusted Pi will be T, causing T to be // deduced as X&. 
- end note ] TDF &= ~TDF_TopLevelParameterTypeList; if (isForwardingReference(Param, 0) && Arg->isLValueReferenceType()) Param = Param->getPointeeType(); } } // C++ [temp.deduct.type]p9: // A template type argument T, a template template argument TT or a // template non-type argument i can be deduced if P and A have one of // the following forms: // // T // cv-list T if (const TemplateTypeParmType *TemplateTypeParm = Param->getAs()) { // Just skip any attempts to deduce from a placeholder type or a parameter // at a different depth. if (Arg->isPlaceholderType() || Info.getDeducedDepth() != TemplateTypeParm->getDepth()) return Sema::TDK_Success; unsigned Index = TemplateTypeParm->getIndex(); bool RecanonicalizeArg = false; // If the argument type is an array type, move the qualifiers up to the // top level, so they can be matched with the qualifiers on the parameter. if (isa(Arg)) { Qualifiers Quals; Arg = S.Context.getUnqualifiedArrayType(Arg, Quals); if (Quals) { Arg = S.Context.getQualifiedType(Arg, Quals); RecanonicalizeArg = true; } } // The argument type can not be less qualified than the parameter // type. if (!(TDF & TDF_IgnoreQualifiers) && hasInconsistentOrSupersetQualifiersOf(Param, Arg)) { Info.Param = cast(TemplateParams->getParam(Index)); Info.FirstArg = TemplateArgument(Param); Info.SecondArg = TemplateArgument(Arg); return Sema::TDK_Underqualified; } // Do not match a function type with a cv-qualified type. // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1584 if (Arg->isFunctionType() && Param.hasQualifiers()) { return Sema::TDK_NonDeducedMismatch; } assert(TemplateTypeParm->getDepth() == Info.getDeducedDepth() && "saw template type parameter with wrong depth"); assert(Arg != S.Context.OverloadTy && "Unresolved overloaded function"); QualType DeducedType = Arg; // Remove any qualifiers on the parameter from the deduced type. // We checked the qualifiers for consistency above. Qualifiers DeducedQs = DeducedType.getQualifiers(); Qualifiers ParamQs = Param.getQualifiers(); DeducedQs.removeCVRQualifiers(ParamQs.getCVRQualifiers()); if (ParamQs.hasObjCGCAttr()) DeducedQs.removeObjCGCAttr(); if (ParamQs.hasAddressSpace()) DeducedQs.removeAddressSpace(); if (ParamQs.hasObjCLifetime()) DeducedQs.removeObjCLifetime(); // Objective-C ARC: // If template deduction would produce a lifetime qualifier on a type // that is not a lifetime type, template argument deduction fails. if (ParamQs.hasObjCLifetime() && !DeducedType->isObjCLifetimeType() && !DeducedType->isDependentType()) { Info.Param = cast(TemplateParams->getParam(Index)); Info.FirstArg = TemplateArgument(Param); Info.SecondArg = TemplateArgument(Arg); return Sema::TDK_Underqualified; } // Objective-C ARC: // If template deduction would produce an argument type with lifetime type // but no lifetime qualifier, the __strong lifetime qualifier is inferred. 
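// Hypothetical example of the qualifier stripping performed here: the
// cv-qualifiers written on P do not become part of the deduced type.
//
//   template <typename T> void take(const volatile T &p);
//   int i = 0;
//   const int ci = 0;
//   take(i);    // T deduced as int, not 'const volatile int'
//   take(ci);   // T still deduced as int; the 'const' is supplied by P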
if (S.getLangOpts().ObjCAutoRefCount && DeducedType->isObjCLifetimeType() && !DeducedQs.hasObjCLifetime()) DeducedQs.setObjCLifetime(Qualifiers::OCL_Strong); DeducedType = S.Context.getQualifiedType(DeducedType.getUnqualifiedType(), DeducedQs); if (RecanonicalizeArg) DeducedType = S.Context.getCanonicalType(DeducedType); DeducedTemplateArgument NewDeduced(DeducedType, DeducedFromArrayBound); DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context, Deduced[Index], NewDeduced); if (Result.isNull()) { Info.Param = cast(TemplateParams->getParam(Index)); Info.FirstArg = Deduced[Index]; Info.SecondArg = NewDeduced; return Sema::TDK_Inconsistent; } Deduced[Index] = Result; return Sema::TDK_Success; } // Set up the template argument deduction information for a failure. Info.FirstArg = TemplateArgument(ParamIn); Info.SecondArg = TemplateArgument(ArgIn); // If the parameter is an already-substituted template parameter // pack, do nothing: we don't know which of its arguments to look // at, so we have to wait until all of the parameter packs in this // expansion have arguments. if (isa(Param)) return Sema::TDK_Success; // Check the cv-qualifiers on the parameter and argument types. CanQualType CanParam = S.Context.getCanonicalType(Param); CanQualType CanArg = S.Context.getCanonicalType(Arg); if (!(TDF & TDF_IgnoreQualifiers)) { if (TDF & TDF_ParamWithReferenceType) { if (hasInconsistentOrSupersetQualifiersOf(Param, Arg)) return Sema::TDK_NonDeducedMismatch; } else if (TDF & TDF_ArgWithReferenceType) { // C++ [temp.deduct.conv]p4: // If the original A is a reference type, A can be more cv-qualified // than the deduced A if (!Arg.getQualifiers().compatiblyIncludes(Param.getQualifiers())) return Sema::TDK_NonDeducedMismatch; // Strip out all extra qualifiers from the argument to figure out the // type we're converting to, prior to the qualification conversion. Qualifiers Quals; Arg = S.Context.getUnqualifiedArrayType(Arg, Quals); Arg = S.Context.getQualifiedType(Arg, Param.getQualifiers()); } else if (!IsPossiblyOpaquelyQualifiedType(Param)) { if (Param.getCVRQualifiers() != Arg.getCVRQualifiers()) return Sema::TDK_NonDeducedMismatch; } // If the parameter type is not dependent, there is nothing to deduce. if (!Param->isDependentType()) { if (!(TDF & TDF_SkipNonDependent)) { bool NonDeduced = (TDF & TDF_AllowCompatibleFunctionType) ? !S.isSameOrCompatibleFunctionType(CanParam, CanArg) : Param != Arg; if (NonDeduced) { return Sema::TDK_NonDeducedMismatch; } } return Sema::TDK_Success; } } else if (!Param->isDependentType()) { CanQualType ParamUnqualType = CanParam.getUnqualifiedType(), ArgUnqualType = CanArg.getUnqualifiedType(); bool Success = (TDF & TDF_AllowCompatibleFunctionType) ? S.isSameOrCompatibleFunctionType(ParamUnqualType, ArgUnqualType) : ParamUnqualType == ArgUnqualType; if (Success) return Sema::TDK_Success; } switch (Param->getTypeClass()) { // Non-canonical types cannot appear here. #define NON_CANONICAL_TYPE(Class, Base) \ case Type::Class: llvm_unreachable("deducing non-canonical type: " #Class); #define TYPE(Class, Base) #include "clang/AST/TypeNodes.inc" case Type::TemplateTypeParm: case Type::SubstTemplateTypeParmPack: llvm_unreachable("Type nodes handled above"); // These types cannot be dependent, so simply check whether the types are // the same. 
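// Hypothetical example of the "non-dependent types must match exactly"
// check above, in a context that deduces against an entire function type:
//
//   template <typename T> void h(T *, int);
//   void (*ok)(int *, int)   = h;   // T deduced as int; 'int' matches
//   void (*bad)(int *, long) = h;   // error: 'long' vs 'int' is a
//                                   // non-deduced mismatch, not a conversion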
case Type::Builtin: case Type::VariableArray: case Type::Vector: case Type::FunctionNoProto: case Type::Record: case Type::Enum: case Type::ObjCObject: case Type::ObjCInterface: case Type::ObjCObjectPointer: case Type::ExtInt: if (TDF & TDF_SkipNonDependent) return Sema::TDK_Success; if (TDF & TDF_IgnoreQualifiers) { Param = Param.getUnqualifiedType(); Arg = Arg.getUnqualifiedType(); } return Param == Arg? Sema::TDK_Success : Sema::TDK_NonDeducedMismatch; // _Complex T [placeholder extension] case Type::Complex: if (const ComplexType *ComplexArg = Arg->getAs()) return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, cast(Param)->getElementType(), ComplexArg->getElementType(), Info, Deduced, TDF); return Sema::TDK_NonDeducedMismatch; // _Atomic T [extension] case Type::Atomic: if (const AtomicType *AtomicArg = Arg->getAs()) return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, cast(Param)->getValueType(), AtomicArg->getValueType(), Info, Deduced, TDF); return Sema::TDK_NonDeducedMismatch; // T * case Type::Pointer: { QualType PointeeType; if (const PointerType *PointerArg = Arg->getAs()) { PointeeType = PointerArg->getPointeeType(); } else if (const ObjCObjectPointerType *PointerArg = Arg->getAs()) { PointeeType = PointerArg->getPointeeType(); } else { return Sema::TDK_NonDeducedMismatch; } unsigned SubTDF = TDF & (TDF_IgnoreQualifiers | TDF_DerivedClass); return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, cast(Param)->getPointeeType(), PointeeType, Info, Deduced, SubTDF); } // T & case Type::LValueReference: { const LValueReferenceType *ReferenceArg = Arg->getAs(); if (!ReferenceArg) return Sema::TDK_NonDeducedMismatch; return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, cast(Param)->getPointeeType(), ReferenceArg->getPointeeType(), Info, Deduced, 0); } // T && [C++0x] case Type::RValueReference: { const RValueReferenceType *ReferenceArg = Arg->getAs(); if (!ReferenceArg) return Sema::TDK_NonDeducedMismatch; return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, cast(Param)->getPointeeType(), ReferenceArg->getPointeeType(), Info, Deduced, 0); } // T [] (implied, but not stated explicitly) case Type::IncompleteArray: { const IncompleteArrayType *IncompleteArrayArg = S.Context.getAsIncompleteArrayType(Arg); if (!IncompleteArrayArg) return Sema::TDK_NonDeducedMismatch; unsigned SubTDF = TDF & TDF_IgnoreQualifiers; return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, S.Context.getAsIncompleteArrayType(Param)->getElementType(), IncompleteArrayArg->getElementType(), Info, Deduced, SubTDF); } // T [integer-constant] case Type::ConstantArray: { const ConstantArrayType *ConstantArrayArg = S.Context.getAsConstantArrayType(Arg); if (!ConstantArrayArg) return Sema::TDK_NonDeducedMismatch; const ConstantArrayType *ConstantArrayParm = S.Context.getAsConstantArrayType(Param); if (ConstantArrayArg->getSize() != ConstantArrayParm->getSize()) return Sema::TDK_NonDeducedMismatch; unsigned SubTDF = TDF & TDF_IgnoreQualifiers; return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ConstantArrayParm->getElementType(), ConstantArrayArg->getElementType(), Info, Deduced, SubTDF); } // type [i] case Type::DependentSizedArray: { const ArrayType *ArrayArg = S.Context.getAsArrayType(Arg); if (!ArrayArg) return Sema::TDK_NonDeducedMismatch; unsigned SubTDF = TDF & TDF_IgnoreQualifiers; // Check the element type of the arrays const DependentSizedArrayType *DependentArrayParm = S.Context.getAsDependentSizedArrayType(Param); if (Sema::TemplateDeductionResult Result = 
DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, DependentArrayParm->getElementType(), ArrayArg->getElementType(), Info, Deduced, SubTDF)) return Result; // Determine the array bound is something we can deduce. NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(Info, DependentArrayParm->getSizeExpr()); if (!NTTP) return Sema::TDK_Success; // We can perform template argument deduction for the given non-type // template parameter. assert(NTTP->getDepth() == Info.getDeducedDepth() && "saw non-type template parameter with wrong depth"); if (const ConstantArrayType *ConstantArrayArg = dyn_cast(ArrayArg)) { llvm::APSInt Size(ConstantArrayArg->getSize()); return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, Size, S.Context.getSizeType(), /*ArrayBound=*/true, Info, Deduced); } if (const DependentSizedArrayType *DependentArrayArg = dyn_cast(ArrayArg)) if (DependentArrayArg->getSizeExpr()) return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, DependentArrayArg->getSizeExpr(), Info, Deduced); // Incomplete type does not match a dependently-sized array type return Sema::TDK_NonDeducedMismatch; } // type(*)(T) // T(*)() // T(*)(T) case Type::FunctionProto: { unsigned SubTDF = TDF & TDF_TopLevelParameterTypeList; const FunctionProtoType *FunctionProtoArg = dyn_cast(Arg); if (!FunctionProtoArg) return Sema::TDK_NonDeducedMismatch; const FunctionProtoType *FunctionProtoParam = cast(Param); if (FunctionProtoParam->getMethodQuals() != FunctionProtoArg->getMethodQuals() || FunctionProtoParam->getRefQualifier() != FunctionProtoArg->getRefQualifier() || FunctionProtoParam->isVariadic() != FunctionProtoArg->isVariadic()) return Sema::TDK_NonDeducedMismatch; // Check return types. if (auto Result = DeduceTemplateArgumentsByTypeMatch( S, TemplateParams, FunctionProtoParam->getReturnType(), FunctionProtoArg->getReturnType(), Info, Deduced, 0)) return Result; // Check parameter types. if (auto Result = DeduceTemplateArguments( S, TemplateParams, FunctionProtoParam->param_type_begin(), FunctionProtoParam->getNumParams(), FunctionProtoArg->param_type_begin(), FunctionProtoArg->getNumParams(), Info, Deduced, SubTDF)) return Result; if (TDF & TDF_AllowCompatibleFunctionType) return Sema::TDK_Success; // FIXME: Per core-2016/10/1019 (no corresponding core issue yet), permit // deducing through the noexcept-specifier if it's part of the canonical // type. libstdc++ relies on this. Expr *NoexceptExpr = FunctionProtoParam->getNoexceptExpr(); if (NonTypeTemplateParmDecl *NTTP = NoexceptExpr ? getDeducedParameterFromExpr(Info, NoexceptExpr) : nullptr) { assert(NTTP->getDepth() == Info.getDeducedDepth() && "saw non-type template parameter with wrong depth"); llvm::APSInt Noexcept(1); switch (FunctionProtoArg->canThrow()) { case CT_Cannot: Noexcept = 1; LLVM_FALLTHROUGH; case CT_Can: // We give E in noexcept(E) the "deduced from array bound" treatment. // FIXME: Should we? return DeduceNonTypeTemplateArgument( S, TemplateParams, NTTP, Noexcept, S.Context.BoolTy, /*ArrayBound*/true, Info, Deduced); case CT_Dependent: if (Expr *ArgNoexceptExpr = FunctionProtoArg->getNoexceptExpr()) return DeduceNonTypeTemplateArgument( S, TemplateParams, NTTP, ArgNoexceptExpr, Info, Deduced); // Can't deduce anything from throw(T...). break; } } // FIXME: Detect non-deduced exception specification mismatches? // // Careful about [temp.deduct.call] and [temp.deduct.conv], which allow // top-level differences in noexcept-specifications. 
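// Hypothetical example of deducing a non-type parameter from an array
// bound (the ArrayBound=true paths above):
//
//   template <typename T, unsigned N> void len(T (&)[N]);
//   int a[10];
//   len(a);   // T deduced as int, N deduced as 10 from the array bound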
return Sema::TDK_Success; } case Type::InjectedClassName: // Treat a template's injected-class-name as if the template // specialization type had been used. Param = cast(Param) ->getInjectedSpecializationType(); assert(isa(Param) && "injected class name is not a template specialization type"); LLVM_FALLTHROUGH; // template-name (where template-name refers to a class template) // template-name // TT // TT // TT<> case Type::TemplateSpecialization: { const TemplateSpecializationType *SpecParam = cast(Param); // When Arg cannot be a derived class, we can just try to deduce template // arguments from the template-id. const RecordType *RecordT = Arg->getAs(); if (!(TDF & TDF_DerivedClass) || !RecordT) return DeduceTemplateArguments(S, TemplateParams, SpecParam, Arg, Info, Deduced); SmallVector DeducedOrig(Deduced.begin(), Deduced.end()); Sema::TemplateDeductionResult Result = DeduceTemplateArguments( S, TemplateParams, SpecParam, Arg, Info, Deduced); if (Result == Sema::TDK_Success) return Result; // We cannot inspect base classes as part of deduction when the type // is incomplete, so either instantiate any templates necessary to // complete the type, or skip over it if it cannot be completed. if (!S.isCompleteType(Info.getLocation(), Arg)) return Result; // C++14 [temp.deduct.call] p4b3: // If P is a class and P has the form simple-template-id, then the // transformed A can be a derived class of the deduced A. Likewise if // P is a pointer to a class of the form simple-template-id, the // transformed A can be a pointer to a derived class pointed to by the // deduced A. // // These alternatives are considered only if type deduction would // otherwise fail. If they yield more than one possible deduced A, the // type deduction fails. // Reset the incorrectly deduced argument from above. Deduced = DeducedOrig; // Use data recursion to crawl through the list of base classes. // Visited contains the set of nodes we have already visited, while // ToVisit is our stack of records that we still need to visit. llvm::SmallPtrSet Visited; SmallVector ToVisit; ToVisit.push_back(RecordT); bool Successful = false; SmallVector SuccessfulDeduced; while (!ToVisit.empty()) { // Retrieve the next class in the inheritance hierarchy. const RecordType *NextT = ToVisit.pop_back_val(); // If we have already seen this type, skip it. if (!Visited.insert(NextT).second) continue; // If this is a base class, try to perform template argument // deduction from it. if (NextT != RecordT) { TemplateDeductionInfo BaseInfo(TemplateDeductionInfo::ForBase, Info); Sema::TemplateDeductionResult BaseResult = DeduceTemplateArguments(S, TemplateParams, SpecParam, QualType(NextT, 0), BaseInfo, Deduced); // If template argument deduction for this base was successful, // note that we had some success. Otherwise, ignore any deductions // from this base class. if (BaseResult == Sema::TDK_Success) { // If we've already seen some success, then deduction fails due to // an ambiguity (temp.deduct.call p5). 
if (Successful) return Sema::TDK_MiscellaneousDeductionFailure; Successful = true; std::swap(SuccessfulDeduced, Deduced); Info.Param = BaseInfo.Param; Info.FirstArg = BaseInfo.FirstArg; Info.SecondArg = BaseInfo.SecondArg; } Deduced = DeducedOrig; } // Visit base classes CXXRecordDecl *Next = cast(NextT->getDecl()); for (const auto &Base : Next->bases()) { assert(Base.getType()->isRecordType() && "Base class that isn't a record?"); ToVisit.push_back(Base.getType()->getAs()); } } if (Successful) { std::swap(SuccessfulDeduced, Deduced); return Sema::TDK_Success; } return Result; } // T type::* // T T::* // T (type::*)() // type (T::*)() // type (type::*)(T) // type (T::*)(T) // T (type::*)(T) // T (T::*)() // T (T::*)(T) case Type::MemberPointer: { const MemberPointerType *MemPtrParam = cast(Param); const MemberPointerType *MemPtrArg = dyn_cast(Arg); if (!MemPtrArg) return Sema::TDK_NonDeducedMismatch; QualType ParamPointeeType = MemPtrParam->getPointeeType(); if (ParamPointeeType->isFunctionType()) S.adjustMemberFunctionCC(ParamPointeeType, /*IsStatic=*/true, /*IsCtorOrDtor=*/false, Info.getLocation()); QualType ArgPointeeType = MemPtrArg->getPointeeType(); if (ArgPointeeType->isFunctionType()) S.adjustMemberFunctionCC(ArgPointeeType, /*IsStatic=*/true, /*IsCtorOrDtor=*/false, Info.getLocation()); if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamPointeeType, ArgPointeeType, Info, Deduced, TDF & TDF_IgnoreQualifiers)) return Result; return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, QualType(MemPtrParam->getClass(), 0), QualType(MemPtrArg->getClass(), 0), Info, Deduced, TDF & TDF_IgnoreQualifiers); } // (clang extension) // // type(^)(T) // T(^)() // T(^)(T) case Type::BlockPointer: { const BlockPointerType *BlockPtrParam = cast(Param); const BlockPointerType *BlockPtrArg = dyn_cast(Arg); if (!BlockPtrArg) return Sema::TDK_NonDeducedMismatch; return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, BlockPtrParam->getPointeeType(), BlockPtrArg->getPointeeType(), Info, Deduced, 0); } // (clang extension) // // T __attribute__(((ext_vector_type()))) case Type::ExtVector: { const ExtVectorType *VectorParam = cast(Param); if (const ExtVectorType *VectorArg = dyn_cast(Arg)) { // Make sure that the vectors have the same number of elements. if (VectorParam->getNumElements() != VectorArg->getNumElements()) return Sema::TDK_NonDeducedMismatch; // Perform deduction on the element types. return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, VectorParam->getElementType(), VectorArg->getElementType(), Info, Deduced, TDF); } if (const DependentSizedExtVectorType *VectorArg = dyn_cast(Arg)) { // We can't check the number of elements, since the argument has a // dependent number of elements. This can only occur during partial // ordering. // Perform deduction on the element types. return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, VectorParam->getElementType(), VectorArg->getElementType(), Info, Deduced, TDF); } return Sema::TDK_NonDeducedMismatch; } case Type::DependentVector: { const auto *VectorParam = cast(Param); if (const auto *VectorArg = dyn_cast(Arg)) { // Perform deduction on the element types. if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch( S, TemplateParams, VectorParam->getElementType(), VectorArg->getElementType(), Info, Deduced, TDF)) return Result; // Perform deduction on the vector size, if we can. 
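// Hypothetical example of the base-class walk implemented above
// (C++14 [temp.deduct.call]p4b3):
//
//   template <typename T> struct Base {};
//   struct Derived : Base<int> {};
//   template <typename T> void f(Base<T> *);
//   f(static_cast<Derived *>(nullptr));   // T deduced as int via the base
//
//   struct Ambig : Base<int>, Base<long> {};
//   f(static_cast<Ambig *>(nullptr));     // error: two bases match, so the
//                                         // deduction is ambiguous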
NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr()); if (!NTTP) return Sema::TDK_Success; llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false); ArgSize = VectorArg->getNumElements(); // Note that we use the "array bound" rules here; just like in that // case, we don't have any particular type for the vector size, but // we can provide one if necessary. return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, ArgSize, S.Context.UnsignedIntTy, true, Info, Deduced); } if (const auto *VectorArg = dyn_cast(Arg)) { // Perform deduction on the element types. if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch( S, TemplateParams, VectorParam->getElementType(), VectorArg->getElementType(), Info, Deduced, TDF)) return Result; // Perform deduction on the vector size, if we can. NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr( Info, VectorParam->getSizeExpr()); if (!NTTP) return Sema::TDK_Success; return DeduceNonTypeTemplateArgument( S, TemplateParams, NTTP, VectorArg->getSizeExpr(), Info, Deduced); } return Sema::TDK_NonDeducedMismatch; } // (clang extension) // // T __attribute__(((ext_vector_type(N)))) case Type::DependentSizedExtVector: { const DependentSizedExtVectorType *VectorParam = cast(Param); if (const ExtVectorType *VectorArg = dyn_cast(Arg)) { // Perform deduction on the element types. if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, VectorParam->getElementType(), VectorArg->getElementType(), Info, Deduced, TDF)) return Result; // Perform deduction on the vector size, if we can. NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr()); if (!NTTP) return Sema::TDK_Success; llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false); ArgSize = VectorArg->getNumElements(); // Note that we use the "array bound" rules here; just like in that // case, we don't have any particular type for the vector size, but // we can provide one if necessary. return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, ArgSize, S.Context.IntTy, true, Info, Deduced); } if (const DependentSizedExtVectorType *VectorArg = dyn_cast(Arg)) { // Perform deduction on the element types. if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, VectorParam->getElementType(), VectorArg->getElementType(), Info, Deduced, TDF)) return Result; // Perform deduction on the vector size, if we can. NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr()); if (!NTTP) return Sema::TDK_Success; return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, VectorArg->getSizeExpr(), Info, Deduced); } return Sema::TDK_NonDeducedMismatch; } // (clang extension) // // T __attribute__((matrix_type(, // ))) case Type::ConstantMatrix: { const ConstantMatrixType *MatrixArg = dyn_cast(Arg); if (!MatrixArg) return Sema::TDK_NonDeducedMismatch; const ConstantMatrixType *MatrixParam = cast(Param); // Check that the dimensions are the same if (MatrixParam->getNumRows() != MatrixArg->getNumRows() || MatrixParam->getNumColumns() != MatrixArg->getNumColumns()) { return Sema::TDK_NonDeducedMismatch; } // Perform deduction on element types. 
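// Hypothetical sketch (clang extension, assuming an alias template is used
// to spell the vector type) of deducing a vector's element type:
//
//   template <typename T> using vec4 = T __attribute__((ext_vector_type(4)));
//   template <typename T> void norm(vec4<T>);
//   vec4<float> v;
//   norm(v);   // T deduced as float from the vector's element type
//
// A dependent element count is deduced with the "array bound" rules, as in
// the DeduceNonTypeTemplateArgument calls above.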
return DeduceTemplateArgumentsByTypeMatch( S, TemplateParams, MatrixParam->getElementType(), MatrixArg->getElementType(), Info, Deduced, TDF); } case Type::DependentSizedMatrix: { const MatrixType *MatrixArg = dyn_cast(Arg); if (!MatrixArg) return Sema::TDK_NonDeducedMismatch; // Check the element type of the matrixes. const DependentSizedMatrixType *MatrixParam = cast(Param); if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch( S, TemplateParams, MatrixParam->getElementType(), MatrixArg->getElementType(), Info, Deduced, TDF)) return Result; // Try to deduce a matrix dimension. auto DeduceMatrixArg = [&S, &Info, &Deduced, &TemplateParams]( Expr *ParamExpr, const MatrixType *Arg, unsigned (ConstantMatrixType::*GetArgDimension)() const, Expr *(DependentSizedMatrixType::*GetArgDimensionExpr)() const) { const auto *ArgConstMatrix = dyn_cast(Arg); const auto *ArgDepMatrix = dyn_cast(Arg); if (!ParamExpr->isValueDependent()) { llvm::APSInt ParamConst( S.Context.getTypeSize(S.Context.getSizeType())); if (!ParamExpr->isIntegerConstantExpr(ParamConst, S.Context)) return Sema::TDK_NonDeducedMismatch; if (ArgConstMatrix) { if ((ArgConstMatrix->*GetArgDimension)() == ParamConst) return Sema::TDK_Success; return Sema::TDK_NonDeducedMismatch; } Expr *ArgExpr = (ArgDepMatrix->*GetArgDimensionExpr)(); llvm::APSInt ArgConst( S.Context.getTypeSize(S.Context.getSizeType())); if (!ArgExpr->isValueDependent() && ArgExpr->isIntegerConstantExpr(ArgConst, S.Context) && ArgConst == ParamConst) return Sema::TDK_Success; return Sema::TDK_NonDeducedMismatch; } NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(Info, ParamExpr); if (!NTTP) return Sema::TDK_Success; if (ArgConstMatrix) { llvm::APSInt ArgConst( S.Context.getTypeSize(S.Context.getSizeType())); ArgConst = (ArgConstMatrix->*GetArgDimension)(); return DeduceNonTypeTemplateArgument( S, TemplateParams, NTTP, ArgConst, S.Context.getSizeType(), /*ArrayBound=*/true, Info, Deduced); } return DeduceNonTypeTemplateArgument( S, TemplateParams, NTTP, (ArgDepMatrix->*GetArgDimensionExpr)(), Info, Deduced); }; auto Result = DeduceMatrixArg(MatrixParam->getRowExpr(), MatrixArg, &ConstantMatrixType::getNumRows, &DependentSizedMatrixType::getRowExpr); if (Result) return Result; return DeduceMatrixArg(MatrixParam->getColumnExpr(), MatrixArg, &ConstantMatrixType::getNumColumns, &DependentSizedMatrixType::getColumnExpr); } // (clang extension) // // T __attribute__(((address_space(N)))) case Type::DependentAddressSpace: { const DependentAddressSpaceType *AddressSpaceParam = cast(Param); if (const DependentAddressSpaceType *AddressSpaceArg = dyn_cast(Arg)) { // Perform deduction on the pointer type. if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch( S, TemplateParams, AddressSpaceParam->getPointeeType(), AddressSpaceArg->getPointeeType(), Info, Deduced, TDF)) return Result; // Perform deduction on the address space, if we can. NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr( Info, AddressSpaceParam->getAddrSpaceExpr()); if (!NTTP) return Sema::TDK_Success; return DeduceNonTypeTemplateArgument( S, TemplateParams, NTTP, AddressSpaceArg->getAddrSpaceExpr(), Info, Deduced); } if (isTargetAddressSpace(Arg.getAddressSpace())) { llvm::APSInt ArgAddressSpace(S.Context.getTypeSize(S.Context.IntTy), false); ArgAddressSpace = toTargetAddressSpace(Arg.getAddressSpace()); // Perform deduction on the pointer types. 
if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch( S, TemplateParams, AddressSpaceParam->getPointeeType(), S.Context.removeAddrSpaceQualType(Arg), Info, Deduced, TDF)) return Result; // Perform deduction on the address space, if we can. NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr( Info, AddressSpaceParam->getAddrSpaceExpr()); if (!NTTP) return Sema::TDK_Success; return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, ArgAddressSpace, S.Context.IntTy, true, Info, Deduced); } return Sema::TDK_NonDeducedMismatch; } case Type::DependentExtInt: { const auto *IntParam = cast(Param); if (const auto *IntArg = dyn_cast(Arg)){ if (IntParam->isUnsigned() != IntArg->isUnsigned()) return Sema::TDK_NonDeducedMismatch; NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(Info, IntParam->getNumBitsExpr()); if (!NTTP) return Sema::TDK_Success; llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false); ArgSize = IntArg->getNumBits(); return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, ArgSize, S.Context.IntTy, true, Info, Deduced); } if (const auto *IntArg = dyn_cast(Arg)) { if (IntParam->isUnsigned() != IntArg->isUnsigned()) return Sema::TDK_NonDeducedMismatch; return Sema::TDK_Success; } return Sema::TDK_NonDeducedMismatch; } case Type::TypeOfExpr: case Type::TypeOf: case Type::DependentName: case Type::UnresolvedUsing: case Type::Decltype: case Type::UnaryTransform: case Type::Auto: case Type::DeducedTemplateSpecialization: case Type::DependentTemplateSpecialization: case Type::PackExpansion: case Type::Pipe: // No template argument deduction for these types return Sema::TDK_Success; } llvm_unreachable("Invalid Type Class!"); } static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, const TemplateArgument &Param, TemplateArgument Arg, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced) { // If the template argument is a pack expansion, perform template argument // deduction against the pattern of that expansion. This only occurs during // partial ordering. 
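// Hypothetical example for the dependent _ExtInt case above (a clang
// extension):
//
//   template <unsigned N> void bits(_ExtInt(N));
//   _ExtInt(17) x = 0;
//   bits(x);   // N deduced as 17 from the argument's bit width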
if (Arg.isPackExpansion()) Arg = Arg.getPackExpansionPattern(); switch (Param.getKind()) { case TemplateArgument::Null: llvm_unreachable("Null template argument in parameter list"); case TemplateArgument::Type: if (Arg.getKind() == TemplateArgument::Type) return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Param.getAsType(), Arg.getAsType(), Info, Deduced, 0); Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; case TemplateArgument::Template: if (Arg.getKind() == TemplateArgument::Template) return DeduceTemplateArguments(S, TemplateParams, Param.getAsTemplate(), Arg.getAsTemplate(), Info, Deduced); Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; case TemplateArgument::TemplateExpansion: llvm_unreachable("caller should handle pack expansions"); case TemplateArgument::Declaration: if (Arg.getKind() == TemplateArgument::Declaration && isSameDeclaration(Param.getAsDecl(), Arg.getAsDecl())) return Sema::TDK_Success; Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; case TemplateArgument::NullPtr: if (Arg.getKind() == TemplateArgument::NullPtr && S.Context.hasSameType(Param.getNullPtrType(), Arg.getNullPtrType())) return Sema::TDK_Success; Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; case TemplateArgument::Integral: if (Arg.getKind() == TemplateArgument::Integral) { if (hasSameExtendedValue(Param.getAsIntegral(), Arg.getAsIntegral())) return Sema::TDK_Success; Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; } if (Arg.getKind() == TemplateArgument::Expression) { Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; } Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; case TemplateArgument::Expression: if (NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(Info, Param.getAsExpr())) { if (Arg.getKind() == TemplateArgument::Integral) return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, Arg.getAsIntegral(), Arg.getIntegralType(), /*ArrayBound=*/false, Info, Deduced); if (Arg.getKind() == TemplateArgument::NullPtr) return DeduceNullPtrTemplateArgument(S, TemplateParams, NTTP, Arg.getNullPtrType(), Info, Deduced); if (Arg.getKind() == TemplateArgument::Expression) return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, Arg.getAsExpr(), Info, Deduced); if (Arg.getKind() == TemplateArgument::Declaration) return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, Arg.getAsDecl(), Arg.getParamTypeForDecl(), Info, Deduced); Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; } // Can't deduce anything, but that's okay. return Sema::TDK_Success; case TemplateArgument::Pack: llvm_unreachable("Argument packs should be expanded by the caller!"); } llvm_unreachable("Invalid TemplateArgument Kind!"); } /// Determine whether there is a template argument to be used for /// deduction. /// /// This routine "expands" argument packs in-place, overriding its input /// parameters so that \c Args[ArgIdx] will be the available template argument. /// /// \returns true if there is another template argument (which will be at /// \c Args[ArgIdx]), false otherwise. 
static bool hasTemplateArgumentForDeduction(ArrayRef &Args, unsigned &ArgIdx) { if (ArgIdx == Args.size()) return false; const TemplateArgument &Arg = Args[ArgIdx]; if (Arg.getKind() != TemplateArgument::Pack) return true; assert(ArgIdx == Args.size() - 1 && "Pack not at the end of argument list?"); Args = Arg.pack_elements(); ArgIdx = 0; return ArgIdx < Args.size(); } /// Determine whether the given set of template arguments has a pack /// expansion that is not the last template argument. static bool hasPackExpansionBeforeEnd(ArrayRef Args) { bool FoundPackExpansion = false; for (const auto &A : Args) { if (FoundPackExpansion) return true; if (A.getKind() == TemplateArgument::Pack) return hasPackExpansionBeforeEnd(A.pack_elements()); // FIXME: If this is a fixed-arity pack expansion from an outer level of // templates, it should not be treated as a pack expansion. if (A.isPackExpansion()) FoundPackExpansion = true; } return false; } static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, ArrayRef Params, ArrayRef Args, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced, bool NumberOfArgumentsMustMatch) { // C++0x [temp.deduct.type]p9: // If the template argument list of P contains a pack expansion that is not // the last template argument, the entire template argument list is a // non-deduced context. if (hasPackExpansionBeforeEnd(Params)) return Sema::TDK_Success; // C++0x [temp.deduct.type]p9: // If P has a form that contains or , then each argument Pi of the // respective template argument list P is compared with the corresponding // argument Ai of the corresponding template argument list of A. unsigned ArgIdx = 0, ParamIdx = 0; for (; hasTemplateArgumentForDeduction(Params, ParamIdx); ++ParamIdx) { if (!Params[ParamIdx].isPackExpansion()) { // The simple case: deduce template arguments by matching Pi and Ai. // Check whether we have enough arguments. if (!hasTemplateArgumentForDeduction(Args, ArgIdx)) return NumberOfArgumentsMustMatch ? Sema::TDK_MiscellaneousDeductionFailure : Sema::TDK_Success; // C++1z [temp.deduct.type]p9: // During partial ordering, if Ai was originally a pack expansion [and] // Pi is not a pack expansion, template argument deduction fails. if (Args[ArgIdx].isPackExpansion()) return Sema::TDK_MiscellaneousDeductionFailure; // Perform deduction for this Pi/Ai pair. if (Sema::TemplateDeductionResult Result = DeduceTemplateArguments(S, TemplateParams, Params[ParamIdx], Args[ArgIdx], Info, Deduced)) return Result; // Move to the next argument. ++ArgIdx; continue; } // The parameter is a pack expansion. // C++0x [temp.deduct.type]p9: // If Pi is a pack expansion, then the pattern of Pi is compared with // each remaining argument in the template argument list of A. Each // comparison deduces template arguments for subsequent positions in the // template parameter packs expanded by Pi. TemplateArgument Pattern = Params[ParamIdx].getPackExpansionPattern(); // Prepare to deduce the packs within the pattern. PackDeductionScope PackScope(S, TemplateParams, Deduced, Info, Pattern); // Keep track of the deduced template arguments for each parameter pack // expanded by this pack expansion (the outer index) and for each // template argument (the inner SmallVectors). for (; hasTemplateArgumentForDeduction(Args, ArgIdx) && PackScope.hasNextElement(); ++ArgIdx) { // Deduce template arguments from the pattern. 
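// Hypothetical example of deducing against a template argument list that
// ends in a pack expansion:
//
//   template <typename...> struct List {};
//   template <typename T, typename ...Rest> void head(List<T, Rest...>);
//   head(List<int, char, double>{});   // T = int, Rest = {char, double}
//
// A pack expansion that is not the last argument of P makes the entire
// list a non-deduced context, per hasPackExpansionBeforeEnd() above.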
if (Sema::TemplateDeductionResult Result = DeduceTemplateArguments(S, TemplateParams, Pattern, Args[ArgIdx], Info, Deduced)) return Result; PackScope.nextPackElement(); } // Build argument packs for each of the parameter packs expanded by this // pack expansion. if (auto Result = PackScope.finish()) return Result; } return Sema::TDK_Success; } static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, const TemplateArgumentList &ParamList, const TemplateArgumentList &ArgList, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced) { return DeduceTemplateArguments(S, TemplateParams, ParamList.asArray(), ArgList.asArray(), Info, Deduced, /*NumberOfArgumentsMustMatch*/false); } /// Determine whether two template arguments are the same. static bool isSameTemplateArg(ASTContext &Context, TemplateArgument X, const TemplateArgument &Y, bool PackExpansionMatchesPack = false) { // If we're checking deduced arguments (X) against original arguments (Y), // we will have flattened packs to non-expansions in X. if (PackExpansionMatchesPack && X.isPackExpansion() && !Y.isPackExpansion()) X = X.getPackExpansionPattern(); if (X.getKind() != Y.getKind()) return false; switch (X.getKind()) { case TemplateArgument::Null: llvm_unreachable("Comparing NULL template argument"); case TemplateArgument::Type: return Context.getCanonicalType(X.getAsType()) == Context.getCanonicalType(Y.getAsType()); case TemplateArgument::Declaration: return isSameDeclaration(X.getAsDecl(), Y.getAsDecl()); case TemplateArgument::NullPtr: return Context.hasSameType(X.getNullPtrType(), Y.getNullPtrType()); case TemplateArgument::Template: case TemplateArgument::TemplateExpansion: return Context.getCanonicalTemplateName( X.getAsTemplateOrTemplatePattern()).getAsVoidPointer() == Context.getCanonicalTemplateName( Y.getAsTemplateOrTemplatePattern()).getAsVoidPointer(); case TemplateArgument::Integral: return hasSameExtendedValue(X.getAsIntegral(), Y.getAsIntegral()); case TemplateArgument::Expression: { llvm::FoldingSetNodeID XID, YID; X.getAsExpr()->Profile(XID, Context, true); Y.getAsExpr()->Profile(YID, Context, true); return XID == YID; } case TemplateArgument::Pack: if (X.pack_size() != Y.pack_size()) return false; for (TemplateArgument::pack_iterator XP = X.pack_begin(), XPEnd = X.pack_end(), YP = Y.pack_begin(); XP != XPEnd; ++XP, ++YP) if (!isSameTemplateArg(Context, *XP, *YP, PackExpansionMatchesPack)) return false; return true; } llvm_unreachable("Invalid TemplateArgument Kind!"); } /// Allocate a TemplateArgumentLoc where all locations have /// been initialized to the given location. /// /// \param Arg The template argument we are producing template argument /// location information for. /// /// \param NTTPType For a declaration template argument, the type of /// the non-type template parameter that corresponds to this template /// argument. Can be null if no type sugar is available to add to the /// type from the template argument. /// /// \param Loc The source location to use for the resulting template /// argument. 
TemplateArgumentLoc Sema::getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc) { switch (Arg.getKind()) { case TemplateArgument::Null: llvm_unreachable("Can't get a NULL template argument here"); case TemplateArgument::Type: return TemplateArgumentLoc( Arg, Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc)); case TemplateArgument::Declaration: { if (NTTPType.isNull()) NTTPType = Arg.getParamTypeForDecl(); Expr *E = BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc) .getAs(); return TemplateArgumentLoc(TemplateArgument(E), E); } case TemplateArgument::NullPtr: { if (NTTPType.isNull()) NTTPType = Arg.getNullPtrType(); Expr *E = BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc) .getAs(); return TemplateArgumentLoc(TemplateArgument(NTTPType, /*isNullPtr*/true), E); } case TemplateArgument::Integral: { Expr *E = BuildExpressionFromIntegralTemplateArgument(Arg, Loc).getAs(); return TemplateArgumentLoc(TemplateArgument(E), E); } case TemplateArgument::Template: case TemplateArgument::TemplateExpansion: { NestedNameSpecifierLocBuilder Builder; TemplateName Template = Arg.getAsTemplateOrTemplatePattern(); if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) Builder.MakeTrivial(Context, DTN->getQualifier(), Loc); else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) Builder.MakeTrivial(Context, QTN->getQualifier(), Loc); if (Arg.getKind() == TemplateArgument::Template) return TemplateArgumentLoc(Arg, Builder.getWithLocInContext(Context), Loc); return TemplateArgumentLoc(Arg, Builder.getWithLocInContext(Context), Loc, Loc); } case TemplateArgument::Expression: return TemplateArgumentLoc(Arg, Arg.getAsExpr()); case TemplateArgument::Pack: return TemplateArgumentLoc(Arg, TemplateArgumentLocInfo()); } llvm_unreachable("Invalid TemplateArgument Kind!"); } TemplateArgumentLoc Sema::getIdentityTemplateArgumentLoc(NamedDecl *TemplateParm, SourceLocation Location) { return getTrivialTemplateArgumentLoc( Context.getInjectedTemplateArg(TemplateParm), QualType(), Location); } /// Convert the given deduced template argument and add it to the set of /// fully-converted template arguments. static bool ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param, DeducedTemplateArgument Arg, NamedDecl *Template, TemplateDeductionInfo &Info, bool IsDeduced, SmallVectorImpl &Output) { auto ConvertArg = [&](DeducedTemplateArgument Arg, unsigned ArgumentPackIndex) { // Convert the deduced template argument into a template // argument that we can check, almost as if the user had written // the template argument explicitly. TemplateArgumentLoc ArgLoc = S.getTrivialTemplateArgumentLoc(Arg, QualType(), Info.getLocation()); // Check the template argument, converting it as necessary. return S.CheckTemplateArgument( Param, ArgLoc, Template, Template->getLocation(), Template->getSourceRange().getEnd(), ArgumentPackIndex, Output, IsDeduced ? (Arg.wasDeducedFromArrayBound() ? Sema::CTAK_DeducedFromArrayBound : Sema::CTAK_Deduced) : Sema::CTAK_Specified); }; if (Arg.getKind() == TemplateArgument::Pack) { // This is a template argument pack, so check each of its arguments against // the template parameter. SmallVector PackedArgsBuilder; for (const auto &P : Arg.pack_elements()) { // When converting the deduced template argument, append it to the // general output list. We need to do this so that the template argument // checking logic has all of the prior template arguments available. 
DeducedTemplateArgument InnerArg(P); InnerArg.setDeducedFromArrayBound(Arg.wasDeducedFromArrayBound()); assert(InnerArg.getKind() != TemplateArgument::Pack && "deduced nested pack"); if (P.isNull()) { // We deduced arguments for some elements of this pack, but not for // all of them. This happens if we get a conditionally-non-deduced // context in a pack expansion (such as an overload set in one of the // arguments). S.Diag(Param->getLocation(), diag::err_template_arg_deduced_incomplete_pack) << Arg << Param; return true; } if (ConvertArg(InnerArg, PackedArgsBuilder.size())) return true; // Move the converted template argument into our argument pack. PackedArgsBuilder.push_back(Output.pop_back_val()); } // If the pack is empty, we still need to substitute into the parameter // itself, in case that substitution fails. if (PackedArgsBuilder.empty()) { LocalInstantiationScope Scope(S); TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Output); MultiLevelTemplateArgumentList Args(TemplateArgs); if (auto *NTTP = dyn_cast(Param)) { Sema::InstantiatingTemplate Inst(S, Template->getLocation(), Template, NTTP, Output, Template->getSourceRange()); if (Inst.isInvalid() || S.SubstType(NTTP->getType(), Args, NTTP->getLocation(), NTTP->getDeclName()).isNull()) return true; } else if (auto *TTP = dyn_cast(Param)) { Sema::InstantiatingTemplate Inst(S, Template->getLocation(), Template, TTP, Output, Template->getSourceRange()); if (Inst.isInvalid() || !S.SubstDecl(TTP, S.CurContext, Args)) return true; } // For type parameters, no substitution is ever required. } // Create the resulting argument pack. Output.push_back( TemplateArgument::CreatePackCopy(S.Context, PackedArgsBuilder)); return false; } return ConvertArg(Arg, 0); } // FIXME: This should not be a template, but // ClassTemplatePartialSpecializationDecl sadly does not derive from // TemplateDecl. template static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments( Sema &S, TemplateDeclT *Template, bool IsDeduced, SmallVectorImpl &Deduced, TemplateDeductionInfo &Info, SmallVectorImpl &Builder, LocalInstantiationScope *CurrentInstantiationScope = nullptr, unsigned NumAlreadyConverted = 0, bool PartialOverloading = false) { TemplateParameterList *TemplateParams = Template->getTemplateParameters(); for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) { NamedDecl *Param = TemplateParams->getParam(I); // C++0x [temp.arg.explicit]p3: // A trailing template parameter pack (14.5.3) not otherwise deduced will // be deduced to an empty sequence of template arguments. // FIXME: Where did the word "trailing" come from? if (Deduced[I].isNull() && Param->isTemplateParameterPack()) { if (auto Result = PackDeductionScope(S, TemplateParams, Deduced, Info, I).finish()) return Result; } if (!Deduced[I].isNull()) { if (I < NumAlreadyConverted) { // We may have had explicitly-specified template arguments for a // template parameter pack (that may or may not have been extended // via additional deduced arguments). if (Param->isParameterPack() && CurrentInstantiationScope && CurrentInstantiationScope->getPartiallySubstitutedPack() == Param) { // Forget the partially-substituted pack; its substitution is now // complete. CurrentInstantiationScope->ResetPartiallySubstitutedPack(); // We still need to check the argument in case it was extended by // deduction. } else { // We have already fully type-checked and converted this // argument, because it was explicitly-specified. Just record the // presence of this argument. 
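// Hypothetical example of the rule quoted above: a trailing parameter pack
// that nothing constrains is deduced as the empty pack, and an
// explicitly-specified pack may be extended by further deduction.
//
//   template <typename T, typename ...Us> void f(T);
//   f(0);              // T = int, Us deduced as {}
//   f<int, char>(0);   // Us is the explicitly-specified pack {char}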
Builder.push_back(Deduced[I]); continue; } } // We may have deduced this argument, so it still needs to be // checked and converted. if (ConvertDeducedTemplateArgument(S, Param, Deduced[I], Template, Info, IsDeduced, Builder)) { Info.Param = makeTemplateParameter(Param); // FIXME: These template arguments are temporary. Free them! Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder)); return Sema::TDK_SubstitutionFailure; } continue; } // Substitute into the default template argument, if available. bool HasDefaultArg = false; TemplateDecl *TD = dyn_cast(Template); if (!TD) { assert(isa(Template) || isa(Template)); return Sema::TDK_Incomplete; } TemplateArgumentLoc DefArg = S.SubstDefaultTemplateArgumentIfAvailable( TD, TD->getLocation(), TD->getSourceRange().getEnd(), Param, Builder, HasDefaultArg); // If there was no default argument, deduction is incomplete. if (DefArg.getArgument().isNull()) { Info.Param = makeTemplateParameter( const_cast(TemplateParams->getParam(I))); Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder)); if (PartialOverloading) break; return HasDefaultArg ? Sema::TDK_SubstitutionFailure : Sema::TDK_Incomplete; } // Check whether we can actually use the default argument. if (S.CheckTemplateArgument(Param, DefArg, TD, TD->getLocation(), TD->getSourceRange().getEnd(), 0, Builder, Sema::CTAK_Specified)) { Info.Param = makeTemplateParameter( const_cast(TemplateParams->getParam(I))); // FIXME: These template arguments are temporary. Free them! Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder)); return Sema::TDK_SubstitutionFailure; } // If we get here, we successfully used the default template argument. } return Sema::TDK_Success; } static DeclContext *getAsDeclContextOrEnclosing(Decl *D) { if (auto *DC = dyn_cast(D)) return DC; return D->getDeclContext(); } template struct IsPartialSpecialization { static constexpr bool value = false; }; template<> struct IsPartialSpecialization { static constexpr bool value = true; }; template<> struct IsPartialSpecialization { static constexpr bool value = true; }; template static Sema::TemplateDeductionResult CheckDeducedArgumentConstraints(Sema& S, TemplateDeclT *Template, ArrayRef DeducedArgs, TemplateDeductionInfo& Info) { llvm::SmallVector AssociatedConstraints; Template->getAssociatedConstraints(AssociatedConstraints); if (S.CheckConstraintSatisfaction(Template, AssociatedConstraints, DeducedArgs, Info.getLocation(), Info.AssociatedConstraintsSatisfaction) || !Info.AssociatedConstraintsSatisfaction.IsSatisfied) { Info.reset(TemplateArgumentList::CreateCopy(S.Context, DeducedArgs)); return Sema::TDK_ConstraintsNotSatisfied; } return Sema::TDK_Success; } /// Complete template argument deduction for a partial specialization. template static std::enable_if_t::value, Sema::TemplateDeductionResult> FinishTemplateArgumentDeduction( Sema &S, T *Partial, bool IsPartialOrdering, const TemplateArgumentList &TemplateArgs, SmallVectorImpl &Deduced, TemplateDeductionInfo &Info) { // Unevaluated SFINAE context. EnterExpressionEvaluationContext Unevaluated( S, Sema::ExpressionEvaluationContext::Unevaluated); Sema::SFINAETrap Trap(S); Sema::ContextRAII SavedContext(S, getAsDeclContextOrEnclosing(Partial)); // C++ [temp.deduct.type]p2: // [...] or if any template argument remains neither deduced nor // explicitly specified, template argument deduction fails. 
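// Hypothetical example of falling back to a default template argument when
// deduction leaves a parameter undetermined:
//
//   template <typename T, typename U = T *> U make(T);
//   auto p = make(42);   // T deduced as int; U not deduced, defaults to int*
//
// With neither a deduced value nor a default, the result is TDK_Incomplete.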
SmallVector Builder; if (auto Result = ConvertDeducedTemplateArguments( S, Partial, IsPartialOrdering, Deduced, Info, Builder)) return Result; // Form the template argument list from the deduced template arguments. TemplateArgumentList *DeducedArgumentList = TemplateArgumentList::CreateCopy(S.Context, Builder); Info.reset(DeducedArgumentList); // Substitute the deduced template arguments into the template // arguments of the class template partial specialization, and // verify that the instantiated template arguments are both valid // and are equivalent to the template arguments originally provided // to the class template. LocalInstantiationScope InstScope(S); auto *Template = Partial->getSpecializedTemplate(); const ASTTemplateArgumentListInfo *PartialTemplArgInfo = Partial->getTemplateArgsAsWritten(); const TemplateArgumentLoc *PartialTemplateArgs = PartialTemplArgInfo->getTemplateArgs(); TemplateArgumentListInfo InstArgs(PartialTemplArgInfo->LAngleLoc, PartialTemplArgInfo->RAngleLoc); if (S.Subst(PartialTemplateArgs, PartialTemplArgInfo->NumTemplateArgs, InstArgs, MultiLevelTemplateArgumentList(*DeducedArgumentList))) { unsigned ArgIdx = InstArgs.size(), ParamIdx = ArgIdx; if (ParamIdx >= Partial->getTemplateParameters()->size()) ParamIdx = Partial->getTemplateParameters()->size() - 1; Decl *Param = const_cast( Partial->getTemplateParameters()->getParam(ParamIdx)); Info.Param = makeTemplateParameter(Param); Info.FirstArg = PartialTemplateArgs[ArgIdx].getArgument(); return Sema::TDK_SubstitutionFailure; } bool ConstraintsNotSatisfied; SmallVector ConvertedInstArgs; if (S.CheckTemplateArgumentList(Template, Partial->getLocation(), InstArgs, false, ConvertedInstArgs, /*UpdateArgsWithConversions=*/true, &ConstraintsNotSatisfied)) return ConstraintsNotSatisfied ? Sema::TDK_ConstraintsNotSatisfied : Sema::TDK_SubstitutionFailure; TemplateParameterList *TemplateParams = Template->getTemplateParameters(); for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) { TemplateArgument InstArg = ConvertedInstArgs.data()[I]; if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg)) { Info.Param = makeTemplateParameter(TemplateParams->getParam(I)); Info.FirstArg = TemplateArgs[I]; Info.SecondArg = InstArg; return Sema::TDK_NonDeducedMismatch; } } if (Trap.hasErrorOccurred()) return Sema::TDK_SubstitutionFailure; if (auto Result = CheckDeducedArgumentConstraints(S, Partial, Builder, Info)) return Result; return Sema::TDK_Success; } /// Complete template argument deduction for a class or variable template, /// when partial ordering against a partial specialization. // FIXME: Factor out duplication with partial specialization version above. static Sema::TemplateDeductionResult FinishTemplateArgumentDeduction( Sema &S, TemplateDecl *Template, bool PartialOrdering, const TemplateArgumentList &TemplateArgs, SmallVectorImpl &Deduced, TemplateDeductionInfo &Info) { // Unevaluated SFINAE context. EnterExpressionEvaluationContext Unevaluated( S, Sema::ExpressionEvaluationContext::Unevaluated); Sema::SFINAETrap Trap(S); Sema::ContextRAII SavedContext(S, getAsDeclContextOrEnclosing(Template)); // C++ [temp.deduct.type]p2: // [...] or if any template argument remains neither deduced nor // explicitly specified, template argument deduction fails. SmallVector Builder; if (auto Result = ConvertDeducedTemplateArguments( S, Template, /*IsDeduced*/PartialOrdering, Deduced, Info, Builder)) return Result; // Check that we produced the correct argument list. 
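  // Illustrative example (hypothetical, not part of the original source) of
  // what the verification below establishes:
  //   template <typename T> struct S {};        // primary template
  //   template <typename U> struct S<U *> {};   // partial specialization
  // Matching S<int *> deduces U = int; the specialization's written argument
  // list <U *> is substituted with that deduction and compared against the
  // original arguments <int *> to confirm the match.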
TemplateParameterList *TemplateParams = Template->getTemplateParameters(); for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) { TemplateArgument InstArg = Builder[I]; if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg, /*PackExpansionMatchesPack*/true)) { Info.Param = makeTemplateParameter(TemplateParams->getParam(I)); Info.FirstArg = TemplateArgs[I]; Info.SecondArg = InstArg; return Sema::TDK_NonDeducedMismatch; } } if (Trap.hasErrorOccurred()) return Sema::TDK_SubstitutionFailure; if (auto Result = CheckDeducedArgumentConstraints(S, Template, Builder, Info)) return Result; return Sema::TDK_Success; } /// Perform template argument deduction to determine whether /// the given template arguments match the given class template /// partial specialization per C++ [temp.class.spec.match]. Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, TemplateDeductionInfo &Info) { if (Partial->isInvalidDecl()) return TDK_Invalid; // C++ [temp.class.spec.match]p2: // A partial specialization matches a given actual template // argument list if the template arguments of the partial // specialization can be deduced from the actual template argument // list (14.8.2). // Unevaluated SFINAE context. EnterExpressionEvaluationContext Unevaluated( *this, Sema::ExpressionEvaluationContext::Unevaluated); SFINAETrap Trap(*this); SmallVector Deduced; Deduced.resize(Partial->getTemplateParameters()->size()); if (TemplateDeductionResult Result = ::DeduceTemplateArguments(*this, Partial->getTemplateParameters(), Partial->getTemplateArgs(), TemplateArgs, Info, Deduced)) return Result; SmallVector DeducedArgs(Deduced.begin(), Deduced.end()); InstantiatingTemplate Inst(*this, Info.getLocation(), Partial, DeducedArgs, Info); if (Inst.isInvalid()) return TDK_InstantiationDepth; if (Trap.hasErrorOccurred()) return Sema::TDK_SubstitutionFailure; TemplateDeductionResult Result; runWithSufficientStackSpace(Info.getLocation(), [&] { Result = ::FinishTemplateArgumentDeduction(*this, Partial, /*IsPartialOrdering=*/false, TemplateArgs, Deduced, Info); }); return Result; } /// Perform template argument deduction to determine whether /// the given template arguments match the given variable template /// partial specialization per C++ [temp.class.spec.match]. Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, TemplateDeductionInfo &Info) { if (Partial->isInvalidDecl()) return TDK_Invalid; // C++ [temp.class.spec.match]p2: // A partial specialization matches a given actual template // argument list if the template arguments of the partial // specialization can be deduced from the actual template argument // list (14.8.2). // Unevaluated SFINAE context. 
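  // Illustrative example (hypothetical, not part of the original source):
  // variable template partial specializations are matched the same way as
  // class template partial specializations.
  //   template <typename T> constexpr bool is_pointer_v = false;
  //   template <typename T> constexpr bool is_pointer_v<T *> = true;
  // Referencing is_pointer_v<int *> deduces T = int against the partial
  // specialization's argument list <T *>.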
EnterExpressionEvaluationContext Unevaluated( *this, Sema::ExpressionEvaluationContext::Unevaluated); SFINAETrap Trap(*this); SmallVector Deduced; Deduced.resize(Partial->getTemplateParameters()->size()); if (TemplateDeductionResult Result = ::DeduceTemplateArguments( *this, Partial->getTemplateParameters(), Partial->getTemplateArgs(), TemplateArgs, Info, Deduced)) return Result; SmallVector DeducedArgs(Deduced.begin(), Deduced.end()); InstantiatingTemplate Inst(*this, Info.getLocation(), Partial, DeducedArgs, Info); if (Inst.isInvalid()) return TDK_InstantiationDepth; if (Trap.hasErrorOccurred()) return Sema::TDK_SubstitutionFailure; TemplateDeductionResult Result; runWithSufficientStackSpace(Info.getLocation(), [&] { Result = ::FinishTemplateArgumentDeduction(*this, Partial, /*IsPartialOrdering=*/false, TemplateArgs, Deduced, Info); }); return Result; } /// Determine whether the given type T is a simple-template-id type. static bool isSimpleTemplateIdType(QualType T) { if (const TemplateSpecializationType *Spec = T->getAs()) return Spec->getTemplateName().getAsTemplateDecl() != nullptr; // C++17 [temp.local]p2: // the injected-class-name [...] is equivalent to the template-name followed // by the template-arguments of the class template specialization or partial // specialization enclosed in <> // ... which means it's equivalent to a simple-template-id. // // This only arises during class template argument deduction for a copy // deduction candidate, where it permits slicing. if (T->getAs()) return true; return false; } /// Substitute the explicitly-provided template arguments into the /// given function template according to C++ [temp.arg.explicit]. /// /// \param FunctionTemplate the function template into which the explicit /// template arguments will be substituted. /// /// \param ExplicitTemplateArgs the explicitly-specified template /// arguments. /// /// \param Deduced the deduced template arguments, which will be populated /// with the converted and checked explicit template arguments. /// /// \param ParamTypes will be populated with the instantiated function /// parameters. /// /// \param FunctionType if non-NULL, the result type of the function template /// will also be instantiated and the pointed-to value will be updated with /// the instantiated function type. /// /// \param Info if substitution fails for any reason, this object will be /// populated with more information about the failure. /// /// \returns TDK_Success if substitution was successful, or some failure /// condition. Sema::TemplateDeductionResult Sema::SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl &Deduced, SmallVectorImpl &ParamTypes, QualType *FunctionType, TemplateDeductionInfo &Info) { FunctionDecl *Function = FunctionTemplate->getTemplatedDecl(); TemplateParameterList *TemplateParams = FunctionTemplate->getTemplateParameters(); if (ExplicitTemplateArgs.size() == 0) { // No arguments to substitute; just copy over the parameter types and // fill in the function type. for (auto P : Function->parameters()) ParamTypes.push_back(P->getType()); if (FunctionType) *FunctionType = Function->getType(); return TDK_Success; } // Unevaluated SFINAE context. 
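  // Illustrative example (hypothetical, not part of the original source) of
  // the substitution performed by this function before any deduction from the
  // call arguments takes place:
  //   template <typename T, typename U> void pair_up(T, U);
  //   pair_up<int>(1, 2.0);   // T = int comes from the explicit list and is
  //                           // substituted into the parameter types; U is
  //                           // left for deduction from the second argument
  //   template <typename... Ts> void g(Ts ...ts);
  //   g<int, int>(1, 2, 3.0); // the explicit prefix <int, int> forms a
  //                           // partially-substituted pack that deduction may
  //                           // later extend to <int, int, double>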
EnterExpressionEvaluationContext Unevaluated( *this, Sema::ExpressionEvaluationContext::Unevaluated); SFINAETrap Trap(*this); // C++ [temp.arg.explicit]p3: // Template arguments that are present shall be specified in the // declaration order of their corresponding template-parameters. The // template argument list shall not specify more template-arguments than // there are corresponding template-parameters. SmallVector Builder; // Enter a new template instantiation context where we check the // explicitly-specified template arguments against this function template, // and then substitute them into the function parameter types. SmallVector DeducedArgs; InstantiatingTemplate Inst( *this, Info.getLocation(), FunctionTemplate, DeducedArgs, CodeSynthesisContext::ExplicitTemplateArgumentSubstitution, Info); if (Inst.isInvalid()) return TDK_InstantiationDepth; if (CheckTemplateArgumentList(FunctionTemplate, SourceLocation(), ExplicitTemplateArgs, true, Builder, false) || Trap.hasErrorOccurred()) { unsigned Index = Builder.size(); if (Index >= TemplateParams->size()) return TDK_SubstitutionFailure; Info.Param = makeTemplateParameter(TemplateParams->getParam(Index)); return TDK_InvalidExplicitArguments; } // Form the template argument list from the explicitly-specified // template arguments. TemplateArgumentList *ExplicitArgumentList = TemplateArgumentList::CreateCopy(Context, Builder); Info.setExplicitArgs(ExplicitArgumentList); // Template argument deduction and the final substitution should be // done in the context of the templated declaration. Explicit // argument substitution, on the other hand, needs to happen in the // calling context. ContextRAII SavedContext(*this, FunctionTemplate->getTemplatedDecl()); // If we deduced template arguments for a template parameter pack, // note that the template argument pack is partially substituted and record // the explicit template arguments. They'll be used as part of deduction // for this template parameter pack. unsigned PartiallySubstitutedPackIndex = -1u; if (!Builder.empty()) { const TemplateArgument &Arg = Builder.back(); if (Arg.getKind() == TemplateArgument::Pack) { auto *Param = TemplateParams->getParam(Builder.size() - 1); // If this is a fully-saturated fixed-size pack, it should be // fully-substituted, not partially-substituted. Optional Expansions = getExpandedPackSize(Param); if (!Expansions || Arg.pack_size() < *Expansions) { PartiallySubstitutedPackIndex = Builder.size() - 1; CurrentInstantiationScope->SetPartiallySubstitutedPack( Param, Arg.pack_begin(), Arg.pack_size()); } } } const FunctionProtoType *Proto = Function->getType()->getAs(); assert(Proto && "Function template does not have a prototype?"); // Isolate our substituted parameters from our caller. LocalInstantiationScope InstScope(*this, /*MergeWithOuterScope*/true); ExtParameterInfoBuilder ExtParamInfos; // Instantiate the types of each of the function parameters given the // explicitly-specified template arguments. If the function has a trailing // return type, substitute it after the arguments to ensure we substitute // in lexical order. if (Proto->hasTrailingReturn()) { if (SubstParmTypes(Function->getLocation(), Function->parameters(), Proto->getExtParameterInfosOrNull(), MultiLevelTemplateArgumentList(*ExplicitArgumentList), ParamTypes, /*params*/ nullptr, ExtParamInfos)) return TDK_SubstitutionFailure; } // Instantiate the return type. 
  QualType ResultType;
  {
    // C++11 [expr.prim.general]p3:
    //   If a declaration declares a member function or member function
    //   template of a class X, the expression this is a prvalue of type
    //   "pointer to cv-qualifier-seq X" between the optional cv-qualifier-seq
    //   and the end of the function-definition, member-declarator, or
    //   declarator.
    Qualifiers ThisTypeQuals;
    CXXRecordDecl *ThisContext = nullptr;
    if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Function)) {
      ThisContext = Method->getParent();
      ThisTypeQuals = Method->getMethodQualifiers();
    }

    CXXThisScopeRAII ThisScope(*this, ThisContext, ThisTypeQuals,
                               getLangOpts().CPlusPlus11);

    ResultType =
        SubstType(Proto->getReturnType(),
                  MultiLevelTemplateArgumentList(*ExplicitArgumentList),
                  Function->getTypeSpecStartLoc(), Function->getDeclName());
    if (ResultType.isNull() || Trap.hasErrorOccurred())
      return TDK_SubstitutionFailure;
    // CUDA: Kernel function must have 'void' return type.
    if (getLangOpts().CUDA)
      if (Function->hasAttr<CUDAGlobalAttr>() && !ResultType->isVoidType()) {
        Diag(Function->getLocation(), diag::err_kern_type_not_void_return)
            << Function->getType() << Function->getSourceRange();
        return TDK_SubstitutionFailure;
      }
  }

  // Instantiate the types of each of the function parameters given the
  // explicitly-specified template arguments if we didn't do so earlier.
  if (!Proto->hasTrailingReturn() &&
      SubstParmTypes(Function->getLocation(), Function->parameters(),
                     Proto->getExtParameterInfosOrNull(),
                     MultiLevelTemplateArgumentList(*ExplicitArgumentList),
                     ParamTypes, /*params*/ nullptr, ExtParamInfos))
    return TDK_SubstitutionFailure;

  if (FunctionType) {
    auto EPI = Proto->getExtProtoInfo();
    EPI.ExtParameterInfos = ExtParamInfos.getPointerOrNull(ParamTypes.size());

    // In C++1z onwards, exception specifications are part of the function
    // type, so substitution into the type must also substitute into the
    // exception specification.
    SmallVector<QualType, 4> ExceptionStorage;
    if (getLangOpts().CPlusPlus17 &&
        SubstExceptionSpec(
            Function->getLocation(), EPI.ExceptionSpec, ExceptionStorage,
            MultiLevelTemplateArgumentList(*ExplicitArgumentList)))
      return TDK_SubstitutionFailure;

    *FunctionType = BuildFunctionType(ResultType, ParamTypes,
                                      Function->getLocation(),
                                      Function->getDeclName(),
                                      EPI);
    if (FunctionType->isNull() || Trap.hasErrorOccurred())
      return TDK_SubstitutionFailure;
  }

  // C++ [temp.arg.explicit]p2:
  //   Trailing template arguments that can be deduced (14.8.2) may be
  //   omitted from the list of explicit template-arguments. If all of the
  //   template arguments can be deduced, they may all be omitted; in this
  //   case, the empty template argument list <> itself may also be omitted.
  //
  // Take all of the explicitly-specified arguments and put them into
  // the set of deduced template arguments. The partially-substituted
  // parameter pack, however, will be set to NULL since the deduction
  // mechanism handles the partially-substituted argument pack directly.
  Deduced.reserve(TemplateParams->size());
  for (unsigned I = 0, N = ExplicitArgumentList->size(); I != N; ++I) {
    const TemplateArgument &Arg = ExplicitArgumentList->get(I);
    if (I == PartiallySubstitutedPackIndex)
      Deduced.push_back(DeducedTemplateArgument());
    else
      Deduced.push_back(Arg);
  }

  return TDK_Success;
}

/// Check whether the deduced argument type for a call to a function
/// template matches the actual argument type per C++ [temp.deduct.call]p4.
static Sema::TemplateDeductionResult CheckOriginalCallArgDeduction(Sema &S, TemplateDeductionInfo &Info, Sema::OriginalCallArg OriginalArg, QualType DeducedA) { ASTContext &Context = S.Context; auto Failed = [&]() -> Sema::TemplateDeductionResult { Info.FirstArg = TemplateArgument(DeducedA); Info.SecondArg = TemplateArgument(OriginalArg.OriginalArgType); Info.CallArgIndex = OriginalArg.ArgIdx; return OriginalArg.DecomposedParam ? Sema::TDK_DeducedMismatchNested : Sema::TDK_DeducedMismatch; }; QualType A = OriginalArg.OriginalArgType; QualType OriginalParamType = OriginalArg.OriginalParamType; // Check for type equality (top-level cv-qualifiers are ignored). if (Context.hasSameUnqualifiedType(A, DeducedA)) return Sema::TDK_Success; // Strip off references on the argument types; they aren't needed for // the following checks. if (const ReferenceType *DeducedARef = DeducedA->getAs()) DeducedA = DeducedARef->getPointeeType(); if (const ReferenceType *ARef = A->getAs()) A = ARef->getPointeeType(); // C++ [temp.deduct.call]p4: // [...] However, there are three cases that allow a difference: // - If the original P is a reference type, the deduced A (i.e., the // type referred to by the reference) can be more cv-qualified than // the transformed A. if (const ReferenceType *OriginalParamRef = OriginalParamType->getAs()) { // We don't want to keep the reference around any more. OriginalParamType = OriginalParamRef->getPointeeType(); // FIXME: Resolve core issue (no number yet): if the original P is a // reference type and the transformed A is function type "noexcept F", // the deduced A can be F. QualType Tmp; if (A->isFunctionType() && S.IsFunctionConversion(A, DeducedA, Tmp)) return Sema::TDK_Success; Qualifiers AQuals = A.getQualifiers(); Qualifiers DeducedAQuals = DeducedA.getQualifiers(); // Under Objective-C++ ARC, the deduced type may have implicitly // been given strong or (when dealing with a const reference) // unsafe_unretained lifetime. If so, update the original // qualifiers to include this lifetime. if (S.getLangOpts().ObjCAutoRefCount && ((DeducedAQuals.getObjCLifetime() == Qualifiers::OCL_Strong && AQuals.getObjCLifetime() == Qualifiers::OCL_None) || (DeducedAQuals.hasConst() && DeducedAQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone))) { AQuals.setObjCLifetime(DeducedAQuals.getObjCLifetime()); } if (AQuals == DeducedAQuals) { // Qualifiers match; there's nothing to do. } else if (!DeducedAQuals.compatiblyIncludes(AQuals)) { return Failed(); } else { // Qualifiers are compatible, so have the argument type adopt the // deduced argument type's qualifiers as if we had performed the // qualification conversion. A = Context.getQualifiedType(A.getUnqualifiedType(), DeducedAQuals); } } // - The transformed A can be another pointer or pointer to member // type that can be converted to the deduced A via a function pointer // conversion and/or a qualification conversion. // // Also allow conversions which merely strip __attribute__((noreturn)) from // function types (recursively). bool ObjCLifetimeConversion = false; QualType ResultTy; if ((A->isAnyPointerType() || A->isMemberPointerType()) && (S.IsQualificationConversion(A, DeducedA, false, ObjCLifetimeConversion) || S.IsFunctionConversion(A, DeducedA, ResultTy))) return Sema::TDK_Success; // - If P is a class and P has the form simple-template-id, then the // transformed A can be a derived class of the deduced A. [...] // [...] 
Likewise, if P is a pointer to a class of the form // simple-template-id, the transformed A can be a pointer to a // derived class pointed to by the deduced A. if (const PointerType *OriginalParamPtr = OriginalParamType->getAs()) { if (const PointerType *DeducedAPtr = DeducedA->getAs()) { if (const PointerType *APtr = A->getAs()) { if (A->getPointeeType()->isRecordType()) { OriginalParamType = OriginalParamPtr->getPointeeType(); DeducedA = DeducedAPtr->getPointeeType(); A = APtr->getPointeeType(); } } } } if (Context.hasSameUnqualifiedType(A, DeducedA)) return Sema::TDK_Success; if (A->isRecordType() && isSimpleTemplateIdType(OriginalParamType) && S.IsDerivedFrom(Info.getLocation(), A, DeducedA)) return Sema::TDK_Success; return Failed(); } /// Find the pack index for a particular parameter index in an instantiation of /// a function template with specific arguments. /// /// \return The pack index for whichever pack produced this parameter, or -1 /// if this was not produced by a parameter. Intended to be used as the /// ArgumentPackSubstitutionIndex for further substitutions. // FIXME: We should track this in OriginalCallArgs so we don't need to // reconstruct it here. static unsigned getPackIndexForParam(Sema &S, FunctionTemplateDecl *FunctionTemplate, const MultiLevelTemplateArgumentList &Args, unsigned ParamIdx) { unsigned Idx = 0; for (auto *PD : FunctionTemplate->getTemplatedDecl()->parameters()) { if (PD->isParameterPack()) { unsigned NumExpansions = S.getNumArgumentsInExpansion(PD->getType(), Args).getValueOr(1); if (Idx + NumExpansions > ParamIdx) return ParamIdx - Idx; Idx += NumExpansions; } else { if (Idx == ParamIdx) return -1; // Not a pack expansion ++Idx; } } llvm_unreachable("parameter index would not be produced from template"); } /// Finish template argument deduction for a function template, /// checking the deduced template arguments for completeness and forming /// the function template specialization. /// /// \param OriginalCallArgs If non-NULL, the original call arguments against /// which the deduced argument types should be compared. Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, TemplateDeductionInfo &Info, SmallVectorImpl const *OriginalCallArgs, bool PartialOverloading, llvm::function_ref CheckNonDependent) { // Unevaluated SFINAE context. EnterExpressionEvaluationContext Unevaluated( *this, Sema::ExpressionEvaluationContext::Unevaluated); SFINAETrap Trap(*this); // Enter a new template instantiation context while we instantiate the // actual function declaration. SmallVector DeducedArgs(Deduced.begin(), Deduced.end()); InstantiatingTemplate Inst( *this, Info.getLocation(), FunctionTemplate, DeducedArgs, CodeSynthesisContext::DeducedTemplateArgumentSubstitution, Info); if (Inst.isInvalid()) return TDK_InstantiationDepth; ContextRAII SavedContext(*this, FunctionTemplate->getTemplatedDecl()); // C++ [temp.deduct.type]p2: // [...] or if any template argument remains neither deduced nor // explicitly specified, template argument deduction fails. 
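  // Illustrative example (hypothetical, not part of the original source) of
  // the rule quoted above, that every template argument must end up deduced
  // or explicitly specified:
  //   template <typename T, typename U> void h(T);
  //   h(0);              // error: U is neither deduced nor explicitly given
  //   h<int, long>(0);   // OK: U is supplied explicitly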
SmallVector Builder; if (auto Result = ConvertDeducedTemplateArguments( *this, FunctionTemplate, /*IsDeduced*/true, Deduced, Info, Builder, CurrentInstantiationScope, NumExplicitlySpecified, PartialOverloading)) return Result; // C++ [temp.deduct.call]p10: [DR1391] // If deduction succeeds for all parameters that contain // template-parameters that participate in template argument deduction, // and all template arguments are explicitly specified, deduced, or // obtained from default template arguments, remaining parameters are then // compared with the corresponding arguments. For each remaining parameter // P with a type that was non-dependent before substitution of any // explicitly-specified template arguments, if the corresponding argument // A cannot be implicitly converted to P, deduction fails. if (CheckNonDependent()) return TDK_NonDependentConversionFailure; // Form the template argument list from the deduced template arguments. TemplateArgumentList *DeducedArgumentList = TemplateArgumentList::CreateCopy(Context, Builder); Info.reset(DeducedArgumentList); // Substitute the deduced template arguments into the function template // declaration to produce the function template specialization. DeclContext *Owner = FunctionTemplate->getDeclContext(); if (FunctionTemplate->getFriendObjectKind()) Owner = FunctionTemplate->getLexicalDeclContext(); MultiLevelTemplateArgumentList SubstArgs(*DeducedArgumentList); Specialization = cast_or_null( SubstDecl(FunctionTemplate->getTemplatedDecl(), Owner, SubstArgs)); if (!Specialization || Specialization->isInvalidDecl()) return TDK_SubstitutionFailure; assert(Specialization->getPrimaryTemplate()->getCanonicalDecl() == FunctionTemplate->getCanonicalDecl()); // If the template argument list is owned by the function template // specialization, release it. if (Specialization->getTemplateSpecializationArgs() == DeducedArgumentList && !Trap.hasErrorOccurred()) Info.take(); // There may have been an error that did not prevent us from constructing a // declaration. Mark the declaration invalid and return with a substitution // failure. if (Trap.hasErrorOccurred()) { Specialization->setInvalidDecl(true); return TDK_SubstitutionFailure; } // C++2a [temp.deduct]p5 // [...] When all template arguments have been deduced [...] all uses of // template parameters [...] are replaced with the corresponding deduced // or default argument values. // [...] If the function template has associated constraints // ([temp.constr.decl]), those constraints are checked for satisfaction // ([temp.constr.constr]). If the constraints are not satisfied, type // deduction fails. if (!PartialOverloading || (Builder.size() == FunctionTemplate->getTemplateParameters()->size())) { if (CheckInstantiatedFunctionTemplateConstraints(Info.getLocation(), Specialization, Builder, Info.AssociatedConstraintsSatisfaction)) return TDK_MiscellaneousDeductionFailure; if (!Info.AssociatedConstraintsSatisfaction.IsSatisfied) { Info.reset(TemplateArgumentList::CreateCopy(Context, Builder)); return TDK_ConstraintsNotSatisfied; } } if (OriginalCallArgs) { // C++ [temp.deduct.call]p4: // In general, the deduction process attempts to find template argument // values that will make the deduced A identical to A (after the type A // is transformed as described above). [...] 
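    // Illustrative example (hypothetical, not part of the original source) of
    // the consistency check performed below:
    //   template <typename T> struct Box {};
    //   template <typename T> void open(Box<T>);
    //   struct IntBox : Box<int> {};
    //   open(IntBox{});   // deduced A is Box<int>; the call is still accepted
    //                     // because the argument's class derives from it
    // Each original call argument is re-checked against the parameter type
    // that the specialization ended up with after substitution.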
    llvm::SmallDenseMap<std::pair<unsigned, QualType>, QualType>
        DeducedATypes;
    for (unsigned I = 0, N = OriginalCallArgs->size(); I != N; ++I) {
      OriginalCallArg OriginalArg = (*OriginalCallArgs)[I];

      auto ParamIdx = OriginalArg.ArgIdx;
      if (ParamIdx >= Specialization->getNumParams())
        // FIXME: This presumably means a pack ended up smaller than we
        // expected while deducing. Should this not result in deduction
        // failure? Can it even happen?
        continue;

      QualType DeducedA;
      if (!OriginalArg.DecomposedParam) {
        // P is one of the function parameters, just look up its substituted
        // type.
        DeducedA = Specialization->getParamDecl(ParamIdx)->getType();
      } else {
        // P is a decomposed element of a parameter corresponding to a
        // braced-init-list argument. Substitute back into P to find the
        // deduced A.
        QualType &CacheEntry =
            DeducedATypes[{ParamIdx, OriginalArg.OriginalParamType}];
        if (CacheEntry.isNull()) {
          ArgumentPackSubstitutionIndexRAII PackIndex(
              *this,
              getPackIndexForParam(*this, FunctionTemplate, SubstArgs,
                                   ParamIdx));
          CacheEntry =
              SubstType(OriginalArg.OriginalParamType, SubstArgs,
                        Specialization->getTypeSpecStartLoc(),
                        Specialization->getDeclName());
        }
        DeducedA = CacheEntry;
      }

      if (auto TDK =
              CheckOriginalCallArgDeduction(*this, Info, OriginalArg, DeducedA))
        return TDK;
    }
  }

  // If we suppressed any diagnostics while performing template argument
  // deduction, and if we haven't already instantiated this declaration,
  // keep track of these diagnostics. They'll be emitted if this specialization
  // is actually used.
  if (Info.diag_begin() != Info.diag_end()) {
    SuppressedDiagnosticsMap::iterator
      Pos = SuppressedDiagnostics.find(Specialization->getCanonicalDecl());
    if (Pos == SuppressedDiagnostics.end())
      SuppressedDiagnostics[Specialization->getCanonicalDecl()]
        .append(Info.diag_begin(), Info.diag_end());
  }

  return TDK_Success;
}

/// Gets the type of a function for template-argument-deduction
/// purposes when it's considered as part of an overload set.
static QualType GetTypeOfFunction(Sema &S, const OverloadExpr::FindResult &R,
                                  FunctionDecl *Fn) {
  // We may need to deduce the return type of the function now.
  if (S.getLangOpts().CPlusPlus14 && Fn->getReturnType()->isUndeducedType() &&
      S.DeduceReturnType(Fn, R.Expression->getExprLoc(), /*Diagnose*/ false))
    return {};

  if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn))
    if (Method->isInstance()) {
      // An instance method that's referenced in a form that doesn't
      // look like a member pointer is just invalid.
      if (!R.HasFormOfMemberPointer)
        return {};

      return S.Context.getMemberPointerType(Fn->getType(),
               S.Context.getTypeDeclType(Method->getParent()).getTypePtr());
    }

  if (!R.IsAddressOfOperand) return Fn->getType();
  return S.Context.getPointerType(Fn->getType());
}

/// Apply the deduction rules for overload sets.
/// /// \return the null type if this argument should be treated as an /// undeduced context static QualType ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams, Expr *Arg, QualType ParamType, bool ParamWasReference) { OverloadExpr::FindResult R = OverloadExpr::find(Arg); OverloadExpr *Ovl = R.Expression; // C++0x [temp.deduct.call]p4 unsigned TDF = 0; if (ParamWasReference) TDF |= TDF_ParamWithReferenceType; if (R.IsAddressOfOperand) TDF |= TDF_IgnoreQualifiers; // C++0x [temp.deduct.call]p6: // When P is a function type, pointer to function type, or pointer // to member function type: if (!ParamType->isFunctionType() && !ParamType->isFunctionPointerType() && !ParamType->isMemberFunctionPointerType()) { if (Ovl->hasExplicitTemplateArgs()) { // But we can still look for an explicit specialization. if (FunctionDecl *ExplicitSpec = S.ResolveSingleFunctionTemplateSpecialization(Ovl)) return GetTypeOfFunction(S, R, ExplicitSpec); } DeclAccessPair DAP; if (FunctionDecl *Viable = S.resolveAddressOfSingleOverloadCandidate(Arg, DAP)) return GetTypeOfFunction(S, R, Viable); return {}; } // Gather the explicit template arguments, if any. TemplateArgumentListInfo ExplicitTemplateArgs; if (Ovl->hasExplicitTemplateArgs()) Ovl->copyTemplateArgumentsInto(ExplicitTemplateArgs); QualType Match; for (UnresolvedSetIterator I = Ovl->decls_begin(), E = Ovl->decls_end(); I != E; ++I) { NamedDecl *D = (*I)->getUnderlyingDecl(); if (FunctionTemplateDecl *FunTmpl = dyn_cast(D)) { // - If the argument is an overload set containing one or more // function templates, the parameter is treated as a // non-deduced context. if (!Ovl->hasExplicitTemplateArgs()) return {}; // Otherwise, see if we can resolve a function type FunctionDecl *Specialization = nullptr; TemplateDeductionInfo Info(Ovl->getNameLoc()); if (S.DeduceTemplateArguments(FunTmpl, &ExplicitTemplateArgs, Specialization, Info)) continue; D = Specialization; } FunctionDecl *Fn = cast(D); QualType ArgType = GetTypeOfFunction(S, R, Fn); if (ArgType.isNull()) continue; // Function-to-pointer conversion. if (!ParamWasReference && ParamType->isPointerType() && ArgType->isFunctionType()) ArgType = S.Context.getPointerType(ArgType); // - If the argument is an overload set (not containing function // templates), trial argument deduction is attempted using each // of the members of the set. If deduction succeeds for only one // of the overload set members, that member is used as the // argument value for the deduction. If deduction succeeds for // more than one member of the overload set the parameter is // treated as a non-deduced context. // We do all of this in a fresh context per C++0x [temp.deduct.type]p2: // Type deduction is done independently for each P/A pair, and // the deduced template argument values are then combined. // So we do not reject deductions which were made elsewhere. SmallVector Deduced(TemplateParams->size()); TemplateDeductionInfo Info(Ovl->getNameLoc()); Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType, ArgType, Info, Deduced, TDF); if (Result) continue; if (!Match.isNull()) return {}; Match = ArgType; } return Match; } /// Perform the adjustments to the parameter and argument types /// described in C++ [temp.deduct.call]. /// /// \returns true if the caller should not attempt to perform any template /// argument deduction based on this P/A pair because the argument is an /// overloaded function set that could not be resolved. 
static bool AdjustFunctionParmAndArgTypesForDeduction( Sema &S, TemplateParameterList *TemplateParams, unsigned FirstInnerIndex, QualType &ParamType, QualType &ArgType, Expr *Arg, unsigned &TDF) { // C++0x [temp.deduct.call]p3: // If P is a cv-qualified type, the top level cv-qualifiers of P's type // are ignored for type deduction. if (ParamType.hasQualifiers()) ParamType = ParamType.getUnqualifiedType(); // [...] If P is a reference type, the type referred to by P is // used for type deduction. const ReferenceType *ParamRefType = ParamType->getAs(); if (ParamRefType) ParamType = ParamRefType->getPointeeType(); // Overload sets usually make this parameter an undeduced context, // but there are sometimes special circumstances. Typically // involving a template-id-expr. if (ArgType == S.Context.OverloadTy) { ArgType = ResolveOverloadForDeduction(S, TemplateParams, Arg, ParamType, ParamRefType != nullptr); if (ArgType.isNull()) return true; } if (ParamRefType) { // If the argument has incomplete array type, try to complete its type. if (ArgType->isIncompleteArrayType()) { S.completeExprArrayBound(Arg); ArgType = Arg->getType(); } // C++1z [temp.deduct.call]p3: // If P is a forwarding reference and the argument is an lvalue, the type // "lvalue reference to A" is used in place of A for type deduction. if (isForwardingReference(QualType(ParamRefType, 0), FirstInnerIndex) && Arg->isLValue()) ArgType = S.Context.getLValueReferenceType(ArgType); } else { // C++ [temp.deduct.call]p2: // If P is not a reference type: // - If A is an array type, the pointer type produced by the // array-to-pointer standard conversion (4.2) is used in place of // A for type deduction; otherwise, if (ArgType->isArrayType()) ArgType = S.Context.getArrayDecayedType(ArgType); // - If A is a function type, the pointer type produced by the // function-to-pointer standard conversion (4.3) is used in place // of A for type deduction; otherwise, else if (ArgType->isFunctionType()) ArgType = S.Context.getPointerType(ArgType); else { // - If A is a cv-qualified type, the top level cv-qualifiers of A's // type are ignored for type deduction. ArgType = ArgType.getUnqualifiedType(); } } // C++0x [temp.deduct.call]p4: // In general, the deduction process attempts to find template argument // values that will make the deduced A identical to A (after the type A // is transformed as described above). [...] TDF = TDF_SkipNonDependent; // - If the original P is a reference type, the deduced A (i.e., the // type referred to by the reference) can be more cv-qualified than // the transformed A. if (ParamRefType) TDF |= TDF_ParamWithReferenceType; // - The transformed A can be another pointer or pointer to member // type that can be converted to the deduced A via a qualification // conversion (4.4). if (ArgType->isPointerType() || ArgType->isMemberPointerType() || ArgType->isObjCObjectPointerType()) TDF |= TDF_IgnoreQualifiers; // - If P is a class and P has the form simple-template-id, then the // transformed A can be a derived class of the deduced A. Likewise, // if P is a pointer to a class of the form simple-template-id, the // transformed A can be a pointer to a derived class pointed to by // the deduced A. 
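  // Illustrative example (hypothetical, not part of the original source) of
  // the adjustments performed above:
  //   template <typename T> void fwd(T &&);
  //   int n = 0;
  //   fwd(n);   // forwarding reference + lvalue: A becomes int&, T = int&
  //   fwd(1);   // rvalue: A stays int, T = int
  //   template <typename T> void take(T);
  //   int a[4];
  //   take(a);  // A decays from int[4] to int* before deduction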
if (isSimpleTemplateIdType(ParamType) || (isa(ParamType) && isSimpleTemplateIdType( ParamType->getAs()->getPointeeType()))) TDF |= TDF_DerivedClass; return false; } static bool hasDeducibleTemplateParameters(Sema &S, FunctionTemplateDecl *FunctionTemplate, QualType T); static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument( Sema &S, TemplateParameterList *TemplateParams, unsigned FirstInnerIndex, QualType ParamType, Expr *Arg, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced, SmallVectorImpl &OriginalCallArgs, bool DecomposedParam, unsigned ArgIdx, unsigned TDF); /// Attempt template argument deduction from an initializer list /// deemed to be an argument in a function call. static Sema::TemplateDeductionResult DeduceFromInitializerList( Sema &S, TemplateParameterList *TemplateParams, QualType AdjustedParamType, InitListExpr *ILE, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced, SmallVectorImpl &OriginalCallArgs, unsigned ArgIdx, unsigned TDF) { // C++ [temp.deduct.call]p1: (CWG 1591) // If removing references and cv-qualifiers from P gives // std::initializer_list or P0[N] for some P0 and N and the argument is // a non-empty initializer list, then deduction is performed instead for // each element of the initializer list, taking P0 as a function template // parameter type and the initializer element as its argument // // We've already removed references and cv-qualifiers here. if (!ILE->getNumInits()) return Sema::TDK_Success; QualType ElTy; auto *ArrTy = S.Context.getAsArrayType(AdjustedParamType); if (ArrTy) ElTy = ArrTy->getElementType(); else if (!S.isStdInitializerList(AdjustedParamType, &ElTy)) { // Otherwise, an initializer list argument causes the parameter to be // considered a non-deduced context return Sema::TDK_Success; } // Resolving a core issue: a braced-init-list containing any designators is // a non-deduced context. for (Expr *E : ILE->inits()) if (isa(E)) return Sema::TDK_Success; // Deduction only needs to be done for dependent types. if (ElTy->isDependentType()) { for (Expr *E : ILE->inits()) { if (auto Result = DeduceTemplateArgumentsFromCallArgument( S, TemplateParams, 0, ElTy, E, Info, Deduced, OriginalCallArgs, true, ArgIdx, TDF)) return Result; } } // in the P0[N] case, if N is a non-type template parameter, N is deduced // from the length of the initializer list. if (auto *DependentArrTy = dyn_cast_or_null(ArrTy)) { // Determine the array bound is something we can deduce. if (NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(Info, DependentArrTy->getSizeExpr())) { // We can perform template argument deduction for the given non-type // template parameter. // C++ [temp.deduct.type]p13: // The type of N in the type T[N] is std::size_t. QualType T = S.Context.getSizeType(); llvm::APInt Size(S.Context.getIntWidth(T), ILE->getNumInits()); if (auto Result = DeduceNonTypeTemplateArgument( S, TemplateParams, NTTP, llvm::APSInt(Size), T, /*ArrayBound=*/true, Info, Deduced)) return Result; } } return Sema::TDK_Success; } /// Perform template argument deduction per [temp.deduct.call] for a /// single parameter / argument pair. 
static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument( Sema &S, TemplateParameterList *TemplateParams, unsigned FirstInnerIndex, QualType ParamType, Expr *Arg, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced, SmallVectorImpl &OriginalCallArgs, bool DecomposedParam, unsigned ArgIdx, unsigned TDF) { QualType ArgType = Arg->getType(); QualType OrigParamType = ParamType; // If P is a reference type [...] // If P is a cv-qualified type [...] if (AdjustFunctionParmAndArgTypesForDeduction( S, TemplateParams, FirstInnerIndex, ParamType, ArgType, Arg, TDF)) return Sema::TDK_Success; // If [...] the argument is a non-empty initializer list [...] if (InitListExpr *ILE = dyn_cast(Arg)) return DeduceFromInitializerList(S, TemplateParams, ParamType, ILE, Info, Deduced, OriginalCallArgs, ArgIdx, TDF); // [...] the deduction process attempts to find template argument values // that will make the deduced A identical to A // // Keep track of the argument type and corresponding parameter index, // so we can check for compatibility between the deduced A and A. OriginalCallArgs.push_back( Sema::OriginalCallArg(OrigParamType, DecomposedParam, ArgIdx, ArgType)); return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType, ArgType, Info, Deduced, TDF); } /// Perform template argument deduction from a function call /// (C++ [temp.deduct.call]). /// /// \param FunctionTemplate the function template for which we are performing /// template argument deduction. /// /// \param ExplicitTemplateArgs the explicit template arguments provided /// for this call. /// /// \param Args the function call arguments /// /// \param Specialization if template argument deduction was successful, /// this will be set to the function template specialization produced by /// template argument deduction. /// /// \param Info the argument will be updated to provide additional information /// about template argument deduction. /// /// \param CheckNonDependent A callback to invoke to check conversions for /// non-dependent parameters, between deduction and substitution, per DR1391. /// If this returns true, substitution will be skipped and we return /// TDK_NonDependentConversionFailure. The callback is passed the parameter /// types (after substituting explicit template arguments). /// /// \returns the result of template argument deduction. Sema::TemplateDeductionResult Sema::DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef Args, FunctionDecl *&Specialization, TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref)> CheckNonDependent) { if (FunctionTemplate->isInvalidDecl()) return TDK_Invalid; FunctionDecl *Function = FunctionTemplate->getTemplatedDecl(); unsigned NumParams = Function->getNumParams(); unsigned FirstInnerIndex = getFirstInnerIndex(FunctionTemplate); // C++ [temp.deduct.call]p1: // Template argument deduction is done by comparing each function template // parameter type (call it P) with the type of the corresponding argument // of the call (call it A) as described below. if (Args.size() < Function->getMinRequiredArguments() && !PartialOverloading) return TDK_TooFewArguments; else if (TooManyArguments(NumParams, Args.size(), PartialOverloading)) { const auto *Proto = Function->getType()->castAs(); if (Proto->isTemplateVariadic()) /* Do nothing */; else if (!Proto->isVariadic()) return TDK_TooManyArguments; } // The types of the parameters from which we will perform template argument // deduction. 
  LocalInstantiationScope InstScope(*this);

  TemplateParameterList *TemplateParams
    = FunctionTemplate->getTemplateParameters();
  SmallVector<DeducedTemplateArgument, 4> Deduced;
  SmallVector<QualType, 8> ParamTypes;
  unsigned NumExplicitlySpecified = 0;
  if (ExplicitTemplateArgs) {
    TemplateDeductionResult Result;
    runWithSufficientStackSpace(Info.getLocation(), [&] {
      Result = SubstituteExplicitTemplateArguments(
          FunctionTemplate, *ExplicitTemplateArgs, Deduced, ParamTypes, nullptr,
          Info);
    });
    if (Result)
      return Result;

    NumExplicitlySpecified = Deduced.size();
  } else {
    // Just fill in the parameter types from the function declaration.
    for (unsigned I = 0; I != NumParams; ++I)
      ParamTypes.push_back(Function->getParamDecl(I)->getType());
  }

  SmallVector<OriginalCallArg, 8> OriginalCallArgs;

  // Deduce an argument of type ParamType from an expression with index ArgIdx.
  auto DeduceCallArgument = [&](QualType ParamType, unsigned ArgIdx) {
    // C++ [temp.deduct.call]p1: (DR1391)
    //   Template argument deduction is done by comparing each function
    //   template parameter that contains template-parameters that participate
    //   in template argument deduction ...
    if (!hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
      return Sema::TDK_Success;

    //   ... with the type of the corresponding argument
    return DeduceTemplateArgumentsFromCallArgument(
        *this, TemplateParams, FirstInnerIndex, ParamType, Args[ArgIdx], Info,
        Deduced, OriginalCallArgs, /*Decomposed*/false, ArgIdx, /*TDF*/ 0);
  };

  // Deduce template arguments from the function parameters.
  Deduced.resize(TemplateParams->size());
  SmallVector<QualType, 8> ParamTypesForArgChecking;
  for (unsigned ParamIdx = 0, NumParamTypes = ParamTypes.size(), ArgIdx = 0;
       ParamIdx != NumParamTypes; ++ParamIdx) {
    QualType ParamType = ParamTypes[ParamIdx];

    const PackExpansionType *ParamExpansion =
        dyn_cast<PackExpansionType>(ParamType);
    if (!ParamExpansion) {
      // Simple case: matching a function parameter to a function argument.
      if (ArgIdx >= Args.size())
        break;

      ParamTypesForArgChecking.push_back(ParamType);
      if (auto Result = DeduceCallArgument(ParamType, ArgIdx++))
        return Result;

      continue;
    }

    QualType ParamPattern = ParamExpansion->getPattern();
    PackDeductionScope PackScope(*this, TemplateParams, Deduced, Info,
                                 ParamPattern);

    // C++0x [temp.deduct.call]p1:
    //   For a function parameter pack that occurs at the end of the
    //   parameter-declaration-list, the type A of each remaining argument of
    //   the call is compared with the type P of the declarator-id of the
    //   function parameter pack. Each comparison deduces template arguments
    //   for subsequent positions in the template parameter packs expanded by
    //   the function parameter pack. When a function parameter pack appears
    //   in a non-deduced context [not at the end of the list], the type of
    //   that parameter pack is never deduced.
    //
    // FIXME: The above rule allows the size of the parameter pack to change
    // after we skip it (in the non-deduced case). That makes no sense, so
    // we instead notionally deduce the pack against N arguments, where N is
    // the length of the explicitly-specified pack if it's expanded by the
    // parameter pack and 0 otherwise, and we treat each deduction as a
    // non-deduced context.
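    // Illustrative example (hypothetical, not part of the original source) of
    // the trailing-pack rule quoted above:
    //   template <typename... Ts> void spread(Ts ...ts);
    //   spread(1, 2.0, 'c');   // each remaining argument deduces one more
    //                          // pack element: Ts = <int, double, char>
    // A pack that is not at the end of the parameter list is a non-deduced
    // context and receives no elements from the call arguments.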
if (ParamIdx + 1 == NumParamTypes || PackScope.hasFixedArity()) { for (; ArgIdx < Args.size() && PackScope.hasNextElement(); PackScope.nextPackElement(), ++ArgIdx) { ParamTypesForArgChecking.push_back(ParamPattern); if (auto Result = DeduceCallArgument(ParamPattern, ArgIdx)) return Result; } } else { // If the parameter type contains an explicitly-specified pack that we // could not expand, skip the number of parameters notionally created // by the expansion. Optional NumExpansions = ParamExpansion->getNumExpansions(); if (NumExpansions && !PackScope.isPartiallyExpanded()) { for (unsigned I = 0; I != *NumExpansions && ArgIdx < Args.size(); ++I, ++ArgIdx) { ParamTypesForArgChecking.push_back(ParamPattern); // FIXME: Should we add OriginalCallArgs for these? What if the // corresponding argument is a list? PackScope.nextPackElement(); } } } // Build argument packs for each of the parameter packs expanded by this // pack expansion. if (auto Result = PackScope.finish()) return Result; } // Capture the context in which the function call is made. This is the context // that is needed when the accessibility of template arguments is checked. DeclContext *CallingCtx = CurContext; TemplateDeductionResult Result; runWithSufficientStackSpace(Info.getLocation(), [&] { Result = FinishTemplateArgumentDeduction( FunctionTemplate, Deduced, NumExplicitlySpecified, Specialization, Info, &OriginalCallArgs, PartialOverloading, [&, CallingCtx]() { ContextRAII SavedContext(*this, CallingCtx); return CheckNonDependent(ParamTypesForArgChecking); }); }); return Result; } QualType Sema::adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec) { if (ArgFunctionType.isNull()) return ArgFunctionType; const auto *FunctionTypeP = FunctionType->castAs(); const auto *ArgFunctionTypeP = ArgFunctionType->castAs(); FunctionProtoType::ExtProtoInfo EPI = ArgFunctionTypeP->getExtProtoInfo(); bool Rebuild = false; CallingConv CC = FunctionTypeP->getCallConv(); if (EPI.ExtInfo.getCC() != CC) { EPI.ExtInfo = EPI.ExtInfo.withCallingConv(CC); Rebuild = true; } bool NoReturn = FunctionTypeP->getNoReturnAttr(); if (EPI.ExtInfo.getNoReturn() != NoReturn) { EPI.ExtInfo = EPI.ExtInfo.withNoReturn(NoReturn); Rebuild = true; } if (AdjustExceptionSpec && (FunctionTypeP->hasExceptionSpec() || ArgFunctionTypeP->hasExceptionSpec())) { EPI.ExceptionSpec = FunctionTypeP->getExtProtoInfo().ExceptionSpec; Rebuild = true; } if (!Rebuild) return ArgFunctionType; return Context.getFunctionType(ArgFunctionTypeP->getReturnType(), ArgFunctionTypeP->getParamTypes(), EPI); } /// Deduce template arguments when taking the address of a function /// template (C++ [temp.deduct.funcaddr]) or matching a specialization to /// a template. /// /// \param FunctionTemplate the function template for which we are performing /// template argument deduction. /// /// \param ExplicitTemplateArgs the explicitly-specified template /// arguments. /// /// \param ArgFunctionType the function type that will be used as the /// "argument" type (A) when performing template argument deduction from the /// function template's function type. This type may be NULL, if there is no /// argument type to compare against, in C++0x [temp.arg.explicit]p3. /// /// \param Specialization if template argument deduction was successful, /// this will be set to the function template specialization produced by /// template argument deduction. /// /// \param Info the argument will be updated to provide additional information /// about template argument deduction. 
/// /// \param IsAddressOfFunction If \c true, we are deducing as part of taking /// the address of a function template per [temp.deduct.funcaddr] and /// [over.over]. If \c false, we are looking up a function template /// specialization based on its signature, per [temp.deduct.decl]. /// /// \returns the result of template argument deduction. Sema::TemplateDeductionResult Sema::DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, TemplateDeductionInfo &Info, bool IsAddressOfFunction) { if (FunctionTemplate->isInvalidDecl()) return TDK_Invalid; FunctionDecl *Function = FunctionTemplate->getTemplatedDecl(); TemplateParameterList *TemplateParams = FunctionTemplate->getTemplateParameters(); QualType FunctionType = Function->getType(); // Substitute any explicit template arguments. LocalInstantiationScope InstScope(*this); SmallVector Deduced; unsigned NumExplicitlySpecified = 0; SmallVector ParamTypes; if (ExplicitTemplateArgs) { TemplateDeductionResult Result; runWithSufficientStackSpace(Info.getLocation(), [&] { Result = SubstituteExplicitTemplateArguments( FunctionTemplate, *ExplicitTemplateArgs, Deduced, ParamTypes, &FunctionType, Info); }); if (Result) return Result; NumExplicitlySpecified = Deduced.size(); } // When taking the address of a function, we require convertibility of // the resulting function type. Otherwise, we allow arbitrary mismatches // of calling convention and noreturn. if (!IsAddressOfFunction) ArgFunctionType = adjustCCAndNoReturn(ArgFunctionType, FunctionType, /*AdjustExceptionSpec*/false); // Unevaluated SFINAE context. EnterExpressionEvaluationContext Unevaluated( *this, Sema::ExpressionEvaluationContext::Unevaluated); SFINAETrap Trap(*this); Deduced.resize(TemplateParams->size()); // If the function has a deduced return type, substitute it for a dependent // type so that we treat it as a non-deduced context in what follows. If we // are looking up by signature, the signature type should also have a deduced // return type, which we instead expect to exactly match. bool HasDeducedReturnType = false; if (getLangOpts().CPlusPlus14 && IsAddressOfFunction && Function->getReturnType()->getContainedAutoType()) { FunctionType = SubstAutoType(FunctionType, Context.DependentTy); HasDeducedReturnType = true; } if (!ArgFunctionType.isNull()) { unsigned TDF = TDF_TopLevelParameterTypeList | TDF_AllowCompatibleFunctionType; // Deduce template arguments from the function type. if (TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams, FunctionType, ArgFunctionType, Info, Deduced, TDF)) return Result; } TemplateDeductionResult Result; runWithSufficientStackSpace(Info.getLocation(), [&] { Result = FinishTemplateArgumentDeduction(FunctionTemplate, Deduced, NumExplicitlySpecified, Specialization, Info); }); if (Result) return Result; // If the function has a deduced return type, deduce it now, so we can check // that the deduced function type matches the requested type. if (HasDeducedReturnType && Specialization->getReturnType()->isUndeducedType() && DeduceReturnType(Specialization, Info.getLocation(), false)) return TDK_MiscellaneousDeductionFailure; // If the function has a dependent exception specification, resolve it now, // so we can check that the exception specification matches. 
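  // Illustrative example (hypothetical, not part of the original source):
  // this overload deduces against a target function type rather than against
  // call arguments, as when taking the address of a function template:
  //   template <typename T> T twice(T v) { return v + v; }
  //   int (*fp)(int) = twice;   // A is int(int); T is deduced to int and the
  //                             // specialization's type must then match A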
auto *SpecializationFPT = Specialization->getType()->castAs(); if (getLangOpts().CPlusPlus17 && isUnresolvedExceptionSpec(SpecializationFPT->getExceptionSpecType()) && !ResolveExceptionSpec(Info.getLocation(), SpecializationFPT)) return TDK_MiscellaneousDeductionFailure; // Adjust the exception specification of the argument to match the // substituted and resolved type we just formed. (Calling convention and // noreturn can't be dependent, so we don't actually need this for them // right now.) QualType SpecializationType = Specialization->getType(); if (!IsAddressOfFunction) ArgFunctionType = adjustCCAndNoReturn(ArgFunctionType, SpecializationType, /*AdjustExceptionSpec*/true); // If the requested function type does not match the actual type of the // specialization with respect to arguments of compatible pointer to function // types, template argument deduction fails. if (!ArgFunctionType.isNull()) { if (IsAddressOfFunction && !isSameOrCompatibleFunctionType( Context.getCanonicalType(SpecializationType), Context.getCanonicalType(ArgFunctionType))) return TDK_MiscellaneousDeductionFailure; if (!IsAddressOfFunction && !Context.hasSameType(SpecializationType, ArgFunctionType)) return TDK_MiscellaneousDeductionFailure; } return TDK_Success; } /// Deduce template arguments for a templated conversion /// function (C++ [temp.deduct.conv]) and, if successful, produce a /// conversion function template specialization. Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate, QualType ToType, CXXConversionDecl *&Specialization, TemplateDeductionInfo &Info) { if (ConversionTemplate->isInvalidDecl()) return TDK_Invalid; CXXConversionDecl *ConversionGeneric = cast(ConversionTemplate->getTemplatedDecl()); QualType FromType = ConversionGeneric->getConversionType(); // Canonicalize the types for deduction. QualType P = Context.getCanonicalType(FromType); QualType A = Context.getCanonicalType(ToType); // C++0x [temp.deduct.conv]p2: // If P is a reference type, the type referred to by P is used for // type deduction. if (const ReferenceType *PRef = P->getAs()) P = PRef->getPointeeType(); // C++0x [temp.deduct.conv]p4: // [...] If A is a reference type, the type referred to by A is used // for type deduction. if (const ReferenceType *ARef = A->getAs()) { A = ARef->getPointeeType(); // We work around a defect in the standard here: cv-qualifiers are also // removed from P and A in this case, unless P was a reference type. This // seems to mostly match what other compilers are doing. if (!FromType->getAs()) { A = A.getUnqualifiedType(); P = P.getUnqualifiedType(); } // C++ [temp.deduct.conv]p3: // // If A is not a reference type: } else { assert(!A->isReferenceType() && "Reference types were handled above"); // - If P is an array type, the pointer type produced by the // array-to-pointer standard conversion (4.2) is used in place // of P for type deduction; otherwise, if (P->isArrayType()) P = Context.getArrayDecayedType(P); // - If P is a function type, the pointer type produced by the // function-to-pointer standard conversion (4.3) is used in // place of P for type deduction; otherwise, else if (P->isFunctionType()) P = Context.getPointerType(P); // - If P is a cv-qualified type, the top level cv-qualifiers of // P's type are ignored for type deduction. else P = P.getUnqualifiedType(); // C++0x [temp.deduct.conv]p4: // If A is a cv-qualified type, the top level cv-qualifiers of A's // type are ignored for type deduction. 
If A is a reference type, the type // referred to by A is used for type deduction. A = A.getUnqualifiedType(); } // Unevaluated SFINAE context. EnterExpressionEvaluationContext Unevaluated( *this, Sema::ExpressionEvaluationContext::Unevaluated); SFINAETrap Trap(*this); // C++ [temp.deduct.conv]p1: // Template argument deduction is done by comparing the return // type of the template conversion function (call it P) with the // type that is required as the result of the conversion (call it // A) as described in 14.8.2.4. TemplateParameterList *TemplateParams = ConversionTemplate->getTemplateParameters(); SmallVector Deduced; Deduced.resize(TemplateParams->size()); // C++0x [temp.deduct.conv]p4: // In general, the deduction process attempts to find template // argument values that will make the deduced A identical to // A. However, there are two cases that allow a difference: unsigned TDF = 0; // - If the original A is a reference type, A can be more // cv-qualified than the deduced A (i.e., the type referred to // by the reference) if (ToType->isReferenceType()) TDF |= TDF_ArgWithReferenceType; // - The deduced A can be another pointer or pointer to member // type that can be converted to A via a qualification // conversion. // // (C++0x [temp.deduct.conv]p6 clarifies that this only happens when // both P and A are pointers or member pointers. In this case, we // just ignore cv-qualifiers completely). if ((P->isPointerType() && A->isPointerType()) || (P->isMemberPointerType() && A->isMemberPointerType())) TDF |= TDF_IgnoreQualifiers; if (TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams, P, A, Info, Deduced, TDF)) return Result; // Create an Instantiation Scope for finalizing the operator. LocalInstantiationScope InstScope(*this); // Finish template argument deduction. FunctionDecl *ConversionSpecialized = nullptr; TemplateDeductionResult Result; runWithSufficientStackSpace(Info.getLocation(), [&] { Result = FinishTemplateArgumentDeduction(ConversionTemplate, Deduced, 0, ConversionSpecialized, Info); }); Specialization = cast_or_null(ConversionSpecialized); return Result; } /// Deduce template arguments for a function template when there is /// nothing to deduce against (C++0x [temp.arg.explicit]p3). /// /// \param FunctionTemplate the function template for which we are performing /// template argument deduction. /// /// \param ExplicitTemplateArgs the explicitly-specified template /// arguments. /// /// \param Specialization if template argument deduction was successful, /// this will be set to the function template specialization produced by /// template argument deduction. /// /// \param Info the argument will be updated to provide additional information /// about template argument deduction. /// /// \param IsAddressOfFunction If \c true, we are deducing as part of taking /// the address of a function template in a context where we do not have a /// target type, per [over.over]. If \c false, we are looking up a function /// template specialization based on its signature, which only happens when /// deducing a function parameter type from an argument that is a template-id /// naming a function template specialization. /// /// \returns the result of template argument deduction. 
Sema::TemplateDeductionResult Sema::DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, TemplateDeductionInfo &Info, bool IsAddressOfFunction) { return DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs, QualType(), Specialization, Info, IsAddressOfFunction); } namespace { struct DependentAuto { bool IsPack; }; /// Substitute the 'auto' specifier or deduced template specialization type /// specifier within a type for a given replacement type. class SubstituteDeducedTypeTransform : public TreeTransform { QualType Replacement; bool ReplacementIsPack; bool UseTypeSugar; public: SubstituteDeducedTypeTransform(Sema &SemaRef, DependentAuto DA) : TreeTransform(SemaRef), Replacement(), ReplacementIsPack(DA.IsPack), UseTypeSugar(true) {} SubstituteDeducedTypeTransform(Sema &SemaRef, QualType Replacement, bool UseTypeSugar = true) : TreeTransform(SemaRef), Replacement(Replacement), ReplacementIsPack(false), UseTypeSugar(UseTypeSugar) {} QualType TransformDesugared(TypeLocBuilder &TLB, DeducedTypeLoc TL) { assert(isa(Replacement) && "unexpected unsugared replacement kind"); QualType Result = Replacement; TemplateTypeParmTypeLoc NewTL = TLB.push(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } QualType TransformAutoType(TypeLocBuilder &TLB, AutoTypeLoc TL) { // If we're building the type pattern to deduce against, don't wrap the // substituted type in an AutoType. Certain template deduction rules // apply only when a template type parameter appears directly (and not if // the parameter is found through desugaring). For instance: // auto &&lref = lvalue; // must transform into "rvalue reference to T" not "rvalue reference to // auto type deduced as T" in order for [temp.deduct.call]p3 to apply. // // FIXME: Is this still necessary? if (!UseTypeSugar) return TransformDesugared(TLB, TL); QualType Result = SemaRef.Context.getAutoType( Replacement, TL.getTypePtr()->getKeyword(), Replacement.isNull(), ReplacementIsPack, TL.getTypePtr()->getTypeConstraintConcept(), TL.getTypePtr()->getTypeConstraintArguments()); auto NewTL = TLB.push(Result); NewTL.copy(TL); return Result; } QualType TransformDeducedTemplateSpecializationType( TypeLocBuilder &TLB, DeducedTemplateSpecializationTypeLoc TL) { if (!UseTypeSugar) return TransformDesugared(TLB, TL); QualType Result = SemaRef.Context.getDeducedTemplateSpecializationType( TL.getTypePtr()->getTemplateName(), Replacement, Replacement.isNull()); auto NewTL = TLB.push(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } ExprResult TransformLambdaExpr(LambdaExpr *E) { // Lambdas never need to be transformed. return E; } QualType Apply(TypeLoc TL) { // Create some scratch storage for the transformed type locations. // FIXME: We're just going to throw this information away. Don't build it. TypeLocBuilder TLB; TLB.reserve(TL.getFullDataSize()); return TransformType(TLB, TL); } }; } // namespace Sema::DeduceAutoResult Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init, QualType &Result, Optional DependentDeductionDepth, bool IgnoreConstraints) { return DeduceAutoType(Type->getTypeLoc(), Init, Result, DependentDeductionDepth, IgnoreConstraints); } /// Attempt to produce an informative diagostic explaining why auto deduction /// failed. /// \return \c true if diagnosed, \c false if not. 
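// Illustrative example (not part of the patch; names invented for
// illustration): why the transform above strips the 'auto' sugar when building
// the deduction pattern. Treating 'auto &&' as 'T &&' lets the
// [temp.deduct.call]p3 forwarding-reference rule apply, so an lvalue
// initializer deduces an lvalue reference. A minimal C++11 sketch:
#include <type_traits>

void test_auto_ref() {
  int lvalue = 0;
  auto &&lref = lvalue; // behaves like T &&x = lvalue; T = int&, lref is int&
  auto &&rref = 42;     // rvalue initializer; T = int, rref is int&&
  static_assert(std::is_same<decltype(lref), int &>::value, "");
  static_assert(std::is_same<decltype(rref), int &&>::value, "");
  (void)lref; (void)rref;
}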
static bool diagnoseAutoDeductionFailure(Sema &S, Sema::TemplateDeductionResult TDK, TemplateDeductionInfo &Info, ArrayRef Ranges) { switch (TDK) { case Sema::TDK_Inconsistent: { // Inconsistent deduction means we were deducing from an initializer list. auto D = S.Diag(Info.getLocation(), diag::err_auto_inconsistent_deduction); D << Info.FirstArg << Info.SecondArg; for (auto R : Ranges) D << R; return true; } // FIXME: Are there other cases for which a custom diagnostic is more useful // than the basic "types don't match" diagnostic? default: return false; } } static Sema::DeduceAutoResult CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type, AutoTypeLoc TypeLoc, QualType Deduced) { ConstraintSatisfaction Satisfaction; ConceptDecl *Concept = Type.getTypeConstraintConcept(); TemplateArgumentListInfo TemplateArgs(TypeLoc.getLAngleLoc(), TypeLoc.getRAngleLoc()); TemplateArgs.addArgument( TemplateArgumentLoc(TemplateArgument(Deduced), S.Context.getTrivialTypeSourceInfo( Deduced, TypeLoc.getNameLoc()))); for (unsigned I = 0, C = TypeLoc.getNumArgs(); I != C; ++I) TemplateArgs.addArgument(TypeLoc.getArgLoc(I)); llvm::SmallVector Converted; if (S.CheckTemplateArgumentList(Concept, SourceLocation(), TemplateArgs, /*PartialTemplateArgs=*/false, Converted)) return Sema::DAR_FailedAlreadyDiagnosed; if (S.CheckConstraintSatisfaction(Concept, {Concept->getConstraintExpr()}, Converted, TypeLoc.getLocalSourceRange(), Satisfaction)) return Sema::DAR_FailedAlreadyDiagnosed; if (!Satisfaction.IsSatisfied) { std::string Buf; llvm::raw_string_ostream OS(Buf); OS << "'" << Concept->getName(); if (TypeLoc.hasExplicitTemplateArgs()) { OS << "<"; for (const auto &Arg : Type.getTypeConstraintArguments()) Arg.print(S.getPrintingPolicy(), OS); OS << ">"; } OS << "'"; OS.flush(); S.Diag(TypeLoc.getConceptNameLoc(), diag::err_placeholder_constraints_not_satisfied) << Deduced << Buf << TypeLoc.getLocalSourceRange(); S.DiagnoseUnsatisfiedConstraint(Satisfaction); return Sema::DAR_FailedAlreadyDiagnosed; } return Sema::DAR_Succeeded; } /// Deduce the type for an auto type-specifier (C++11 [dcl.spec.auto]p6) /// /// Note that this is done even if the initializer is dependent. (This is /// necessary to support partial ordering of templates using 'auto'.) /// A dependent type will be produced when deducing from a dependent type. /// /// \param Type the type pattern using the auto type-specifier. /// \param Init the initializer for the variable whose type is to be deduced. /// \param Result if type deduction was successful, this will be set to the /// deduced type. /// \param DependentDeductionDepth Set if we should permit deduction in /// dependent cases. This is necessary for template partial ordering with /// 'auto' template parameters. The value specified is the template /// parameter depth at which we should perform 'auto' deduction. /// \param IgnoreConstraints Set if we should not fail if the deduced type does /// not satisfy the type-constraint in the auto type. 
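// Illustrative example (not part of the patch): the check performed by
// CheckDeducedPlaceholderConstraints above. With a constrained placeholder,
// the deduced type is substituted into the named concept and must satisfy it.
// A minimal C++20 sketch:
#include <concepts>

void test_constrained_auto() {
  std::integral auto n = 42;     // OK: int satisfies std::integral
  // std::integral auto x = 1.5; // error: deduced 'double' does not satisfy
  //                             // the 'std::integral' constraint
  (void)n;
}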
Sema::DeduceAutoResult Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result, Optional DependentDeductionDepth, bool IgnoreConstraints) { if (Init->containsErrors()) return DAR_FailedAlreadyDiagnosed; if (Init->getType()->isNonOverloadPlaceholderType()) { ExprResult NonPlaceholder = CheckPlaceholderExpr(Init); if (NonPlaceholder.isInvalid()) return DAR_FailedAlreadyDiagnosed; Init = NonPlaceholder.get(); } DependentAuto DependentResult = { /*.IsPack = */ (bool)Type.getAs()}; if (!DependentDeductionDepth && (Type.getType()->isDependentType() || Init->isTypeDependent() || Init->containsUnexpandedParameterPack())) { Result = SubstituteDeducedTypeTransform(*this, DependentResult).Apply(Type); assert(!Result.isNull() && "substituting DependentTy can't fail"); return DAR_Succeeded; } // Find the depth of template parameter to synthesize. unsigned Depth = DependentDeductionDepth.getValueOr(0); // If this is a 'decltype(auto)' specifier, do the decltype dance. // Since 'decltype(auto)' can only occur at the top of the type, we // don't need to go digging for it. if (const AutoType *AT = Type.getType()->getAs()) { if (AT->isDecltypeAuto()) { if (isa(Init)) { Diag(Init->getBeginLoc(), diag::err_decltype_auto_initializer_list); return DAR_FailedAlreadyDiagnosed; } ExprResult ER = CheckPlaceholderExpr(Init); if (ER.isInvalid()) return DAR_FailedAlreadyDiagnosed; Init = ER.get(); QualType Deduced = BuildDecltypeType(Init, Init->getBeginLoc(), false); if (Deduced.isNull()) return DAR_FailedAlreadyDiagnosed; // FIXME: Support a non-canonical deduced type for 'auto'. Deduced = Context.getCanonicalType(Deduced); if (AT->isConstrained() && !IgnoreConstraints) { auto ConstraintsResult = CheckDeducedPlaceholderConstraints(*this, *AT, Type.getContainedAutoTypeLoc(), Deduced); if (ConstraintsResult != DAR_Succeeded) return ConstraintsResult; } Result = SubstituteDeducedTypeTransform(*this, Deduced).Apply(Type); if (Result.isNull()) return DAR_FailedAlreadyDiagnosed; return DAR_Succeeded; } else if (!getLangOpts().CPlusPlus) { if (isa(Init)) { Diag(Init->getBeginLoc(), diag::err_auto_init_list_from_c); return DAR_FailedAlreadyDiagnosed; } } } SourceLocation Loc = Init->getExprLoc(); LocalInstantiationScope InstScope(*this); // Build template void Func(FuncParam); TemplateTypeParmDecl *TemplParam = TemplateTypeParmDecl::Create( Context, nullptr, SourceLocation(), Loc, Depth, 0, nullptr, false, false, false); QualType TemplArg = QualType(TemplParam->getTypeForDecl(), 0); NamedDecl *TemplParamPtr = TemplParam; FixedSizeTemplateParameterListStorage<1, false> TemplateParamsSt( Context, Loc, Loc, TemplParamPtr, Loc, nullptr); QualType FuncParam = SubstituteDeducedTypeTransform(*this, TemplArg, /*UseTypeSugar*/false) .Apply(Type); assert(!FuncParam.isNull() && "substituting template parameter for 'auto' failed"); // Deduce type of TemplParam in Func(Init) SmallVector Deduced; Deduced.resize(1); TemplateDeductionInfo Info(Loc, Depth); // If deduction failed, don't diagnose if the initializer is dependent; it // might acquire a matching type in the instantiation. 
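// Illustrative example (not part of the patch; names invented for
// illustration): the decltype(auto) path above. Instead of inventing a
// template parameter, deduction applies the decltype rules to the
// initializer, so references and value category are preserved. C++14 sketch:
#include <type_traits>

static int g_counter = 0;
static int &counter() { return g_counter; }

void test_decltype_auto() {
  auto a = counter();           // plain auto: deduces int, drops the reference
  decltype(auto) b = counter(); // decltype(auto): deduces int&
  static_assert(std::is_same<decltype(a), int>::value, "");
  static_assert(std::is_same<decltype(b), int &>::value, "");
  (void)a; (void)b;
}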
auto DeductionFailed = [&](TemplateDeductionResult TDK, ArrayRef Ranges) -> DeduceAutoResult { if (Init->isTypeDependent()) { Result = SubstituteDeducedTypeTransform(*this, DependentResult).Apply(Type); assert(!Result.isNull() && "substituting DependentTy can't fail"); return DAR_Succeeded; } if (diagnoseAutoDeductionFailure(*this, TDK, Info, Ranges)) return DAR_FailedAlreadyDiagnosed; return DAR_Failed; }; SmallVector OriginalCallArgs; InitListExpr *InitList = dyn_cast(Init); if (InitList) { // Notionally, we substitute std::initializer_list for 'auto' and deduce // against that. Such deduction only succeeds if removing cv-qualifiers and // references results in std::initializer_list. if (!Type.getType().getNonReferenceType()->getAs()) return DAR_Failed; // Resolving a core issue: a braced-init-list containing any designators is // a non-deduced context. for (Expr *E : InitList->inits()) if (isa(E)) return DAR_Failed; SourceRange DeducedFromInitRange; for (unsigned i = 0, e = InitList->getNumInits(); i < e; ++i) { Expr *Init = InitList->getInit(i); if (auto TDK = DeduceTemplateArgumentsFromCallArgument( *this, TemplateParamsSt.get(), 0, TemplArg, Init, Info, Deduced, OriginalCallArgs, /*Decomposed*/ true, /*ArgIdx*/ 0, /*TDF*/ 0)) return DeductionFailed(TDK, {DeducedFromInitRange, Init->getSourceRange()}); if (DeducedFromInitRange.isInvalid() && Deduced[0].getKind() != TemplateArgument::Null) DeducedFromInitRange = Init->getSourceRange(); } } else { if (!getLangOpts().CPlusPlus && Init->refersToBitField()) { Diag(Loc, diag::err_auto_bitfield); return DAR_FailedAlreadyDiagnosed; } if (auto TDK = DeduceTemplateArgumentsFromCallArgument( *this, TemplateParamsSt.get(), 0, FuncParam, Init, Info, Deduced, OriginalCallArgs, /*Decomposed*/ false, /*ArgIdx*/ 0, /*TDF*/ 0)) return DeductionFailed(TDK, {}); } // Could be null if somehow 'auto' appears in a non-deduced context. if (Deduced[0].getKind() != TemplateArgument::Type) return DeductionFailed(TDK_Incomplete, {}); QualType DeducedType = Deduced[0].getAsType(); if (InitList) { DeducedType = BuildStdInitializerList(DeducedType, Loc); if (DeducedType.isNull()) return DAR_FailedAlreadyDiagnosed; } if (const auto *AT = Type.getType()->getAs()) { if (AT->isConstrained() && !IgnoreConstraints) { auto ConstraintsResult = CheckDeducedPlaceholderConstraints(*this, *AT, Type.getContainedAutoTypeLoc(), DeducedType); if (ConstraintsResult != DAR_Succeeded) return ConstraintsResult; } } Result = SubstituteDeducedTypeTransform(*this, DeducedType).Apply(Type); if (Result.isNull()) return DAR_FailedAlreadyDiagnosed; // Check that the deduced argument type is compatible with the original // argument type per C++ [temp.deduct.call]p4. QualType DeducedA = InitList ? 
Deduced[0].getAsType() : Result; for (const OriginalCallArg &OriginalArg : OriginalCallArgs) { assert((bool)InitList == OriginalArg.DecomposedParam && "decomposed non-init-list in auto deduction?"); if (auto TDK = CheckOriginalCallArgDeduction(*this, Info, OriginalArg, DeducedA)) { Result = QualType(); return DeductionFailed(TDK, {}); } } return DAR_Succeeded; } QualType Sema::SubstAutoType(QualType TypeWithAuto, QualType TypeToReplaceAuto) { if (TypeToReplaceAuto->isDependentType()) return SubstituteDeducedTypeTransform( *this, DependentAuto{ TypeToReplaceAuto->containsUnexpandedParameterPack()}) .TransformType(TypeWithAuto); return SubstituteDeducedTypeTransform(*this, TypeToReplaceAuto) .TransformType(TypeWithAuto); } TypeSourceInfo *Sema::SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType TypeToReplaceAuto) { if (TypeToReplaceAuto->isDependentType()) return SubstituteDeducedTypeTransform( *this, DependentAuto{ TypeToReplaceAuto->containsUnexpandedParameterPack()}) .TransformType(TypeWithAuto); return SubstituteDeducedTypeTransform(*this, TypeToReplaceAuto) .TransformType(TypeWithAuto); } QualType Sema::ReplaceAutoType(QualType TypeWithAuto, QualType TypeToReplaceAuto) { return SubstituteDeducedTypeTransform(*this, TypeToReplaceAuto, /*UseTypeSugar*/ false) .TransformType(TypeWithAuto); } +TypeSourceInfo *Sema::ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, + QualType TypeToReplaceAuto) { + return SubstituteDeducedTypeTransform(*this, TypeToReplaceAuto, + /*UseTypeSugar*/ false) + .TransformType(TypeWithAuto); +} + void Sema::DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init) { if (isa(Init)) Diag(VDecl->getLocation(), VDecl->isInitCapture() ? diag::err_init_capture_deduction_failure_from_init_list : diag::err_auto_var_deduction_failure_from_init_list) << VDecl->getDeclName() << VDecl->getType() << Init->getSourceRange(); else Diag(VDecl->getLocation(), VDecl->isInitCapture() ? diag::err_init_capture_deduction_failure : diag::err_auto_var_deduction_failure) << VDecl->getDeclName() << VDecl->getType() << Init->getType() << Init->getSourceRange(); } bool Sema::DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose) { assert(FD->getReturnType()->isUndeducedType()); // For a lambda's conversion operator, deduce any 'auto' or 'decltype(auto)' // within the return type from the call operator's type. if (isLambdaConversionOperator(FD)) { CXXRecordDecl *Lambda = cast(FD)->getParent(); FunctionDecl *CallOp = Lambda->getLambdaCallOperator(); // For a generic lambda, instantiate the call operator if needed. if (auto *Args = FD->getTemplateSpecializationArgs()) { CallOp = InstantiateFunctionDeclaration( CallOp->getDescribedFunctionTemplate(), Args, Loc); if (!CallOp || CallOp->isInvalidDecl()) return true; // We might need to deduce the return type by instantiating the definition // of the operator() function. if (CallOp->getReturnType()->isUndeducedType()) { runWithSufficientStackSpace(Loc, [&] { InstantiateFunctionDefinition(Loc, CallOp); }); } } if (CallOp->isInvalidDecl()) return true; assert(!CallOp->getReturnType()->isUndeducedType() && "failed to deduce lambda return type"); // Build the new return type from scratch. 
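// Illustrative example (not part of the patch): the initializer-list
// deduction handled a little further above. Copy-initializing 'auto' from a
// braced-init-list notionally deduces against std::initializer_list<T>, and
// every element must deduce the same T. A minimal C++11 sketch:
#include <initializer_list>
#include <type_traits>

void test_auto_init_list() {
  auto il = {1, 2, 3};   // std::initializer_list<int>
  // auto bad = {1, 2.0}; // error: inconsistent deduction for 'auto'
  static_assert(
      std::is_same<decltype(il), std::initializer_list<int>>::value, "");
  (void)il;
}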
QualType RetType = getLambdaConversionFunctionResultType( CallOp->getType()->castAs()); if (FD->getReturnType()->getAs()) RetType = Context.getPointerType(RetType); else { assert(FD->getReturnType()->getAs()); RetType = Context.getBlockPointerType(RetType); } Context.adjustDeducedFunctionResultType(FD, RetType); return false; } if (FD->getTemplateInstantiationPattern()) { runWithSufficientStackSpace(Loc, [&] { InstantiateFunctionDefinition(Loc, FD); }); } bool StillUndeduced = FD->getReturnType()->isUndeducedType(); if (StillUndeduced && Diagnose && !FD->isInvalidDecl()) { Diag(Loc, diag::err_auto_fn_used_before_defined) << FD; Diag(FD->getLocation(), diag::note_callee_decl) << FD; } return StillUndeduced; } /// If this is a non-static member function, static void AddImplicitObjectParameterType(ASTContext &Context, CXXMethodDecl *Method, SmallVectorImpl &ArgTypes) { // C++11 [temp.func.order]p3: // [...] The new parameter is of type "reference to cv A," where cv are // the cv-qualifiers of the function template (if any) and A is // the class of which the function template is a member. // // The standard doesn't say explicitly, but we pick the appropriate kind of // reference type based on [over.match.funcs]p4. QualType ArgTy = Context.getTypeDeclType(Method->getParent()); ArgTy = Context.getQualifiedType(ArgTy, Method->getMethodQualifiers()); if (Method->getRefQualifier() == RQ_RValue) ArgTy = Context.getRValueReferenceType(ArgTy); else ArgTy = Context.getLValueReferenceType(ArgTy); ArgTypes.push_back(ArgTy); } /// Determine whether the function template \p FT1 is at least as /// specialized as \p FT2. static bool isAtLeastAsSpecializedAs(Sema &S, SourceLocation Loc, FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, bool Reversed) { assert(!Reversed || TPOC == TPOC_Call); FunctionDecl *FD1 = FT1->getTemplatedDecl(); FunctionDecl *FD2 = FT2->getTemplatedDecl(); const FunctionProtoType *Proto1 = FD1->getType()->getAs(); const FunctionProtoType *Proto2 = FD2->getType()->getAs(); assert(Proto1 && Proto2 && "Function templates must have prototypes"); TemplateParameterList *TemplateParams = FT2->getTemplateParameters(); SmallVector Deduced; Deduced.resize(TemplateParams->size()); // C++0x [temp.deduct.partial]p3: // The types used to determine the ordering depend on the context in which // the partial ordering is done: TemplateDeductionInfo Info(Loc); SmallVector Args2; switch (TPOC) { case TPOC_Call: { // - In the context of a function call, the function parameter types are // used. CXXMethodDecl *Method1 = dyn_cast(FD1); CXXMethodDecl *Method2 = dyn_cast(FD2); // C++11 [temp.func.order]p3: // [...] If only one of the function templates is a non-static // member, that function template is considered to have a new // first parameter inserted in its function parameter list. The // new parameter is of type "reference to cv A," where cv are // the cv-qualifiers of the function template (if any) and A is // the class of which the function template is a member. // // Note that we interpret this to mean "if one of the function // templates is a non-static member and the other is a non-member"; // otherwise, the ordering rules for static functions against non-static // functions don't make any sense. // // C++98/03 doesn't have this provision but we've extended DR532 to cover // it as wording was broken prior to it. 
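// Illustrative example (not part of the patch; names invented for
// illustration): the situation DeduceReturnType (above) exists for. A
// placeholder return type can only be used once a definition is available to
// deduce it from. A minimal C++14 sketch:
auto defined() { return 1; } // return type deduced as int from the definition
auto declared_only();        // fine as a declaration by itself...

void test_deduced_return() {
  int x = defined();          // OK
  // int y = declared_only(); // error: function with deduced return type
  //                          // cannot be used before it is defined
  (void)x;
}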
SmallVector Args1; unsigned NumComparedArguments = NumCallArguments1; if (!Method2 && Method1 && !Method1->isStatic()) { // Compare 'this' from Method1 against first parameter from Method2. AddImplicitObjectParameterType(S.Context, Method1, Args1); ++NumComparedArguments; } else if (!Method1 && Method2 && !Method2->isStatic()) { // Compare 'this' from Method2 against first parameter from Method1. AddImplicitObjectParameterType(S.Context, Method2, Args2); } else if (Method1 && Method2 && Reversed) { // Compare 'this' from Method1 against second parameter from Method2 // and 'this' from Method2 against second parameter from Method1. AddImplicitObjectParameterType(S.Context, Method1, Args1); AddImplicitObjectParameterType(S.Context, Method2, Args2); ++NumComparedArguments; } Args1.insert(Args1.end(), Proto1->param_type_begin(), Proto1->param_type_end()); Args2.insert(Args2.end(), Proto2->param_type_begin(), Proto2->param_type_end()); // C++ [temp.func.order]p5: // The presence of unused ellipsis and default arguments has no effect on // the partial ordering of function templates. if (Args1.size() > NumComparedArguments) Args1.resize(NumComparedArguments); if (Args2.size() > NumComparedArguments) Args2.resize(NumComparedArguments); if (Reversed) std::reverse(Args2.begin(), Args2.end()); if (DeduceTemplateArguments(S, TemplateParams, Args2.data(), Args2.size(), Args1.data(), Args1.size(), Info, Deduced, TDF_None, /*PartialOrdering=*/true)) return false; break; } case TPOC_Conversion: // - In the context of a call to a conversion operator, the return types // of the conversion function templates are used. if (DeduceTemplateArgumentsByTypeMatch( S, TemplateParams, Proto2->getReturnType(), Proto1->getReturnType(), Info, Deduced, TDF_None, /*PartialOrdering=*/true)) return false; break; case TPOC_Other: // - In other contexts (14.6.6.2) the function template's function type // is used. if (DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, FD2->getType(), FD1->getType(), Info, Deduced, TDF_None, /*PartialOrdering=*/true)) return false; break; } // C++0x [temp.deduct.partial]p11: // In most cases, all template parameters must have values in order for // deduction to succeed, but for partial ordering purposes a template // parameter may remain without a value provided it is not used in the // types being used for partial ordering. [ Note: a template parameter used // in a non-deduced context is considered used. -end note] unsigned ArgIdx = 0, NumArgs = Deduced.size(); for (; ArgIdx != NumArgs; ++ArgIdx) if (Deduced[ArgIdx].isNull()) break; // FIXME: We fail to implement [temp.deduct.type]p1 along this path. We need // to substitute the deduced arguments back into the template and check that // we get the right type. if (ArgIdx == NumArgs) { // All template arguments were deduced. FT1 is at least as specialized // as FT2. return true; } // Figure out which template parameters were used. 
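// Illustrative example (not part of the patch; names invented for
// illustration): the call-context partial ordering implemented above.
// Deduction succeeds from the T* signature toward the T signature but not the
// other way around, so the T* template is the more specialized one:
template <typename T> int pick(T)  { return 1; }
template <typename T> int pick(T*) { return 2; }

int test_partial_ordering() {
  int n = 0;
  return pick(&n); // not ambiguous: returns 2, the T* overload is chosen
}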
llvm::SmallBitVector UsedParameters(TemplateParams->size()); switch (TPOC) { case TPOC_Call: for (unsigned I = 0, N = Args2.size(); I != N; ++I) ::MarkUsedTemplateParameters(S.Context, Args2[I], false, TemplateParams->getDepth(), UsedParameters); break; case TPOC_Conversion: ::MarkUsedTemplateParameters(S.Context, Proto2->getReturnType(), false, TemplateParams->getDepth(), UsedParameters); break; case TPOC_Other: ::MarkUsedTemplateParameters(S.Context, FD2->getType(), false, TemplateParams->getDepth(), UsedParameters); break; } for (; ArgIdx != NumArgs; ++ArgIdx) // If this argument had no value deduced but was used in one of the types // used for partial ordering, then deduction fails. if (Deduced[ArgIdx].isNull() && UsedParameters[ArgIdx]) return false; return true; } /// Determine whether this a function template whose parameter-type-list /// ends with a function parameter pack. static bool isVariadicFunctionTemplate(FunctionTemplateDecl *FunTmpl) { FunctionDecl *Function = FunTmpl->getTemplatedDecl(); unsigned NumParams = Function->getNumParams(); if (NumParams == 0) return false; ParmVarDecl *Last = Function->getParamDecl(NumParams - 1); if (!Last->isParameterPack()) return false; // Make sure that no previous parameter is a parameter pack. while (--NumParams > 0) { if (Function->getParamDecl(NumParams - 1)->isParameterPack()) return false; } return true; } /// Returns the more specialized function template according /// to the rules of function template partial ordering (C++ [temp.func.order]). /// /// \param FT1 the first function template /// /// \param FT2 the second function template /// /// \param TPOC the context in which we are performing partial ordering of /// function templates. /// /// \param NumCallArguments1 The number of arguments in the call to FT1, used /// only when \c TPOC is \c TPOC_Call. /// /// \param NumCallArguments2 The number of arguments in the call to FT2, used /// only when \c TPOC is \c TPOC_Call. /// /// \param Reversed If \c true, exactly one of FT1 and FT2 is an overload /// candidate with a reversed parameter order. In this case, the corresponding /// P/A pairs between FT1 and FT2 are reversed. /// /// \returns the more specialized function template. If neither /// template is more specialized, returns NULL. FunctionTemplateDecl * Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed) { auto JudgeByConstraints = [&] () -> FunctionTemplateDecl * { llvm::SmallVector AC1, AC2; FT1->getAssociatedConstraints(AC1); FT2->getAssociatedConstraints(AC2); bool AtLeastAsConstrained1, AtLeastAsConstrained2; if (IsAtLeastAsConstrained(FT1, AC1, FT2, AC2, AtLeastAsConstrained1)) return nullptr; if (IsAtLeastAsConstrained(FT2, AC2, FT1, AC1, AtLeastAsConstrained2)) return nullptr; if (AtLeastAsConstrained1 == AtLeastAsConstrained2) return nullptr; return AtLeastAsConstrained1 ? FT1 : FT2; }; bool Better1 = isAtLeastAsSpecializedAs(*this, Loc, FT1, FT2, TPOC, NumCallArguments1, Reversed); bool Better2 = isAtLeastAsSpecializedAs(*this, Loc, FT2, FT1, TPOC, NumCallArguments2, Reversed); if (Better1 != Better2) // We have a clear winner return Better1 ? FT1 : FT2; if (!Better1 && !Better2) // Neither is better than the other return JudgeByConstraints(); // FIXME: This mimics what GCC implements, but doesn't match up with the // proposed resolution for core issue 692. 
This area needs to be sorted out, // but for now we attempt to maintain compatibility. bool Variadic1 = isVariadicFunctionTemplate(FT1); bool Variadic2 = isVariadicFunctionTemplate(FT2); if (Variadic1 != Variadic2) return Variadic1? FT2 : FT1; return JudgeByConstraints(); } /// Determine if the two templates are equivalent. static bool isSameTemplate(TemplateDecl *T1, TemplateDecl *T2) { if (T1 == T2) return true; if (!T1 || !T2) return false; return T1->getCanonicalDecl() == T2->getCanonicalDecl(); } /// Retrieve the most specialized of the given function template /// specializations. /// /// \param SpecBegin the start iterator of the function template /// specializations that we will be comparing. /// /// \param SpecEnd the end iterator of the function template /// specializations, paired with \p SpecBegin. /// /// \param Loc the location where the ambiguity or no-specializations /// diagnostic should occur. /// /// \param NoneDiag partial diagnostic used to diagnose cases where there are /// no matching candidates. /// /// \param AmbigDiag partial diagnostic used to diagnose an ambiguity, if one /// occurs. /// /// \param CandidateDiag partial diagnostic used for each function template /// specialization that is a candidate in the ambiguous ordering. One parameter /// in this diagnostic should be unbound, which will correspond to the string /// describing the template arguments for the function template specialization. /// /// \returns the most specialized function template specialization, if /// found. Otherwise, returns SpecEnd. UnresolvedSetIterator Sema::getMostSpecialized( UnresolvedSetIterator SpecBegin, UnresolvedSetIterator SpecEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain, QualType TargetType) { if (SpecBegin == SpecEnd) { if (Complain) { Diag(Loc, NoneDiag); FailedCandidates.NoteCandidates(*this, Loc); } return SpecEnd; } if (SpecBegin + 1 == SpecEnd) return SpecBegin; // Find the function template that is better than all of the templates it // has been compared to. UnresolvedSetIterator Best = SpecBegin; FunctionTemplateDecl *BestTemplate = cast(*Best)->getPrimaryTemplate(); assert(BestTemplate && "Not a function template specialization?"); for (UnresolvedSetIterator I = SpecBegin + 1; I != SpecEnd; ++I) { FunctionTemplateDecl *Challenger = cast(*I)->getPrimaryTemplate(); assert(Challenger && "Not a function template specialization?"); if (isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger, Loc, TPOC_Other, 0, 0), Challenger)) { Best = I; BestTemplate = Challenger; } } // Make sure that the "best" function template is more specialized than all // of the others. bool Ambiguous = false; for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I) { FunctionTemplateDecl *Challenger = cast(*I)->getPrimaryTemplate(); if (I != Best && !isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger, Loc, TPOC_Other, 0, 0), BestTemplate)) { Ambiguous = true; break; } } if (!Ambiguous) { // We found an answer. Return it. return Best; } // Diagnose the ambiguity. if (Complain) { Diag(Loc, AmbigDiag); // FIXME: Can we order the candidates in some sane way? 
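// Illustrative example (not part of the patch; names invented for
// illustration): overloading a parameter pack against a fixed parameter list.
// In the common case below ordinary partial ordering already prefers the
// non-pack overload; the isVariadicFunctionTemplate tie-break above only
// matters when neither template wins by deduction alone:
template <typename T>     int count(T)     { return 0; }                 // non-pack
template <typename... Ts> int count(Ts...) { return (int)sizeof...(Ts); } // pack

int test_pack_ordering() {
  return count(7); // returns 0: the non-pack overload is chosen
}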
for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I) { PartialDiagnostic PD = CandidateDiag; const auto *FD = cast(*I); PD << FD << getTemplateArgumentBindingsText( FD->getPrimaryTemplate()->getTemplateParameters(), *FD->getTemplateSpecializationArgs()); if (!TargetType.isNull()) HandleFunctionTypeMismatch(PD, FD->getType(), TargetType); Diag((*I)->getLocation(), PD); } } return SpecEnd; } /// Determine whether one partial specialization, P1, is at least as /// specialized than another, P2. /// /// \tparam TemplateLikeDecl The kind of P2, which must be a /// TemplateDecl or {Class,Var}TemplatePartialSpecializationDecl. /// \param T1 The injected-class-name of P1 (faked for a variable template). /// \param T2 The injected-class-name of P2 (faked for a variable template). template static bool isAtLeastAsSpecializedAs(Sema &S, QualType T1, QualType T2, TemplateLikeDecl *P2, TemplateDeductionInfo &Info) { // C++ [temp.class.order]p1: // For two class template partial specializations, the first is at least as // specialized as the second if, given the following rewrite to two // function templates, the first function template is at least as // specialized as the second according to the ordering rules for function // templates (14.6.6.2): // - the first function template has the same template parameters as the // first partial specialization and has a single function parameter // whose type is a class template specialization with the template // arguments of the first partial specialization, and // - the second function template has the same template parameters as the // second partial specialization and has a single function parameter // whose type is a class template specialization with the template // arguments of the second partial specialization. // // Rather than synthesize function templates, we merely perform the // equivalent partial ordering by performing deduction directly on // the template arguments of the class template partial // specializations. This computation is slightly simpler than the // general problem of function template partial ordering, because // class template partial specializations are more constrained. We // know that every template parameter is deducible from the class // template partial specialization's template arguments, for // example. SmallVector Deduced; // Determine whether P1 is at least as specialized as P2. Deduced.resize(P2->getTemplateParameters()->size()); if (DeduceTemplateArgumentsByTypeMatch(S, P2->getTemplateParameters(), T2, T1, Info, Deduced, TDF_None, /*PartialOrdering=*/true)) return false; SmallVector DeducedArgs(Deduced.begin(), Deduced.end()); Sema::InstantiatingTemplate Inst(S, Info.getLocation(), P2, DeducedArgs, Info); auto *TST1 = T1->castAs(); bool AtLeastAsSpecialized; S.runWithSufficientStackSpace(Info.getLocation(), [&] { AtLeastAsSpecialized = !FinishTemplateArgumentDeduction( S, P2, /*IsPartialOrdering=*/true, TemplateArgumentList(TemplateArgumentList::OnStack, TST1->template_arguments()), Deduced, Info); }); return AtLeastAsSpecialized; } /// Returns the more specialized class template partial specialization /// according to the rules of partial ordering of class template partial /// specializations (C++ [temp.class.order]). /// /// \param PS1 the first class template partial specialization /// /// \param PS2 the second class template partial specialization /// /// \returns the more specialized class template partial specialization. If /// neither partial specialization is more specialized, returns NULL. 
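// Illustrative example (not part of the patch; names invented for
// illustration): the class template partial specialization ordering that the
// isAtLeastAsSpecializedAs helper above models by deducing one
// specialization's arguments against the other's:
template <typename T> struct Traits            { static const int rank = 0; };
template <typename T> struct Traits<T *>       { static const int rank = 1; };
template <typename T> struct Traits<const T *> { static const int rank = 2; };

// const int* matches both partial specializations; const T* is the more
// specialized one, so it is selected.
static_assert(Traits<const int *>::rank == 2, "const T* wins");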
ClassTemplatePartialSpecializationDecl * Sema::getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc) { QualType PT1 = PS1->getInjectedSpecializationType(); QualType PT2 = PS2->getInjectedSpecializationType(); TemplateDeductionInfo Info(Loc); bool Better1 = isAtLeastAsSpecializedAs(*this, PT1, PT2, PS2, Info); bool Better2 = isAtLeastAsSpecializedAs(*this, PT2, PT1, PS1, Info); if (!Better1 && !Better2) return nullptr; if (Better1 && Better2) { llvm::SmallVector AC1, AC2; PS1->getAssociatedConstraints(AC1); PS2->getAssociatedConstraints(AC2); bool AtLeastAsConstrained1, AtLeastAsConstrained2; if (IsAtLeastAsConstrained(PS1, AC1, PS2, AC2, AtLeastAsConstrained1)) return nullptr; if (IsAtLeastAsConstrained(PS2, AC2, PS1, AC1, AtLeastAsConstrained2)) return nullptr; if (AtLeastAsConstrained1 == AtLeastAsConstrained2) return nullptr; return AtLeastAsConstrained1 ? PS1 : PS2; } return Better1 ? PS1 : PS2; } bool Sema::isMoreSpecializedThanPrimary( ClassTemplatePartialSpecializationDecl *Spec, TemplateDeductionInfo &Info) { ClassTemplateDecl *Primary = Spec->getSpecializedTemplate(); QualType PrimaryT = Primary->getInjectedClassNameSpecialization(); QualType PartialT = Spec->getInjectedSpecializationType(); if (!isAtLeastAsSpecializedAs(*this, PartialT, PrimaryT, Primary, Info)) return false; if (!isAtLeastAsSpecializedAs(*this, PrimaryT, PartialT, Spec, Info)) return true; Info.clearSFINAEDiagnostic(); llvm::SmallVector PrimaryAC, SpecAC; Primary->getAssociatedConstraints(PrimaryAC); Spec->getAssociatedConstraints(SpecAC); bool AtLeastAsConstrainedPrimary, AtLeastAsConstrainedSpec; if (IsAtLeastAsConstrained(Spec, SpecAC, Primary, PrimaryAC, AtLeastAsConstrainedSpec)) return false; if (!AtLeastAsConstrainedSpec) return false; if (IsAtLeastAsConstrained(Primary, PrimaryAC, Spec, SpecAC, AtLeastAsConstrainedPrimary)) return false; return !AtLeastAsConstrainedPrimary; } VarTemplatePartialSpecializationDecl * Sema::getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc) { // Pretend the variable template specializations are class template // specializations and form a fake injected class name type for comparison. assert(PS1->getSpecializedTemplate() == PS2->getSpecializedTemplate() && "the partial specializations being compared should specialize" " the same template."); TemplateName Name(PS1->getSpecializedTemplate()); TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name); QualType PT1 = Context.getTemplateSpecializationType( CanonTemplate, PS1->getTemplateArgs().asArray()); QualType PT2 = Context.getTemplateSpecializationType( CanonTemplate, PS2->getTemplateArgs().asArray()); TemplateDeductionInfo Info(Loc); bool Better1 = isAtLeastAsSpecializedAs(*this, PT1, PT2, PS2, Info); bool Better2 = isAtLeastAsSpecializedAs(*this, PT2, PT1, PS1, Info); if (!Better1 && !Better2) return nullptr; if (Better1 && Better2) { llvm::SmallVector AC1, AC2; PS1->getAssociatedConstraints(AC1); PS2->getAssociatedConstraints(AC2); bool AtLeastAsConstrained1, AtLeastAsConstrained2; if (IsAtLeastAsConstrained(PS1, AC1, PS2, AC2, AtLeastAsConstrained1)) return nullptr; if (IsAtLeastAsConstrained(PS2, AC2, PS1, AC1, AtLeastAsConstrained2)) return nullptr; if (AtLeastAsConstrained1 == AtLeastAsConstrained2) return nullptr; return AtLeastAsConstrained1 ? PS1 : PS2; } return Better1 ? 
PS1 : PS2; } bool Sema::isMoreSpecializedThanPrimary( VarTemplatePartialSpecializationDecl *Spec, TemplateDeductionInfo &Info) { TemplateDecl *Primary = Spec->getSpecializedTemplate(); // FIXME: Cache the injected template arguments rather than recomputing // them for each partial specialization. SmallVector PrimaryArgs; Context.getInjectedTemplateArgs(Primary->getTemplateParameters(), PrimaryArgs); TemplateName CanonTemplate = Context.getCanonicalTemplateName(TemplateName(Primary)); QualType PrimaryT = Context.getTemplateSpecializationType( CanonTemplate, PrimaryArgs); QualType PartialT = Context.getTemplateSpecializationType( CanonTemplate, Spec->getTemplateArgs().asArray()); if (!isAtLeastAsSpecializedAs(*this, PartialT, PrimaryT, Primary, Info)) return false; if (!isAtLeastAsSpecializedAs(*this, PrimaryT, PartialT, Spec, Info)) return true; Info.clearSFINAEDiagnostic(); llvm::SmallVector PrimaryAC, SpecAC; Primary->getAssociatedConstraints(PrimaryAC); Spec->getAssociatedConstraints(SpecAC); bool AtLeastAsConstrainedPrimary, AtLeastAsConstrainedSpec; if (IsAtLeastAsConstrained(Spec, SpecAC, Primary, PrimaryAC, AtLeastAsConstrainedSpec)) return false; if (!AtLeastAsConstrainedSpec) return false; if (IsAtLeastAsConstrained(Primary, PrimaryAC, Spec, SpecAC, AtLeastAsConstrainedPrimary)) return false; return !AtLeastAsConstrainedPrimary; } bool Sema::isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc) { // C++1z [temp.arg.template]p4: (DR 150) // A template template-parameter P is at least as specialized as a // template template-argument A if, given the following rewrite to two // function templates... // Rather than synthesize function templates, we merely perform the // equivalent partial ordering by performing deduction directly on // the template parameter lists of the template template parameters. // // Given an invented class template X with the template parameter list of // A (including default arguments): TemplateName X = Context.getCanonicalTemplateName(TemplateName(AArg)); TemplateParameterList *A = AArg->getTemplateParameters(); // - Each function template has a single function parameter whose type is // a specialization of X with template arguments corresponding to the // template parameters from the respective function template SmallVector AArgs; Context.getInjectedTemplateArgs(A, AArgs); // Check P's arguments against A's parameter list. This will fill in default // template arguments as needed. AArgs are already correct by construction. // We can't just use CheckTemplateIdType because that will expand alias // templates. SmallVector PArgs; { SFINAETrap Trap(*this); Context.getInjectedTemplateArgs(P, PArgs); TemplateArgumentListInfo PArgList(P->getLAngleLoc(), P->getRAngleLoc()); for (unsigned I = 0, N = P->size(); I != N; ++I) { // Unwrap packs that getInjectedTemplateArgs wrapped around pack // expansions, to form an "as written" argument list. TemplateArgument Arg = PArgs[I]; if (Arg.getKind() == TemplateArgument::Pack) { assert(Arg.pack_size() == 1 && Arg.pack_begin()->isPackExpansion()); Arg = *Arg.pack_begin(); } PArgList.addArgument(getTrivialTemplateArgumentLoc( Arg, QualType(), P->getParam(I)->getLocation())); } PArgs.clear(); // C++1z [temp.arg.template]p3: // If the rewrite produces an invalid type, then P is not at least as // specialized as A. 
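// Illustrative example (not part of the patch; names invented for
// illustration): the "at least as specialized" matching of template
// template-arguments (C++17 P0522R0 / DR 150) that this function implements.
// Under that rule a template with an extra defaulted parameter can bind to a
// single-parameter template template-parameter. (Clang of this vintage gates
// the relaxed matching behind -frelaxed-template-template-args.)
template <template <typename> class TT> struct Apply { using type = TT<int>; };

template <typename T>                        struct Exact       {};
template <typename T, typename Extra = void> struct WithDefault {};

using A1 = Apply<Exact>::type;       // always OK: parameter lists match exactly
using A2 = Apply<WithDefault>::type; // OK under the relaxed (DR 150) rule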
if (CheckTemplateArgumentList(AArg, Loc, PArgList, false, PArgs) || Trap.hasErrorOccurred()) return false; } QualType AType = Context.getTemplateSpecializationType(X, AArgs); QualType PType = Context.getTemplateSpecializationType(X, PArgs); // ... the function template corresponding to P is at least as specialized // as the function template corresponding to A according to the partial // ordering rules for function templates. TemplateDeductionInfo Info(Loc, A->getDepth()); return isAtLeastAsSpecializedAs(*this, PType, AType, AArg, Info); } namespace { struct MarkUsedTemplateParameterVisitor : RecursiveASTVisitor { llvm::SmallBitVector &Used; unsigned Depth; MarkUsedTemplateParameterVisitor(llvm::SmallBitVector &Used, unsigned Depth) : Used(Used), Depth(Depth) { } bool VisitTemplateTypeParmType(TemplateTypeParmType *T) { if (T->getDepth() == Depth) Used[T->getIndex()] = true; return true; } bool TraverseTemplateName(TemplateName Template) { if (auto *TTP = dyn_cast(Template.getAsTemplateDecl())) if (TTP->getDepth() == Depth) Used[TTP->getIndex()] = true; RecursiveASTVisitor:: TraverseTemplateName(Template); return true; } bool VisitDeclRefExpr(DeclRefExpr *E) { if (auto *NTTP = dyn_cast(E->getDecl())) if (NTTP->getDepth() == Depth) Used[NTTP->getIndex()] = true; return true; } }; } /// Mark the template parameters that are used by the given /// expression. static void MarkUsedTemplateParameters(ASTContext &Ctx, const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { if (!OnlyDeduced) { MarkUsedTemplateParameterVisitor(Used, Depth) .TraverseStmt(const_cast(E)); return; } // We can deduce from a pack expansion. if (const PackExpansionExpr *Expansion = dyn_cast(E)) E = Expansion->getPattern(); // Skip through any implicit casts we added while type-checking, and any // substitutions performed by template alias expansion. while (true) { if (const ImplicitCastExpr *ICE = dyn_cast(E)) E = ICE->getSubExpr(); else if (const ConstantExpr *CE = dyn_cast(E)) E = CE->getSubExpr(); else if (const SubstNonTypeTemplateParmExpr *Subst = dyn_cast(E)) E = Subst->getReplacement(); else break; } const DeclRefExpr *DRE = dyn_cast(E); if (!DRE) return; const NonTypeTemplateParmDecl *NTTP = dyn_cast(DRE->getDecl()); if (!NTTP) return; if (NTTP->getDepth() == Depth) Used[NTTP->getIndex()] = true; // In C++17 mode, additional arguments may be deduced from the type of a // non-type argument. if (Ctx.getLangOpts().CPlusPlus17) MarkUsedTemplateParameters(Ctx, NTTP->getType(), OnlyDeduced, Depth, Used); } /// Mark the template parameters that are used by the given /// nested name specifier. static void MarkUsedTemplateParameters(ASTContext &Ctx, NestedNameSpecifier *NNS, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { if (!NNS) return; MarkUsedTemplateParameters(Ctx, NNS->getPrefix(), OnlyDeduced, Depth, Used); MarkUsedTemplateParameters(Ctx, QualType(NNS->getAsType(), 0), OnlyDeduced, Depth, Used); } /// Mark the template parameters that are used by the given /// template name. 
static void MarkUsedTemplateParameters(ASTContext &Ctx, TemplateName Name, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { if (TemplateDecl *Template = Name.getAsTemplateDecl()) { if (TemplateTemplateParmDecl *TTP = dyn_cast(Template)) { if (TTP->getDepth() == Depth) Used[TTP->getIndex()] = true; } return; } if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) MarkUsedTemplateParameters(Ctx, QTN->getQualifier(), OnlyDeduced, Depth, Used); if (DependentTemplateName *DTN = Name.getAsDependentTemplateName()) MarkUsedTemplateParameters(Ctx, DTN->getQualifier(), OnlyDeduced, Depth, Used); } /// Mark the template parameters that are used by the given /// type. static void MarkUsedTemplateParameters(ASTContext &Ctx, QualType T, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { if (T.isNull()) return; // Non-dependent types have nothing deducible if (!T->isDependentType()) return; T = Ctx.getCanonicalType(T); switch (T->getTypeClass()) { case Type::Pointer: MarkUsedTemplateParameters(Ctx, cast(T)->getPointeeType(), OnlyDeduced, Depth, Used); break; case Type::BlockPointer: MarkUsedTemplateParameters(Ctx, cast(T)->getPointeeType(), OnlyDeduced, Depth, Used); break; case Type::LValueReference: case Type::RValueReference: MarkUsedTemplateParameters(Ctx, cast(T)->getPointeeType(), OnlyDeduced, Depth, Used); break; case Type::MemberPointer: { const MemberPointerType *MemPtr = cast(T.getTypePtr()); MarkUsedTemplateParameters(Ctx, MemPtr->getPointeeType(), OnlyDeduced, Depth, Used); MarkUsedTemplateParameters(Ctx, QualType(MemPtr->getClass(), 0), OnlyDeduced, Depth, Used); break; } case Type::DependentSizedArray: MarkUsedTemplateParameters(Ctx, cast(T)->getSizeExpr(), OnlyDeduced, Depth, Used); // Fall through to check the element type LLVM_FALLTHROUGH; case Type::ConstantArray: case Type::IncompleteArray: MarkUsedTemplateParameters(Ctx, cast(T)->getElementType(), OnlyDeduced, Depth, Used); break; case Type::Vector: case Type::ExtVector: MarkUsedTemplateParameters(Ctx, cast(T)->getElementType(), OnlyDeduced, Depth, Used); break; case Type::DependentVector: { const auto *VecType = cast(T); MarkUsedTemplateParameters(Ctx, VecType->getElementType(), OnlyDeduced, Depth, Used); MarkUsedTemplateParameters(Ctx, VecType->getSizeExpr(), OnlyDeduced, Depth, Used); break; } case Type::DependentSizedExtVector: { const DependentSizedExtVectorType *VecType = cast(T); MarkUsedTemplateParameters(Ctx, VecType->getElementType(), OnlyDeduced, Depth, Used); MarkUsedTemplateParameters(Ctx, VecType->getSizeExpr(), OnlyDeduced, Depth, Used); break; } case Type::DependentAddressSpace: { const DependentAddressSpaceType *DependentASType = cast(T); MarkUsedTemplateParameters(Ctx, DependentASType->getPointeeType(), OnlyDeduced, Depth, Used); MarkUsedTemplateParameters(Ctx, DependentASType->getAddrSpaceExpr(), OnlyDeduced, Depth, Used); break; } case Type::ConstantMatrix: { const ConstantMatrixType *MatType = cast(T); MarkUsedTemplateParameters(Ctx, MatType->getElementType(), OnlyDeduced, Depth, Used); break; } case Type::DependentSizedMatrix: { const DependentSizedMatrixType *MatType = cast(T); MarkUsedTemplateParameters(Ctx, MatType->getElementType(), OnlyDeduced, Depth, Used); MarkUsedTemplateParameters(Ctx, MatType->getRowExpr(), OnlyDeduced, Depth, Used); MarkUsedTemplateParameters(Ctx, MatType->getColumnExpr(), OnlyDeduced, Depth, Used); break; } case Type::FunctionProto: { const FunctionProtoType *Proto = cast(T); MarkUsedTemplateParameters(Ctx, Proto->getReturnType(), OnlyDeduced, 
Depth, Used); for (unsigned I = 0, N = Proto->getNumParams(); I != N; ++I) { // C++17 [temp.deduct.type]p5: // The non-deduced contexts are: [...] // -- A function parameter pack that does not occur at the end of the // parameter-declaration-list. if (!OnlyDeduced || I + 1 == N || !Proto->getParamType(I)->getAs()) { MarkUsedTemplateParameters(Ctx, Proto->getParamType(I), OnlyDeduced, Depth, Used); } else { // FIXME: C++17 [temp.deduct.call]p1: // When a function parameter pack appears in a non-deduced context, // the type of that pack is never deduced. // // We should also track a set of "never deduced" parameters, and // subtract that from the list of deduced parameters after marking. } } if (auto *E = Proto->getNoexceptExpr()) MarkUsedTemplateParameters(Ctx, E, OnlyDeduced, Depth, Used); break; } case Type::TemplateTypeParm: { const TemplateTypeParmType *TTP = cast(T); if (TTP->getDepth() == Depth) Used[TTP->getIndex()] = true; break; } case Type::SubstTemplateTypeParmPack: { const SubstTemplateTypeParmPackType *Subst = cast(T); MarkUsedTemplateParameters(Ctx, QualType(Subst->getReplacedParameter(), 0), OnlyDeduced, Depth, Used); MarkUsedTemplateParameters(Ctx, Subst->getArgumentPack(), OnlyDeduced, Depth, Used); break; } case Type::InjectedClassName: T = cast(T)->getInjectedSpecializationType(); LLVM_FALLTHROUGH; case Type::TemplateSpecialization: { const TemplateSpecializationType *Spec = cast(T); MarkUsedTemplateParameters(Ctx, Spec->getTemplateName(), OnlyDeduced, Depth, Used); // C++0x [temp.deduct.type]p9: // If the template argument list of P contains a pack expansion that is // not the last template argument, the entire template argument list is a // non-deduced context. if (OnlyDeduced && hasPackExpansionBeforeEnd(Spec->template_arguments())) break; for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I) MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth, Used); break; } case Type::Complex: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast(T)->getElementType(), OnlyDeduced, Depth, Used); break; case Type::Atomic: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast(T)->getValueType(), OnlyDeduced, Depth, Used); break; case Type::DependentName: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast(T)->getQualifier(), OnlyDeduced, Depth, Used); break; case Type::DependentTemplateSpecialization: { // C++14 [temp.deduct.type]p5: // The non-deduced contexts are: // -- The nested-name-specifier of a type that was specified using a // qualified-id // // C++14 [temp.deduct.type]p6: // When a type name is specified in a way that includes a non-deduced // context, all of the types that comprise that type name are also // non-deduced. 
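// Illustrative example (not part of the patch; names invented for
// illustration): the classic non-deduced context described in the comment
// above. T appears only to the left of '::' in the parameter type, so it
// cannot be deduced from the call argument:
template <typename T> struct Identity { using type = T; };

template <typename T> void store(typename Identity<T>::type value) { (void)value; }

void test_non_deduced() {
  // store(42);   // error: T is in a non-deduced context
  store<int>(42); // OK: explicit template argument supplies T
}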
if (OnlyDeduced) break; const DependentTemplateSpecializationType *Spec = cast(T); MarkUsedTemplateParameters(Ctx, Spec->getQualifier(), OnlyDeduced, Depth, Used); for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I) MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth, Used); break; } case Type::TypeOf: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast(T)->getUnderlyingType(), OnlyDeduced, Depth, Used); break; case Type::TypeOfExpr: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast(T)->getUnderlyingExpr(), OnlyDeduced, Depth, Used); break; case Type::Decltype: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast(T)->getUnderlyingExpr(), OnlyDeduced, Depth, Used); break; case Type::UnaryTransform: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast(T)->getUnderlyingType(), OnlyDeduced, Depth, Used); break; case Type::PackExpansion: MarkUsedTemplateParameters(Ctx, cast(T)->getPattern(), OnlyDeduced, Depth, Used); break; case Type::Auto: case Type::DeducedTemplateSpecialization: MarkUsedTemplateParameters(Ctx, cast(T)->getDeducedType(), OnlyDeduced, Depth, Used); break; case Type::DependentExtInt: MarkUsedTemplateParameters(Ctx, cast(T)->getNumBitsExpr(), OnlyDeduced, Depth, Used); break; // None of these types have any template parameters in them. case Type::Builtin: case Type::VariableArray: case Type::FunctionNoProto: case Type::Record: case Type::Enum: case Type::ObjCInterface: case Type::ObjCObject: case Type::ObjCObjectPointer: case Type::UnresolvedUsing: case Type::Pipe: case Type::ExtInt: #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define DEPENDENT_TYPE(Class, Base) #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: #include "clang/AST/TypeNodes.inc" break; } } /// Mark the template parameters that are used by this /// template argument. static void MarkUsedTemplateParameters(ASTContext &Ctx, const TemplateArgument &TemplateArg, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { switch (TemplateArg.getKind()) { case TemplateArgument::Null: case TemplateArgument::Integral: case TemplateArgument::Declaration: break; case TemplateArgument::NullPtr: MarkUsedTemplateParameters(Ctx, TemplateArg.getNullPtrType(), OnlyDeduced, Depth, Used); break; case TemplateArgument::Type: MarkUsedTemplateParameters(Ctx, TemplateArg.getAsType(), OnlyDeduced, Depth, Used); break; case TemplateArgument::Template: case TemplateArgument::TemplateExpansion: MarkUsedTemplateParameters(Ctx, TemplateArg.getAsTemplateOrTemplatePattern(), OnlyDeduced, Depth, Used); break; case TemplateArgument::Expression: MarkUsedTemplateParameters(Ctx, TemplateArg.getAsExpr(), OnlyDeduced, Depth, Used); break; case TemplateArgument::Pack: for (const auto &P : TemplateArg.pack_elements()) MarkUsedTemplateParameters(Ctx, P, OnlyDeduced, Depth, Used); break; } } /// Mark which template parameters are used in a given expression. /// /// \param E the expression from which template parameters will be deduced. /// /// \param Used a bit vector whose elements will be set to \c true /// to indicate when the corresponding template parameter will be /// deduced. void Sema::MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { ::MarkUsedTemplateParameters(Context, E, OnlyDeduced, Depth, Used); } /// Mark which template parameters can be deduced from a given /// template argument list. /// /// \param TemplateArgs the template argument list from which template /// parameters will be deduced. 
/// /// \param Used a bit vector whose elements will be set to \c true /// to indicate when the corresponding template parameter will be /// deduced. void Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { // C++0x [temp.deduct.type]p9: // If the template argument list of P contains a pack expansion that is not // the last template argument, the entire template argument list is a // non-deduced context. if (OnlyDeduced && hasPackExpansionBeforeEnd(TemplateArgs.asArray())) return; for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I) ::MarkUsedTemplateParameters(Context, TemplateArgs[I], OnlyDeduced, Depth, Used); } /// Marks all of the template parameters that will be deduced by a /// call to the given function template. void Sema::MarkDeducedTemplateParameters( ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { TemplateParameterList *TemplateParams = FunctionTemplate->getTemplateParameters(); Deduced.clear(); Deduced.resize(TemplateParams->size()); FunctionDecl *Function = FunctionTemplate->getTemplatedDecl(); for (unsigned I = 0, N = Function->getNumParams(); I != N; ++I) ::MarkUsedTemplateParameters(Ctx, Function->getParamDecl(I)->getType(), true, TemplateParams->getDepth(), Deduced); } bool hasDeducibleTemplateParameters(Sema &S, FunctionTemplateDecl *FunctionTemplate, QualType T) { if (!T->isDependentType()) return false; TemplateParameterList *TemplateParams = FunctionTemplate->getTemplateParameters(); llvm::SmallBitVector Deduced(TemplateParams->size()); ::MarkUsedTemplateParameters(S.Context, T, true, TemplateParams->getDepth(), Deduced); return Deduced.any(); } diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp index 7b77d1cb482a..259cc5165776 100644 --- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp +++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp @@ -1,1247 +1,1248 @@ //===------- SemaTemplateVariadic.cpp - C++ Variadic Templates ------------===/ // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception //===----------------------------------------------------------------------===/ // // This file implements semantic analysis for C++0x variadic templates. //===----------------------------------------------------------------------===/ #include "clang/Sema/Sema.h" #include "TypeLocBuilder.h" #include "clang/AST/Expr.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/TypeLoc.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/Template.h" using namespace clang; //---------------------------------------------------------------------------- // Visitor that collects unexpanded parameter packs //---------------------------------------------------------------------------- namespace { /// A class that collects unexpanded parameter packs. 
class CollectUnexpandedParameterPacksVisitor : public RecursiveASTVisitor { typedef RecursiveASTVisitor inherited; SmallVectorImpl &Unexpanded; bool InLambda = false; unsigned DepthLimit = (unsigned)-1; void addUnexpanded(NamedDecl *ND, SourceLocation Loc = SourceLocation()) { if (auto *VD = dyn_cast(ND)) { // For now, the only problematic case is a generic lambda's templated // call operator, so we don't need to look for all the other ways we // could have reached a dependent parameter pack. auto *FD = dyn_cast(VD->getDeclContext()); auto *FTD = FD ? FD->getDescribedFunctionTemplate() : nullptr; if (FTD && FTD->getTemplateParameters()->getDepth() >= DepthLimit) return; } else if (getDepthAndIndex(ND).first >= DepthLimit) return; Unexpanded.push_back({ND, Loc}); } void addUnexpanded(const TemplateTypeParmType *T, SourceLocation Loc = SourceLocation()) { if (T->getDepth() < DepthLimit) Unexpanded.push_back({T, Loc}); } public: explicit CollectUnexpandedParameterPacksVisitor( SmallVectorImpl &Unexpanded) : Unexpanded(Unexpanded) {} bool shouldWalkTypesOfTypeLocs() const { return false; } //------------------------------------------------------------------------ // Recording occurrences of (unexpanded) parameter packs. //------------------------------------------------------------------------ /// Record occurrences of template type parameter packs. bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) { if (TL.getTypePtr()->isParameterPack()) addUnexpanded(TL.getTypePtr(), TL.getNameLoc()); return true; } /// Record occurrences of template type parameter packs /// when we don't have proper source-location information for /// them. /// /// Ideally, this routine would never be used. bool VisitTemplateTypeParmType(TemplateTypeParmType *T) { if (T->isParameterPack()) addUnexpanded(T); return true; } /// Record occurrences of function and non-type template /// parameter packs in an expression. bool VisitDeclRefExpr(DeclRefExpr *E) { if (E->getDecl()->isParameterPack()) addUnexpanded(E->getDecl(), E->getLocation()); return true; } /// Record occurrences of template template parameter packs. bool TraverseTemplateName(TemplateName Template) { if (auto *TTP = dyn_cast_or_null( Template.getAsTemplateDecl())) { if (TTP->isParameterPack()) addUnexpanded(TTP); } return inherited::TraverseTemplateName(Template); } /// Suppress traversal into Objective-C container literal /// elements that are pack expansions. bool TraverseObjCDictionaryLiteral(ObjCDictionaryLiteral *E) { if (!E->containsUnexpandedParameterPack()) return true; for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) { ObjCDictionaryElement Element = E->getKeyValueElement(I); if (Element.isPackExpansion()) continue; TraverseStmt(Element.Key); TraverseStmt(Element.Value); } return true; } //------------------------------------------------------------------------ // Pruning the search for unexpanded parameter packs. //------------------------------------------------------------------------ /// Suppress traversal into statements and expressions that /// do not contain unexpanded parameter packs. bool TraverseStmt(Stmt *S) { Expr *E = dyn_cast_or_null(S); if ((E && E->containsUnexpandedParameterPack()) || InLambda) return inherited::TraverseStmt(S); return true; } /// Suppress traversal into types that do not contain /// unexpanded parameter packs. 
bool TraverseType(QualType T) { if ((!T.isNull() && T->containsUnexpandedParameterPack()) || InLambda) return inherited::TraverseType(T); return true; } /// Suppress traversal into types with location information /// that do not contain unexpanded parameter packs. bool TraverseTypeLoc(TypeLoc TL) { if ((!TL.getType().isNull() && TL.getType()->containsUnexpandedParameterPack()) || InLambda) return inherited::TraverseTypeLoc(TL); return true; } /// Suppress traversal of parameter packs. bool TraverseDecl(Decl *D) { // A function parameter pack is a pack expansion, so cannot contain // an unexpanded parameter pack. Likewise for a template parameter // pack that contains any references to other packs. if (D && D->isParameterPack()) return true; return inherited::TraverseDecl(D); } /// Suppress traversal of pack-expanded attributes. bool TraverseAttr(Attr *A) { if (A->isPackExpansion()) return true; return inherited::TraverseAttr(A); } /// Suppress traversal of pack expansion expressions and types. ///@{ bool TraversePackExpansionType(PackExpansionType *T) { return true; } bool TraversePackExpansionTypeLoc(PackExpansionTypeLoc TL) { return true; } bool TraversePackExpansionExpr(PackExpansionExpr *E) { return true; } bool TraverseCXXFoldExpr(CXXFoldExpr *E) { return true; } ///@} /// Suppress traversal of using-declaration pack expansion. bool TraverseUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) { if (D->isPackExpansion()) return true; return inherited::TraverseUnresolvedUsingValueDecl(D); } /// Suppress traversal of using-declaration pack expansion. bool TraverseUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) { if (D->isPackExpansion()) return true; return inherited::TraverseUnresolvedUsingTypenameDecl(D); } /// Suppress traversal of template argument pack expansions. bool TraverseTemplateArgument(const TemplateArgument &Arg) { if (Arg.isPackExpansion()) return true; return inherited::TraverseTemplateArgument(Arg); } /// Suppress traversal of template argument pack expansions. bool TraverseTemplateArgumentLoc(const TemplateArgumentLoc &ArgLoc) { if (ArgLoc.getArgument().isPackExpansion()) return true; return inherited::TraverseTemplateArgumentLoc(ArgLoc); } /// Suppress traversal of base specifier pack expansions. bool TraverseCXXBaseSpecifier(const CXXBaseSpecifier &Base) { if (Base.isPackExpansion()) return true; return inherited::TraverseCXXBaseSpecifier(Base); } /// Suppress traversal of mem-initializer pack expansions. bool TraverseConstructorInitializer(CXXCtorInitializer *Init) { if (Init->isPackExpansion()) return true; return inherited::TraverseConstructorInitializer(Init); } /// Note whether we're traversing a lambda containing an unexpanded /// parameter pack. In this case, the unexpanded pack can occur anywhere, /// including all the places where we normally wouldn't look. Within a /// lambda, we don't propagate the 'contains unexpanded parameter pack' bit /// outside an expression. bool TraverseLambdaExpr(LambdaExpr *Lambda) { // The ContainsUnexpandedParameterPack bit on a lambda is always correct, // even if it's contained within another lambda. if (!Lambda->containsUnexpandedParameterPack()) return true; bool WasInLambda = InLambda; unsigned OldDepthLimit = DepthLimit; InLambda = true; if (auto *TPL = Lambda->getTemplateParameterList()) DepthLimit = TPL->getDepth(); inherited::TraverseLambdaExpr(Lambda); InLambda = WasInLambda; DepthLimit = OldDepthLimit; return true; } /// Suppress traversal within pack expansions in lambda captures. 
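/// For illustration only (not part of the original source): a capture pack
/// such as
/// \code
///   template<typename ...Ts> void f(Ts ...ts) {
///     auto l = [ts...] { };  // the capture 'ts...' is itself an expansion
///   }
/// \endcode
/// is already expanded, so packs named within it need not be collected.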
bool TraverseLambdaCapture(LambdaExpr *Lambda, const LambdaCapture *C, Expr *Init) { if (C->isPackExpansion()) return true; return inherited::TraverseLambdaCapture(Lambda, C, Init); } }; } /// Determine whether it's possible for an unexpanded parameter pack to /// be valid in this location. This only happens when we're in a declaration /// that is nested within an expression that could be expanded, such as a /// lambda-expression within a function call. /// /// This is conservatively correct, but may claim that some unexpanded packs are /// permitted when they are not. bool Sema::isUnexpandedParameterPackPermitted() { for (auto *SI : FunctionScopes) if (isa(SI)) return true; return false; } /// Diagnose all of the unexpanded parameter packs in the given /// vector. bool Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef Unexpanded) { if (Unexpanded.empty()) return false; // If we are within a lambda expression and referencing a pack that is not // declared within the lambda itself, that lambda contains an unexpanded // parameter pack, and we are done. // FIXME: Store 'Unexpanded' on the lambda so we don't need to recompute it // later. SmallVector LambdaParamPackReferences; if (auto *LSI = getEnclosingLambda()) { for (auto &Pack : Unexpanded) { auto DeclaresThisPack = [&](NamedDecl *LocalPack) { if (auto *TTPT = Pack.first.dyn_cast()) { auto *TTPD = dyn_cast(LocalPack); return TTPD && TTPD->getTypeForDecl() == TTPT; } return declaresSameEntity(Pack.first.get(), LocalPack); }; if (std::find_if(LSI->LocalPacks.begin(), LSI->LocalPacks.end(), DeclaresThisPack) != LSI->LocalPacks.end()) LambdaParamPackReferences.push_back(Pack); } if (LambdaParamPackReferences.empty()) { // The construct in the lambda only references packs declared outside the // lambda. That's OK for now, but the lambda itself is considered to contain // an unexpanded pack in this case, which will require expansion outside the // lambda. // We do not permit pack expansion that would duplicate a statement // expression, not even within a lambda. // FIXME: We could probably support this for statement expressions that // do not contain labels. // FIXME: This is insufficient to detect this problem; consider // f( ({ bad: 0; }) + pack ... ); bool EnclosingStmtExpr = false; for (unsigned N = FunctionScopes.size(); N; --N) { sema::FunctionScopeInfo *Func = FunctionScopes[N-1]; if (std::any_of( Func->CompoundScopes.begin(), Func->CompoundScopes.end(), [](sema::CompoundScopeInfo &CSI) { return CSI.IsStmtExpr; })) { EnclosingStmtExpr = true; break; } // Compound-statements outside the lambda are OK for now; we'll check // for those when we finish handling the lambda.
if (Func == LSI) break; } if (!EnclosingStmtExpr) { LSI->ContainsUnexpandedParameterPack = true; return false; } } else { Unexpanded = LambdaParamPackReferences; } } SmallVector Locations; SmallVector Names; llvm::SmallPtrSet NamesKnown; for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) { IdentifierInfo *Name = nullptr; if (const TemplateTypeParmType *TTP = Unexpanded[I].first.dyn_cast()) Name = TTP->getIdentifier(); else Name = Unexpanded[I].first.get()->getIdentifier(); if (Name && NamesKnown.insert(Name).second) Names.push_back(Name); if (Unexpanded[I].second.isValid()) Locations.push_back(Unexpanded[I].second); } DiagnosticBuilder DB = Diag(Loc, diag::err_unexpanded_parameter_pack) << (int)UPPC << (int)Names.size(); for (size_t I = 0, E = std::min(Names.size(), (size_t)2); I != E; ++I) DB << Names[I]; for (unsigned I = 0, N = Locations.size(); I != N; ++I) DB << SourceRange(Locations[I]); return true; } bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC) { // C++0x [temp.variadic]p5: // An appearance of a name of a parameter pack that is not expanded is // ill-formed. if (!T->getType()->containsUnexpandedParameterPack()) return false; SmallVector Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc( T->getTypeLoc()); assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs"); return DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded); } bool Sema::DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC) { // C++0x [temp.variadic]p5: // An appearance of a name of a parameter pack that is not expanded is // ill-formed. if (!E->containsUnexpandedParameterPack()) return false; SmallVector Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseStmt(E); assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs"); return DiagnoseUnexpandedParameterPacks(E->getBeginLoc(), UPPC, Unexpanded); } bool Sema::DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC) { // C++0x [temp.variadic]p5: // An appearance of a name of a parameter pack that is not expanded is // ill-formed. if (!SS.getScopeRep() || !SS.getScopeRep()->containsUnexpandedParameterPack()) return false; SmallVector Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseNestedNameSpecifier(SS.getScopeRep()); assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs"); return DiagnoseUnexpandedParameterPacks(SS.getRange().getBegin(), UPPC, Unexpanded); } bool Sema::DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC) { // C++0x [temp.variadic]p5: // An appearance of a name of a parameter pack that is not expanded is // ill-formed. switch (NameInfo.getName().getNameKind()) { case DeclarationName::Identifier: case DeclarationName::ObjCZeroArgSelector: case DeclarationName::ObjCOneArgSelector: case DeclarationName::ObjCMultiArgSelector: case DeclarationName::CXXOperatorName: case DeclarationName::CXXLiteralOperatorName: case DeclarationName::CXXUsingDirective: case DeclarationName::CXXDeductionGuideName: return false; case DeclarationName::CXXConstructorName: case DeclarationName::CXXDestructorName: case DeclarationName::CXXConversionFunctionName: // FIXME: We shouldn't need this null check! 
if (TypeSourceInfo *TSInfo = NameInfo.getNamedTypeInfo()) return DiagnoseUnexpandedParameterPack(NameInfo.getLoc(), TSInfo, UPPC); if (!NameInfo.getName().getCXXNameType()->containsUnexpandedParameterPack()) return false; break; } SmallVector Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseType(NameInfo.getName().getCXXNameType()); assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs"); return DiagnoseUnexpandedParameterPacks(NameInfo.getLoc(), UPPC, Unexpanded); } bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC) { if (Template.isNull() || !Template.containsUnexpandedParameterPack()) return false; SmallVector Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseTemplateName(Template); assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs"); return DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded); } bool Sema::DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC) { if (Arg.getArgument().isNull() || !Arg.getArgument().containsUnexpandedParameterPack()) return false; SmallVector Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseTemplateArgumentLoc(Arg); assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs"); return DiagnoseUnexpandedParameterPacks(Arg.getLocation(), UPPC, Unexpanded); } void Sema::collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl &Unexpanded) { CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseTemplateArgument(Arg); } void Sema::collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl &Unexpanded) { CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseTemplateArgumentLoc(Arg); } void Sema::collectUnexpandedParameterPacks(QualType T, SmallVectorImpl &Unexpanded) { CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(T); } void Sema::collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl &Unexpanded) { CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc(TL); } void Sema::collectUnexpandedParameterPacks( NestedNameSpecifierLoc NNS, SmallVectorImpl &Unexpanded) { CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseNestedNameSpecifierLoc(NNS); } void Sema::collectUnexpandedParameterPacks( const DeclarationNameInfo &NameInfo, SmallVectorImpl &Unexpanded) { CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseDeclarationNameInfo(NameInfo); } ParsedTemplateArgument Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc) { if (Arg.isInvalid()) return Arg; switch (Arg.getKind()) { case ParsedTemplateArgument::Type: { TypeResult Result = ActOnPackExpansion(Arg.getAsType(), EllipsisLoc); if (Result.isInvalid()) return ParsedTemplateArgument(); return ParsedTemplateArgument(Arg.getKind(), Result.get().getAsOpaquePtr(), Arg.getLocation()); } case ParsedTemplateArgument::NonType: { ExprResult Result = ActOnPackExpansion(Arg.getAsExpr(), EllipsisLoc); if (Result.isInvalid()) return ParsedTemplateArgument(); return ParsedTemplateArgument(Arg.getKind(), Result.get(), Arg.getLocation()); } case ParsedTemplateArgument::Template: if (!Arg.getAsTemplate().get().containsUnexpandedParameterPack()) { SourceRange R(Arg.getLocation()); if (Arg.getScopeSpec().isValid()) R.setBegin(Arg.getScopeSpec().getBeginLoc()); Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs) << R; return ParsedTemplateArgument(); } return 
Arg.getTemplatePackExpansion(EllipsisLoc); } llvm_unreachable("Unhandled template argument kind?"); } TypeResult Sema::ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc) { TypeSourceInfo *TSInfo; GetTypeFromParser(Type, &TSInfo); if (!TSInfo) return true; TypeSourceInfo *TSResult = CheckPackExpansion(TSInfo, EllipsisLoc, None); if (!TSResult) return true; return CreateParsedType(TSResult->getType(), TSResult); } TypeSourceInfo * Sema::CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional NumExpansions) { // Create the pack expansion type and source-location information. QualType Result = CheckPackExpansion(Pattern->getType(), Pattern->getTypeLoc().getSourceRange(), EllipsisLoc, NumExpansions); if (Result.isNull()) return nullptr; TypeLocBuilder TLB; TLB.pushFullCopy(Pattern->getTypeLoc()); PackExpansionTypeLoc TL = TLB.push(Result); TL.setEllipsisLoc(EllipsisLoc); return TLB.getTypeSourceInfo(Context, Result); } QualType Sema::CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional NumExpansions) { // C++11 [temp.variadic]p5: // The pattern of a pack expansion shall name one or more // parameter packs that are not expanded by a nested pack // expansion. // // A pattern containing a deduced type can't occur "naturally" but arises in // the desugaring of an init-capture pack. if (!Pattern->containsUnexpandedParameterPack() && !Pattern->getContainedDeducedType()) { Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs) << PatternRange; return QualType(); } - return Context.getPackExpansionType(Pattern, NumExpansions); + return Context.getPackExpansionType(Pattern, NumExpansions, + /*ExpectPackInType=*/false); } ExprResult Sema::ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc) { return CheckPackExpansion(Pattern, EllipsisLoc, None); } ExprResult Sema::CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional NumExpansions) { if (!Pattern) return ExprError(); // C++0x [temp.variadic]p5: // The pattern of a pack expansion shall name one or more // parameter packs that are not expanded by a nested pack // expansion. if (!Pattern->containsUnexpandedParameterPack()) { Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs) << Pattern->getSourceRange(); CorrectDelayedTyposInExpr(Pattern); return ExprError(); } // Create the pack expansion expression and source-location information. return new (Context) PackExpansionExpr(Context.DependentTy, Pattern, EllipsisLoc, NumExpansions); } bool Sema::CheckParameterPacksForExpansion( SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional &NumExpansions) { ShouldExpand = true; RetainExpansion = false; std::pair FirstPack; bool HaveFirstPack = false; Optional NumPartialExpansions; SourceLocation PartiallySubstitutedPackLoc; for (ArrayRef::iterator i = Unexpanded.begin(), end = Unexpanded.end(); i != end; ++i) { // Compute the depth and index for this parameter pack. unsigned Depth = 0, Index = 0; IdentifierInfo *Name; bool IsVarDeclPack = false; if (const TemplateTypeParmType *TTP = i->first.dyn_cast()) { Depth = TTP->getDepth(); Index = TTP->getIndex(); Name = TTP->getIdentifier(); } else { NamedDecl *ND = i->first.get(); if (isa(ND)) IsVarDeclPack = true; else std::tie(Depth, Index) = getDepthAndIndex(ND); Name = ND->getIdentifier(); } // Determine the size of this argument pack. 
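// Illustrative note (not part of the original source): for
//   template<typename ...Ts> void f(Ts ...ts) { g(ts...); }
//   f(1, 2, 3);
// the size of 'ts' may come either from the instantiated function parameter
// pack (a DeclArgumentPack holding three ParmVarDecls) or from the template
// argument pack bound to 'Ts' (whose pack_size() is 3), as handled below.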
unsigned NewPackSize; if (IsVarDeclPack) { // Figure out whether we're instantiating to an argument pack or not. typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack; llvm::PointerUnion *Instantiation = CurrentInstantiationScope->findInstantiationOf( i->first.get()); if (Instantiation->is()) { // We could expand this function parameter pack. NewPackSize = Instantiation->get()->size(); } else { // We can't expand this function parameter pack, so we can't expand // the pack expansion. ShouldExpand = false; continue; } } else { // If we don't have a template argument at this depth/index, then we // cannot expand the pack expansion. Make a note of this, but we still // want to check any parameter packs we *do* have arguments for. if (Depth >= TemplateArgs.getNumLevels() || !TemplateArgs.hasTemplateArgument(Depth, Index)) { ShouldExpand = false; continue; } // Determine the size of the argument pack. NewPackSize = TemplateArgs(Depth, Index).pack_size(); } // C++0x [temp.arg.explicit]p9: // Template argument deduction can extend the sequence of template // arguments corresponding to a template parameter pack, even when the // sequence contains explicitly specified template arguments. if (!IsVarDeclPack && CurrentInstantiationScope) { if (NamedDecl *PartialPack = CurrentInstantiationScope->getPartiallySubstitutedPack()){ unsigned PartialDepth, PartialIndex; std::tie(PartialDepth, PartialIndex) = getDepthAndIndex(PartialPack); if (PartialDepth == Depth && PartialIndex == Index) { RetainExpansion = true; // We don't actually know the new pack size yet. NumPartialExpansions = NewPackSize; PartiallySubstitutedPackLoc = i->second; continue; } } } if (!NumExpansions) { // This is the first pack we've seen for which we have an argument. // Record it. NumExpansions = NewPackSize; FirstPack.first = Name; FirstPack.second = i->second; HaveFirstPack = true; continue; } if (NewPackSize != *NumExpansions) { // C++0x [temp.variadic]p5: // All of the parameter packs expanded by a pack expansion shall have // the same number of arguments specified. if (HaveFirstPack) Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict) << FirstPack.first << Name << *NumExpansions << NewPackSize << SourceRange(FirstPack.second) << SourceRange(i->second); else Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_multilevel) << Name << *NumExpansions << NewPackSize << SourceRange(i->second); return true; } } // If we're performing a partial expansion but we also have a full expansion, // expand to the number of common arguments. For example, given: // // template struct A { // template void f(pair...); // }; // // ... a call to 'A().f' should expand the pack once and // retain an expansion. if (NumPartialExpansions) { if (NumExpansions && *NumExpansions < *NumPartialExpansions) { NamedDecl *PartialPack = CurrentInstantiationScope->getPartiallySubstitutedPack(); Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_partial) << PartialPack << *NumPartialExpansions << *NumExpansions << SourceRange(PartiallySubstitutedPackLoc); return true; } NumExpansions = NumPartialExpansions; } return false; } Optional Sema::getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs) { QualType Pattern = cast(T)->getPattern(); SmallVector Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(Pattern); Optional Result; for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) { // Compute the depth and index for this parameter pack.
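// Illustrative note (not part of the original source): depth counts enclosing
// template parameter lists (the outermost list is depth 0) and index is the
// position within that list, so in
//   template<typename T> struct X {
//     template<typename ...Us> void f(Us ...us);
//   };
// the pack 'Us' has depth 1 and index 0.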
unsigned Depth; unsigned Index; if (const TemplateTypeParmType *TTP = Unexpanded[I].first.dyn_cast()) { Depth = TTP->getDepth(); Index = TTP->getIndex(); } else { NamedDecl *ND = Unexpanded[I].first.get(); if (isa(ND)) { // Function parameter pack or init-capture pack. typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack; llvm::PointerUnion *Instantiation = CurrentInstantiationScope->findInstantiationOf( Unexpanded[I].first.get()); if (Instantiation->is()) // The pattern refers to an unexpanded pack. We're not ready to expand // this pack yet. return None; unsigned Size = Instantiation->get()->size(); assert((!Result || *Result == Size) && "inconsistent pack sizes"); Result = Size; continue; } std::tie(Depth, Index) = getDepthAndIndex(ND); } if (Depth >= TemplateArgs.getNumLevels() || !TemplateArgs.hasTemplateArgument(Depth, Index)) // The pattern refers to an unknown template argument. We're not ready to // expand this pack yet. return None; // Determine the size of the argument pack. unsigned Size = TemplateArgs(Depth, Index).pack_size(); assert((!Result || *Result == Size) && "inconsistent pack sizes"); Result = Size; } return Result; } bool Sema::containsUnexpandedParameterPacks(Declarator &D) { const DeclSpec &DS = D.getDeclSpec(); switch (DS.getTypeSpecType()) { case TST_typename: case TST_typeofType: case TST_underlyingType: case TST_atomic: { QualType T = DS.getRepAsType().get(); if (!T.isNull() && T->containsUnexpandedParameterPack()) return true; break; } case TST_typeofExpr: case TST_decltype: case TST_extint: if (DS.getRepAsExpr() && DS.getRepAsExpr()->containsUnexpandedParameterPack()) return true; break; case TST_unspecified: case TST_void: case TST_char: case TST_wchar: case TST_char8: case TST_char16: case TST_char32: case TST_int: case TST_int128: case TST_half: case TST_float: case TST_double: case TST_Accum: case TST_Fract: case TST_Float16: case TST_float128: case TST_bool: case TST_decimal32: case TST_decimal64: case TST_decimal128: case TST_enum: case TST_union: case TST_struct: case TST_interface: case TST_class: case TST_auto: case TST_auto_type: case TST_decltype_auto: case TST_BFloat16: #define GENERIC_IMAGE_TYPE(ImgType, Id) case TST_##ImgType##_t: #include "clang/Basic/OpenCLImageTypes.def" case TST_unknown_anytype: case TST_error: break; } for (unsigned I = 0, N = D.getNumTypeObjects(); I != N; ++I) { const DeclaratorChunk &Chunk = D.getTypeObject(I); switch (Chunk.Kind) { case DeclaratorChunk::Pointer: case DeclaratorChunk::Reference: case DeclaratorChunk::Paren: case DeclaratorChunk::Pipe: case DeclaratorChunk::BlockPointer: // These declarator chunks cannot contain any parameter packs. 
break; case DeclaratorChunk::Array: if (Chunk.Arr.NumElts && Chunk.Arr.NumElts->containsUnexpandedParameterPack()) return true; break; case DeclaratorChunk::Function: for (unsigned i = 0, e = Chunk.Fun.NumParams; i != e; ++i) { ParmVarDecl *Param = cast(Chunk.Fun.Params[i].Param); QualType ParamTy = Param->getType(); assert(!ParamTy.isNull() && "Couldn't parse type?"); if (ParamTy->containsUnexpandedParameterPack()) return true; } if (Chunk.Fun.getExceptionSpecType() == EST_Dynamic) { for (unsigned i = 0; i != Chunk.Fun.getNumExceptions(); ++i) { if (Chunk.Fun.Exceptions[i] .Ty.get() ->containsUnexpandedParameterPack()) return true; } } else if (isComputedNoexcept(Chunk.Fun.getExceptionSpecType()) && Chunk.Fun.NoexceptExpr->containsUnexpandedParameterPack()) return true; if (Chunk.Fun.hasTrailingReturnType()) { QualType T = Chunk.Fun.getTrailingReturnType().get(); if (!T.isNull() && T->containsUnexpandedParameterPack()) return true; } break; case DeclaratorChunk::MemberPointer: if (Chunk.Mem.Scope().getScopeRep() && Chunk.Mem.Scope().getScopeRep()->containsUnexpandedParameterPack()) return true; break; } } if (Expr *TRC = D.getTrailingRequiresClause()) if (TRC->containsUnexpandedParameterPack()) return true; return false; } namespace { // Callback to only accept typo corrections that refer to parameter packs. class ParameterPackValidatorCCC final : public CorrectionCandidateCallback { public: bool ValidateCandidate(const TypoCorrection &candidate) override { NamedDecl *ND = candidate.getCorrectionDecl(); return ND && ND->isParameterPack(); } std::unique_ptr clone() override { return std::make_unique(*this); } }; } /// Called when an expression computing the size of a parameter pack /// is parsed. /// /// \code /// template struct count { /// static const unsigned value = sizeof...(Types); /// }; /// \endcode /// // /// \param OpLoc The location of the "sizeof" keyword. /// \param Name The name of the parameter pack whose size will be determined. /// \param NameLoc The source location of the name of the parameter pack. /// \param RParenLoc The location of the closing parentheses. ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc) { // C++0x [expr.sizeof]p5: // The identifier in a sizeof... expression shall name a parameter pack. 
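// Illustrative note (not part of the original source):
//   template<typename ...Ts> constexpr unsigned N = sizeof...(Ts); // OK
//   int x; unsigned M = sizeof...(x); // error: 'x' is not a parameter pack
// The lookup and typo correction below enforce exactly this rule.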
LookupResult R(*this, &Name, NameLoc, LookupOrdinaryName); LookupName(R, S); NamedDecl *ParameterPack = nullptr; switch (R.getResultKind()) { case LookupResult::Found: ParameterPack = R.getFoundDecl(); break; case LookupResult::NotFound: case LookupResult::NotFoundInCurrentInstantiation: { ParameterPackValidatorCCC CCC{}; if (TypoCorrection Corrected = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, nullptr, CCC, CTK_ErrorRecovery)) { diagnoseTypo(Corrected, PDiag(diag::err_sizeof_pack_no_pack_name_suggest) << &Name, PDiag(diag::note_parameter_pack_here)); ParameterPack = Corrected.getCorrectionDecl(); } break; } case LookupResult::FoundOverloaded: case LookupResult::FoundUnresolvedValue: break; case LookupResult::Ambiguous: DiagnoseAmbiguousLookup(R); return ExprError(); } if (!ParameterPack || !ParameterPack->isParameterPack()) { Diag(NameLoc, diag::err_sizeof_pack_no_pack_name) << &Name; return ExprError(); } MarkAnyDeclReferenced(OpLoc, ParameterPack, true); return SizeOfPackExpr::Create(Context, OpLoc, ParameterPack, NameLoc, RParenLoc); } TemplateArgumentLoc Sema::getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional &NumExpansions) const { const TemplateArgument &Argument = OrigLoc.getArgument(); assert(Argument.isPackExpansion()); switch (Argument.getKind()) { case TemplateArgument::Type: { // FIXME: We shouldn't ever have to worry about missing // type-source info! TypeSourceInfo *ExpansionTSInfo = OrigLoc.getTypeSourceInfo(); if (!ExpansionTSInfo) ExpansionTSInfo = Context.getTrivialTypeSourceInfo(Argument.getAsType(), Ellipsis); PackExpansionTypeLoc Expansion = ExpansionTSInfo->getTypeLoc().castAs(); Ellipsis = Expansion.getEllipsisLoc(); TypeLoc Pattern = Expansion.getPatternLoc(); NumExpansions = Expansion.getTypePtr()->getNumExpansions(); // We need to copy the TypeLoc because TemplateArgumentLocs store a // TypeSourceInfo. // FIXME: Find some way to avoid the copy? TypeLocBuilder TLB; TLB.pushFullCopy(Pattern); TypeSourceInfo *PatternTSInfo = TLB.getTypeSourceInfo(Context, Pattern.getType()); return TemplateArgumentLoc(TemplateArgument(Pattern.getType()), PatternTSInfo); } case TemplateArgument::Expression: { PackExpansionExpr *Expansion = cast(Argument.getAsExpr()); Expr *Pattern = Expansion->getPattern(); Ellipsis = Expansion->getEllipsisLoc(); NumExpansions = Expansion->getNumExpansions(); return TemplateArgumentLoc(Pattern, Pattern); } case TemplateArgument::TemplateExpansion: Ellipsis = OrigLoc.getTemplateEllipsisLoc(); NumExpansions = Argument.getNumTemplateExpansions(); return TemplateArgumentLoc(Argument.getPackExpansionPattern(), OrigLoc.getTemplateQualifierLoc(), OrigLoc.getTemplateNameLoc()); case TemplateArgument::Declaration: case TemplateArgument::NullPtr: case TemplateArgument::Template: case TemplateArgument::Integral: case TemplateArgument::Pack: case TemplateArgument::Null: return TemplateArgumentLoc(); } llvm_unreachable("Invalid TemplateArgument Kind!"); } Optional Sema::getFullyPackExpandedSize(TemplateArgument Arg) { assert(Arg.containsUnexpandedParameterPack()); // If this is a substituted pack, grab that pack. If not, we don't know // the size yet. // FIXME: We could find a size in more cases by looking for a substituted // pack anywhere within this argument, but that's not necessary in the common // case for 'sizeof...(A)' handling. 
TemplateArgument Pack; switch (Arg.getKind()) { case TemplateArgument::Type: if (auto *Subst = Arg.getAsType()->getAs()) Pack = Subst->getArgumentPack(); else return None; break; case TemplateArgument::Expression: if (auto *Subst = dyn_cast(Arg.getAsExpr())) Pack = Subst->getArgumentPack(); else if (auto *Subst = dyn_cast(Arg.getAsExpr())) { for (VarDecl *PD : *Subst) if (PD->isParameterPack()) return None; return Subst->getNumExpansions(); } else return None; break; case TemplateArgument::Template: if (SubstTemplateTemplateParmPackStorage *Subst = Arg.getAsTemplate().getAsSubstTemplateTemplateParmPack()) Pack = Subst->getArgumentPack(); else return None; break; case TemplateArgument::Declaration: case TemplateArgument::NullPtr: case TemplateArgument::TemplateExpansion: case TemplateArgument::Integral: case TemplateArgument::Pack: case TemplateArgument::Null: return None; } // Check that no argument in the pack is itself a pack expansion. for (TemplateArgument Elem : Pack.pack_elements()) { // There's no point recursing in this case; we would have already // expanded this pack expansion into the enclosing pack if we could. if (Elem.isPackExpansion()) return None; } return Pack.pack_size(); } static void CheckFoldOperand(Sema &S, Expr *E) { if (!E) return; E = E->IgnoreImpCasts(); auto *OCE = dyn_cast(E); if ((OCE && OCE->isInfixBinaryOp()) || isa(E) || isa(E)) { S.Diag(E->getExprLoc(), diag::err_fold_expression_bad_operand) << E->getSourceRange() << FixItHint::CreateInsertion(E->getBeginLoc(), "(") << FixItHint::CreateInsertion(E->getEndLoc(), ")"); } } ExprResult Sema::ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc) { // LHS and RHS must be cast-expressions. We allow an arbitrary expression // in the parser and reduce down to just cast-expressions here. CheckFoldOperand(*this, LHS); CheckFoldOperand(*this, RHS); auto DiscardOperands = [&] { CorrectDelayedTyposInExpr(LHS); CorrectDelayedTyposInExpr(RHS); }; // [expr.prim.fold]p3: // In a binary fold, op1 and op2 shall be the same fold-operator, and // either e1 shall contain an unexpanded parameter pack or e2 shall contain // an unexpanded parameter pack, but not both. if (LHS && RHS && LHS->containsUnexpandedParameterPack() == RHS->containsUnexpandedParameterPack()) { DiscardOperands(); return Diag(EllipsisLoc, LHS->containsUnexpandedParameterPack() ? diag::err_fold_expression_packs_both_sides : diag::err_pack_expansion_without_parameter_packs) << LHS->getSourceRange() << RHS->getSourceRange(); } // [expr.prim.fold]p2: // In a unary fold, the cast-expression shall contain an unexpanded // parameter pack. if (!LHS || !RHS) { Expr *Pack = LHS ? 
LHS : RHS; assert(Pack && "fold expression with neither LHS nor RHS"); DiscardOperands(); if (!Pack->containsUnexpandedParameterPack()) return Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs) << Pack->getSourceRange(); } BinaryOperatorKind Opc = ConvertTokenKindToBinaryOpcode(Operator); return BuildCXXFoldExpr(LParenLoc, LHS, Opc, EllipsisLoc, RHS, RParenLoc, None); } ExprResult Sema::BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional NumExpansions) { return new (Context) CXXFoldExpr(Context.DependentTy, LParenLoc, LHS, Operator, EllipsisLoc, RHS, RParenLoc, NumExpansions); } ExprResult Sema::BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator) { // [temp.variadic]p9: // If N is zero for a unary fold-expression, the value of the expression is // && -> true // || -> false // , -> void() // if the operator is not listed [above], the instantiation is ill-formed. // // Note that we need to use something like int() here, not merely 0, to // prevent the result from being a null pointer constant. QualType ScalarType; switch (Operator) { case BO_LOr: return ActOnCXXBoolLiteral(EllipsisLoc, tok::kw_false); case BO_LAnd: return ActOnCXXBoolLiteral(EllipsisLoc, tok::kw_true); case BO_Comma: ScalarType = Context.VoidTy; break; default: return Diag(EllipsisLoc, diag::err_fold_expression_empty) << BinaryOperator::getOpcodeStr(Operator); } return new (Context) CXXScalarValueInitExpr( ScalarType, Context.getTrivialTypeSourceInfo(ScalarType, EllipsisLoc), EllipsisLoc); } diff --git a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp index cc151a048b98..f2c3c6373948 100644 --- a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp +++ b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp @@ -1,8897 +1,8902 @@ //===--- SemaType.cpp - Semantic Analysis for Types -----------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements type-related semantic analysis. 
// //===----------------------------------------------------------------------===// #include "TypeLocBuilder.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTMutationListener.h" #include "clang/AST/ASTStructuralEquivalence.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeLocVisitor.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/DelayedDiagnostic.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/Template.h" #include "clang/Sema/TemplateInstCallback.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/Support/ErrorHandling.h" using namespace clang; enum TypeDiagSelector { TDS_Function, TDS_Pointer, TDS_ObjCObjOrBlock }; /// isOmittedBlockReturnType - Return true if this declarator is missing a /// return type because this is an omitted return type on a block literal. static bool isOmittedBlockReturnType(const Declarator &D) { if (D.getContext() != DeclaratorContext::BlockLiteralContext || D.getDeclSpec().hasTypeSpecifier()) return false; if (D.getNumTypeObjects() == 0) return true; // ^{ ... } if (D.getNumTypeObjects() == 1 && D.getTypeObject(0).Kind == DeclaratorChunk::Function) return true; // ^(int X, float Y) { ... } return false; } /// diagnoseBadTypeAttribute - Diagnoses a type attribute which /// doesn't apply to the given type. static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr, QualType type) { TypeDiagSelector WhichType; bool useExpansionLoc = true; switch (attr.getKind()) { case ParsedAttr::AT_ObjCGC: WhichType = TDS_Pointer; break; case ParsedAttr::AT_ObjCOwnership: WhichType = TDS_ObjCObjOrBlock; break; default: // Assume everything else was a function attribute. WhichType = TDS_Function; useExpansionLoc = false; break; } SourceLocation loc = attr.getLoc(); StringRef name = attr.getAttrName()->getName(); // The GC attributes are usually written with macros; special-case them. IdentifierInfo *II = attr.isArgIdent(0) ? attr.getArgAsIdent(0)->Ident : nullptr; if (useExpansionLoc && loc.isMacroID() && II) { if (II->isStr("strong")) { if (S.findMacroSpelling(loc, "__strong")) name = "__strong"; } else if (II->isStr("weak")) { if (S.findMacroSpelling(loc, "__weak")) name = "__weak"; } } S.Diag(loc, diag::warn_type_attribute_wrong_type) << name << WhichType << type; } // objc_gc applies to Objective-C pointers or, otherwise, to the // smallest available pointer type (i.e. 'void*' in 'void**'). #define OBJC_POINTER_TYPE_ATTRS_CASELIST \ case ParsedAttr::AT_ObjCGC: \ case ParsedAttr::AT_ObjCOwnership // Calling convention attributes.
#define CALLING_CONV_ATTRS_CASELIST \ case ParsedAttr::AT_CDecl: \ case ParsedAttr::AT_FastCall: \ case ParsedAttr::AT_StdCall: \ case ParsedAttr::AT_ThisCall: \ case ParsedAttr::AT_RegCall: \ case ParsedAttr::AT_Pascal: \ case ParsedAttr::AT_SwiftCall: \ case ParsedAttr::AT_VectorCall: \ case ParsedAttr::AT_AArch64VectorPcs: \ case ParsedAttr::AT_MSABI: \ case ParsedAttr::AT_SysVABI: \ case ParsedAttr::AT_Pcs: \ case ParsedAttr::AT_IntelOclBicc: \ case ParsedAttr::AT_PreserveMost: \ case ParsedAttr::AT_PreserveAll // Function type attributes. #define FUNCTION_TYPE_ATTRS_CASELIST \ case ParsedAttr::AT_NSReturnsRetained: \ case ParsedAttr::AT_NoReturn: \ case ParsedAttr::AT_Regparm: \ case ParsedAttr::AT_CmseNSCall: \ case ParsedAttr::AT_AnyX86NoCallerSavedRegisters: \ case ParsedAttr::AT_AnyX86NoCfCheck: \ CALLING_CONV_ATTRS_CASELIST // Microsoft-specific type qualifiers. #define MS_TYPE_ATTRS_CASELIST \ case ParsedAttr::AT_Ptr32: \ case ParsedAttr::AT_Ptr64: \ case ParsedAttr::AT_SPtr: \ case ParsedAttr::AT_UPtr // Nullability qualifiers. #define NULLABILITY_TYPE_ATTRS_CASELIST \ case ParsedAttr::AT_TypeNonNull: \ case ParsedAttr::AT_TypeNullable: \ case ParsedAttr::AT_TypeNullUnspecified namespace { /// An object which stores processing state for the entire /// GetTypeForDeclarator process. class TypeProcessingState { Sema &sema; /// The declarator being processed. Declarator &declarator; /// The index of the declarator chunk we're currently processing. /// May be the total number of valid chunks, indicating the /// DeclSpec. unsigned chunkIndex; /// Whether there are non-trivial modifications to the decl spec. bool trivial; /// Whether we saved the attributes in the decl spec. bool hasSavedAttrs; /// The original set of attributes on the DeclSpec. SmallVector savedAttrs; /// A list of attributes to diagnose the uselessness of when the /// processing is complete. SmallVector ignoredTypeAttrs; /// Attributes corresponding to AttributedTypeLocs that we have not yet /// populated. // FIXME: The two-phase mechanism by which we construct Types and fill // their TypeLocs makes it hard to correctly assign these. We keep the // attributes in creation order as an attempt to make them line up // properly. using TypeAttrPair = std::pair; SmallVector AttrsForTypes; bool AttrsForTypesSorted = true; /// MacroQualifiedTypes mapping to macro expansion locations that will be /// stored in a MacroQualifiedTypeLoc. llvm::DenseMap LocsForMacros; /// Flag to indicate we parsed a noderef attribute. This is used for /// validating that noderef was used on a pointer or array. bool parsedNoDeref; public: TypeProcessingState(Sema &sema, Declarator &declarator) : sema(sema), declarator(declarator), chunkIndex(declarator.getNumTypeObjects()), trivial(true), hasSavedAttrs(false), parsedNoDeref(false) {} Sema &getSema() const { return sema; } Declarator &getDeclarator() const { return declarator; } bool isProcessingDeclSpec() const { return chunkIndex == declarator.getNumTypeObjects(); } unsigned getCurrentChunkIndex() const { return chunkIndex; } void setCurrentChunkIndex(unsigned idx) { assert(idx <= declarator.getNumTypeObjects()); chunkIndex = idx; } ParsedAttributesView &getCurrentAttributes() const { if (isProcessingDeclSpec()) return getMutableDeclSpec().getAttributes(); return declarator.getTypeObject(chunkIndex).getAttrs(); } /// Save the current set of attributes on the DeclSpec. void saveDeclSpecAttrs() { // Don't try to save them multiple times. 
if (hasSavedAttrs) return; DeclSpec &spec = getMutableDeclSpec(); for (ParsedAttr &AL : spec.getAttributes()) savedAttrs.push_back(&AL); trivial &= savedAttrs.empty(); hasSavedAttrs = true; } /// Record that we had nowhere to put the given type attribute. /// We will diagnose such attributes later. void addIgnoredTypeAttr(ParsedAttr &attr) { ignoredTypeAttrs.push_back(&attr); } /// Diagnose all the ignored type attributes, given that the /// declarator worked out to the given type. void diagnoseIgnoredTypeAttrs(QualType type) const { for (auto *Attr : ignoredTypeAttrs) diagnoseBadTypeAttribute(getSema(), *Attr, type); } /// Get an attributed type for the given attribute, and remember the Attr /// object so that we can attach it to the AttributedTypeLoc. QualType getAttributedType(Attr *A, QualType ModifiedType, QualType EquivType) { QualType T = sema.Context.getAttributedType(A->getKind(), ModifiedType, EquivType); AttrsForTypes.push_back({cast(T.getTypePtr()), A}); AttrsForTypesSorted = false; return T; } /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. Also replace \p TypeWithAuto in \c TypeAttrPair if /// necessary. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement) { QualType T = sema.ReplaceAutoType(TypeWithAuto, Replacement); if (auto *AttrTy = TypeWithAuto->getAs()) { // Attributed type still should be an attributed type after replacement. auto *NewAttrTy = cast(T.getTypePtr()); for (TypeAttrPair &A : AttrsForTypes) { if (A.first == AttrTy) A.first = NewAttrTy; } AttrsForTypesSorted = false; } return T; } /// Extract and remove the Attr* for a given attributed type. const Attr *takeAttrForAttributedType(const AttributedType *AT) { if (!AttrsForTypesSorted) { llvm::stable_sort(AttrsForTypes, llvm::less_first()); AttrsForTypesSorted = true; } // FIXME: This is quadratic if we have lots of reuses of the same // attributed type. for (auto It = std::partition_point( AttrsForTypes.begin(), AttrsForTypes.end(), [=](const TypeAttrPair &A) { return A.first < AT; }); It != AttrsForTypes.end() && It->first == AT; ++It) { if (It->second) { const Attr *Result = It->second; It->second = nullptr; return Result; } } llvm_unreachable("no Attr* for AttributedType*"); } SourceLocation getExpansionLocForMacroQualifiedType(const MacroQualifiedType *MQT) const { auto FoundLoc = LocsForMacros.find(MQT); assert(FoundLoc != LocsForMacros.end() && "Unable to find macro expansion location for MacroQualifedType"); return FoundLoc->second; } void setExpansionLocForMacroQualifiedType(const MacroQualifiedType *MQT, SourceLocation Loc) { LocsForMacros[MQT] = Loc; } void setParsedNoDeref(bool parsed) { parsedNoDeref = parsed; } bool didParseNoDeref() const { return parsedNoDeref; } ~TypeProcessingState() { if (trivial) return; restoreDeclSpecAttrs(); } private: DeclSpec &getMutableDeclSpec() const { return const_cast(declarator.getDeclSpec()); } void restoreDeclSpecAttrs() { assert(hasSavedAttrs); getMutableDeclSpec().getAttributes().clearListOnly(); for (ParsedAttr *AL : savedAttrs) getMutableDeclSpec().getAttributes().addAtEnd(AL); } }; } // end anonymous namespace static void moveAttrFromListToList(ParsedAttr &attr, ParsedAttributesView &fromList, ParsedAttributesView &toList) { fromList.remove(&attr); toList.addAtEnd(&attr); } /// The location of a type attribute. enum TypeAttrLocation { /// The attribute is in the decl-specifier-seq. TAL_DeclSpec, /// The attribute is part of a DeclaratorChunk. 
TAL_DeclChunk, /// The attribute is immediately after the declaration's name. TAL_DeclName }; static void processTypeAttrs(TypeProcessingState &state, QualType &type, TypeAttrLocation TAL, ParsedAttributesView &attrs); static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr, QualType &type); static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &state, ParsedAttr &attr, QualType &type); static bool handleObjCGCTypeAttr(TypeProcessingState &state, ParsedAttr &attr, QualType &type); static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state, ParsedAttr &attr, QualType &type); static bool handleObjCPointerTypeAttr(TypeProcessingState &state, ParsedAttr &attr, QualType &type) { if (attr.getKind() == ParsedAttr::AT_ObjCGC) return handleObjCGCTypeAttr(state, attr, type); assert(attr.getKind() == ParsedAttr::AT_ObjCOwnership); return handleObjCOwnershipTypeAttr(state, attr, type); } /// Given the index of a declarator chunk, check whether that chunk /// directly specifies the return type of a function and, if so, find /// an appropriate place for it. /// /// \param i - a notional index which the search will start /// immediately inside /// /// \param onlyBlockPointers Whether we should only look into block /// pointer types (vs. all pointer types). static DeclaratorChunk *maybeMovePastReturnType(Declarator &declarator, unsigned i, bool onlyBlockPointers) { assert(i <= declarator.getNumTypeObjects()); DeclaratorChunk *result = nullptr; // First, look inwards past parens for a function declarator. for (; i != 0; --i) { DeclaratorChunk &fnChunk = declarator.getTypeObject(i-1); switch (fnChunk.Kind) { case DeclaratorChunk::Paren: continue; // If we find anything except a function, bail out. case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::Array: case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: case DeclaratorChunk::Pipe: return result; // If we do find a function declarator, scan inwards from that, // looking for a (block-)pointer declarator. case DeclaratorChunk::Function: for (--i; i != 0; --i) { DeclaratorChunk &ptrChunk = declarator.getTypeObject(i-1); switch (ptrChunk.Kind) { case DeclaratorChunk::Paren: case DeclaratorChunk::Array: case DeclaratorChunk::Function: case DeclaratorChunk::Reference: case DeclaratorChunk::Pipe: continue; case DeclaratorChunk::MemberPointer: case DeclaratorChunk::Pointer: if (onlyBlockPointers) continue; LLVM_FALLTHROUGH; case DeclaratorChunk::BlockPointer: result = &ptrChunk; goto continue_outer; } llvm_unreachable("bad declarator chunk kind"); } // If we run out of declarators doing that, we're done. return result; } llvm_unreachable("bad declarator chunk kind"); // Okay, reconsider from our new point. continue_outer: ; } // Ran out of chunks, bail out. return result; } /// Given that an objc_gc attribute was written somewhere on a /// declaration *other* than on the declarator itself (for which, use /// distributeObjCPointerTypeAttrFromDeclarator), and given that it /// didn't apply in whatever position it was written in, try to move /// it to a more appropriate position. static void distributeObjCPointerTypeAttr(TypeProcessingState &state, ParsedAttr &attr, QualType type) { Declarator &declarator = state.getDeclarator(); // Move it to the outermost normal or block pointer declarator. 
for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) { DeclaratorChunk &chunk = declarator.getTypeObject(i-1); switch (chunk.Kind) { case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: { // But don't move an ARC ownership attribute to the return type // of a block. DeclaratorChunk *destChunk = nullptr; if (state.isProcessingDeclSpec() && attr.getKind() == ParsedAttr::AT_ObjCOwnership) destChunk = maybeMovePastReturnType(declarator, i - 1, /*onlyBlockPointers=*/true); if (!destChunk) destChunk = &chunk; moveAttrFromListToList(attr, state.getCurrentAttributes(), destChunk->getAttrs()); return; } case DeclaratorChunk::Paren: case DeclaratorChunk::Array: continue; // We may be starting at the return type of a block. case DeclaratorChunk::Function: if (state.isProcessingDeclSpec() && attr.getKind() == ParsedAttr::AT_ObjCOwnership) { if (DeclaratorChunk *dest = maybeMovePastReturnType( declarator, i, /*onlyBlockPointers=*/true)) { moveAttrFromListToList(attr, state.getCurrentAttributes(), dest->getAttrs()); return; } } goto error; // Don't walk through these. case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: case DeclaratorChunk::Pipe: goto error; } } error: diagnoseBadTypeAttribute(state.getSema(), attr, type); } /// Distribute an objc_gc type attribute that was written on the /// declarator. static void distributeObjCPointerTypeAttrFromDeclarator( TypeProcessingState &state, ParsedAttr &attr, QualType &declSpecType) { Declarator &declarator = state.getDeclarator(); // objc_gc goes on the innermost pointer to something that's not a // pointer. unsigned innermost = -1U; bool considerDeclSpec = true; for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) { DeclaratorChunk &chunk = declarator.getTypeObject(i); switch (chunk.Kind) { case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: innermost = i; continue; case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: case DeclaratorChunk::Paren: case DeclaratorChunk::Array: case DeclaratorChunk::Pipe: continue; case DeclaratorChunk::Function: considerDeclSpec = false; goto done; } } done: // That might actually be the decl spec if we weren't blocked by // anything in the declarator. if (considerDeclSpec) { if (handleObjCPointerTypeAttr(state, attr, declSpecType)) { // Splice the attribute into the decl spec. Prevents the // attribute from being applied multiple times and gives // the source-location-filler something to work with. state.saveDeclSpecAttrs(); declarator.getMutableDeclSpec().getAttributes().takeOneFrom( declarator.getAttributes(), &attr); return; } } // Otherwise, if we found an appropriate chunk, splice the attribute // into it. if (innermost != -1U) { moveAttrFromListToList(attr, declarator.getAttributes(), declarator.getTypeObject(innermost).getAttrs()); return; } // Otherwise, diagnose when we're done building the type. declarator.getAttributes().remove(&attr); state.addIgnoredTypeAttr(attr); } /// A function type attribute was written somewhere in a declaration /// *other* than on the declarator itself or in the decl spec. Given /// that it didn't apply in whatever position it was written in, try /// to move it to a more appropriate position. static void distributeFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr, QualType type) { Declarator &declarator = state.getDeclarator(); // Try to push the attribute from the return type of a function to // the function itself. 
for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) { DeclaratorChunk &chunk = declarator.getTypeObject(i-1); switch (chunk.Kind) { case DeclaratorChunk::Function: moveAttrFromListToList(attr, state.getCurrentAttributes(), chunk.getAttrs()); return; case DeclaratorChunk::Paren: case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::Array: case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: case DeclaratorChunk::Pipe: continue; } } diagnoseBadTypeAttribute(state.getSema(), attr, type); } /// Try to distribute a function type attribute to the innermost /// function chunk or type. Returns true if the attribute was /// distributed, false if no location was found. static bool distributeFunctionTypeAttrToInnermost( TypeProcessingState &state, ParsedAttr &attr, ParsedAttributesView &attrList, QualType &declSpecType) { Declarator &declarator = state.getDeclarator(); // Put it on the innermost function chunk, if there is one. for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) { DeclaratorChunk &chunk = declarator.getTypeObject(i); if (chunk.Kind != DeclaratorChunk::Function) continue; moveAttrFromListToList(attr, attrList, chunk.getAttrs()); return true; } return handleFunctionTypeAttr(state, attr, declSpecType); } /// A function type attribute was written in the decl spec. Try to /// apply it somewhere. static void distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state, ParsedAttr &attr, QualType &declSpecType) { state.saveDeclSpecAttrs(); // C++11 attributes before the decl specifiers actually appertain to // the declarators. Move them straight there. We don't support the // 'put them wherever you like' semantics we allow for GNU attributes. if (attr.isCXX11Attribute()) { moveAttrFromListToList(attr, state.getCurrentAttributes(), state.getDeclarator().getAttributes()); return; } // Try to distribute to the innermost. if (distributeFunctionTypeAttrToInnermost( state, attr, state.getCurrentAttributes(), declSpecType)) return; // If that failed, diagnose the bad attribute when the declarator is // fully built. state.addIgnoredTypeAttr(attr); } /// A function type attribute was written on the declarator. Try to /// apply it somewhere. static void distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state, ParsedAttr &attr, QualType &declSpecType) { Declarator &declarator = state.getDeclarator(); // Try to distribute to the innermost. if (distributeFunctionTypeAttrToInnermost( state, attr, declarator.getAttributes(), declSpecType)) return; // If that failed, diagnose the bad attribute when the declarator is // fully built. declarator.getAttributes().remove(&attr); state.addIgnoredTypeAttr(attr); } /// Given that there are attributes written on the declarator /// itself, try to distribute any type attributes to the appropriate /// declarator chunk. /// /// These are attributes like the following: /// int f ATTR; /// int (f ATTR)(); /// but not necessarily this: /// int f() ATTR; static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state, QualType &declSpecType) { // Collect all the type attributes from the declarator itself. assert(!state.getDeclarator().getAttributes().empty() && "declarator has no attrs!"); // The called functions in this loop actually remove things from the current // list, so iterating over the existing list isn't possible. Instead, make a // non-owning copy and iterate over that. 
ParsedAttributesView AttrsCopy{state.getDeclarator().getAttributes()}; for (ParsedAttr &attr : AttrsCopy) { // Do not distribute C++11 attributes. They have strict rules for what // they appertain to. if (attr.isCXX11Attribute()) continue; switch (attr.getKind()) { OBJC_POINTER_TYPE_ATTRS_CASELIST: distributeObjCPointerTypeAttrFromDeclarator(state, attr, declSpecType); break; FUNCTION_TYPE_ATTRS_CASELIST: distributeFunctionTypeAttrFromDeclarator(state, attr, declSpecType); break; MS_TYPE_ATTRS_CASELIST: // Microsoft type attributes cannot go after the declarator-id. continue; NULLABILITY_TYPE_ATTRS_CASELIST: // Nullability specifiers cannot go after the declarator-id. // Objective-C __kindof does not get distributed. case ParsedAttr::AT_ObjCKindOf: continue; default: break; } } } /// Add a synthetic '()' to a block-literal declarator if it is /// required, given the return type. static void maybeSynthesizeBlockSignature(TypeProcessingState &state, QualType declSpecType) { Declarator &declarator = state.getDeclarator(); // First, check whether the declarator would produce a function, // i.e. whether the innermost semantic chunk is a function. if (declarator.isFunctionDeclarator()) { // If so, make that declarator a prototyped declarator. declarator.getFunctionTypeInfo().hasPrototype = true; return; } // If there are any type objects, the type as written won't name a // function, regardless of the decl spec type. This is because a // block signature declarator is always an abstract-declarator, and // abstract-declarators can't just be parentheses chunks. Therefore // we need to build a function chunk unless there are no type // objects and the decl spec type is a function. if (!declarator.getNumTypeObjects() && declSpecType->isFunctionType()) return; // Note that there *are* cases with invalid declarators where // declarators consist solely of parentheses. In general, these // occur only in failed efforts to make function declarators, so // faking up the function chunk is still the right thing to do. // Otherwise, we need to fake up a function declarator. SourceLocation loc = declarator.getBeginLoc(); // ...and *prepend* it to the declarator. SourceLocation NoLoc; declarator.AddInnermostTypeInfo(DeclaratorChunk::getFunction( /*HasProto=*/true, /*IsAmbiguous=*/false, /*LParenLoc=*/NoLoc, /*ArgInfo=*/nullptr, /*NumParams=*/0, /*EllipsisLoc=*/NoLoc, /*RParenLoc=*/NoLoc, /*RefQualifierIsLvalueRef=*/true, /*RefQualifierLoc=*/NoLoc, /*MutableLoc=*/NoLoc, EST_None, /*ESpecRange=*/SourceRange(), /*Exceptions=*/nullptr, /*ExceptionRanges=*/nullptr, /*NumExceptions=*/0, /*NoexceptExpr=*/nullptr, /*ExceptionSpecTokens=*/nullptr, /*DeclsInPrototype=*/None, loc, loc, declarator)); // For consistency, make sure the state still has us as processing // the decl spec. assert(state.getCurrentChunkIndex() == declarator.getNumTypeObjects() - 1); state.setCurrentChunkIndex(declarator.getNumTypeObjects()); } static void diagnoseAndRemoveTypeQualifiers(Sema &S, const DeclSpec &DS, unsigned &TypeQuals, QualType TypeSoFar, unsigned RemoveTQs, unsigned DiagID) { // If this occurs outside a template instantiation, warn the user about // it; they probably didn't mean to specify a redundant qualifier. 
typedef std::pair QualLoc; for (QualLoc Qual : {QualLoc(DeclSpec::TQ_const, DS.getConstSpecLoc()), QualLoc(DeclSpec::TQ_restrict, DS.getRestrictSpecLoc()), QualLoc(DeclSpec::TQ_volatile, DS.getVolatileSpecLoc()), QualLoc(DeclSpec::TQ_atomic, DS.getAtomicSpecLoc())}) { if (!(RemoveTQs & Qual.first)) continue; if (!S.inTemplateInstantiation()) { if (TypeQuals & Qual.first) S.Diag(Qual.second, DiagID) << DeclSpec::getSpecifierName(Qual.first) << TypeSoFar << FixItHint::CreateRemoval(Qual.second); } TypeQuals &= ~Qual.first; } } /// Return true if this is omitted block return type. Also check type /// attributes and type qualifiers when returning true. static bool checkOmittedBlockReturnType(Sema &S, Declarator &declarator, QualType Result) { if (!isOmittedBlockReturnType(declarator)) return false; // Warn if we see type attributes for omitted return type on a block literal. SmallVector ToBeRemoved; for (ParsedAttr &AL : declarator.getMutableDeclSpec().getAttributes()) { if (AL.isInvalid() || !AL.isTypeAttr()) continue; S.Diag(AL.getLoc(), diag::warn_block_literal_attributes_on_omitted_return_type) << AL; ToBeRemoved.push_back(&AL); } // Remove bad attributes from the list. for (ParsedAttr *AL : ToBeRemoved) declarator.getMutableDeclSpec().getAttributes().remove(AL); // Warn if we see type qualifiers for omitted return type on a block literal. const DeclSpec &DS = declarator.getDeclSpec(); unsigned TypeQuals = DS.getTypeQualifiers(); diagnoseAndRemoveTypeQualifiers(S, DS, TypeQuals, Result, (unsigned)-1, diag::warn_block_literal_qualifiers_on_omitted_return_type); declarator.getMutableDeclSpec().ClearTypeQualifiers(); return true; } /// Apply Objective-C type arguments to the given type. static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type, ArrayRef typeArgs, SourceRange typeArgsRange, bool failOnError = false) { // We can only apply type arguments to an Objective-C class type. const auto *objcObjectType = type->getAs(); if (!objcObjectType || !objcObjectType->getInterface()) { S.Diag(loc, diag::err_objc_type_args_non_class) << type << typeArgsRange; if (failOnError) return QualType(); return type; } // The class type must be parameterized. ObjCInterfaceDecl *objcClass = objcObjectType->getInterface(); ObjCTypeParamList *typeParams = objcClass->getTypeParamList(); if (!typeParams) { S.Diag(loc, diag::err_objc_type_args_non_parameterized_class) << objcClass->getDeclName() << FixItHint::CreateRemoval(typeArgsRange); if (failOnError) return QualType(); return type; } // The type must not already be specialized. if (objcObjectType->isSpecialized()) { S.Diag(loc, diag::err_objc_type_args_specialized_class) << type << FixItHint::CreateRemoval(typeArgsRange); if (failOnError) return QualType(); return type; } // Check the type arguments. SmallVector finalTypeArgs; unsigned numTypeParams = typeParams->size(); bool anyPackExpansions = false; for (unsigned i = 0, n = typeArgs.size(); i != n; ++i) { TypeSourceInfo *typeArgInfo = typeArgs[i]; QualType typeArg = typeArgInfo->getType(); // Type arguments cannot have explicit qualifiers or nullability. // We ignore indirect sources of these, e.g. behind typedefs or // template arguments. 
if (TypeLoc qual = typeArgInfo->getTypeLoc().findExplicitQualifierLoc()) { bool diagnosed = false; SourceRange rangeToRemove; if (auto attr = qual.getAs()) { rangeToRemove = attr.getLocalSourceRange(); if (attr.getTypePtr()->getImmediateNullability()) { typeArg = attr.getTypePtr()->getModifiedType(); S.Diag(attr.getBeginLoc(), diag::err_objc_type_arg_explicit_nullability) << typeArg << FixItHint::CreateRemoval(rangeToRemove); diagnosed = true; } } if (!diagnosed) { S.Diag(qual.getBeginLoc(), diag::err_objc_type_arg_qualified) << typeArg << typeArg.getQualifiers().getAsString() << FixItHint::CreateRemoval(rangeToRemove); } } // Remove qualifiers even if they're non-local. typeArg = typeArg.getUnqualifiedType(); finalTypeArgs.push_back(typeArg); if (typeArg->getAs()) anyPackExpansions = true; // Find the corresponding type parameter, if there is one. ObjCTypeParamDecl *typeParam = nullptr; if (!anyPackExpansions) { if (i < numTypeParams) { typeParam = typeParams->begin()[i]; } else { // Too many arguments. S.Diag(loc, diag::err_objc_type_args_wrong_arity) << false << objcClass->getDeclName() << (unsigned)typeArgs.size() << numTypeParams; S.Diag(objcClass->getLocation(), diag::note_previous_decl) << objcClass; if (failOnError) return QualType(); return type; } } // Objective-C object pointer types must be substitutable for the bounds. if (const auto *typeArgObjC = typeArg->getAs()) { // If we don't have a type parameter to match against, assume // everything is fine. There was a prior pack expansion that // means we won't be able to match anything. if (!typeParam) { assert(anyPackExpansions && "Too many arguments?"); continue; } // Retrieve the bound. QualType bound = typeParam->getUnderlyingType(); const auto *boundObjC = bound->getAs(); // Determine whether the type argument is substitutable for the bound. if (typeArgObjC->isObjCIdType()) { // When the type argument is 'id', the only acceptable type // parameter bound is 'id'. if (boundObjC->isObjCIdType()) continue; } else if (S.Context.canAssignObjCInterfaces(boundObjC, typeArgObjC)) { // Otherwise, we follow the assignability rules. continue; } // Diagnose the mismatch. S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(), diag::err_objc_type_arg_does_not_match_bound) << typeArg << bound << typeParam->getDeclName(); S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here) << typeParam->getDeclName(); if (failOnError) return QualType(); return type; } // Block pointer types are permitted for unqualified 'id' bounds. if (typeArg->isBlockPointerType()) { // If we don't have a type parameter to match against, assume // everything is fine. There was a prior pack expansion that // means we won't be able to match anything. if (!typeParam) { assert(anyPackExpansions && "Too many arguments?"); continue; } // Retrieve the bound. QualType bound = typeParam->getUnderlyingType(); if (bound->isBlockCompatibleObjCPointerType(S.Context)) continue; // Diagnose the mismatch. S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(), diag::err_objc_type_arg_does_not_match_bound) << typeArg << bound << typeParam->getDeclName(); S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here) << typeParam->getDeclName(); if (failOnError) return QualType(); return type; } // Dependent types will be checked at instantiation time. if (typeArg->isDependentType()) { continue; } // Diagnose non-id-compatible type arguments. 
S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(), diag::err_objc_type_arg_not_id_compatible) << typeArg << typeArgInfo->getTypeLoc().getSourceRange(); if (failOnError) return QualType(); return type; } // Make sure we didn't have the wrong number of arguments. if (!anyPackExpansions && finalTypeArgs.size() != numTypeParams) { S.Diag(loc, diag::err_objc_type_args_wrong_arity) << (typeArgs.size() < typeParams->size()) << objcClass->getDeclName() << (unsigned)finalTypeArgs.size() << (unsigned)numTypeParams; S.Diag(objcClass->getLocation(), diag::note_previous_decl) << objcClass; if (failOnError) return QualType(); return type; } // Success. Form the specialized type. return S.Context.getObjCObjectType(type, finalTypeArgs, { }, false); } QualType Sema::BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef Protocols, ArrayRef ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError) { QualType Result = QualType(Decl->getTypeForDecl(), 0); if (!Protocols.empty()) { bool HasError; Result = Context.applyObjCProtocolQualifiers(Result, Protocols, HasError); if (HasError) { Diag(SourceLocation(), diag::err_invalid_protocol_qualifiers) << SourceRange(ProtocolLAngleLoc, ProtocolRAngleLoc); if (FailOnError) Result = QualType(); } if (FailOnError && Result.isNull()) return QualType(); } return Result; } QualType Sema::BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef Protocols, ArrayRef ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError) { QualType Result = BaseType; if (!TypeArgs.empty()) { Result = applyObjCTypeArgs(*this, Loc, Result, TypeArgs, SourceRange(TypeArgsLAngleLoc, TypeArgsRAngleLoc), FailOnError); if (FailOnError && Result.isNull()) return QualType(); } if (!Protocols.empty()) { bool HasError; Result = Context.applyObjCProtocolQualifiers(Result, Protocols, HasError); if (HasError) { Diag(Loc, diag::err_invalid_protocol_qualifiers) << SourceRange(ProtocolLAngleLoc, ProtocolRAngleLoc); if (FailOnError) Result = QualType(); } if (FailOnError && Result.isNull()) return QualType(); } return Result; } TypeResult Sema::actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef protocols, ArrayRef protocolLocs, SourceLocation rAngleLoc) { // Form id. QualType Result = Context.getObjCObjectType( Context.ObjCBuiltinIdTy, { }, llvm::makeArrayRef( (ObjCProtocolDecl * const *)protocols.data(), protocols.size()), false); Result = Context.getObjCObjectPointerType(Result); TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result); TypeLoc ResultTL = ResultTInfo->getTypeLoc(); auto ObjCObjectPointerTL = ResultTL.castAs(); ObjCObjectPointerTL.setStarLoc(SourceLocation()); // implicit auto ObjCObjectTL = ObjCObjectPointerTL.getPointeeLoc() .castAs(); ObjCObjectTL.setHasBaseTypeAsWritten(false); ObjCObjectTL.getBaseLoc().initialize(Context, SourceLocation()); // No type arguments. ObjCObjectTL.setTypeArgsLAngleLoc(SourceLocation()); ObjCObjectTL.setTypeArgsRAngleLoc(SourceLocation()); // Fill in protocol qualifiers. ObjCObjectTL.setProtocolLAngleLoc(lAngleLoc); ObjCObjectTL.setProtocolRAngleLoc(rAngleLoc); for (unsigned i = 0, n = protocols.size(); i != n; ++i) ObjCObjectTL.setProtocolLoc(i, protocolLocs[i]); // We're done. Return the completed type to the parser. 
return CreateParsedType(Result, ResultTInfo); } TypeResult Sema::actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef Protocols, ArrayRef ProtocolLocs, SourceLocation ProtocolRAngleLoc) { TypeSourceInfo *BaseTypeInfo = nullptr; QualType T = GetTypeFromParser(BaseType, &BaseTypeInfo); if (T.isNull()) return true; // Handle missing type-source info. if (!BaseTypeInfo) BaseTypeInfo = Context.getTrivialTypeSourceInfo(T, Loc); // Extract type arguments. SmallVector ActualTypeArgInfos; for (unsigned i = 0, n = TypeArgs.size(); i != n; ++i) { TypeSourceInfo *TypeArgInfo = nullptr; QualType TypeArg = GetTypeFromParser(TypeArgs[i], &TypeArgInfo); if (TypeArg.isNull()) { ActualTypeArgInfos.clear(); break; } assert(TypeArgInfo && "No type source info?"); ActualTypeArgInfos.push_back(TypeArgInfo); } // Build the object type. QualType Result = BuildObjCObjectType( T, BaseTypeInfo->getTypeLoc().getSourceRange().getBegin(), TypeArgsLAngleLoc, ActualTypeArgInfos, TypeArgsRAngleLoc, ProtocolLAngleLoc, llvm::makeArrayRef((ObjCProtocolDecl * const *)Protocols.data(), Protocols.size()), ProtocolLocs, ProtocolRAngleLoc, /*FailOnError=*/false); if (Result == T) return BaseType; // Create source information for this type. TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result); TypeLoc ResultTL = ResultTInfo->getTypeLoc(); // For id or Class, we'll have an // object pointer type. Fill in source information for it. if (auto ObjCObjectPointerTL = ResultTL.getAs()) { // The '*' is implicit. ObjCObjectPointerTL.setStarLoc(SourceLocation()); ResultTL = ObjCObjectPointerTL.getPointeeLoc(); } if (auto OTPTL = ResultTL.getAs()) { // Protocol qualifier information. if (OTPTL.getNumProtocols() > 0) { assert(OTPTL.getNumProtocols() == Protocols.size()); OTPTL.setProtocolLAngleLoc(ProtocolLAngleLoc); OTPTL.setProtocolRAngleLoc(ProtocolRAngleLoc); for (unsigned i = 0, n = Protocols.size(); i != n; ++i) OTPTL.setProtocolLoc(i, ProtocolLocs[i]); } // We're done. Return the completed type to the parser. return CreateParsedType(Result, ResultTInfo); } auto ObjCObjectTL = ResultTL.castAs(); // Type argument information. if (ObjCObjectTL.getNumTypeArgs() > 0) { assert(ObjCObjectTL.getNumTypeArgs() == ActualTypeArgInfos.size()); ObjCObjectTL.setTypeArgsLAngleLoc(TypeArgsLAngleLoc); ObjCObjectTL.setTypeArgsRAngleLoc(TypeArgsRAngleLoc); for (unsigned i = 0, n = ActualTypeArgInfos.size(); i != n; ++i) ObjCObjectTL.setTypeArgTInfo(i, ActualTypeArgInfos[i]); } else { ObjCObjectTL.setTypeArgsLAngleLoc(SourceLocation()); ObjCObjectTL.setTypeArgsRAngleLoc(SourceLocation()); } // Protocol qualifier information. if (ObjCObjectTL.getNumProtocols() > 0) { assert(ObjCObjectTL.getNumProtocols() == Protocols.size()); ObjCObjectTL.setProtocolLAngleLoc(ProtocolLAngleLoc); ObjCObjectTL.setProtocolRAngleLoc(ProtocolRAngleLoc); for (unsigned i = 0, n = Protocols.size(); i != n; ++i) ObjCObjectTL.setProtocolLoc(i, ProtocolLocs[i]); } else { ObjCObjectTL.setProtocolLAngleLoc(SourceLocation()); ObjCObjectTL.setProtocolRAngleLoc(SourceLocation()); } // Base type. ObjCObjectTL.setHasBaseTypeAsWritten(true); if (ObjCObjectTL.getType() == T) ObjCObjectTL.getBaseLoc().initializeFullCopy(BaseTypeInfo->getTypeLoc()); else ObjCObjectTL.getBaseLoc().initialize(Context, Loc); // We're done. Return the completed type to the parser. 
return CreateParsedType(Result, ResultTInfo); } static OpenCLAccessAttr::Spelling getImageAccess(const ParsedAttributesView &Attrs) { for (const ParsedAttr &AL : Attrs) if (AL.getKind() == ParsedAttr::AT_OpenCLAccess) return static_cast(AL.getSemanticSpelling()); return OpenCLAccessAttr::Keyword_read_only; } static QualType ConvertConstrainedAutoDeclSpecToType(Sema &S, DeclSpec &DS, AutoTypeKeyword AutoKW) { assert(DS.isConstrainedAuto()); TemplateIdAnnotation *TemplateId = DS.getRepAsTemplateId(); TemplateArgumentListInfo TemplateArgsInfo; TemplateArgsInfo.setLAngleLoc(TemplateId->LAngleLoc); TemplateArgsInfo.setRAngleLoc(TemplateId->RAngleLoc); ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(), TemplateId->NumArgs); S.translateTemplateArguments(TemplateArgsPtr, TemplateArgsInfo); llvm::SmallVector TemplateArgs; for (auto &ArgLoc : TemplateArgsInfo.arguments()) TemplateArgs.push_back(ArgLoc.getArgument()); return S.Context.getAutoType(QualType(), AutoTypeKeyword::Auto, false, /*IsPack=*/false, cast(TemplateId->Template.get() .getAsTemplateDecl()), TemplateArgs); } /// Convert the specified declspec to the appropriate type /// object. /// \param state Specifies the declarator containing the declaration specifier /// to be converted, along with other associated processing state. /// \returns The type described by the declaration specifiers. This function /// never returns null. static QualType ConvertDeclSpecToType(TypeProcessingState &state) { // FIXME: Should move the logic from DeclSpec::Finish to here for validity // checking. Sema &S = state.getSema(); Declarator &declarator = state.getDeclarator(); DeclSpec &DS = declarator.getMutableDeclSpec(); SourceLocation DeclLoc = declarator.getIdentifierLoc(); if (DeclLoc.isInvalid()) DeclLoc = DS.getBeginLoc(); ASTContext &Context = S.Context; QualType Result; switch (DS.getTypeSpecType()) { case DeclSpec::TST_void: Result = Context.VoidTy; break; case DeclSpec::TST_char: if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified) Result = Context.CharTy; else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed) Result = Context.SignedCharTy; else { assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned && "Unknown TSS value"); Result = Context.UnsignedCharTy; } break; case DeclSpec::TST_wchar: if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified) Result = Context.WCharTy; else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed) { S.Diag(DS.getTypeSpecSignLoc(), diag::ext_wchar_t_sign_spec) << DS.getSpecifierName(DS.getTypeSpecType(), Context.getPrintingPolicy()); Result = Context.getSignedWCharType(); } else { assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned && "Unknown TSS value"); S.Diag(DS.getTypeSpecSignLoc(), diag::ext_wchar_t_sign_spec) << DS.getSpecifierName(DS.getTypeSpecType(), Context.getPrintingPolicy()); Result = Context.getUnsignedWCharType(); } break; case DeclSpec::TST_char8: assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified && "Unknown TSS value"); Result = Context.Char8Ty; break; case DeclSpec::TST_char16: assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified && "Unknown TSS value"); Result = Context.Char16Ty; break; case DeclSpec::TST_char32: assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified && "Unknown TSS value"); Result = Context.Char32Ty; break; case DeclSpec::TST_unspecified: // If this is a missing declspec in a block literal return context, then it // is inferred from the return statements inside the block. 
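An illustrative C++20 snippet (not from the patch; names are examples) of the constrained-auto path handled by ConvertConstrainedAutoDeclSpecToType above: the TemplateId stored in the DeclSpec names the concept and the explicitly written arguments, which become the constraint recorded on the deduced AutoType.

    #include <type_traits>

    template <typename T, typename U>
    concept ConvertibleTo = std::is_convertible_v<T, U>;

    // Decl spec is "ConvertibleTo<int> auto": the concept plus its written
    // argument are recorded on the deduced placeholder type.
    ConvertibleTo<int> auto x = 42L;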
// The declspec is always missing in a lambda expr context; it is either // specified with a trailing return type or inferred. if (S.getLangOpts().CPlusPlus14 && declarator.getContext() == DeclaratorContext::LambdaExprContext) { // In C++1y, a lambda's implicit return type is 'auto'. Result = Context.getAutoDeductType(); break; } else if (declarator.getContext() == DeclaratorContext::LambdaExprContext || checkOmittedBlockReturnType(S, declarator, Context.DependentTy)) { Result = Context.DependentTy; break; } // Unspecified typespec defaults to int in C90. However, the C90 grammar // [C90 6.5] only allows a decl-spec if there was *some* type-specifier, // type-qualifier, or storage-class-specifier. If not, emit an extwarn. // Note that the one exception to this is function definitions, which are // allowed to be completely missing a declspec. This is handled in the // parser already though by it pretending to have seen an 'int' in this // case. if (S.getLangOpts().ImplicitInt) { // In C89 mode, we only warn if there is a completely missing declspec // when one is not allowed. if (DS.isEmpty()) { S.Diag(DeclLoc, diag::ext_missing_declspec) << DS.getSourceRange() << FixItHint::CreateInsertion(DS.getBeginLoc(), "int"); } } else if (!DS.hasTypeSpecifier()) { // C99 and C++ require a type specifier. For example, C99 6.7.2p2 says: // "At least one type specifier shall be given in the declaration // specifiers in each declaration, and in the specifier-qualifier list in // each struct declaration and type name." if (S.getLangOpts().CPlusPlus && !DS.isTypeSpecPipe()) { S.Diag(DeclLoc, diag::err_missing_type_specifier) << DS.getSourceRange(); // When this occurs in C++ code, often something is very broken with the // value being declared, poison it as invalid so we don't get chains of // errors. declarator.setInvalidType(true); } else if ((S.getLangOpts().OpenCLVersion >= 200 || S.getLangOpts().OpenCLCPlusPlus) && DS.isTypeSpecPipe()) { S.Diag(DeclLoc, diag::err_missing_actual_pipe_type) << DS.getSourceRange(); declarator.setInvalidType(true); } else { S.Diag(DeclLoc, diag::ext_missing_type_specifier) << DS.getSourceRange(); } } LLVM_FALLTHROUGH; case DeclSpec::TST_int: { if (DS.getTypeSpecSign() != DeclSpec::TSS_unsigned) { switch (DS.getTypeSpecWidth()) { case DeclSpec::TSW_unspecified: Result = Context.IntTy; break; case DeclSpec::TSW_short: Result = Context.ShortTy; break; case DeclSpec::TSW_long: Result = Context.LongTy; break; case DeclSpec::TSW_longlong: Result = Context.LongLongTy; // 'long long' is a C99 or C++11 feature. if (!S.getLangOpts().C99) { if (S.getLangOpts().CPlusPlus) S.Diag(DS.getTypeSpecWidthLoc(), S.getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong); else S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_c99_longlong); } break; } } else { switch (DS.getTypeSpecWidth()) { case DeclSpec::TSW_unspecified: Result = Context.UnsignedIntTy; break; case DeclSpec::TSW_short: Result = Context.UnsignedShortTy; break; case DeclSpec::TSW_long: Result = Context.UnsignedLongTy; break; case DeclSpec::TSW_longlong: Result = Context.UnsignedLongLongTy; // 'long long' is a C99 or C++11 feature. if (!S.getLangOpts().C99) { if (S.getLangOpts().CPlusPlus) S.Diag(DS.getTypeSpecWidthLoc(), S.getLangOpts().CPlusPlus11 ? 
diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong); else S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_c99_longlong); } break; } } break; } case DeclSpec::TST_extint: { if (!S.Context.getTargetInfo().hasExtIntType()) S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported) << "_ExtInt"; Result = S.BuildExtIntType(DS.getTypeSpecSign() == TSS_unsigned, DS.getRepAsExpr(), DS.getBeginLoc()); if (Result.isNull()) { Result = Context.IntTy; declarator.setInvalidType(true); } break; } case DeclSpec::TST_accum: { switch (DS.getTypeSpecWidth()) { case DeclSpec::TSW_short: Result = Context.ShortAccumTy; break; case DeclSpec::TSW_unspecified: Result = Context.AccumTy; break; case DeclSpec::TSW_long: Result = Context.LongAccumTy; break; case DeclSpec::TSW_longlong: llvm_unreachable("Unable to specify long long as _Accum width"); } if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned) Result = Context.getCorrespondingUnsignedType(Result); if (DS.isTypeSpecSat()) Result = Context.getCorrespondingSaturatedType(Result); break; } case DeclSpec::TST_fract: { switch (DS.getTypeSpecWidth()) { case DeclSpec::TSW_short: Result = Context.ShortFractTy; break; case DeclSpec::TSW_unspecified: Result = Context.FractTy; break; case DeclSpec::TSW_long: Result = Context.LongFractTy; break; case DeclSpec::TSW_longlong: llvm_unreachable("Unable to specify long long as _Fract width"); } if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned) Result = Context.getCorrespondingUnsignedType(Result); if (DS.isTypeSpecSat()) Result = Context.getCorrespondingSaturatedType(Result); break; } case DeclSpec::TST_int128: if (!S.Context.getTargetInfo().hasInt128Type() && !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice)) S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported) << "__int128"; if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned) Result = Context.UnsignedInt128Ty; else Result = Context.Int128Ty; break; case DeclSpec::TST_float16: // CUDA host and device may have different _Float16 support, therefore // do not diagnose _Float16 usage to avoid false alarm. // ToDo: more precise diagnostics for CUDA. 
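A usage sketch (illustrative, not part of the patch) of the _ExtInt handling above and of Sema::BuildExtIntType further down: the width must be an integer constant expression, a signed _ExtInt needs at least 2 bits, an unsigned one at least 1, and the width is capped by the LLVM integer-type limit.

    _ExtInt(7) a = 3;            // OK: signed, 7 bits
    unsigned _ExtInt(1) b = 1;   // OK: an unsigned _ExtInt may be a single bit
    // _ExtInt(1) c;             // error: signed _ExtInt must have at least 2 bits
    // _ExtInt(1 << 30) d;       // error: exceeds the maximum supported width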
if (!S.Context.getTargetInfo().hasFloat16Type() && !S.getLangOpts().CUDA && !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice)) S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported) << "_Float16"; Result = Context.Float16Ty; break; case DeclSpec::TST_half: Result = Context.HalfTy; break; case DeclSpec::TST_BFloat16: if (!S.Context.getTargetInfo().hasBFloat16Type()) S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported) << "__bf16"; Result = Context.BFloat16Ty; break; case DeclSpec::TST_float: Result = Context.FloatTy; break; case DeclSpec::TST_double: if (DS.getTypeSpecWidth() == DeclSpec::TSW_long) Result = Context.LongDoubleTy; else Result = Context.DoubleTy; break; case DeclSpec::TST_float128: if (!S.Context.getTargetInfo().hasFloat128Type() && !S.getLangOpts().SYCLIsDevice && !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice)) S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported) << "__float128"; Result = Context.Float128Ty; break; case DeclSpec::TST_bool: Result = Context.BoolTy; break; // _Bool or bool break; case DeclSpec::TST_decimal32: // _Decimal32 case DeclSpec::TST_decimal64: // _Decimal64 case DeclSpec::TST_decimal128: // _Decimal128 S.Diag(DS.getTypeSpecTypeLoc(), diag::err_decimal_unsupported); Result = Context.IntTy; declarator.setInvalidType(true); break; case DeclSpec::TST_class: case DeclSpec::TST_enum: case DeclSpec::TST_union: case DeclSpec::TST_struct: case DeclSpec::TST_interface: { TagDecl *D = dyn_cast_or_null(DS.getRepAsDecl()); if (!D) { // This can happen in C++ with ambiguous lookups. Result = Context.IntTy; declarator.setInvalidType(true); break; } // If the type is deprecated or unavailable, diagnose it. S.DiagnoseUseOfDecl(D, DS.getTypeSpecTypeNameLoc()); assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 && DS.getTypeSpecSign() == 0 && "No qualifiers on tag names!"); // TypeQuals handled by caller. Result = Context.getTypeDeclType(D); // In both C and C++, make an ElaboratedType. ElaboratedTypeKeyword Keyword = ElaboratedType::getKeywordForTypeSpec(DS.getTypeSpecType()); Result = S.getElaboratedType(Keyword, DS.getTypeSpecScope(), Result, DS.isTypeSpecOwned() ? D : nullptr); break; } case DeclSpec::TST_typename: { assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 && DS.getTypeSpecSign() == 0 && "Can't handle qualifiers on typedef names yet!"); Result = S.GetTypeFromParser(DS.getRepAsType()); if (Result.isNull()) { declarator.setInvalidType(true); } // TypeQuals handled by caller. break; } case DeclSpec::TST_typeofType: // FIXME: Preserve type source info. Result = S.GetTypeFromParser(DS.getRepAsType()); assert(!Result.isNull() && "Didn't get a type for typeof?"); if (!Result->isDependentType()) if (const TagType *TT = Result->getAs()) S.DiagnoseUseOfDecl(TT->getDecl(), DS.getTypeSpecTypeLoc()); // TypeQuals handled by caller. Result = Context.getTypeOfType(Result); break; case DeclSpec::TST_typeofExpr: { Expr *E = DS.getRepAsExpr(); assert(E && "Didn't get an expression for typeof?"); // TypeQuals handled by caller. Result = S.BuildTypeofExprType(E, DS.getTypeSpecTypeLoc()); if (Result.isNull()) { Result = Context.IntTy; declarator.setInvalidType(true); } break; } case DeclSpec::TST_decltype: { Expr *E = DS.getRepAsExpr(); assert(E && "Didn't get an expression for decltype?"); // TypeQuals handled by caller. 
Result = S.BuildDecltypeType(E, DS.getTypeSpecTypeLoc()); if (Result.isNull()) { Result = Context.IntTy; declarator.setInvalidType(true); } break; } case DeclSpec::TST_underlyingType: Result = S.GetTypeFromParser(DS.getRepAsType()); assert(!Result.isNull() && "Didn't get a type for __underlying_type?"); Result = S.BuildUnaryTransformType(Result, UnaryTransformType::EnumUnderlyingType, DS.getTypeSpecTypeLoc()); if (Result.isNull()) { Result = Context.IntTy; declarator.setInvalidType(true); } break; case DeclSpec::TST_auto: if (DS.isConstrainedAuto()) { Result = ConvertConstrainedAutoDeclSpecToType(S, DS, AutoTypeKeyword::Auto); break; } Result = Context.getAutoType(QualType(), AutoTypeKeyword::Auto, false); break; case DeclSpec::TST_auto_type: Result = Context.getAutoType(QualType(), AutoTypeKeyword::GNUAutoType, false); break; case DeclSpec::TST_decltype_auto: if (DS.isConstrainedAuto()) { Result = ConvertConstrainedAutoDeclSpecToType(S, DS, AutoTypeKeyword::DecltypeAuto); break; } Result = Context.getAutoType(QualType(), AutoTypeKeyword::DecltypeAuto, /*IsDependent*/ false); break; case DeclSpec::TST_unknown_anytype: Result = Context.UnknownAnyTy; break; case DeclSpec::TST_atomic: Result = S.GetTypeFromParser(DS.getRepAsType()); assert(!Result.isNull() && "Didn't get a type for _Atomic?"); Result = S.BuildAtomicType(Result, DS.getTypeSpecTypeLoc()); if (Result.isNull()) { Result = Context.IntTy; declarator.setInvalidType(true); } break; #define GENERIC_IMAGE_TYPE(ImgType, Id) \ case DeclSpec::TST_##ImgType##_t: \ switch (getImageAccess(DS.getAttributes())) { \ case OpenCLAccessAttr::Keyword_write_only: \ Result = Context.Id##WOTy; \ break; \ case OpenCLAccessAttr::Keyword_read_write: \ Result = Context.Id##RWTy; \ break; \ case OpenCLAccessAttr::Keyword_read_only: \ Result = Context.Id##ROTy; \ break; \ case OpenCLAccessAttr::SpellingNotCalculated: \ llvm_unreachable("Spelling not yet calculated"); \ } \ break; #include "clang/Basic/OpenCLImageTypes.def" case DeclSpec::TST_error: Result = Context.IntTy; declarator.setInvalidType(true); break; } // FIXME: we want resulting declarations to be marked invalid, but claiming // the type is invalid is too strong - e.g. it causes ActOnTypeName to return // a null type. if (Result->containsErrors()) declarator.setInvalidType(); if (S.getLangOpts().OpenCL && S.checkOpenCLDisabledTypeDeclSpec(DS, Result)) declarator.setInvalidType(true); bool IsFixedPointType = DS.getTypeSpecType() == DeclSpec::TST_accum || DS.getTypeSpecType() == DeclSpec::TST_fract; // Only fixed point types can be saturated if (DS.isTypeSpecSat() && !IsFixedPointType) S.Diag(DS.getTypeSpecSatLoc(), diag::err_invalid_saturation_spec) << DS.getSpecifierName(DS.getTypeSpecType(), Context.getPrintingPolicy()); // Handle complex types. if (DS.getTypeSpecComplex() == DeclSpec::TSC_complex) { if (S.getLangOpts().Freestanding) S.Diag(DS.getTypeSpecComplexLoc(), diag::ext_freestanding_complex); Result = Context.getComplexType(Result); } else if (DS.isTypeAltiVecVector()) { unsigned typeSize = static_cast(Context.getTypeSize(Result)); assert(typeSize > 0 && "type size for vector must be greater than 0 bits"); VectorType::VectorKind VecKind = VectorType::AltiVecVector; if (DS.isTypeAltiVecPixel()) VecKind = VectorType::AltiVecPixel; else if (DS.isTypeAltiVecBool()) VecKind = VectorType::AltiVecBool; Result = Context.getVectorType(Result, 128/typeSize, VecKind); } // FIXME: Imaginary. 
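For reference, an editorial sketch (not from the patch) of a few of the specifier kinds dispatched above, each reaching the corresponding Build*Type call:

    int i = 0;
    decltype(i + 1) j = 1;          // TST_decltype       -> Sema::BuildDecltypeType
    _Atomic(int) counter;           // TST_atomic         -> Sema::BuildAtomicType
    enum class E : short { A };
    __underlying_type(E) raw = 0;   // TST_underlyingType -> BuildUnaryTransformType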
if (DS.getTypeSpecComplex() == DeclSpec::TSC_imaginary) S.Diag(DS.getTypeSpecComplexLoc(), diag::err_imaginary_not_supported); // Before we process any type attributes, synthesize a block literal // function declarator if necessary. if (declarator.getContext() == DeclaratorContext::BlockLiteralContext) maybeSynthesizeBlockSignature(state, Result); // Apply any type attributes from the decl spec. This may cause the // list of type attributes to be temporarily saved while the type // attributes are pushed around. // pipe attributes will be handled later ( at GetFullTypeForDeclarator ) if (!DS.isTypeSpecPipe()) processTypeAttrs(state, Result, TAL_DeclSpec, DS.getAttributes()); // Apply const/volatile/restrict qualifiers to T. if (unsigned TypeQuals = DS.getTypeQualifiers()) { // Warn about CV qualifiers on function types. // C99 6.7.3p8: // If the specification of a function type includes any type qualifiers, // the behavior is undefined. // C++11 [dcl.fct]p7: // The effect of a cv-qualifier-seq in a function declarator is not the // same as adding cv-qualification on top of the function type. In the // latter case, the cv-qualifiers are ignored. if (Result->isFunctionType()) { diagnoseAndRemoveTypeQualifiers( S, DS, TypeQuals, Result, DeclSpec::TQ_const | DeclSpec::TQ_volatile, S.getLangOpts().CPlusPlus ? diag::warn_typecheck_function_qualifiers_ignored : diag::warn_typecheck_function_qualifiers_unspecified); // No diagnostic for 'restrict' or '_Atomic' applied to a // function type; we'll diagnose those later, in BuildQualifiedType. } // C++11 [dcl.ref]p1: // Cv-qualified references are ill-formed except when the // cv-qualifiers are introduced through the use of a typedef-name // or decltype-specifier, in which case the cv-qualifiers are ignored. // // There don't appear to be any other contexts in which a cv-qualified // reference type could be formed, so the 'ill-formed' clause here appears // to never happen. if (TypeQuals && Result->isReferenceType()) { diagnoseAndRemoveTypeQualifiers( S, DS, TypeQuals, Result, DeclSpec::TQ_const | DeclSpec::TQ_volatile | DeclSpec::TQ_atomic, diag::warn_typecheck_reference_qualifiers); } // C90 6.5.3 constraints: "The same type qualifier shall not appear more // than once in the same specifier-list or qualifier-list, either directly // or via one or more typedefs." if (!S.getLangOpts().C99 && !S.getLangOpts().CPlusPlus && TypeQuals & Result.getCVRQualifiers()) { if (TypeQuals & DeclSpec::TQ_const && Result.isConstQualified()) { S.Diag(DS.getConstSpecLoc(), diag::ext_duplicate_declspec) << "const"; } if (TypeQuals & DeclSpec::TQ_volatile && Result.isVolatileQualified()) { S.Diag(DS.getVolatileSpecLoc(), diag::ext_duplicate_declspec) << "volatile"; } // C90 doesn't have restrict nor _Atomic, so it doesn't force us to // produce a warning in this case. } QualType Qualified = S.BuildQualifiedType(Result, DeclLoc, TypeQuals, &DS); // If adding qualifiers fails, just use the unqualified type. if (Qualified.isNull()) declarator.setInvalidType(true); else Result = Qualified; } assert(!Result.isNull() && "This function should not return a null type"); return Result; } static std::string getPrintableNameForEntity(DeclarationName Entity) { if (Entity) return Entity.getAsString(); return "type name"; } QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS) { if (T.isNull()) return QualType(); // Ignore any attempt to form a cv-qualified reference. 
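The qualifier stripping above is easiest to see through typedefs, which are the only way a cv-qualifier from the decl spec can land on a function or reference type. An illustrative sketch (not from the patch):

    typedef void Fn();
    typedef int &IntRef;
    int n = 0;

    const Fn f;          // warning: 'const' on a function type has no effect
                         // (C++11 [dcl.fct]p7; undefined in C99 6.7.3p8)
    const IntRef r = n;  // warning: 'const' on a reference type is ignored
                         // (C++11 [dcl.ref]p1); r still has type int&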
if (T->isReferenceType()) { Qs.removeConst(); Qs.removeVolatile(); } // Enforce C99 6.7.3p2: "Types other than pointer types derived from // object or incomplete types shall not be restrict-qualified." if (Qs.hasRestrict()) { unsigned DiagID = 0; QualType ProblemTy; if (T->isAnyPointerType() || T->isReferenceType() || T->isMemberPointerType()) { QualType EltTy; if (T->isObjCObjectPointerType()) EltTy = T; else if (const MemberPointerType *PTy = T->getAs()) EltTy = PTy->getPointeeType(); else EltTy = T->getPointeeType(); // If we have a pointer or reference, the pointee must have an object // incomplete type. if (!EltTy->isIncompleteOrObjectType()) { DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee; ProblemTy = EltTy; } } else if (!T->isDependentType()) { DiagID = diag::err_typecheck_invalid_restrict_not_pointer; ProblemTy = T; } if (DiagID) { Diag(DS ? DS->getRestrictSpecLoc() : Loc, DiagID) << ProblemTy; Qs.removeRestrict(); } } return Context.getQualifiedType(T, Qs); } QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRAU, const DeclSpec *DS) { if (T.isNull()) return QualType(); // Ignore any attempt to form a cv-qualified reference. if (T->isReferenceType()) CVRAU &= ~(DeclSpec::TQ_const | DeclSpec::TQ_volatile | DeclSpec::TQ_atomic); // Convert from DeclSpec::TQ to Qualifiers::TQ by just dropping TQ_atomic and // TQ_unaligned; unsigned CVR = CVRAU & ~(DeclSpec::TQ_atomic | DeclSpec::TQ_unaligned); // C11 6.7.3/5: // If the same qualifier appears more than once in the same // specifier-qualifier-list, either directly or via one or more typedefs, // the behavior is the same as if it appeared only once. // // It's not specified what happens when the _Atomic qualifier is applied to // a type specified with the _Atomic specifier, but we assume that this // should be treated as if the _Atomic qualifier appeared multiple times. if (CVRAU & DeclSpec::TQ_atomic && !T->isAtomicType()) { // C11 6.7.3/5: // If other qualifiers appear along with the _Atomic qualifier in a // specifier-qualifier-list, the resulting type is the so-qualified // atomic type. // // Don't need to worry about array types here, since _Atomic can't be // applied to such types. SplitQualType Split = T.getSplitUnqualifiedType(); T = BuildAtomicType(QualType(Split.Ty, 0), DS ? DS->getAtomicSpecLoc() : Loc); if (T.isNull()) return T; Split.Quals.addCVRQualifiers(CVR); return BuildQualifiedType(T, Loc, Split.Quals); } Qualifiers Q = Qualifiers::fromCVRMask(CVR); Q.setUnaligned(CVRAU & DeclSpec::TQ_unaligned); return BuildQualifiedType(T, Loc, Q, DS); } /// Build a paren type including \p T. QualType Sema::BuildParenType(QualType T) { return Context.getParenType(T); } /// Given that we're building a pointer or reference to the given static QualType inferARCLifetimeForPointee(Sema &S, QualType type, SourceLocation loc, bool isReference) { // Bail out if retention is unrequired or already specified. if (!type->isObjCLifetimeType() || type.getObjCLifetime() != Qualifiers::OCL_None) return type; Qualifiers::ObjCLifetime implicitLifetime = Qualifiers::OCL_None; // If the object type is const-qualified, we can safely use // __unsafe_unretained. This is safe (because there are no read // barriers), and it'll be safe to coerce anything but __weak* to // the resulting type. if (type.isConstQualified()) { implicitLifetime = Qualifiers::OCL_ExplicitNone; // Otherwise, check whether the static type does not require // retaining. 
This currently only triggers for Class (possibly
  // protocol-qualified, and arrays thereof).
  } else if (type->isObjCARCImplicitlyUnretainedType()) {
    implicitLifetime = Qualifiers::OCL_ExplicitNone;

  // If we are in an unevaluated context, like sizeof, skip adding a
  // qualification.
  } else if (S.isUnevaluatedContext()) {
    return type;

  // If that failed, give an error and recover using __strong.  __strong
  // is the option most likely to prevent spurious second-order diagnostics,
  // like when binding a reference to a field.
  } else {
    // These types can show up in private ivars in system headers, so
    // we need this to not be an error in those cases.  Instead we
    // want to delay.
    if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
      S.DelayedDiagnostics.add(
          sema::DelayedDiagnostic::makeForbiddenType(loc,
              diag::err_arc_indirect_no_ownership, type, isReference));
    } else {
      S.Diag(loc, diag::err_arc_indirect_no_ownership) << type << isReference;
    }
    implicitLifetime = Qualifiers::OCL_Strong;
  }
  assert(implicitLifetime && "didn't infer any lifetime!");

  Qualifiers qs;
  qs.addObjCLifetime(implicitLifetime);
  return S.Context.getQualifiedType(type, qs);
}

static std::string getFunctionQualifiersAsString(const FunctionProtoType *FnTy){
  std::string Quals = FnTy->getMethodQuals().getAsString();

  switch (FnTy->getRefQualifier()) {
  case RQ_None:
    break;

  case RQ_LValue:
    if (!Quals.empty()) Quals += ' ';
    Quals += '&';
    break;

  case RQ_RValue:
    if (!Quals.empty()) Quals += ' ';
    Quals += "&&";
    break;
  }

  return Quals;
}

namespace {

/// Kinds of declarator that cannot contain a qualified function type.
///
/// C++98 [dcl.fct]p4 / C++11 [dcl.fct]p6:
///   a function type with a cv-qualifier or a ref-qualifier can only appear
///   at the topmost level of a type.
///
/// Parens and member pointers are permitted. We don't diagnose array and
/// function declarators, because they don't allow function types at all.
///
/// The values of this enum are used in diagnostics.
enum QualifiedFunctionKind { QFK_BlockPointer, QFK_Pointer, QFK_Reference };

} // end anonymous namespace

/// Check whether the type T is a qualified function type, and if it is,
/// diagnose that it cannot be contained within the given kind of declarator.
static bool checkQualifiedFunction(Sema &S, QualType T, SourceLocation Loc,
                                   QualifiedFunctionKind QFK) {
  // Does T refer to a function type with a cv-qualifier or a ref-qualifier?
  const FunctionProtoType *FPT = T->getAs<FunctionProtoType>();
  if (!FPT ||
      (FPT->getMethodQuals().empty() && FPT->getRefQualifier() == RQ_None))
    return false;

  S.Diag(Loc, diag::err_compound_qualified_function_type)
    << QFK << isa<BlockPointerType>(T.IgnoreParens()) << T
    << getFunctionQualifiersAsString(FPT);
  return true;
}

bool Sema::CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc) {
  const FunctionProtoType *FPT = T->getAs<FunctionProtoType>();
  if (!FPT ||
      (FPT->getMethodQuals().empty() && FPT->getRefQualifier() == RQ_None))
    return false;

  Diag(Loc, diag::err_qualified_function_typeid)
      << T << getFunctionQualifiersAsString(FPT);
  return true;
}

// Helper to deduce addr space of a pointee type in OpenCL mode.
static QualType deduceOpenCLPointeeAddrSpace(Sema &S, QualType PointeeType) {
  if (!PointeeType->isUndeducedAutoType() && !PointeeType->isDependentType() &&
      !PointeeType->isSamplerT() && !PointeeType.hasAddressSpace())
    PointeeType = S.getASTContext().getAddrSpaceQualType(
        PointeeType, S.getLangOpts().OpenCLCPlusPlus ||
                             S.getLangOpts().OpenCLVersion == 200
                         ? LangAS::opencl_generic
                         : LangAS::opencl_private);
  return PointeeType;
}

/// Build a pointer type.
/// /// \param T The type to which we'll be building a pointer. /// /// \param Loc The location of the entity whose type involves this /// pointer type or, if there is no such entity, the location of the /// type that will have pointer type. /// /// \param Entity The name of the entity that involves the pointer /// type, if known. /// /// \returns A suitable pointer type, if there are no /// errors. Otherwise, returns a NULL type. QualType Sema::BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity) { if (T->isReferenceType()) { // C++ 8.3.2p4: There shall be no ... pointers to references ... Diag(Loc, diag::err_illegal_decl_pointer_to_reference) << getPrintableNameForEntity(Entity) << T; return QualType(); } if (T->isFunctionType() && getLangOpts().OpenCL) { Diag(Loc, diag::err_opencl_function_pointer); return QualType(); } if (checkQualifiedFunction(*this, T, Loc, QFK_Pointer)) return QualType(); assert(!T->isObjCObjectType() && "Should build ObjCObjectPointerType"); // In ARC, it is forbidden to build pointers to unqualified pointers. if (getLangOpts().ObjCAutoRefCount) T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ false); if (getLangOpts().OpenCL) T = deduceOpenCLPointeeAddrSpace(*this, T); // Build the pointer type. return Context.getPointerType(T); } /// Build a reference type. /// /// \param T The type to which we'll be building a reference. /// /// \param Loc The location of the entity whose type involves this /// reference type or, if there is no such entity, the location of the /// type that will have reference type. /// /// \param Entity The name of the entity that involves the reference /// type, if known. /// /// \returns A suitable reference type, if there are no /// errors. Otherwise, returns a NULL type. QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue, SourceLocation Loc, DeclarationName Entity) { assert(Context.getCanonicalType(T) != Context.OverloadTy && "Unresolved overloaded function type"); // C++0x [dcl.ref]p6: // If a typedef (7.1.3), a type template-parameter (14.3.1), or a // decltype-specifier (7.1.6.2) denotes a type TR that is a reference to a // type T, an attempt to create the type "lvalue reference to cv TR" creates // the type "lvalue reference to T", while an attempt to create the type // "rvalue reference to cv TR" creates the type TR. bool LValueRef = SpelledAsLValue || T->getAs(); // C++ [dcl.ref]p4: There shall be no references to references. // // According to C++ DR 106, references to references are only // diagnosed when they are written directly (e.g., "int & &"), // but not when they happen via a typedef: // // typedef int& intref; // typedef intref& intref2; // // Parser::ParseDeclaratorInternal diagnoses the case where // references are written directly; here, we handle the // collapsing of references-to-references as described in C++0x. // DR 106 and 540 introduce reference-collapsing into C++98/03. // C++ [dcl.ref]p1: // A declarator that specifies the type "reference to cv void" // is ill-formed. if (T->isVoidType()) { Diag(Loc, diag::err_reference_to_void); return QualType(); } if (checkQualifiedFunction(*this, T, Loc, QFK_Reference)) return QualType(); // In ARC, it is forbidden to build references to unqualified pointers. if (getLangOpts().ObjCAutoRefCount) T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ true); if (getLangOpts().OpenCL) T = deduceOpenCLPointeeAddrSpace(*this, T); // Handle restrict on references. 
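A brief illustration (not part of the patch) of the rules enforced by BuildPointerType and BuildReferenceType above: pointers to references are rejected, references to void are ill-formed, and a reference to a reference formed through a typedef collapses as described in C++11 [dcl.ref]p6.

    typedef int &IntRef;
    int n = 0;

    IntRef &rr = n;     // OK: collapses to int& rather than "int& &"
    // int &*p = &n;    // error: 'p' declared as a pointer to a reference
    // void &v = n;     // error: cannot form a reference to 'void'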
if (LValueRef) return Context.getLValueReferenceType(T, SpelledAsLValue); return Context.getRValueReferenceType(T); } /// Build a Read-only Pipe type. /// /// \param T The type to which we'll be building a Pipe. /// /// \param Loc We do not use it for now. /// /// \returns A suitable pipe type, if there are no errors. Otherwise, returns a /// NULL type. QualType Sema::BuildReadPipeType(QualType T, SourceLocation Loc) { return Context.getReadPipeType(T); } /// Build a Write-only Pipe type. /// /// \param T The type to which we'll be building a Pipe. /// /// \param Loc We do not use it for now. /// /// \returns A suitable pipe type, if there are no errors. Otherwise, returns a /// NULL type. QualType Sema::BuildWritePipeType(QualType T, SourceLocation Loc) { return Context.getWritePipeType(T); } /// Build a extended int type. /// /// \param IsUnsigned Boolean representing the signedness of the type. /// /// \param BitWidth Size of this int type in bits, or an expression representing /// that. /// /// \param Loc Location of the keyword. QualType Sema::BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc) { if (BitWidth->isInstantiationDependent()) return Context.getDependentExtIntType(IsUnsigned, BitWidth); llvm::APSInt Bits(32); ExprResult ICE = VerifyIntegerConstantExpression(BitWidth, &Bits); if (ICE.isInvalid()) return QualType(); int64_t NumBits = Bits.getSExtValue(); if (!IsUnsigned && NumBits < 2) { Diag(Loc, diag::err_ext_int_bad_size) << 0; return QualType(); } if (IsUnsigned && NumBits < 1) { Diag(Loc, diag::err_ext_int_bad_size) << 1; return QualType(); } if (NumBits > llvm::IntegerType::MAX_INT_BITS) { Diag(Loc, diag::err_ext_int_max_size) << IsUnsigned << llvm::IntegerType::MAX_INT_BITS; return QualType(); } return Context.getExtIntType(IsUnsigned, NumBits); } /// Check whether the specified array size makes the array type a VLA. If so, /// return true, if not, return the size of the array in SizeVal. static bool isArraySizeVLA(Sema &S, Expr *ArraySize, llvm::APSInt &SizeVal) { // If the size is an ICE, it certainly isn't a VLA. If we're in a GNU mode // (like gnu99, but not c99) accept any evaluatable value as an extension. class VLADiagnoser : public Sema::VerifyICEDiagnoser { public: VLADiagnoser() : Sema::VerifyICEDiagnoser(true) {} void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override { } void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR) override { S.Diag(Loc, diag::ext_vla_folded_to_constant) << SR; } } Diagnoser; return S.VerifyIntegerConstantExpression(ArraySize, &SizeVal, Diagnoser, S.LangOpts.GNUMode || S.LangOpts.OpenCL).isInvalid(); } /// Build an array type. /// /// \param T The type of each element in the array. /// /// \param ASM C99 array size modifier (e.g., '*', 'static'). /// /// \param ArraySize Expression describing the size of the array. /// /// \param Brackets The range from the opening '[' to the closing ']'. /// /// \param Entity The name of the entity that involves the array /// type, if known. /// /// \returns A suitable array type, if there are no errors. Otherwise, /// returns a NULL type. QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity) { SourceLocation Loc = Brackets.getBegin(); if (getLangOpts().CPlusPlus) { // C++ [dcl.array]p1: // T is called the array element type; this type shall not be a reference // type, the (possibly cv-qualified) type void, a function type or an // abstract class type. 
// // C++ [dcl.array]p3: // When several "array of" specifications are adjacent, [...] only the // first of the constant expressions that specify the bounds of the arrays // may be omitted. // // Note: function types are handled in the common path with C. if (T->isReferenceType()) { Diag(Loc, diag::err_illegal_decl_array_of_references) << getPrintableNameForEntity(Entity) << T; return QualType(); } if (T->isVoidType() || T->isIncompleteArrayType()) { Diag(Loc, diag::err_array_incomplete_or_sizeless_type) << 0 << T; return QualType(); } if (RequireNonAbstractType(Brackets.getBegin(), T, diag::err_array_of_abstract_type)) return QualType(); // Mentioning a member pointer type for an array type causes us to lock in // an inheritance model, even if it's inside an unused typedef. if (Context.getTargetInfo().getCXXABI().isMicrosoft()) if (const MemberPointerType *MPTy = T->getAs()) if (!MPTy->getClass()->isDependentType()) (void)isCompleteType(Loc, T); } else { // C99 6.7.5.2p1: If the element type is an incomplete or function type, // reject it (e.g. void ary[7], struct foo ary[7], void ary[7]()) if (RequireCompleteSizedType(Loc, T, diag::err_array_incomplete_or_sizeless_type)) return QualType(); } if (T->isSizelessType()) { Diag(Loc, diag::err_array_incomplete_or_sizeless_type) << 1 << T; return QualType(); } if (T->isFunctionType()) { Diag(Loc, diag::err_illegal_decl_array_of_functions) << getPrintableNameForEntity(Entity) << T; return QualType(); } if (const RecordType *EltTy = T->getAs()) { // If the element type is a struct or union that contains a variadic // array, accept it as a GNU extension: C99 6.7.2.1p2. if (EltTy->getDecl()->hasFlexibleArrayMember()) Diag(Loc, diag::ext_flexible_array_in_array) << T; } else if (T->isObjCObjectType()) { Diag(Loc, diag::err_objc_array_of_interfaces) << T; return QualType(); } // Do placeholder conversions on the array size expression. if (ArraySize && ArraySize->hasPlaceholderType()) { ExprResult Result = CheckPlaceholderExpr(ArraySize); if (Result.isInvalid()) return QualType(); ArraySize = Result.get(); } // Do lvalue-to-rvalue conversions on the array size expression. if (ArraySize && !ArraySize->isRValue()) { ExprResult Result = DefaultLvalueConversion(ArraySize); if (Result.isInvalid()) return QualType(); ArraySize = Result.get(); } // C99 6.7.5.2p1: The size expression shall have integer type. // C++11 allows contextual conversions to such types. if (!getLangOpts().CPlusPlus11 && ArraySize && !ArraySize->isTypeDependent() && !ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) { Diag(ArraySize->getBeginLoc(), diag::err_array_size_non_int) << ArraySize->getType() << ArraySize->getSourceRange(); return QualType(); } llvm::APSInt ConstVal(Context.getTypeSize(Context.getSizeType())); if (!ArraySize) { if (ASM == ArrayType::Star) T = Context.getVariableArrayType(T, nullptr, ASM, Quals, Brackets); else T = Context.getIncompleteArrayType(T, ASM, Quals); } else if (ArraySize->isTypeDependent() || ArraySize->isValueDependent()) { T = Context.getDependentSizedArrayType(T, ArraySize, ASM, Quals, Brackets); } else if ((!T->isDependentType() && !T->isIncompleteType() && !T->isConstantSizeType()) || isArraySizeVLA(*this, ArraySize, ConstVal)) { // Even in C++11, don't allow contextual conversions in the array bound // of a VLA. 
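To make the element-type restrictions checked above concrete, an editorial example (not from the patch):

    struct Abstract { virtual void f() = 0; };

    int ok[4];            // fine: complete object element type
    // int &bad1[4];      // error: array of references
    // void bad2[4];      // error: array of an incomplete type ('void')
    // int bad3[4]();     // error: array of functions
    // Abstract bad4[4];  // error: array of an abstract class type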
if (getLangOpts().CPlusPlus11 && !ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) { Diag(ArraySize->getBeginLoc(), diag::err_array_size_non_int) << ArraySize->getType() << ArraySize->getSourceRange(); return QualType(); } // C99: an array with an element type that has a non-constant-size is a VLA. // C99: an array with a non-ICE size is a VLA. We accept any expression // that we can fold to a non-zero positive value as an extension. T = Context.getVariableArrayType(T, ArraySize, ASM, Quals, Brackets); } else { // C99 6.7.5.2p1: If the expression is a constant expression, it shall // have a value greater than zero. if (ConstVal.isSigned() && ConstVal.isNegative()) { if (Entity) Diag(ArraySize->getBeginLoc(), diag::err_decl_negative_array_size) << getPrintableNameForEntity(Entity) << ArraySize->getSourceRange(); else Diag(ArraySize->getBeginLoc(), diag::err_typecheck_negative_array_size) << ArraySize->getSourceRange(); return QualType(); } if (ConstVal == 0) { // GCC accepts zero sized static arrays. We allow them when // we're not in a SFINAE context. Diag(ArraySize->getBeginLoc(), isSFINAEContext() ? diag::err_typecheck_zero_array_size : diag::ext_typecheck_zero_array_size) << ArraySize->getSourceRange(); } else if (!T->isDependentType() && !T->isVariablyModifiedType() && !T->isIncompleteType() && !T->isUndeducedType()) { // Is the array too large? unsigned ActiveSizeBits = ConstantArrayType::getNumAddressingBits(Context, T, ConstVal); if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) { Diag(ArraySize->getBeginLoc(), diag::err_array_too_large) << ConstVal.toString(10) << ArraySize->getSourceRange(); return QualType(); } } T = Context.getConstantArrayType(T, ConstVal, ArraySize, ASM, Quals); } // OpenCL v1.2 s6.9.d: variable length arrays are not supported. if (getLangOpts().OpenCL && T->isVariableArrayType()) { Diag(Loc, diag::err_opencl_vla); return QualType(); } if (T->isVariableArrayType() && !Context.getTargetInfo().isVLASupported()) { // CUDA device code and some other targets don't support VLAs. targetDiag(Loc, (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) ? diag::err_cuda_vla : diag::err_vla_unsupported) << ((getLangOpts().CUDA && getLangOpts().CUDAIsDevice) ? CurrentCUDATarget() : CFT_InvalidTarget); } // If this is not C99, extwarn about VLA's and C99 array size modifiers. if (!getLangOpts().C99) { if (T->isVariableArrayType()) { // Prohibit the use of VLAs during template argument deduction. if (isSFINAEContext()) { Diag(Loc, diag::err_vla_in_sfinae); return QualType(); } // Just extwarn about VLAs. else Diag(Loc, diag::ext_vla); } else if (ASM != ArrayType::Normal || Quals != 0) Diag(Loc, getLangOpts().CPlusPlus? diag::err_c99_array_usage_cxx : diag::ext_c99_array_usage) << ASM; } if (T->isVariableArrayType()) { // Warn about VLAs for -Wvla. Diag(Loc, diag::warn_vla_used); } // OpenCL v2.0 s6.12.5 - Arrays of blocks are not supported. // OpenCL v2.0 s6.16.13.1 - Arrays of pipe type are not supported. // OpenCL v2.0 s6.9.b - Arrays of image/sampler type are not supported. 
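The size handling above in practice (illustrative sketch, not from the patch): outside C99, a runtime bound is accepted only as a GNU extension and warned about under -Wvla, a zero bound is likewise an extension except in SFINAE contexts, and a negative constant bound is an error.

    void sizes(int n) {
      int vla[n];       // extension in C++: variable length array (-Wvla warns)
      int zero[0];      // extension: zero-size array
      // int neg[-1];   // error: array declared with a negative size
    }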
if (getLangOpts().OpenCL) { const QualType ArrType = Context.getBaseElementType(T); if (ArrType->isBlockPointerType() || ArrType->isPipeType() || ArrType->isSamplerT() || ArrType->isImageType()) { Diag(Loc, diag::err_opencl_invalid_type_array) << ArrType; return QualType(); } } return T; } QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr, SourceLocation AttrLoc) { // The base type must be integer (not Boolean or enumeration) or float, and // can't already be a vector. if (!CurType->isDependentType() && (!CurType->isBuiltinType() || CurType->isBooleanType() || (!CurType->isIntegerType() && !CurType->isRealFloatingType()))) { Diag(AttrLoc, diag::err_attribute_invalid_vector_type) << CurType; return QualType(); } if (SizeExpr->isTypeDependent() || SizeExpr->isValueDependent()) return Context.getDependentVectorType(CurType, SizeExpr, AttrLoc, VectorType::GenericVector); llvm::APSInt VecSize(32); if (!SizeExpr->isIntegerConstantExpr(VecSize, Context)) { Diag(AttrLoc, diag::err_attribute_argument_type) << "vector_size" << AANT_ArgumentIntegerConstant << SizeExpr->getSourceRange(); return QualType(); } if (CurType->isDependentType()) return Context.getDependentVectorType(CurType, SizeExpr, AttrLoc, VectorType::GenericVector); // vecSize is specified in bytes - convert to bits. if (!VecSize.isIntN(61)) { // Bit size will overflow uint64. Diag(AttrLoc, diag::err_attribute_size_too_large) << SizeExpr->getSourceRange() << "vector"; return QualType(); } uint64_t VectorSizeBits = VecSize.getZExtValue() * 8; unsigned TypeSize = static_cast(Context.getTypeSize(CurType)); if (VectorSizeBits == 0) { Diag(AttrLoc, diag::err_attribute_zero_size) << SizeExpr->getSourceRange() << "vector"; return QualType(); } if (VectorSizeBits % TypeSize) { Diag(AttrLoc, diag::err_attribute_invalid_size) << SizeExpr->getSourceRange(); return QualType(); } if (VectorSizeBits / TypeSize > std::numeric_limits::max()) { Diag(AttrLoc, diag::err_attribute_size_too_large) << SizeExpr->getSourceRange() << "vector"; return QualType(); } return Context.getVectorType(CurType, VectorSizeBits / TypeSize, VectorType::GenericVector); } /// Build an ext-vector type. /// /// Run the required checks for the extended vector type. QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc) { // Unlike gcc's vector_size attribute, we do not allow vectors to be defined // in conjunction with complex types (pointers, arrays, functions, etc.). // // Additionally, OpenCL prohibits vectors of booleans (they're considered a // reserved data type under OpenCL v2.0 s6.1.4), we don't support selects // on bitvectors, and we have no well-defined ABI for bitvectors, so vectors // of bool aren't allowed. if ((!T->isDependentType() && !T->isIntegerType() && !T->isRealFloatingType()) || T->isBooleanType()) { Diag(AttrLoc, diag::err_attribute_invalid_vector_type) << T; return QualType(); } if (!ArraySize->isTypeDependent() && !ArraySize->isValueDependent()) { llvm::APSInt vecSize(32); if (!ArraySize->isIntegerConstantExpr(vecSize, Context)) { Diag(AttrLoc, diag::err_attribute_argument_type) << "ext_vector_type" << AANT_ArgumentIntegerConstant << ArraySize->getSourceRange(); return QualType(); } if (!vecSize.isIntN(32)) { Diag(AttrLoc, diag::err_attribute_size_too_large) << ArraySize->getSourceRange() << "vector"; return QualType(); } // Unlike gcc's vector_size attribute, the size is specified as the // number of elements, not the number of bytes. 
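A sketch (not from the patch) of the vector_size checks above: the attribute argument is a byte count that must be an integer constant expression, non-zero, and a multiple of the element size, and the element type must be a non-boolean integer or floating-point type.

    typedef int v4si __attribute__((vector_size(16)));      // OK: four 4-byte lanes
    // typedef int bad1 __attribute__((vector_size(0)));    // error: zero vector size
    // typedef int bad2 __attribute__((vector_size(6)));    // error: not a multiple of sizeof(int)
    // typedef bool bad3 __attribute__((vector_size(16)));  // error: invalid element type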
unsigned vectorSize = static_cast(vecSize.getZExtValue()); if (vectorSize == 0) { Diag(AttrLoc, diag::err_attribute_zero_size) << ArraySize->getSourceRange() << "vector"; return QualType(); } return Context.getExtVectorType(T, vectorSize); } return Context.getDependentSizedExtVectorType(T, ArraySize, AttrLoc); } QualType Sema::BuildMatrixType(QualType ElementTy, Expr *NumRows, Expr *NumCols, SourceLocation AttrLoc) { assert(Context.getLangOpts().MatrixTypes && "Should never build a matrix type when it is disabled"); // Check element type, if it is not dependent. if (!ElementTy->isDependentType() && !MatrixType::isValidElementType(ElementTy)) { Diag(AttrLoc, diag::err_attribute_invalid_matrix_type) << ElementTy; return QualType(); } if (NumRows->isTypeDependent() || NumCols->isTypeDependent() || NumRows->isValueDependent() || NumCols->isValueDependent()) return Context.getDependentSizedMatrixType(ElementTy, NumRows, NumCols, AttrLoc); // Both row and column values can only be 20 bit wide currently. llvm::APSInt ValueRows(32), ValueColumns(32); bool const RowsIsInteger = NumRows->isIntegerConstantExpr(ValueRows, Context); bool const ColumnsIsInteger = NumCols->isIntegerConstantExpr(ValueColumns, Context); auto const RowRange = NumRows->getSourceRange(); auto const ColRange = NumCols->getSourceRange(); // Both are row and column expressions are invalid. if (!RowsIsInteger && !ColumnsIsInteger) { Diag(AttrLoc, diag::err_attribute_argument_type) << "matrix_type" << AANT_ArgumentIntegerConstant << RowRange << ColRange; return QualType(); } // Only the row expression is invalid. if (!RowsIsInteger) { Diag(AttrLoc, diag::err_attribute_argument_type) << "matrix_type" << AANT_ArgumentIntegerConstant << RowRange; return QualType(); } // Only the column expression is invalid. if (!ColumnsIsInteger) { Diag(AttrLoc, diag::err_attribute_argument_type) << "matrix_type" << AANT_ArgumentIntegerConstant << ColRange; return QualType(); } // Check the matrix dimensions. unsigned MatrixRows = static_cast(ValueRows.getZExtValue()); unsigned MatrixColumns = static_cast(ValueColumns.getZExtValue()); if (MatrixRows == 0 && MatrixColumns == 0) { Diag(AttrLoc, diag::err_attribute_zero_size) << "matrix" << RowRange << ColRange; return QualType(); } if (MatrixRows == 0) { Diag(AttrLoc, diag::err_attribute_zero_size) << "matrix" << RowRange; return QualType(); } if (MatrixColumns == 0) { Diag(AttrLoc, diag::err_attribute_zero_size) << "matrix" << ColRange; return QualType(); } if (!ConstantMatrixType::isDimensionValid(MatrixRows)) { Diag(AttrLoc, diag::err_attribute_size_too_large) << RowRange << "matrix row"; return QualType(); } if (!ConstantMatrixType::isDimensionValid(MatrixColumns)) { Diag(AttrLoc, diag::err_attribute_size_too_large) << ColRange << "matrix column"; return QualType(); } return Context.getConstantMatrixType(ElementTy, MatrixRows, MatrixColumns); } bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) { if (T->isArrayType() || T->isFunctionType()) { Diag(Loc, diag::err_func_returning_array_function) << T->isFunctionType() << T; return true; } // Functions cannot return half FP. if (T->isHalfType() && !getLangOpts().HalfArgsAndReturns) { Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 1 << FixItHint::CreateInsertion(Loc, "*"); return true; } // Methods cannot return interface types. All ObjC objects are // passed by reference. 
if (T->isObjCObjectType()) { Diag(Loc, diag::err_object_cannot_be_passed_returned_by_value) << 0 << T << FixItHint::CreateInsertion(Loc, "*"); return true; } if (T.hasNonTrivialToPrimitiveDestructCUnion() || T.hasNonTrivialToPrimitiveCopyCUnion()) checkNonTrivialCUnion(T, Loc, NTCUC_FunctionReturn, NTCUK_Destruct|NTCUK_Copy); // C++2a [dcl.fct]p12: // A volatile-qualified return type is deprecated if (T.isVolatileQualified() && getLangOpts().CPlusPlus20) Diag(Loc, diag::warn_deprecated_volatile_return) << T; return false; } /// Check the extended parameter information. Most of the necessary /// checking should occur when applying the parameter attribute; the /// only other checks required are positional restrictions. static void checkExtParameterInfos(Sema &S, ArrayRef paramTypes, const FunctionProtoType::ExtProtoInfo &EPI, llvm::function_ref getParamLoc) { assert(EPI.ExtParameterInfos && "shouldn't get here without param infos"); bool hasCheckedSwiftCall = false; auto checkForSwiftCC = [&](unsigned paramIndex) { // Only do this once. if (hasCheckedSwiftCall) return; hasCheckedSwiftCall = true; if (EPI.ExtInfo.getCC() == CC_Swift) return; S.Diag(getParamLoc(paramIndex), diag::err_swift_param_attr_not_swiftcall) << getParameterABISpelling(EPI.ExtParameterInfos[paramIndex].getABI()); }; for (size_t paramIndex = 0, numParams = paramTypes.size(); paramIndex != numParams; ++paramIndex) { switch (EPI.ExtParameterInfos[paramIndex].getABI()) { // Nothing interesting to check for orindary-ABI parameters. case ParameterABI::Ordinary: continue; // swift_indirect_result parameters must be a prefix of the function // arguments. case ParameterABI::SwiftIndirectResult: checkForSwiftCC(paramIndex); if (paramIndex != 0 && EPI.ExtParameterInfos[paramIndex - 1].getABI() != ParameterABI::SwiftIndirectResult) { S.Diag(getParamLoc(paramIndex), diag::err_swift_indirect_result_not_first); } continue; case ParameterABI::SwiftContext: checkForSwiftCC(paramIndex); continue; // swift_error parameters must be preceded by a swift_context parameter. case ParameterABI::SwiftErrorResult: checkForSwiftCC(paramIndex); if (paramIndex == 0 || EPI.ExtParameterInfos[paramIndex - 1].getABI() != ParameterABI::SwiftContext) { S.Diag(getParamLoc(paramIndex), diag::err_swift_error_result_not_after_swift_context); } continue; } llvm_unreachable("bad ABI kind"); } } QualType Sema::BuildFunctionType(QualType T, MutableArrayRef ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI) { bool Invalid = false; Invalid |= CheckFunctionReturnType(T, Loc); for (unsigned Idx = 0, Cnt = ParamTypes.size(); Idx < Cnt; ++Idx) { // FIXME: Loc is too inprecise here, should use proper locations for args. QualType ParamType = Context.getAdjustedParameterType(ParamTypes[Idx]); if (ParamType->isVoidType()) { Diag(Loc, diag::err_param_with_void_type); Invalid = true; } else if (ParamType->isHalfType() && !getLangOpts().HalfArgsAndReturns) { // Disallow half FP arguments. 
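// For illustration, hypothetical declarations showing the restriction when the
// target does not support half-precision arguments and returns:
//   void scale(__fp16 x);    // rejected: parameters cannot have fp16 type
//   void scale(__fp16 *x);   // OK: the fix-it suggests passing by pointer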
Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 0 << FixItHint::CreateInsertion(Loc, "*"); Invalid = true; } // C++2a [dcl.fct]p4: // A parameter with volatile-qualified type is deprecated if (ParamType.isVolatileQualified() && getLangOpts().CPlusPlus20) Diag(Loc, diag::warn_deprecated_volatile_param) << ParamType; ParamTypes[Idx] = ParamType; } if (EPI.ExtParameterInfos) { checkExtParameterInfos(*this, ParamTypes, EPI, [=](unsigned i) { return Loc; }); } if (EPI.ExtInfo.getProducesResult()) { // This is just a warning, so we can't fail to build if we see it. checkNSReturnsRetainedReturnType(Loc, T); } if (Invalid) return QualType(); return Context.getFunctionType(T, ParamTypes, EPI); } /// Build a member pointer type \c T Class::*. /// /// \param T the type to which the member pointer refers. /// \param Class the class type into which the member pointer points. /// \param Loc the location where this type begins /// \param Entity the name of the entity that will have this member pointer type /// /// \returns a member pointer type, if successful, or a NULL type if there was /// an error. QualType Sema::BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity) { // Verify that we're not building a pointer to pointer to function with // exception specification. if (CheckDistantExceptionSpec(T)) { Diag(Loc, diag::err_distant_exception_spec); return QualType(); } // C++ 8.3.3p3: A pointer to member shall not point to ... a member // with reference type, or "cv void." if (T->isReferenceType()) { Diag(Loc, diag::err_illegal_decl_mempointer_to_reference) << getPrintableNameForEntity(Entity) << T; return QualType(); } if (T->isVoidType()) { Diag(Loc, diag::err_illegal_decl_mempointer_to_void) << getPrintableNameForEntity(Entity); return QualType(); } if (!Class->isDependentType() && !Class->isRecordType()) { Diag(Loc, diag::err_mempointer_in_nonclass_type) << Class; return QualType(); } // Adjust the default free function calling convention to the default method // calling convention. bool IsCtorOrDtor = (Entity.getNameKind() == DeclarationName::CXXConstructorName) || (Entity.getNameKind() == DeclarationName::CXXDestructorName); if (T->isFunctionType()) adjustMemberFunctionCC(T, /*IsStatic=*/false, IsCtorOrDtor, Loc); return Context.getMemberPointerType(T, Class.getTypePtr()); } /// Build a block pointer type. /// /// \param T The type to which we'll be building a block pointer. /// /// \param Loc The source location, used for diagnostics. /// /// \param Entity The name of the entity that involves the block pointer /// type, if known. /// /// \returns A suitable block pointer type, if there are no /// errors. Otherwise, returns a NULL type. 
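// For illustration, a hypothetical block-pointer declaration of the kind this
// routine builds (the pointee must be a function type):
//   void (^handler)(int status);   // block taking an int, returning void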
QualType Sema::BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity) { if (!T->isFunctionType()) { Diag(Loc, diag::err_nonfunction_block_type); return QualType(); } if (checkQualifiedFunction(*this, T, Loc, QFK_BlockPointer)) return QualType(); if (getLangOpts().OpenCL) T = deduceOpenCLPointeeAddrSpace(*this, T); return Context.getBlockPointerType(T); } QualType Sema::GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo) { QualType QT = Ty.get(); if (QT.isNull()) { if (TInfo) *TInfo = nullptr; return QualType(); } TypeSourceInfo *DI = nullptr; if (const LocInfoType *LIT = dyn_cast(QT)) { QT = LIT->getType(); DI = LIT->getTypeSourceInfo(); } if (TInfo) *TInfo = DI; return QT; } static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state, Qualifiers::ObjCLifetime ownership, unsigned chunkIndex); /// Given that this is the declaration of a parameter under ARC, /// attempt to infer attributes and such for pointer-to-whatever /// types. static void inferARCWriteback(TypeProcessingState &state, QualType &declSpecType) { Sema &S = state.getSema(); Declarator &declarator = state.getDeclarator(); // TODO: should we care about decl qualifiers? // Check whether the declarator has the expected form. We walk // from the inside out in order to make the block logic work. unsigned outermostPointerIndex = 0; bool isBlockPointer = false; unsigned numPointers = 0; for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) { unsigned chunkIndex = i; DeclaratorChunk &chunk = declarator.getTypeObject(chunkIndex); switch (chunk.Kind) { case DeclaratorChunk::Paren: // Ignore parens. break; case DeclaratorChunk::Reference: case DeclaratorChunk::Pointer: // Count the number of pointers. Treat references // interchangeably as pointers; if they're mis-ordered, normal // type building will discover that. outermostPointerIndex = chunkIndex; numPointers++; break; case DeclaratorChunk::BlockPointer: // If we have a pointer to block pointer, that's an acceptable // indirect reference; anything else is not an application of // the rules. if (numPointers != 1) return; numPointers++; outermostPointerIndex = chunkIndex; isBlockPointer = true; // We don't care about pointer structure in return values here. goto done; case DeclaratorChunk::Array: // suppress if written (id[])? case DeclaratorChunk::Function: case DeclaratorChunk::MemberPointer: case DeclaratorChunk::Pipe: return; } } done: // If we have *one* pointer, then we want to throw the qualifier on // the declaration-specifiers, which means that it needs to be a // retainable object type. if (numPointers == 1) { // If it's not a retainable object type, the rule doesn't apply. if (!declSpecType->isObjCRetainableType()) return; // If it already has lifetime, don't do anything. if (declSpecType.getObjCLifetime()) return; // Otherwise, modify the type in-place. Qualifiers qs; if (declSpecType->isObjCARCImplicitlyUnretainedType()) qs.addObjCLifetime(Qualifiers::OCL_ExplicitNone); else qs.addObjCLifetime(Qualifiers::OCL_Autoreleasing); declSpecType = S.Context.getQualifiedType(declSpecType, qs); // If we have *two* pointers, then we want to throw the qualifier on // the outermost pointer. } else if (numPointers == 2) { // If we don't have a block pointer, we need to check whether the // declaration-specifiers gave us something that will turn into a // retainable object pointer after we slap the first pointer on it. 
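// For illustration, a hypothetical ARC parameter that this inference targets:
//   - (BOOL)save:(NSError **)error;
// Without an explicit ownership qualifier the indirect pointer is treated as
// NSError * __autoreleasing *error.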
if (!isBlockPointer && !declSpecType->isObjCObjectType()) return; // Look for an explicit lifetime attribute there. DeclaratorChunk &chunk = declarator.getTypeObject(outermostPointerIndex); if (chunk.Kind != DeclaratorChunk::Pointer && chunk.Kind != DeclaratorChunk::BlockPointer) return; for (const ParsedAttr &AL : chunk.getAttrs()) if (AL.getKind() == ParsedAttr::AT_ObjCOwnership) return; transferARCOwnershipToDeclaratorChunk(state, Qualifiers::OCL_Autoreleasing, outermostPointerIndex); // Any other number of pointers/references does not trigger the rule. } else return; // TODO: mark whether we did this inference? } void Sema::diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc, SourceLocation VolatileQualLoc, SourceLocation RestrictQualLoc, SourceLocation AtomicQualLoc, SourceLocation UnalignedQualLoc) { if (!Quals) return; struct Qual { const char *Name; unsigned Mask; SourceLocation Loc; } const QualKinds[5] = { { "const", DeclSpec::TQ_const, ConstQualLoc }, { "volatile", DeclSpec::TQ_volatile, VolatileQualLoc }, { "restrict", DeclSpec::TQ_restrict, RestrictQualLoc }, { "__unaligned", DeclSpec::TQ_unaligned, UnalignedQualLoc }, { "_Atomic", DeclSpec::TQ_atomic, AtomicQualLoc } }; SmallString<32> QualStr; unsigned NumQuals = 0; SourceLocation Loc; FixItHint FixIts[5]; // Build a string naming the redundant qualifiers. for (auto &E : QualKinds) { if (Quals & E.Mask) { if (!QualStr.empty()) QualStr += ' '; QualStr += E.Name; // If we have a location for the qualifier, offer a fixit. SourceLocation QualLoc = E.Loc; if (QualLoc.isValid()) { FixIts[NumQuals] = FixItHint::CreateRemoval(QualLoc); if (Loc.isInvalid() || getSourceManager().isBeforeInTranslationUnit(QualLoc, Loc)) Loc = QualLoc; } ++NumQuals; } } Diag(Loc.isInvalid() ? FallbackLoc : Loc, DiagID) << QualStr << NumQuals << FixIts[0] << FixIts[1] << FixIts[2] << FixIts[3]; } // Diagnose pointless type qualifiers on the return type of a function. static void diagnoseRedundantReturnTypeQualifiers(Sema &S, QualType RetTy, Declarator &D, unsigned FunctionChunkIndex) { if (D.getTypeObject(FunctionChunkIndex).Fun.hasTrailingReturnType()) { // FIXME: TypeSourceInfo doesn't preserve location information for // qualifiers. S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type, RetTy.getLocalCVRQualifiers(), D.getIdentifierLoc()); return; } for (unsigned OuterChunkIndex = FunctionChunkIndex + 1, End = D.getNumTypeObjects(); OuterChunkIndex != End; ++OuterChunkIndex) { DeclaratorChunk &OuterChunk = D.getTypeObject(OuterChunkIndex); switch (OuterChunk.Kind) { case DeclaratorChunk::Paren: continue; case DeclaratorChunk::Pointer: { DeclaratorChunk::PointerTypeInfo &PTI = OuterChunk.Ptr; S.diagnoseIgnoredQualifiers( diag::warn_qual_return_type, PTI.TypeQuals, SourceLocation(), SourceLocation::getFromRawEncoding(PTI.ConstQualLoc), SourceLocation::getFromRawEncoding(PTI.VolatileQualLoc), SourceLocation::getFromRawEncoding(PTI.RestrictQualLoc), SourceLocation::getFromRawEncoding(PTI.AtomicQualLoc), SourceLocation::getFromRawEncoding(PTI.UnalignedQualLoc)); return; } case DeclaratorChunk::Function: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::Reference: case DeclaratorChunk::Array: case DeclaratorChunk::MemberPointer: case DeclaratorChunk::Pipe: // FIXME: We can't currently provide an accurate source location and a // fix-it hint for these. unsigned AtomicQual = RetTy->isAtomicType() ? 
DeclSpec::TQ_atomic : 0; S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type, RetTy.getCVRQualifiers() | AtomicQual, D.getIdentifierLoc()); return; } llvm_unreachable("unknown declarator chunk kind"); } // If the qualifiers come from a conversion function type, don't diagnose // them -- they're not necessarily redundant, since such a conversion // operator can be explicitly called as "x.operator const int()". if (D.getName().getKind() == UnqualifiedIdKind::IK_ConversionFunctionId) return; // Just parens all the way out to the decl specifiers. Diagnose any qualifiers // which are present there. S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type, D.getDeclSpec().getTypeQualifiers(), D.getIdentifierLoc(), D.getDeclSpec().getConstSpecLoc(), D.getDeclSpec().getVolatileSpecLoc(), D.getDeclSpec().getRestrictSpecLoc(), D.getDeclSpec().getAtomicSpecLoc(), D.getDeclSpec().getUnalignedSpecLoc()); } -static void CopyTypeConstraintFromAutoType(Sema &SemaRef, const AutoType *Auto, - AutoTypeLoc AutoLoc, - TemplateTypeParmDecl *TP, - SourceLocation EllipsisLoc) { - - TemplateArgumentListInfo TAL(AutoLoc.getLAngleLoc(), AutoLoc.getRAngleLoc()); - for (unsigned Idx = 0; Idx < AutoLoc.getNumArgs(); ++Idx) - TAL.addArgument(AutoLoc.getArgLoc(Idx)); - - SemaRef.AttachTypeConstraint( - AutoLoc.getNestedNameSpecifierLoc(), AutoLoc.getConceptNameInfo(), - AutoLoc.getNamedConcept(), - AutoLoc.hasExplicitTemplateArgs() ? &TAL : nullptr, TP, EllipsisLoc); -} - -static QualType InventTemplateParameter( - TypeProcessingState &state, QualType T, TypeSourceInfo *TSI, AutoType *Auto, - InventedTemplateParameterInfo &Info) { +static std::pair +InventTemplateParameter(TypeProcessingState &state, QualType T, + TypeSourceInfo *TrailingTSI, AutoType *Auto, + InventedTemplateParameterInfo &Info) { Sema &S = state.getSema(); Declarator &D = state.getDeclarator(); const unsigned TemplateParameterDepth = Info.AutoTemplateParameterDepth; const unsigned AutoParameterPosition = Info.TemplateParams.size(); const bool IsParameterPack = D.hasEllipsis(); // If auto is mentioned in a lambda parameter or abbreviated function // template context, convert it to a template parameter type. // Create the TemplateTypeParmDecl here to retrieve the corresponding // template parameter type. Template parameters are temporarily added // to the TU until the associated TemplateDecl is created. TemplateTypeParmDecl *InventedTemplateParam = TemplateTypeParmDecl::Create( S.Context, S.Context.getTranslationUnitDecl(), /*KeyLoc=*/D.getDeclSpec().getTypeSpecTypeLoc(), /*NameLoc=*/D.getIdentifierLoc(), TemplateParameterDepth, AutoParameterPosition, S.InventAbbreviatedTemplateParameterTypeName( D.getIdentifier(), AutoParameterPosition), false, IsParameterPack, /*HasTypeConstraint=*/Auto->isConstrained()); InventedTemplateParam->setImplicit(); Info.TemplateParams.push_back(InventedTemplateParam); - // Attach type constraints + + // Attach type constraints to the new parameter. if (Auto->isConstrained()) { - if (TSI) { - CopyTypeConstraintFromAutoType( - S, Auto, TSI->getTypeLoc().getContainedAutoTypeLoc(), - InventedTemplateParam, D.getEllipsisLoc()); + if (TrailingTSI) { + // The 'auto' appears in a trailing return type we've already built; + // extract its type constraints to attach to the template parameter. 
+ AutoTypeLoc AutoLoc = TrailingTSI->getTypeLoc().getContainedAutoTypeLoc(); + TemplateArgumentListInfo TAL(AutoLoc.getLAngleLoc(), AutoLoc.getRAngleLoc()); + for (unsigned Idx = 0; Idx < AutoLoc.getNumArgs(); ++Idx) + TAL.addArgument(AutoLoc.getArgLoc(Idx)); + + S.AttachTypeConstraint(AutoLoc.getNestedNameSpecifierLoc(), + AutoLoc.getConceptNameInfo(), + AutoLoc.getNamedConcept(), + AutoLoc.hasExplicitTemplateArgs() ? &TAL : nullptr, + InventedTemplateParam, D.getEllipsisLoc()); } else { + // The 'auto' appears in the decl-specifiers; we've not finished forming + // TypeSourceInfo for it yet. TemplateIdAnnotation *TemplateId = D.getDeclSpec().getRepAsTemplateId(); TemplateArgumentListInfo TemplateArgsInfo; if (TemplateId->LAngleLoc.isValid()) { ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(), TemplateId->NumArgs); S.translateTemplateArguments(TemplateArgsPtr, TemplateArgsInfo); } S.AttachTypeConstraint( D.getDeclSpec().getTypeSpecScope().getWithLocInContext(S.Context), DeclarationNameInfo(DeclarationName(TemplateId->Name), TemplateId->TemplateNameLoc), cast(TemplateId->Template.get().getAsTemplateDecl()), TemplateId->LAngleLoc.isValid() ? &TemplateArgsInfo : nullptr, InventedTemplateParam, D.getEllipsisLoc()); } } - // If TSI is nullptr, this is a constrained declspec auto and the type - // constraint will be attached later in TypeSpecLocFiller - // Replace the 'auto' in the function parameter with this invented // template type parameter. // FIXME: Retain some type sugar to indicate that this was written // as 'auto'? - return state.ReplaceAutoType( - T, QualType(InventedTemplateParam->getTypeForDecl(), 0)); + QualType Replacement(InventedTemplateParam->getTypeForDecl(), 0); + QualType NewT = state.ReplaceAutoType(T, Replacement); + TypeSourceInfo *NewTSI = + TrailingTSI ? S.ReplaceAutoTypeSourceInfo(TrailingTSI, Replacement) + : nullptr; + return {NewT, NewTSI}; } static TypeSourceInfo * GetTypeSourceInfoForDeclarator(TypeProcessingState &State, QualType T, TypeSourceInfo *ReturnTypeInfo); static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state, TypeSourceInfo *&ReturnTypeInfo) { Sema &SemaRef = state.getSema(); Declarator &D = state.getDeclarator(); QualType T; ReturnTypeInfo = nullptr; // The TagDecl owned by the DeclSpec. TagDecl *OwnedTagDecl = nullptr; switch (D.getName().getKind()) { case UnqualifiedIdKind::IK_ImplicitSelfParam: case UnqualifiedIdKind::IK_OperatorFunctionId: case UnqualifiedIdKind::IK_Identifier: case UnqualifiedIdKind::IK_LiteralOperatorId: case UnqualifiedIdKind::IK_TemplateId: T = ConvertDeclSpecToType(state); if (!D.isInvalidType() && D.getDeclSpec().isTypeSpecOwned()) { OwnedTagDecl = cast(D.getDeclSpec().getRepAsDecl()); // Owned declaration is embedded in declarator. OwnedTagDecl->setEmbeddedInDeclarator(true); } break; case UnqualifiedIdKind::IK_ConstructorName: case UnqualifiedIdKind::IK_ConstructorTemplateId: case UnqualifiedIdKind::IK_DestructorName: // Constructors and destructors don't have return types. Use // "void" instead. T = SemaRef.Context.VoidTy; processTypeAttrs(state, T, TAL_DeclSpec, D.getMutableDeclSpec().getAttributes()); break; case UnqualifiedIdKind::IK_DeductionGuideName: // Deduction guides have a trailing return type and no type in their // decl-specifier sequence. Use a placeholder return type for now. T = SemaRef.Context.DependentTy; break; case UnqualifiedIdKind::IK_ConversionFunctionId: // The result type of a conversion function is the type that it // converts to. 
T = SemaRef.GetTypeFromParser(D.getName().ConversionFunctionId, &ReturnTypeInfo); break; } if (!D.getAttributes().empty()) distributeTypeAttrsFromDeclarator(state, T); + // Find the deduced type in this type. Look in the trailing return type if we + // have one, otherwise in the DeclSpec type. + // FIXME: The standard wording doesn't currently describe this. + DeducedType *Deduced = T->getContainedDeducedType(); + bool DeducedIsTrailingReturnType = false; + if (Deduced && isa(Deduced) && D.hasTrailingReturnType()) { + QualType T = SemaRef.GetTypeFromParser(D.getTrailingReturnType()); + Deduced = T.isNull() ? nullptr : T->getContainedDeducedType(); + DeducedIsTrailingReturnType = true; + } + // C++11 [dcl.spec.auto]p5: reject 'auto' if it is not in an allowed context. - if (DeducedType *Deduced = T->getContainedDeducedType()) { + if (Deduced) { AutoType *Auto = dyn_cast(Deduced); int Error = -1; // Is this a 'auto' or 'decltype(auto)' type (as opposed to __auto_type or // class template argument deduction)? bool IsCXXAutoType = (Auto && Auto->getKeyword() != AutoTypeKeyword::GNUAutoType); bool IsDeducedReturnType = false; switch (D.getContext()) { case DeclaratorContext::LambdaExprContext: // Declared return type of a lambda-declarator is implicit and is always // 'auto'. break; case DeclaratorContext::ObjCParameterContext: case DeclaratorContext::ObjCResultContext: Error = 0; break; case DeclaratorContext::RequiresExprContext: Error = 22; break; case DeclaratorContext::PrototypeContext: case DeclaratorContext::LambdaExprParameterContext: { InventedTemplateParameterInfo *Info = nullptr; if (D.getContext() == DeclaratorContext::PrototypeContext) { // With concepts we allow 'auto' in function parameters. if (!SemaRef.getLangOpts().CPlusPlus20 || !Auto || Auto->getKeyword() != AutoTypeKeyword::Auto) { Error = 0; break; } else if (!SemaRef.getCurScope()->isFunctionDeclarationScope()) { Error = 21; break; - } else if (D.hasTrailingReturnType()) { - // This might be OK, but we'll need to convert the trailing return - // type later. - break; } Info = &SemaRef.InventedParameterInfos.back(); } else { // In C++14, generic lambdas allow 'auto' in their parameters. if (!SemaRef.getLangOpts().CPlusPlus14 || !Auto || Auto->getKeyword() != AutoTypeKeyword::Auto) { Error = 16; break; } Info = SemaRef.getCurLambda(); assert(Info && "No LambdaScopeInfo on the stack!"); } - T = InventTemplateParameter(state, T, nullptr, Auto, *Info); + + // We'll deal with inventing template parameters for 'auto' in trailing + // return types when we pick up the trailing return type when processing + // the function chunk. + if (!DeducedIsTrailingReturnType) + T = InventTemplateParameter(state, T, nullptr, Auto, *Info).first; break; } case DeclaratorContext::MemberContext: { if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static || D.isFunctionDeclarator()) break; bool Cxx = SemaRef.getLangOpts().CPlusPlus; if (isa(SemaRef.CurContext)) { Error = 6; // Interface member. } else { switch (cast(SemaRef.CurContext)->getTagKind()) { case TTK_Enum: llvm_unreachable("unhandled tag kind"); case TTK_Struct: Error = Cxx ? 1 : 2; /* Struct member */ break; case TTK_Union: Error = Cxx ? 
3 : 4; /* Union member */ break; case TTK_Class: Error = 5; /* Class member */ break; case TTK_Interface: Error = 6; /* Interface member */ break; } } if (D.getDeclSpec().isFriendSpecified()) Error = 20; // Friend type break; } case DeclaratorContext::CXXCatchContext: case DeclaratorContext::ObjCCatchContext: Error = 7; // Exception declaration break; case DeclaratorContext::TemplateParamContext: if (isa(Deduced)) Error = 19; // Template parameter else if (!SemaRef.getLangOpts().CPlusPlus17) Error = 8; // Template parameter (until C++17) break; case DeclaratorContext::BlockLiteralContext: Error = 9; // Block literal break; case DeclaratorContext::TemplateArgContext: // Within a template argument list, a deduced template specialization // type will be reinterpreted as a template template argument. if (isa(Deduced) && !D.getNumTypeObjects() && D.getDeclSpec().getParsedSpecifiers() == DeclSpec::PQ_TypeSpecifier) break; LLVM_FALLTHROUGH; case DeclaratorContext::TemplateTypeArgContext: Error = 10; // Template type argument break; case DeclaratorContext::AliasDeclContext: case DeclaratorContext::AliasTemplateContext: Error = 12; // Type alias break; case DeclaratorContext::TrailingReturnContext: case DeclaratorContext::TrailingReturnVarContext: if (!SemaRef.getLangOpts().CPlusPlus14 || !IsCXXAutoType) Error = 13; // Function return type IsDeducedReturnType = true; break; case DeclaratorContext::ConversionIdContext: if (!SemaRef.getLangOpts().CPlusPlus14 || !IsCXXAutoType) Error = 14; // conversion-type-id IsDeducedReturnType = true; break; case DeclaratorContext::FunctionalCastContext: if (isa(Deduced)) break; LLVM_FALLTHROUGH; case DeclaratorContext::TypeNameContext: Error = 15; // Generic break; case DeclaratorContext::FileContext: case DeclaratorContext::BlockContext: case DeclaratorContext::ForContext: case DeclaratorContext::InitStmtContext: case DeclaratorContext::ConditionContext: // FIXME: P0091R3 (erroneously) does not permit class template argument // deduction in conditions, for-init-statements, and other declarations // that are not simple-declarations. break; case DeclaratorContext::CXXNewContext: // FIXME: P0091R3 does not permit class template argument deduction here, // but we follow GCC and allow it anyway. if (!IsCXXAutoType && !isa(Deduced)) Error = 17; // 'new' type break; case DeclaratorContext::KNRTypeListContext: Error = 18; // K&R function parameter break; } if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) Error = 11; // In Objective-C it is an error to use 'auto' on a function declarator // (and everywhere for '__auto_type'). if (D.isFunctionDeclarator() && (!SemaRef.getLangOpts().CPlusPlus11 || !IsCXXAutoType)) Error = 13; - bool HaveTrailing = false; - - // C++11 [dcl.spec.auto]p2: 'auto' is always fine if the declarator - // contains a trailing return type. That is only legal at the outermost - // level. Check all declarator chunks (outermost first) anyway, to give - // better diagnostics. - // We don't support '__auto_type' with trailing return types. - // FIXME: Should we only do this for 'auto' and not 'decltype(auto)'? 
- if (SemaRef.getLangOpts().CPlusPlus11 && IsCXXAutoType && - D.hasTrailingReturnType()) { - HaveTrailing = true; - Error = -1; - } - SourceRange AutoRange = D.getDeclSpec().getTypeSpecTypeLoc(); if (D.getName().getKind() == UnqualifiedIdKind::IK_ConversionFunctionId) AutoRange = D.getName().getSourceRange(); if (Error != -1) { unsigned Kind; if (Auto) { switch (Auto->getKeyword()) { case AutoTypeKeyword::Auto: Kind = 0; break; case AutoTypeKeyword::DecltypeAuto: Kind = 1; break; case AutoTypeKeyword::GNUAutoType: Kind = 2; break; } } else { assert(isa(Deduced) && "unknown auto type"); Kind = 3; } auto *DTST = dyn_cast(Deduced); TemplateName TN = DTST ? DTST->getTemplateName() : TemplateName(); SemaRef.Diag(AutoRange.getBegin(), diag::err_auto_not_allowed) << Kind << Error << (int)SemaRef.getTemplateNameKindForDiagnostics(TN) << QualType(Deduced, 0) << AutoRange; if (auto *TD = TN.getAsTemplateDecl()) SemaRef.Diag(TD->getLocation(), diag::note_template_decl_here); T = SemaRef.Context.IntTy; D.setInvalidType(true); - } else if (Auto && !HaveTrailing && - D.getContext() != DeclaratorContext::LambdaExprContext) { + } else if (Auto && D.getContext() != DeclaratorContext::LambdaExprContext) { // If there was a trailing return type, we already got // warn_cxx98_compat_trailing_return_type in the parser. SemaRef.Diag(AutoRange.getBegin(), D.getContext() == DeclaratorContext::LambdaExprParameterContext ? diag::warn_cxx11_compat_generic_lambda : IsDeducedReturnType ? diag::warn_cxx11_compat_deduced_return_type : diag::warn_cxx98_compat_auto_type_specifier) << AutoRange; } } if (SemaRef.getLangOpts().CPlusPlus && OwnedTagDecl && OwnedTagDecl->isCompleteDefinition()) { // Check the contexts where C++ forbids the declaration of a new class // or enumeration in a type-specifier-seq. unsigned DiagID = 0; switch (D.getContext()) { case DeclaratorContext::TrailingReturnContext: case DeclaratorContext::TrailingReturnVarContext: // Class and enumeration definitions are syntactically not allowed in // trailing return types. llvm_unreachable("parser should not have allowed this"); break; case DeclaratorContext::FileContext: case DeclaratorContext::MemberContext: case DeclaratorContext::BlockContext: case DeclaratorContext::ForContext: case DeclaratorContext::InitStmtContext: case DeclaratorContext::BlockLiteralContext: case DeclaratorContext::LambdaExprContext: // C++11 [dcl.type]p3: // A type-specifier-seq shall not define a class or enumeration unless // it appears in the type-id of an alias-declaration (7.1.3) that is not // the declaration of a template-declaration. case DeclaratorContext::AliasDeclContext: break; case DeclaratorContext::AliasTemplateContext: DiagID = diag::err_type_defined_in_alias_template; break; case DeclaratorContext::TypeNameContext: case DeclaratorContext::FunctionalCastContext: case DeclaratorContext::ConversionIdContext: case DeclaratorContext::TemplateParamContext: case DeclaratorContext::CXXNewContext: case DeclaratorContext::CXXCatchContext: case DeclaratorContext::ObjCCatchContext: case DeclaratorContext::TemplateArgContext: case DeclaratorContext::TemplateTypeArgContext: DiagID = diag::err_type_defined_in_type_specifier; break; case DeclaratorContext::PrototypeContext: case DeclaratorContext::LambdaExprParameterContext: case DeclaratorContext::ObjCParameterContext: case DeclaratorContext::ObjCResultContext: case DeclaratorContext::KNRTypeListContext: case DeclaratorContext::RequiresExprContext: // C++ [dcl.fct]p6: // Types shall not be defined in return or parameter types. 
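// For illustration, a hypothetical declaration hitting this diagnostic:
//   void f(struct S { int x; } s);   // ill-formed in C++: type defined in a parameter type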
DiagID = diag::err_type_defined_in_param_type; break; case DeclaratorContext::ConditionContext: // C++ 6.4p2: // The type-specifier-seq shall not contain typedef and shall not declare // a new class or enumeration. DiagID = diag::err_type_defined_in_condition; break; } if (DiagID != 0) { SemaRef.Diag(OwnedTagDecl->getLocation(), DiagID) << SemaRef.Context.getTypeDeclType(OwnedTagDecl); D.setInvalidType(true); } } assert(!T.isNull() && "This function should not return a null type"); return T; } /// Produce an appropriate diagnostic for an ambiguity between a function /// declarator and a C++ direct-initializer. static void warnAboutAmbiguousFunction(Sema &S, Declarator &D, DeclaratorChunk &DeclType, QualType RT) { const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun; assert(FTI.isAmbiguous && "no direct-initializer / function ambiguity"); // If the return type is void there is no ambiguity. if (RT->isVoidType()) return; // An initializer for a non-class type can have at most one argument. if (!RT->isRecordType() && FTI.NumParams > 1) return; // An initializer for a reference must have exactly one argument. if (RT->isReferenceType() && FTI.NumParams != 1) return; // Only warn if this declarator is declaring a function at block scope, and // doesn't have a storage class (such as 'extern') specified. if (!D.isFunctionDeclarator() || D.getFunctionDefinitionKind() != FDK_Declaration || !S.CurContext->isFunctionOrMethod() || D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_unspecified) return; // Inside a condition, a direct initializer is not permitted. We allow one to // be parsed in order to give better diagnostics in condition parsing. if (D.getContext() == DeclaratorContext::ConditionContext) return; SourceRange ParenRange(DeclType.Loc, DeclType.EndLoc); S.Diag(DeclType.Loc, FTI.NumParams ? diag::warn_parens_disambiguated_as_function_declaration : diag::warn_empty_parens_are_function_decl) << ParenRange; // If the declaration looks like: // T var1, // f(); // and name lookup finds a function named 'f', then the ',' was // probably intended to be a ';'. if (!D.isFirstDeclarator() && D.getIdentifier()) { FullSourceLoc Comma(D.getCommaLoc(), S.SourceMgr); FullSourceLoc Name(D.getIdentifierLoc(), S.SourceMgr); if (Comma.getFileID() != Name.getFileID() || Comma.getSpellingLineNumber() != Name.getSpellingLineNumber()) { LookupResult Result(S, D.getIdentifier(), SourceLocation(), Sema::LookupOrdinaryName); if (S.LookupName(Result, S.getCurScope())) S.Diag(D.getCommaLoc(), diag::note_empty_parens_function_call) << FixItHint::CreateReplacement(D.getCommaLoc(), ";") << D.getIdentifier(); Result.suppressDiagnostics(); } } if (FTI.NumParams > 0) { // For a declaration with parameters, eg. "T var(T());", suggest adding // parens around the first parameter to turn the declaration into a // variable declaration. SourceRange Range = FTI.Params[0].Param->getSourceRange(); SourceLocation B = Range.getBegin(); SourceLocation E = S.getLocForEndOfToken(Range.getEnd()); // FIXME: Maybe we should suggest adding braces instead of parens // in C++11 for classes that don't have an initializer_list constructor. S.Diag(B, diag::note_additional_parens_for_variable_declaration) << FixItHint::CreateInsertion(B, "(") << FixItHint::CreateInsertion(E, ")"); } else { // For a declaration without parameters, eg. "T var();", suggest replacing // the parens with an initializer to turn the declaration into a variable // declaration. 
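// For illustration, the classic vexing parse this note targets (hypothetical
// user code):
//   Widget w();   // declares a function taking no arguments
//   Widget w{};   // the C++11 suggestion: value-initialize a variable instead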
const CXXRecordDecl *RD = RT->getAsCXXRecordDecl(); // Empty parens mean value-initialization, and no parens mean // default initialization. These are equivalent if the default // constructor is user-provided or if zero-initialization is a // no-op. if (RD && RD->hasDefinition() && (RD->isEmpty() || RD->hasUserProvidedDefaultConstructor())) S.Diag(DeclType.Loc, diag::note_empty_parens_default_ctor) << FixItHint::CreateRemoval(ParenRange); else { std::string Init = S.getFixItZeroInitializerForType(RT, ParenRange.getBegin()); if (Init.empty() && S.LangOpts.CPlusPlus11) Init = "{}"; if (!Init.empty()) S.Diag(DeclType.Loc, diag::note_empty_parens_zero_initialize) << FixItHint::CreateReplacement(ParenRange, Init); } } } /// Produce an appropriate diagnostic for a declarator with top-level /// parentheses. static void warnAboutRedundantParens(Sema &S, Declarator &D, QualType T) { DeclaratorChunk &Paren = D.getTypeObject(D.getNumTypeObjects() - 1); assert(Paren.Kind == DeclaratorChunk::Paren && "do not have redundant top-level parentheses"); // This is a syntactic check; we're not interested in cases that arise // during template instantiation. if (S.inTemplateInstantiation()) return; // Check whether this could be intended to be a construction of a temporary // object in C++ via a function-style cast. bool CouldBeTemporaryObject = S.getLangOpts().CPlusPlus && D.isExpressionContext() && !D.isInvalidType() && D.getIdentifier() && D.getDeclSpec().getParsedSpecifiers() == DeclSpec::PQ_TypeSpecifier && (T->isRecordType() || T->isDependentType()) && D.getDeclSpec().getTypeQualifiers() == 0 && D.isFirstDeclarator(); bool StartsWithDeclaratorId = true; for (auto &C : D.type_objects()) { switch (C.Kind) { case DeclaratorChunk::Paren: if (&C == &Paren) continue; LLVM_FALLTHROUGH; case DeclaratorChunk::Pointer: StartsWithDeclaratorId = false; continue; case DeclaratorChunk::Array: if (!C.Arr.NumElts) CouldBeTemporaryObject = false; continue; case DeclaratorChunk::Reference: // FIXME: Suppress the warning here if there is no initializer; we're // going to give an error anyway. // We assume that something like 'T (&x) = y;' is highly likely to not // be intended to be a temporary object. CouldBeTemporaryObject = false; StartsWithDeclaratorId = false; continue; case DeclaratorChunk::Function: // In a new-type-id, function chunks require parentheses. if (D.getContext() == DeclaratorContext::CXXNewContext) return; // FIXME: "A(f())" deserves a vexing-parse warning, not just a // redundant-parens warning, but we don't know whether the function // chunk was syntactically valid as an expression here. CouldBeTemporaryObject = false; continue; case DeclaratorChunk::BlockPointer: case DeclaratorChunk::MemberPointer: case DeclaratorChunk::Pipe: // These cannot appear in expressions. CouldBeTemporaryObject = false; StartsWithDeclaratorId = false; continue; } } // FIXME: If there is an initializer, assume that this is not intended to be // a construction of a temporary object. // Check whether the name has already been declared; if not, this is not a // function-style cast. if (CouldBeTemporaryObject) { LookupResult Result(S, D.getIdentifier(), SourceLocation(), Sema::LookupOrdinaryName); if (!S.LookupName(Result, S.getCurScope())) CouldBeTemporaryObject = false; Result.suppressDiagnostics(); } SourceRange ParenRange(Paren.Loc, Paren.EndLoc); if (!CouldBeTemporaryObject) { // If we have A (::B), the parentheses affect the meaning of the program. // Suppress the warning in that case. 
Don't bother looking at the DeclSpec // here: even (e.g.) "int ::x" is visually ambiguous even though it's // formally unambiguous. if (StartsWithDeclaratorId && D.getCXXScopeSpec().isValid()) { for (NestedNameSpecifier *NNS = D.getCXXScopeSpec().getScopeRep(); NNS; NNS = NNS->getPrefix()) { if (NNS->getKind() == NestedNameSpecifier::Global) return; } } S.Diag(Paren.Loc, diag::warn_redundant_parens_around_declarator) << ParenRange << FixItHint::CreateRemoval(Paren.Loc) << FixItHint::CreateRemoval(Paren.EndLoc); return; } S.Diag(Paren.Loc, diag::warn_parens_disambiguated_as_variable_declaration) << ParenRange << D.getIdentifier(); auto *RD = T->getAsCXXRecordDecl(); if (!RD || !RD->hasDefinition() || RD->hasNonTrivialDestructor()) S.Diag(Paren.Loc, diag::note_raii_guard_add_name) << FixItHint::CreateInsertion(Paren.Loc, " varname") << T << D.getIdentifier(); // FIXME: A cast to void is probably a better suggestion in cases where it's // valid (when there is no initializer and we're not in a condition). S.Diag(D.getBeginLoc(), diag::note_function_style_cast_add_parentheses) << FixItHint::CreateInsertion(D.getBeginLoc(), "(") << FixItHint::CreateInsertion(S.getLocForEndOfToken(D.getEndLoc()), ")"); S.Diag(Paren.Loc, diag::note_remove_parens_for_variable_declaration) << FixItHint::CreateRemoval(Paren.Loc) << FixItHint::CreateRemoval(Paren.EndLoc); } /// Helper for figuring out the default CC for a function declarator type. If /// this is the outermost chunk, then we can determine the CC from the /// declarator context. If not, then this could be either a member function /// type or normal function type. static CallingConv getCCForDeclaratorChunk( Sema &S, Declarator &D, const ParsedAttributesView &AttrList, const DeclaratorChunk::FunctionTypeInfo &FTI, unsigned ChunkIndex) { assert(D.getTypeObject(ChunkIndex).Kind == DeclaratorChunk::Function); // Check for an explicit CC attribute. for (const ParsedAttr &AL : AttrList) { switch (AL.getKind()) { CALLING_CONV_ATTRS_CASELIST : { // Ignore attributes that don't validate or can't apply to the // function type. We'll diagnose the failure to apply them in // handleFunctionTypeAttr. CallingConv CC; if (!S.CheckCallingConvAttr(AL, CC) && (!FTI.isVariadic || supportsVariadicCall(CC))) { return CC; } break; } default: break; } } bool IsCXXInstanceMethod = false; if (S.getLangOpts().CPlusPlus) { // Look inwards through parentheses to see if this chunk will form a // member pointer type or if we're the declarator. Any type attributes // between here and there will override the CC we choose here. unsigned I = ChunkIndex; bool FoundNonParen = false; while (I && !FoundNonParen) { --I; if (D.getTypeObject(I).Kind != DeclaratorChunk::Paren) FoundNonParen = true; } if (FoundNonParen) { // If we're not the declarator, we're a regular function type unless we're // in a member pointer. IsCXXInstanceMethod = D.getTypeObject(I).Kind == DeclaratorChunk::MemberPointer; } else if (D.getContext() == DeclaratorContext::LambdaExprContext) { // This can only be a call operator for a lambda, which is an instance // method. IsCXXInstanceMethod = true; } else { // We're the innermost decl chunk, so must be a function declarator. assert(D.isFunctionDeclarator()); // If we're inside a record, we're declaring a method, but it could be // explicitly or implicitly static. 
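// For illustration: on targets where instance methods use a distinct default
// convention (e.g. thiscall on 32-bit MSVC), a hypothetical
//   struct S { void m(); static void f(); };
// gets the instance-method CC for m() but the plain default CC for f().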
IsCXXInstanceMethod = D.isFirstDeclarationOfMember() && D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef && !D.isStaticMember(); } } CallingConv CC = S.Context.getDefaultCallingConvention(FTI.isVariadic, IsCXXInstanceMethod); // Attribute AT_OpenCLKernel affects the calling convention for SPIR // and AMDGPU targets, hence it cannot be treated as a calling // convention attribute. This is the simplest place to infer // calling convention for OpenCL kernels. if (S.getLangOpts().OpenCL) { for (const ParsedAttr &AL : D.getDeclSpec().getAttributes()) { if (AL.getKind() == ParsedAttr::AT_OpenCLKernel) { CC = CC_OpenCLKernel; break; } } } return CC; } namespace { /// A simple notion of pointer kinds, which matches up with the various /// pointer declarators. enum class SimplePointerKind { Pointer, BlockPointer, MemberPointer, Array, }; } // end anonymous namespace IdentifierInfo *Sema::getNullabilityKeyword(NullabilityKind nullability) { switch (nullability) { case NullabilityKind::NonNull: if (!Ident__Nonnull) Ident__Nonnull = PP.getIdentifierInfo("_Nonnull"); return Ident__Nonnull; case NullabilityKind::Nullable: if (!Ident__Nullable) Ident__Nullable = PP.getIdentifierInfo("_Nullable"); return Ident__Nullable; case NullabilityKind::Unspecified: if (!Ident__Null_unspecified) Ident__Null_unspecified = PP.getIdentifierInfo("_Null_unspecified"); return Ident__Null_unspecified; } llvm_unreachable("Unknown nullability kind."); } /// Retrieve the identifier "NSError". IdentifierInfo *Sema::getNSErrorIdent() { if (!Ident_NSError) Ident_NSError = PP.getIdentifierInfo("NSError"); return Ident_NSError; } /// Check whether there is a nullability attribute of any kind in the given /// attribute list. static bool hasNullabilityAttr(const ParsedAttributesView &attrs) { for (const ParsedAttr &AL : attrs) { if (AL.getKind() == ParsedAttr::AT_TypeNonNull || AL.getKind() == ParsedAttr::AT_TypeNullable || AL.getKind() == ParsedAttr::AT_TypeNullUnspecified) return true; } return false; } namespace { /// Describes the kind of a pointer a declarator describes. enum class PointerDeclaratorKind { // Not a pointer. NonPointer, // Single-level pointer. SingleLevelPointer, // Multi-level pointer (of any pointer kind). MultiLevelPointer, // CFFooRef* MaybePointerToCFRef, // CFErrorRef* CFErrorRefPointer, // NSError** NSErrorPointerPointer, }; /// Describes a declarator chunk wrapping a pointer that marks inference as /// unexpected. // These values must be kept in sync with diagnostics. enum class PointerWrappingDeclaratorKind { /// Pointer is top-level. None = -1, /// Pointer is an array element. Array = 0, /// Pointer is the referent type of a C++ reference. Reference = 1 }; } // end anonymous namespace /// Classify the given declarator, whose type-specified is \c type, based on /// what kind of pointer it refers to. /// /// This is used to determine the default nullability. static PointerDeclaratorKind classifyPointerDeclarator(Sema &S, QualType type, Declarator &declarator, PointerWrappingDeclaratorKind &wrappingKind) { unsigned numNormalPointers = 0; // For any dependent type, we consider it a non-pointer. if (type->isDependentType()) return PointerDeclaratorKind::NonPointer; // Look through the declarator chunks to identify pointers. 
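// For illustration, hypothetical declarators and how this classification sees
// them:
//   int *p;         // SingleLevelPointer
//   int **pp;       // two normal pointer levels -> MaybePointerToCFRef
//   NSError **err;  // NSErrorPointerPointer (when NSError is known)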
for (unsigned i = 0, n = declarator.getNumTypeObjects(); i != n; ++i) { DeclaratorChunk &chunk = declarator.getTypeObject(i); switch (chunk.Kind) { case DeclaratorChunk::Array: if (numNormalPointers == 0) wrappingKind = PointerWrappingDeclaratorKind::Array; break; case DeclaratorChunk::Function: case DeclaratorChunk::Pipe: break; case DeclaratorChunk::BlockPointer: case DeclaratorChunk::MemberPointer: return numNormalPointers > 0 ? PointerDeclaratorKind::MultiLevelPointer : PointerDeclaratorKind::SingleLevelPointer; case DeclaratorChunk::Paren: break; case DeclaratorChunk::Reference: if (numNormalPointers == 0) wrappingKind = PointerWrappingDeclaratorKind::Reference; break; case DeclaratorChunk::Pointer: ++numNormalPointers; if (numNormalPointers > 2) return PointerDeclaratorKind::MultiLevelPointer; break; } } // Then, dig into the type specifier itself. unsigned numTypeSpecifierPointers = 0; do { // Decompose normal pointers. if (auto ptrType = type->getAs()) { ++numNormalPointers; if (numNormalPointers > 2) return PointerDeclaratorKind::MultiLevelPointer; type = ptrType->getPointeeType(); ++numTypeSpecifierPointers; continue; } // Decompose block pointers. if (type->getAs()) { return numNormalPointers > 0 ? PointerDeclaratorKind::MultiLevelPointer : PointerDeclaratorKind::SingleLevelPointer; } // Decompose member pointers. if (type->getAs()) { return numNormalPointers > 0 ? PointerDeclaratorKind::MultiLevelPointer : PointerDeclaratorKind::SingleLevelPointer; } // Look at Objective-C object pointers. if (auto objcObjectPtr = type->getAs()) { ++numNormalPointers; ++numTypeSpecifierPointers; // If this is NSError**, report that. if (auto objcClassDecl = objcObjectPtr->getInterfaceDecl()) { if (objcClassDecl->getIdentifier() == S.getNSErrorIdent() && numNormalPointers == 2 && numTypeSpecifierPointers < 2) { return PointerDeclaratorKind::NSErrorPointerPointer; } } break; } // Look at Objective-C class types. if (auto objcClass = type->getAs()) { if (objcClass->getInterface()->getIdentifier() == S.getNSErrorIdent()) { if (numNormalPointers == 2 && numTypeSpecifierPointers < 2) return PointerDeclaratorKind::NSErrorPointerPointer; } break; } // If at this point we haven't seen a pointer, we won't see one. if (numNormalPointers == 0) return PointerDeclaratorKind::NonPointer; if (auto recordType = type->getAs()) { RecordDecl *recordDecl = recordType->getDecl(); bool isCFError = false; if (S.CFError) { // If we already know about CFError, test it directly. isCFError = (S.CFError == recordDecl); } else { // Check whether this is CFError, which we identify based on its bridge // to NSError. CFErrorRef used to be declared with "objc_bridge" but is // now declared with "objc_bridge_mutable", so look for either one of // the two attributes. if (recordDecl->getTagKind() == TTK_Struct && numNormalPointers > 0) { IdentifierInfo *bridgedType = nullptr; if (auto bridgeAttr = recordDecl->getAttr()) bridgedType = bridgeAttr->getBridgedType(); else if (auto bridgeAttr = recordDecl->getAttr()) bridgedType = bridgeAttr->getBridgedType(); if (bridgedType == S.getNSErrorIdent()) { S.CFError = recordDecl; isCFError = true; } } } // If this is CFErrorRef*, report it as such. 
if (isCFError && numNormalPointers == 2 && numTypeSpecifierPointers < 2) { return PointerDeclaratorKind::CFErrorRefPointer; } break; } break; } while (true); switch (numNormalPointers) { case 0: return PointerDeclaratorKind::NonPointer; case 1: return PointerDeclaratorKind::SingleLevelPointer; case 2: return PointerDeclaratorKind::MaybePointerToCFRef; default: return PointerDeclaratorKind::MultiLevelPointer; } } static FileID getNullabilityCompletenessCheckFileID(Sema &S, SourceLocation loc) { // If we're anywhere in a function, method, or closure context, don't perform // completeness checks. for (DeclContext *ctx = S.CurContext; ctx; ctx = ctx->getParent()) { if (ctx->isFunctionOrMethod()) return FileID(); if (ctx->isFileContext()) break; } // We only care about the expansion location. loc = S.SourceMgr.getExpansionLoc(loc); FileID file = S.SourceMgr.getFileID(loc); if (file.isInvalid()) return FileID(); // Retrieve file information. bool invalid = false; const SrcMgr::SLocEntry &sloc = S.SourceMgr.getSLocEntry(file, &invalid); if (invalid || !sloc.isFile()) return FileID(); // We don't want to perform completeness checks on the main file or in // system headers. const SrcMgr::FileInfo &fileInfo = sloc.getFile(); if (fileInfo.getIncludeLoc().isInvalid()) return FileID(); if (fileInfo.getFileCharacteristic() != SrcMgr::C_User && S.Diags.getSuppressSystemWarnings()) { return FileID(); } return file; } /// Creates a fix-it to insert a C-style nullability keyword at \p pointerLoc, /// taking into account whitespace before and after. static void fixItNullability(Sema &S, DiagnosticBuilder &Diag, SourceLocation PointerLoc, NullabilityKind Nullability) { assert(PointerLoc.isValid()); if (PointerLoc.isMacroID()) return; SourceLocation FixItLoc = S.getLocForEndOfToken(PointerLoc); if (!FixItLoc.isValid() || FixItLoc == PointerLoc) return; const char *NextChar = S.SourceMgr.getCharacterData(FixItLoc); if (!NextChar) return; SmallString<32> InsertionTextBuf{" "}; InsertionTextBuf += getNullabilitySpelling(Nullability); InsertionTextBuf += " "; StringRef InsertionText = InsertionTextBuf.str(); if (isWhitespace(*NextChar)) { InsertionText = InsertionText.drop_back(); } else if (NextChar[-1] == '[') { if (NextChar[0] == ']') InsertionText = InsertionText.drop_back().drop_front(); else InsertionText = InsertionText.drop_front(); } else if (!isIdentifierBody(NextChar[0], /*allow dollar*/true) && !isIdentifierBody(NextChar[-1], /*allow dollar*/true)) { InsertionText = InsertionText.drop_back().drop_front(); } Diag << FixItHint::CreateInsertion(FixItLoc, InsertionText); } static void emitNullabilityConsistencyWarning(Sema &S, SimplePointerKind PointerKind, SourceLocation PointerLoc, SourceLocation PointerEndLoc) { assert(PointerLoc.isValid()); if (PointerKind == SimplePointerKind::Array) { S.Diag(PointerLoc, diag::warn_nullability_missing_array); } else { S.Diag(PointerLoc, diag::warn_nullability_missing) << static_cast(PointerKind); } auto FixItLoc = PointerEndLoc.isValid() ? 
PointerEndLoc : PointerLoc; if (FixItLoc.isMacroID()) return; auto addFixIt = [&](NullabilityKind Nullability) { auto Diag = S.Diag(FixItLoc, diag::note_nullability_fix_it); Diag << static_cast(Nullability); Diag << static_cast(PointerKind); fixItNullability(S, Diag, FixItLoc, Nullability); }; addFixIt(NullabilityKind::Nullable); addFixIt(NullabilityKind::NonNull); } /// Complains about missing nullability if the file containing \p pointerLoc /// has other uses of nullability (either the keywords or the \c assume_nonnull /// pragma). /// /// If the file has \e not seen other uses of nullability, this particular /// pointer is saved for possible later diagnosis. See recordNullabilitySeen(). static void checkNullabilityConsistency(Sema &S, SimplePointerKind pointerKind, SourceLocation pointerLoc, SourceLocation pointerEndLoc = SourceLocation()) { // Determine which file we're performing consistency checking for. FileID file = getNullabilityCompletenessCheckFileID(S, pointerLoc); if (file.isInvalid()) return; // If we haven't seen any type nullability in this file, we won't warn now // about anything. FileNullability &fileNullability = S.NullabilityMap[file]; if (!fileNullability.SawTypeNullability) { // If this is the first pointer declarator in the file, and the appropriate // warning is on, record it in case we need to diagnose it retroactively. diag::kind diagKind; if (pointerKind == SimplePointerKind::Array) diagKind = diag::warn_nullability_missing_array; else diagKind = diag::warn_nullability_missing; if (fileNullability.PointerLoc.isInvalid() && !S.Context.getDiagnostics().isIgnored(diagKind, pointerLoc)) { fileNullability.PointerLoc = pointerLoc; fileNullability.PointerEndLoc = pointerEndLoc; fileNullability.PointerKind = static_cast(pointerKind); } return; } // Complain about missing nullability. emitNullabilityConsistencyWarning(S, pointerKind, pointerLoc, pointerEndLoc); } /// Marks that a nullability feature has been used in the file containing /// \p loc. /// /// If this file already had pointer types in it that were missing nullability, /// the first such instance is retroactively diagnosed. /// /// \sa checkNullabilityConsistency static void recordNullabilitySeen(Sema &S, SourceLocation loc) { FileID file = getNullabilityCompletenessCheckFileID(S, loc); if (file.isInvalid()) return; FileNullability &fileNullability = S.NullabilityMap[file]; if (fileNullability.SawTypeNullability) return; fileNullability.SawTypeNullability = true; // If we haven't seen any type nullability before, now we have. Retroactively // diagnose the first unannotated pointer, if there was one. if (fileNullability.PointerLoc.isInvalid()) return; auto kind = static_cast(fileNullability.PointerKind); emitNullabilityConsistencyWarning(S, kind, fileNullability.PointerLoc, fileNullability.PointerEndLoc); } /// Returns true if any of the declarator chunks before \p endIndex include a /// level of indirection: array, pointer, reference, or pointer-to-member. /// /// Because declarator chunks are stored in outer-to-inner order, testing /// every chunk before \p endIndex is testing all chunks that embed the current /// chunk as part of their type. /// /// It is legal to pass the result of Declarator::getNumTypeObjects() as the /// end index, in which case all chunks are tested. static bool hasOuterPointerLikeChunk(const Declarator &D, unsigned endIndex) { unsigned i = endIndex; while (i != 0) { // Walk outwards along the declarator chunks. 
--i; const DeclaratorChunk &DC = D.getTypeObject(i); switch (DC.Kind) { case DeclaratorChunk::Paren: break; case DeclaratorChunk::Array: case DeclaratorChunk::Pointer: case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: return true; case DeclaratorChunk::Function: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::Pipe: // These are invalid anyway, so just ignore. break; } } return false; } static bool IsNoDerefableChunk(DeclaratorChunk Chunk) { return (Chunk.Kind == DeclaratorChunk::Pointer || Chunk.Kind == DeclaratorChunk::Array); } template static AttrT *createSimpleAttr(ASTContext &Ctx, ParsedAttr &AL) { AL.setUsedAsTypeAttr(); return ::new (Ctx) AttrT(Ctx, AL); } static Attr *createNullabilityAttr(ASTContext &Ctx, ParsedAttr &Attr, NullabilityKind NK) { switch (NK) { case NullabilityKind::NonNull: return createSimpleAttr(Ctx, Attr); case NullabilityKind::Nullable: return createSimpleAttr(Ctx, Attr); case NullabilityKind::Unspecified: return createSimpleAttr(Ctx, Attr); } llvm_unreachable("unknown NullabilityKind"); } // Diagnose whether this is a case with the multiple addr spaces. // Returns true if this is an invalid case. // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "No type shall be qualified // by qualifiers for two or more different address spaces." static bool DiagnoseMultipleAddrSpaceAttributes(Sema &S, LangAS ASOld, LangAS ASNew, SourceLocation AttrLoc) { if (ASOld != LangAS::Default) { if (ASOld != ASNew) { S.Diag(AttrLoc, diag::err_attribute_address_multiple_qualifiers); return true; } // Emit a warning if they are identical; it's likely unintended. S.Diag(AttrLoc, diag::warn_attribute_address_multiple_identical_qualifiers); } return false; } static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, QualType declSpecType, TypeSourceInfo *TInfo) { // The TypeSourceInfo that this function returns will not be a null type. // If there is an error, this function will fill in a dummy type as fallback. QualType T = declSpecType; Declarator &D = state.getDeclarator(); Sema &S = state.getSema(); ASTContext &Context = S.Context; const LangOptions &LangOpts = S.getLangOpts(); // The name we're declaring, if any. DeclarationName Name; if (D.getIdentifier()) Name = D.getIdentifier(); // Does this declaration declare a typedef-name? bool IsTypedefName = D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef || D.getContext() == DeclaratorContext::AliasDeclContext || D.getContext() == DeclaratorContext::AliasTemplateContext; // Does T refer to a function type with a cv-qualifier or a ref-qualifier? bool IsQualifiedFunction = T->isFunctionProtoType() && (!T->castAs()->getMethodQuals().empty() || T->castAs()->getRefQualifier() != RQ_None); // If T is 'decltype(auto)', the only declarators we can have are parens // and at most one function declarator if this is a function declaration. // If T is a deduced class template specialization type, we can have no // declarator chunks at all. if (auto *DT = T->getAs()) { const AutoType *AT = T->getAs(); bool IsClassTemplateDeduction = isa(DT); if ((AT && AT->isDecltypeAuto()) || IsClassTemplateDeduction) { for (unsigned I = 0, E = D.getNumTypeObjects(); I != E; ++I) { unsigned Index = E - I - 1; DeclaratorChunk &DeclChunk = D.getTypeObject(Index); unsigned DiagId = IsClassTemplateDeduction ? diag::err_deduced_class_template_compound_type : diag::err_decltype_auto_compound_type; unsigned DiagKind = 0; switch (DeclChunk.Kind) { case DeclaratorChunk::Paren: // FIXME: Rejecting this is a little silly. 
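// For illustration, hypothetical declarations the enclosing check rejects:
//   decltype(auto) *p = f();   // pointer to decltype(auto)
//   decltype(auto) a[2];       // array of decltype(auto)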
if (IsClassTemplateDeduction) { DiagKind = 4; break; } continue; case DeclaratorChunk::Function: { if (IsClassTemplateDeduction) { DiagKind = 3; break; } unsigned FnIndex; if (D.isFunctionDeclarationContext() && D.isFunctionDeclarator(FnIndex) && FnIndex == Index) continue; DiagId = diag::err_decltype_auto_function_declarator_not_declaration; break; } case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::MemberPointer: DiagKind = 0; break; case DeclaratorChunk::Reference: DiagKind = 1; break; case DeclaratorChunk::Array: DiagKind = 2; break; case DeclaratorChunk::Pipe: break; } S.Diag(DeclChunk.Loc, DiagId) << DiagKind; D.setInvalidType(true); break; } } } // Determine whether we should infer _Nonnull on pointer types. Optional inferNullability; bool inferNullabilityCS = false; bool inferNullabilityInnerOnly = false; bool inferNullabilityInnerOnlyComplete = false; // Are we in an assume-nonnull region? bool inAssumeNonNullRegion = false; SourceLocation assumeNonNullLoc = S.PP.getPragmaAssumeNonNullLoc(); if (assumeNonNullLoc.isValid()) { inAssumeNonNullRegion = true; recordNullabilitySeen(S, assumeNonNullLoc); } // Whether to complain about missing nullability specifiers or not. enum { /// Never complain. CAMN_No, /// Complain on the inner pointers (but not the outermost /// pointer). CAMN_InnerPointers, /// Complain about any pointers that don't have nullability /// specified or inferred. CAMN_Yes } complainAboutMissingNullability = CAMN_No; unsigned NumPointersRemaining = 0; auto complainAboutInferringWithinChunk = PointerWrappingDeclaratorKind::None; if (IsTypedefName) { // For typedefs, we do not infer any nullability (the default), // and we only complain about missing nullability specifiers on // inner pointers. complainAboutMissingNullability = CAMN_InnerPointers; if (T->canHaveNullability(/*ResultIfUnknown*/false) && !T->getNullability(S.Context)) { // Note that we allow but don't require nullability on dependent types. ++NumPointersRemaining; } for (unsigned i = 0, n = D.getNumTypeObjects(); i != n; ++i) { DeclaratorChunk &chunk = D.getTypeObject(i); switch (chunk.Kind) { case DeclaratorChunk::Array: case DeclaratorChunk::Function: case DeclaratorChunk::Pipe: break; case DeclaratorChunk::BlockPointer: case DeclaratorChunk::MemberPointer: ++NumPointersRemaining; break; case DeclaratorChunk::Paren: case DeclaratorChunk::Reference: continue; case DeclaratorChunk::Pointer: ++NumPointersRemaining; continue; } } } else { bool isFunctionOrMethod = false; switch (auto context = state.getDeclarator().getContext()) { case DeclaratorContext::ObjCParameterContext: case DeclaratorContext::ObjCResultContext: case DeclaratorContext::PrototypeContext: case DeclaratorContext::TrailingReturnContext: case DeclaratorContext::TrailingReturnVarContext: isFunctionOrMethod = true; LLVM_FALLTHROUGH; case DeclaratorContext::MemberContext: if (state.getDeclarator().isObjCIvar() && !isFunctionOrMethod) { complainAboutMissingNullability = CAMN_No; break; } // Weak properties are inferred to be nullable. if (state.getDeclarator().isObjCWeakProperty() && inAssumeNonNullRegion) { inferNullability = NullabilityKind::Nullable; break; } LLVM_FALLTHROUGH; case DeclaratorContext::FileContext: case DeclaratorContext::KNRTypeListContext: { complainAboutMissingNullability = CAMN_Yes; // Nullability inference depends on the type and declarator. 
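// For illustration, hypothetical code in an assume-nonnull region:
//   #pragma clang assume_nonnull begin
//   void use(int *p);           // single-level pointer: _Nonnull is inferred
//   - (BOOL)run:(NSError **)e;  // NSError**: _Nullable inferred at both levels
//   #pragma clang assume_nonnull end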
auto wrappingKind = PointerWrappingDeclaratorKind::None; switch (classifyPointerDeclarator(S, T, D, wrappingKind)) { case PointerDeclaratorKind::NonPointer: case PointerDeclaratorKind::MultiLevelPointer: // Cannot infer nullability. break; case PointerDeclaratorKind::SingleLevelPointer: // Infer _Nonnull if we are in an assumes-nonnull region. if (inAssumeNonNullRegion) { complainAboutInferringWithinChunk = wrappingKind; inferNullability = NullabilityKind::NonNull; inferNullabilityCS = (context == DeclaratorContext::ObjCParameterContext || context == DeclaratorContext::ObjCResultContext); } break; case PointerDeclaratorKind::CFErrorRefPointer: case PointerDeclaratorKind::NSErrorPointerPointer: // Within a function or method signature, infer _Nullable at both // levels. if (isFunctionOrMethod && inAssumeNonNullRegion) inferNullability = NullabilityKind::Nullable; break; case PointerDeclaratorKind::MaybePointerToCFRef: if (isFunctionOrMethod) { // On pointer-to-pointer parameters marked cf_returns_retained or // cf_returns_not_retained, if the outer pointer is explicit then // infer the inner pointer as _Nullable. auto hasCFReturnsAttr = [](const ParsedAttributesView &AttrList) -> bool { return AttrList.hasAttribute(ParsedAttr::AT_CFReturnsRetained) || AttrList.hasAttribute(ParsedAttr::AT_CFReturnsNotRetained); }; if (const auto *InnermostChunk = D.getInnermostNonParenChunk()) { if (hasCFReturnsAttr(D.getAttributes()) || hasCFReturnsAttr(InnermostChunk->getAttrs()) || hasCFReturnsAttr(D.getDeclSpec().getAttributes())) { inferNullability = NullabilityKind::Nullable; inferNullabilityInnerOnly = true; } } } break; } break; } case DeclaratorContext::ConversionIdContext: complainAboutMissingNullability = CAMN_Yes; break; case DeclaratorContext::AliasDeclContext: case DeclaratorContext::AliasTemplateContext: case DeclaratorContext::BlockContext: case DeclaratorContext::BlockLiteralContext: case DeclaratorContext::ConditionContext: case DeclaratorContext::CXXCatchContext: case DeclaratorContext::CXXNewContext: case DeclaratorContext::ForContext: case DeclaratorContext::InitStmtContext: case DeclaratorContext::LambdaExprContext: case DeclaratorContext::LambdaExprParameterContext: case DeclaratorContext::ObjCCatchContext: case DeclaratorContext::TemplateParamContext: case DeclaratorContext::TemplateArgContext: case DeclaratorContext::TemplateTypeArgContext: case DeclaratorContext::TypeNameContext: case DeclaratorContext::FunctionalCastContext: case DeclaratorContext::RequiresExprContext: // Don't infer in these contexts. break; } } // Local function that returns true if its argument looks like a va_list. auto isVaList = [&S](QualType T) -> bool { auto *typedefTy = T->getAs(); if (!typedefTy) return false; TypedefDecl *vaListTypedef = S.Context.getBuiltinVaListDecl(); do { if (typedefTy->getDecl() == vaListTypedef) return true; if (auto *name = typedefTy->getDecl()->getIdentifier()) if (name->isStr("va_list")) return true; typedefTy = typedefTy->desugar()->getAs(); } while (typedefTy); return false; }; // Local function that checks the nullability for a given pointer declarator. // Returns true if _Nonnull was inferred. auto inferPointerNullability = [&](SimplePointerKind pointerKind, SourceLocation pointerLoc, SourceLocation pointerEndLoc, ParsedAttributesView &attrs, AttributePool &Pool) -> ParsedAttr * { // We've seen a pointer. if (NumPointersRemaining > 0) --NumPointersRemaining; // If a nullability attribute is present, there's nothing to do. 
if (hasNullabilityAttr(attrs)) return nullptr; // If we're supposed to infer nullability, do so now. if (inferNullability && !inferNullabilityInnerOnlyComplete) { ParsedAttr::Syntax syntax = inferNullabilityCS ? ParsedAttr::AS_ContextSensitiveKeyword : ParsedAttr::AS_Keyword; ParsedAttr *nullabilityAttr = Pool.create( S.getNullabilityKeyword(*inferNullability), SourceRange(pointerLoc), nullptr, SourceLocation(), nullptr, 0, syntax); attrs.addAtEnd(nullabilityAttr); if (inferNullabilityCS) { state.getDeclarator().getMutableDeclSpec().getObjCQualifiers() ->setObjCDeclQualifier(ObjCDeclSpec::DQ_CSNullability); } if (pointerLoc.isValid() && complainAboutInferringWithinChunk != PointerWrappingDeclaratorKind::None) { auto Diag = S.Diag(pointerLoc, diag::warn_nullability_inferred_on_nested_type); Diag << static_cast(complainAboutInferringWithinChunk); fixItNullability(S, Diag, pointerLoc, NullabilityKind::NonNull); } if (inferNullabilityInnerOnly) inferNullabilityInnerOnlyComplete = true; return nullabilityAttr; } // If we're supposed to complain about missing nullability, do so // now if it's truly missing. switch (complainAboutMissingNullability) { case CAMN_No: break; case CAMN_InnerPointers: if (NumPointersRemaining == 0) break; LLVM_FALLTHROUGH; case CAMN_Yes: checkNullabilityConsistency(S, pointerKind, pointerLoc, pointerEndLoc); } return nullptr; }; // If the type itself could have nullability but does not, infer pointer // nullability and perform consistency checking. if (S.CodeSynthesisContexts.empty()) { if (T->canHaveNullability(/*ResultIfUnknown*/false) && !T->getNullability(S.Context)) { if (isVaList(T)) { // Record that we've seen a pointer, but do nothing else. if (NumPointersRemaining > 0) --NumPointersRemaining; } else { SimplePointerKind pointerKind = SimplePointerKind::Pointer; if (T->isBlockPointerType()) pointerKind = SimplePointerKind::BlockPointer; else if (T->isMemberPointerType()) pointerKind = SimplePointerKind::MemberPointer; if (auto *attr = inferPointerNullability( pointerKind, D.getDeclSpec().getTypeSpecTypeLoc(), D.getDeclSpec().getEndLoc(), D.getMutableDeclSpec().getAttributes(), D.getMutableDeclSpec().getAttributePool())) { T = state.getAttributedType( createNullabilityAttr(Context, *attr, *inferNullability), T, T); } } } if (complainAboutMissingNullability == CAMN_Yes && T->isArrayType() && !T->getNullability(S.Context) && !isVaList(T) && D.isPrototypeContext() && !hasOuterPointerLikeChunk(D, D.getNumTypeObjects())) { checkNullabilityConsistency(S, SimplePointerKind::Array, D.getDeclSpec().getTypeSpecTypeLoc()); } } bool ExpectNoDerefChunk = state.getCurrentAttributes().hasAttribute(ParsedAttr::AT_NoDeref); // Walk the DeclTypeInfo, building the recursive type as we go. // DeclTypeInfos are ordered from the identifier out, which is // opposite of what we want :). for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) { unsigned chunkIndex = e - i - 1; state.setCurrentChunkIndex(chunkIndex); DeclaratorChunk &DeclType = D.getTypeObject(chunkIndex); IsQualifiedFunction &= DeclType.Kind == DeclaratorChunk::Paren; switch (DeclType.Kind) { case DeclaratorChunk::Paren: if (i == 0) warnAboutRedundantParens(S, D, T); T = S.BuildParenType(T); break; case DeclaratorChunk::BlockPointer: // If blocks are disabled, emit an error. if (!LangOpts.Blocks) S.Diag(DeclType.Loc, diag::err_blocks_disable) << LangOpts.OpenCL; // Handle pointer nullability. 
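In user terms, the inference and consistency checking above behave roughly as follows (Clang nullability extensions; an illustrative sketch, not exhaustive):

    #pragma clang assume_nonnull begin
    int *global_p;                  // single-level pointer: inferred _Nonnull
    void f(int *p);                 // parameter pointer: inferred _Nonnull
    #pragma clang assume_nonnull end

    // Once nullability is in play, unannotated pointers are flagged by
    // -Wnullability-completeness (checkNullabilityConsistency):
    void g(int * _Nonnull a, int *b);   // warning on 'b'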
inferPointerNullability(SimplePointerKind::BlockPointer, DeclType.Loc, DeclType.EndLoc, DeclType.getAttrs(), state.getDeclarator().getAttributePool()); T = S.BuildBlockPointerType(T, D.getIdentifierLoc(), Name); if (DeclType.Cls.TypeQuals || LangOpts.OpenCL) { // OpenCL v2.0, s6.12.5 - Block variable declarations are implicitly // qualified with const. if (LangOpts.OpenCL) DeclType.Cls.TypeQuals |= DeclSpec::TQ_const; T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Cls.TypeQuals); } break; case DeclaratorChunk::Pointer: // Verify that we're not building a pointer to pointer to function with // exception specification. if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) { S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec); D.setInvalidType(true); // Build the type anyway. } // Handle pointer nullability inferPointerNullability(SimplePointerKind::Pointer, DeclType.Loc, DeclType.EndLoc, DeclType.getAttrs(), state.getDeclarator().getAttributePool()); if (LangOpts.ObjC && T->getAs()) { T = Context.getObjCObjectPointerType(T); if (DeclType.Ptr.TypeQuals) T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals); break; } // OpenCL v2.0 s6.9b - Pointer to image/sampler cannot be used. // OpenCL v2.0 s6.13.16.1 - Pointer to pipe cannot be used. // OpenCL v2.0 s6.12.5 - Pointers to Blocks are not allowed. if (LangOpts.OpenCL) { if (T->isImageType() || T->isSamplerT() || T->isPipeType() || T->isBlockPointerType()) { S.Diag(D.getIdentifierLoc(), diag::err_opencl_pointer_to_type) << T; D.setInvalidType(true); } } T = S.BuildPointerType(T, DeclType.Loc, Name); if (DeclType.Ptr.TypeQuals) T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals); break; case DeclaratorChunk::Reference: { // Verify that we're not building a reference to pointer to function with // exception specification. if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) { S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec); D.setInvalidType(true); // Build the type anyway. } T = S.BuildReferenceType(T, DeclType.Ref.LValueRef, DeclType.Loc, Name); if (DeclType.Ref.HasRestrict) T = S.BuildQualifiedType(T, DeclType.Loc, Qualifiers::Restrict); break; } case DeclaratorChunk::Array: { // Verify that we're not building an array of pointers to function with // exception specification. if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) { S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec); D.setInvalidType(true); // Build the type anyway. } DeclaratorChunk::ArrayTypeInfo &ATI = DeclType.Arr; Expr *ArraySize = static_cast(ATI.NumElts); ArrayType::ArraySizeModifier ASM; if (ATI.isStar) ASM = ArrayType::Star; else if (ATI.hasStatic) ASM = ArrayType::Static; else ASM = ArrayType::Normal; if (ASM == ArrayType::Star && !D.isPrototypeContext()) { // FIXME: This check isn't quite right: it allows star in prototypes // for function definitions, and disallows some edge cases detailed // in http://gcc.gnu.org/ml/gcc-patches/2009-02/msg00133.html S.Diag(DeclType.Loc, diag::err_array_star_outside_prototype); ASM = ArrayType::Normal; D.setInvalidType(true); } // C99 6.7.5.2p1: The optional type qualifiers and the keyword static // shall appear only in a declaration of a function parameter with an // array type, ... if (ASM == ArrayType::Static || ATI.TypeQuals) { if (!(D.isPrototypeContext() || D.getContext() == DeclaratorContext::KNRTypeListContext)) { S.Diag(DeclType.Loc, diag::err_array_static_outside_prototype) << (ASM == ArrayType::Static ? 
"'static'" : "type qualifier"); // Remove the 'static' and the type qualifiers. if (ASM == ArrayType::Static) ASM = ArrayType::Normal; ATI.TypeQuals = 0; D.setInvalidType(true); } // C99 6.7.5.2p1: ... and then only in the outermost array type // derivation. if (hasOuterPointerLikeChunk(D, chunkIndex)) { S.Diag(DeclType.Loc, diag::err_array_static_not_outermost) << (ASM == ArrayType::Static ? "'static'" : "type qualifier"); if (ASM == ArrayType::Static) ASM = ArrayType::Normal; ATI.TypeQuals = 0; D.setInvalidType(true); } } const AutoType *AT = T->getContainedAutoType(); // Allow arrays of auto if we are a generic lambda parameter. // i.e. [](auto (&array)[5]) { return array[0]; }; OK if (AT && D.getContext() != DeclaratorContext::LambdaExprParameterContext) { // We've already diagnosed this for decltype(auto). if (!AT->isDecltypeAuto()) S.Diag(DeclType.Loc, diag::err_illegal_decl_array_of_auto) << getPrintableNameForEntity(Name) << T; T = QualType(); break; } // Array parameters can be marked nullable as well, although it's not // necessary if they're marked 'static'. if (complainAboutMissingNullability == CAMN_Yes && !hasNullabilityAttr(DeclType.getAttrs()) && ASM != ArrayType::Static && D.isPrototypeContext() && !hasOuterPointerLikeChunk(D, chunkIndex)) { checkNullabilityConsistency(S, SimplePointerKind::Array, DeclType.Loc); } T = S.BuildArrayType(T, ASM, ArraySize, ATI.TypeQuals, SourceRange(DeclType.Loc, DeclType.EndLoc), Name); break; } case DeclaratorChunk::Function: { // If the function declarator has a prototype (i.e. it is not () and // does not have a K&R-style identifier list), then the arguments are part // of the type, otherwise the argument list is (). DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun; IsQualifiedFunction = FTI.hasMethodTypeQualifiers() || FTI.hasRefQualifier(); // Check for auto functions and trailing return type and adjust the // return type accordingly. if (!D.isInvalidType()) { // trailing-return-type is only required if we're declaring a function, // and not, for instance, a pointer to a function. if (D.getDeclSpec().hasAutoTypeSpec() && !FTI.hasTrailingReturnType() && chunkIndex == 0) { if (!S.getLangOpts().CPlusPlus14) { S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(), D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto ? diag::err_auto_missing_trailing_return : diag::err_deduced_return_type); T = Context.IntTy; D.setInvalidType(true); } else { S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(), diag::warn_cxx11_compat_deduced_return_type); } } else if (FTI.hasTrailingReturnType()) { // T must be exactly 'auto' at this point. See CWG issue 681. if (isa(T)) { S.Diag(D.getBeginLoc(), diag::err_trailing_return_in_parens) << T << D.getSourceRange(); D.setInvalidType(true); } else if (D.getName().getKind() == UnqualifiedIdKind::IK_DeductionGuideName) { if (T != Context.DependentTy) { S.Diag(D.getDeclSpec().getBeginLoc(), diag::err_deduction_guide_with_complex_decl) << D.getSourceRange(); D.setInvalidType(true); } } else if (D.getContext() != DeclaratorContext::LambdaExprContext && (T.hasQualifiers() || !isa(T) || cast(T)->getKeyword() != AutoTypeKeyword::Auto || cast(T)->isConstrained())) { S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(), diag::err_trailing_return_without_auto) << T << D.getDeclSpec().getSourceRange(); D.setInvalidType(true); } T = S.GetTypeFromParser(FTI.getTrailingReturnType(), &TInfo); if (T.isNull()) { // An error occurred parsing the trailing return type. 
T = Context.IntTy; D.setInvalidType(true); - } else if (S.getLangOpts().CPlusPlus20) - // Handle cases like: `auto f() -> auto` or `auto f() -> C auto`. - if (AutoType *Auto = T->getContainedAutoType()) - if (S.getCurScope()->isFunctionDeclarationScope()) - T = InventTemplateParameter(state, T, TInfo, Auto, - S.InventedParameterInfos.back()); + } else if (AutoType *Auto = T->getContainedAutoType()) { + // If the trailing return type contains an `auto`, we may need to + // invent a template parameter for it, for cases like + // `auto f() -> C auto` or `[](auto (*p) -> auto) {}`. + InventedTemplateParameterInfo *InventedParamInfo = nullptr; + if (D.getContext() == DeclaratorContext::PrototypeContext) + InventedParamInfo = &S.InventedParameterInfos.back(); + else if (D.getContext() == + DeclaratorContext::LambdaExprParameterContext) + InventedParamInfo = S.getCurLambda(); + if (InventedParamInfo) { + std::tie(T, TInfo) = InventTemplateParameter( + state, T, TInfo, Auto, *InventedParamInfo); + } + } } else { // This function type is not the type of the entity being declared, // so checking the 'auto' is not the responsibility of this chunk. } } // C99 6.7.5.3p1: The return type may not be a function or array type. // For conversion functions, we'll diagnose this particular error later. if (!D.isInvalidType() && (T->isArrayType() || T->isFunctionType()) && (D.getName().getKind() != UnqualifiedIdKind::IK_ConversionFunctionId)) { unsigned diagID = diag::err_func_returning_array_function; // Last processing chunk in block context means this function chunk // represents the block. if (chunkIndex == 0 && D.getContext() == DeclaratorContext::BlockLiteralContext) diagID = diag::err_block_returning_array_function; S.Diag(DeclType.Loc, diagID) << T->isFunctionType() << T; T = Context.IntTy; D.setInvalidType(true); } // Do not allow returning half FP value. // FIXME: This really should be in BuildFunctionType. if (T->isHalfType()) { if (S.getLangOpts().OpenCL) { if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16")) { S.Diag(D.getIdentifierLoc(), diag::err_opencl_invalid_return) << T << 0 /*pointer hint*/; D.setInvalidType(true); } } else if (!S.getLangOpts().HalfArgsAndReturns) { S.Diag(D.getIdentifierLoc(), diag::err_parameters_retval_cannot_have_fp16_type) << 1; D.setInvalidType(true); } } if (LangOpts.OpenCL) { // OpenCL v2.0 s6.12.5 - A block cannot be the return value of a // function. if (T->isBlockPointerType() || T->isImageType() || T->isSamplerT() || T->isPipeType()) { S.Diag(D.getIdentifierLoc(), diag::err_opencl_invalid_return) << T << 1 /*hint off*/; D.setInvalidType(true); } // OpenCL doesn't support variadic functions and blocks // (s6.9.e and s6.12.5 OpenCL v2.0) except for printf. // We also allow here any toolchain reserved identifiers. if (FTI.isVariadic && !(D.getIdentifier() && ((D.getIdentifier()->getName() == "printf" && (LangOpts.OpenCLCPlusPlus || LangOpts.OpenCLVersion >= 120)) || D.getIdentifier()->getName().startswith("__")))) { S.Diag(D.getIdentifierLoc(), diag::err_opencl_variadic_function); D.setInvalidType(true); } } // Methods cannot return interface types. All ObjC objects are // passed by reference. 
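A sketch of the kinds of declarations the rewritten block above is meant to handle (the concept C is invented for the example; exact acceptance still depends on the usual deduction rules):

    template <typename T> concept C = true;   // stand-in concept

    auto f() -> C auto;                       // constrained placeholder in a
                                              // trailing return type
    auto g = [](auto (*fp)() -> auto) {};     // 'auto' inside the trailing return
                                              // type of a parameter's function
                                              // type: a template parameter is
                                              // invented for it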
if (T->isObjCObjectType()) { SourceLocation DiagLoc, FixitLoc; if (TInfo) { DiagLoc = TInfo->getTypeLoc().getBeginLoc(); FixitLoc = S.getLocForEndOfToken(TInfo->getTypeLoc().getEndLoc()); } else { DiagLoc = D.getDeclSpec().getTypeSpecTypeLoc(); FixitLoc = S.getLocForEndOfToken(D.getDeclSpec().getEndLoc()); } S.Diag(DiagLoc, diag::err_object_cannot_be_passed_returned_by_value) << 0 << T << FixItHint::CreateInsertion(FixitLoc, "*"); T = Context.getObjCObjectPointerType(T); if (TInfo) { TypeLocBuilder TLB; TLB.pushFullCopy(TInfo->getTypeLoc()); ObjCObjectPointerTypeLoc TLoc = TLB.push(T); TLoc.setStarLoc(FixitLoc); TInfo = TLB.getTypeSourceInfo(Context, T); } D.setInvalidType(true); } // cv-qualifiers on return types are pointless except when the type is a // class type in C++. if ((T.getCVRQualifiers() || T->isAtomicType()) && !(S.getLangOpts().CPlusPlus && (T->isDependentType() || T->isRecordType()))) { if (T->isVoidType() && !S.getLangOpts().CPlusPlus && D.getFunctionDefinitionKind() == FDK_Definition) { // [6.9.1/3] qualified void return is invalid on a C // function definition. Apparently ok on declarations and // in C++ though (!) S.Diag(DeclType.Loc, diag::err_func_returning_qualified_void) << T; } else diagnoseRedundantReturnTypeQualifiers(S, T, D, chunkIndex); // C++2a [dcl.fct]p12: // A volatile-qualified return type is deprecated if (T.isVolatileQualified() && S.getLangOpts().CPlusPlus20) S.Diag(DeclType.Loc, diag::warn_deprecated_volatile_return) << T; } // Objective-C ARC ownership qualifiers are ignored on the function // return type (by type canonicalization). Complain if this attribute // was written here. if (T.getQualifiers().hasObjCLifetime()) { SourceLocation AttrLoc; if (chunkIndex + 1 < D.getNumTypeObjects()) { DeclaratorChunk ReturnTypeChunk = D.getTypeObject(chunkIndex + 1); for (const ParsedAttr &AL : ReturnTypeChunk.getAttrs()) { if (AL.getKind() == ParsedAttr::AT_ObjCOwnership) { AttrLoc = AL.getLoc(); break; } } } if (AttrLoc.isInvalid()) { for (const ParsedAttr &AL : D.getDeclSpec().getAttributes()) { if (AL.getKind() == ParsedAttr::AT_ObjCOwnership) { AttrLoc = AL.getLoc(); break; } } } if (AttrLoc.isValid()) { // The ownership attributes are almost always written via // the predefined // __strong/__weak/__autoreleasing/__unsafe_unretained. if (AttrLoc.isMacroID()) AttrLoc = S.SourceMgr.getImmediateExpansionRange(AttrLoc).getBegin(); S.Diag(AttrLoc, diag::warn_arc_lifetime_result_type) << T.getQualifiers().getObjCLifetime(); } } if (LangOpts.CPlusPlus && D.getDeclSpec().hasTagDefinition()) { // C++ [dcl.fct]p6: // Types shall not be defined in return or parameter types. TagDecl *Tag = cast(D.getDeclSpec().getRepAsDecl()); S.Diag(Tag->getLocation(), diag::err_type_defined_in_result_type) << Context.getTypeDeclType(Tag); } // Exception specs are not allowed in typedefs. Complain, but add it // anyway. if (IsTypedefName && FTI.getExceptionSpecType() && !LangOpts.CPlusPlus17) S.Diag(FTI.getExceptionSpecLocBeg(), diag::err_exception_spec_in_typedef) << (D.getContext() == DeclaratorContext::AliasDeclContext || D.getContext() == DeclaratorContext::AliasTemplateContext); // If we see "T var();" or "T var(T());" at block scope, it is probably // an attempt to initialize a variable, not a function declaration. 
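Illustrative declarations for the return-type qualifier checks above (C++ unless noted; not part of the patch):

    const int f();       // -Wignored-qualifiers: 'const' on a scalar return
                         //   type has no effect
    volatile int g();    // additionally deprecated in C++20
                         //   (warn_deprecated_volatile_return)

    /* C only, and only on a definition: a qualified 'void' return type is an
       error (err_func_returning_qualified_void):
         const void h(void) { }                                              */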
if (FTI.isAmbiguous) warnAboutAmbiguousFunction(S, D, DeclType, T); FunctionType::ExtInfo EI( getCCForDeclaratorChunk(S, D, DeclType.getAttrs(), FTI, chunkIndex)); if (!FTI.NumParams && !FTI.isVariadic && !LangOpts.CPlusPlus && !LangOpts.OpenCL) { // Simple void foo(), where the incoming T is the result type. T = Context.getFunctionNoProtoType(T, EI); } else { // We allow a zero-parameter variadic function in C if the // function is marked with the "overloadable" attribute. Scan // for this attribute now. if (!FTI.NumParams && FTI.isVariadic && !LangOpts.CPlusPlus) if (!D.getAttributes().hasAttribute(ParsedAttr::AT_Overloadable)) S.Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_param); if (FTI.NumParams && FTI.Params[0].Param == nullptr) { // C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function // definition. S.Diag(FTI.Params[0].IdentLoc, diag::err_ident_list_in_fn_declaration); D.setInvalidType(true); // Recover by creating a K&R-style function type. T = Context.getFunctionNoProtoType(T, EI); break; } FunctionProtoType::ExtProtoInfo EPI; EPI.ExtInfo = EI; EPI.Variadic = FTI.isVariadic; EPI.EllipsisLoc = FTI.getEllipsisLoc(); EPI.HasTrailingReturn = FTI.hasTrailingReturnType(); EPI.TypeQuals.addCVRUQualifiers( FTI.MethodQualifiers ? FTI.MethodQualifiers->getTypeQualifiers() : 0); EPI.RefQualifier = !FTI.hasRefQualifier()? RQ_None : FTI.RefQualifierIsLValueRef? RQ_LValue : RQ_RValue; // Otherwise, we have a function with a parameter list that is // potentially variadic. SmallVector ParamTys; ParamTys.reserve(FTI.NumParams); SmallVector ExtParameterInfos(FTI.NumParams); bool HasAnyInterestingExtParameterInfos = false; for (unsigned i = 0, e = FTI.NumParams; i != e; ++i) { ParmVarDecl *Param = cast(FTI.Params[i].Param); QualType ParamTy = Param->getType(); assert(!ParamTy.isNull() && "Couldn't parse type?"); // Look for 'void'. void is allowed only as a single parameter to a // function with no other parameters (C99 6.7.5.3p10). We record // int(void) as a FunctionProtoType with an empty parameter list. if (ParamTy->isVoidType()) { // If this is something like 'float(int, void)', reject it. 'void' // is an incomplete type (C99 6.2.5p19) and function decls cannot // have parameters of incomplete type. if (FTI.NumParams != 1 || FTI.isVariadic) { S.Diag(DeclType.Loc, diag::err_void_only_param); ParamTy = Context.IntTy; Param->setType(ParamTy); } else if (FTI.Params[i].Ident) { // Reject, but continue to parse 'int(void abc)'. S.Diag(FTI.Params[i].IdentLoc, diag::err_param_with_void_type); ParamTy = Context.IntTy; Param->setType(ParamTy); } else { // Reject, but continue to parse 'float(const void)'. if (ParamTy.hasQualifiers()) S.Diag(DeclType.Loc, diag::err_void_param_qualified); // Do not add 'void' to the list. break; } } else if (ParamTy->isHalfType()) { // Disallow half FP parameters. // FIXME: This really should be in BuildFunctionType. 
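The parameter-list rules applied above, shown as C declarations (illustrative):

    int f(void);        /* OK: explicit empty prototype                         */
    int g(void, int);   /* error: 'void' must be the only parameter
                           (err_void_only_param)                                */
    int h(void x);      /* error: argument may not have 'void' type
                           (err_param_with_void_type)                           */
    int i(const void);  /* error: 'void' parameter must not be qualified
                           (err_void_param_qualified)                           */
    int j(...);         /* error in C: '...' needs a preceding parameter,
                           unless the function is __attribute__((overloadable)) */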
if (S.getLangOpts().OpenCL) { if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16")) { S.Diag(Param->getLocation(), diag::err_opencl_invalid_param) << ParamTy << 0; D.setInvalidType(); Param->setInvalidDecl(); } } else if (!S.getLangOpts().HalfArgsAndReturns) { S.Diag(Param->getLocation(), diag::err_parameters_retval_cannot_have_fp16_type) << 0; D.setInvalidType(); } } else if (!FTI.hasPrototype) { if (ParamTy->isPromotableIntegerType()) { ParamTy = Context.getPromotedIntegerType(ParamTy); Param->setKNRPromoted(true); } else if (const BuiltinType* BTy = ParamTy->getAs()) { if (BTy->getKind() == BuiltinType::Float) { ParamTy = Context.DoubleTy; Param->setKNRPromoted(true); } } } else if (S.getLangOpts().OpenCL && ParamTy->isBlockPointerType()) { // OpenCL 2.0 s6.12.5: A block cannot be a parameter of a function. S.Diag(Param->getLocation(), diag::err_opencl_invalid_param) << ParamTy << 1 /*hint off*/; D.setInvalidType(); } if (LangOpts.ObjCAutoRefCount && Param->hasAttr()) { ExtParameterInfos[i] = ExtParameterInfos[i].withIsConsumed(true); HasAnyInterestingExtParameterInfos = true; } if (auto attr = Param->getAttr()) { ExtParameterInfos[i] = ExtParameterInfos[i].withABI(attr->getABI()); HasAnyInterestingExtParameterInfos = true; } if (Param->hasAttr()) { ExtParameterInfos[i] = ExtParameterInfos[i].withHasPassObjectSize(); HasAnyInterestingExtParameterInfos = true; } if (Param->hasAttr()) { ExtParameterInfos[i] = ExtParameterInfos[i].withIsNoEscape(true); HasAnyInterestingExtParameterInfos = true; } ParamTys.push_back(ParamTy); } if (HasAnyInterestingExtParameterInfos) { EPI.ExtParameterInfos = ExtParameterInfos.data(); checkExtParameterInfos(S, ParamTys, EPI, [&](unsigned i) { return FTI.Params[i].Param->getLocation(); }); } SmallVector Exceptions; SmallVector DynamicExceptions; SmallVector DynamicExceptionRanges; Expr *NoexceptExpr = nullptr; if (FTI.getExceptionSpecType() == EST_Dynamic) { // FIXME: It's rather inefficient to have to split into two vectors // here. unsigned N = FTI.getNumExceptions(); DynamicExceptions.reserve(N); DynamicExceptionRanges.reserve(N); for (unsigned I = 0; I != N; ++I) { DynamicExceptions.push_back(FTI.Exceptions[I].Ty); DynamicExceptionRanges.push_back(FTI.Exceptions[I].Range); } } else if (isComputedNoexcept(FTI.getExceptionSpecType())) { NoexceptExpr = FTI.NoexceptExpr; } S.checkExceptionSpecification(D.isFunctionDeclarationContext(), FTI.getExceptionSpecType(), DynamicExceptions, DynamicExceptionRanges, NoexceptExpr, Exceptions, EPI.ExceptionSpec); // FIXME: Set address space from attrs for C++ mode here. // OpenCLCPlusPlus: A class member function has an address space. auto IsClassMember = [&]() { return (!state.getDeclarator().getCXXScopeSpec().isEmpty() && state.getDeclarator() .getCXXScopeSpec() .getScopeRep() ->getKind() == NestedNameSpecifier::TypeSpec) || state.getDeclarator().getContext() == DeclaratorContext::MemberContext || state.getDeclarator().getContext() == DeclaratorContext::LambdaExprContext; }; if (state.getSema().getLangOpts().OpenCLCPlusPlus && IsClassMember()) { LangAS ASIdx = LangAS::Default; // Take address space attr if any and mark as invalid to avoid adding // them later while creating QualType. if (FTI.MethodQualifiers) for (ParsedAttr &attr : FTI.MethodQualifiers->getAttributes()) { LangAS ASIdxNew = attr.asOpenCLLangAS(); if (DiagnoseMultipleAddrSpaceAttributes(S, ASIdx, ASIdxNew, attr.getLoc())) D.setInvalidType(true); else ASIdx = ASIdxNew; } // If a class member function's address space is not set, set it to // __generic. 
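The unprototyped-parameter adjustment above corresponds to classic K&R definitions; when the function type is formed, the parameter types are promoted while the ParmVarDecls keep their written types (C; illustrative):

    int k();              /* unprototyped declaration                     */
    int k(c, f)           /* K&R-style definition                         */
      char  c;            /* promotable integer type: promoted to 'int'   */
      float f;            /* 'float': promoted to 'double'                */
    { return c + (int)f; }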
LangAS AS = (ASIdx == LangAS::Default ? S.getDefaultCXXMethodAddrSpace() : ASIdx); EPI.TypeQuals.addAddressSpace(AS); } T = Context.getFunctionType(T, ParamTys, EPI); } break; } case DeclaratorChunk::MemberPointer: { // The scope spec must refer to a class, or be dependent. CXXScopeSpec &SS = DeclType.Mem.Scope(); QualType ClsType; // Handle pointer nullability. inferPointerNullability(SimplePointerKind::MemberPointer, DeclType.Loc, DeclType.EndLoc, DeclType.getAttrs(), state.getDeclarator().getAttributePool()); if (SS.isInvalid()) { // Avoid emitting extra errors if we already errored on the scope. D.setInvalidType(true); } else if (S.isDependentScopeSpecifier(SS) || dyn_cast_or_null(S.computeDeclContext(SS))) { NestedNameSpecifier *NNS = SS.getScopeRep(); NestedNameSpecifier *NNSPrefix = NNS->getPrefix(); switch (NNS->getKind()) { case NestedNameSpecifier::Identifier: ClsType = Context.getDependentNameType(ETK_None, NNSPrefix, NNS->getAsIdentifier()); break; case NestedNameSpecifier::Namespace: case NestedNameSpecifier::NamespaceAlias: case NestedNameSpecifier::Global: case NestedNameSpecifier::Super: llvm_unreachable("Nested-name-specifier must name a type"); case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: ClsType = QualType(NNS->getAsType(), 0); // Note: if the NNS has a prefix and ClsType is a nondependent // TemplateSpecializationType, then the NNS prefix is NOT included // in ClsType; hence we wrap ClsType into an ElaboratedType. // NOTE: in particular, no wrap occurs if ClsType already is an // Elaborated, DependentName, or DependentTemplateSpecialization. if (NNSPrefix && isa(NNS->getAsType())) ClsType = Context.getElaboratedType(ETK_None, NNSPrefix, ClsType); break; } } else { S.Diag(DeclType.Mem.Scope().getBeginLoc(), diag::err_illegal_decl_mempointer_in_nonclass) << (D.getIdentifier() ? D.getIdentifier()->getName() : "type name") << DeclType.Mem.Scope().getRange(); D.setInvalidType(true); } if (!ClsType.isNull()) T = S.BuildMemberPointerType(T, ClsType, DeclType.Loc, D.getIdentifier()); if (T.isNull()) { T = Context.IntTy; D.setInvalidType(true); } else if (DeclType.Mem.TypeQuals) { T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Mem.TypeQuals); } break; } case DeclaratorChunk::Pipe: { T = S.BuildReadPipeType(T, DeclType.Loc); processTypeAttrs(state, T, TAL_DeclSpec, D.getMutableDeclSpec().getAttributes()); break; } } if (T.isNull()) { D.setInvalidType(true); T = Context.IntTy; } // See if there are any attributes on this declarator chunk. processTypeAttrs(state, T, TAL_DeclChunk, DeclType.getAttrs()); if (DeclType.Kind != DeclaratorChunk::Paren) { if (ExpectNoDerefChunk && !IsNoDerefableChunk(DeclType)) S.Diag(DeclType.Loc, diag::warn_noderef_on_non_pointer_or_array); ExpectNoDerefChunk = state.didParseNoDeref(); } } if (ExpectNoDerefChunk) S.Diag(state.getDeclarator().getBeginLoc(), diag::warn_noderef_on_non_pointer_or_array); // GNU warning -Wstrict-prototypes // Warn if a function declaration is without a prototype. // This warning is issued for all kinds of unprototyped function // declarations (i.e. function type typedef, function pointer etc.) // C99 6.7.5.3p14: // The empty list in a function declarator that is not part of a definition // of that function specifies that no information about the number or types // of the parameters is supplied. 
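The member-pointer scope check above in user terms (illustrative):

    namespace N { int i; }
    struct C   { int m; };

    int C::*pm = &C::m;   // OK: the nested-name-specifier names a class
    int N::*pn;           // error: 'pn' does not point into a class
                          //        (err_illegal_decl_mempointer_in_nonclass)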
if (!LangOpts.CPlusPlus && D.getFunctionDefinitionKind() == FDK_Declaration) { bool IsBlock = false; for (const DeclaratorChunk &DeclType : D.type_objects()) { switch (DeclType.Kind) { case DeclaratorChunk::BlockPointer: IsBlock = true; break; case DeclaratorChunk::Function: { const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun; // We suppress the warning when there's no LParen location, as this // indicates the declaration was an implicit declaration, which gets // warned about separately via -Wimplicit-function-declaration. if (FTI.NumParams == 0 && !FTI.isVariadic && FTI.getLParenLoc().isValid()) S.Diag(DeclType.Loc, diag::warn_strict_prototypes) << IsBlock << FixItHint::CreateInsertion(FTI.getRParenLoc(), "void"); IsBlock = false; break; } default: break; } } } assert(!T.isNull() && "T must not be null after this point"); if (LangOpts.CPlusPlus && T->isFunctionType()) { const FunctionProtoType *FnTy = T->getAs<FunctionProtoType>(); assert(FnTy && "Why oh why is there not a FunctionProtoType here?"); // C++ 8.3.5p4: // A cv-qualifier-seq shall only be part of the function type // for a nonstatic member function, the function type to which a pointer // to member refers, or the top-level function type of a function typedef // declaration. // // Core issue 547 also allows cv-qualifiers on function types that are // top-level template type arguments. enum { NonMember, Member, DeductionGuide } Kind = NonMember; if (D.getName().getKind() == UnqualifiedIdKind::IK_DeductionGuideName) Kind = DeductionGuide; else if (!D.getCXXScopeSpec().isSet()) { if ((D.getContext() == DeclaratorContext::MemberContext || D.getContext() == DeclaratorContext::LambdaExprContext) && !D.getDeclSpec().isFriendSpecified()) Kind = Member; } else { DeclContext *DC = S.computeDeclContext(D.getCXXScopeSpec()); if (!DC || DC->isRecord()) Kind = Member; } // C++11 [dcl.fct]p6 (w/DR1417): // An attempt to specify a function type with a cv-qualifier-seq or a // ref-qualifier (including by typedef-name) is ill-formed unless it is: // - the function type for a non-static member function, // - the function type to which a pointer to member refers, // - the top-level function type of a function typedef declaration or // alias-declaration, // - the type-id in the default argument of a type-parameter, or // - the type-id of a template-argument for a type-parameter // // FIXME: Checking this here is insufficient. We accept-invalid on: // // template<typename T> struct S { void f(T); }; // S<int() const> s; // // ... for instance.
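The -Wstrict-prototypes warning issued above, in user terms (C; illustrative):

    void f();        /* warning: this declaration is not a prototype; the
                        fix-it suggests inserting "void"                    */
    void f(void);    /* OK: a real prototype                                */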
if (IsQualifiedFunction && !(Kind == Member && D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static) && !IsTypedefName && D.getContext() != DeclaratorContext::TemplateArgContext && D.getContext() != DeclaratorContext::TemplateTypeArgContext) { SourceLocation Loc = D.getBeginLoc(); SourceRange RemovalRange; unsigned I; if (D.isFunctionDeclarator(I)) { SmallVector RemovalLocs; const DeclaratorChunk &Chunk = D.getTypeObject(I); assert(Chunk.Kind == DeclaratorChunk::Function); if (Chunk.Fun.hasRefQualifier()) RemovalLocs.push_back(Chunk.Fun.getRefQualifierLoc()); if (Chunk.Fun.hasMethodTypeQualifiers()) Chunk.Fun.MethodQualifiers->forEachQualifier( [&](DeclSpec::TQ TypeQual, StringRef QualName, SourceLocation SL) { RemovalLocs.push_back(SL); }); if (!RemovalLocs.empty()) { llvm::sort(RemovalLocs, BeforeThanCompare(S.getSourceManager())); RemovalRange = SourceRange(RemovalLocs.front(), RemovalLocs.back()); Loc = RemovalLocs.front(); } } S.Diag(Loc, diag::err_invalid_qualified_function_type) << Kind << D.isFunctionDeclarator() << T << getFunctionQualifiersAsString(FnTy) << FixItHint::CreateRemoval(RemovalRange); // Strip the cv-qualifiers and ref-qualifiers from the type. FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo(); EPI.TypeQuals.removeCVRQualifiers(); EPI.RefQualifier = RQ_None; T = Context.getFunctionType(FnTy->getReturnType(), FnTy->getParamTypes(), EPI); // Rebuild any parens around the identifier in the function type. for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) { if (D.getTypeObject(i).Kind != DeclaratorChunk::Paren) break; T = S.BuildParenType(T); } } } // Apply any undistributed attributes from the declarator. processTypeAttrs(state, T, TAL_DeclName, D.getAttributes()); // Diagnose any ignored type attributes. state.diagnoseIgnoredTypeAttrs(T); // C++0x [dcl.constexpr]p9: // A constexpr specifier used in an object declaration declares the object // as const. if (D.getDeclSpec().getConstexprSpecifier() == CSK_constexpr && T->isObjectType()) T.addConst(); // C++2a [dcl.fct]p4: // A parameter with volatile-qualified type is deprecated if (T.isVolatileQualified() && S.getLangOpts().CPlusPlus20 && (D.getContext() == DeclaratorContext::PrototypeContext || D.getContext() == DeclaratorContext::LambdaExprParameterContext)) S.Diag(D.getIdentifierLoc(), diag::warn_deprecated_volatile_param) << T; // If there was an ellipsis in the declarator, the declaration declares a // parameter pack whose type may be a pack expansion type. if (D.hasEllipsis()) { // C++0x [dcl.fct]p13: // A declarator-id or abstract-declarator containing an ellipsis shall // only be used in a parameter-declaration. Such a parameter-declaration // is a parameter pack (14.5.3). [...] switch (D.getContext()) { case DeclaratorContext::PrototypeContext: case DeclaratorContext::LambdaExprParameterContext: case DeclaratorContext::RequiresExprContext: // C++0x [dcl.fct]p13: // [...] When it is part of a parameter-declaration-clause, the // parameter pack is a function parameter pack (14.5.3). The type T // of the declarator-id of the function parameter pack shall contain // a template parameter pack; each template parameter pack in T is // expanded by the function parameter pack. // // We represent function parameter packs as function parameters whose // type is a pack expansion. 
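Examples of the C++11 [dcl.fct]p6 restriction checked above (illustrative):

    struct X {
      void mf() const;             // OK: non-static member function
      static void sf() const;      // error: static member function cannot have
                                   //        a cv-qualifier
                                   //        (err_invalid_qualified_function_type)
    };
    void g() &&;                   // error: non-member function cannot have a
                                   //        ref-qualifier
    typedef void ftype() const;    // OK: top-level function type of a typedef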
if (!T->containsUnexpandedParameterPack() && (!LangOpts.CPlusPlus20 || !T->getContainedAutoType())) { S.Diag(D.getEllipsisLoc(), diag::err_function_parameter_pack_without_parameter_packs) << T << D.getSourceRange(); D.setEllipsisLoc(SourceLocation()); } else { - T = Context.getPackExpansionType(T, None); + T = Context.getPackExpansionType(T, None, /*ExpectPackInType=*/false); } break; case DeclaratorContext::TemplateParamContext: // C++0x [temp.param]p15: // If a template-parameter is a [...] is a parameter-declaration that // declares a parameter pack (8.3.5), then the template-parameter is a // template parameter pack (14.5.3). // // Note: core issue 778 clarifies that, if there are any unexpanded // parameter packs in the type of the non-type template parameter, then // it expands those parameter packs. if (T->containsUnexpandedParameterPack()) T = Context.getPackExpansionType(T, None); else S.Diag(D.getEllipsisLoc(), LangOpts.CPlusPlus11 ? diag::warn_cxx98_compat_variadic_templates : diag::ext_variadic_templates); break; case DeclaratorContext::FileContext: case DeclaratorContext::KNRTypeListContext: case DeclaratorContext::ObjCParameterContext: // FIXME: special diagnostic // here? case DeclaratorContext::ObjCResultContext: // FIXME: special diagnostic // here? case DeclaratorContext::TypeNameContext: case DeclaratorContext::FunctionalCastContext: case DeclaratorContext::CXXNewContext: case DeclaratorContext::AliasDeclContext: case DeclaratorContext::AliasTemplateContext: case DeclaratorContext::MemberContext: case DeclaratorContext::BlockContext: case DeclaratorContext::ForContext: case DeclaratorContext::InitStmtContext: case DeclaratorContext::ConditionContext: case DeclaratorContext::CXXCatchContext: case DeclaratorContext::ObjCCatchContext: case DeclaratorContext::BlockLiteralContext: case DeclaratorContext::LambdaExprContext: case DeclaratorContext::ConversionIdContext: case DeclaratorContext::TrailingReturnContext: case DeclaratorContext::TrailingReturnVarContext: case DeclaratorContext::TemplateArgContext: case DeclaratorContext::TemplateTypeArgContext: // FIXME: We may want to allow parameter packs in block-literal contexts // in the future. S.Diag(D.getEllipsisLoc(), diag::err_ellipsis_in_declarator_not_parameter); D.setEllipsisLoc(SourceLocation()); break; } } assert(!T.isNull() && "T must not be null at the end of this function"); if (D.isInvalidType()) return Context.getTrivialTypeSourceInfo(T); return GetTypeSourceInfoForDeclarator(state, T, TInfo); } /// GetTypeForDeclarator - Convert the type for the specified /// declarator to Type instances. /// /// The result of this call will never be null, but the associated /// type may be a null type if there's an unrecoverable error. TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S) { // Determine the type of the declarator. Not all forms of declarator // have a type. 
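The relaxed check above lets a C++20 abbreviated function template declare a parameter pack whose written type is just a placeholder (illustrative):

    void print_all(auto... values);   // OK in C++20: each 'auto' invents a
                                      // template parameter pack
    template <typename... Ts>
    void forward_all(Ts &&...args);   // the classical spelling

    void bad(int... xs);              // error: the type contains no parameter
                                      // pack to expand
                                      // (err_function_parameter_pack_without_parameter_packs)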
TypeProcessingState state(*this, D); TypeSourceInfo *ReturnTypeInfo = nullptr; QualType T = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo); if (D.isPrototypeContext() && getLangOpts().ObjCAutoRefCount) inferARCWriteback(state, T); return GetFullTypeForDeclarator(state, T, ReturnTypeInfo); } static void transferARCOwnershipToDeclSpec(Sema &S, QualType &declSpecTy, Qualifiers::ObjCLifetime ownership) { if (declSpecTy->isObjCRetainableType() && declSpecTy.getObjCLifetime() == Qualifiers::OCL_None) { Qualifiers qs; qs.addObjCLifetime(ownership); declSpecTy = S.Context.getQualifiedType(declSpecTy, qs); } } static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state, Qualifiers::ObjCLifetime ownership, unsigned chunkIndex) { Sema &S = state.getSema(); Declarator &D = state.getDeclarator(); // Look for an explicit lifetime attribute. DeclaratorChunk &chunk = D.getTypeObject(chunkIndex); if (chunk.getAttrs().hasAttribute(ParsedAttr::AT_ObjCOwnership)) return; const char *attrStr = nullptr; switch (ownership) { case Qualifiers::OCL_None: llvm_unreachable("no ownership!"); case Qualifiers::OCL_ExplicitNone: attrStr = "none"; break; case Qualifiers::OCL_Strong: attrStr = "strong"; break; case Qualifiers::OCL_Weak: attrStr = "weak"; break; case Qualifiers::OCL_Autoreleasing: attrStr = "autoreleasing"; break; } IdentifierLoc *Arg = new (S.Context) IdentifierLoc; Arg->Ident = &S.Context.Idents.get(attrStr); Arg->Loc = SourceLocation(); ArgsUnion Args(Arg); // If there wasn't one, add one (with an invalid source location // so that we don't make an AttributedType for it). ParsedAttr *attr = D.getAttributePool().create( &S.Context.Idents.get("objc_ownership"), SourceLocation(), /*scope*/ nullptr, SourceLocation(), /*args*/ &Args, 1, ParsedAttr::AS_GNU); chunk.getAttrs().addAtEnd(attr); // TODO: mark whether we did this inference? } /// Used for transferring ownership in casts resulting in l-values. static void transferARCOwnership(TypeProcessingState &state, QualType &declSpecTy, Qualifiers::ObjCLifetime ownership) { Sema &S = state.getSema(); Declarator &D = state.getDeclarator(); int inner = -1; bool hasIndirection = false; for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) { DeclaratorChunk &chunk = D.getTypeObject(i); switch (chunk.Kind) { case DeclaratorChunk::Paren: // Ignore parens. 
break; case DeclaratorChunk::Array: case DeclaratorChunk::Reference: case DeclaratorChunk::Pointer: if (inner != -1) hasIndirection = true; inner = i; break; case DeclaratorChunk::BlockPointer: if (inner != -1) transferARCOwnershipToDeclaratorChunk(state, ownership, i); return; case DeclaratorChunk::Function: case DeclaratorChunk::MemberPointer: case DeclaratorChunk::Pipe: return; } } if (inner == -1) return; DeclaratorChunk &chunk = D.getTypeObject(inner); if (chunk.Kind == DeclaratorChunk::Pointer) { if (declSpecTy->isObjCRetainableType()) return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership); if (declSpecTy->isObjCObjectType() && hasIndirection) return transferARCOwnershipToDeclaratorChunk(state, ownership, inner); } else { assert(chunk.Kind == DeclaratorChunk::Array || chunk.Kind == DeclaratorChunk::Reference); return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership); } } TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) { TypeProcessingState state(*this, D); TypeSourceInfo *ReturnTypeInfo = nullptr; QualType declSpecTy = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo); if (getLangOpts().ObjC) { Qualifiers::ObjCLifetime ownership = Context.getInnerObjCOwnership(FromTy); if (ownership != Qualifiers::OCL_None) transferARCOwnership(state, declSpecTy, ownership); } return GetFullTypeForDeclarator(state, declSpecTy, ReturnTypeInfo); } static void fillAttributedTypeLoc(AttributedTypeLoc TL, TypeProcessingState &State) { TL.setAttr(State.takeAttrForAttributedType(TL.getTypePtr())); } namespace { class TypeSpecLocFiller : public TypeLocVisitor { Sema &SemaRef; ASTContext &Context; TypeProcessingState &State; const DeclSpec &DS; public: TypeSpecLocFiller(Sema &S, ASTContext &Context, TypeProcessingState &State, const DeclSpec &DS) : SemaRef(S), Context(Context), State(State), DS(DS) {} void VisitAttributedTypeLoc(AttributedTypeLoc TL) { Visit(TL.getModifiedLoc()); fillAttributedTypeLoc(TL, State); } void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) { Visit(TL.getInnerLoc()); TL.setExpansionLoc( State.getExpansionLocForMacroQualifiedType(TL.getTypePtr())); } void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) { Visit(TL.getUnqualifiedLoc()); } void VisitTypedefTypeLoc(TypedefTypeLoc TL) { TL.setNameLoc(DS.getTypeSpecTypeLoc()); } void VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) { TL.setNameLoc(DS.getTypeSpecTypeLoc()); // FIXME. We should have DS.getTypeSpecTypeEndLoc(). But, it requires // addition field. What we have is good enough for dispay of location // of 'fixit' on interface name. TL.setNameEndLoc(DS.getEndLoc()); } void VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) { TypeSourceInfo *RepTInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &RepTInfo); TL.copy(RepTInfo->getTypeLoc()); } void VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) { TypeSourceInfo *RepTInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &RepTInfo); TL.copy(RepTInfo->getTypeLoc()); } void VisitTemplateSpecializationTypeLoc(TemplateSpecializationTypeLoc TL) { TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); // If we got no declarator info from previous Sema routines, // just fill with the typespec loc. 
if (!TInfo) { TL.initialize(Context, DS.getTypeSpecTypeNameLoc()); return; } TypeLoc OldTL = TInfo->getTypeLoc(); if (TInfo->getType()->getAs()) { ElaboratedTypeLoc ElabTL = OldTL.castAs(); TemplateSpecializationTypeLoc NamedTL = ElabTL.getNamedTypeLoc() .castAs(); TL.copy(NamedTL); } else { TL.copy(OldTL.castAs()); assert(TL.getRAngleLoc() == OldTL.castAs().getRAngleLoc()); } } void VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) { assert(DS.getTypeSpecType() == DeclSpec::TST_typeofExpr); TL.setTypeofLoc(DS.getTypeSpecTypeLoc()); TL.setParensRange(DS.getTypeofParensRange()); } void VisitTypeOfTypeLoc(TypeOfTypeLoc TL) { assert(DS.getTypeSpecType() == DeclSpec::TST_typeofType); TL.setTypeofLoc(DS.getTypeSpecTypeLoc()); TL.setParensRange(DS.getTypeofParensRange()); assert(DS.getRepAsType()); TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); TL.setUnderlyingTInfo(TInfo); } void VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) { // FIXME: This holds only because we only have one unary transform. assert(DS.getTypeSpecType() == DeclSpec::TST_underlyingType); TL.setKWLoc(DS.getTypeSpecTypeLoc()); TL.setParensRange(DS.getTypeofParensRange()); assert(DS.getRepAsType()); TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); TL.setUnderlyingTInfo(TInfo); } void VisitBuiltinTypeLoc(BuiltinTypeLoc TL) { // By default, use the source location of the type specifier. TL.setBuiltinLoc(DS.getTypeSpecTypeLoc()); if (TL.needsExtraLocalData()) { // Set info for the written builtin specifiers. TL.getWrittenBuiltinSpecs() = DS.getWrittenBuiltinSpecs(); // Try to have a meaningful source location. if (TL.getWrittenSignSpec() != TSS_unspecified) TL.expandBuiltinRange(DS.getTypeSpecSignLoc()); if (TL.getWrittenWidthSpec() != TSW_unspecified) TL.expandBuiltinRange(DS.getTypeSpecWidthRange()); } } void VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) { ElaboratedTypeKeyword Keyword = TypeWithKeyword::getKeywordForTypeSpec(DS.getTypeSpecType()); if (DS.getTypeSpecType() == TST_typename) { TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); if (TInfo) { TL.copy(TInfo->getTypeLoc().castAs()); return; } } TL.setElaboratedKeywordLoc(Keyword != ETK_None ? 
DS.getTypeSpecTypeLoc() : SourceLocation()); const CXXScopeSpec& SS = DS.getTypeSpecScope(); TL.setQualifierLoc(SS.getWithLocInContext(Context)); Visit(TL.getNextTypeLoc().getUnqualifiedLoc()); } void VisitDependentNameTypeLoc(DependentNameTypeLoc TL) { assert(DS.getTypeSpecType() == TST_typename); TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); assert(TInfo); TL.copy(TInfo->getTypeLoc().castAs()); } void VisitDependentTemplateSpecializationTypeLoc( DependentTemplateSpecializationTypeLoc TL) { assert(DS.getTypeSpecType() == TST_typename); TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); assert(TInfo); TL.copy( TInfo->getTypeLoc().castAs()); } void VisitAutoTypeLoc(AutoTypeLoc TL) { assert(DS.getTypeSpecType() == TST_auto || DS.getTypeSpecType() == TST_decltype_auto || DS.getTypeSpecType() == TST_auto_type || DS.getTypeSpecType() == TST_unspecified); TL.setNameLoc(DS.getTypeSpecTypeLoc()); if (!DS.isConstrainedAuto()) return; TemplateIdAnnotation *TemplateId = DS.getRepAsTemplateId(); if (DS.getTypeSpecScope().isNotEmpty()) TL.setNestedNameSpecifierLoc( DS.getTypeSpecScope().getWithLocInContext(Context)); else TL.setNestedNameSpecifierLoc(NestedNameSpecifierLoc()); TL.setTemplateKWLoc(TemplateId->TemplateKWLoc); TL.setConceptNameLoc(TemplateId->TemplateNameLoc); TL.setFoundDecl(nullptr); TL.setLAngleLoc(TemplateId->LAngleLoc); TL.setRAngleLoc(TemplateId->RAngleLoc); if (TemplateId->NumArgs == 0) return; TemplateArgumentListInfo TemplateArgsInfo; ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(), TemplateId->NumArgs); SemaRef.translateTemplateArguments(TemplateArgsPtr, TemplateArgsInfo); for (unsigned I = 0; I < TemplateId->NumArgs; ++I) TL.setArgLocInfo(I, TemplateArgsInfo.arguments()[I].getLocInfo()); } void VisitTagTypeLoc(TagTypeLoc TL) { TL.setNameLoc(DS.getTypeSpecTypeNameLoc()); } void VisitAtomicTypeLoc(AtomicTypeLoc TL) { // An AtomicTypeLoc can come from either an _Atomic(...) type specifier // or an _Atomic qualifier. if (DS.getTypeSpecType() == DeclSpec::TST_atomic) { TL.setKWLoc(DS.getTypeSpecTypeLoc()); TL.setParensRange(DS.getTypeofParensRange()); TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); assert(TInfo); TL.getValueLoc().initializeFullCopy(TInfo->getTypeLoc()); } else { TL.setKWLoc(DS.getAtomicSpecLoc()); // No parens, to indicate this was spelled as an _Atomic qualifier. TL.setParensRange(SourceRange()); Visit(TL.getValueLoc()); } } void VisitPipeTypeLoc(PipeTypeLoc TL) { TL.setKWLoc(DS.getTypeSpecTypeLoc()); TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); TL.getValueLoc().initializeFullCopy(TInfo->getTypeLoc()); } void VisitExtIntTypeLoc(ExtIntTypeLoc TL) { TL.setNameLoc(DS.getTypeSpecTypeLoc()); } void VisitDependentExtIntTypeLoc(DependentExtIntTypeLoc TL) { TL.setNameLoc(DS.getTypeSpecTypeLoc()); } void VisitTypeLoc(TypeLoc TL) { // FIXME: add other typespec types and change this to an assert. 
TL.initialize(Context, DS.getTypeSpecTypeLoc()); } }; class DeclaratorLocFiller : public TypeLocVisitor { ASTContext &Context; TypeProcessingState &State; const DeclaratorChunk &Chunk; public: DeclaratorLocFiller(ASTContext &Context, TypeProcessingState &State, const DeclaratorChunk &Chunk) : Context(Context), State(State), Chunk(Chunk) {} void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) { llvm_unreachable("qualified type locs not expected here!"); } void VisitDecayedTypeLoc(DecayedTypeLoc TL) { llvm_unreachable("decayed type locs not expected here!"); } void VisitAttributedTypeLoc(AttributedTypeLoc TL) { fillAttributedTypeLoc(TL, State); } void VisitAdjustedTypeLoc(AdjustedTypeLoc TL) { // nothing } void VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::BlockPointer); TL.setCaretLoc(Chunk.Loc); } void VisitPointerTypeLoc(PointerTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Pointer); TL.setStarLoc(Chunk.Loc); } void VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Pointer); TL.setStarLoc(Chunk.Loc); } void VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::MemberPointer); const CXXScopeSpec& SS = Chunk.Mem.Scope(); NestedNameSpecifierLoc NNSLoc = SS.getWithLocInContext(Context); const Type* ClsTy = TL.getClass(); QualType ClsQT = QualType(ClsTy, 0); TypeSourceInfo *ClsTInfo = Context.CreateTypeSourceInfo(ClsQT, 0); // Now copy source location info into the type loc component. TypeLoc ClsTL = ClsTInfo->getTypeLoc(); switch (NNSLoc.getNestedNameSpecifier()->getKind()) { case NestedNameSpecifier::Identifier: assert(isa(ClsTy) && "Unexpected TypeLoc"); { DependentNameTypeLoc DNTLoc = ClsTL.castAs(); DNTLoc.setElaboratedKeywordLoc(SourceLocation()); DNTLoc.setQualifierLoc(NNSLoc.getPrefix()); DNTLoc.setNameLoc(NNSLoc.getLocalBeginLoc()); } break; case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: if (isa(ClsTy)) { ElaboratedTypeLoc ETLoc = ClsTL.castAs(); ETLoc.setElaboratedKeywordLoc(SourceLocation()); ETLoc.setQualifierLoc(NNSLoc.getPrefix()); TypeLoc NamedTL = ETLoc.getNamedTypeLoc(); NamedTL.initializeFullCopy(NNSLoc.getTypeLoc()); } else { ClsTL.initializeFullCopy(NNSLoc.getTypeLoc()); } break; case NestedNameSpecifier::Namespace: case NestedNameSpecifier::NamespaceAlias: case NestedNameSpecifier::Global: case NestedNameSpecifier::Super: llvm_unreachable("Nested-name-specifier must name a type"); } // Finally fill in MemberPointerLocInfo fields. TL.setStarLoc(SourceLocation::getFromRawEncoding(Chunk.Mem.StarLoc)); TL.setClassTInfo(ClsTInfo); } void VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Reference); // 'Amp' is misleading: this might have been originally /// spelled with AmpAmp. 
TL.setAmpLoc(Chunk.Loc); } void VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Reference); assert(!Chunk.Ref.LValueRef); TL.setAmpAmpLoc(Chunk.Loc); } void VisitArrayTypeLoc(ArrayTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Array); TL.setLBracketLoc(Chunk.Loc); TL.setRBracketLoc(Chunk.EndLoc); TL.setSizeExpr(static_cast(Chunk.Arr.NumElts)); } void VisitFunctionTypeLoc(FunctionTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Function); TL.setLocalRangeBegin(Chunk.Loc); TL.setLocalRangeEnd(Chunk.EndLoc); const DeclaratorChunk::FunctionTypeInfo &FTI = Chunk.Fun; TL.setLParenLoc(FTI.getLParenLoc()); TL.setRParenLoc(FTI.getRParenLoc()); for (unsigned i = 0, e = TL.getNumParams(), tpi = 0; i != e; ++i) { ParmVarDecl *Param = cast(FTI.Params[i].Param); TL.setParam(tpi++, Param); } TL.setExceptionSpecRange(FTI.getExceptionSpecRange()); } void VisitParenTypeLoc(ParenTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Paren); TL.setLParenLoc(Chunk.Loc); TL.setRParenLoc(Chunk.EndLoc); } void VisitPipeTypeLoc(PipeTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Pipe); TL.setKWLoc(Chunk.Loc); } void VisitExtIntTypeLoc(ExtIntTypeLoc TL) { TL.setNameLoc(Chunk.Loc); } void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) { TL.setExpansionLoc(Chunk.Loc); } void VisitTypeLoc(TypeLoc TL) { llvm_unreachable("unsupported TypeLoc kind in declarator!"); } }; } // end anonymous namespace static void fillAtomicQualLoc(AtomicTypeLoc ATL, const DeclaratorChunk &Chunk) { SourceLocation Loc; switch (Chunk.Kind) { case DeclaratorChunk::Function: case DeclaratorChunk::Array: case DeclaratorChunk::Paren: case DeclaratorChunk::Pipe: llvm_unreachable("cannot be _Atomic qualified"); case DeclaratorChunk::Pointer: Loc = SourceLocation::getFromRawEncoding(Chunk.Ptr.AtomicQualLoc); break; case DeclaratorChunk::BlockPointer: case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: // FIXME: Provide a source location for the _Atomic keyword. break; } ATL.setKWLoc(Loc); ATL.setParensRange(SourceRange()); } static void fillDependentAddressSpaceTypeLoc(DependentAddressSpaceTypeLoc DASTL, const ParsedAttributesView &Attrs) { for (const ParsedAttr &AL : Attrs) { if (AL.getKind() == ParsedAttr::AT_AddressSpace) { DASTL.setAttrNameLoc(AL.getLoc()); DASTL.setAttrExprOperand(AL.getArgAsExpr(0)); DASTL.setAttrOperandParensRange(SourceRange()); return; } } llvm_unreachable( "no address_space attribute found at the expected location!"); } static void fillMatrixTypeLoc(MatrixTypeLoc MTL, const ParsedAttributesView &Attrs) { for (const ParsedAttr &AL : Attrs) { if (AL.getKind() == ParsedAttr::AT_MatrixType) { MTL.setAttrNameLoc(AL.getLoc()); MTL.setAttrRowOperand(AL.getArgAsExpr(0)); MTL.setAttrColumnOperand(AL.getArgAsExpr(1)); MTL.setAttrOperandParensRange(SourceRange()); return; } } llvm_unreachable("no matrix_type attribute found at the expected location!"); } /// Create and instantiate a TypeSourceInfo with type source information. /// /// \param T QualType referring to the type as written in source code. /// /// \param ReturnTypeInfo For declarators whose return type does not show /// up in the normal place in the declaration specifiers (such as a C++ /// conversion function), this pointer will refer to a type source information /// for that return type. 
static TypeSourceInfo * GetTypeSourceInfoForDeclarator(TypeProcessingState &State, QualType T, TypeSourceInfo *ReturnTypeInfo) { Sema &S = State.getSema(); Declarator &D = State.getDeclarator(); TypeSourceInfo *TInfo = S.Context.CreateTypeSourceInfo(T); UnqualTypeLoc CurrTL = TInfo->getTypeLoc().getUnqualifiedLoc(); // Handle parameter packs whose type is a pack expansion. if (isa(T)) { CurrTL.castAs().setEllipsisLoc(D.getEllipsisLoc()); CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc(); } for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) { // An AtomicTypeLoc might be produced by an atomic qualifier in this // declarator chunk. if (AtomicTypeLoc ATL = CurrTL.getAs()) { fillAtomicQualLoc(ATL, D.getTypeObject(i)); CurrTL = ATL.getValueLoc().getUnqualifiedLoc(); } while (MacroQualifiedTypeLoc TL = CurrTL.getAs()) { TL.setExpansionLoc( State.getExpansionLocForMacroQualifiedType(TL.getTypePtr())); CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc(); } while (AttributedTypeLoc TL = CurrTL.getAs()) { fillAttributedTypeLoc(TL, State); CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc(); } while (DependentAddressSpaceTypeLoc TL = CurrTL.getAs()) { fillDependentAddressSpaceTypeLoc(TL, D.getTypeObject(i).getAttrs()); CurrTL = TL.getPointeeTypeLoc().getUnqualifiedLoc(); } if (MatrixTypeLoc TL = CurrTL.getAs()) fillMatrixTypeLoc(TL, D.getTypeObject(i).getAttrs()); // FIXME: Ordering here? while (AdjustedTypeLoc TL = CurrTL.getAs()) CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc(); DeclaratorLocFiller(S.Context, State, D.getTypeObject(i)).Visit(CurrTL); CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc(); } // If we have different source information for the return type, use // that. This really only applies to C++ conversion functions. if (ReturnTypeInfo) { TypeLoc TL = ReturnTypeInfo->getTypeLoc(); assert(TL.getFullDataSize() == CurrTL.getFullDataSize()); memcpy(CurrTL.getOpaqueData(), TL.getOpaqueData(), TL.getFullDataSize()); } else { TypeSpecLocFiller(S, S.Context, State, D.getDeclSpec()).Visit(CurrTL); } return TInfo; } /// Create a LocInfoType to hold the given QualType and TypeSourceInfo. ParsedType Sema::CreateParsedType(QualType T, TypeSourceInfo *TInfo) { // FIXME: LocInfoTypes are "transient", only needed for passing to/from Parser // and Sema during declaration parsing. Try deallocating/caching them when // it's appropriate, instead of allocating them and keeping them around. LocInfoType *LocT = (LocInfoType*)BumpAlloc.Allocate(sizeof(LocInfoType), TypeAlignment); new (LocT) LocInfoType(T, TInfo); assert(LocT->getTypeClass() != T->getTypeClass() && "LocInfoType's TypeClass conflicts with an existing Type class"); return ParsedType::make(QualType(LocT, 0)); } void LocInfoType::getAsStringInternal(std::string &Str, const PrintingPolicy &Policy) const { llvm_unreachable("LocInfoType leaked into the type system; an opaque TypeTy*" " was used directly instead of getting the QualType through" " GetTypeFromParser"); } TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) { // C99 6.7.6: Type names have no identifier. This is already validated by // the parser. assert(D.getIdentifier() == nullptr && "Type name should have no identifier!"); TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); QualType T = TInfo->getType(); if (D.isInvalidType()) return true; // Make sure there are no unused decl attributes on the declarator. // We don't want to do this for ObjC parameters because we're going // to apply them to the actual parameter declaration. 
// Likewise, we don't want to do this for alias declarations, because // we are actually going to build a declaration from this eventually. if (D.getContext() != DeclaratorContext::ObjCParameterContext && D.getContext() != DeclaratorContext::AliasDeclContext && D.getContext() != DeclaratorContext::AliasTemplateContext) checkUnusedDeclAttributes(D); if (getLangOpts().CPlusPlus) { // Check that there are no default arguments (C++ only). CheckExtraCXXDefaultArguments(D); } return CreateParsedType(T, TInfo); } ParsedType Sema::ActOnObjCInstanceType(SourceLocation Loc) { QualType T = Context.getObjCInstanceType(); TypeSourceInfo *TInfo = Context.getTrivialTypeSourceInfo(T, Loc); return CreateParsedType(T, TInfo); } //===----------------------------------------------------------------------===// // Type Attribute Processing //===----------------------------------------------------------------------===// /// Build an AddressSpace index from a constant expression and diagnose any /// errors related to invalid address_spaces. Returns true on successfully /// building an AddressSpace index. static bool BuildAddressSpaceIndex(Sema &S, LangAS &ASIdx, const Expr *AddrSpace, SourceLocation AttrLoc) { if (!AddrSpace->isValueDependent()) { llvm::APSInt addrSpace(32); if (!AddrSpace->isIntegerConstantExpr(addrSpace, S.Context)) { S.Diag(AttrLoc, diag::err_attribute_argument_type) << "'address_space'" << AANT_ArgumentIntegerConstant << AddrSpace->getSourceRange(); return false; } // Bounds checking. if (addrSpace.isSigned()) { if (addrSpace.isNegative()) { S.Diag(AttrLoc, diag::err_attribute_address_space_negative) << AddrSpace->getSourceRange(); return false; } addrSpace.setIsSigned(false); } llvm::APSInt max(addrSpace.getBitWidth()); max = Qualifiers::MaxAddressSpace - (unsigned)LangAS::FirstTargetAddressSpace; if (addrSpace > max) { S.Diag(AttrLoc, diag::err_attribute_address_space_too_high) << (unsigned)max.getZExtValue() << AddrSpace->getSourceRange(); return false; } ASIdx = getLangASFromTargetAS(static_cast(addrSpace.getZExtValue())); return true; } // Default value for DependentAddressSpaceTypes ASIdx = LangAS::Default; return true; } /// BuildAddressSpaceAttr - Builds a DependentAddressSpaceType if an expression /// is uninstantiated. If instantiated it will apply the appropriate address /// space to the type. 
This function allows dependent template variables to be /// used in conjunction with the address_space attribute. QualType Sema::BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc) { if (!AddrSpace->isValueDependent()) { if (DiagnoseMultipleAddrSpaceAttributes(*this, T.getAddressSpace(), ASIdx, AttrLoc)) return QualType(); return Context.getAddrSpaceQualType(T, ASIdx); } // A check with similar intentions as checking if a type already has an // address space, except on dependent types: if the // current type is already a DependentAddressSpaceType then it's already // lined up to have another address space on it and we can't have // multiple address spaces on one pointer indirection. if (T->getAs()) { Diag(AttrLoc, diag::err_attribute_address_multiple_qualifiers); return QualType(); } return Context.getDependentAddressSpaceType(T, AddrSpace, AttrLoc); } QualType Sema::BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc) { LangAS ASIdx; if (!BuildAddressSpaceIndex(*this, ASIdx, AddrSpace, AttrLoc)) return QualType(); return BuildAddressSpaceAttr(T, ASIdx, AddrSpace, AttrLoc); } /// HandleAddressSpaceTypeAttribute - Process an address_space attribute on the /// specified type. The attribute contains 1 argument, the id of the address /// space for the type. static void HandleAddressSpaceTypeAttribute(QualType &Type, const ParsedAttr &Attr, TypeProcessingState &State) { Sema &S = State.getSema(); // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "A function type shall not be // qualified by an address-space qualifier." if (Type->isFunctionType()) { S.Diag(Attr.getLoc(), diag::err_attribute_address_function_type); Attr.setInvalid(); return; } LangAS ASIdx; if (Attr.getKind() == ParsedAttr::AT_AddressSpace) { // Check the attribute arguments. if (Attr.getNumArgs() != 1) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr << 1; Attr.setInvalid(); return; } Expr *ASArgExpr; if (Attr.isArgIdent(0)) { // Special case where the argument is a template id. CXXScopeSpec SS; SourceLocation TemplateKWLoc; UnqualifiedId id; id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc()); ExprResult AddrSpace = S.ActOnIdExpression( S.getCurScope(), SS, TemplateKWLoc, id, /*HasTrailingLParen=*/false, /*IsAddressOfOperand=*/false); if (AddrSpace.isInvalid()) return; ASArgExpr = static_cast(AddrSpace.get()); } else { ASArgExpr = static_cast(Attr.getArgAsExpr(0)); } LangAS ASIdx; if (!BuildAddressSpaceIndex(S, ASIdx, ASArgExpr, Attr.getLoc())) { Attr.setInvalid(); return; } ASTContext &Ctx = S.Context; auto *ASAttr = ::new (Ctx) AddressSpaceAttr(Ctx, Attr, static_cast(ASIdx)); // If the expression is not value dependent (not templated), then we can // apply the address space qualifiers just to the equivalent type. // Otherwise, we make an AttributedType with the modified and equivalent // type the same, and wrap it in a DependentAddressSpaceType. When this // dependent type is resolved, the qualifier is added to the equivalent type // later.
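// Illustration: a minimal user-level sketch of the two paths handled here,
// assuming the GNU spelling of the attribute (the names below are
// hypothetical examples, not from this file):
//
//   typedef int __attribute__((address_space(1))) as1_int;   // constant arg:
//                                                             // qualifier applied directly
//   template <int AS>
//   void f(int __attribute__((address_space(AS))) *p);        // dependent arg:
//                                                             // DependentAddressSpaceType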
QualType T; if (!ASArgExpr->isValueDependent()) { QualType EquivType = S.BuildAddressSpaceAttr(Type, ASIdx, ASArgExpr, Attr.getLoc()); if (EquivType.isNull()) { Attr.setInvalid(); return; } T = State.getAttributedType(ASAttr, Type, EquivType); } else { T = State.getAttributedType(ASAttr, Type, Type); T = S.BuildAddressSpaceAttr(T, ASIdx, ASArgExpr, Attr.getLoc()); } if (!T.isNull()) Type = T; else Attr.setInvalid(); } else { // The keyword-based type attributes imply which address space to use. ASIdx = Attr.asOpenCLLangAS(); if (ASIdx == LangAS::Default) llvm_unreachable("Invalid address space"); if (DiagnoseMultipleAddrSpaceAttributes(S, Type.getAddressSpace(), ASIdx, Attr.getLoc())) { Attr.setInvalid(); return; } Type = S.Context.getAddrSpaceQualType(Type, ASIdx); } } /// handleObjCOwnershipTypeAttr - Process an objc_ownership /// attribute on the specified type. /// /// Returns 'true' if the attribute was handled. static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state, ParsedAttr &attr, QualType &type) { bool NonObjCPointer = false; if (!type->isDependentType() && !type->isUndeducedType()) { if (const PointerType *ptr = type->getAs()) { QualType pointee = ptr->getPointeeType(); if (pointee->isObjCRetainableType() || pointee->isPointerType()) return false; // It is important not to lose the source info that there was an attribute // applied to non-objc pointer. We will create an attributed type but // its type will be the same as the original type. NonObjCPointer = true; } else if (!type->isObjCRetainableType()) { return false; } // Don't accept an ownership attribute in the declspec if it would // just be the return type of a block pointer. if (state.isProcessingDeclSpec()) { Declarator &D = state.getDeclarator(); if (maybeMovePastReturnType(D, D.getNumTypeObjects(), /*onlyBlockPointers=*/true)) return false; } } Sema &S = state.getSema(); SourceLocation AttrLoc = attr.getLoc(); if (AttrLoc.isMacroID()) AttrLoc = S.getSourceManager().getImmediateExpansionRange(AttrLoc).getBegin(); if (!attr.isArgIdent(0)) { S.Diag(AttrLoc, diag::err_attribute_argument_type) << attr << AANT_ArgumentString; attr.setInvalid(); return true; } IdentifierInfo *II = attr.getArgAsIdent(0)->Ident; Qualifiers::ObjCLifetime lifetime; if (II->isStr("none")) lifetime = Qualifiers::OCL_ExplicitNone; else if (II->isStr("strong")) lifetime = Qualifiers::OCL_Strong; else if (II->isStr("weak")) lifetime = Qualifiers::OCL_Weak; else if (II->isStr("autoreleasing")) lifetime = Qualifiers::OCL_Autoreleasing; else { S.Diag(AttrLoc, diag::warn_attribute_type_not_supported) << attr << II; attr.setInvalid(); return true; } // Just ignore lifetime attributes other than __weak and __unsafe_unretained // outside of ARC mode. if (!S.getLangOpts().ObjCAutoRefCount && lifetime != Qualifiers::OCL_Weak && lifetime != Qualifiers::OCL_ExplicitNone) { return true; } SplitQualType underlyingType = type.split(); // Check for redundant/conflicting ownership qualifiers. if (Qualifiers::ObjCLifetime previousLifetime = type.getQualifiers().getObjCLifetime()) { // If it's written directly, that's an error. if (S.Context.hasDirectOwnershipQualifier(type)) { S.Diag(AttrLoc, diag::err_attr_objc_ownership_redundant) << type; return true; } // Otherwise, if the qualifiers actually conflict, pull sugar off // and remove the ObjCLifetime qualifiers. if (previousLifetime != lifetime) { // It's possible to have multiple local ObjCLifetime qualifiers. We // can't stop after we reach a type that is directly qualified. 
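// Illustration: the ownership spellings this routine handles are, under ARC,
// sugar for objc_ownership (a sketch with hypothetical Objective-C variables,
// assuming <Foundation/Foundation.h>):
//
//   __strong NSObject *s;              // objc_ownership(strong)
//   __weak NSObject *w;                // objc_ownership(weak)
//   __unsafe_unretained NSObject *u;   // objc_ownership(none), OCL_ExplicitNone
//
// Conflicting qualifiers reached through typedef sugar are stripped below;
// directly written conflicts were already rejected above.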
const Type *prevTy = nullptr; while (!prevTy || prevTy != underlyingType.Ty) { prevTy = underlyingType.Ty; underlyingType = underlyingType.getSingleStepDesugaredType(); } underlyingType.Quals.removeObjCLifetime(); } } underlyingType.Quals.addObjCLifetime(lifetime); if (NonObjCPointer) { StringRef name = attr.getAttrName()->getName(); switch (lifetime) { case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: break; case Qualifiers::OCL_Strong: name = "__strong"; break; case Qualifiers::OCL_Weak: name = "__weak"; break; case Qualifiers::OCL_Autoreleasing: name = "__autoreleasing"; break; } S.Diag(AttrLoc, diag::warn_type_attribute_wrong_type) << name << TDS_ObjCObjOrBlock << type; } // Don't actually add the __unsafe_unretained qualifier in non-ARC files, // because having both 'T' and '__unsafe_unretained T' exist in the type // system causes unfortunate widespread consistency problems. (For example, // they're not considered compatible types, and we mangle them identicially // as template arguments.) These problems are all individually fixable, // but it's easier to just not add the qualifier and instead sniff it out // in specific places using isObjCInertUnsafeUnretainedType(). // // Doing this does means we miss some trivial consistency checks that // would've triggered in ARC, but that's better than trying to solve all // the coexistence problems with __unsafe_unretained. if (!S.getLangOpts().ObjCAutoRefCount && lifetime == Qualifiers::OCL_ExplicitNone) { type = state.getAttributedType( createSimpleAttr(S.Context, attr), type, type); return true; } QualType origType = type; if (!NonObjCPointer) type = S.Context.getQualifiedType(underlyingType); // If we have a valid source location for the attribute, use an // AttributedType instead. if (AttrLoc.isValid()) { type = state.getAttributedType(::new (S.Context) ObjCOwnershipAttr(S.Context, attr, II), origType, type); } auto diagnoseOrDelay = [](Sema &S, SourceLocation loc, unsigned diagnostic, QualType type) { if (S.DelayedDiagnostics.shouldDelayDiagnostics()) { S.DelayedDiagnostics.add( sema::DelayedDiagnostic::makeForbiddenType( S.getSourceManager().getExpansionLoc(loc), diagnostic, type, /*ignored*/ 0)); } else { S.Diag(loc, diagnostic); } }; // Sometimes, __weak isn't allowed. if (lifetime == Qualifiers::OCL_Weak && !S.getLangOpts().ObjCWeak && !NonObjCPointer) { // Use a specialized diagnostic if the runtime just doesn't support them. unsigned diagnostic = (S.getLangOpts().ObjCWeakRuntime ? diag::err_arc_weak_disabled : diag::err_arc_weak_no_runtime); // In any case, delay the diagnostic until we know what we're parsing. diagnoseOrDelay(S, AttrLoc, diagnostic, type); attr.setInvalid(); return true; } // Forbid __weak for class objects marked as // objc_arc_weak_reference_unavailable if (lifetime == Qualifiers::OCL_Weak) { if (const ObjCObjectPointerType *ObjT = type->getAs()) { if (ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl()) { if (Class->isArcWeakrefUnavailable()) { S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class); S.Diag(ObjT->getInterfaceDecl()->getLocation(), diag::note_class_declared); } } } } return true; } /// handleObjCGCTypeAttr - Process the __attribute__((objc_gc)) type /// attribute on the specified type. Returns true to indicate that /// the attribute was handled, false to indicate that the type does /// not permit the attribute. static bool handleObjCGCTypeAttr(TypeProcessingState &state, ParsedAttr &attr, QualType &type) { Sema &S = state.getSema(); // Delay if this isn't some kind of pointer. 
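// Illustration (legacy Objective-C GC mode, hypothetical declarations,
// assuming the documented objc_gc spelling): the attribute takes the
// identifier 'weak' or 'strong' and only applies to pointer-like types,
// which is what the checks below enforce.
//
//   __attribute__((objc_gc(weak)))   id WeakRef;    // Qualifiers::Weak
//   __attribute__((objc_gc(strong))) id StrongRef;  // Qualifiers::Strong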
if (!type->isPointerType() && !type->isObjCObjectPointerType() && !type->isBlockPointerType()) return false; if (type.getObjCGCAttr() != Qualifiers::GCNone) { S.Diag(attr.getLoc(), diag::err_attribute_multiple_objc_gc); attr.setInvalid(); return true; } // Check the attribute arguments. if (!attr.isArgIdent(0)) { S.Diag(attr.getLoc(), diag::err_attribute_argument_type) << attr << AANT_ArgumentString; attr.setInvalid(); return true; } Qualifiers::GC GCAttr; if (attr.getNumArgs() > 1) { S.Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << attr << 1; attr.setInvalid(); return true; } IdentifierInfo *II = attr.getArgAsIdent(0)->Ident; if (II->isStr("weak")) GCAttr = Qualifiers::Weak; else if (II->isStr("strong")) GCAttr = Qualifiers::Strong; else { S.Diag(attr.getLoc(), diag::warn_attribute_type_not_supported) << attr << II; attr.setInvalid(); return true; } QualType origType = type; type = S.Context.getObjCGCQualType(origType, GCAttr); // Make an attributed type to preserve the source information. if (attr.getLoc().isValid()) type = state.getAttributedType( ::new (S.Context) ObjCGCAttr(S.Context, attr, II), origType, type); return true; } namespace { /// A helper class to unwrap a type down to a function for the /// purposes of applying attributes there. /// /// Use: /// FunctionTypeUnwrapper unwrapped(SemaRef, T); /// if (unwrapped.isFunctionType()) { /// const FunctionType *fn = unwrapped.get(); /// // change fn somehow /// T = unwrapped.wrap(fn); /// } struct FunctionTypeUnwrapper { enum WrapKind { Desugar, Attributed, Parens, Array, Pointer, BlockPointer, Reference, MemberPointer, MacroQualified, }; QualType Original; const FunctionType *Fn; SmallVector Stack; FunctionTypeUnwrapper(Sema &S, QualType T) : Original(T) { while (true) { const Type *Ty = T.getTypePtr(); if (isa(Ty)) { Fn = cast(Ty); return; } else if (isa(Ty)) { T = cast(Ty)->getInnerType(); Stack.push_back(Parens); } else if (isa(Ty) || isa(Ty) || isa(Ty)) { T = cast(Ty)->getElementType(); Stack.push_back(Array); } else if (isa(Ty)) { T = cast(Ty)->getPointeeType(); Stack.push_back(Pointer); } else if (isa(Ty)) { T = cast(Ty)->getPointeeType(); Stack.push_back(BlockPointer); } else if (isa(Ty)) { T = cast(Ty)->getPointeeType(); Stack.push_back(MemberPointer); } else if (isa(Ty)) { T = cast(Ty)->getPointeeType(); Stack.push_back(Reference); } else if (isa(Ty)) { T = cast(Ty)->getEquivalentType(); Stack.push_back(Attributed); } else if (isa(Ty)) { T = cast(Ty)->getUnderlyingType(); Stack.push_back(MacroQualified); } else { const Type *DTy = Ty->getUnqualifiedDesugaredType(); if (Ty == DTy) { Fn = nullptr; return; } T = QualType(DTy, 0); Stack.push_back(Desugar); } } } bool isFunctionType() const { return (Fn != nullptr); } const FunctionType *get() const { return Fn; } QualType wrap(Sema &S, const FunctionType *New) { // If T wasn't modified from the unwrapped type, do nothing. if (New == get()) return Original; Fn = New; return wrap(S.Context, Original, 0); } private: QualType wrap(ASTContext &C, QualType Old, unsigned I) { if (I == Stack.size()) return C.getQualifiedType(Fn, Old.getQualifiers()); // Build up the inner type, applying the qualifiers from the old // type to the new type. SplitQualType SplitOld = Old.split(); // As a special case, tail-recurse if there are no qualifiers. 
if (SplitOld.Quals.empty()) return wrap(C, SplitOld.Ty, I); return C.getQualifiedType(wrap(C, SplitOld.Ty, I), SplitOld.Quals); } QualType wrap(ASTContext &C, const Type *Old, unsigned I) { if (I == Stack.size()) return QualType(Fn, 0); switch (static_cast(Stack[I++])) { case Desugar: // This is the point at which we potentially lose source // information. return wrap(C, Old->getUnqualifiedDesugaredType(), I); case Attributed: return wrap(C, cast(Old)->getEquivalentType(), I); case Parens: { QualType New = wrap(C, cast(Old)->getInnerType(), I); return C.getParenType(New); } case MacroQualified: return wrap(C, cast(Old)->getUnderlyingType(), I); case Array: { if (const auto *CAT = dyn_cast(Old)) { QualType New = wrap(C, CAT->getElementType(), I); return C.getConstantArrayType(New, CAT->getSize(), CAT->getSizeExpr(), CAT->getSizeModifier(), CAT->getIndexTypeCVRQualifiers()); } if (const auto *VAT = dyn_cast(Old)) { QualType New = wrap(C, VAT->getElementType(), I); return C.getVariableArrayType( New, VAT->getSizeExpr(), VAT->getSizeModifier(), VAT->getIndexTypeCVRQualifiers(), VAT->getBracketsRange()); } const auto *IAT = cast(Old); QualType New = wrap(C, IAT->getElementType(), I); return C.getIncompleteArrayType(New, IAT->getSizeModifier(), IAT->getIndexTypeCVRQualifiers()); } case Pointer: { QualType New = wrap(C, cast(Old)->getPointeeType(), I); return C.getPointerType(New); } case BlockPointer: { QualType New = wrap(C, cast(Old)->getPointeeType(),I); return C.getBlockPointerType(New); } case MemberPointer: { const MemberPointerType *OldMPT = cast(Old); QualType New = wrap(C, OldMPT->getPointeeType(), I); return C.getMemberPointerType(New, OldMPT->getClass()); } case Reference: { const ReferenceType *OldRef = cast(Old); QualType New = wrap(C, OldRef->getPointeeType(), I); if (isa(OldRef)) return C.getLValueReferenceType(New, OldRef->isSpelledAsLValue()); else return C.getRValueReferenceType(New); } } llvm_unreachable("unknown wrapping kind"); } }; } // end anonymous namespace static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State, ParsedAttr &PAttr, QualType &Type) { Sema &S = State.getSema(); Attr *A; switch (PAttr.getKind()) { default: llvm_unreachable("Unknown attribute kind"); case ParsedAttr::AT_Ptr32: A = createSimpleAttr(S.Context, PAttr); break; case ParsedAttr::AT_Ptr64: A = createSimpleAttr(S.Context, PAttr); break; case ParsedAttr::AT_SPtr: A = createSimpleAttr(S.Context, PAttr); break; case ParsedAttr::AT_UPtr: A = createSimpleAttr(S.Context, PAttr); break; } llvm::SmallSet Attrs; attr::Kind NewAttrKind = A->getKind(); QualType Desugared = Type; const AttributedType *AT = dyn_cast(Type); while (AT) { Attrs.insert(AT->getAttrKind()); Desugared = AT->getModifiedType(); AT = dyn_cast(Desugared); } // You cannot specify duplicate type attributes, so if the attribute has // already been applied, flag it. if (Attrs.count(NewAttrKind)) { S.Diag(PAttr.getLoc(), diag::warn_duplicate_attribute_exact) << PAttr; return true; } Attrs.insert(NewAttrKind); // You cannot have both __sptr and __uptr on the same type, nor can you // have __ptr32 and __ptr64. 
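// Illustration: a sketch of the Microsoft pointer-qualifier spellings this
// routine accepts, assuming a 64-bit Windows target (variable names are
// hypothetical):
//
//   int * __ptr64 p64;            // 64-bit pointer
//   int * __ptr32 __uptr p32u;    // 32-bit pointer, zero-extended when widened
//   int * __ptr32 __sptr p32s;    // 32-bit pointer, sign-extended when widened
//   int * __ptr32 __ptr64 bad;    // rejected just below: mutually exclusive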
if (Attrs.count(attr::Ptr32) && Attrs.count(attr::Ptr64)) { S.Diag(PAttr.getLoc(), diag::err_attributes_are_not_compatible) << "'__ptr32'" << "'__ptr64'"; return true; } else if (Attrs.count(attr::SPtr) && Attrs.count(attr::UPtr)) { S.Diag(PAttr.getLoc(), diag::err_attributes_are_not_compatible) << "'__sptr'" << "'__uptr'"; return true; } // Pointer type qualifiers can only operate on pointer types, but not // pointer-to-member types. // // FIXME: Should we really be disallowing this attribute if there is any // type sugar between it and the pointer (other than attributes)? Eg, this // disallows the attribute on a parenthesized pointer. // And if so, should we really allow *any* type attribute? if (!isa(Desugared)) { if (Type->isMemberPointerType()) S.Diag(PAttr.getLoc(), diag::err_attribute_no_member_pointers) << PAttr; else S.Diag(PAttr.getLoc(), diag::err_attribute_pointers_only) << PAttr << 0; return true; } // Add address space to type based on its attributes. LangAS ASIdx = LangAS::Default; uint64_t PtrWidth = S.Context.getTargetInfo().getPointerWidth(0); if (PtrWidth == 32) { if (Attrs.count(attr::Ptr64)) ASIdx = LangAS::ptr64; else if (Attrs.count(attr::UPtr)) ASIdx = LangAS::ptr32_uptr; } else if (PtrWidth == 64 && Attrs.count(attr::Ptr32)) { if (Attrs.count(attr::UPtr)) ASIdx = LangAS::ptr32_uptr; else ASIdx = LangAS::ptr32_sptr; } QualType Pointee = Type->getPointeeType(); if (ASIdx != LangAS::Default) Pointee = S.Context.getAddrSpaceQualType( S.Context.removeAddrSpaceQualType(Pointee), ASIdx); Type = State.getAttributedType(A, Type, S.Context.getPointerType(Pointee)); return false; } /// Map a nullability attribute kind to a nullability kind. static NullabilityKind mapNullabilityAttrKind(ParsedAttr::Kind kind) { switch (kind) { case ParsedAttr::AT_TypeNonNull: return NullabilityKind::NonNull; case ParsedAttr::AT_TypeNullable: return NullabilityKind::Nullable; case ParsedAttr::AT_TypeNullUnspecified: return NullabilityKind::Unspecified; default: llvm_unreachable("not a nullability attribute kind"); } } /// Applies a nullability type specifier to the given type, if possible. /// /// \param state The type processing state. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param attr The attribute as written on the type. /// /// \param allowOnArrayType Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \returns true if a problem has been diagnosed, false on success. static bool checkNullabilityTypeSpecifier(TypeProcessingState &state, QualType &type, ParsedAttr &attr, bool allowOnArrayType) { Sema &S = state.getSema(); NullabilityKind nullability = mapNullabilityAttrKind(attr.getKind()); SourceLocation nullabilityLoc = attr.getLoc(); bool isContextSensitive = attr.isContextSensitiveKeywordAttribute(); recordNullabilitySeen(S, nullabilityLoc); // Check for existing nullability attributes on the type. QualType desugared = type; while (auto attributed = dyn_cast(desugared.getTypePtr())) { // Check whether there is already a null if (auto existingNullability = attributed->getImmediateNullability()) { // Duplicated nullability. if (nullability == *existingNullability) { S.Diag(nullabilityLoc, diag::warn_nullability_duplicate) << DiagNullabilityKind(nullability, isContextSensitive) << FixItHint::CreateRemoval(nullabilityLoc); break; } // Conflicting nullability. 
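// Illustration: a sketch using the standard nullability type specifiers
// (hypothetical declarations):
//
//   int * _Nonnull p;             // nullability recorded on the pointer type
//   int * _Nonnull _Nonnull q;    // duplicate: warning + removal fix-it (above)
//   int * _Nonnull _Nullable r;   // conflict: error (just below)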
S.Diag(nullabilityLoc, diag::err_nullability_conflicting) << DiagNullabilityKind(nullability, isContextSensitive) << DiagNullabilityKind(*existingNullability, false); return true; } desugared = attributed->getModifiedType(); } // If there is already a different nullability specifier, complain. // This (unlike the code above) looks through typedefs that might // have nullability specifiers on them, which means we cannot // provide a useful Fix-It. if (auto existingNullability = desugared->getNullability(S.Context)) { if (nullability != *existingNullability) { S.Diag(nullabilityLoc, diag::err_nullability_conflicting) << DiagNullabilityKind(nullability, isContextSensitive) << DiagNullabilityKind(*existingNullability, false); // Try to find the typedef with the existing nullability specifier. if (auto typedefType = desugared->getAs()) { TypedefNameDecl *typedefDecl = typedefType->getDecl(); QualType underlyingType = typedefDecl->getUnderlyingType(); if (auto typedefNullability = AttributedType::stripOuterNullability(underlyingType)) { if (*typedefNullability == *existingNullability) { S.Diag(typedefDecl->getLocation(), diag::note_nullability_here) << DiagNullabilityKind(*existingNullability, false); } } } return true; } } // If this definitely isn't a pointer type, reject the specifier. if (!desugared->canHaveNullability() && !(allowOnArrayType && desugared->isArrayType())) { S.Diag(nullabilityLoc, diag::err_nullability_nonpointer) << DiagNullabilityKind(nullability, isContextSensitive) << type; return true; } // For the context-sensitive keywords/Objective-C property // attributes, require that the type be a single-level pointer. if (isContextSensitive) { // Make sure that the pointee isn't itself a pointer type. const Type *pointeeType = nullptr; if (desugared->isArrayType()) pointeeType = desugared->getArrayElementTypeNoTypeQual(); else if (desugared->isAnyPointerType()) pointeeType = desugared->getPointeeType().getTypePtr(); if (pointeeType && (pointeeType->isAnyPointerType() || pointeeType->isObjCObjectPointerType() || pointeeType->isMemberPointerType())) { S.Diag(nullabilityLoc, diag::err_nullability_cs_multilevel) << DiagNullabilityKind(nullability, true) << type; S.Diag(nullabilityLoc, diag::note_nullability_type_specifier) << DiagNullabilityKind(nullability, false) << type << FixItHint::CreateReplacement(nullabilityLoc, getNullabilitySpelling(nullability)); return true; } } // Form the attributed type. type = state.getAttributedType( createNullabilityAttr(S.Context, attr, nullability), type, type); return false; } /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. static bool checkObjCKindOfType(TypeProcessingState &state, QualType &type, ParsedAttr &attr) { Sema &S = state.getSema(); if (isa(type)) { // Build the attributed type to record where __kindof occurred. type = state.getAttributedType( createSimpleAttr(S.Context, attr), type, type); return false; } // Find out if it's an Objective-C object or object pointer type; const ObjCObjectPointerType *ptrType = type->getAs(); const ObjCObjectType *objType = ptrType ? ptrType->getObjectType() : type->getAs(); // If not, we can't apply __kindof. if (!objType) { // FIXME: Handle dependent types that aren't yet object types. S.Diag(attr.getLoc(), diag::err_objc_kindof_nonobject) << type; return true; } // Rebuild the "equivalent" type, which pushes __kindof down into // the object type. // There is no need to apply kindof on an unqualified id type. 
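// Illustration (Objective-C, hypothetical declaration): for
//
//   __kindof NSView *v;
//
// the written type is recorded as an AttributedType whose equivalent type
// carries the isKindOf bit on the object type, i.e. a pointer to
// '__kindof NSView', which is what the rebuild below produces.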
QualType equivType = S.Context.getObjCObjectType( objType->getBaseType(), objType->getTypeArgsAsWritten(), objType->getProtocols(), /*isKindOf=*/objType->isObjCUnqualifiedId() ? false : true); // If we started with an object pointer type, rebuild it. if (ptrType) { equivType = S.Context.getObjCObjectPointerType(equivType); if (auto nullability = type->getNullability(S.Context)) { // We create a nullability attribute from the __kindof attribute. // Make sure that will make sense. assert(attr.getAttributeSpellingListIndex() == 0 && "multiple spellings for __kindof?"); Attr *A = createNullabilityAttr(S.Context, attr, *nullability); A->setImplicit(true); equivType = state.getAttributedType(A, equivType, equivType); } } // Build the attributed type to record where __kindof occurred. type = state.getAttributedType( createSimpleAttr(S.Context, attr), type, equivType); return false; } /// Distribute a nullability type attribute that cannot be applied to /// the type specifier to a pointer, block pointer, or member pointer /// declarator, complaining if necessary. /// /// \returns true if the nullability annotation was distributed, false /// otherwise. static bool distributeNullabilityTypeAttr(TypeProcessingState &state, QualType type, ParsedAttr &attr) { Declarator &declarator = state.getDeclarator(); /// Attempt to move the attribute to the specified chunk. auto moveToChunk = [&](DeclaratorChunk &chunk, bool inFunction) -> bool { // If there is already a nullability attribute there, don't add // one. if (hasNullabilityAttr(chunk.getAttrs())) return false; // Complain about the nullability qualifier being in the wrong // place. enum { PK_Pointer, PK_BlockPointer, PK_MemberPointer, PK_FunctionPointer, PK_MemberFunctionPointer, } pointerKind = chunk.Kind == DeclaratorChunk::Pointer ? (inFunction ? PK_FunctionPointer : PK_Pointer) : chunk.Kind == DeclaratorChunk::BlockPointer ? PK_BlockPointer : inFunction? PK_MemberFunctionPointer : PK_MemberPointer; auto diag = state.getSema().Diag(attr.getLoc(), diag::warn_nullability_declspec) << DiagNullabilityKind(mapNullabilityAttrKind(attr.getKind()), attr.isContextSensitiveKeywordAttribute()) << type << static_cast(pointerKind); // FIXME: MemberPointer chunks don't carry the location of the *. if (chunk.Kind != DeclaratorChunk::MemberPointer) { diag << FixItHint::CreateRemoval(attr.getLoc()) << FixItHint::CreateInsertion( state.getSema().getPreprocessor().getLocForEndOfToken( chunk.Loc), " " + attr.getAttrName()->getName().str() + " "); } moveAttrFromListToList(attr, state.getCurrentAttributes(), chunk.getAttrs()); return true; }; // Move it to the outermost pointer, member pointer, or block // pointer declarator. for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) { DeclaratorChunk &chunk = declarator.getTypeObject(i-1); switch (chunk.Kind) { case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::MemberPointer: return moveToChunk(chunk, false); case DeclaratorChunk::Paren: case DeclaratorChunk::Array: continue; case DeclaratorChunk::Function: // Try to move past the return type to a function/block/member // function pointer. if (DeclaratorChunk *dest = maybeMovePastReturnType( declarator, i, /*onlyBlockPointers=*/false)) { return moveToChunk(*dest, true); } return false; // Don't walk through these. 
case DeclaratorChunk::Reference: case DeclaratorChunk::Pipe: return false; } } return false; } static Attr *getCCTypeAttr(ASTContext &Ctx, ParsedAttr &Attr) { assert(!Attr.isInvalid()); switch (Attr.getKind()) { default: llvm_unreachable("not a calling convention attribute"); case ParsedAttr::AT_CDecl: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_FastCall: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_StdCall: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_ThisCall: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_RegCall: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_Pascal: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_SwiftCall: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_VectorCall: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_AArch64VectorPcs: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_Pcs: { // The attribute may have had a fixit applied where we treated an // identifier as a string literal. The contents of the string are valid, // but the form may not be. StringRef Str; if (Attr.isArgExpr(0)) Str = cast(Attr.getArgAsExpr(0))->getString(); else Str = Attr.getArgAsIdent(0)->Ident->getName(); PcsAttr::PCSType Type; if (!PcsAttr::ConvertStrToPCSType(Str, Type)) llvm_unreachable("already validated the attribute"); return ::new (Ctx) PcsAttr(Ctx, Attr, Type); } case ParsedAttr::AT_IntelOclBicc: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_MSABI: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_SysVABI: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_PreserveMost: return createSimpleAttr(Ctx, Attr); case ParsedAttr::AT_PreserveAll: return createSimpleAttr(Ctx, Attr); } llvm_unreachable("unexpected attribute kind!"); } /// Process an individual function attribute. Returns true to /// indicate that the attribute was handled, false if it wasn't. static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr, QualType &type) { Sema &S = state.getSema(); FunctionTypeUnwrapper unwrapped(S, type); if (attr.getKind() == ParsedAttr::AT_NoReturn) { if (S.CheckAttrNoArgs(attr)) return true; // Delay if this is not a function type. if (!unwrapped.isFunctionType()) return false; // Otherwise we can process right away. FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withNoReturn(true); type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI)); return true; } if (attr.getKind() == ParsedAttr::AT_CmseNSCall) { // Delay if this is not a function type. if (!unwrapped.isFunctionType()) return false; // Ignore if we don't have CMSE enabled. if (!S.getLangOpts().Cmse) { S.Diag(attr.getLoc(), diag::warn_attribute_ignored) << attr; attr.setInvalid(); return true; } // Otherwise we can process right away. FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withCmseNSCall(true); type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI)); return true; } // ns_returns_retained is not always a type attribute, but if we got // here, we're treating it as one right now. if (attr.getKind() == ParsedAttr::AT_NSReturnsRetained) { if (attr.getNumArgs()) return true; // Delay if this is not a function type. if (!unwrapped.isFunctionType()) return false; // Check whether the return type is reasonable. if (S.checkNSReturnsRetainedReturnType(attr.getLoc(), unwrapped.get()->getReturnType())) return true; // Only actually change the underlying type in ARC builds. 
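// Illustration (hypothetical declaration, assuming the documented
// ns_returns_retained spelling): when the attribute ends up treated as a
// type attribute, e.g.
//
//   id makeObject(void) __attribute__((ns_returns_retained));
//
// only the AttributedType sugar is recorded outside ARC; under ARC the
// 'produces result' ExtInfo bit is also set, as the code below does.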
QualType origType = type; if (state.getSema().getLangOpts().ObjCAutoRefCount) { FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withProducesResult(true); type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI)); } type = state.getAttributedType( createSimpleAttr(S.Context, attr), origType, type); return true; } if (attr.getKind() == ParsedAttr::AT_AnyX86NoCallerSavedRegisters) { if (S.CheckAttrTarget(attr) || S.CheckAttrNoArgs(attr)) return true; // Delay if this is not a function type. if (!unwrapped.isFunctionType()) return false; FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withNoCallerSavedRegs(true); type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI)); return true; } if (attr.getKind() == ParsedAttr::AT_AnyX86NoCfCheck) { if (!S.getLangOpts().CFProtectionBranch) { S.Diag(attr.getLoc(), diag::warn_nocf_check_attribute_ignored); attr.setInvalid(); return true; } if (S.CheckAttrTarget(attr) || S.CheckAttrNoArgs(attr)) return true; // If this is not a function type, warning will be asserted by subject // check. if (!unwrapped.isFunctionType()) return true; FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withNoCfCheck(true); type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI)); return true; } if (attr.getKind() == ParsedAttr::AT_Regparm) { unsigned value; if (S.CheckRegparmAttr(attr, value)) return true; // Delay if this is not a function type. if (!unwrapped.isFunctionType()) return false; // Diagnose regparm with fastcall. const FunctionType *fn = unwrapped.get(); CallingConv CC = fn->getCallConv(); if (CC == CC_X86FastCall) { S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible) << FunctionType::getNameForCallConv(CC) << "regparm"; attr.setInvalid(); return true; } FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withRegParm(value); type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI)); return true; } if (attr.getKind() == ParsedAttr::AT_NoThrow) { // Delay if this is not a function type. if (!unwrapped.isFunctionType()) return false; if (S.CheckAttrNoArgs(attr)) { attr.setInvalid(); return true; } // Otherwise we can process right away. auto *Proto = unwrapped.get()->castAs(); // MSVC ignores nothrow if it is in conflict with an explicit exception // specification. if (Proto->hasExceptionSpec()) { switch (Proto->getExceptionSpecType()) { case EST_None: llvm_unreachable("This doesn't have an exception spec!"); case EST_DynamicNone: case EST_BasicNoexcept: case EST_NoexceptTrue: case EST_NoThrow: // Exception spec doesn't conflict with nothrow, so don't warn. LLVM_FALLTHROUGH; case EST_Unparsed: case EST_Uninstantiated: case EST_DependentNoexcept: case EST_Unevaluated: // We don't have enough information to properly determine if there is a // conflict, so suppress the warning. break; case EST_Dynamic: case EST_MSAny: case EST_NoexceptFalse: S.Diag(attr.getLoc(), diag::warn_nothrow_attribute_ignored); break; } return true; } type = unwrapped.wrap( S, S.Context .getFunctionTypeWithExceptionSpec( QualType{Proto, 0}, FunctionProtoType::ExceptionSpecInfo{EST_NoThrow}) ->getAs()); return true; } // Delay if the type didn't work out to a function. if (!unwrapped.isFunctionType()) return false; // Otherwise, a calling convention. 
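// Illustration: a sketch of calling-convention type attributes on a 32-bit
// x86 target (hypothetical declarations, GNU spellings):
//
//   void (__attribute__((stdcall)) *fp)(int);         // CC applied to the type
//   void (__attribute__((fastcall)) *vp)(int, ...);   // warning: variadic,
//                                                     // fastcall ignored
//   void (__attribute__((fastcall))
//         __attribute__((regparm(2))) *rp)(int);      // error: not compatible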
CallingConv CC; if (S.CheckCallingConvAttr(attr, CC)) return true; const FunctionType *fn = unwrapped.get(); CallingConv CCOld = fn->getCallConv(); Attr *CCAttr = getCCTypeAttr(S.Context, attr); if (CCOld != CC) { // Error out on when there's already an attribute on the type // and the CCs don't match. if (S.getCallingConvAttributedType(type)) { S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible) << FunctionType::getNameForCallConv(CC) << FunctionType::getNameForCallConv(CCOld); attr.setInvalid(); return true; } } // Diagnose use of variadic functions with calling conventions that // don't support them (e.g. because they're callee-cleanup). // We delay warning about this on unprototyped function declarations // until after redeclaration checking, just in case we pick up a // prototype that way. And apparently we also "delay" warning about // unprototyped function types in general, despite not necessarily having // much ability to diagnose it later. if (!supportsVariadicCall(CC)) { const FunctionProtoType *FnP = dyn_cast(fn); if (FnP && FnP->isVariadic()) { // stdcall and fastcall are ignored with a warning for GCC and MS // compatibility. if (CC == CC_X86StdCall || CC == CC_X86FastCall) return S.Diag(attr.getLoc(), diag::warn_cconv_unsupported) << FunctionType::getNameForCallConv(CC) << (int)Sema::CallingConventionIgnoredReason::VariadicFunction; attr.setInvalid(); return S.Diag(attr.getLoc(), diag::err_cconv_varargs) << FunctionType::getNameForCallConv(CC); } } // Also diagnose fastcall with regparm. if (CC == CC_X86FastCall && fn->getHasRegParm()) { S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible) << "regparm" << FunctionType::getNameForCallConv(CC_X86FastCall); attr.setInvalid(); return true; } // Modify the CC from the wrapped function type, wrap it all back, and then // wrap the whole thing in an AttributedType as written. The modified type // might have a different CC if we ignored the attribute. QualType Equivalent; if (CCOld == CC) { Equivalent = type; } else { auto EI = unwrapped.get()->getExtInfo().withCallingConv(CC); Equivalent = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI)); } type = state.getAttributedType(CCAttr, type, Equivalent); return true; } bool Sema::hasExplicitCallingConv(QualType T) { const AttributedType *AT; // Stop if we'd be stripping off a typedef sugar node to reach the // AttributedType. while ((AT = T->getAs()) && AT->getAs() == T->getAs()) { if (AT->isCallingConv()) return true; T = AT->getModifiedType(); } return false; } void Sema::adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc) { FunctionTypeUnwrapper Unwrapped(*this, T); const FunctionType *FT = Unwrapped.get(); bool IsVariadic = (isa(FT) && cast(FT)->isVariadic()); CallingConv CurCC = FT->getCallConv(); CallingConv ToCC = Context.getDefaultCallingConvention(IsVariadic, !IsStatic); if (CurCC == ToCC) return; // MS compiler ignores explicit calling convention attributes on structors. We // should do the same. if (Context.getTargetInfo().getCXXABI().isMicrosoft() && IsCtorOrDtor) { // Issue a warning on ignored calling convention -- except of __stdcall. // Again, this is what MS compiler does. if (CurCC != CC_X86StdCall) Diag(Loc, diag::warn_cconv_unsupported) << FunctionType::getNameForCallConv(CurCC) << (int)Sema::CallingConventionIgnoredReason::ConstructorDestructor; // Default adjustment. } else { // Only adjust types with the default convention. 
For example, on Windows // we should adjust a __cdecl type to __thiscall for instance methods, and a // __thiscall type to __cdecl for static methods. CallingConv DefaultCC = Context.getDefaultCallingConvention(IsVariadic, IsStatic); if (CurCC != DefaultCC || DefaultCC == ToCC) return; if (hasExplicitCallingConv(T)) return; } FT = Context.adjustFunctionType(FT, FT->getExtInfo().withCallingConv(ToCC)); QualType Wrapped = Unwrapped.wrap(*this, FT); T = Context.getAdjustedType(T, Wrapped); } /// HandleVectorSizeAttribute - this attribute is only applicable to integral /// and float scalars, although arrays, pointers, and function return values are /// allowed in conjunction with this construct. Aggregates with this attribute /// are invalid, even if they are of the same size as a corresponding scalar. /// The raw attribute should contain precisely 1 argument, the vector size for /// the variable, measured in bytes. If curType and rawAttr are well formed, /// this routine will return a new vector type. static void HandleVectorSizeAttr(QualType &CurType, const ParsedAttr &Attr, Sema &S) { // Check the attribute arguments. if (Attr.getNumArgs() != 1) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr << 1; Attr.setInvalid(); return; } Expr *SizeExpr; // Special case where the argument is a template id. if (Attr.isArgIdent(0)) { CXXScopeSpec SS; SourceLocation TemplateKWLoc; UnqualifiedId Id; Id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc()); ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc, Id, /*HasTrailingLParen=*/false, /*IsAddressOfOperand=*/false); if (Size.isInvalid()) return; SizeExpr = Size.get(); } else { SizeExpr = Attr.getArgAsExpr(0); } QualType T = S.BuildVectorType(CurType, SizeExpr, Attr.getLoc()); if (!T.isNull()) CurType = T; else Attr.setInvalid(); } /// Process the OpenCL-like ext_vector_type attribute when it occurs on /// a type. static void HandleExtVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr, Sema &S) { // check the attribute arguments. if (Attr.getNumArgs() != 1) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr << 1; return; } Expr *sizeExpr; // Special case where the argument is a template id. if (Attr.isArgIdent(0)) { CXXScopeSpec SS; SourceLocation TemplateKWLoc; UnqualifiedId id; id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc()); ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc, id, /*HasTrailingLParen=*/false, /*IsAddressOfOperand=*/false); if (Size.isInvalid()) return; sizeExpr = Size.get(); } else { sizeExpr = Attr.getArgAsExpr(0); } // Create the vector type. QualType T = S.BuildExtVectorType(CurType, sizeExpr, Attr.getLoc()); if (!T.isNull()) CurType = T; } static bool isPermittedNeonBaseType(QualType &Ty, VectorType::VectorKind VecKind, Sema &S) { const BuiltinType *BTy = Ty->getAs(); if (!BTy) return false; llvm::Triple Triple = S.Context.getTargetInfo().getTriple(); // Signed poly is mathematically wrong, but has been baked into some ABIs by // now. bool IsPolyUnsigned = Triple.getArch() == llvm::Triple::aarch64 || Triple.getArch() == llvm::Triple::aarch64_32 || Triple.getArch() == llvm::Triple::aarch64_be; if (VecKind == VectorType::NeonPolyVector) { if (IsPolyUnsigned) { // AArch64 polynomial vectors are unsigned. 
return BTy->getKind() == BuiltinType::UChar || BTy->getKind() == BuiltinType::UShort || BTy->getKind() == BuiltinType::ULong || BTy->getKind() == BuiltinType::ULongLong; } else { // AArch32 polynomial vectors are signed. return BTy->getKind() == BuiltinType::SChar || BTy->getKind() == BuiltinType::Short || BTy->getKind() == BuiltinType::LongLong; } } // Non-polynomial vector types: the usual suspects are allowed, as well as // float64_t on AArch64. if ((Triple.isArch64Bit() || Triple.getArch() == llvm::Triple::aarch64_32) && BTy->getKind() == BuiltinType::Double) return true; return BTy->getKind() == BuiltinType::SChar || BTy->getKind() == BuiltinType::UChar || BTy->getKind() == BuiltinType::Short || BTy->getKind() == BuiltinType::UShort || BTy->getKind() == BuiltinType::Int || BTy->getKind() == BuiltinType::UInt || BTy->getKind() == BuiltinType::Long || BTy->getKind() == BuiltinType::ULong || BTy->getKind() == BuiltinType::LongLong || BTy->getKind() == BuiltinType::ULongLong || BTy->getKind() == BuiltinType::Float || BTy->getKind() == BuiltinType::Half || BTy->getKind() == BuiltinType::BFloat16; } /// HandleNeonVectorTypeAttr - The "neon_vector_type" and /// "neon_polyvector_type" attributes are used to create vector types that /// are mangled according to ARM's ABI. Otherwise, these types are identical /// to those created with the "vector_size" attribute. Unlike "vector_size" /// the argument to these Neon attributes is the number of vector elements, /// not the vector size in bytes. The vector width and element type must /// match one of the standard Neon vector types. static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr, Sema &S, VectorType::VectorKind VecKind) { // Target must have NEON (or MVE, whose vectors are similar enough // not to need a separate attribute) if (!S.Context.getTargetInfo().hasFeature("neon") && !S.Context.getTargetInfo().hasFeature("mve")) { S.Diag(Attr.getLoc(), diag::err_attribute_unsupported) << Attr; Attr.setInvalid(); return; } // Check the attribute arguments. if (Attr.getNumArgs() != 1) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr << 1; Attr.setInvalid(); return; } // The number of elements must be an ICE. Expr *numEltsExpr = static_cast(Attr.getArgAsExpr(0)); llvm::APSInt numEltsInt(32); if (numEltsExpr->isTypeDependent() || numEltsExpr->isValueDependent() || !numEltsExpr->isIntegerConstantExpr(numEltsInt, S.Context)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr << AANT_ArgumentIntegerConstant << numEltsExpr->getSourceRange(); Attr.setInvalid(); return; } // Only certain element types are supported for Neon vectors. if (!isPermittedNeonBaseType(CurType, VecKind, S)) { S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType; Attr.setInvalid(); return; } // The total size of the vector must be 64 or 128 bits. 
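// Illustration: the attribute argument is an element count, so the total
// width is sizeof(element) * count and must come to 64 or 128 bits. A sketch
// of the arm_neon.h-style definitions on AArch64 (hypothetical typedef names):
//
//   typedef __attribute__((neon_vector_type(4))) float float32x4_t;          // 4 x 32 = 128
//   typedef __attribute__((neon_polyvector_type(8))) unsigned char poly8x8_t; // 8 x 8 = 64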
unsigned typeSize = static_cast(S.Context.getTypeSize(CurType)); unsigned numElts = static_cast(numEltsInt.getZExtValue()); unsigned vecSize = typeSize * numElts; if (vecSize != 64 && vecSize != 128) { S.Diag(Attr.getLoc(), diag::err_attribute_bad_neon_vector_size) << CurType; Attr.setInvalid(); return; } CurType = S.Context.getVectorType(CurType, numElts, VecKind); } static void HandleArmMveStrictPolymorphismAttr(TypeProcessingState &State, QualType &CurType, ParsedAttr &Attr) { const VectorType *VT = dyn_cast(CurType); if (!VT || VT->getVectorKind() != VectorType::NeonVector) { State.getSema().Diag(Attr.getLoc(), diag::err_attribute_arm_mve_polymorphism); Attr.setInvalid(); return; } CurType = State.getAttributedType(createSimpleAttr( State.getSema().Context, Attr), CurType, CurType); } /// Handle OpenCL Access Qualifier Attribute. static void HandleOpenCLAccessAttr(QualType &CurType, const ParsedAttr &Attr, Sema &S) { // OpenCL v2.0 s6.6 - Access qualifier can be used only for image and pipe type. if (!(CurType->isImageType() || CurType->isPipeType())) { S.Diag(Attr.getLoc(), diag::err_opencl_invalid_access_qualifier); Attr.setInvalid(); return; } if (const TypedefType* TypedefTy = CurType->getAs()) { QualType BaseTy = TypedefTy->desugar(); std::string PrevAccessQual; if (BaseTy->isPipeType()) { if (TypedefTy->getDecl()->hasAttr()) { OpenCLAccessAttr *Attr = TypedefTy->getDecl()->getAttr(); PrevAccessQual = Attr->getSpelling(); } else { PrevAccessQual = "read_only"; } } else if (const BuiltinType* ImgType = BaseTy->getAs()) { switch (ImgType->getKind()) { #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ case BuiltinType::Id: \ PrevAccessQual = #Access; \ break; #include "clang/Basic/OpenCLImageTypes.def" default: llvm_unreachable("Unable to find corresponding image type."); } } else { llvm_unreachable("unexpected type"); } StringRef AttrName = Attr.getAttrName()->getName(); if (PrevAccessQual == AttrName.ltrim("_")) { // Duplicated qualifiers S.Diag(Attr.getLoc(), diag::warn_duplicate_declspec) << AttrName << Attr.getRange(); } else { // Contradicting qualifiers S.Diag(Attr.getLoc(), diag::err_opencl_multiple_access_qualifiers); } S.Diag(TypedefTy->getDecl()->getBeginLoc(), diag::note_opencl_typedef_access_qualifier) << PrevAccessQual; } else if (CurType->isPipeType()) { if (Attr.getSemanticSpelling() == OpenCLAccessAttr::Keyword_write_only) { QualType ElemType = CurType->getAs()->getElementType(); CurType = S.Context.getWritePipeType(ElemType); } } } /// HandleMatrixTypeAttr - "matrix_type" attribute, like ext_vector_type static void HandleMatrixTypeAttr(QualType &CurType, const ParsedAttr &Attr, Sema &S) { if (!S.getLangOpts().MatrixTypes) { S.Diag(Attr.getLoc(), diag::err_builtin_matrix_disabled); return; } if (Attr.getNumArgs() != 2) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr << 2; return; } Expr *RowsExpr = nullptr; Expr *ColsExpr = nullptr; // TODO: Refactor parameter extraction into separate function // Get the number of rows if (Attr.isArgIdent(0)) { CXXScopeSpec SS; SourceLocation TemplateKeywordLoc; UnqualifiedId id; id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc()); ExprResult Rows = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKeywordLoc, id, false, false); if (Rows.isInvalid()) // TODO: maybe a good error message would be nice here return; RowsExpr = Rows.get(); } else { assert(Attr.isArgExpr(0) && "Argument to should either be an identity or expression"); RowsExpr = Attr.getArgAsExpr(0); } // Get the number of 
columns if (Attr.isArgIdent(1)) { CXXScopeSpec SS; SourceLocation TemplateKeywordLoc; UnqualifiedId id; id.setIdentifier(Attr.getArgAsIdent(1)->Ident, Attr.getLoc()); ExprResult Columns = S.ActOnIdExpression( S.getCurScope(), SS, TemplateKeywordLoc, id, false, false); if (Columns.isInvalid()) // TODO: a good error message would be nice here return; ColsExpr = Columns.get(); } else { assert(Attr.isArgExpr(1) && "Argument to should either be an identity or expression"); ColsExpr = Attr.getArgAsExpr(1); } // Create the matrix type. QualType T = S.BuildMatrixType(CurType, RowsExpr, ColsExpr, Attr.getLoc()); if (!T.isNull()) CurType = T; } static void HandleLifetimeBoundAttr(TypeProcessingState &State, QualType &CurType, ParsedAttr &Attr) { if (State.getDeclarator().isDeclarationOfFunction()) { CurType = State.getAttributedType( createSimpleAttr(State.getSema().Context, Attr), CurType, CurType); } else { Attr.diagnoseAppertainsTo(State.getSema(), nullptr); } } static bool isAddressSpaceKind(const ParsedAttr &attr) { auto attrKind = attr.getKind(); return attrKind == ParsedAttr::AT_AddressSpace || attrKind == ParsedAttr::AT_OpenCLPrivateAddressSpace || attrKind == ParsedAttr::AT_OpenCLGlobalAddressSpace || attrKind == ParsedAttr::AT_OpenCLLocalAddressSpace || attrKind == ParsedAttr::AT_OpenCLConstantAddressSpace || attrKind == ParsedAttr::AT_OpenCLGenericAddressSpace; } static void processTypeAttrs(TypeProcessingState &state, QualType &type, TypeAttrLocation TAL, ParsedAttributesView &attrs) { // Scan through and apply attributes to this type where it makes sense. Some // attributes (such as __address_space__, __vector_size__, etc) apply to the // type, but others can be present in the type specifiers even though they // apply to the decl. Here we apply type attributes and ignore the rest. // This loop modifies the list pretty frequently, but we still need to make // sure we visit every element once. Copy the attributes list, and iterate // over that. ParsedAttributesView AttrsCopy{attrs}; state.setParsedNoDeref(false); for (ParsedAttr &attr : AttrsCopy) { // Skip attributes that were marked to be invalid. if (attr.isInvalid()) continue; if (attr.isCXX11Attribute()) { // [[gnu::...]] attributes are treated as declaration attributes, so may // not appertain to a DeclaratorChunk. If we handle them as type // attributes, accept them in that position and diagnose the GCC // incompatibility. if (attr.isGNUScope()) { bool IsTypeAttr = attr.isTypeAttr(); if (TAL == TAL_DeclChunk) { state.getSema().Diag(attr.getLoc(), IsTypeAttr ? diag::warn_gcc_ignores_type_attr : diag::warn_cxx11_gnu_attribute_on_type) << attr; if (!IsTypeAttr) continue; } } else if (TAL != TAL_DeclChunk && !isAddressSpaceKind(attr)) { // Otherwise, only consider type processing for a C++11 attribute if // it's actually been applied to a type. // We also allow C++11 address_space and // OpenCL language address space attributes to pass through. continue; } } // If this is an attribute we can handle, do so now, // otherwise, add it to the FnAttrs list for rechaining. switch (attr.getKind()) { default: // A C++11 attribute on a declarator chunk must appertain to a type.
if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk) { state.getSema().Diag(attr.getLoc(), diag::err_attribute_not_type_attr) << attr; attr.setUsedAsTypeAttr(); } break; case ParsedAttr::UnknownAttribute: if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk) state.getSema().Diag(attr.getLoc(), diag::warn_unknown_attribute_ignored) << attr; break; case ParsedAttr::IgnoredAttribute: break; case ParsedAttr::AT_MayAlias: // FIXME: This attribute needs to actually be handled, but if we ignore // it it breaks large amounts of Linux software. attr.setUsedAsTypeAttr(); break; case ParsedAttr::AT_OpenCLPrivateAddressSpace: case ParsedAttr::AT_OpenCLGlobalAddressSpace: case ParsedAttr::AT_OpenCLLocalAddressSpace: case ParsedAttr::AT_OpenCLConstantAddressSpace: case ParsedAttr::AT_OpenCLGenericAddressSpace: case ParsedAttr::AT_AddressSpace: HandleAddressSpaceTypeAttribute(type, attr, state); attr.setUsedAsTypeAttr(); break; OBJC_POINTER_TYPE_ATTRS_CASELIST: if (!handleObjCPointerTypeAttr(state, attr, type)) distributeObjCPointerTypeAttr(state, attr, type); attr.setUsedAsTypeAttr(); break; case ParsedAttr::AT_VectorSize: HandleVectorSizeAttr(type, attr, state.getSema()); attr.setUsedAsTypeAttr(); break; case ParsedAttr::AT_ExtVectorType: HandleExtVectorTypeAttr(type, attr, state.getSema()); attr.setUsedAsTypeAttr(); break; case ParsedAttr::AT_NeonVectorType: HandleNeonVectorTypeAttr(type, attr, state.getSema(), VectorType::NeonVector); attr.setUsedAsTypeAttr(); break; case ParsedAttr::AT_NeonPolyVectorType: HandleNeonVectorTypeAttr(type, attr, state.getSema(), VectorType::NeonPolyVector); attr.setUsedAsTypeAttr(); break; case ParsedAttr::AT_ArmMveStrictPolymorphism: { HandleArmMveStrictPolymorphismAttr(state, type, attr); attr.setUsedAsTypeAttr(); break; } case ParsedAttr::AT_OpenCLAccess: HandleOpenCLAccessAttr(type, attr, state.getSema()); attr.setUsedAsTypeAttr(); break; case ParsedAttr::AT_LifetimeBound: if (TAL == TAL_DeclChunk) HandleLifetimeBoundAttr(state, type, attr); break; case ParsedAttr::AT_NoDeref: { ASTContext &Ctx = state.getSema().Context; type = state.getAttributedType(createSimpleAttr(Ctx, attr), type, type); attr.setUsedAsTypeAttr(); state.setParsedNoDeref(true); break; } case ParsedAttr::AT_MatrixType: HandleMatrixTypeAttr(type, attr, state.getSema()); attr.setUsedAsTypeAttr(); break; MS_TYPE_ATTRS_CASELIST: if (!handleMSPointerTypeQualifierAttr(state, attr, type)) attr.setUsedAsTypeAttr(); break; NULLABILITY_TYPE_ATTRS_CASELIST: // Either add nullability here or try to distribute it. We // don't want to distribute the nullability specifier past any // dependent type, because that complicates the user model. if (type->canHaveNullability() || type->isDependentType() || type->isArrayType() || !distributeNullabilityTypeAttr(state, type, attr)) { unsigned endIndex; if (TAL == TAL_DeclChunk) endIndex = state.getCurrentChunkIndex(); else endIndex = state.getDeclarator().getNumTypeObjects(); bool allowOnArrayType = state.getDeclarator().isPrototypeContext() && !hasOuterPointerLikeChunk(state.getDeclarator(), endIndex); if (checkNullabilityTypeSpecifier( state, type, attr, allowOnArrayType)) { attr.setInvalid(); } attr.setUsedAsTypeAttr(); } break; case ParsedAttr::AT_ObjCKindOf: // '__kindof' must be part of the decl-specifiers. 
switch (TAL) { case TAL_DeclSpec: break; case TAL_DeclChunk: case TAL_DeclName: state.getSema().Diag(attr.getLoc(), diag::err_objc_kindof_wrong_position) << FixItHint::CreateRemoval(attr.getLoc()) << FixItHint::CreateInsertion( state.getDeclarator().getDeclSpec().getBeginLoc(), "__kindof "); break; } // Apply it regardless. if (checkObjCKindOfType(state, type, attr)) attr.setInvalid(); break; case ParsedAttr::AT_NoThrow: // Exception Specifications aren't generally supported in C mode throughout // clang, so revert to attribute-based handling for C. if (!state.getSema().getLangOpts().CPlusPlus) break; LLVM_FALLTHROUGH; FUNCTION_TYPE_ATTRS_CASELIST: attr.setUsedAsTypeAttr(); // Never process function type attributes as part of the // declaration-specifiers. if (TAL == TAL_DeclSpec) distributeFunctionTypeAttrFromDeclSpec(state, attr, type); // Otherwise, handle the possible delays. else if (!handleFunctionTypeAttr(state, attr, type)) distributeFunctionTypeAttr(state, attr, type); break; case ParsedAttr::AT_AcquireHandle: { if (!type->isFunctionType()) return; if (attr.getNumArgs() != 1) { state.getSema().Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << attr << 1; attr.setInvalid(); return; } StringRef HandleType; if (!state.getSema().checkStringLiteralArgumentAttr(attr, 0, HandleType)) return; type = state.getAttributedType( AcquireHandleAttr::Create(state.getSema().Context, HandleType, attr), type, type); attr.setUsedAsTypeAttr(); break; } } // Handle attributes that are defined in a macro. We do not want this to be // applied to ObjC builtin attributes. if (isa(type) && attr.hasMacroIdentifier() && !type.getQualifiers().hasObjCLifetime() && !type.getQualifiers().hasObjCGCAttr() && attr.getKind() != ParsedAttr::AT_ObjCGC && attr.getKind() != ParsedAttr::AT_ObjCOwnership) { const IdentifierInfo *MacroII = attr.getMacroIdentifier(); type = state.getSema().Context.getMacroQualifiedType(type, MacroII); state.setExpansionLocForMacroQualifiedType( cast(type.getTypePtr()), attr.getMacroExpansionLoc()); } } if (!state.getSema().getLangOpts().OpenCL || type.getAddressSpace() != LangAS::Default) return; } void Sema::completeExprArrayBound(Expr *E) { if (DeclRefExpr *DRE = dyn_cast(E->IgnoreParens())) { if (VarDecl *Var = dyn_cast(DRE->getDecl())) { if (isTemplateInstantiation(Var->getTemplateSpecializationKind())) { auto *Def = Var->getDefinition(); if (!Def) { SourceLocation PointOfInstantiation = E->getExprLoc(); runWithSufficientStackSpace(PointOfInstantiation, [&] { InstantiateVariableDefinition(PointOfInstantiation, Var); }); Def = Var->getDefinition(); // If we don't already have a point of instantiation, and we managed // to instantiate a definition, this is the point of instantiation. // Otherwise, we don't request an end-of-TU instantiation, so this is // not a point of instantiation. // FIXME: Is this really the right behavior? if (Var->getPointOfInstantiation().isInvalid() && Def) { assert(Var->getTemplateSpecializationKind() == TSK_ImplicitInstantiation && "explicit instantiation with no point of instantiation"); Var->setTemplateSpecializationKind( Var->getTemplateSpecializationKind(), PointOfInstantiation); } } // Update the type to the definition's type both here and within the // expression. if (Def) { DRE->setDecl(Def); QualType T = Def->getType(); DRE->setType(T); // FIXME: Update the type on all intervening expressions. 
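// Illustration (hypothetical C++14 variable template): the incomplete array
// bound only becomes known once the definition is instantiated, which is the
// situation this routine handles.
//
//   template <typename T> const int table[] = {1, 2, 3};
//   constexpr auto n = sizeof(table<int>);  // forces instantiation of the
//                                           // definition; bound becomes 3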
E->setType(T); } // We still go on to try to complete the type independently, as it // may also require instantiations or diagnostics if it remains // incomplete. } } } } /// Ensure that the type of the given expression is complete. /// /// This routine checks whether the expression \p E has a complete type. If the /// expression refers to an instantiable construct, that instantiation is /// performed as needed to complete its type. Furthermore /// Sema::RequireCompleteType is called for the expression's type (or in the /// case of a reference type, the referred-to type). /// /// \param E The expression whose type is required to be complete. /// \param Kind Selects which completeness rules should be applied. /// \param Diagnoser The object that will emit a diagnostic if the type is /// incomplete. /// /// \returns \c true if the type of \p E is incomplete and diagnosed, \c false /// otherwise. bool Sema::RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser) { QualType T = E->getType(); // Incomplete array types may be completed by the initializer attached to // their definitions. For static data members of class templates and for // variable templates, we need to instantiate the definition to get this // initializer and complete the type. if (T->isIncompleteArrayType()) { completeExprArrayBound(E); T = E->getType(); } // FIXME: Are there other cases which require instantiating something other // than the type to complete the type of an expression? return RequireCompleteType(E->getExprLoc(), T, Kind, Diagnoser); } bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) { BoundTypeDiagnoser<> Diagnoser(DiagID); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } /// Ensure that the type T is a complete type. /// /// This routine checks whether the type @p T is complete in any /// context where a complete type is required. If @p T is a complete /// type, returns false. If @p T is a class template specialization, /// this routine then attempts to perform class template /// instantiation. If instantiation fails, or if @p T is incomplete /// and cannot be completed, issues the diagnostic @p diag (giving it /// the type @p T) and returns true. /// /// @param Loc The location in the source that the incomplete type /// diagnostic should refer to. /// /// @param T The type that this routine is examining for completeness. /// /// @param Kind Selects which completeness rules should be applied. /// /// @returns @c true if @p T is incomplete and a diagnostic was emitted, /// @c false otherwise. bool Sema::RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser) { if (RequireCompleteTypeImpl(Loc, T, Kind, &Diagnoser)) return true; if (const TagType *Tag = T->getAs()) { if (!Tag->getDecl()->isCompleteDefinitionRequired()) { Tag->getDecl()->setCompleteDefinitionRequired(); Consumer.HandleTagDeclRequiredDefinition(Tag->getDecl()); } } return false; } bool Sema::hasStructuralCompatLayout(Decl *D, Decl *Suggested) { llvm::DenseSet> NonEquivalentDecls; if (!Suggested) return false; // FIXME: Add a specific mode for C11 6.2.7/1 in StructuralEquivalenceContext // and isolate from other C++ specific checks. 
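  // Informally (hypothetical user code, not from this file), the C11 6.2.7p1
  // notion of compatibility being approximated here treats separately-compiled
  // definitions as the same type when their members line up:
  //
  //   /* TU1 */ struct S { int x; float y; };
  //   /* TU2 */ struct S { int x; float y; };   // compatible with TU1's S
  //   /* TU3 */ struct S { int x; double y; };  // not compatible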
StructuralEquivalenceContext Ctx( D->getASTContext(), Suggested->getASTContext(), NonEquivalentDecls, StructuralEquivalenceKind::Default, false /*StrictTypeSpelling*/, true /*Complain*/, true /*ErrorOnTagTypeMismatch*/); return Ctx.IsEquivalent(D, Suggested); } /// Determine whether there is any declaration of \p D that was ever a /// definition (perhaps before module merging) and is currently visible. /// \param D The definition of the entity. /// \param Suggested Filled in with the declaration that should be made visible /// in order to provide a definition of this entity. /// \param OnlyNeedComplete If \c true, we only need the type to be complete, /// not defined. This only matters for enums with a fixed underlying /// type, since in all other cases, a type is complete if and only if it /// is defined. bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete) { // Easy case: if we don't have modules, all declarations are visible. if (!getLangOpts().Modules && !getLangOpts().ModulesLocalVisibility) return true; // If this definition was instantiated from a template, map back to the // pattern from which it was instantiated. if (isa(D) && cast(D)->isBeingDefined()) { // We're in the middle of defining it; this definition should be treated // as visible. return true; } else if (auto *RD = dyn_cast(D)) { if (auto *Pattern = RD->getTemplateInstantiationPattern()) RD = Pattern; D = RD->getDefinition(); } else if (auto *ED = dyn_cast(D)) { if (auto *Pattern = ED->getTemplateInstantiationPattern()) ED = Pattern; if (OnlyNeedComplete && (ED->isFixed() || getLangOpts().MSVCCompat)) { // If the enum has a fixed underlying type, it may have been forward // declared. In -fms-compatibility, `enum Foo;` will also forward declare // the enum and assign it the underlying type of `int`. Since we're only // looking for a complete type (not a definition), any visible declaration // of it will do. *Suggested = nullptr; for (auto *Redecl : ED->redecls()) { if (isVisible(Redecl)) return true; if (Redecl->isThisDeclarationADefinition() || (Redecl->isCanonicalDecl() && !*Suggested)) *Suggested = Redecl; } return false; } D = ED->getDefinition(); } else if (auto *FD = dyn_cast(D)) { if (auto *Pattern = FD->getTemplateInstantiationPattern()) FD = Pattern; D = FD->getDefinition(); } else if (auto *VD = dyn_cast(D)) { if (auto *Pattern = VD->getTemplateInstantiationPattern()) VD = Pattern; D = VD->getDefinition(); } assert(D && "missing definition for pattern of instantiated definition"); *Suggested = D; auto DefinitionIsVisible = [&] { // The (primary) definition might be in a visible module. if (isVisible(D)) return true; // A visible module might have a merged definition instead. if (D->isModulePrivate() ? hasMergedDefinitionInCurrentModule(D) : hasVisibleMergedDefinition(D)) { if (CodeSynthesisContexts.empty() && !getLangOpts().ModulesLocalVisibility) { // Cache the fact that this definition is implicitly visible because // there is a visible merged definition. D->setVisibleDespiteOwningModule(); } return true; } return false; }; if (DefinitionIsVisible()) return true; // The external source may have additional definitions of this entity that are // visible, so complete the redeclaration chain now and ask again. if (auto *Source = Context.getExternalSource()) { Source->CompleteRedeclChain(D); return DefinitionIsVisible(); } return false; } /// Locks in the inheritance model for the given class and all of its bases. 
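///
/// Under the Microsoft ABI the representation of a pointer-to-member depends
/// on the inheritance shape of the class it points into, so the model must be
/// pinned down the first time such a pointer is required to be complete. A
/// rough illustration with hypothetical user code:
///
///   struct A { void f(); };
///   struct B { void g(); };
///   struct Single : A {};          // single-inheritance model
///   struct Multi : A, B {};        // multiple-inheritance model
///   struct Virt : virtual A {};    // virtual-inheritance model
///
/// A 'void (Single::*)()' can be a lone code pointer, while the Multi and
/// Virt cases carry extra this-adjustment and vbtable-offset fields; the
/// PPTMK_* settings below roughly correspond to /vmb, /vmg and
/// '#pragma pointers_to_members'.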
static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) { RD = RD->getMostRecentNonInjectedDecl(); if (!RD->hasAttr()) { MSInheritanceModel IM; bool BestCase = false; switch (S.MSPointerToMemberRepresentationMethod) { case LangOptions::PPTMK_BestCase: BestCase = true; IM = RD->calculateInheritanceModel(); break; case LangOptions::PPTMK_FullGeneralitySingleInheritance: IM = MSInheritanceModel::Single; break; case LangOptions::PPTMK_FullGeneralityMultipleInheritance: IM = MSInheritanceModel::Multiple; break; case LangOptions::PPTMK_FullGeneralityVirtualInheritance: IM = MSInheritanceModel::Unspecified; break; } SourceRange Loc = S.ImplicitMSInheritanceAttrLoc.isValid() ? S.ImplicitMSInheritanceAttrLoc : RD->getSourceRange(); RD->addAttr(MSInheritanceAttr::CreateImplicit( S.getASTContext(), BestCase, Loc, AttributeCommonInfo::AS_Microsoft, MSInheritanceAttr::Spelling(IM))); S.Consumer.AssignInheritanceModel(RD); } } /// The implementation of RequireCompleteType bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser) { // FIXME: Add this assertion to make sure we always get instantiation points. // assert(!Loc.isInvalid() && "Invalid location in RequireCompleteType"); // FIXME: Add this assertion to help us flush out problems with // checking for dependent types and type-dependent expressions. // // assert(!T->isDependentType() && // "Can't ask whether a dependent type is complete"); if (const MemberPointerType *MPTy = T->getAs()) { if (!MPTy->getClass()->isDependentType()) { if (getLangOpts().CompleteMemberPointers && !MPTy->getClass()->getAsCXXRecordDecl()->isBeingDefined() && RequireCompleteType(Loc, QualType(MPTy->getClass(), 0), Kind, diag::err_memptr_incomplete)) return true; // We lock in the inheritance model once somebody has asked us to ensure // that a pointer-to-member type is complete. if (Context.getTargetInfo().getCXXABI().isMicrosoft()) { (void)isCompleteType(Loc, QualType(MPTy->getClass(), 0)); assignInheritanceModel(*this, MPTy->getMostRecentCXXRecordDecl()); } } } NamedDecl *Def = nullptr; bool AcceptSizeless = (Kind == CompleteTypeKind::AcceptSizeless); bool Incomplete = (T->isIncompleteType(&Def) || (!AcceptSizeless && T->isSizelessBuiltinType())); // Check that any necessary explicit specializations are visible. For an // enum, we just need the declaration, so don't check this. if (Def && !isa(Def)) checkSpecializationVisibility(Loc, Def); // If we have a complete type, we're done. if (!Incomplete) { // If we know about the definition but it is not visible, complain. NamedDecl *SuggestedDef = nullptr; if (Def && !hasVisibleDefinition(Def, &SuggestedDef, /*OnlyNeedComplete*/true)) { // If the user is going to see an error here, recover by making the // definition visible. bool TreatAsComplete = Diagnoser && !isSFINAEContext(); if (Diagnoser && SuggestedDef) diagnoseMissingImport(Loc, SuggestedDef, MissingImportKind::Definition, /*Recover*/TreatAsComplete); return !TreatAsComplete; } else if (Def && !TemplateInstCallbacks.empty()) { CodeSynthesisContext TempInst; TempInst.Kind = CodeSynthesisContext::Memoization; TempInst.Template = Def; TempInst.Entity = Def; TempInst.PointOfInstantiation = Loc; atTemplateBegin(TemplateInstCallbacks, *this, TempInst); atTemplateEnd(TemplateInstCallbacks, *this, TempInst); } return false; } TagDecl *Tag = dyn_cast_or_null(Def); ObjCInterfaceDecl *IFace = dyn_cast_or_null(Def); // Give the external source a chance to provide a definition of the type. 
// This is kept separate from completing the redeclaration chain so that // external sources such as LLDB can avoid synthesizing a type definition // unless it's actually needed. if (Tag || IFace) { // Avoid diagnosing invalid decls as incomplete. if (Def->isInvalidDecl()) return true; // Give the external AST source a chance to complete the type. if (auto *Source = Context.getExternalSource()) { if (Tag && Tag->hasExternalLexicalStorage()) Source->CompleteType(Tag); if (IFace && IFace->hasExternalLexicalStorage()) Source->CompleteType(IFace); // If the external source completed the type, go through the motions // again to ensure we're allowed to use the completed type. if (!T->isIncompleteType()) return RequireCompleteTypeImpl(Loc, T, Kind, Diagnoser); } } // If we have a class template specialization or a class member of a // class template specialization, or an array with known size of such, // try to instantiate it. if (auto *RD = dyn_cast_or_null(Tag)) { bool Instantiated = false; bool Diagnosed = false; if (RD->isDependentContext()) { // Don't try to instantiate a dependent class (eg, a member template of // an instantiated class template specialization). // FIXME: Can this ever happen? } else if (auto *ClassTemplateSpec = dyn_cast(RD)) { if (ClassTemplateSpec->getSpecializationKind() == TSK_Undeclared) { runWithSufficientStackSpace(Loc, [&] { Diagnosed = InstantiateClassTemplateSpecialization( Loc, ClassTemplateSpec, TSK_ImplicitInstantiation, /*Complain=*/Diagnoser); }); Instantiated = true; } } else { CXXRecordDecl *Pattern = RD->getInstantiatedFromMemberClass(); if (!RD->isBeingDefined() && Pattern) { MemberSpecializationInfo *MSI = RD->getMemberSpecializationInfo(); assert(MSI && "Missing member specialization information?"); // This record was instantiated from a class within a template. if (MSI->getTemplateSpecializationKind() != TSK_ExplicitSpecialization) { runWithSufficientStackSpace(Loc, [&] { Diagnosed = InstantiateClass(Loc, RD, Pattern, getTemplateInstantiationArgs(RD), TSK_ImplicitInstantiation, /*Complain=*/Diagnoser); }); Instantiated = true; } } } if (Instantiated) { // Instantiate* might have already complained that the template is not // defined, if we asked it to. if (Diagnoser && Diagnosed) return true; // If we instantiated a definition, check that it's usable, even if // instantiation produced an error, so that repeated calls to this // function give consistent answers. if (!T->isIncompleteType()) return RequireCompleteTypeImpl(Loc, T, Kind, Diagnoser); } } // FIXME: If we didn't instantiate a definition because of an explicit // specialization declaration, check that it's visible. if (!Diagnoser) return true; Diagnoser->diagnose(*this, Loc, T); // If the type was a forward declaration of a class/struct/union // type, produce a note. if (Tag && !Tag->isInvalidDecl() && !Tag->getLocation().isInvalid()) Diag(Tag->getLocation(), Tag->isBeingDefined() ? diag::note_type_being_defined : diag::note_forward_declaration) << Context.getTagDeclType(Tag); // If the Objective-C class was a forward declaration, produce a note. if (IFace && !IFace->isInvalidDecl() && !IFace->getLocation().isInvalid()) Diag(IFace->getLocation(), diag::note_forward_class); // If we have external information that we can use to suggest a fix, // produce a note. 
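  // The diagnostics produced above pair an incomplete-type error at the point
  // of use with a note at the declaration. On hypothetical user code:
  //
  //   struct S;
  //   S s;   // error: variable has incomplete type 'S'
  //          // note: forward declaration of 'S'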
  if (ExternalSource)
    ExternalSource->MaybeDiagnoseMissingCompleteType(Loc, T);

  return true;
}

bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
                               CompleteTypeKind Kind, unsigned DiagID) {
  BoundTypeDiagnoser<> Diagnoser(DiagID);
  return RequireCompleteType(Loc, T, Kind, Diagnoser);
}

/// Get diagnostic %select index for tag kind for
/// literal type diagnostic message.
/// WARNING: Indexes apply to particular diagnostics only!
///
/// \returns diagnostic %select index.
static unsigned getLiteralDiagFromTagKind(TagTypeKind Tag) {
  switch (Tag) {
  case TTK_Struct: return 0;
  case TTK_Interface: return 1;
  case TTK_Class: return 2;
  default: llvm_unreachable("Invalid tag kind for literal type diagnostic!");
  }
}

/// Ensure that the type T is a literal type.
///
/// This routine checks whether the type @p T is a literal type. If @p T is an
/// incomplete type, an attempt is made to complete it. If @p T is a literal
/// type, or @p AllowIncompleteType is true and @p T is an incomplete type,
/// returns false. Otherwise, this routine issues the diagnostic @p PD (giving
/// it the type @p T), along with notes explaining why the type is not a
/// literal type, and returns true.
///
/// @param Loc  The location in the source that the non-literal type
/// diagnostic should refer to.
///
/// @param T  The type that this routine is examining for literalness.
///
/// @param Diagnoser  Emits a diagnostic if T is not a literal type.
///
/// @returns @c true if @p T is not a literal type and a diagnostic was emitted,
/// @c false otherwise.
bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
                              TypeDiagnoser &Diagnoser) {
  assert(!T->isDependentType() && "type should not be dependent");

  QualType ElemType = Context.getBaseElementType(T);
  if ((isCompleteType(Loc, ElemType) || ElemType->isVoidType()) &&
      T->isLiteralType(Context))
    return false;

  Diagnoser.diagnose(*this, Loc, T);

  if (T->isVariableArrayType())
    return true;

  const RecordType *RT = ElemType->getAs<RecordType>();
  if (!RT)
    return true;

  const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());

  // A partially-defined class type can't be a literal type, because a literal
  // class type must have a trivial destructor (which can't be checked until
  // the class definition is complete).
  if (RequireCompleteType(Loc, ElemType, diag::note_non_literal_incomplete, T))
    return true;

  // [expr.prim.lambda]p3:
  //   This class type is [not] a literal type.
  if (RD->isLambda() && !getLangOpts().CPlusPlus17) {
    Diag(RD->getLocation(), diag::note_non_literal_lambda);
    return true;
  }

  // If the class has virtual base classes, then it's not an aggregate, and
  // cannot have any constexpr constructors or a trivial default constructor,
  // so is non-literal. This is better to diagnose than the resulting absence
  // of constexpr constructors.
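  // For example (hypothetical user code), a virtual base by itself makes the
  // class non-literal, independent of what constructors it declares:
  //
  //   struct B { int i; };
  //   struct D : virtual B {};
  //   constexpr D d{};   // error: constexpr variable cannot have non-literal
  //                      // type 'const D'; note points at the virtual base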
if (RD->getNumVBases()) { Diag(RD->getLocation(), diag::note_non_literal_virtual_base) << getLiteralDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases(); for (const auto &I : RD->vbases()) Diag(I.getBeginLoc(), diag::note_constexpr_virtual_base_here) << I.getSourceRange(); } else if (!RD->isAggregate() && !RD->hasConstexprNonCopyMoveConstructor() && !RD->hasTrivialDefaultConstructor()) { Diag(RD->getLocation(), diag::note_non_literal_no_constexpr_ctors) << RD; } else if (RD->hasNonLiteralTypeFieldsOrBases()) { for (const auto &I : RD->bases()) { if (!I.getType()->isLiteralType(Context)) { Diag(I.getBeginLoc(), diag::note_non_literal_base_class) << RD << I.getType() << I.getSourceRange(); return true; } } for (const auto *I : RD->fields()) { if (!I->getType()->isLiteralType(Context) || I->getType().isVolatileQualified()) { Diag(I->getLocation(), diag::note_non_literal_field) << RD << I << I->getType() << I->getType().isVolatileQualified(); return true; } } } else if (getLangOpts().CPlusPlus20 ? !RD->hasConstexprDestructor() : !RD->hasTrivialDestructor()) { // All fields and bases are of literal types, so have trivial or constexpr // destructors. If this class's destructor is non-trivial / non-constexpr, // it must be user-declared. CXXDestructorDecl *Dtor = RD->getDestructor(); assert(Dtor && "class has literal fields and bases but no dtor?"); if (!Dtor) return true; if (getLangOpts().CPlusPlus20) { Diag(Dtor->getLocation(), diag::note_non_literal_non_constexpr_dtor) << RD; } else { Diag(Dtor->getLocation(), Dtor->isUserProvided() ? diag::note_non_literal_user_provided_dtor : diag::note_non_literal_nontrivial_dtor) << RD; if (!Dtor->isUserProvided()) SpecialMemberIsTrivial(Dtor, CXXDestructor, TAH_IgnoreTrivialABI, /*Diagnose*/ true); } } return true; } bool Sema::RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID) { BoundTypeDiagnoser<> Diagnoser(DiagID); return RequireLiteralType(Loc, T, Diagnoser); } /// Retrieve a version of the type 'T' that is elaborated by Keyword, qualified /// by the nested-name-specifier contained in SS, and that is (re)declared by /// OwnedTagDecl, which is nullptr if this is not a (re)declaration. QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl) { if (T.isNull()) return T; NestedNameSpecifier *NNS; if (SS.isValid()) NNS = SS.getScopeRep(); else { if (Keyword == ETK_None) return T; NNS = nullptr; } return Context.getElaboratedType(Keyword, NNS, T, OwnedTagDecl); } QualType Sema::BuildTypeofExprType(Expr *E, SourceLocation Loc) { assert(!E->hasPlaceholderType() && "unexpected placeholder"); if (!getLangOpts().CPlusPlus && E->refersToBitField()) Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield) << 2; if (!E->isTypeDependent()) { QualType T = E->getType(); if (const TagType *TT = T->getAs()) DiagnoseUseOfDecl(TT->getDecl(), E->getExprLoc()); } return Context.getTypeOfExprType(E); } /// getDecltypeForExpr - Given an expr, will return the decltype for /// that expression, according to the rules in C++11 /// [dcl.type.simple]p4 and C++11 [expr.lambda.prim]p18. static QualType getDecltypeForExpr(Sema &S, Expr *E) { if (E->isTypeDependent()) return S.Context.DependentTy; // C++11 [dcl.type.simple]p4: // The type denoted by decltype(e) is defined as follows: // // - if e is an unparenthesized id-expression or an unparenthesized class // member access (5.2.5), decltype(e) is the type of the entity named // by e. 
If there is no such entity, or if e names a set of overloaded // functions, the program is ill-formed; // // We apply the same rules for Objective-C ivar and property references. if (const DeclRefExpr *DRE = dyn_cast(E)) { const ValueDecl *VD = DRE->getDecl(); return VD->getType(); } else if (const MemberExpr *ME = dyn_cast(E)) { if (const ValueDecl *VD = ME->getMemberDecl()) if (isa(VD) || isa(VD)) return VD->getType(); } else if (const ObjCIvarRefExpr *IR = dyn_cast(E)) { return IR->getDecl()->getType(); } else if (const ObjCPropertyRefExpr *PR = dyn_cast(E)) { if (PR->isExplicitProperty()) return PR->getExplicitProperty()->getType(); } else if (auto *PE = dyn_cast(E)) { return PE->getType(); } // C++11 [expr.lambda.prim]p18: // Every occurrence of decltype((x)) where x is a possibly // parenthesized id-expression that names an entity of automatic // storage duration is treated as if x were transformed into an // access to a corresponding data member of the closure type that // would have been declared if x were an odr-use of the denoted // entity. using namespace sema; if (S.getCurLambda()) { if (isa(E)) { if (DeclRefExpr *DRE = dyn_cast(E->IgnoreParens())) { if (VarDecl *Var = dyn_cast(DRE->getDecl())) { QualType T = S.getCapturedDeclRefType(Var, DRE->getLocation()); if (!T.isNull()) return S.Context.getLValueReferenceType(T); } } } } // C++11 [dcl.type.simple]p4: // [...] QualType T = E->getType(); switch (E->getValueKind()) { // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the // type of e; case VK_XValue: T = S.Context.getRValueReferenceType(T); break; // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the // type of e; case VK_LValue: T = S.Context.getLValueReferenceType(T); break; // - otherwise, decltype(e) is the type of e. case VK_RValue: break; } return T; } QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated) { assert(!E->hasPlaceholderType() && "unexpected placeholder"); if (AsUnevaluated && CodeSynthesisContexts.empty() && E->HasSideEffects(Context, false)) { // The expression operand for decltype is in an unevaluated expression // context, so side effects could result in unintended consequences. Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context); } return Context.getDecltypeType(E, getDecltypeForExpr(*this, E)); } QualType Sema::BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc) { switch (UKind) { case UnaryTransformType::EnumUnderlyingType: if (!BaseType->isDependentType() && !BaseType->isEnumeralType()) { Diag(Loc, diag::err_only_enums_have_underlying_types); return QualType(); } else { QualType Underlying = BaseType; if (!BaseType->isDependentType()) { // The enum could be incomplete if we're parsing its definition or // recovering from an error. 
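        // A small illustration of this transform, on hypothetical user code:
        //
        //   enum E : short { e };
        //   __underlying_type(E) x = e;   // 'x' has type 'short'
        //
        //   struct S {};
        //   __underlying_type(S) y;       // error: only enumeration types
        //                                 // have underlying types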
        NamedDecl *FwdDecl = nullptr;
        if (BaseType->isIncompleteType(&FwdDecl)) {
          Diag(Loc, diag::err_underlying_type_of_incomplete_enum) << BaseType;
          Diag(FwdDecl->getLocation(), diag::note_forward_declaration)
              << FwdDecl;
          return QualType();
        }

        EnumDecl *ED = BaseType->getAs<EnumType>()->getDecl();
        assert(ED && "EnumType has no EnumDecl");
        DiagnoseUseOfDecl(ED, Loc);

        Underlying = ED->getIntegerType();
        assert(!Underlying.isNull());
      }
      return Context.getUnaryTransformType(BaseType, Underlying,
                                           UnaryTransformType::EnumUnderlyingType);
    }
  }
  llvm_unreachable("unknown unary transform type");
}

QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
  if (!T->isDependentType()) {
    // FIXME: It isn't entirely clear whether incomplete atomic types
    // are allowed or not; for simplicity, ban them for the moment.
    if (RequireCompleteType(Loc, T, diag::err_atomic_specifier_bad_type, 0))
      return QualType();

    int DisallowedKind = -1;
    if (T->isArrayType())
      DisallowedKind = 1;
    else if (T->isFunctionType())
      DisallowedKind = 2;
    else if (T->isReferenceType())
      DisallowedKind = 3;
    else if (T->isAtomicType())
      DisallowedKind = 4;
    else if (T.hasQualifiers())
      DisallowedKind = 5;
    else if (T->isSizelessType())
      DisallowedKind = 6;
    else if (!T.isTriviallyCopyableType(Context))
      // Some other non-trivially-copyable type (probably a C++ class)
      DisallowedKind = 7;
    else if (T->isExtIntType())
      DisallowedKind = 8;

    if (DisallowedKind != -1) {
      Diag(Loc, diag::err_atomic_specifier_bad_type) << DisallowedKind << T;
      return QualType();
    }

    // FIXME: Do we need any handling for ARC here?
  }

  // Build the atomic type.
  return Context.getAtomicType(T);
}
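// A rough summary of the rejection cases above, as hypothetical user code:
//
//   _Atomic(int) a;           // OK
//   _Atomic(int[4]) b;        // error: array type
//   _Atomic(void(void)) c;    // error: function type
//   _Atomic(const int) d;     // error: qualified type
//   _Atomic(_Atomic(int)) e;  // error: already atomic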