diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp index c09797e91b99..ca98c7a57446 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp @@ -1,13054 +1,13055 @@ //===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #include "CGOpenMPRuntime.h" #include "CGCXXABI.h" #include "CGCleanup.h" #include "CGRecordLayout.h" #include "CodeGenFunction.h" #include "clang/AST/APValue.h" #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/StmtVisitor.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceManager.h" #include "clang/CodeGen/ConstantInitBuilder.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SetOperations.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Bitcode/BitcodeReader.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/Value.h" #include "llvm/Support/AtomicOrdering.h" #include "llvm/Support/Format.h" #include "llvm/Support/raw_ostream.h" #include #include using namespace clang; using namespace CodeGen; using namespace llvm::omp; namespace { /// Base class for handling code generation inside OpenMP regions. class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo { public: /// Kinds of OpenMP regions used in codegen. 
enum CGOpenMPRegionKind { /// Region with outlined function for standalone 'parallel' /// directive. ParallelOutlinedRegion, /// Region with outlined function for standalone 'task' directive. TaskOutlinedRegion, /// Region for constructs that do not require function outlining, /// like 'for', 'sections', 'atomic' etc. directives. InlinedRegion, /// Region with outlined function for standalone 'target' directive. TargetRegion, }; CGOpenMPRegionInfo(const CapturedStmt &CS, const CGOpenMPRegionKind RegionKind, const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind, bool HasCancel) : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {} CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind, const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind, bool HasCancel) : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {} /// Get a variable or parameter for storing global thread id /// inside OpenMP construct. virtual const VarDecl *getThreadIDVariable() const = 0; /// Emit the captured statement body. void EmitBody(CodeGenFunction &CGF, const Stmt *S) override; /// Get an LValue for the current ThreadID variable. /// \return LValue for thread id variable. This LValue always has type int32*. virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF); virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {} CGOpenMPRegionKind getRegionKind() const { return RegionKind; } OpenMPDirectiveKind getDirectiveKind() const { return Kind; } bool hasCancel() const { return HasCancel; } static bool classof(const CGCapturedStmtInfo *Info) { return Info->getKind() == CR_OpenMP; } ~CGOpenMPRegionInfo() override = default; protected: CGOpenMPRegionKind RegionKind; RegionCodeGenTy CodeGen; OpenMPDirectiveKind Kind; bool HasCancel; }; /// API for captured statement code generation in OpenMP constructs. 
class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo { public: CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar, const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind, bool HasCancel, StringRef HelperName) : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind, HasCancel), ThreadIDVar(ThreadIDVar), HelperName(HelperName) { assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region."); } /// Get a variable or parameter for storing global thread id /// inside OpenMP construct. const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; } /// Get the name of the capture helper. StringRef getHelperName() const override { return HelperName; } static bool classof(const CGCapturedStmtInfo *Info) { return CGOpenMPRegionInfo::classof(Info) && cast(Info)->getRegionKind() == ParallelOutlinedRegion; } private: /// A variable or parameter storing global thread id for OpenMP /// constructs. const VarDecl *ThreadIDVar; StringRef HelperName; }; /// API for captured statement code generation in OpenMP constructs. class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo { public: class UntiedTaskActionTy final : public PrePostActionTy { bool Untied; const VarDecl *PartIDVar; const RegionCodeGenTy UntiedCodeGen; llvm::SwitchInst *UntiedSwitch = nullptr; public: UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar, const RegionCodeGenTy &UntiedCodeGen) : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {} void Enter(CodeGenFunction &CGF) override { if (Untied) { // Emit task switching point. 
LValue PartIdLVal = CGF.EmitLoadOfPointerLValue( CGF.GetAddrOfLocalVar(PartIDVar), PartIDVar->getType()->castAs()); llvm::Value *Res = CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation()); llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done."); UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB); CGF.EmitBlock(DoneBB); CGF.EmitBranchThroughCleanup(CGF.ReturnBlock); CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp.")); UntiedSwitch->addCase(CGF.Builder.getInt32(0), CGF.Builder.GetInsertBlock()); emitUntiedSwitch(CGF); } } void emitUntiedSwitch(CodeGenFunction &CGF) const { if (Untied) { LValue PartIdLVal = CGF.EmitLoadOfPointerLValue( CGF.GetAddrOfLocalVar(PartIDVar), PartIDVar->getType()->castAs()); CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()), PartIdLVal); UntiedCodeGen(CGF); CodeGenFunction::JumpDest CurPoint = CGF.getJumpDestInCurrentScope(".untied.next."); CGF.EmitBranch(CGF.ReturnBlock.getBlock()); CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp.")); UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()), CGF.Builder.GetInsertBlock()); CGF.EmitBranchThroughCleanup(CurPoint); CGF.EmitBlock(CurPoint.getBlock()); } } unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); } }; CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar, const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind, bool HasCancel, const UntiedTaskActionTy &Action) : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel), ThreadIDVar(ThreadIDVar), Action(Action) { assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region."); } /// Get a variable or parameter for storing global thread id /// inside OpenMP construct. const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; } /// Get an LValue for the current ThreadID variable. LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override; /// Get the name of the capture helper. 
StringRef getHelperName() const override { return ".omp_outlined."; } void emitUntiedSwitch(CodeGenFunction &CGF) override { Action.emitUntiedSwitch(CGF); } static bool classof(const CGCapturedStmtInfo *Info) { return CGOpenMPRegionInfo::classof(Info) && cast(Info)->getRegionKind() == TaskOutlinedRegion; } private: /// A variable or parameter storing global thread id for OpenMP /// constructs. const VarDecl *ThreadIDVar; /// Action for emitting code for untied tasks. const UntiedTaskActionTy &Action; }; /// API for inlined captured statement code generation in OpenMP /// constructs. class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo { public: CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI, const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind, bool HasCancel) : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel), OldCSI(OldCSI), OuterRegionInfo(dyn_cast_or_null(OldCSI)) {} // Retrieve the value of the context parameter. llvm::Value *getContextValue() const override { if (OuterRegionInfo) return OuterRegionInfo->getContextValue(); llvm_unreachable("No context value for inlined OpenMP region"); } void setContextValue(llvm::Value *V) override { if (OuterRegionInfo) { OuterRegionInfo->setContextValue(V); return; } llvm_unreachable("No context value for inlined OpenMP region"); } /// Lookup the captured field decl for a variable. const FieldDecl *lookup(const VarDecl *VD) const override { if (OuterRegionInfo) return OuterRegionInfo->lookup(VD); // If there is no outer outlined region,no need to lookup in a list of // captured variables, we can use the original one. return nullptr; } FieldDecl *getThisFieldDecl() const override { if (OuterRegionInfo) return OuterRegionInfo->getThisFieldDecl(); return nullptr; } /// Get a variable or parameter for storing global thread id /// inside OpenMP construct. 
const VarDecl *getThreadIDVariable() const override { if (OuterRegionInfo) return OuterRegionInfo->getThreadIDVariable(); return nullptr; } /// Get an LValue for the current ThreadID variable. LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override { if (OuterRegionInfo) return OuterRegionInfo->getThreadIDVariableLValue(CGF); llvm_unreachable("No LValue for inlined OpenMP construct"); } /// Get the name of the capture helper. StringRef getHelperName() const override { if (auto *OuterRegionInfo = getOldCSI()) return OuterRegionInfo->getHelperName(); llvm_unreachable("No helper name for inlined OpenMP construct"); } void emitUntiedSwitch(CodeGenFunction &CGF) override { if (OuterRegionInfo) OuterRegionInfo->emitUntiedSwitch(CGF); } CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; } static bool classof(const CGCapturedStmtInfo *Info) { return CGOpenMPRegionInfo::classof(Info) && cast(Info)->getRegionKind() == InlinedRegion; } ~CGOpenMPInlinedRegionInfo() override = default; private: /// CodeGen info about outer OpenMP region. CodeGenFunction::CGCapturedStmtInfo *OldCSI; CGOpenMPRegionInfo *OuterRegionInfo; }; /// API for captured statement code generation in OpenMP target /// constructs. For this captures, implicit parameters are used instead of the /// captured fields. The name of the target region has to be unique in a given /// application so it is provided by the client, because only the client has /// the information to generate that. class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo { public: CGOpenMPTargetRegionInfo(const CapturedStmt &CS, const RegionCodeGenTy &CodeGen, StringRef HelperName) : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target, /*HasCancel=*/false), HelperName(HelperName) {} /// This is unused for target regions because each starts executing /// with a single thread. const VarDecl *getThreadIDVariable() const override { return nullptr; } /// Get the name of the capture helper. 
StringRef getHelperName() const override { return HelperName; } static bool classof(const CGCapturedStmtInfo *Info) { return CGOpenMPRegionInfo::classof(Info) && cast(Info)->getRegionKind() == TargetRegion; } private: StringRef HelperName; }; static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) { llvm_unreachable("No codegen for expressions"); } /// API for generation of expressions captured in a innermost OpenMP /// region. class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo { public: CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS) : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen, OMPD_unknown, /*HasCancel=*/false), PrivScope(CGF) { // Make sure the globals captured in the provided statement are local by // using the privatization logic. We assume the same variable is not // captured more than once. for (const auto &C : CS.captures()) { if (!C.capturesVariable() && !C.capturesVariableByCopy()) continue; const VarDecl *VD = C.getCapturedVar(); if (VD->isLocalVarDeclOrParm()) continue; DeclRefExpr DRE(CGF.getContext(), const_cast(VD), /*RefersToEnclosingVariableOrCapture=*/false, VD->getType().getNonReferenceType(), VK_LValue, C.getLocation()); PrivScope.addPrivate( VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(CGF); }); } (void)PrivScope.Privatize(); } /// Lookup the captured field decl for a variable. const FieldDecl *lookup(const VarDecl *VD) const override { if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD)) return FD; return nullptr; } /// Emit the captured statement body. void EmitBody(CodeGenFunction &CGF, const Stmt *S) override { llvm_unreachable("No body for expressions"); } /// Get a variable or parameter for storing global thread id /// inside OpenMP construct. const VarDecl *getThreadIDVariable() const override { llvm_unreachable("No thread id for expressions"); } /// Get the name of the capture helper. 
StringRef getHelperName() const override { llvm_unreachable("No helper name for expressions"); } static bool classof(const CGCapturedStmtInfo *Info) { return false; } private: /// Private scope to capture global variables. CodeGenFunction::OMPPrivateScope PrivScope; }; /// RAII for emitting code of OpenMP constructs. class InlinedOpenMPRegionRAII { CodeGenFunction &CGF; llvm::DenseMap LambdaCaptureFields; FieldDecl *LambdaThisCaptureField = nullptr; const CodeGen::CGBlockInfo *BlockInfo = nullptr; bool NoInheritance = false; public: /// Constructs region for combined constructs. /// \param CodeGen Code generation sequence for combined directives. Includes /// a list of functions used for code generation of implicitly inlined /// regions. InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind, bool HasCancel, bool NoInheritance = true) : CGF(CGF), NoInheritance(NoInheritance) { // Start emission for the construct. CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo( CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel); if (NoInheritance) { std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields); LambdaThisCaptureField = CGF.LambdaThisCaptureField; CGF.LambdaThisCaptureField = nullptr; BlockInfo = CGF.BlockInfo; CGF.BlockInfo = nullptr; } } ~InlinedOpenMPRegionRAII() { // Restore original CapturedStmtInfo only if we're done with code emission. auto *OldCSI = cast(CGF.CapturedStmtInfo)->getOldCSI(); delete CGF.CapturedStmtInfo; CGF.CapturedStmtInfo = OldCSI; if (NoInheritance) { std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields); CGF.LambdaThisCaptureField = LambdaThisCaptureField; CGF.BlockInfo = BlockInfo; } } }; /// Values for bit flags used in the ident_t to describe the fields. /// All enumeric elements are named and described in accordance with the code /// from https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h enum OpenMPLocationFlags : unsigned { /// Use trampoline for internal microtask. 
// NOTE(review): formatting below restored from a whitespace-collapsed paste;
// code tokens are unchanged. These enumerators continue the
// OpenMPLocationFlags bitmask enum opened just above; per its doc comment the
// values mirror the KMP_IDENT_* flags in the OpenMP runtime's kmp.h.
  OMP_IDENT_IMD = 0x01,
  /// Use c-style ident structure.
  OMP_IDENT_KMPC = 0x02,
  /// Atomic reduction option for kmpc_reduce.
  OMP_ATOMIC_REDUCE = 0x10,
  /// Explicit 'barrier' directive.
  OMP_IDENT_BARRIER_EXPL = 0x20,
  /// Implicit barrier in code.
  OMP_IDENT_BARRIER_IMPL = 0x40,
  /// Implicit barrier in 'for' directive.
  OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
  /// Implicit barrier in 'sections' directive.
  OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
  /// Implicit barrier in 'single' directive.
  OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
  /// Call of __kmp_for_static_init for static loop.
  OMP_IDENT_WORK_LOOP = 0x200,
  /// Call of __kmp_for_static_init for sections.
  OMP_IDENT_WORK_SECTIONS = 0x400,
  /// Call of __kmp_for_static_init for distribute.
  OMP_IDENT_WORK_DISTRIBUTE = 0x800,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
};

namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();

/// Values for bit flags for marking which requires clauses have been used.
enum OpenMPOffloadingRequiresDirFlags : int64_t {
  /// flag undefined.
  OMP_REQ_UNDEFINED = 0x000,
  /// no requires clause present.
  OMP_REQ_NONE = 0x001,
  /// reverse_offload clause.
  OMP_REQ_REVERSE_OFFLOAD = 0x002,
  /// unified_address clause.
  OMP_REQ_UNIFIED_ADDRESS = 0x004,
  /// unified_shared_memory clause.
  OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
  /// dynamic_allocators clause.
  OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
};

enum OpenMPOffloadingReservedDeviceIDs {
  /// Device ID if the device was not defined, runtime should get it
  /// from environment variables in the spec.
  OMP_DEVICEID_UNDEF = -1,
};
} // anonymous namespace

/// Describes ident structure that describes a source location.
/// All descriptions are taken from /// https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h /// Original structure: /// typedef struct ident { /// kmp_int32 reserved_1; /**< might be used in Fortran; /// see above */ /// kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags; /// KMP_IDENT_KMPC identifies this union /// member */ /// kmp_int32 reserved_2; /**< not really used in Fortran any more; /// see above */ ///#if USE_ITT_BUILD /// /* but currently used for storing /// region-specific ITT */ /// /* contextual information. */ ///#endif /* USE_ITT_BUILD */ /// kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for /// C++ */ /// char const *psource; /**< String describing the source location. /// The string is composed of semi-colon separated // fields which describe the source file, /// the function and a pair of line numbers that /// delimit the construct. /// */ /// } ident_t; enum IdentFieldIndex { /// might be used in Fortran IdentField_Reserved_1, /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member. IdentField_Flags, /// Not really used in Fortran any more IdentField_Reserved_2, /// Source[4] in Fortran, do not use for C++ IdentField_Reserved_3, /// String describing the source location. The string is composed of /// semi-colon separated fields which describe the source file, the function /// and a pair of line numbers that delimit the construct. IdentField_PSource }; /// Schedule types for 'omp for' loops (these enumerators are taken from /// the enum sched_type in kmp.h). enum OpenMPSchedType { /// Lower bound for default (unordered) versions. OMP_sch_lower = 32, OMP_sch_static_chunked = 33, OMP_sch_static = 34, OMP_sch_dynamic_chunked = 35, OMP_sch_guided_chunked = 36, OMP_sch_runtime = 37, OMP_sch_auto = 38, /// static with chunk adjustment (e.g., simd) OMP_sch_static_balanced_chunked = 45, /// Lower bound for 'ordered' versions. 
// NOTE(review): formatting below restored from a whitespace-collapsed paste;
// code tokens are unchanged. Angle-bracketed template arguments appear to
// have been stripped by the paste (e.g. the pushCleanup call below takes an
// explicit cleanup-type template argument upstream) -- confirm against
// upstream before compiling.
  OMP_ord_lower = 64,
  OMP_ord_static_chunked = 65,
  OMP_ord_static = 66,
  OMP_ord_dynamic_chunked = 67,
  OMP_ord_guided_chunked = 68,
  OMP_ord_runtime = 69,
  OMP_ord_auto = 70,
  OMP_sch_default = OMP_sch_static,
  /// dist_schedule types
  OMP_dist_sch_static_chunked = 91,
  OMP_dist_sch_static = 92,
  /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
  /// Set if the monotonic schedule modifier was present.
  OMP_sch_modifier_monotonic = (1 << 29),
  /// Set if the nonmonotonic schedule modifier was present.
  OMP_sch_modifier_nonmonotonic = (1 << 30),
};

/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class CleanupTy final : public EHScopeStack::Cleanup {
  PrePostActionTy *Action;

public:
  explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
  // Runs the action's Exit hook when the cleanup fires, but only if there is
  // a valid insertion point to emit into.
  void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
    if (!CGF.HaveInsertPoint())
      return;
    Action->Exit(CGF);
  }
};
} // anonymous namespace

// Invokes the stored code-generation callback. When a pre/post action is
// attached, it is pushed as a normal-and-EH cleanup first, so its Exit() runs
// on both the normal and the exceptional path out of the region.
void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
  CodeGenFunction::RunCleanupsScope Scope(CGF);
  if (PrePostAction) {
    CGF.EHStack.pushCleanup(NormalAndEHCleanup, PrePostAction);
    Callback(CodeGen, CGF, *PrePostAction);
  } else {
    PrePostActionTy Action;
    Callback(CodeGen, CGF, Action);
  }
}

/// Check if the combiner is a call to UDR combiner and if it is so return the
/// UDR decl used for reduction.
// NOTE(review): formatting below restored from a whitespace-collapsed paste;
// code tokens are unchanged. The dyn_cast/cast/std::pair uses in this span
// are missing their template arguments (stripped by the paste) -- restore
// from upstream before compiling. This span ends mid-function; the body of
// emitInitWithReductionInitializer continues immediately after it.
static const OMPDeclareReductionDecl *
getReductionInit(const Expr *ReductionOp) {
  if (const auto *CE = dyn_cast(ReductionOp))
    if (const auto *OVE = dyn_cast(CE->getCallee()))
      if (const auto *DRE =
              dyn_cast(OVE->getSourceExpr()->IgnoreImpCasts()))
        if (const auto *DRD = dyn_cast(DRE->getDecl()))
          return DRD;
  return nullptr;
}

// Emits initialization of a reduction private copy. If the UDR declaration
// carries an initializer, the clause's LHS/RHS placeholders are privatized to
// Private/Original and InitOp is evaluated; otherwise Private is initialized
// from a null constant of type Ty (loaded per evaluation kind below).
static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
                                             const OMPDeclareReductionDecl *DRD,
                                             const Expr *InitOp,
                                             Address Private, Address Original,
                                             QualType Ty) {
  if (DRD->getInitializer()) {
    std::pair Reduction =
        CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
    const auto *CE = cast(InitOp);
    const auto *OVE = cast(CE->getCallee());
    const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
    const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
    const auto *LHSDRE = cast(cast(LHS)->getSubExpr());
    const auto *RHSDRE = cast(cast(RHS)->getSubExpr());
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    PrivateScope.addPrivate(cast(LHSDRE->getDecl()),
                            [=]() { return Private; });
    PrivateScope.addPrivate(cast(RHSDRE->getDecl()),
                            [=]() { return Original; });
    (void)PrivateScope.Privatize();
    RValue Func = RValue::get(Reduction.second);
    CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
    CGF.EmitIgnoredExpr(InitOp);
  } else {
    // No user-provided initializer: build a private null constant of type Ty
    // and load it back in the form the evaluation kind requires.
    llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
    std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
    auto *GV = new llvm::GlobalVariable(
        CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage, Init, Name);
    LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
    RValue InitRVal;
    switch (CGF.getEvaluationKind(Ty)) {
    case TEK_Scalar:
      InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
      break;
    case TEK_Complex:
      InitRVal =
          RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
      break;
    case TEK_Aggregate: {
      OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, LV);
CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(), /*IsInitializer=*/false); return; } } OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_PRValue); CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal); CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(), /*IsInitializer=*/false); } } /// Emit initialization of arrays of complex types. /// \param DestAddr Address of the array. /// \param Type Type of array. /// \param Init Initial expression of array. /// \param SrcAddr Address of the original array. static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr, QualType Type, bool EmitDeclareReductionInit, const Expr *Init, const OMPDeclareReductionDecl *DRD, Address SrcAddr = Address::invalid()) { // Perform element-by-element initialization. QualType ElementTy; // Drill down to the base element type on both arrays. const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe(); llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr); DestAddr = CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType()); if (DRD) SrcAddr = CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType()); llvm::Value *SrcBegin = nullptr; if (DRD) SrcBegin = SrcAddr.getPointer(); llvm::Value *DestBegin = DestAddr.getPointer(); // Cast from pointer to array type to pointer to single element. llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements); // The basic structure here is a while-do loop. llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body"); llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done"); llvm::Value *IsEmpty = CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty"); CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB); // Enter the loop body, making that address the current address. 
llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock(); CGF.EmitBlock(BodyBB); CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy); llvm::PHINode *SrcElementPHI = nullptr; Address SrcElementCurrent = Address::invalid(); if (DRD) { SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast"); SrcElementPHI->addIncoming(SrcBegin, EntryBB); SrcElementCurrent = Address(SrcElementPHI, SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize)); } llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI( DestBegin->getType(), 2, "omp.arraycpy.destElementPast"); DestElementPHI->addIncoming(DestBegin, EntryBB); Address DestElementCurrent = Address(DestElementPHI, DestAddr.getAlignment().alignmentOfArrayElement(ElementSize)); // Emit copy. { CodeGenFunction::RunCleanupsScope InitScope(CGF); if (EmitDeclareReductionInit) { emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent, SrcElementCurrent, ElementTy); } else CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(), /*IsInitializer=*/false); } if (DRD) { // Shift the address forward by one element. llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32( SrcAddr.getElementType(), SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element"); SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock()); } // Shift the address forward by one element. llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32( DestAddr.getElementType(), DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element"); // Check whether we've reached the end. llvm::Value *Done = CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done"); CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB); DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock()); // Done. 
CGF.EmitBlock(DoneBB, /*IsFinished=*/true); } LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) { return CGF.EmitOMPSharedLValue(E); } LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E) { if (const auto *OASE = dyn_cast(E)) return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false); return LValue(); } void ReductionCodeGen::emitAggregateInitialization( CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, const OMPDeclareReductionDecl *DRD) { // Emit VarDecl with copy init for arrays. // Get the address of the original variable captured in current // captured region. const auto *PrivateVD = cast(cast(ClausesData[N].Private)->getDecl()); bool EmitDeclareReductionInit = DRD && (DRD->getInitializer() || !PrivateVD->hasInit()); EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(), EmitDeclareReductionInit, EmitDeclareReductionInit ? ClausesData[N].ReductionOp : PrivateVD->getInit(), DRD, SharedLVal.getAddress(CGF)); } ReductionCodeGen::ReductionCodeGen(ArrayRef Shareds, ArrayRef Origs, ArrayRef Privates, ArrayRef ReductionOps) { ClausesData.reserve(Shareds.size()); SharedAddresses.reserve(Shareds.size()); Sizes.reserve(Shareds.size()); BaseDecls.reserve(Shareds.size()); const auto *IOrig = Origs.begin(); const auto *IPriv = Privates.begin(); const auto *IRed = ReductionOps.begin(); for (const Expr *Ref : Shareds) { ClausesData.emplace_back(Ref, *IOrig, *IPriv, *IRed); std::advance(IOrig, 1); std::advance(IPriv, 1); std::advance(IRed, 1); } } void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) { assert(SharedAddresses.size() == N && OrigAddresses.size() == N && "Number of generated lvalues must be exactly N."); LValue First = emitSharedLValue(CGF, ClausesData[N].Shared); LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Shared); SharedAddresses.emplace_back(First, Second); if (ClausesData[N].Shared == ClausesData[N].Ref) { 
OrigAddresses.emplace_back(First, Second); } else { LValue First = emitSharedLValue(CGF, ClausesData[N].Ref); LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref); OrigAddresses.emplace_back(First, Second); } } void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) { const auto *PrivateVD = cast(cast(ClausesData[N].Private)->getDecl()); QualType PrivateType = PrivateVD->getType(); bool AsArraySection = isa(ClausesData[N].Ref); if (!PrivateType->isVariablyModifiedType()) { Sizes.emplace_back( CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()), nullptr); return; } llvm::Value *Size; llvm::Value *SizeInChars; auto *ElemType = cast(OrigAddresses[N].first.getPointer(CGF)->getType()) ->getElementType(); auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType); if (AsArraySection) { Size = CGF.Builder.CreatePtrDiff(OrigAddresses[N].second.getPointer(CGF), OrigAddresses[N].first.getPointer(CGF)); Size = CGF.Builder.CreateNUWAdd( Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1)); SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf); } else { SizeInChars = CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()); Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf); } Sizes.emplace_back(SizeInChars, Size); CodeGenFunction::OpaqueValueMapping OpaqueMap( CGF, cast( CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()), RValue::get(Size)); CGF.EmitVariablyModifiedType(PrivateType); } void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size) { const auto *PrivateVD = cast(cast(ClausesData[N].Private)->getDecl()); QualType PrivateType = PrivateVD->getType(); if (!PrivateType->isVariablyModifiedType()) { assert(!Size && !Sizes[N].second && "Size should be nullptr for non-variably modified reduction " "items."); return; } CodeGenFunction::OpaqueValueMapping OpaqueMap( CGF, cast( CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()), 
RValue::get(Size)); CGF.EmitVariablyModifiedType(PrivateType); } void ReductionCodeGen::emitInitialization( CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, llvm::function_ref DefaultInit) { assert(SharedAddresses.size() > N && "No variable was generated"); const auto *PrivateVD = cast(cast(ClausesData[N].Private)->getDecl()); const OMPDeclareReductionDecl *DRD = getReductionInit(ClausesData[N].ReductionOp); QualType PrivateType = PrivateVD->getType(); PrivateAddr = CGF.Builder.CreateElementBitCast( PrivateAddr, CGF.ConvertTypeForMem(PrivateType)); QualType SharedType = SharedAddresses[N].first.getType(); SharedLVal = CGF.MakeAddrLValue( CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(CGF), CGF.ConvertTypeForMem(SharedType)), SharedType, SharedAddresses[N].first.getBaseInfo(), CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType)); if (CGF.getContext().getAsArrayType(PrivateVD->getType())) { if (DRD && DRD->getInitializer()) (void)DefaultInit(CGF); emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD); } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) { (void)DefaultInit(CGF); emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp, PrivateAddr, SharedLVal.getAddress(CGF), SharedLVal.getType()); } else if (!DefaultInit(CGF) && PrivateVD->hasInit() && !CGF.isTrivialInitializer(PrivateVD->getInit())) { CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr, PrivateVD->getType().getQualifiers(), /*IsInitializer=*/false); } } bool ReductionCodeGen::needCleanups(unsigned N) { const auto *PrivateVD = cast(cast(ClausesData[N].Private)->getDecl()); QualType PrivateType = PrivateVD->getType(); QualType::DestructionKind DTorKind = PrivateType.isDestructedType(); return DTorKind != QualType::DK_none; } void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr) { const auto *PrivateVD = cast(cast(ClausesData[N].Private)->getDecl()); QualType PrivateType = 
PrivateVD->getType(); QualType::DestructionKind DTorKind = PrivateType.isDestructedType(); if (needCleanups(N)) { PrivateAddr = CGF.Builder.CreateElementBitCast( PrivateAddr, CGF.ConvertTypeForMem(PrivateType)); CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType); } } static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy, LValue BaseLV) { BaseTy = BaseTy.getNonReferenceType(); while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) && !CGF.getContext().hasSameType(BaseTy, ElTy)) { if (const auto *PtrTy = BaseTy->getAs()) { BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(CGF), PtrTy); } else { LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(CGF), BaseTy); BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal); } BaseTy = BaseTy->getPointeeType(); } return CGF.MakeAddrLValue( CGF.Builder.CreateElementBitCast(BaseLV.getAddress(CGF), CGF.ConvertTypeForMem(ElTy)), BaseLV.getType(), BaseLV.getBaseInfo(), CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType())); } static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy, llvm::Type *BaseLVType, CharUnits BaseLVAlignment, llvm::Value *Addr) { Address Tmp = Address::invalid(); Address TopTmp = Address::invalid(); Address MostTopTmp = Address::invalid(); BaseTy = BaseTy.getNonReferenceType(); while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) && !CGF.getContext().hasSameType(BaseTy, ElTy)) { Tmp = CGF.CreateMemTemp(BaseTy); if (TopTmp.isValid()) CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp); else MostTopTmp = Tmp; TopTmp = Tmp; BaseTy = BaseTy->getPointeeType(); } llvm::Type *Ty = BaseLVType; if (Tmp.isValid()) Ty = Tmp.getElementType(); Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty); if (Tmp.isValid()) { CGF.Builder.CreateStore(Addr, Tmp); return MostTopTmp; } return Address(Addr, BaseLVAlignment); } static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) { const VarDecl *OrigVD = nullptr; if (const auto *OASE = 
dyn_cast(Ref)) { const Expr *Base = OASE->getBase()->IgnoreParenImpCasts(); while (const auto *TempOASE = dyn_cast(Base)) Base = TempOASE->getBase()->IgnoreParenImpCasts(); while (const auto *TempASE = dyn_cast(Base)) Base = TempASE->getBase()->IgnoreParenImpCasts(); DE = cast(Base); OrigVD = cast(DE->getDecl()); } else if (const auto *ASE = dyn_cast(Ref)) { const Expr *Base = ASE->getBase()->IgnoreParenImpCasts(); while (const auto *TempASE = dyn_cast(Base)) Base = TempASE->getBase()->IgnoreParenImpCasts(); DE = cast(Base); OrigVD = cast(DE->getDecl()); } return OrigVD; } Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address PrivateAddr) { const DeclRefExpr *DE; if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) { BaseDecls.emplace_back(OrigVD); LValue OriginalBaseLValue = CGF.EmitLValue(DE); LValue BaseLValue = loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(), OriginalBaseLValue); Address SharedAddr = SharedAddresses[N].first.getAddress(CGF); llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff( BaseLValue.getPointer(CGF), SharedAddr.getPointer()); llvm::Value *PrivatePointer = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( PrivateAddr.getPointer(), SharedAddr.getType()); llvm::Value *Ptr = CGF.Builder.CreateGEP( SharedAddr.getElementType(), PrivatePointer, Adjustment); return castToBase(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(), OriginalBaseLValue.getAddress(CGF).getType(), OriginalBaseLValue.getAlignment(), Ptr); } BaseDecls.emplace_back( cast(cast(ClausesData[N].Ref)->getDecl())); return PrivateAddr; } bool ReductionCodeGen::usesReductionInitializer(unsigned N) const { const OMPDeclareReductionDecl *DRD = getReductionInit(ClausesData[N].ReductionOp); return DRD && DRD->getInitializer(); } LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) { return CGF.EmitLoadOfPointerLValue( CGF.GetAddrOfLocalVar(getThreadIDVariable()), 
getThreadIDVariable()->getType()->castAs()); } void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt *S) { if (!CGF.HaveInsertPoint()) return; // 1.2.2 OpenMP Language Terminology // Structured block - An executable statement with a single entry at the // top and a single exit at the bottom. // The point of exit cannot be a branch out of the structured block. // longjmp() and throw() must not violate the entry/exit criteria. CGF.EHStack.pushTerminate(); if (S) CGF.incrementProfileCounter(S); CodeGen(CGF); CGF.EHStack.popTerminate(); } LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue( CodeGenFunction &CGF) { return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()), getThreadIDVariable()->getType(), AlignmentSource::Decl); } static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC, QualType FieldTy) { auto *Field = FieldDecl::Create( C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy, C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()), /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit); Field->setAccess(AS_public); DC->addDecl(Field); return Field; } CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator, StringRef Separator) : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator), OMPBuilder(CGM.getModule()), OffloadEntriesInfoManager(CGM) { KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8); // Initialize Types used in OpenMPIRBuilder from OMPKinds.def OMPBuilder.initialize(); loadOffloadInfoMetadata(); } void CGOpenMPRuntime::clear() { InternalVars.clear(); // Clean non-target variable declarations possibly used only in debug info. 
for (const auto &Data : EmittedNonTargetVariables) { if (!Data.getValue().pointsToAliveValue()) continue; auto *GV = dyn_cast(Data.getValue()); if (!GV) continue; if (!GV->isDeclaration() || GV->getNumUses() > 0) continue; GV->eraseFromParent(); } } std::string CGOpenMPRuntime::getName(ArrayRef Parts) const { SmallString<128> Buffer; llvm::raw_svector_ostream OS(Buffer); StringRef Sep = FirstSeparator; for (StringRef Part : Parts) { OS << Sep << Part; Sep = Separator; } return std::string(OS.str()); } static llvm::Function * emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty, const Expr *CombinerInitializer, const VarDecl *In, const VarDecl *Out, bool IsCombiner) { // void .omp_combiner.(Ty *in, Ty *out); ASTContext &C = CGM.getContext(); QualType PtrTy = C.getPointerType(Ty).withRestrict(); FunctionArgList Args; ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(), /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other); ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(), /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other); Args.push_back(&OmpOutParm); Args.push_back(&OmpInParm); const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo); std::string Name = CGM.getOpenMPRuntime().getName( {IsCombiner ? "omp_combiner" : "omp_initializer", ""}); auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule()); CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo); if (CGM.getLangOpts().Optimize) { Fn->removeFnAttr(llvm::Attribute::NoInline); Fn->removeFnAttr(llvm::Attribute::OptimizeNone); Fn->addFnAttr(llvm::Attribute::AlwaysInline); } CodeGenFunction CGF(CGM); // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions. // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions. 
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(), Out->getLocation()); CodeGenFunction::OMPPrivateScope Scope(CGF); Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm); Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() { return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs()) .getAddress(CGF); }); Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm); Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() { return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs()) .getAddress(CGF); }); (void)Scope.Privatize(); if (!IsCombiner && Out->hasInit() && !CGF.isTrivialInitializer(Out->getInit())) { CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out), Out->getType().getQualifiers(), /*IsInitializer=*/true); } if (CombinerInitializer) CGF.EmitIgnoredExpr(CombinerInitializer); Scope.ForceCleanup(); CGF.FinishFunction(); return Fn; } void CGOpenMPRuntime::emitUserDefinedReduction( CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) { if (UDRMap.count(D) > 0) return; llvm::Function *Combiner = emitCombinerOrInitializer( CGM, D->getType(), D->getCombiner(), cast(cast(D->getCombinerIn())->getDecl()), cast(cast(D->getCombinerOut())->getDecl()), /*IsCombiner=*/true); llvm::Function *Initializer = nullptr; if (const Expr *Init = D->getInitializer()) { Initializer = emitCombinerOrInitializer( CGM, D->getType(), D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? 
Init : nullptr, cast(cast(D->getInitOrig())->getDecl()), cast(cast(D->getInitPriv())->getDecl()), /*IsCombiner=*/false); } UDRMap.try_emplace(D, Combiner, Initializer); if (CGF) { auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn); Decls.second.push_back(D); } } std::pair CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) { auto I = UDRMap.find(D); if (I != UDRMap.end()) return I->second; emitUserDefinedReduction(/*CGF=*/nullptr, D); return UDRMap.lookup(D); } namespace { // Temporary RAII solution to perform a push/pop stack event on the OpenMP IR // Builder if one is present. struct PushAndPopStackRAII { PushAndPopStackRAII(llvm::OpenMPIRBuilder *OMPBuilder, CodeGenFunction &CGF, bool HasCancel, llvm::omp::Directive Kind) : OMPBuilder(OMPBuilder) { if (!OMPBuilder) return; // The following callback is the crucial part of clangs cleanup process. // // NOTE: // Once the OpenMPIRBuilder is used to create parallel regions (and // similar), the cancellation destination (Dest below) is determined via // IP. That means if we have variables to finalize we split the block at IP, // use the new block (=BB) as destination to build a JumpDest (via // getJumpDestInCurrentScope(BB)) which then is fed to // EmitBranchThroughCleanup. Furthermore, there will not be the need // to push & pop an FinalizationInfo object. // The FiniCB will still be needed but at the point where the // OpenMPIRBuilder is asked to construct a parallel (or similar) construct. auto FiniCB = [&CGF](llvm::OpenMPIRBuilder::InsertPointTy IP) { assert(IP.getBlock()->end() == IP.getPoint() && "Clang CG should cause non-terminated block!"); CGBuilderTy::InsertPointGuard IPG(CGF.Builder); CGF.Builder.restoreIP(IP); CodeGenFunction::JumpDest Dest = CGF.getOMPCancelDestination(OMPD_parallel); CGF.EmitBranchThroughCleanup(Dest); }; // TODO: Remove this once we emit parallel regions through the // OpenMPIRBuilder as it can do this setup internally. 
llvm::OpenMPIRBuilder::FinalizationInfo FI({FiniCB, Kind, HasCancel}); OMPBuilder->pushFinalizationCB(std::move(FI)); } ~PushAndPopStackRAII() { if (OMPBuilder) OMPBuilder->popFinalizationCB(); } llvm::OpenMPIRBuilder *OMPBuilder; }; } // namespace static llvm::Function *emitParallelOrTeamsOutlinedFunction( CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) { assert(ThreadIDVar->getType()->isPointerType() && "thread id variable must be of type kmp_int32 *"); CodeGenFunction CGF(CGM, true); bool HasCancel = false; if (const auto *OPD = dyn_cast(&D)) HasCancel = OPD->hasCancel(); else if (const auto *OPD = dyn_cast(&D)) HasCancel = OPD->hasCancel(); else if (const auto *OPSD = dyn_cast(&D)) HasCancel = OPSD->hasCancel(); else if (const auto *OPFD = dyn_cast(&D)) HasCancel = OPFD->hasCancel(); else if (const auto *OPFD = dyn_cast(&D)) HasCancel = OPFD->hasCancel(); else if (const auto *OPFD = dyn_cast(&D)) HasCancel = OPFD->hasCancel(); else if (const auto *OPFD = dyn_cast(&D)) HasCancel = OPFD->hasCancel(); else if (const auto *OPFD = dyn_cast(&D)) HasCancel = OPFD->hasCancel(); // TODO: Temporarily inform the OpenMPIRBuilder, if any, about the new // parallel region to make cancellation barriers work properly. 
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel, InnermostKind); CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind, HasCancel, OutlinedHelperName); CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo); return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc()); } llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) { const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel); return emitParallelOrTeamsOutlinedFunction( CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen); } llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) { const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams); return emitParallelOrTeamsOutlinedFunction( CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen); } llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts) { auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF, PrePostActionTy &) { llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc()); llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc()); llvm::Value *TaskArgs[] = { UpLoc, ThreadID, CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar), TaskTVar->getType()->castAs()) .getPointer(CGF)}; CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_omp_task), TaskArgs); }; CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar, UntiedCodeGen); 
CodeGen.setAction(Action); assert(!ThreadIDVar->getType()->isPointerType() && "thread id variable must be of type kmp_int32 for tasks"); const OpenMPDirectiveKind Region = isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop : OMPD_task; const CapturedStmt *CS = D.getCapturedStmt(Region); bool HasCancel = false; if (const auto *TD = dyn_cast(&D)) HasCancel = TD->hasCancel(); else if (const auto *TD = dyn_cast(&D)) HasCancel = TD->hasCancel(); else if (const auto *TD = dyn_cast(&D)) HasCancel = TD->hasCancel(); else if (const auto *TD = dyn_cast(&D)) HasCancel = TD->hasCancel(); CodeGenFunction CGF(CGM, true); CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind, HasCancel, Action); CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo); llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS); if (!Tied) NumberOfParts = Action.getNumberOfParts(); return Res; } static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM, const RecordDecl *RD, const CGRecordLayout &RL, ArrayRef Data) { llvm::StructType *StructTy = RL.getLLVMType(); unsigned PrevIdx = 0; ConstantInitBuilder CIBuilder(CGM); auto DI = Data.begin(); for (const FieldDecl *FD : RD->fields()) { unsigned Idx = RL.getLLVMFieldNo(FD); // Fill the alignment. for (unsigned I = PrevIdx; I < Idx; ++I) Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I))); PrevIdx = Idx + 1; Fields.add(*DI); ++DI; } } template static llvm::GlobalVariable * createGlobalStruct(CodeGenModule &CGM, QualType Ty, bool IsConstant, ArrayRef Data, const Twine &Name, As &&... 
Args) { const auto *RD = cast(Ty->getAsTagDecl()); const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD); ConstantInitBuilder CIBuilder(CGM); ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType()); buildStructValue(Fields, CGM, RD, RL, Data); return Fields.finishAndCreateGlobal( Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty), IsConstant, std::forward(Args)...); } template static void createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty, ArrayRef Data, T &Parent) { const auto *RD = cast(Ty->getAsTagDecl()); const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD); ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType()); buildStructValue(Fields, CGM, RD, RL, Data); Fields.finishAndAddTo(Parent); } void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF, bool AtCurrentPoint) { auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn); assert(!Elem.second.ServiceInsertPt && "Insert point is set already."); llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty); if (AtCurrentPoint) { Elem.second.ServiceInsertPt = new llvm::BitCastInst( Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock()); } else { Elem.second.ServiceInsertPt = new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt"); Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt); } } void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) { auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn); if (Elem.second.ServiceInsertPt) { llvm::Instruction *Ptr = Elem.second.ServiceInsertPt; Elem.second.ServiceInsertPt = nullptr; Ptr->eraseFromParent(); } } static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF, SourceLocation Loc, SmallString<128> &Buffer) { llvm::raw_svector_ostream OS(Buffer); // Build debug location PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc); OS << ";" << PLoc.getFilename() << ";"; if (const auto *FD = dyn_cast_or_null(CGF.CurFuncDecl)) OS 
<< FD->getQualifiedNameAsString(); OS << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;"; return OS.str(); } llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc, unsigned Flags) { llvm::Constant *SrcLocStr; if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo || Loc.isInvalid()) { SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr(); } else { std::string FunctionName = ""; if (const auto *FD = dyn_cast_or_null(CGF.CurFuncDecl)) FunctionName = FD->getQualifiedNameAsString(); PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc); const char *FileName = PLoc.getFilename(); unsigned Line = PLoc.getLine(); unsigned Column = PLoc.getColumn(); SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName.c_str(), FileName, Line, Column); } unsigned Reserved2Flags = getDefaultLocationReserved2Flags(); return OMPBuilder.getOrCreateIdent(SrcLocStr, llvm::omp::IdentFlag(Flags), Reserved2Flags); } llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF, SourceLocation Loc) { assert(CGF.CurFn && "No function in current CodeGenFunction."); // If the OpenMPIRBuilder is used we need to use it for all thread id calls as // the clang invariants used below might be broken. if (CGM.getLangOpts().OpenMPIRBuilder) { SmallString<128> Buffer; OMPBuilder.updateToLocation(CGF.Builder.saveIP()); auto *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr( getIdentStringFromSourceLocation(CGF, Loc, Buffer)); return OMPBuilder.getOrCreateThreadID( OMPBuilder.getOrCreateIdent(SrcLocStr)); } llvm::Value *ThreadID = nullptr; // Check whether we've already cached a load of the thread id in this // function. auto I = OpenMPLocThreadIDMap.find(CGF.CurFn); if (I != OpenMPLocThreadIDMap.end()) { ThreadID = I->second.ThreadID; if (ThreadID != nullptr) return ThreadID; } // If exceptions are enabled, do not use parameter to avoid possible crash. 
if (auto *OMPRegionInfo = dyn_cast_or_null(CGF.CapturedStmtInfo)) { if (OMPRegionInfo->getThreadIDVariable()) { // Check if this an outlined function with thread id passed as argument. LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF); llvm::BasicBlock *TopBlock = CGF.AllocaInsertPt->getParent(); if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions || !CGF.getLangOpts().CXXExceptions || CGF.Builder.GetInsertBlock() == TopBlock || !isa(LVal.getPointer(CGF)) || cast(LVal.getPointer(CGF))->getParent() == TopBlock || cast(LVal.getPointer(CGF))->getParent() == CGF.Builder.GetInsertBlock()) { ThreadID = CGF.EmitLoadOfScalar(LVal, Loc); // If value loaded in entry block, cache it and use it everywhere in // function. if (CGF.Builder.GetInsertBlock() == TopBlock) { auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn); Elem.second.ThreadID = ThreadID; } return ThreadID; } } } // This is not an outlined function region - need to call __kmpc_int32 // kmpc_global_thread_num(ident_t *loc). // Generate thread id value and cache this value for use across the // function. 
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn); if (!Elem.second.ServiceInsertPt) setLocThreadIdInsertPt(CGF); CGBuilderTy::InsertPointGuard IPG(CGF.Builder); CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt); llvm::CallInst *Call = CGF.Builder.CreateCall( OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), OMPRTL___kmpc_global_thread_num), emitUpdateLocation(CGF, Loc)); Call->setCallingConv(CGF.getRuntimeCC()); Elem.second.ThreadID = Call; return Call; } void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) { assert(CGF.CurFn && "No function in current CodeGenFunction."); if (OpenMPLocThreadIDMap.count(CGF.CurFn)) { clearLocThreadIdInsertPt(CGF); OpenMPLocThreadIDMap.erase(CGF.CurFn); } if (FunctionUDRMap.count(CGF.CurFn) > 0) { for(const auto *D : FunctionUDRMap[CGF.CurFn]) UDRMap.erase(D); FunctionUDRMap.erase(CGF.CurFn); } auto I = FunctionUDMMap.find(CGF.CurFn); if (I != FunctionUDMMap.end()) { for(const auto *D : I->second) UDMMap.erase(D); FunctionUDMMap.erase(I); } LastprivateConditionalToTypes.erase(CGF.CurFn); FunctionToUntiedTaskStackMap.erase(CGF.CurFn); } llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() { return OMPBuilder.IdentPtr; } llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() { if (!Kmpc_MicroTy) { // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...) llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty), llvm::PointerType::getUnqual(CGM.Int32Ty)}; Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true); } return llvm::PointerType::getUnqual(Kmpc_MicroTy); } llvm::FunctionCallee CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned) { assert((IVSize == 32 || IVSize == 64) && "IV size is not compatible with the omp runtime"); StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4" : "__kmpc_for_static_init_4u") : (IVSigned ? "__kmpc_for_static_init_8" : "__kmpc_for_static_init_8u"); llvm::Type *ITy = IVSize == 32 ? 
CGM.Int32Ty : CGM.Int64Ty; auto *PtrTy = llvm::PointerType::getUnqual(ITy); llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc CGM.Int32Ty, // tid CGM.Int32Ty, // schedtype llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter PtrTy, // p_lower PtrTy, // p_upper PtrTy, // p_stride ITy, // incr ITy // chunk }; auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); return CGM.CreateRuntimeFunction(FnTy, Name); } llvm::FunctionCallee CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, bool IVSigned) { assert((IVSize == 32 || IVSize == 64) && "IV size is not compatible with the omp runtime"); StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u") : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u"); llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty; llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc CGM.Int32Ty, // tid CGM.Int32Ty, // schedtype ITy, // lower ITy, // upper ITy, // stride ITy // chunk }; auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); return CGM.CreateRuntimeFunction(FnTy, Name); } llvm::FunctionCallee CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, bool IVSigned) { assert((IVSize == 32 || IVSize == 64) && "IV size is not compatible with the omp runtime"); StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u") : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u"); llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc CGM.Int32Ty, // tid }; auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false); return CGM.CreateRuntimeFunction(FnTy, Name); } llvm::FunctionCallee CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) { assert((IVSize == 32 || IVSize == 64) && "IV size is not compatible with the omp runtime"); StringRef Name = IVSize == 32 ? (IVSigned ? 
"__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u") : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u"); llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty; auto *PtrTy = llvm::PointerType::getUnqual(ITy); llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc CGM.Int32Ty, // tid llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter PtrTy, // p_lower PtrTy, // p_upper PtrTy // p_stride }; auto *FnTy = llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false); return CGM.CreateRuntimeFunction(FnTy, Name); } /// Obtain information that uniquely identifies a target entry. This /// consists of the file and device IDs as well as line number associated with /// the relevant entry source location. static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc, unsigned &DeviceID, unsigned &FileID, unsigned &LineNum) { SourceManager &SM = C.getSourceManager(); // The loc should be always valid and have a file ID (the user cannot use // #pragma directives in macros) assert(Loc.isValid() && "Source location is expected to be always valid."); PresumedLoc PLoc = SM.getPresumedLoc(Loc); assert(PLoc.isValid() && "Source location is expected to be always valid."); llvm::sys::fs::UniqueID ID; if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) { PLoc = SM.getPresumedLoc(Loc, /*UseLineDirectives=*/false); assert(PLoc.isValid() && "Source location is expected to be always valid."); if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) SM.getDiagnostics().Report(diag::err_cannot_open_file) << PLoc.getFilename() << EC.message(); } DeviceID = ID.getDevice(); FileID = ID.getFile(); LineNum = PLoc.getLine(); } Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) { if (CGM.getLangOpts().OpenMPSimd) return Address::invalid(); llvm::Optional Res = OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD); if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link || (*Res == 
OMPDeclareTargetDeclAttr::MT_To && HasRequiresUnifiedSharedMemory))) { SmallString<64> PtrName; { llvm::raw_svector_ostream OS(PtrName); OS << CGM.getMangledName(GlobalDecl(VD)); if (!VD->isExternallyVisible()) { unsigned DeviceID, FileID, Line; getTargetEntryUniqueInfo(CGM.getContext(), VD->getCanonicalDecl()->getBeginLoc(), DeviceID, FileID, Line); OS << llvm::format("_%x", FileID); } OS << "_decl_tgt_ref_ptr"; } llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName); if (!Ptr) { QualType PtrTy = CGM.getContext().getPointerType(VD->getType()); Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy), PtrName); auto *GV = cast(Ptr); GV->setLinkage(llvm::GlobalValue::WeakAnyLinkage); if (!CGM.getLangOpts().OpenMPIsDevice) GV->setInitializer(CGM.GetAddrOfGlobal(VD)); registerTargetGlobalVariable(VD, cast(Ptr)); } return Address(Ptr, CGM.getContext().getDeclAlign(VD)); } return Address::invalid(); } llvm::Constant * CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) { assert(!CGM.getLangOpts().OpenMPUseTLS || !CGM.getContext().getTargetInfo().isTLSSupported()); // Lookup the entry, lazily creating it if necessary. 
std::string Suffix = getName({"cache", ""}); return getOrCreateInternalVariable( CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix)); } Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc) { if (CGM.getLangOpts().OpenMPUseTLS && CGM.getContext().getTargetInfo().isTLSSupported()) return VDAddr; llvm::Type *VarTy = VDAddr.getElementType(); llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy), CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)), getOrCreateThreadPrivateCache(VD)}; return Address(CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_threadprivate_cached), Args), VDAddr.getAlignment()); } void CGOpenMPRuntime::emitThreadPrivateVarInit( CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor, llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) { // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime // library. llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc); CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_global_thread_num), OMPLoc); // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor) // to register constructor/destructor for variable. 
llvm::Value *Args[] = { OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy), Ctor, CopyCtor, Dtor}; CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_threadprivate_register), Args); } llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition( const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF) { if (CGM.getLangOpts().OpenMPUseTLS && CGM.getContext().getTargetInfo().isTLSSupported()) return nullptr; VD = VD->getDefinition(CGM.getContext()); if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) { QualType ASTTy = VD->getType(); llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr; const Expr *Init = VD->getAnyInitializer(); if (CGM.getLangOpts().CPlusPlus && PerformInit) { // Generate function that re-emits the declaration's initializer into the // threadprivate copy of the variable VD CodeGenFunction CtorCGF(CGM); FunctionArgList Args; ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc, /*Id=*/nullptr, CGM.getContext().VoidPtrTy, ImplicitParamDecl::Other); Args.push_back(&Dst); const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration( CGM.getContext().VoidPtrTy, Args); llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI); std::string Name = getName({"__kmpc_global_ctor_", ""}); llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc); CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI, Args, Loc, Loc); llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar( CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation()); Address Arg = Address(ArgVal, VDAddr.getAlignment()); Arg = CtorCGF.Builder.CreateElementBitCast( Arg, CtorCGF.ConvertTypeForMem(ASTTy)); CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(), /*IsInitializer=*/true); ArgVal = CtorCGF.EmitLoadOfScalar( CtorCGF.GetAddrOfLocalVar(&Dst), 
/*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation()); CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue); CtorCGF.FinishFunction(); Ctor = Fn; } if (VD->getType().isDestructedType() != QualType::DK_none) { // Generate function that emits destructor call for the threadprivate copy // of the variable VD CodeGenFunction DtorCGF(CGM); FunctionArgList Args; ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc, /*Id=*/nullptr, CGM.getContext().VoidPtrTy, ImplicitParamDecl::Other); Args.push_back(&Dst); const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration( CGM.getContext().VoidTy, Args); llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI); std::string Name = getName({"__kmpc_global_dtor_", ""}); llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc); auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF); DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args, Loc, Loc); // Create a scope with an artificial location for the body of this function. auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF); llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar( DtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation()); DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()), DtorCGF.needsEHCleanup(ASTTy.isDestructedType())); DtorCGF.FinishFunction(); Dtor = Fn; } // Do not emit init function if it is not required. if (!Ctor && !Dtor) return nullptr; llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy}; auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs, /*isVarArg=*/false) ->getPointerTo(); // Copying constructor for the threadprivate variable. // Must be NULL - reserved by runtime, but currently it requires that this // parameter is always NULL. Otherwise it fires assertion. 
CopyCtor = llvm::Constant::getNullValue(CopyCtorTy); if (Ctor == nullptr) { auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy, /*isVarArg=*/false) ->getPointerTo(); Ctor = llvm::Constant::getNullValue(CtorTy); } if (Dtor == nullptr) { auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg=*/false) ->getPointerTo(); Dtor = llvm::Constant::getNullValue(DtorTy); } if (!CGF) { auto *InitFunctionTy = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false); std::string Name = getName({"__omp_threadprivate_init_", ""}); llvm::Function *InitFunction = CGM.CreateGlobalInitOrCleanUpFunction( InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction()); CodeGenFunction InitCGF(CGM); FunctionArgList ArgList; InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction, CGM.getTypes().arrangeNullaryFunction(), ArgList, Loc, Loc); emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc); InitCGF.FinishFunction(); return InitFunction; } emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc); } return nullptr; } bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD, llvm::GlobalVariable *Addr, bool PerformInit) { if (CGM.getLangOpts().OMPTargetTriples.empty() && !CGM.getLangOpts().OpenMPIsDevice) return false; Optional Res = OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD); if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link || (*Res == OMPDeclareTargetDeclAttr::MT_To && HasRequiresUnifiedSharedMemory)) return CGM.getLangOpts().OpenMPIsDevice; VD = VD->getDefinition(CGM.getContext()); assert(VD && "Unknown VarDecl"); if (!DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second) return CGM.getLangOpts().OpenMPIsDevice; QualType ASTTy = VD->getType(); SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc(); // Produce the unique prefix to identify the new target regions. 
We use // the source location of the variable declaration which we know to not // conflict with any target region. unsigned DeviceID; unsigned FileID; unsigned Line; getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line); SmallString<128> Buffer, Out; { llvm::raw_svector_ostream OS(Buffer); OS << "__omp_offloading_" << llvm::format("_%x", DeviceID) << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line; } const Expr *Init = VD->getAnyInitializer(); if (CGM.getLangOpts().CPlusPlus && PerformInit) { llvm::Constant *Ctor; llvm::Constant *ID; if (CGM.getLangOpts().OpenMPIsDevice) { // Generate function that re-emits the declaration's initializer into // the threadprivate copy of the variable VD CodeGenFunction CtorCGF(CGM); const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction(); llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI); llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction( FTy, Twine(Buffer, "_ctor"), FI, Loc); auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF); CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, FunctionArgList(), Loc, Loc); auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF); CtorCGF.EmitAnyExprToMem(Init, Address(Addr, CGM.getContext().getDeclAlign(VD)), Init->getType().getQualifiers(), /*IsInitializer=*/true); CtorCGF.FinishFunction(); Ctor = Fn; ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy); CGM.addUsedGlobal(cast(Ctor)); } else { Ctor = new llvm::GlobalVariable( CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor")); ID = Ctor; } // Register the information for the entry associated with the constructor. 
Out.clear(); OffloadEntriesInfoManager.registerTargetRegionEntryInfo( DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor, ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor); } if (VD->getType().isDestructedType() != QualType::DK_none) { llvm::Constant *Dtor; llvm::Constant *ID; if (CGM.getLangOpts().OpenMPIsDevice) { // Generate function that emits destructor call for the threadprivate // copy of the variable VD CodeGenFunction DtorCGF(CGM); const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction(); llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI); llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction( FTy, Twine(Buffer, "_dtor"), FI, Loc); auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF); DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, FunctionArgList(), Loc, Loc); // Create a scope with an artificial location for the body of this // function. auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF); DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)), ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()), DtorCGF.needsEHCleanup(ASTTy.isDestructedType())); DtorCGF.FinishFunction(); Dtor = Fn; ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy); CGM.addUsedGlobal(cast(Dtor)); } else { Dtor = new llvm::GlobalVariable( CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor")); ID = Dtor; } // Register the information for the entry associated with the destructor. 
Out.clear(); OffloadEntriesInfoManager.registerTargetRegionEntryInfo( DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor, ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor); } return CGM.getLangOpts().OpenMPIsDevice; } Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name) { std::string Suffix = getName({"artificial", ""}); llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType); llvm::Value *GAddr = getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix)); if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPUseTLS && CGM.getTarget().isTLSSupported()) { cast(GAddr)->setThreadLocal(/*Val=*/true); return Address(GAddr, CGM.getContext().getTypeAlignInChars(VarType)); } std::string CacheSuffix = getName({"cache", ""}); llvm::Value *Args[] = { emitUpdateLocation(CGF, SourceLocation()), getThreadID(CGF, SourceLocation()), CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy), CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy, /*isSigned=*/false), getOrCreateInternalVariable( CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))}; return Address( CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_threadprivate_cached), Args), VarLVType->getPointerTo(/*AddrSpace=*/0)), CGM.getContext().getTypeAlignInChars(VarType)); } void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen) { CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange()); // If the condition constant folds and can be elided, try to avoid emitting // the condition and the dead arm of the if/else. 
bool CondConstant; if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) { if (CondConstant) ThenGen(CGF); else ElseGen(CGF); return; } // Otherwise, the condition did not fold, or we couldn't elide it. Just // emit the conditional branch. llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then"); llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else"); llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end"); CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0); // Emit the 'then' code. CGF.EmitBlock(ThenBlock); ThenGen(CGF); CGF.EmitBranch(ContBlock); // Emit the 'else' code if present. // There is no need to emit line number for unconditional branch. (void)ApplyDebugLocation::CreateEmpty(CGF); CGF.EmitBlock(ElseBlock); ElseGen(CGF); // There is no need to emit line number for unconditional branch. (void)ApplyDebugLocation::CreateEmpty(CGF); CGF.EmitBranch(ContBlock); // Emit the continuation block for code after the if. CGF.EmitBlock(ContBlock, /*IsFinished=*/true); } void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef CapturedVars, const Expr *IfCond) { if (!CGF.HaveInsertPoint()) return; llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc); auto &M = CGM.getModule(); auto &&ThenGen = [&M, OutlinedFn, CapturedVars, RTLoc, this](CodeGenFunction &CGF, PrePostActionTy &) { // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn); CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime(); llvm::Value *Args[] = { RTLoc, CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())}; llvm::SmallVector RealArgs; RealArgs.append(std::begin(Args), std::end(Args)); RealArgs.append(CapturedVars.begin(), CapturedVars.end()); llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_fork_call); CGF.EmitRuntimeCall(RTLFn, RealArgs); }; auto &&ElseGen = [&M, 
OutlinedFn, CapturedVars, RTLoc, Loc, this](CodeGenFunction &CGF, PrePostActionTy &) { CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime(); llvm::Value *ThreadID = RT.getThreadID(CGF, Loc); // Build calls: // __kmpc_serialized_parallel(&Loc, GTid); llvm::Value *Args[] = {RTLoc, ThreadID}; CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( M, OMPRTL___kmpc_serialized_parallel), Args); // OutlinedFn(>id, &zero_bound, CapturedStruct); Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc); Address ZeroAddrBound = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty, /*Name=*/".bound.zero.addr"); CGF.InitTempAlloca(ZeroAddrBound, CGF.Builder.getInt32(/*C*/ 0)); llvm::SmallVector OutlinedFnArgs; // ThreadId for serialized parallels is 0. OutlinedFnArgs.push_back(ThreadIDAddr.getPointer()); OutlinedFnArgs.push_back(ZeroAddrBound.getPointer()); OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end()); // Ensure we do not inline the function. This is trivially true for the ones - // passed to __kmpc_fork_call but the ones calles in serialized regions + // passed to __kmpc_fork_call but the ones called in serialized regions // could be inlined. This is not a perfect but it is closer to the invariant // we want, namely, every data environment starts with a new function. // TODO: We should pass the if condition to the runtime function and do the // handling there. Much cleaner code. 
+ OutlinedFn->removeFnAttr(llvm::Attribute::AlwaysInline); OutlinedFn->addFnAttr(llvm::Attribute::NoInline); RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs); // __kmpc_end_serialized_parallel(&Loc, GTid); llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID}; CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( M, OMPRTL___kmpc_end_serialized_parallel), EndArgs); }; if (IfCond) { emitIfClause(CGF, IfCond, ThenGen, ElseGen); } else { RegionCodeGenTy ThenRCG(ThenGen); ThenRCG(CGF); } } // If we're inside an (outlined) parallel region, use the region info's // thread-ID variable (it is passed in a first argument of the outlined function // as "kmp_int32 *gtid"). Otherwise, if we're not inside parallel region, but in // regular serial code region, get thread ID by calling kmp_int32 // kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and // return the address of that temp. Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc) { if (auto *OMPRegionInfo = dyn_cast_or_null(CGF.CapturedStmtInfo)) if (OMPRegionInfo->getThreadIDVariable()) return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(CGF); llvm::Value *ThreadID = getThreadID(CGF, Loc); QualType Int32Ty = CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true); Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp."); CGF.EmitStoreOfScalar(ThreadID, CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty)); return ThreadIDTemp; } llvm::Constant *CGOpenMPRuntime::getOrCreateInternalVariable( llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) { SmallString<256> Buffer; llvm::raw_svector_ostream Out(Buffer); Out << Name; StringRef RuntimeName = Out.str(); auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first; if (Elem.second) { assert(Elem.second->getType()->getPointerElementType() == Ty && "OMP internal variable has different type than requested"); return 
&*Elem.second; } return Elem.second = new llvm::GlobalVariable( CGM.getModule(), Ty, /*IsConstant*/ false, llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty), Elem.first(), /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal, AddressSpace); } llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) { std::string Prefix = Twine("gomp_critical_user_", CriticalName).str(); std::string Name = getName({Prefix, "var"}); return getOrCreateInternalVariable(KmpCriticalNameTy, Name); } namespace { /// Common pre(post)-action for different OpenMP constructs. class CommonActionTy final : public PrePostActionTy { llvm::FunctionCallee EnterCallee; ArrayRef EnterArgs; llvm::FunctionCallee ExitCallee; ArrayRef ExitArgs; bool Conditional; llvm::BasicBlock *ContBlock = nullptr; public: CommonActionTy(llvm::FunctionCallee EnterCallee, ArrayRef EnterArgs, llvm::FunctionCallee ExitCallee, ArrayRef ExitArgs, bool Conditional = false) : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee), ExitArgs(ExitArgs), Conditional(Conditional) {} void Enter(CodeGenFunction &CGF) override { llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs); if (Conditional) { llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes); auto *ThenBlock = CGF.createBasicBlock("omp_if.then"); ContBlock = CGF.createBasicBlock("omp_if.end"); // Generate the branch (If-stmt) CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock); CGF.EmitBlock(ThenBlock); } } void Done(CodeGenFunction &CGF) { // Emit the rest of blocks/branches CGF.EmitBranch(ContBlock); CGF.EmitBlock(ContBlock, true); } void Exit(CodeGenFunction &CGF) override { CGF.EmitRuntimeCall(ExitCallee, ExitArgs); } }; } // anonymous namespace void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint) { // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]); // 
CriticalOpGen(); // __kmpc_end_critical(ident_t *, gtid, Lock); // Prepare arguments and build a call to __kmpc_critical if (!CGF.HaveInsertPoint()) return; llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), getCriticalRegionLock(CriticalName)}; llvm::SmallVector EnterArgs(std::begin(Args), std::end(Args)); if (Hint) { EnterArgs.push_back(CGF.Builder.CreateIntCast( CGF.EmitScalarExpr(Hint), CGM.Int32Ty, /*isSigned=*/false)); } CommonActionTy Action( OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), Hint ? OMPRTL___kmpc_critical_with_hint : OMPRTL___kmpc_critical), EnterArgs, OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), OMPRTL___kmpc_end_critical), Args); CriticalOpGen.setAction(Action); emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen); } void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc) { if (!CGF.HaveInsertPoint()) return; // if(__kmpc_master(ident_t *, gtid)) { // MasterOpGen(); // __kmpc_end_master(ident_t *, gtid); // } // Prepare arguments and build a call to __kmpc_master llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_master), Args, OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_end_master), Args, /*Conditional=*/true); MasterOpGen.setAction(Action); emitInlinedDirective(CGF, OMPD_master, MasterOpGen); Action.Done(CGF); } void CGOpenMPRuntime::emitMaskedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc, const Expr *Filter) { if (!CGF.HaveInsertPoint()) return; // if(__kmpc_masked(ident_t *, gtid, filter)) { // MaskedOpGen(); // __kmpc_end_masked(iden_t *, gtid); // } // Prepare arguments and build a call to __kmpc_masked llvm::Value *FilterVal = Filter ? 
CGF.EmitScalarExpr(Filter, CGF.Int32Ty) : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0); llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), FilterVal}; llvm::Value *ArgsEnd[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_masked), Args, OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_end_masked), ArgsEnd, /*Conditional=*/true); MaskedOpGen.setAction(Action); emitInlinedDirective(CGF, OMPD_masked, MaskedOpGen); Action.Done(CGF); } void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) { if (!CGF.HaveInsertPoint()) return; if (CGF.CGM.getLangOpts().OpenMPIRBuilder) { OMPBuilder.createTaskyield(CGF.Builder); } else { // Build call __kmpc_omp_taskyield(loc, thread_id, 0); llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)}; CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_omp_taskyield), Args); } if (auto *Region = dyn_cast_or_null(CGF.CapturedStmtInfo)) Region->emitUntiedSwitch(CGF); } void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc) { if (!CGF.HaveInsertPoint()) return; // __kmpc_taskgroup(ident_t *, gtid); // TaskgroupOpGen(); // __kmpc_end_taskgroup(ident_t *, gtid); // Prepare arguments and build a call to __kmpc_taskgroup llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_taskgroup), Args, OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_end_taskgroup), Args); TaskgroupOpGen.setAction(Action); emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen); } /// Given an array of pointers to variables, project the address of a /// given variable. 
static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array, unsigned Index, const VarDecl *Var) { // Pull out the pointer to the variable. Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index); llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr); Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var)); Addr = CGF.Builder.CreateElementBitCast( Addr, CGF.ConvertTypeForMem(Var->getType())); return Addr; } static llvm::Value *emitCopyprivateCopyFunction( CodeGenModule &CGM, llvm::Type *ArgsType, ArrayRef CopyprivateVars, ArrayRef DestExprs, ArrayRef SrcExprs, ArrayRef AssignmentOps, SourceLocation Loc) { ASTContext &C = CGM.getContext(); // void copy_func(void *LHSArg, void *RHSArg); FunctionArgList Args; ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy, ImplicitParamDecl::Other); ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy, ImplicitParamDecl::Other); Args.push_back(&LHSArg); Args.push_back(&RHSArg); const auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); std::string Name = CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"}); auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule()); CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); Fn->setDoesNotRecurse(); CodeGenFunction CGF(CGM); CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); // Dest = (void*[n])(LHSArg); // Src = (void*[n])(RHSArg); Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)), ArgsType), CGF.getPointerAlign()); Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)), ArgsType), CGF.getPointerAlign()); // *(Type0*)Dst[0] = *(Type0*)Src[0]; // *(Type1*)Dst[1] = *(Type1*)Src[1]; // ... 
// *(Typen*)Dst[n] = *(Typen*)Src[n]; for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) { const auto *DestVar = cast(cast(DestExprs[I])->getDecl()); Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar); const auto *SrcVar = cast(cast(SrcExprs[I])->getDecl()); Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar); const auto *VD = cast(CopyprivateVars[I])->getDecl(); QualType Type = VD->getType(); CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]); } CGF.FinishFunction(); return Fn; } void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef CopyprivateVars, ArrayRef SrcExprs, ArrayRef DstExprs, ArrayRef AssignmentOps) { if (!CGF.HaveInsertPoint()) return; assert(CopyprivateVars.size() == SrcExprs.size() && CopyprivateVars.size() == DstExprs.size() && CopyprivateVars.size() == AssignmentOps.size()); ASTContext &C = CGM.getContext(); // int32 did_it = 0; // if(__kmpc_single(ident_t *, gtid)) { // SingleOpGen(); // __kmpc_end_single(ident_t *, gtid); // did_it = 1; // } // call __kmpc_copyprivate(ident_t *, gtid, , , // , did_it); Address DidIt = Address::invalid(); if (!CopyprivateVars.empty()) { // int32 did_it = 0; QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it"); CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt); } // Prepare arguments and build a call to __kmpc_single llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_single), Args, OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_end_single), Args, /*Conditional=*/true); SingleOpGen.setAction(Action); emitInlinedDirective(CGF, OMPD_single, SingleOpGen); if (DidIt.isValid()) { // did_it = 1; CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt); } 
Action.Done(CGF); // call __kmpc_copyprivate(ident_t *, gtid, , , // , did_it); if (DidIt.isValid()) { llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size()); QualType CopyprivateArrayTy = C.getConstantArrayType( C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0); // Create a list of all private variables for copyprivate. Address CopyprivateList = CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list"); for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) { Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I); CGF.Builder.CreateStore( CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( CGF.EmitLValue(CopyprivateVars[I]).getPointer(CGF), CGF.VoidPtrTy), Elem); } // Build function that copies private values from single region to all other // threads in the corresponding parallel region. llvm::Value *CpyFn = emitCopyprivateCopyFunction( CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(), CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc); llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy); Address CL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList, CGF.VoidPtrTy); llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt); llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc), // ident_t * getThreadID(CGF, Loc), // i32 BufSize, // size_t CL.getPointer(), // void * CpyFn, // void (*) (void *, void *) DidItVal // i32 did_it }; CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_copyprivate), Args); } } void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads) { if (!CGF.HaveInsertPoint()) return; // __kmpc_ordered(ident_t *, gtid); // OrderedOpGen(); // __kmpc_end_ordered(ident_t *, gtid); // Prepare arguments and build a call to __kmpc_ordered if (IsThreads) { llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; 
CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_ordered), Args, OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_end_ordered), Args); OrderedOpGen.setAction(Action); emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen); return; } emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen); } unsigned CGOpenMPRuntime::getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind) { unsigned Flags; if (Kind == OMPD_for) Flags = OMP_IDENT_BARRIER_IMPL_FOR; else if (Kind == OMPD_sections) Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS; else if (Kind == OMPD_single) Flags = OMP_IDENT_BARRIER_IMPL_SINGLE; else if (Kind == OMPD_barrier) Flags = OMP_IDENT_BARRIER_EXPL; else Flags = OMP_IDENT_BARRIER_IMPL; return Flags; } void CGOpenMPRuntime::getDefaultScheduleAndChunk( CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const { // Check if the loop directive is actually a doacross loop directive. In this // case choose static, 1 schedule. if (llvm::any_of( S.getClausesOfKind(), [](const OMPOrderedClause *C) { return C->getNumForLoops(); })) { ScheduleKind = OMPC_SCHEDULE_static; // Chunk size is 1 in this case. 
llvm::APInt ChunkSize(32, 1); ChunkExpr = IntegerLiteral::Create( CGF.getContext(), ChunkSize, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0), SourceLocation()); } } void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks, bool ForceSimpleCall) { // Check if we should use the OMPBuilder auto *OMPRegionInfo = dyn_cast_or_null(CGF.CapturedStmtInfo); if (CGF.CGM.getLangOpts().OpenMPIRBuilder) { CGF.Builder.restoreIP(OMPBuilder.createBarrier( CGF.Builder, Kind, ForceSimpleCall, EmitChecks)); return; } if (!CGF.HaveInsertPoint()) return; // Build call __kmpc_cancel_barrier(loc, thread_id); // Build call __kmpc_barrier(loc, thread_id); unsigned Flags = getDefaultFlagsForBarriers(Kind); // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc, // thread_id); llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags), getThreadID(CGF, Loc)}; if (OMPRegionInfo) { if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) { llvm::Value *Result = CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), OMPRTL___kmpc_cancel_barrier), Args); if (EmitChecks) { // if (__kmpc_cancel_barrier()) { // exit from construct; // } llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit"); llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue"); llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result); CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB); CGF.EmitBlock(ExitBB); // exit from construct; CodeGenFunction::JumpDest CancelDestination = CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind()); CGF.EmitBranchThroughCleanup(CancelDestination); CGF.EmitBlock(ContBB, /*IsFinished=*/true); } return; } } CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_barrier), Args); } /// Map the OpenMP loop schedule to the runtime enumeration. 
static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind, bool Chunked, bool Ordered) { switch (ScheduleKind) { case OMPC_SCHEDULE_static: return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked) : (Ordered ? OMP_ord_static : OMP_sch_static); case OMPC_SCHEDULE_dynamic: return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked; case OMPC_SCHEDULE_guided: return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked; case OMPC_SCHEDULE_runtime: return Ordered ? OMP_ord_runtime : OMP_sch_runtime; case OMPC_SCHEDULE_auto: return Ordered ? OMP_ord_auto : OMP_sch_auto; case OMPC_SCHEDULE_unknown: assert(!Chunked && "chunk was specified but schedule kind not known"); return Ordered ? OMP_ord_static : OMP_sch_static; } llvm_unreachable("Unexpected runtime schedule"); } /// Map the OpenMP distribute schedule to the runtime enumeration. static OpenMPSchedType getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) { // only static is allowed for dist_schedule return Chunked ? 
OMP_dist_sch_static_chunked : OMP_dist_sch_static; } bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const { OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false); return Schedule == OMP_sch_static; } bool CGOpenMPRuntime::isStaticNonchunked( OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const { OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked); return Schedule == OMP_dist_sch_static; } bool CGOpenMPRuntime::isStaticChunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const { OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false); return Schedule == OMP_sch_static_chunked; } bool CGOpenMPRuntime::isStaticChunked( OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const { OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked); return Schedule == OMP_dist_sch_static_chunked; } bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const { OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false); assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here"); return Schedule != OMP_sch_static; } static int addMonoNonMonoModifier(CodeGenModule &CGM, OpenMPSchedType Schedule, OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2) { int Modifier = 0; switch (M1) { case OMPC_SCHEDULE_MODIFIER_monotonic: Modifier = OMP_sch_modifier_monotonic; break; case OMPC_SCHEDULE_MODIFIER_nonmonotonic: Modifier = OMP_sch_modifier_nonmonotonic; break; case OMPC_SCHEDULE_MODIFIER_simd: if (Schedule == OMP_sch_static_chunked) Schedule = OMP_sch_static_balanced_chunked; break; case OMPC_SCHEDULE_MODIFIER_last: case OMPC_SCHEDULE_MODIFIER_unknown: break; } switch (M2) { case OMPC_SCHEDULE_MODIFIER_monotonic: Modifier = OMP_sch_modifier_monotonic; break; case OMPC_SCHEDULE_MODIFIER_nonmonotonic: Modifier = OMP_sch_modifier_nonmonotonic; break; case 
OMPC_SCHEDULE_MODIFIER_simd: if (Schedule == OMP_sch_static_chunked) Schedule = OMP_sch_static_balanced_chunked; break; case OMPC_SCHEDULE_MODIFIER_last: case OMPC_SCHEDULE_MODIFIER_unknown: break; } // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Desription. // If the static schedule kind is specified or if the ordered clause is // specified, and if the nonmonotonic modifier is not specified, the effect is // as if the monotonic modifier is specified. Otherwise, unless the monotonic // modifier is specified, the effect is as if the nonmonotonic modifier is // specified. if (CGM.getLangOpts().OpenMP >= 50 && Modifier == 0) { if (!(Schedule == OMP_sch_static_chunked || Schedule == OMP_sch_static || Schedule == OMP_sch_static_balanced_chunked || Schedule == OMP_ord_static_chunked || Schedule == OMP_ord_static || Schedule == OMP_dist_sch_static_chunked || Schedule == OMP_dist_sch_static)) Modifier = OMP_sch_modifier_nonmonotonic; } return Schedule | Modifier; } void CGOpenMPRuntime::emitForDispatchInit( CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues) { if (!CGF.HaveInsertPoint()) return; OpenMPSchedType Schedule = getRuntimeSchedule( ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered); assert(Ordered || (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked && Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked && Schedule != OMP_sch_static_balanced_chunked)); // Call __kmpc_dispatch_init( // ident_t *loc, kmp_int32 tid, kmp_int32 schedule, // kmp_int[32|64] lower, kmp_int[32|64] upper, // kmp_int[32|64] stride, kmp_int[32|64] chunk); // If the Chunk was not specified in the clause - use default value 1. llvm::Value *Chunk = DispatchValues.Chunk ? 
DispatchValues.Chunk : CGF.Builder.getIntN(IVSize, 1); llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), CGF.Builder.getInt32(addMonoNonMonoModifier( CGM, Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type DispatchValues.LB, // Lower DispatchValues.UB, // Upper CGF.Builder.getIntN(IVSize, 1), // Stride Chunk // Chunk }; CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args); } static void emitForStaticInitCall( CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId, llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule, OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, const CGOpenMPRuntime::StaticRTInput &Values) { if (!CGF.HaveInsertPoint()) return; assert(!Values.Ordered); assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked || Schedule == OMP_sch_static_balanced_chunked || Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked || Schedule == OMP_dist_sch_static || Schedule == OMP_dist_sch_static_chunked); // Call __kmpc_for_static_init( // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype, // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower, // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride, // kmp_int[32|64] incr, kmp_int[32|64] chunk); llvm::Value *Chunk = Values.Chunk; if (Chunk == nullptr) { assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static || Schedule == OMP_dist_sch_static) && "expected static non-chunked schedule"); // If the Chunk was not specified in the clause - use default value 1. 
Chunk = CGF.Builder.getIntN(Values.IVSize, 1); } else { assert((Schedule == OMP_sch_static_chunked || Schedule == OMP_sch_static_balanced_chunked || Schedule == OMP_ord_static_chunked || Schedule == OMP_dist_sch_static_chunked) && "expected static chunked schedule"); } llvm::Value *Args[] = { UpdateLocation, ThreadId, CGF.Builder.getInt32(addMonoNonMonoModifier(CGF.CGM, Schedule, M1, M2)), // Schedule type Values.IL.getPointer(), // &isLastIter Values.LB.getPointer(), // &LB Values.UB.getPointer(), // &UB Values.ST.getPointer(), // &Stride CGF.Builder.getIntN(Values.IVSize, 1), // Incr Chunk // Chunk }; CGF.EmitRuntimeCall(ForStaticInitFunction, Args); } void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) { OpenMPSchedType ScheduleNum = getRuntimeSchedule( ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered); assert(isOpenMPWorksharingDirective(DKind) && "Expected loop-based or sections-based directive."); llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc, isOpenMPLoopDirective(DKind) ? 
OMP_IDENT_WORK_LOOP : OMP_IDENT_WORK_SECTIONS); llvm::Value *ThreadId = getThreadID(CGF, Loc); llvm::FunctionCallee StaticInitFunction = createForStaticInitFunction(Values.IVSize, Values.IVSigned); auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc); emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction, ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values); } void CGOpenMPRuntime::emitDistributeStaticInit( CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const CGOpenMPRuntime::StaticRTInput &Values) { OpenMPSchedType ScheduleNum = getRuntimeSchedule(SchedKind, Values.Chunk != nullptr); llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE); llvm::Value *ThreadId = getThreadID(CGF, Loc); llvm::FunctionCallee StaticInitFunction = createForStaticInitFunction(Values.IVSize, Values.IVSigned); emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction, ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown, OMPC_SCHEDULE_MODIFIER_unknown, Values); } void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind) { if (!CGF.HaveInsertPoint()) return; // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid); llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc, isOpenMPDistributeDirective(DKind) ? OMP_IDENT_WORK_DISTRIBUTE : isOpenMPLoopDirective(DKind) ? 
OMP_IDENT_WORK_LOOP : OMP_IDENT_WORK_SECTIONS), getThreadID(CGF, Loc)}; auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc); CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_for_static_fini), Args); } void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned) { if (!CGF.HaveInsertPoint()) return; // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid); llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args); } llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST) { // Call __kmpc_dispatch_next( // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, // kmp_int[32|64] *p_stride); llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), IL.getPointer(), // &isLastIter LB.getPointer(), // &Lower UB.getPointer(), // &Upper ST.getPointer() // &Stride }; llvm::Value *Call = CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args); return CGF.EmitScalarConversion( Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1), CGF.getContext().BoolTy, Loc); } void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) { if (!CGF.HaveInsertPoint()) return; // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads) llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)}; CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_push_num_threads), Args); } void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF, ProcBindKind ProcBind, SourceLocation Loc) { if 
(!CGF.HaveInsertPoint()) return; assert(ProcBind != OMP_PROC_BIND_unknown && "Unsupported proc_bind value."); // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind) llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), llvm::ConstantInt::get(CGM.IntTy, unsigned(ProcBind), /*isSigned=*/true)}; CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_push_proc_bind), Args); } void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef, SourceLocation Loc, llvm::AtomicOrdering AO) { if (CGF.CGM.getLangOpts().OpenMPIRBuilder) { OMPBuilder.createFlush(CGF.Builder); } else { if (!CGF.HaveInsertPoint()) return; // Build call void __kmpc_flush(ident_t *loc) CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_flush), emitUpdateLocation(CGF, Loc)); } } namespace { /// Indexes of fields for type kmp_task_t. enum KmpTaskTFields { /// List of shared variables. KmpTaskTShareds, /// Task routine. KmpTaskTRoutine, /// Partition id for the untied tasks. KmpTaskTPartId, /// Function with call of destructors for private variables. Data1, /// Task priority. Data2, /// (Taskloops only) Lower bound. KmpTaskTLowerBound, /// (Taskloops only) Upper bound. KmpTaskTUpperBound, /// (Taskloops only) Stride. KmpTaskTStride, /// (Taskloops only) Is last iteration flag. KmpTaskTLastIter, /// (Taskloops only) Reduction data. KmpTaskTReductions, }; } // anonymous namespace bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const { return OffloadEntriesTargetRegion.empty() && OffloadEntriesDeviceGlobalVar.empty(); } /// Initialize target region entry. 
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy:: initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, unsigned Order) { assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is " "only required for the device " "code generation."); OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr, OMPTargetRegionEntryTargetRegion); ++OffloadingEntriesNum; } void CGOpenMPRuntime::OffloadEntriesInfoManagerTy:: registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags) { // If we are emitting code for a target, the entry is already initialized, // only has to be registered. if (CGM.getLangOpts().OpenMPIsDevice) { // This could happen if the device compilation is invoked standalone. if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum)) return; auto &Entry = OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum]; Entry.setAddress(Addr); Entry.setID(ID); Entry.setFlags(Flags); } else { if (Flags == OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion && hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum, /*IgnoreAddressId*/ true)) return; assert(!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum) && "Target region entry already registered!"); OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags); OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry; ++OffloadingEntriesNum; } } bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo( unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, bool IgnoreAddressId) const { auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID); if (PerDevice == OffloadEntriesTargetRegion.end()) return false; auto PerFile = 
PerDevice->second.find(FileID); if (PerFile == PerDevice->second.end()) return false; auto PerParentName = PerFile->second.find(ParentName); if (PerParentName == PerFile->second.end()) return false; auto PerLine = PerParentName->second.find(LineNum); if (PerLine == PerParentName->second.end()) return false; // Fail if this entry is already registered. if (!IgnoreAddressId && (PerLine->second.getAddress() || PerLine->second.getID())) return false; return true; } void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo( const OffloadTargetRegionEntryInfoActTy &Action) { // Scan all target region entries and perform the provided action. for (const auto &D : OffloadEntriesTargetRegion) for (const auto &F : D.second) for (const auto &P : F.second) for (const auto &L : P.second) Action(D.first, F.first, P.first(), L.first, L.second); } void CGOpenMPRuntime::OffloadEntriesInfoManagerTy:: initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order) { assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is " "only required for the device " "code generation."); OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags); ++OffloadingEntriesNum; } void CGOpenMPRuntime::OffloadEntriesInfoManagerTy:: registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage) { if (CGM.getLangOpts().OpenMPIsDevice) { // This could happen if the device compilation is invoked standalone. 
if (!hasDeviceGlobalVarEntryInfo(VarName)) return; auto &Entry = OffloadEntriesDeviceGlobalVar[VarName]; if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) { if (Entry.getVarSize().isZero()) { Entry.setVarSize(VarSize); Entry.setLinkage(Linkage); } return; } Entry.setVarSize(VarSize); Entry.setLinkage(Linkage); Entry.setAddress(Addr); } else { if (hasDeviceGlobalVarEntryInfo(VarName)) { auto &Entry = OffloadEntriesDeviceGlobalVar[VarName]; assert(Entry.isValid() && Entry.getFlags() == Flags && "Entry not initialized!"); if (Entry.getVarSize().isZero()) { Entry.setVarSize(VarSize); Entry.setLinkage(Linkage); } return; } OffloadEntriesDeviceGlobalVar.try_emplace( VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage); ++OffloadingEntriesNum; } } void CGOpenMPRuntime::OffloadEntriesInfoManagerTy:: actOnDeviceGlobalVarEntriesInfo( const OffloadDeviceGlobalVarEntryInfoActTy &Action) { // Scan all target region entries and perform the provided action. for (const auto &E : OffloadEntriesDeviceGlobalVar) Action(E.getKey(), E.getValue()); } void CGOpenMPRuntime::createOffloadEntry( llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags, llvm::GlobalValue::LinkageTypes Linkage) { StringRef Name = Addr->getName(); llvm::Module &M = CGM.getModule(); llvm::LLVMContext &C = M.getContext(); // Create constant string with the name. 
llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name); std::string StringName = getName({"omp_offloading", "entry_name"}); auto *Str = new llvm::GlobalVariable( M, StrPtrInit->getType(), /*isConstant=*/true, llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName); Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); llvm::Constant *Data[] = { llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(ID, CGM.VoidPtrTy), llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(Str, CGM.Int8PtrTy), llvm::ConstantInt::get(CGM.SizeTy, Size), llvm::ConstantInt::get(CGM.Int32Ty, Flags), llvm::ConstantInt::get(CGM.Int32Ty, 0)}; std::string EntryName = getName({"omp_offloading", "entry", ""}); llvm::GlobalVariable *Entry = createGlobalStruct( CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data, Twine(EntryName).concat(Name), llvm::GlobalValue::WeakAnyLinkage); // The entry has to be created in the section the linker expects it to be. Entry->setSection("omp_offloading_entries"); } void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() { // Emit the offloading entries and metadata so that the device codegen side // can easily figure out what to emit. The produced metadata looks like // this: // // !omp_offload.info = !{!1, ...} // // Right now we only generate metadata for function that contain target // regions. // If we are in simd mode or there are no entries, we don't need to do // anything. if (CGM.getLangOpts().OpenMPSimd || OffloadEntriesInfoManager.empty()) return; llvm::Module &M = CGM.getModule(); llvm::LLVMContext &C = M.getContext(); SmallVector, 16> OrderedEntries(OffloadEntriesInfoManager.size()); llvm::SmallVector ParentFunctions( OffloadEntriesInfoManager.size()); // Auxiliary methods to create metadata values and strings. 
auto &&GetMDInt = [this](unsigned V) { return llvm::ConstantAsMetadata::get( llvm::ConstantInt::get(CGM.Int32Ty, V)); }; auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); }; // Create the offloading info metadata node. llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info"); // Create function that emits metadata for each target region entry; auto &&TargetRegionMetadataEmitter = [this, &C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt, &GetMDString]( unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned Line, const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) { // Generate metadata for target regions. Each entry of this metadata // contains: // - Entry 0 -> Kind of this type of metadata (0). // - Entry 1 -> Device ID of the file where the entry was identified. // - Entry 2 -> File ID of the file where the entry was identified. // - Entry 3 -> Mangled name of the function where the entry was // identified. // - Entry 4 -> Line in the file where the entry was identified. // - Entry 5 -> Order the entry was created. // The first element of the metadata node is the kind. llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID), GetMDInt(FileID), GetMDString(ParentName), GetMDInt(Line), GetMDInt(E.getOrder())}; SourceLocation Loc; for (auto I = CGM.getContext().getSourceManager().fileinfo_begin(), E = CGM.getContext().getSourceManager().fileinfo_end(); I != E; ++I) { if (I->getFirst()->getUniqueID().getDevice() == DeviceID && I->getFirst()->getUniqueID().getFile() == FileID) { Loc = CGM.getContext().getSourceManager().translateFileLineCol( I->getFirst(), Line, 1); break; } } // Save this entry in the right position of the ordered entries array. OrderedEntries[E.getOrder()] = std::make_tuple(&E, Loc, ParentName); ParentFunctions[E.getOrder()] = ParentName; // Add metadata to the named metadata node. 
MD->addOperand(llvm::MDNode::get(C, Ops)); }; OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo( TargetRegionMetadataEmitter); // Create function that emits metadata for each device global variable entry; auto &&DeviceGlobalVarMetadataEmitter = [&C, &OrderedEntries, &GetMDInt, &GetMDString, MD](StringRef MangledName, const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar &E) { // Generate metadata for global variables. Each entry of this metadata // contains: // - Entry 0 -> Kind of this type of metadata (1). // - Entry 1 -> Mangled name of the variable. // - Entry 2 -> Declare target kind. // - Entry 3 -> Order the entry was created. // The first element of the metadata node is the kind. llvm::Metadata *Ops[] = { GetMDInt(E.getKind()), GetMDString(MangledName), GetMDInt(E.getFlags()), GetMDInt(E.getOrder())}; // Save this entry in the right position of the ordered entries array. OrderedEntries[E.getOrder()] = std::make_tuple(&E, SourceLocation(), MangledName); // Add metadata to the named metadata node. MD->addOperand(llvm::MDNode::get(C, Ops)); }; OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo( DeviceGlobalVarMetadataEmitter); for (const auto &E : OrderedEntries) { assert(std::get<0>(E) && "All ordered entries must exist!"); if (const auto *CE = dyn_cast( std::get<0>(E))) { if (!CE->getID() || !CE->getAddress()) { // Do not blame the entry if the parent funtion is not emitted. 
StringRef FnName = ParentFunctions[CE->getOrder()]; if (!CGM.GetGlobalValue(FnName)) continue; unsigned DiagID = CGM.getDiags().getCustomDiagID( DiagnosticsEngine::Error, "Offloading entry for target region in %0 is incorrect: either the " "address or the ID is invalid."); CGM.getDiags().Report(std::get<1>(E), DiagID) << FnName; continue; } createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0, CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage); } else if (const auto *CE = dyn_cast( std::get<0>(E))) { OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags = static_cast( CE->getFlags()); switch (Flags) { case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: { if (CGM.getLangOpts().OpenMPIsDevice && CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()) continue; if (!CE->getAddress()) { unsigned DiagID = CGM.getDiags().getCustomDiagID( DiagnosticsEngine::Error, "Offloading entry for declare target " "variable %0 is incorrect: the " "address is invalid."); CGM.getDiags().Report(std::get<1>(E), DiagID) << std::get<2>(E); continue; } // The vaiable has no definition - no need to add the entry. if (CE->getVarSize().isZero()) continue; break; } case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink: assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) || (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) && "Declaret target link address is set."); if (CGM.getLangOpts().OpenMPIsDevice) continue; if (!CE->getAddress()) { unsigned DiagID = CGM.getDiags().getCustomDiagID( DiagnosticsEngine::Error, "Offloading entry for declare target variable is incorrect: the " "address is invalid."); CGM.getDiags().Report(DiagID); continue; } break; } createOffloadEntry(CE->getAddress(), CE->getAddress(), CE->getVarSize().getQuantity(), Flags, CE->getLinkage()); } else { llvm_unreachable("Unsupported entry kind."); } } } /// Loads all the offload entries information from the host IR /// metadata. 
void CGOpenMPRuntime::loadOffloadInfoMetadata() { // If we are in target mode, load the metadata from the host IR. This code has // to match the metadaata creation in createOffloadEntriesAndInfoMetadata(). if (!CGM.getLangOpts().OpenMPIsDevice) return; if (CGM.getLangOpts().OMPHostIRFile.empty()) return; auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile); if (auto EC = Buf.getError()) { CGM.getDiags().Report(diag::err_cannot_open_file) << CGM.getLangOpts().OMPHostIRFile << EC.message(); return; } llvm::LLVMContext C; auto ME = expectedToErrorOrAndEmitErrors( C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C)); if (auto EC = ME.getError()) { unsigned DiagID = CGM.getDiags().getCustomDiagID( DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'"); CGM.getDiags().Report(DiagID) << CGM.getLangOpts().OMPHostIRFile << EC.message(); return; } llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info"); if (!MD) return; for (llvm::MDNode *MN : MD->operands()) { auto &&GetMDInt = [MN](unsigned Idx) { auto *V = cast(MN->getOperand(Idx)); return cast(V->getValue())->getZExtValue(); }; auto &&GetMDString = [MN](unsigned Idx) { auto *V = cast(MN->getOperand(Idx)); return V->getString(); }; switch (GetMDInt(0)) { default: llvm_unreachable("Unexpected metadata!"); break; case OffloadEntriesInfoManagerTy::OffloadEntryInfo:: OffloadingEntryInfoTargetRegion: OffloadEntriesInfoManager.initializeTargetRegionEntryInfo( /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2), /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4), /*Order=*/GetMDInt(5)); break; case OffloadEntriesInfoManagerTy::OffloadEntryInfo:: OffloadingEntryInfoDeviceGlobalVar: OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo( /*MangledName=*/GetMDString(1), static_cast( /*Flags=*/GetMDInt(2)), /*Order=*/GetMDInt(3)); break; } } } void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) { if (!KmpRoutineEntryPtrTy) { // Build typedef kmp_int32 (* 
kmp_routine_entry_t)(kmp_int32, void *); type. ASTContext &C = CGM.getContext(); QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy}; FunctionProtoType::ExtProtoInfo EPI; KmpRoutineEntryPtrQTy = C.getPointerType( C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI)); KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy); } } QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() { // Make sure the type of the entry is already created. This is the type we // have to create: // struct __tgt_offload_entry{ // void *addr; // Pointer to the offload entry info. // // (function or global) // char *name; // Name of the function or global. // size_t size; // Size of the entry info (0 if it a function). // int32_t flags; // Flags associated with the entry, e.g. 'link'. // int32_t reserved; // Reserved, to use by the runtime library. // }; if (TgtOffloadEntryQTy.isNull()) { ASTContext &C = CGM.getContext(); RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry"); RD->startDefinition(); addFieldToRecordDecl(C, RD, C.VoidPtrTy); addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy)); addFieldToRecordDecl(C, RD, C.getSizeType()); addFieldToRecordDecl( C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true)); addFieldToRecordDecl( C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true)); RD->completeDefinition(); RD->addAttr(PackedAttr::CreateImplicit(C)); TgtOffloadEntryQTy = C.getRecordType(RD); } return TgtOffloadEntryQTy; } namespace { struct PrivateHelpersTy { PrivateHelpersTy(const Expr *OriginalRef, const VarDecl *Original, const VarDecl *PrivateCopy, const VarDecl *PrivateElemInit) : OriginalRef(OriginalRef), Original(Original), PrivateCopy(PrivateCopy), PrivateElemInit(PrivateElemInit) {} PrivateHelpersTy(const VarDecl *Original) : Original(Original) {} const Expr *OriginalRef = nullptr; const VarDecl *Original = nullptr; const VarDecl *PrivateCopy = nullptr; const VarDecl *PrivateElemInit = nullptr; bool 
isLocalPrivate() const { return !OriginalRef && !PrivateCopy && !PrivateElemInit; } }; typedef std::pair PrivateDataTy; } // anonymous namespace static bool isAllocatableDecl(const VarDecl *VD) { const VarDecl *CVD = VD->getCanonicalDecl(); if (!CVD->hasAttr()) return false; const auto *AA = CVD->getAttr(); // Use the default allocation. return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc || AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) && !AA->getAllocator()); } static RecordDecl * createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef Privates) { if (!Privates.empty()) { ASTContext &C = CGM.getContext(); // Build struct .kmp_privates_t. { // /* private vars */ // }; RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t"); RD->startDefinition(); for (const auto &Pair : Privates) { const VarDecl *VD = Pair.second.Original; QualType Type = VD->getType().getNonReferenceType(); // If the private variable is a local variable with lvalue ref type, // allocate the pointer instead of the pointee type. 
if (Pair.second.isLocalPrivate()) { if (VD->getType()->isLValueReferenceType()) Type = C.getPointerType(Type); if (isAllocatableDecl(VD)) Type = C.getPointerType(Type); } FieldDecl *FD = addFieldToRecordDecl(C, RD, Type); if (VD->hasAttrs()) { for (specific_attr_iterator I(VD->getAttrs().begin()), E(VD->getAttrs().end()); I != E; ++I) FD->addAttr(*I); } } RD->completeDefinition(); return RD; } return nullptr; } static RecordDecl * createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind, QualType KmpInt32Ty, QualType KmpRoutineEntryPointerQTy) { ASTContext &C = CGM.getContext(); // Build struct kmp_task_t { // void * shareds; // kmp_routine_entry_t routine; // kmp_int32 part_id; // kmp_cmplrdata_t data1; // kmp_cmplrdata_t data2; // For taskloops additional fields: // kmp_uint64 lb; // kmp_uint64 ub; // kmp_int64 st; // kmp_int32 liter; // void * reductions; // }; RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union); UD->startDefinition(); addFieldToRecordDecl(C, UD, KmpInt32Ty); addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy); UD->completeDefinition(); QualType KmpCmplrdataTy = C.getRecordType(UD); RecordDecl *RD = C.buildImplicitRecord("kmp_task_t"); RD->startDefinition(); addFieldToRecordDecl(C, RD, C.VoidPtrTy); addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy); addFieldToRecordDecl(C, RD, KmpInt32Ty); addFieldToRecordDecl(C, RD, KmpCmplrdataTy); addFieldToRecordDecl(C, RD, KmpCmplrdataTy); if (isOpenMPTaskLoopDirective(Kind)) { QualType KmpUInt64Ty = CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0); QualType KmpInt64Ty = CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1); addFieldToRecordDecl(C, RD, KmpUInt64Ty); addFieldToRecordDecl(C, RD, KmpUInt64Ty); addFieldToRecordDecl(C, RD, KmpInt64Ty); addFieldToRecordDecl(C, RD, KmpInt32Ty); addFieldToRecordDecl(C, RD, C.VoidPtrTy); } RD->completeDefinition(); return RD; } static RecordDecl * 
createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
                                     ArrayRef<PrivateDataTy> Privates) {
  ASTContext &C = CGM.getContext();
  // Build struct kmp_task_t_with_privates {
  //         kmp_task_t task_data;
  //         .kmp_privates_t. privates;
  //       };
  RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
  RD->startDefinition();
  addFieldToRecordDecl(C, RD, KmpTaskTQTy);
  if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
    addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
  RD->completeDefinition();
  return RD;
}

/// Emit a proxy function which accepts kmp_task_t as the second
/// argument.
/// \code
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
///   For taskloops:
///   tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
///   tt->reductions, tt->shareds);
///   return 0;
/// }
/// \endcode
static llvm::Function *
emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
                      OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
                      QualType KmpTaskTWithPrivatesPtrQTy,
                      QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
                      QualType SharedsPtrTy, llvm::Function *TaskFunction,
                      llvm::Value *TaskPrivatesMap) {
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                KmpTaskTWithPrivatesPtrQTy.withRestrict(),
                                ImplicitParamDecl::Other);
  Args.push_back(&GtidArg);
  Args.push_back(&TaskTypeArg);
  const auto &TaskEntryFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  llvm::FunctionType *TaskEntryTy =
      CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
  auto *TaskEntry = llvm::Function::Create(
      TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
  TaskEntry->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
                    Loc, Loc);

  // TaskFunction(gtid, tt->task_data.part_id, &tt->privates,
  // task_privates_map,
  // tt,
  // For taskloops:
  // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
  // tt->task_data.shareds);
  llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
      CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
  LValue TDBase = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskTypeArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  const auto *KmpTaskTWithPrivatesQTyRD =
      cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  LValue Base =
      CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
  const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
  LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
  llvm::Value *PartidParam = PartIdLVal.getPointer(CGF);

  auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
  LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
  llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.EmitLoadOfScalar(SharedsLVal, Loc),
      CGF.ConvertTypeForMem(SharedsPtrTy));

  auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
  llvm::Value *PrivatesParam;
  if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
    LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
    PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        PrivatesLVal.getPointer(CGF), CGF.VoidPtrTy);
  } else {
    PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }

  llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
                               TaskPrivatesMap,
                               CGF.Builder
                                   .CreatePointerBitCastOrAddrSpaceCast(
                                       TDBase.getAddress(CGF), CGF.VoidPtrTy)
                                   .getPointer()};
  SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
                                          std::end(CommonArgs));
  if (isOpenMPTaskLoopDirective(Kind)) {
    auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
    LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
    llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
    auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
    LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
    llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
    auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
    LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
    llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
    auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
    LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
    llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
    auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
    LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
    llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
    CallArgs.push_back(LBParam);
    CallArgs.push_back(UBParam);
    CallArgs.push_back(StParam);
    CallArgs.push_back(LIParam);
    CallArgs.push_back(RParam);
  }
  CallArgs.push_back(SharedsParam);

  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
                                                  CallArgs);
  CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
                             CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
  CGF.FinishFunction();
  return TaskEntry;
}

static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
                                            SourceLocation Loc,
                                            QualType KmpInt32Ty,
                                            QualType KmpTaskTWithPrivatesPtrQTy,
                                            QualType KmpTaskTWithPrivatesQTy) {
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                KmpTaskTWithPrivatesPtrQTy.withRestrict(),
                                ImplicitParamDecl::Other);
  Args.push_back(&GtidArg);
  Args.push_back(&TaskTypeArg);
  const auto &DestructorFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  llvm::FunctionType *DestructorFnTy =
      CGM.getTypes().GetFunctionType(DestructorFnInfo);
  std::string Name =
      CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
  auto *DestructorFn =
      llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
                             Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
                                    DestructorFnInfo);
  DestructorFn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
                    Args, Loc, Loc);

  LValue Base = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskTypeArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  const auto *KmpTaskTWithPrivatesQTyRD =
      cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  Base = CGF.EmitLValueForField(Base, *FI);
  for (const auto *Field :
       cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
    if (QualType::DestructionKind DtorKind =
            Field->getType().isDestructedType()) {
      LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
      CGF.pushDestroy(DtorKind, FieldLValue.getAddress(CGF), Field->getType());
    }
  }
  CGF.FinishFunction();
  return DestructorFn;
}

/// Emit a privates mapping function for correct handling of private and
/// firstprivate variables.
/// \code
/// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
/// **noalias priv1,...,  <tyn> **noalias privn) {
///   *priv1 = &.privates.priv1;
///   ...;
///   *privn = &.privates.privn;
/// }
/// \endcode
static llvm::Value *
emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
                               const OMPTaskDataTy &Data, QualType PrivatesQTy,
                               ArrayRef<PrivateDataTy> Privates) {
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl TaskPrivatesArg(
      C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
      C.getPointerType(PrivatesQTy).withConst().withRestrict(),
      ImplicitParamDecl::Other);
  Args.push_back(&TaskPrivatesArg);
  llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, unsigned> PrivateVarsPos;
  unsigned Counter = 1;
  for (const Expr *E : Data.PrivateVars) {
    Args.push_back(ImplicitParamDecl::Create(
        C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
        C.getPointerType(C.getPointerType(E->getType()))
            .withConst()
            .withRestrict(),
        ImplicitParamDecl::Other));
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    PrivateVarsPos[VD] = Counter;
    ++Counter;
  }
  for (const Expr *E : Data.FirstprivateVars) {
    Args.push_back(ImplicitParamDecl::Create(
        C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
        C.getPointerType(C.getPointerType(E->getType()))
            .withConst()
            .withRestrict(),
        ImplicitParamDecl::Other));
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    PrivateVarsPos[VD] = Counter;
    ++Counter;
  }
  for (const Expr *E : Data.LastprivateVars) {
    Args.push_back(ImplicitParamDecl::Create(
        C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
        C.getPointerType(C.getPointerType(E->getType()))
            .withConst()
            .withRestrict(),
        ImplicitParamDecl::Other));
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    PrivateVarsPos[VD] = Counter;
    ++Counter;
  }
  for (const VarDecl *VD : Data.PrivateLocals) {
    QualType Ty = VD->getType().getNonReferenceType();
    if (VD->getType()->isLValueReferenceType())
      Ty = C.getPointerType(Ty);
    if (isAllocatableDecl(VD))
      Ty = C.getPointerType(Ty);
    Args.push_back(ImplicitParamDecl::Create(
        C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
        C.getPointerType(C.getPointerType(Ty)).withConst().withRestrict(),
        ImplicitParamDecl::Other));
    PrivateVarsPos[VD] = Counter;
    ++Counter;
  }
  const auto &TaskPrivatesMapFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *TaskPrivatesMapTy =
      CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
  std::string Name =
      CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
  auto *TaskPrivatesMap = llvm::Function::Create(
      TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
      &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
                                    TaskPrivatesMapFnInfo);
  if (CGM.getLangOpts().Optimize) {
    TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
    TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
    TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
  }
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
                    TaskPrivatesMapFnInfo, Args, Loc, Loc);

  // *privi = &.privates.privi;
  LValue Base = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
      TaskPrivatesArg.getType()->castAs<PointerType>());
  const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
  Counter = 0;
  for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
    LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
    const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
    LValue RefLVal =
        CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
    LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
        RefLVal.getAddress(CGF), RefLVal.getType()->castAs<PointerType>());
    CGF.EmitStoreOfScalar(FieldLVal.getPointer(CGF), RefLoadLVal);
    ++Counter;
  }
  CGF.FinishFunction();
  return TaskPrivatesMap;
}

/// Emit initialization for private variables in task-based directives.
/// Emits initializers for the private/firstprivate/lastprivate copies stored
/// in the .privates. sub-record of a kmp_task_t-with-privates object.
/// \param KmpTaskSharedsPtr Pointer to the shareds area of the task (source
///        for firstprivate copies); may be invalid when not needed.
/// \param ForDup true when called from the task_dup helper (taskloop),
///        false for the initial task allocation.
static void emitPrivatesInit(CodeGenFunction &CGF,
                             const OMPExecutableDirective &D,
                             Address KmpTaskSharedsPtr, LValue TDBase,
                             const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                             QualType SharedsTy, QualType SharedsPtrTy,
                             const OMPTaskDataTy &Data,
                             ArrayRef Privates, bool ForDup) {
  ASTContext &C = CGF.getContext();
  // Second field of the kmp_task_t-with-privates record is the .privates.
  // sub-record.
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
  OpenMPDirectiveKind Kind =
      isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
                                                      : OMPD_task;
  const CapturedStmt &CS = *D.getCapturedStmt(Kind);
  CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
  LValue SrcBase;
  bool IsTargetTask =
      isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  // For target-based directives skip 4 firstprivate arrays BasePointersArray,
  // PointersArray, SizesArray, and MappersArray. The original variables for
  // these arrays are not captured and we get their addresses explicitly.
  if ((!IsTargetTask && !Data.FirstprivateVars.empty() && ForDup) ||
      (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
    SrcBase = CGF.MakeAddrLValue(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
        SharedsTy);
  }
  // Iterate fields of the .privates. record in lockstep with Privates.
  FI = cast(FI->getType()->getAsTagDecl())->field_begin();
  for (const PrivateDataTy &Pair : Privates) {
    // Do not initialize private locals.
    if (Pair.second.isLocalPrivate()) {
      ++FI;
      continue;
    }
    const VarDecl *VD = Pair.second.PrivateCopy;
    const Expr *Init = VD->getAnyInitializer();
    // In the dup function only non-trivial constructor initializations need
    // re-emitting; trivial ones were handled at allocation time.
    if (Init && (!ForDup || (isa(Init) &&
                             !CGF.isTrivialInitializer(Init)))) {
      LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
      if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
        const VarDecl *OriginalVD = Pair.second.Original;
        // Check if the variable is the target-based BasePointersArray,
        // PointersArray, SizesArray, or MappersArray.
        LValue SharedRefLValue;
        QualType Type = PrivateLValue.getType();
        const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
        if (IsTargetTask && !SharedField) {
          assert(isa(OriginalVD) &&
                 isa(OriginalVD->getDeclContext()) &&
                 cast(OriginalVD->getDeclContext())
                         ->getNumParams() == 0 &&
                 isa(
                     cast(OriginalVD->getDeclContext())
                         ->getDeclContext()) &&
                 "Expected artificial target data variable.");
          SharedRefLValue =
              CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
        } else if (ForDup) {
          // Source lives in the shareds area of the source task.
          SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
          SharedRefLValue = CGF.MakeAddrLValue(
              Address(SharedRefLValue.getPointer(CGF),
                      C.getDeclAlign(OriginalVD)),
              SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
              SharedRefLValue.getTBAAInfo());
        } else if (CGF.LambdaCaptureFields.count(
                       Pair.second.Original->getCanonicalDecl()) > 0 ||
                   dyn_cast_or_null(CGF.CurCodeDecl)) {
          SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
        } else {
          // Processing for implicitly captured variables.
          InlinedOpenMPRegionRAII Region(
              CGF, [](CodeGenFunction &, PrePostActionTy &) {}, OMPD_unknown,
              /*HasCancel=*/false, /*NoInheritance=*/true);
          SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
        }
        if (Type->isArrayType()) {
          // Initialize firstprivate array.
          if (!isa(Init) || CGF.isTrivialInitializer(Init)) {
            // Perform simple memcpy.
            CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
          } else {
            // Initialize firstprivate array using element-by-element
            // initialization.
            CGF.EmitOMPAggregateAssign(
                PrivateLValue.getAddress(CGF), SharedRefLValue.getAddress(CGF),
                Type,
                [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
                                                  Address SrcElement) {
                  // Clean up any temporaries needed by the initialization.
                  CodeGenFunction::OMPPrivateScope InitScope(CGF);
                  InitScope.addPrivate(
                      Elem, [SrcElement]() -> Address { return SrcElement; });
                  (void)InitScope.Privatize();
                  // Emit initialization for single element.
                  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
                      CGF, &CapturesInfo);
                  CGF.EmitAnyExprToMem(Init, DestElement,
                                       Init->getType().getQualifiers(),
                                       /*IsInitializer=*/false);
                });
          }
        } else {
          // Scalar/record firstprivate: privatize the source element, then
          // run the initializer expression with captures redirected.
          CodeGenFunction::OMPPrivateScope InitScope(CGF);
          InitScope.addPrivate(Elem, [SharedRefLValue, &CGF]() -> Address {
            return SharedRefLValue.getAddress(CGF);
          });
          (void)InitScope.Privatize();
          CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
          CGF.EmitExprAsInit(Init, VD, PrivateLValue,
                             /*capturedByInit=*/false);
        }
      } else {
        // Plain private copy: default-initialize from its initializer.
        CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
      }
    }
    ++FI;
  }
}

/// Check if duplication function is required for taskloops: returns true iff
/// some non-local private copy has a non-trivial constructor initializer.
static bool checkInitIsRequired(CodeGenFunction &CGF,
                                ArrayRef Privates) {
  bool InitRequired = false;
  for (const PrivateDataTy &Pair : Privates) {
    if (Pair.second.isLocalPrivate())
      continue;
    const VarDecl *VD = Pair.second.PrivateCopy;
    const Expr *Init = VD->getAnyInitializer();
    InitRequired = InitRequired || (Init && isa(Init) &&
                                    !CGF.isTrivialInitializer(Init));
    if (InitRequired)
      break;
  }
  return InitRequired;
}

/// Emit task_dup function (for initialization of
/// private/firstprivate/lastprivate vars and last_iter flag)
/// \code
/// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
/// lastpriv) {
/// // setup lastprivate flag
/// task_dst->last = lastpriv;
/// // could be constructor calls here...
/// }
/// \endcode
static llvm::Value *
emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
                    const OMPExecutableDirective &D,
                    QualType KmpTaskTWithPrivatesPtrQTy,
                    const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                    const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
                    QualType SharedsPtrTy, const OMPTaskDataTy &Data,
                    ArrayRef Privates, bool WithLastIter) {
  ASTContext &C = CGM.getContext();
  // Signature: void(dst_task, src_task, int lastpriv).
  FunctionArgList Args;
  ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                           KmpTaskTWithPrivatesPtrQTy, ImplicitParamDecl::Other);
  ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                           KmpTaskTWithPrivatesPtrQTy, ImplicitParamDecl::Other);
  ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
                                ImplicitParamDecl::Other);
  Args.push_back(&DstArg);
  Args.push_back(&SrcArg);
  Args.push_back(&LastprivArg);
  const auto &TaskDupFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
  auto *TaskDup = llvm::Function::Create(
      TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
  TaskDup->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
                    Loc);

  LValue TDBase = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&DstArg),
      KmpTaskTWithPrivatesPtrQTy->castAs());
  // task_dst->liter = lastpriv;
  if (WithLastIter) {
    auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
    LValue Base = CGF.EmitLValueForField(
        TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
    LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
    llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
        CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
    CGF.EmitStoreOfScalar(Lastpriv, LILVal);
  }

  // Emit initial values for private copies (if any).
  assert(!Privates.empty());
  Address KmpTaskSharedsPtr = Address::invalid();
  if (!Data.FirstprivateVars.empty()) {
    // Firstprivates are copied from the *source* task's shareds area.
    LValue TDBase = CGF.EmitLoadOfPointerLValue(
        CGF.GetAddrOfLocalVar(&SrcArg),
        KmpTaskTWithPrivatesPtrQTy->castAs());
    LValue Base = CGF.EmitLValueForField(
        TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
    KmpTaskSharedsPtr = Address(
        CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
                                 Base, *std::next(KmpTaskTQTyRD->field_begin(),
                                                  KmpTaskTShareds)),
                             Loc),
        CGM.getNaturalTypeAlignment(SharedsTy));
  }
  emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
                   SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
  CGF.FinishFunction();
  return TaskDup;
}

/// Checks if destructor function is required to be generated.
/// \return true if cleanups are required, false otherwise.
static bool
checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                         ArrayRef Privates) {
  for (const PrivateDataTy &P : Privates) {
    if (P.second.isLocalPrivate())
      continue;
    QualType Ty = P.second.Original->getType().getNonReferenceType();
    if (Ty.isDestructedType())
      return true;
  }
  return false;
}

namespace {
/// Loop generator for OpenMP iterator expression.
/// RAII scope that privatizes the iterator variables of an OMPIteratorExpr
/// and opens one counting loop per iterator in its constructor; the
/// destructor emits the matching counter increments, back-branches and exit
/// blocks in reverse order.
class OMPIteratorGeneratorScope final
    : public CodeGenFunction::OMPPrivateScope {
  CodeGenFunction &CGF;
  const OMPIteratorExpr *E = nullptr;
  // Per-iterator continue/exit destinations, filled by the constructor and
  // consumed (in reverse) by the destructor.
  SmallVector ContDests;
  SmallVector ExitDests;
  OMPIteratorGeneratorScope() = delete;
  OMPIteratorGeneratorScope(OMPIteratorGeneratorScope &) = delete;

public:
  OMPIteratorGeneratorScope(CodeGenFunction &CGF, const OMPIteratorExpr *E)
      : CodeGenFunction::OMPPrivateScope(CGF), CGF(CGF), E(E) {
    if (!E)
      return;
    SmallVector Uppers;
    for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
      Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
      const auto *VD = cast(E->getIteratorDecl(I));
      addPrivate(VD, [&CGF, VD]() {
        return CGF.CreateMemTemp(VD->getType(), VD->getName());
      });
      const OMPIteratorHelperData &HelperData = E->getHelper(I);
      addPrivate(HelperData.CounterVD, [&CGF, &HelperData]() {
        return CGF.CreateMemTemp(HelperData.CounterVD->getType(),
                                 "counter.addr");
      });
    }
    Privatize();

    // Emit the nested loop headers, outermost iterator first.
    for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
      const OMPIteratorHelperData &HelperData = E->getHelper(I);
      LValue CLVal =
          CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(HelperData.CounterVD),
                             HelperData.CounterVD->getType());
      // Counter = 0;
      CGF.EmitStoreOfScalar(
          llvm::ConstantInt::get(CLVal.getAddress(CGF).getElementType(), 0),
          CLVal);
      CodeGenFunction::JumpDest &ContDest =
          ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
      CodeGenFunction::JumpDest &ExitDest =
          ExitDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.exit"));
      // N = <number-of-iterations>;
      llvm::Value *N = Uppers[I];
      // cont:
      // if (Counter < N) goto body; else goto exit;
      CGF.EmitBlock(ContDest.getBlock());
      auto *CVal =
          CGF.EmitLoadOfScalar(CLVal, HelperData.CounterVD->getLocation());
      // Pick signed vs unsigned compare based on the counter's type.
      llvm::Value *Cmp =
          HelperData.CounterVD->getType()->isSignedIntegerOrEnumerationType()
              ? CGF.Builder.CreateICmpSLT(CVal, N)
              : CGF.Builder.CreateICmpULT(CVal, N);
      llvm::BasicBlock *BodyBB = CGF.createBasicBlock("iter.body");
      CGF.Builder.CreateCondBr(Cmp, BodyBB, ExitDest.getBlock());
      // body:
      CGF.EmitBlock(BodyBB);
      // Iteri = Begini + Counter * Stepi;
      CGF.EmitIgnoredExpr(HelperData.Update);
    }
  }
  ~OMPIteratorGeneratorScope() {
    if (!E)
      return;
    // Close the loops innermost-first.
    for (unsigned I = E->numOfIterators(); I > 0; --I) {
      // Counter = Counter + 1;
      const OMPIteratorHelperData &HelperData = E->getHelper(I - 1);
      CGF.EmitIgnoredExpr(HelperData.CounterUpdate);
      // goto cont;
      CGF.EmitBranchThroughCleanup(ContDests[I - 1]);
      // exit:
      CGF.EmitBlock(ExitDests[I - 1].getBlock(), /*IsFinished=*/I == 1);
    }
  }
};
} // namespace

/// Returns the address and the size (in bytes) of the entity referenced by
/// \p E: a whole variable, an array subscript, or an OpenMP array section
/// (for sections the size is computed from the section bounds/dimensions).
static std::pair
getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
  const auto *OASE = dyn_cast(E);
  llvm::Value *Addr;
  if (OASE) {
    const Expr *Base = OASE->getBase();
    Addr = CGF.EmitScalarExpr(Base);
  } else {
    Addr = CGF.EmitLValue(E).getPointer(CGF);
  }
  llvm::Value *SizeVal;
  QualType Ty = E->getType();
  if (OASE) {
    // Size = element size * product of all section dimensions.
    SizeVal = CGF.getTypeSize(OASE->getBase()->getType()->getPointeeType());
    for (const Expr *SE : OASE->getDimensions()) {
      llvm::Value *Sz = CGF.EmitScalarExpr(SE);
      Sz = CGF.EmitScalarConversion(
          Sz, SE->getType(), CGF.getContext().getSizeType(), SE->getExprLoc());
      SizeVal = CGF.Builder.CreateNUWMul(SizeVal, Sz);
    }
  } else if (const auto *ASE =
                 dyn_cast(E->IgnoreParenImpCasts())) {
    // Size = (one-past-upper-bound address) - (lower-bound address).
    LValue UpAddrLVal =
        CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
    Address UpAddrAddress = UpAddrLVal.getAddress(CGF);
    llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
        UpAddrAddress.getElementType(), UpAddrAddress.getPointer(),
        /*Idx0=*/1);
    llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
    llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
    SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
  } else {
    SizeVal = CGF.getTypeSize(Ty);
  }
  return std::make_pair(Addr, SizeVal);
}
/// Builds kmp_task_affinity_info_t record type, if it is not built yet, and
/// builds flags type.
static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
  QualType FlagsTy = C.getIntTypeForBitwidth(32, /*Signed=*/false);
  if (KmpTaskAffinityInfoTy.isNull()) {
    RecordDecl *KmpAffinityInfoRD =
        C.buildImplicitRecord("kmp_task_affinity_info_t");
    KmpAffinityInfoRD->startDefinition();
    addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getIntPtrType());
    addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
    addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
    KmpAffinityInfoRD->completeDefinition();
    KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
  }
}

/// Allocates and initializes a kmp_task_t object for a task-generating
/// directive: builds the kmp_task_t-with-privates record, the proxy entry
/// point, the privates mapping function, the task_dup function (taskloop),
/// processes detach/affinity clauses, and fills priority/destructor fields.
CGOpenMPRuntime::TaskResultTy
CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
                              const OMPExecutableDirective &D,
                              llvm::Function *TaskFunction, QualType SharedsTy,
                              Address Shareds, const OMPTaskDataTy &Data) {
  ASTContext &C = CGM.getContext();
  llvm::SmallVector Privates;
  // Aggregate privates and sort them by the alignment.
  const auto *I = Data.PrivateCopies.begin();
  for (const Expr *E : Data.PrivateVars) {
    const auto *VD = cast(cast(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(E, VD, cast(cast(*I)->getDecl()),
                         /*PrivateElemInit=*/nullptr));
    ++I;
  }
  I = Data.FirstprivateCopies.begin();
  const auto *IElemInitRef = Data.FirstprivateInits.begin();
  for (const Expr *E : Data.FirstprivateVars) {
    const auto *VD = cast(cast(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(
            E, VD, cast(cast(*I)->getDecl()),
            cast(cast(*IElemInitRef)->getDecl())));
    ++I;
    ++IElemInitRef;
  }
  I = Data.LastprivateCopies.begin();
  for (const Expr *E : Data.LastprivateVars) {
    const auto *VD = cast(cast(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(E, VD, cast(cast(*I)->getDecl()),
                         /*PrivateElemInit=*/nullptr));
    ++I;
  }
  for (const VarDecl *VD : Data.PrivateLocals) {
    // Allocatable locals are stored via an extra pointer indirection.
    if (isAllocatableDecl(VD))
      Privates.emplace_back(CGM.getPointerAlign(), PrivateHelpersTy(VD));
    else
      Privates.emplace_back(C.getDeclAlign(VD), PrivateHelpersTy(VD));
  }
  // Stable sort keeps declaration order among equally-aligned privates.
  llvm::stable_sort(Privates,
                    [](const PrivateDataTy &L, const PrivateDataTy &R) {
                      return L.first > R.first;
                    });
  QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
  // Build type kmp_routine_entry_t (if not built yet).
  emitKmpRoutineEntryT(KmpInt32Ty);
  // Build type kmp_task_t (if not built yet). Taskloop uses a wider record
  // (bounds/stride/liter fields) than plain task/target task.
  if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
    if (SavedKmpTaskloopTQTy.isNull()) {
      SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
          CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
    }
    KmpTaskTQTy = SavedKmpTaskloopTQTy;
  } else {
    assert((D.getDirectiveKind() == OMPD_task ||
            isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
            isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
           "Expected taskloop, task or target directive");
    if (SavedKmpTaskTQTy.isNull()) {
      SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
          CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
    }
    KmpTaskTQTy = SavedKmpTaskTQTy;
  }
  const auto *KmpTaskTQTyRD = cast(KmpTaskTQTy->getAsTagDecl());
  // Build particular struct kmp_task_t for the given task.
  const RecordDecl *KmpTaskTWithPrivatesQTyRD =
      createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
  QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
  QualType KmpTaskTWithPrivatesPtrQTy =
      C.getPointerType(KmpTaskTWithPrivatesQTy);
  llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
  llvm::Type *KmpTaskTWithPrivatesPtrTy =
      KmpTaskTWithPrivatesTy->getPointerTo();
  llvm::Value *KmpTaskTWithPrivatesTySize =
      CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
  QualType SharedsPtrTy = C.getPointerType(SharedsTy);

  // Emit initial values for private copies (if any).
  llvm::Value *TaskPrivatesMap = nullptr;
  llvm::Type *TaskPrivatesMapTy =
      std::next(TaskFunction->arg_begin(), 3)->getType();
  if (!Privates.empty()) {
    auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
    TaskPrivatesMap =
        emitTaskPrivateMappingFunction(CGM, Loc, Data, FI->getType(), Privates);
    TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TaskPrivatesMap, TaskPrivatesMapTy);
  } else {
    TaskPrivatesMap = llvm::ConstantPointerNull::get(
        cast(TaskPrivatesMapTy));
  }
  // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
  // kmp_task_t *tt);
  llvm::Function *TaskEntry = emitProxyTaskFunction(
      CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
      KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
      TaskPrivatesMap);

  // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  // kmp_routine_entry_t *task_entry);
  // Task flags. Format is taken from
  // https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h,
  // description of kmp_tasking_flags struct.
  enum {
    TiedFlag = 0x1,
    FinalFlag = 0x2,
    DestructorsFlag = 0x8,
    PriorityFlag = 0x20,
    DetachableFlag = 0x40,
  };
  unsigned Flags = Data.Tied ? TiedFlag : 0;
  bool NeedsCleanup = false;
  if (!Privates.empty()) {
    NeedsCleanup =
        checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD, Privates);
    if (NeedsCleanup)
      Flags = Flags | DestructorsFlag;
  }
  if (Data.Priority.getInt())
    Flags = Flags | PriorityFlag;
  if (D.hasClausesOfKind())
    Flags = Flags | DetachableFlag;
  // final may be a runtime expression (select) or a compile-time constant.
  llvm::Value *TaskFlags =
      Data.Final.getPointer()
          ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
                                     CGF.Builder.getInt32(FinalFlag),
                                     CGF.Builder.getInt32(/*C=*/0))
          : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
  TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
  llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
  SmallVector AllocArgs = {emitUpdateLocation(CGF, Loc),
      getThreadID(CGF, Loc), TaskFlags, KmpTaskTWithPrivatesTySize,
      SharedsSize, CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          TaskEntry, KmpRoutineEntryPtrTy)};
  llvm::Value *NewTask;
  if (D.hasClausesOfKind()) {
    // Target tasks use __kmpc_omp_target_task_alloc with an extra device id.
    // Check if we have any device clause associated with the directive.
    const Expr *Device = nullptr;
    if (auto *C = D.getSingleClause())
      Device = C->getDevice();
    // Emit device ID if any otherwise use default value.
    llvm::Value *DeviceID;
    if (Device)
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    else
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    AllocArgs.push_back(DeviceID);
    NewTask = CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___kmpc_omp_target_task_alloc),
        AllocArgs);
  } else {
    NewTask =
        CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                                CGM.getModule(), OMPRTL___kmpc_omp_task_alloc),
                            AllocArgs);
  }
  // Emit detach clause initialization.
  // evt = (typeof(evt))__kmpc_task_allow_completion_event(loc, tid,
  // task_descriptor);
  if (const auto *DC = D.getSingleClause()) {
    const Expr *Evt = DC->getEventHandler()->IgnoreParenImpCasts();
    LValue EvtLVal = CGF.EmitLValue(Evt);

    // Build kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
    // int gtid, kmp_task_t *task);
    llvm::Value *Loc = emitUpdateLocation(CGF, DC->getBeginLoc());
    llvm::Value *Tid = getThreadID(CGF, DC->getBeginLoc());
    Tid = CGF.Builder.CreateIntCast(Tid, CGF.IntTy, /*isSigned=*/false);
    llvm::Value *EvtVal = CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___kmpc_task_allow_completion_event),
        {Loc, Tid, NewTask});
    EvtVal = CGF.EmitScalarConversion(EvtVal, C.VoidPtrTy, Evt->getType(),
                                      Evt->getExprLoc());
    CGF.EmitStoreOfScalar(EvtVal, EvtLVal);
  }
  // Process affinity clauses.
  if (D.hasClausesOfKind()) {
    // Process list of affinity data.
    ASTContext &C = CGM.getContext();
    Address AffinitiesArray = Address::invalid();
    // Calculate number of elements to form the array of affinity data.
    llvm::Value *NumOfElements = nullptr;
    unsigned NumAffinities = 0;
    for (const auto *C : D.getClausesOfKind()) {
      if (const Expr *Modifier = C->getModifier()) {
        // Iterator modifier: the element count is the product of all iterator
        // trip counts, only known at runtime.
        const auto *IE = cast(Modifier->IgnoreParenImpCasts());
        for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
          llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
          Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
          NumOfElements =
              NumOfElements ? CGF.Builder.CreateNUWMul(NumOfElements, Sz) : Sz;
        }
      } else {
        NumAffinities += C->varlist_size();
      }
    }
    getKmpAffinityType(CGM.getContext(), KmpTaskAffinityInfoTy);
    // Fields ids in kmp_task_affinity_info record.
    enum RTLAffinityInfoFieldsTy { BaseAddr, Len, Flags };

    QualType KmpTaskAffinityInfoArrayTy;
    if (NumOfElements) {
      // Runtime-sized: emit a VLA of kmp_task_affinity_info_t.
      NumOfElements = CGF.Builder.CreateNUWAdd(
          llvm::ConstantInt::get(CGF.SizeTy, NumAffinities), NumOfElements);
      OpaqueValueExpr OVE(
          Loc,
          C.getIntTypeForBitwidth(C.getTypeSize(C.getSizeType()), /*Signed=*/0),
          VK_PRValue);
      CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
                                                    RValue::get(NumOfElements));
      KmpTaskAffinityInfoArrayTy =
          C.getVariableArrayType(KmpTaskAffinityInfoTy, &OVE, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
      // Properly emit variable-sized array.
      auto *PD = ImplicitParamDecl::Create(C, KmpTaskAffinityInfoArrayTy,
                                           ImplicitParamDecl::Other);
      CGF.EmitVarDecl(*PD);
      AffinitiesArray = CGF.GetAddrOfLocalVar(PD);
      NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
                                                /*isSigned=*/false);
    } else {
      // Statically-sized constant array.
      KmpTaskAffinityInfoArrayTy = C.getConstantArrayType(
          KmpTaskAffinityInfoTy,
          llvm::APInt(C.getTypeSize(C.getSizeType()), NumAffinities), nullptr,
          ArrayType::Normal, /*IndexTypeQuals=*/0);
      AffinitiesArray =
          CGF.CreateMemTemp(KmpTaskAffinityInfoArrayTy, ".affs.arr.addr");
      AffinitiesArray = CGF.Builder.CreateConstArrayGEP(AffinitiesArray, 0);
      NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumAffinities,
                                             /*isSigned=*/false);
    }

    const auto *KmpAffinityInfoRD = KmpTaskAffinityInfoTy->getAsRecordDecl();
    // Fill array by elements without iterators.
    unsigned Pos = 0;
    bool HasIterator = false;
    for (const auto *C : D.getClausesOfKind()) {
      if (C->getModifier()) {
        HasIterator = true;
        continue;
      }
      for (const Expr *E : C->varlists()) {
        llvm::Value *Addr;
        llvm::Value *Size;
        std::tie(Addr, Size) = getPointerAndSize(CGF, E);
        LValue Base =
            CGF.MakeAddrLValue(CGF.Builder.CreateConstGEP(AffinitiesArray, Pos),
                               KmpTaskAffinityInfoTy);
        // affs[i].base_addr = &<Affinities[i].second>;
        LValue BaseAddrLVal = CGF.EmitLValueForField(
            Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
        CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
                              BaseAddrLVal);
        // affs[i].len = sizeof(<Affinities[i].second>);
        LValue LenLVal = CGF.EmitLValueForField(
            Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
        CGF.EmitStoreOfScalar(Size, LenLVal);
        ++Pos;
      }
    }
    LValue PosLVal;
    if (HasIterator) {
      // Runtime counter continues where the static fills left off.
      PosLVal = CGF.MakeAddrLValue(
          CGF.CreateMemTemp(C.getSizeType(), "affs.counter.addr"),
          C.getSizeType());
      CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
    }
    // Process elements with iterators.
    for (const auto *C : D.getClausesOfKind()) {
      const Expr *Modifier = C->getModifier();
      if (!Modifier)
        continue;
      OMPIteratorGeneratorScope IteratorScope(
          CGF, cast_or_null(Modifier->IgnoreParenImpCasts()));
      for (const Expr *E : C->varlists()) {
        llvm::Value *Addr;
        llvm::Value *Size;
        std::tie(Addr, Size) = getPointerAndSize(CGF, E);
        llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
        LValue Base = CGF.MakeAddrLValue(
            Address(CGF.Builder.CreateGEP(AffinitiesArray.getElementType(),
                                          AffinitiesArray.getPointer(), Idx),
                    AffinitiesArray.getAlignment()),
            KmpTaskAffinityInfoTy);
        // affs[i].base_addr = &<Affinities[i].second>;
        LValue BaseAddrLVal = CGF.EmitLValueForField(
            Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
        CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
                              BaseAddrLVal);
        // affs[i].len = sizeof(<Affinities[i].second>);
        LValue LenLVal = CGF.EmitLValueForField(
            Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
        CGF.EmitStoreOfScalar(Size, LenLVal);
        Idx = CGF.Builder.CreateNUWAdd(
            Idx, llvm::ConstantInt::get(Idx->getType(), 1));
        CGF.EmitStoreOfScalar(Idx, PosLVal);
      }
    }
    // Call to kmp_int32 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref,
    // kmp_int32 gtid, kmp_task_t *new_task, kmp_int32
    // naffins, kmp_task_affinity_info_t *affin_list);
    llvm::Value *LocRef = emitUpdateLocation(CGF, Loc);
    llvm::Value *GTid = getThreadID(CGF, Loc);
    llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        AffinitiesArray.getPointer(), CGM.VoidPtrTy);
    // FIXME: Emit the function and ignore its result for now unless the
    // runtime function is properly implemented.
    (void)CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___kmpc_omp_reg_task_with_affinity),
        {LocRef, GTid, NewTask, NumOfElements, AffinListPtr});
  }
  llvm::Value *NewTaskNewTaskTTy =
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          NewTask, KmpTaskTWithPrivatesPtrTy);
  LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
                                               KmpTaskTWithPrivatesQTy);
  LValue TDBase =
      CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
  // Fill the data in the resulting kmp_task_t record.
  // Copy shareds if there are any.
  Address KmpTaskSharedsPtr = Address::invalid();
  if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
    KmpTaskSharedsPtr =
        Address(CGF.EmitLoadOfScalar(
                    CGF.EmitLValueForField(
                        TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
                                           KmpTaskTShareds)),
                    Loc),
                CGM.getNaturalTypeAlignment(SharedsTy));
    LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
    LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
    CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
  }
  // Emit initial values for private copies (if any).
  TaskResultTy Result;
  if (!Privates.empty()) {
    emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
                     SharedsTy, SharedsPtrTy, Data, Privates,
                     /*ForDup=*/false);
    // Taskloop needs a dup function only when lastprivates exist or some
    // private requires non-trivial (re-)initialization.
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
      Result.TaskDupFn = emitTaskDupFunction(
          CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
          KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
          /*WithLastIter=*/!Data.LastprivateVars.empty());
    }
  }
  // Fields of union "kmp_cmplrdata_t" for destructors and priority.
  enum { Priority = 0, Destructors = 1 };
  // Provide pointer to function with destructors for privates.
  auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
  const RecordDecl *KmpCmplrdataUD =
      (*FI)->getType()->getAsUnionType()->getDecl();
  if (NeedsCleanup) {
    llvm::Value *DestructorFn = emitDestructorsFunction(
        CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
        KmpTaskTWithPrivatesQTy);
    LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
    LValue DestructorsLV = CGF.EmitLValueForField(
        Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
    CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                              DestructorFn, KmpRoutineEntryPtrTy),
                          DestructorsLV);
  }
  // Set priority.
  if (Data.Priority.getInt()) {
    LValue Data2LV = CGF.EmitLValueForField(
        TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
    LValue PriorityLV = CGF.EmitLValueForField(
        Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
    CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
  }
  Result.NewTask = NewTask;
  Result.TaskEntry = TaskEntry;
  Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
  Result.TDBase = TDBase;
  Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
  return Result;
}

namespace {
/// Dependence kind for RTL.
enum RTLDependenceKindTy {
  DepIn = 0x01,
  DepInOut = 0x3,
  DepMutexInOutSet = 0x4
};
/// Fields ids in kmp_depend_info record.
enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
} // namespace

/// Translates internal dependency kind into the runtime kind.
static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K) {
  RTLDependenceKindTy DepKind;
  switch (K) {
  case OMPC_DEPEND_in:
    DepKind = DepIn;
    break;
  // Out and InOut dependencies must use the same code.
  case OMPC_DEPEND_out:
  case OMPC_DEPEND_inout:
    DepKind = DepInOut;
    break;
  case OMPC_DEPEND_mutexinoutset:
    DepKind = DepMutexInOutSet;
    break;
  case OMPC_DEPEND_source:
  case OMPC_DEPEND_sink:
  case OMPC_DEPEND_depobj:
  case OMPC_DEPEND_unknown:
    llvm_unreachable("Unknown task dependence type");
  }
  return DepKind;
}

/// Builds kmp_depend_info, if it is not built yet, and builds flags type.
static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy, QualType &FlagsTy) { FlagsTy = C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false); if (KmpDependInfoTy.isNull()) { RecordDecl *KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info"); KmpDependInfoRD->startDefinition(); addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType()); addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType()); addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy); KmpDependInfoRD->completeDefinition(); KmpDependInfoTy = C.getRecordType(KmpDependInfoRD); } } std::pair CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc) { ASTContext &C = CGM.getContext(); QualType FlagsTy; getDependTypes(C, KmpDependInfoTy, FlagsTy); RecordDecl *KmpDependInfoRD = cast(KmpDependInfoTy->getAsTagDecl()); LValue Base = CGF.EmitLoadOfPointerLValue( DepobjLVal.getAddress(CGF), C.getPointerType(C.VoidPtrTy).castAs()); QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy); Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy)); Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo()); llvm::Value *DepObjAddr = CGF.Builder.CreateGEP( Addr.getElementType(), Addr.getPointer(), llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true)); LValue NumDepsBase = CGF.MakeAddrLValue( Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo()); // NumDeps = deps[i].base_addr; LValue BaseAddrLVal = CGF.EmitLValueForField( NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr)); llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, Loc); return std::make_pair(NumDeps, Base); } static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy, llvm::PointerUnion Pos, const OMPTaskDataTy::DependData &Data, Address DependenciesArray) { CodeGenModule &CGM = CGF.CGM; ASTContext 
&C = CGM.getContext(); QualType FlagsTy; getDependTypes(C, KmpDependInfoTy, FlagsTy); RecordDecl *KmpDependInfoRD = cast(KmpDependInfoTy->getAsTagDecl()); llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy); OMPIteratorGeneratorScope IteratorScope( CGF, cast_or_null( Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts() : nullptr)); for (const Expr *E : Data.DepExprs) { llvm::Value *Addr; llvm::Value *Size; std::tie(Addr, Size) = getPointerAndSize(CGF, E); LValue Base; if (unsigned *P = Pos.dyn_cast()) { Base = CGF.MakeAddrLValue( CGF.Builder.CreateConstGEP(DependenciesArray, *P), KmpDependInfoTy); } else { LValue &PosLVal = *Pos.get(); llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc()); Base = CGF.MakeAddrLValue( Address(CGF.Builder.CreateGEP(DependenciesArray.getElementType(), DependenciesArray.getPointer(), Idx), DependenciesArray.getAlignment()), KmpDependInfoTy); } // deps[i].base_addr = &; LValue BaseAddrLVal = CGF.EmitLValueForField( Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr)); CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy), BaseAddrLVal); // deps[i].len = sizeof(); LValue LenLVal = CGF.EmitLValueForField( Base, *std::next(KmpDependInfoRD->field_begin(), Len)); CGF.EmitStoreOfScalar(Size, LenLVal); // deps[i].flags = ; RTLDependenceKindTy DepKind = translateDependencyKind(Data.DepKind); LValue FlagsLVal = CGF.EmitLValueForField( Base, *std::next(KmpDependInfoRD->field_begin(), Flags)); CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind), FlagsLVal); if (unsigned *P = Pos.dyn_cast()) { ++(*P); } else { LValue &PosLVal = *Pos.get(); llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc()); Idx = CGF.Builder.CreateNUWAdd(Idx, llvm::ConstantInt::get(Idx->getType(), 1)); CGF.EmitStoreOfScalar(Idx, PosLVal); } } } static SmallVector emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy, const OMPTaskDataTy::DependData &Data) { assert(Data.DepKind == 
OMPC_DEPEND_depobj && "Expected depobj dependecy kind."); SmallVector Sizes; SmallVector SizeLVals; ASTContext &C = CGF.getContext(); QualType FlagsTy; getDependTypes(C, KmpDependInfoTy, FlagsTy); RecordDecl *KmpDependInfoRD = cast(KmpDependInfoTy->getAsTagDecl()); QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy); llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy); { OMPIteratorGeneratorScope IteratorScope( CGF, cast_or_null( Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts() : nullptr)); for (const Expr *E : Data.DepExprs) { LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts()); LValue Base = CGF.EmitLoadOfPointerLValue( DepobjLVal.getAddress(CGF), C.getPointerType(C.VoidPtrTy).castAs()); Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( Base.getAddress(CGF), KmpDependInfoPtrT); Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo()); llvm::Value *DepObjAddr = CGF.Builder.CreateGEP( Addr.getElementType(), Addr.getPointer(), llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true)); LValue NumDepsBase = CGF.MakeAddrLValue( Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo()); // NumDeps = deps[i].base_addr; LValue BaseAddrLVal = CGF.EmitLValueForField( NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr)); llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc()); LValue NumLVal = CGF.MakeAddrLValue( CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"), C.getUIntPtrType()); CGF.InitTempAlloca(NumLVal.getAddress(CGF), llvm::ConstantInt::get(CGF.IntPtrTy, 0)); llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc()); llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps); CGF.EmitStoreOfScalar(Add, NumLVal); SizeLVals.push_back(NumLVal); } } for (unsigned I = 0, E = SizeLVals.size(); I < E; ++I) { llvm::Value *Size = CGF.EmitLoadOfScalar(SizeLVals[I], 
Data.DepExprs[I]->getExprLoc()); Sizes.push_back(Size); } return Sizes; } static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy, LValue PosLVal, const OMPTaskDataTy::DependData &Data, Address DependenciesArray) { assert(Data.DepKind == OMPC_DEPEND_depobj && "Expected depobj dependecy kind."); ASTContext &C = CGF.getContext(); QualType FlagsTy; getDependTypes(C, KmpDependInfoTy, FlagsTy); RecordDecl *KmpDependInfoRD = cast(KmpDependInfoTy->getAsTagDecl()); QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy); llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy); llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy); { OMPIteratorGeneratorScope IteratorScope( CGF, cast_or_null( Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts() : nullptr)); for (unsigned I = 0, End = Data.DepExprs.size(); I < End; ++I) { const Expr *E = Data.DepExprs[I]; LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts()); LValue Base = CGF.EmitLoadOfPointerLValue( DepobjLVal.getAddress(CGF), C.getPointerType(C.VoidPtrTy).castAs()); Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( Base.getAddress(CGF), KmpDependInfoPtrT); Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo()); // Get number of elements in a single depobj. llvm::Value *DepObjAddr = CGF.Builder.CreateGEP( Addr.getElementType(), Addr.getPointer(), llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true)); LValue NumDepsBase = CGF.MakeAddrLValue( Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo()); // NumDeps = deps[i].base_addr; LValue BaseAddrLVal = CGF.EmitLValueForField( NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr)); llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc()); // memcopy dependency data. 
llvm::Value *Size = CGF.Builder.CreateNUWMul( ElSize, CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false)); llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc()); Address DepAddr = Address(CGF.Builder.CreateGEP(DependenciesArray.getElementType(), DependenciesArray.getPointer(), Pos), DependenciesArray.getAlignment()); CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size); // Increase pos. // pos += size; llvm::Value *Add = CGF.Builder.CreateNUWAdd(Pos, NumDeps); CGF.EmitStoreOfScalar(Add, PosLVal); } } } std::pair CGOpenMPRuntime::emitDependClause( CodeGenFunction &CGF, ArrayRef Dependencies, SourceLocation Loc) { if (llvm::all_of(Dependencies, [](const OMPTaskDataTy::DependData &D) { return D.DepExprs.empty(); })) return std::make_pair(nullptr, Address::invalid()); // Process list of dependencies. ASTContext &C = CGM.getContext(); Address DependenciesArray = Address::invalid(); llvm::Value *NumOfElements = nullptr; unsigned NumDependencies = std::accumulate( Dependencies.begin(), Dependencies.end(), 0, [](unsigned V, const OMPTaskDataTy::DependData &D) { return D.DepKind == OMPC_DEPEND_depobj ? V : (V + (D.IteratorExpr ? 0 : D.DepExprs.size())); }); QualType FlagsTy; getDependTypes(C, KmpDependInfoTy, FlagsTy); bool HasDepobjDeps = false; bool HasRegularWithIterators = false; llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.IntPtrTy, 0); llvm::Value *NumOfRegularWithIterators = llvm::ConstantInt::get(CGF.IntPtrTy, 1); // Calculate number of depobj dependecies and regular deps with the iterators. for (const OMPTaskDataTy::DependData &D : Dependencies) { if (D.DepKind == OMPC_DEPEND_depobj) { SmallVector Sizes = emitDepobjElementsSizes(CGF, KmpDependInfoTy, D); for (llvm::Value *Size : Sizes) { NumOfDepobjElements = CGF.Builder.CreateNUWAdd(NumOfDepobjElements, Size); } HasDepobjDeps = true; continue; } // Include number of iterations, if any. 
if (const auto *IE = cast_or_null(D.IteratorExpr)) { for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) { llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper); Sz = CGF.Builder.CreateIntCast(Sz, CGF.IntPtrTy, /*isSigned=*/false); NumOfRegularWithIterators = CGF.Builder.CreateNUWMul(NumOfRegularWithIterators, Sz); } HasRegularWithIterators = true; continue; } } QualType KmpDependInfoArrayTy; if (HasDepobjDeps || HasRegularWithIterators) { NumOfElements = llvm::ConstantInt::get(CGM.IntPtrTy, NumDependencies, /*isSigned=*/false); if (HasDepobjDeps) { NumOfElements = CGF.Builder.CreateNUWAdd(NumOfDepobjElements, NumOfElements); } if (HasRegularWithIterators) { NumOfElements = CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumOfElements); } OpaqueValueExpr OVE(Loc, C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0), VK_PRValue); CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, RValue::get(NumOfElements)); KmpDependInfoArrayTy = C.getVariableArrayType(KmpDependInfoTy, &OVE, ArrayType::Normal, /*IndexTypeQuals=*/0, SourceRange(Loc, Loc)); // CGF.EmitVariablyModifiedType(KmpDependInfoArrayTy); // Properly emit variable-sized array. 
auto *PD = ImplicitParamDecl::Create(C, KmpDependInfoArrayTy, ImplicitParamDecl::Other); CGF.EmitVarDecl(*PD); DependenciesArray = CGF.GetAddrOfLocalVar(PD); NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty, /*isSigned=*/false); } else { KmpDependInfoArrayTy = C.getConstantArrayType( KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies), nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0); DependenciesArray = CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr"); DependenciesArray = CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0); NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumDependencies, /*isSigned=*/false); } unsigned Pos = 0; for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) { if (Dependencies[I].DepKind == OMPC_DEPEND_depobj || Dependencies[I].IteratorExpr) continue; emitDependData(CGF, KmpDependInfoTy, &Pos, Dependencies[I], DependenciesArray); } // Copy regular dependecies with iterators. LValue PosLVal = CGF.MakeAddrLValue( CGF.CreateMemTemp(C.getSizeType(), "dep.counter.addr"), C.getSizeType()); CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal); for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) { if (Dependencies[I].DepKind == OMPC_DEPEND_depobj || !Dependencies[I].IteratorExpr) continue; emitDependData(CGF, KmpDependInfoTy, &PosLVal, Dependencies[I], DependenciesArray); } // Copy final depobj arrays without iterators. 
if (HasDepobjDeps) { for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) { if (Dependencies[I].DepKind != OMPC_DEPEND_depobj) continue; emitDepobjElements(CGF, KmpDependInfoTy, PosLVal, Dependencies[I], DependenciesArray); } } DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( DependenciesArray, CGF.VoidPtrTy); return std::make_pair(NumOfElements, DependenciesArray); } Address CGOpenMPRuntime::emitDepobjDependClause( CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies, SourceLocation Loc) { if (Dependencies.DepExprs.empty()) return Address::invalid(); // Process list of dependencies. ASTContext &C = CGM.getContext(); Address DependenciesArray = Address::invalid(); unsigned NumDependencies = Dependencies.DepExprs.size(); QualType FlagsTy; getDependTypes(C, KmpDependInfoTy, FlagsTy); RecordDecl *KmpDependInfoRD = cast(KmpDependInfoTy->getAsTagDecl()); llvm::Value *Size; // Define type kmp_depend_info[]; // For depobj reserve one extra element to store the number of elements. // It is required to handle depobj(x) update(in) construct. 
// kmp_depend_info[] deps; llvm::Value *NumDepsVal; CharUnits Align = C.getTypeAlignInChars(KmpDependInfoTy); if (const auto *IE = cast_or_null(Dependencies.IteratorExpr)) { NumDepsVal = llvm::ConstantInt::get(CGF.SizeTy, 1); for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) { llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper); Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false); NumDepsVal = CGF.Builder.CreateNUWMul(NumDepsVal, Sz); } Size = CGF.Builder.CreateNUWAdd(llvm::ConstantInt::get(CGF.SizeTy, 1), NumDepsVal); CharUnits SizeInBytes = C.getTypeSizeInChars(KmpDependInfoTy).alignTo(Align); llvm::Value *RecSize = CGM.getSize(SizeInBytes); Size = CGF.Builder.CreateNUWMul(Size, RecSize); NumDepsVal = CGF.Builder.CreateIntCast(NumDepsVal, CGF.IntPtrTy, /*isSigned=*/false); } else { QualType KmpDependInfoArrayTy = C.getConstantArrayType( KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies + 1), nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0); CharUnits Sz = C.getTypeSizeInChars(KmpDependInfoArrayTy); Size = CGM.getSize(Sz.alignTo(Align)); NumDepsVal = llvm::ConstantInt::get(CGF.IntPtrTy, NumDependencies); } // Need to allocate on the dynamic memory. llvm::Value *ThreadID = getThreadID(CGF, Loc); // Use default allocator. llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); llvm::Value *Args[] = {ThreadID, Size, Allocator}; llvm::Value *Addr = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_alloc), Args, ".dep.arr.addr"); Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( Addr, CGF.ConvertTypeForMem(KmpDependInfoTy)->getPointerTo()); DependenciesArray = Address(Addr, Align); // Write number of elements in the first element of array for depobj. 
LValue Base = CGF.MakeAddrLValue(DependenciesArray, KmpDependInfoTy); // deps[i].base_addr = NumDependencies; LValue BaseAddrLVal = CGF.EmitLValueForField( Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr)); CGF.EmitStoreOfScalar(NumDepsVal, BaseAddrLVal); llvm::PointerUnion Pos; unsigned Idx = 1; LValue PosLVal; if (Dependencies.IteratorExpr) { PosLVal = CGF.MakeAddrLValue( CGF.CreateMemTemp(C.getSizeType(), "iterator.counter.addr"), C.getSizeType()); CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Idx), PosLVal, /*IsInit=*/true); Pos = &PosLVal; } else { Pos = &Idx; } emitDependData(CGF, KmpDependInfoTy, Pos, Dependencies, DependenciesArray); DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy); return DependenciesArray; } void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc) { ASTContext &C = CGM.getContext(); QualType FlagsTy; getDependTypes(C, KmpDependInfoTy, FlagsTy); LValue Base = CGF.EmitLoadOfPointerLValue( DepobjLVal.getAddress(CGF), C.getPointerType(C.VoidPtrTy).castAs()); QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy); Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy)); llvm::Value *DepObjAddr = CGF.Builder.CreateGEP( Addr.getElementType(), Addr.getPointer(), llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true)); DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr, CGF.VoidPtrTy); llvm::Value *ThreadID = getThreadID(CGF, Loc); // Use default allocator. 
llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); llvm::Value *Args[] = {ThreadID, DepObjAddr, Allocator}; // _kmpc_free(gtid, addr, nullptr); (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_free), Args); } void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal, OpenMPDependClauseKind NewDepKind, SourceLocation Loc) { ASTContext &C = CGM.getContext(); QualType FlagsTy; getDependTypes(C, KmpDependInfoTy, FlagsTy); RecordDecl *KmpDependInfoRD = cast(KmpDependInfoTy->getAsTagDecl()); llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy); llvm::Value *NumDeps; LValue Base; std::tie(NumDeps, Base) = getDepobjElements(CGF, DepobjLVal, Loc); Address Begin = Base.getAddress(CGF); // Cast from pointer to array type to pointer to single element. llvm::Value *End = CGF.Builder.CreateGEP( Begin.getElementType(), Begin.getPointer(), NumDeps); // The basic structure here is a while-do loop. llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body"); llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done"); llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock(); CGF.EmitBlock(BodyBB); llvm::PHINode *ElementPHI = CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast"); ElementPHI->addIncoming(Begin.getPointer(), EntryBB); Begin = Address(ElementPHI, Begin.getAlignment()); Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo()); // deps[i].flags = NewDepKind; RTLDependenceKindTy DepKind = translateDependencyKind(NewDepKind); LValue FlagsLVal = CGF.EmitLValueForField( Base, *std::next(KmpDependInfoRD->field_begin(), Flags)); CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind), FlagsLVal); // Shift the address forward by one element. 
Address ElementNext = CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext"); ElementPHI->addIncoming(ElementNext.getPointer(), CGF.Builder.GetInsertBlock()); llvm::Value *IsEmpty = CGF.Builder.CreateICmpEQ(ElementNext.getPointer(), End, "omp.isempty"); CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB); // Done. CGF.EmitBlock(DoneBB, /*IsFinished=*/true); } void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) { if (!CGF.HaveInsertPoint()) return; TaskResultTy Result = emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data); llvm::Value *NewTask = Result.NewTask; llvm::Function *TaskEntry = Result.TaskEntry; llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy; LValue TDBase = Result.TDBase; const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD; // Process list of dependences. Address DependenciesArray = Address::invalid(); llvm::Value *NumOfElements; std::tie(NumOfElements, DependenciesArray) = emitDependClause(CGF, Data.Dependences, Loc); // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc() // libcall. 
// Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid, // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list, // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence // list is not empty llvm::Value *ThreadID = getThreadID(CGF, Loc); llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc); llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask }; llvm::Value *DepTaskArgs[7]; if (!Data.Dependences.empty()) { DepTaskArgs[0] = UpLoc; DepTaskArgs[1] = ThreadID; DepTaskArgs[2] = NewTask; DepTaskArgs[3] = NumOfElements; DepTaskArgs[4] = DependenciesArray.getPointer(); DepTaskArgs[5] = CGF.Builder.getInt32(0); DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); } auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, &TaskArgs, &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) { if (!Data.Tied) { auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId); LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI); CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal); } if (!Data.Dependences.empty()) { CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_omp_task_with_deps), DepTaskArgs); } else { CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_omp_task), TaskArgs); } // Check if parent region is untied and build return for untied task; if (auto *Region = dyn_cast_or_null(CGF.CapturedStmtInfo)) Region->emitUntiedSwitch(CGF); }; llvm::Value *DepWaitTaskArgs[6]; if (!Data.Dependences.empty()) { DepWaitTaskArgs[0] = UpLoc; DepWaitTaskArgs[1] = ThreadID; DepWaitTaskArgs[2] = NumOfElements; DepWaitTaskArgs[3] = DependenciesArray.getPointer(); DepWaitTaskArgs[4] = CGF.Builder.getInt32(0); DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); } auto &M = CGM.getModule(); auto &&ElseCodeGen = [this, &M, &TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry, &Data, &DepWaitTaskArgs, Loc](CodeGenFunction 
&CGF, PrePostActionTy &) { CodeGenFunction::RunCleanupsScope LocalScope(CGF); // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid, // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info // is specified. if (!Data.Dependences.empty()) CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_wait_deps), DepWaitTaskArgs); // Call proxy_task_entry(gtid, new_task); auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy, Loc](CodeGenFunction &CGF, PrePostActionTy &Action) { Action.Enter(CGF); llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy}; CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry, OutlinedFnArgs); }; // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid, // kmp_task_t *new_task); // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid, // kmp_task_t *new_task); RegionCodeGenTy RCG(CodeGen); CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction( M, OMPRTL___kmpc_omp_task_begin_if0), TaskArgs, OMPBuilder.getOrCreateRuntimeFunction( M, OMPRTL___kmpc_omp_task_complete_if0), TaskArgs); RCG.setAction(Action); RCG(CGF); }; if (IfCond) { emitIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen); } else { RegionCodeGenTy ThenRCG(ThenCodeGen); ThenRCG(CGF); } } void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) { if (!CGF.HaveInsertPoint()) return; TaskResultTy Result = emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data); // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc() // libcall. 
// Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int // sched, kmp_uint64 grainsize, void *task_dup); llvm::Value *ThreadID = getThreadID(CGF, Loc); llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc); llvm::Value *IfVal; if (IfCond) { IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy, /*isSigned=*/true); } else { IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1); } LValue LBLVal = CGF.EmitLValueForField( Result.TDBase, *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound)); const auto *LBVar = cast(cast(D.getLowerBoundVariable())->getDecl()); CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(CGF), LBLVal.getQuals(), /*IsInitializer=*/true); LValue UBLVal = CGF.EmitLValueForField( Result.TDBase, *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound)); const auto *UBVar = cast(cast(D.getUpperBoundVariable())->getDecl()); CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(CGF), UBLVal.getQuals(), /*IsInitializer=*/true); LValue StLVal = CGF.EmitLValueForField( Result.TDBase, *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride)); const auto *StVar = cast(cast(D.getStrideVariable())->getDecl()); CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(CGF), StLVal.getQuals(), /*IsInitializer=*/true); // Store reductions address. 
LValue RedLVal = CGF.EmitLValueForField( Result.TDBase, *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions)); if (Data.Reductions) { CGF.EmitStoreOfScalar(Data.Reductions, RedLVal); } else { CGF.EmitNullInitialization(RedLVal.getAddress(CGF), CGF.getContext().VoidPtrTy); } enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 }; llvm::Value *TaskArgs[] = { UpLoc, ThreadID, Result.NewTask, IfVal, LBLVal.getPointer(CGF), UBLVal.getPointer(CGF), CGF.EmitLoadOfScalar(StLVal, Loc), llvm::ConstantInt::getSigned( CGF.IntTy, 1), // Always 1 because taskgroup emitted by the compiler llvm::ConstantInt::getSigned( CGF.IntTy, Data.Schedule.getPointer() ? Data.Schedule.getInt() ? NumTasks : Grainsize : NoSchedule), Data.Schedule.getPointer() ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty, /*isSigned=*/false) : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0), Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( Result.TaskDupFn, CGF.VoidPtrTy) : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)}; CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_taskloop), TaskArgs); } /// Emit reduction operation for each element of array (required for /// array sections) LHS op = RHS. /// \param Type Type of array. /// \param LHSVar Variable on the left side of the reduction operation /// (references element of array in original variable). /// \param RHSVar Variable on the right side of the reduction operation /// (references element of array in original variable). /// \param RedOpGen Generator of reduction operation with use of LHSVar and /// RHSVar. static void EmitOMPAggregateReduction( CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar, const VarDecl *RHSVar, const llvm::function_ref &RedOpGen, const Expr *XExpr = nullptr, const Expr *EExpr = nullptr, const Expr *UpExpr = nullptr) { // Perform element-by-element initialization. 
QualType ElementTy; Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar); Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar); // Drill down to the base element type on both arrays. const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe(); llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr); llvm::Value *RHSBegin = RHSAddr.getPointer(); llvm::Value *LHSBegin = LHSAddr.getPointer(); // Cast from pointer to array type to pointer to single element. llvm::Value *LHSEnd = CGF.Builder.CreateGEP(LHSAddr.getElementType(), LHSBegin, NumElements); // The basic structure here is a while-do loop. llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body"); llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done"); llvm::Value *IsEmpty = CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty"); CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB); // Enter the loop body, making that address the current address. llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock(); CGF.EmitBlock(BodyBB); CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy); llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI( RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast"); RHSElementPHI->addIncoming(RHSBegin, EntryBB); Address RHSElementCurrent = Address(RHSElementPHI, RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize)); llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI( LHSBegin->getType(), 2, "omp.arraycpy.destElementPast"); LHSElementPHI->addIncoming(LHSBegin, EntryBB); Address LHSElementCurrent = Address(LHSElementPHI, LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize)); // Emit copy. CodeGenFunction::OMPPrivateScope Scope(CGF); Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; }); Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; }); Scope.Privatize(); RedOpGen(CGF, XExpr, EExpr, UpExpr); Scope.ForceCleanup(); // Shift the address forward by one element. 
llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32( LHSAddr.getElementType(), LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element"); llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32( RHSAddr.getElementType(), RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element"); // Check whether we've reached the end. llvm::Value *Done = CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done"); CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB); LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock()); RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock()); // Done. CGF.EmitBlock(DoneBB, /*IsFinished=*/true); } /// Emit reduction combiner. If the combiner is a simple expression emit it as /// is, otherwise consider it as combiner of UDR decl and emit it as a call of /// UDR combiner function. static void emitReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp) { if (const auto *CE = dyn_cast(ReductionOp)) if (const auto *OVE = dyn_cast(CE->getCallee())) if (const auto *DRE = dyn_cast(OVE->getSourceExpr()->IgnoreImpCasts())) if (const auto *DRD = dyn_cast(DRE->getDecl())) { std::pair Reduction = CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD); RValue Func = RValue::get(Reduction.first); CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func); CGF.EmitIgnoredExpr(ReductionOp); return; } CGF.EmitIgnoredExpr(ReductionOp); } llvm::Function *CGOpenMPRuntime::emitReductionFunction( SourceLocation Loc, llvm::Type *ArgsType, ArrayRef Privates, ArrayRef LHSExprs, ArrayRef RHSExprs, ArrayRef ReductionOps) { ASTContext &C = CGM.getContext(); // void reduction_func(void *LHSArg, void *RHSArg); FunctionArgList Args; ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy, ImplicitParamDecl::Other); ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy, ImplicitParamDecl::Other); Args.push_back(&LHSArg); Args.push_back(&RHSArg); const auto &CGFI = 
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); std::string Name = getName({"omp", "reduction", "reduction_func"}); auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule()); CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); Fn->setDoesNotRecurse(); CodeGenFunction CGF(CGM); CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); // Dst = (void*[n])(LHSArg); // Src = (void*[n])(RHSArg); Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)), ArgsType), CGF.getPointerAlign()); Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)), ArgsType), CGF.getPointerAlign()); // ... // *(Type*)lhs[i] = RedOp(*(Type*)lhs[i], *(Type*)rhs[i]); // ... CodeGenFunction::OMPPrivateScope Scope(CGF); auto IPriv = Privates.begin(); unsigned Idx = 0; for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) { const auto *RHSVar = cast(cast(RHSExprs[I])->getDecl()); Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() { return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar); }); const auto *LHSVar = cast(cast(LHSExprs[I])->getDecl()); Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() { return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar); }); QualType PrivTy = (*IPriv)->getType(); if (PrivTy->isVariablyModifiedType()) { // Get array size and emit VLA type. 
++Idx; Address Elem = CGF.Builder.CreateConstArrayGEP(LHS, Idx); llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem); const VariableArrayType *VLA = CGF.getContext().getAsVariableArrayType(PrivTy); const auto *OVE = cast(VLA->getSizeExpr()); CodeGenFunction::OpaqueValueMapping OpaqueMap( CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy))); CGF.EmitVariablyModifiedType(PrivTy); } } Scope.Privatize(); IPriv = Privates.begin(); auto ILHS = LHSExprs.begin(); auto IRHS = RHSExprs.begin(); for (const Expr *E : ReductionOps) { if ((*IPriv)->getType()->isArrayType()) { // Emit reduction for array section. const auto *LHSVar = cast(cast(*ILHS)->getDecl()); const auto *RHSVar = cast(cast(*IRHS)->getDecl()); EmitOMPAggregateReduction( CGF, (*IPriv)->getType(), LHSVar, RHSVar, [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) { emitReductionCombiner(CGF, E); }); } else { // Emit reduction for array subscript or single variable. emitReductionCombiner(CGF, E); } ++IPriv; ++ILHS; ++IRHS; } Scope.ForceCleanup(); CGF.FinishFunction(); return Fn; } void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp, const Expr *PrivateRef, const DeclRefExpr *LHS, const DeclRefExpr *RHS) { if (PrivateRef->getType()->isArrayType()) { // Emit reduction for array section. const auto *LHSVar = cast(LHS->getDecl()); const auto *RHSVar = cast(RHS->getDecl()); EmitOMPAggregateReduction( CGF, PrivateRef->getType(), LHSVar, RHSVar, [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) { emitReductionCombiner(CGF, ReductionOp); }); } else { // Emit reduction for array subscript or single variable. 
emitReductionCombiner(CGF, ReductionOp); } } void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef Privates, ArrayRef LHSExprs, ArrayRef RHSExprs, ArrayRef ReductionOps, ReductionOptionsTy Options) { if (!CGF.HaveInsertPoint()) return; bool WithNowait = Options.WithNowait; bool SimpleReduction = Options.SimpleReduction; // Next code should be emitted for reduction: // // static kmp_critical_name lock = { 0 }; // // void reduce_func(void *lhs[], void *rhs[]) { // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]); // ... // *(Type-1*)lhs[-1] = ReductionOperation-1(*(Type-1*)lhs[-1], // *(Type-1*)rhs[-1]); // } // // ... // void *RedList[] = {&[0], ..., &[-1]}; // switch (__kmpc_reduce{_nowait}(, , , sizeof(RedList), // RedList, reduce_func, &)) { // case 1: // ... // [i] = RedOp(*[i], *[i]); // ... // __kmpc_end_reduce{_nowait}(, , &); // break; // case 2: // ... // Atomic([i] = RedOp(*[i], *[i])); // ... // [__kmpc_end_reduce(, , &);] // break; // default:; // } // // if SimpleReduction is true, only the next code is generated: // ... // [i] = RedOp(*[i], *[i]); // ... ASTContext &C = CGM.getContext(); if (SimpleReduction) { CodeGenFunction::RunCleanupsScope Scope(CGF); auto IPriv = Privates.begin(); auto ILHS = LHSExprs.begin(); auto IRHS = RHSExprs.begin(); for (const Expr *E : ReductionOps) { emitSingleReductionCombiner(CGF, E, *IPriv, cast(*ILHS), cast(*IRHS)); ++IPriv; ++ILHS; ++IRHS; } return; } // 1. Build a list of reduction variables. // void *RedList[] = {[0], ..., [-1]}; auto Size = RHSExprs.size(); for (const Expr *E : Privates) { if (E->getType()->isVariablyModifiedType()) // Reserve place for array size. 
++Size; } llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size); QualType ReductionArrayTy = C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0); Address ReductionList = CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list"); auto IPriv = Privates.begin(); unsigned Idx = 0; for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) { Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); CGF.Builder.CreateStore( CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy), Elem); if ((*IPriv)->getType()->isVariablyModifiedType()) { // Store array size. ++Idx; Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); llvm::Value *Size = CGF.Builder.CreateIntCast( CGF.getVLASize( CGF.getContext().getAsVariableArrayType((*IPriv)->getType())) .NumElts, CGF.SizeTy, /*isSigned=*/false); CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy), Elem); } } // 2. Emit reduce_func(). llvm::Function *ReductionFn = emitReductionFunction( Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates, LHSExprs, RHSExprs, ReductionOps); // 3. Create static kmp_critical_name lock = { 0 }; std::string Name = getName({"reduction"}); llvm::Value *Lock = getCriticalRegionLock(Name); // 4. 
Build res = __kmpc_reduce{_nowait}(, , , sizeof(RedList), // RedList, reduce_func, &); llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE); llvm::Value *ThreadId = getThreadID(CGF, Loc); llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy); llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( ReductionList.getPointer(), CGF.VoidPtrTy); llvm::Value *Args[] = { IdentTLoc, // ident_t * ThreadId, // i32 CGF.Builder.getInt32(RHSExprs.size()), // i32 ReductionArrayTySize, // size_type sizeof(RedList) RL, // void *RedList ReductionFn, // void (*) (void *, void *) Lock // kmp_critical_name *& }; llvm::Value *Res = CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), WithNowait ? OMPRTL___kmpc_reduce_nowait : OMPRTL___kmpc_reduce), Args); // 5. Build switch(res) llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default"); llvm::SwitchInst *SwInst = CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2); // 6. Build case 1: // ... // [i] = RedOp(*[i], *[i]); // ... // __kmpc_end_reduce{_nowait}(, , &); // break; llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1"); SwInst->addCase(CGF.Builder.getInt32(1), Case1BB); CGF.EmitBlock(Case1BB); // Add emission of __kmpc_end_reduce{_nowait}(, , &); llvm::Value *EndArgs[] = { IdentTLoc, // ident_t * ThreadId, // i32 Lock // kmp_critical_name *& }; auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps]( CodeGenFunction &CGF, PrePostActionTy &Action) { CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime(); auto IPriv = Privates.begin(); auto ILHS = LHSExprs.begin(); auto IRHS = RHSExprs.begin(); for (const Expr *E : ReductionOps) { RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast(*ILHS), cast(*IRHS)); ++IPriv; ++ILHS; ++IRHS; } }; RegionCodeGenTy RCG(CodeGen); CommonActionTy Action( nullptr, llvm::None, OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), WithNowait ? 
OMPRTL___kmpc_end_reduce_nowait : OMPRTL___kmpc_end_reduce), EndArgs); RCG.setAction(Action); RCG(CGF); CGF.EmitBranch(DefaultBB); // 7. Build case 2: // ... // Atomic([i] = RedOp(*[i], *[i])); // ... // break; llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2"); SwInst->addCase(CGF.Builder.getInt32(2), Case2BB); CGF.EmitBlock(Case2BB); auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps]( CodeGenFunction &CGF, PrePostActionTy &Action) { auto ILHS = LHSExprs.begin(); auto IRHS = RHSExprs.begin(); auto IPriv = Privates.begin(); for (const Expr *E : ReductionOps) { const Expr *XExpr = nullptr; const Expr *EExpr = nullptr; const Expr *UpExpr = nullptr; BinaryOperatorKind BO = BO_Comma; if (const auto *BO = dyn_cast(E)) { if (BO->getOpcode() == BO_Assign) { XExpr = BO->getLHS(); UpExpr = BO->getRHS(); } } // Try to emit update expression as a simple atomic. const Expr *RHSExpr = UpExpr; if (RHSExpr) { // Analyze RHS part of the whole expression. if (const auto *ACO = dyn_cast( RHSExpr->IgnoreParenImpCasts())) { // If this is a conditional operator, analyze its condition for // min/max reduction operator. 
RHSExpr = ACO->getCond(); } if (const auto *BORHS = dyn_cast(RHSExpr->IgnoreParenImpCasts())) { EExpr = BORHS->getRHS(); BO = BORHS->getOpcode(); } } if (XExpr) { const auto *VD = cast(cast(*ILHS)->getDecl()); auto &&AtomicRedGen = [BO, VD, Loc](CodeGenFunction &CGF, const Expr *XExpr, const Expr *EExpr, const Expr *UpExpr) { LValue X = CGF.EmitLValue(XExpr); RValue E; if (EExpr) E = CGF.EmitAnyExpr(EExpr); CGF.EmitOMPAtomicSimpleUpdateExpr( X, E, BO, /*IsXLHSInRHSPart=*/true, llvm::AtomicOrdering::Monotonic, Loc, [&CGF, UpExpr, VD, Loc](RValue XRValue) { CodeGenFunction::OMPPrivateScope PrivateScope(CGF); PrivateScope.addPrivate( VD, [&CGF, VD, XRValue, Loc]() { Address LHSTemp = CGF.CreateMemTemp(VD->getType()); CGF.emitOMPSimpleStore( CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue, VD->getType().getNonReferenceType(), Loc); return LHSTemp; }); (void)PrivateScope.Privatize(); return CGF.EmitAnyExpr(UpExpr); }); }; if ((*IPriv)->getType()->isArrayType()) { // Emit atomic reduction for array section. const auto *RHSVar = cast(cast(*IRHS)->getDecl()); EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar, AtomicRedGen, XExpr, EExpr, UpExpr); } else { // Emit atomic reduction for array subscript or single variable. AtomicRedGen(CGF, XExpr, EExpr, UpExpr); } } else { // Emit as a critical region. 
auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) { CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime(); std::string Name = RT.getName({"atomic_reduction"}); RT.emitCriticalRegion( CGF, Name, [=](CodeGenFunction &CGF, PrePostActionTy &Action) { Action.Enter(CGF); emitReductionCombiner(CGF, E); }, Loc); }; if ((*IPriv)->getType()->isArrayType()) { const auto *LHSVar = cast(cast(*ILHS)->getDecl()); const auto *RHSVar = cast(cast(*IRHS)->getDecl()); EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar, CritRedGen); } else { CritRedGen(CGF, nullptr, nullptr, nullptr); } } ++ILHS; ++IRHS; ++IPriv; } }; RegionCodeGenTy AtomicRCG(AtomicCodeGen); if (!WithNowait) { // Add emission of __kmpc_end_reduce(, , &); llvm::Value *EndArgs[] = { IdentTLoc, // ident_t * ThreadId, // i32 Lock // kmp_critical_name *& }; CommonActionTy Action(nullptr, llvm::None, OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_end_reduce), EndArgs); AtomicRCG.setAction(Action); AtomicRCG(CGF); } else { AtomicRCG(CGF); } CGF.EmitBranch(DefaultBB); CGF.EmitBlock(DefaultBB, /*IsFinished=*/true); } /// Generates unique name for artificial threadprivate variables. /// Format is: "." "_" "" static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix, const Expr *Ref) { SmallString<256> Buffer; llvm::raw_svector_ostream Out(Buffer); const clang::DeclRefExpr *DE; const VarDecl *D = ::getBaseDecl(Ref, DE); if (!D) D = cast(cast(Ref)->getDecl()); D = D->getCanonicalDecl(); std::string Name = CGM.getOpenMPRuntime().getName( {D->isLocalVarDeclOrParm() ? 
D->getName() : CGM.getMangledName(D)}); Out << Prefix << Name << "_" << D->getCanonicalDecl()->getBeginLoc().getRawEncoding(); return std::string(Out.str()); } /// Emits reduction initializer function: /// \code /// void @.red_init(void* %arg, void* %orig) { /// %0 = bitcast void* %arg to * /// store , * %0 /// ret void /// } /// \endcode static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N) { ASTContext &C = CGM.getContext(); QualType VoidPtrTy = C.VoidPtrTy; VoidPtrTy.addRestrict(); FunctionArgList Args; ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy, ImplicitParamDecl::Other); ImplicitParamDecl ParamOrig(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy, ImplicitParamDecl::Other); Args.emplace_back(&Param); Args.emplace_back(&ParamOrig); const auto &FnInfo = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo); std::string Name = CGM.getOpenMPRuntime().getName({"red_init", ""}); auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule()); CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo); Fn->setDoesNotRecurse(); CodeGenFunction CGF(CGM); CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc); Address PrivateAddr = CGF.EmitLoadOfPointer( CGF.GetAddrOfLocalVar(&Param), C.getPointerType(C.VoidPtrTy).castAs()); llvm::Value *Size = nullptr; // If the size of the reduction item is non-constant, load it from global // threadprivate variable. 
if (RCG.getSizes(N).second) { Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate( CGF, CGM.getContext().getSizeType(), generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N))); Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false, CGM.getContext().getSizeType(), Loc); } RCG.emitAggregateType(CGF, N, Size); LValue OrigLVal; // If initializer uses initializer from declare reduction construct, emit a // pointer to the address of the original reduction item (reuired by reduction // initializer) if (RCG.usesReductionInitializer(N)) { Address SharedAddr = CGF.GetAddrOfLocalVar(&ParamOrig); SharedAddr = CGF.EmitLoadOfPointer( SharedAddr, CGM.getContext().VoidPtrTy.castAs()->getTypePtr()); OrigLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy); } else { OrigLVal = CGF.MakeNaturalAlignAddrLValue( llvm::ConstantPointerNull::get(CGM.VoidPtrTy), CGM.getContext().VoidPtrTy); } // Emit the initializer: // %0 = bitcast void* %arg to * // store , * %0 RCG.emitInitialization(CGF, N, PrivateAddr, OrigLVal, [](CodeGenFunction &) { return false; }); CGF.FinishFunction(); return Fn; } /// Emits reduction combiner function: /// \code /// void @.red_comb(void* %arg0, void* %arg1) { /// %lhs = bitcast void* %arg0 to * /// %rhs = bitcast void* %arg1 to * /// %2 = (* %lhs, * %rhs) /// store %2, * %lhs /// ret void /// } /// \endcode static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N, const Expr *ReductionOp, const Expr *LHS, const Expr *RHS, const Expr *PrivateRef) { ASTContext &C = CGM.getContext(); const auto *LHSVD = cast(cast(LHS)->getDecl()); const auto *RHSVD = cast(cast(RHS)->getDecl()); FunctionArgList Args; ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy, ImplicitParamDecl::Other); ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy, ImplicitParamDecl::Other); Args.emplace_back(&ParamInOut); 
Args.emplace_back(&ParamIn); const auto &FnInfo = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo); std::string Name = CGM.getOpenMPRuntime().getName({"red_comb", ""}); auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule()); CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo); Fn->setDoesNotRecurse(); CodeGenFunction CGF(CGM); CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc); llvm::Value *Size = nullptr; // If the size of the reduction item is non-constant, load it from global // threadprivate variable. if (RCG.getSizes(N).second) { Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate( CGF, CGM.getContext().getSizeType(), generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N))); Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false, CGM.getContext().getSizeType(), Loc); } RCG.emitAggregateType(CGF, N, Size); // Remap lhs and rhs variables to the addresses of the function arguments. // %lhs = bitcast void* %arg0 to * // %rhs = bitcast void* %arg1 to * CodeGenFunction::OMPPrivateScope PrivateScope(CGF); PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() { // Pull out the pointer to the variable. Address PtrAddr = CGF.EmitLoadOfPointer( CGF.GetAddrOfLocalVar(&ParamInOut), C.getPointerType(C.VoidPtrTy).castAs()); return CGF.Builder.CreateElementBitCast( PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType())); }); PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() { // Pull out the pointer to the variable. 
Address PtrAddr = CGF.EmitLoadOfPointer( CGF.GetAddrOfLocalVar(&ParamIn), C.getPointerType(C.VoidPtrTy).castAs()); return CGF.Builder.CreateElementBitCast( PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType())); }); PrivateScope.Privatize(); // Emit the combiner body: // %2 = ( *%lhs, *%rhs) // store %2, * %lhs CGM.getOpenMPRuntime().emitSingleReductionCombiner( CGF, ReductionOp, PrivateRef, cast(LHS), cast(RHS)); CGF.FinishFunction(); return Fn; } /// Emits reduction finalizer function: /// \code /// void @.red_fini(void* %arg) { /// %0 = bitcast void* %arg to * /// (* %0) /// ret void /// } /// \endcode static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N) { if (!RCG.needCleanups(N)) return nullptr; ASTContext &C = CGM.getContext(); FunctionArgList Args; ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy, ImplicitParamDecl::Other); Args.emplace_back(&Param); const auto &FnInfo = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo); std::string Name = CGM.getOpenMPRuntime().getName({"red_fini", ""}); auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule()); CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo); Fn->setDoesNotRecurse(); CodeGenFunction CGF(CGM); CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc); Address PrivateAddr = CGF.EmitLoadOfPointer( CGF.GetAddrOfLocalVar(&Param), C.getPointerType(C.VoidPtrTy).castAs()); llvm::Value *Size = nullptr; // If the size of the reduction item is non-constant, load it from global // threadprivate variable. 
if (RCG.getSizes(N).second) { Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate( CGF, CGM.getContext().getSizeType(), generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N))); Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false, CGM.getContext().getSizeType(), Loc); } RCG.emitAggregateType(CGF, N, Size); // Emit the finalizer body: // (* %0) RCG.emitCleanups(CGF, N, PrivateAddr); CGF.FinishFunction(Loc); return Fn; } llvm::Value *CGOpenMPRuntime::emitTaskReductionInit( CodeGenFunction &CGF, SourceLocation Loc, ArrayRef LHSExprs, ArrayRef RHSExprs, const OMPTaskDataTy &Data) { if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty()) return nullptr; // Build typedef struct: // kmp_taskred_input { // void *reduce_shar; // shared reduction item // void *reduce_orig; // original reduction item used for initialization // size_t reduce_size; // size of data item // void *reduce_init; // data initialization routine // void *reduce_fini; // data finalization routine // void *reduce_comb; // data combiner routine // kmp_task_red_flags_t flags; // flags for additional info from compiler // } kmp_taskred_input_t; ASTContext &C = CGM.getContext(); RecordDecl *RD = C.buildImplicitRecord("kmp_taskred_input_t"); RD->startDefinition(); const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy); const FieldDecl *OrigFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy); const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType()); const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy); const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy); const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy); const FieldDecl *FlagsFD = addFieldToRecordDecl( C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false)); RD->completeDefinition(); QualType RDType = C.getRecordType(RD); unsigned Size = Data.ReductionVars.size(); llvm::APInt ArraySize(/*numBits=*/64, Size); QualType ArrayRDType = 
C.getConstantArrayType( RDType, ArraySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0); // kmp_task_red_input_t .rd_input.[Size]; Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input."); ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionOrigs, Data.ReductionCopies, Data.ReductionOps); for (unsigned Cnt = 0; Cnt < Size; ++Cnt) { // kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt]; llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0), llvm::ConstantInt::get(CGM.SizeTy, Cnt)}; llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP( TaskRedInput.getPointer(), Idxs, /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc, ".rd_input.gep."); LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType); // ElemLVal.reduce_shar = &Shareds[Cnt]; LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD); RCG.emitSharedOrigLValue(CGF, Cnt); llvm::Value *CastedShared = CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer(CGF)); CGF.EmitStoreOfScalar(CastedShared, SharedLVal); // ElemLVal.reduce_orig = &Origs[Cnt]; LValue OrigLVal = CGF.EmitLValueForField(ElemLVal, OrigFD); llvm::Value *CastedOrig = CGF.EmitCastToVoidPtr(RCG.getOrigLValue(Cnt).getPointer(CGF)); CGF.EmitStoreOfScalar(CastedOrig, OrigLVal); RCG.emitAggregateType(CGF, Cnt); llvm::Value *SizeValInChars; llvm::Value *SizeVal; std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt); // We use delayed creation/initialization for VLAs and array sections. It is // required because runtime does not provide the way to pass the sizes of // VLAs/array sections to initializer/combiner/finalizer functions. Instead // threadprivate global variables are used to store these values and use // them in the functions. 
bool DelayedCreation = !!SizeVal; SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy, /*isSigned=*/false); LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD); CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal); // ElemLVal.reduce_init = init; LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD); llvm::Value *InitAddr = CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt)); CGF.EmitStoreOfScalar(InitAddr, InitLVal); // ElemLVal.reduce_fini = fini; LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD); llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt); llvm::Value *FiniAddr = Fini ? CGF.EmitCastToVoidPtr(Fini) : llvm::ConstantPointerNull::get(CGM.VoidPtrTy); CGF.EmitStoreOfScalar(FiniAddr, FiniLVal); // ElemLVal.reduce_comb = comb; LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD); llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction( CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt], RHSExprs[Cnt], Data.ReductionCopies[Cnt])); CGF.EmitStoreOfScalar(CombAddr, CombLVal); // ElemLVal.flags = 0; LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD); if (DelayedCreation) { CGF.EmitStoreOfScalar( llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*isSigned=*/true), FlagsLVal); } else CGF.EmitNullInitialization(FlagsLVal.getAddress(CGF), FlagsLVal.getType()); } if (Data.IsReductionWithTaskMod) { // Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int // is_ws, int num, void *data); llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc); llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy, /*isSigned=*/true); llvm::Value *Args[] = { IdentTLoc, GTid, llvm::ConstantInt::get(CGM.IntTy, Data.IsWorksharingReduction ? 
1 : 0, /*isSigned=*/true), llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true), CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( TaskRedInput.getPointer(), CGM.VoidPtrTy)}; return CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_taskred_modifier_init), Args); } // Build call void *__kmpc_taskred_init(int gtid, int num_data, void *data); llvm::Value *Args[] = { CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy, /*isSigned=*/true), llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true), CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(), CGM.VoidPtrTy)}; return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_taskred_init), Args); } void CGOpenMPRuntime::emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc, bool IsWorksharingReduction) { // Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int // is_ws, int num, void *data); llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc); llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy, /*isSigned=*/true); llvm::Value *Args[] = {IdentTLoc, GTid, llvm::ConstantInt::get(CGM.IntTy, IsWorksharingReduction ? 1 : 0, /*isSigned=*/true)}; (void)CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_task_reduction_modifier_fini), Args); } void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N) { auto Sizes = RCG.getSizes(N); // Emit threadprivate global variable if the type is non-constant // (Sizes.second = nullptr). 
if (Sizes.second) { llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy, /*isSigned=*/false); Address SizeAddr = getAddrOfArtificialThreadPrivate( CGF, CGM.getContext().getSizeType(), generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N))); CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false); } } Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal) { // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void // *d); llvm::Value *Args[] = {CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy, /*isSigned=*/true), ReductionsPtr, CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( SharedLVal.getPointer(CGF), CGM.VoidPtrTy)}; return Address( CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_task_reduction_get_th_data), Args), SharedLVal.getAlignment()); } void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) { if (!CGF.HaveInsertPoint()) return; if (CGF.CGM.getLangOpts().OpenMPIRBuilder) { OMPBuilder.createTaskwait(CGF.Builder); } else { // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 // global_tid); llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; // Ignore return result until untied tasks are supported. 
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_omp_taskwait), Args); } if (auto *Region = dyn_cast_or_null(CGF.CapturedStmtInfo)) Region->emitUntiedSwitch(CGF); } void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF, OpenMPDirectiveKind InnerKind, const RegionCodeGenTy &CodeGen, bool HasCancel) { if (!CGF.HaveInsertPoint()) return; InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel, InnerKind != OMPD_critical && InnerKind != OMPD_master && InnerKind != OMPD_masked); CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr); } namespace { enum RTCancelKind { CancelNoreq = 0, CancelParallel = 1, CancelLoop = 2, CancelSections = 3, CancelTaskgroup = 4 }; } // anonymous namespace static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) { RTCancelKind CancelKind = CancelNoreq; if (CancelRegion == OMPD_parallel) CancelKind = CancelParallel; else if (CancelRegion == OMPD_for) CancelKind = CancelLoop; else if (CancelRegion == OMPD_sections) CancelKind = CancelSections; else { assert(CancelRegion == OMPD_taskgroup); CancelKind = CancelTaskgroup; } return CancelKind; } void CGOpenMPRuntime::emitCancellationPointCall( CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion) { if (!CGF.HaveInsertPoint()) return; // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32 // global_tid, kmp_int32 cncl_kind); if (auto *OMPRegionInfo = dyn_cast_or_null(CGF.CapturedStmtInfo)) { // For 'cancellation point taskgroup', the task region info may not have a // cancel. This may instead happen in another adjacent task. if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) { llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), CGF.Builder.getInt32(getCancellationKind(CancelRegion))}; // Ignore return result until untied tasks are supported. 
llvm::Value *Result = CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_cancellationpoint), Args); // if (__kmpc_cancellationpoint()) { // call i32 @__kmpc_cancel_barrier( // for parallel cancellation only // exit from construct; // } llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit"); llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue"); llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result); CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB); CGF.EmitBlock(ExitBB); if (CancelRegion == OMPD_parallel) emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false); // exit from construct; CodeGenFunction::JumpDest CancelDest = CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind()); CGF.EmitBranchThroughCleanup(CancelDest); CGF.EmitBlock(ContBB, /*IsFinished=*/true); } } } void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion) { if (!CGF.HaveInsertPoint()) return; // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid, // kmp_int32 cncl_kind); auto &M = CGM.getModule(); if (auto *OMPRegionInfo = dyn_cast_or_null(CGF.CapturedStmtInfo)) { auto &&ThenGen = [this, &M, Loc, CancelRegion, OMPRegionInfo](CodeGenFunction &CGF, PrePostActionTy &) { CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime(); llvm::Value *Args[] = { RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc), CGF.Builder.getInt32(getCancellationKind(CancelRegion))}; // Ignore return result until untied tasks are supported. 
llvm::Value *Result = CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_cancel), Args); // if (__kmpc_cancel()) { // call i32 @__kmpc_cancel_barrier( // for parallel cancellation only // exit from construct; // } llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit"); llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue"); llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result); CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB); CGF.EmitBlock(ExitBB); if (CancelRegion == OMPD_parallel) RT.emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false); // exit from construct; CodeGenFunction::JumpDest CancelDest = CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind()); CGF.EmitBranchThroughCleanup(CancelDest); CGF.EmitBlock(ContBB, /*IsFinished=*/true); }; if (IfCond) { emitIfClause(CGF, IfCond, ThenGen, [](CodeGenFunction &, PrePostActionTy &) {}); } else { RegionCodeGenTy ThenRCG(ThenGen); ThenRCG(CGF); } } } namespace { /// Cleanup action for uses_allocators support. 
class OMPUsesAllocatorsActionTy final : public PrePostActionTy { ArrayRef> Allocators; public: OMPUsesAllocatorsActionTy( ArrayRef> Allocators) : Allocators(Allocators) {} void Enter(CodeGenFunction &CGF) override { if (!CGF.HaveInsertPoint()) return; for (const auto &AllocatorData : Allocators) { CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsInit( CGF, AllocatorData.first, AllocatorData.second); } } void Exit(CodeGenFunction &CGF) override { if (!CGF.HaveInsertPoint()) return; for (const auto &AllocatorData : Allocators) { CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsFini(CGF, AllocatorData.first); } } }; } // namespace void CGOpenMPRuntime::emitTargetOutlinedFunction( const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) { assert(!ParentName.empty() && "Invalid target region parent name!"); HasEmittedTargetRegion = true; SmallVector, 4> Allocators; for (const auto *C : D.getClausesOfKind()) { for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) { const OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I); if (!D.AllocatorTraits) continue; Allocators.emplace_back(D.Allocator, D.AllocatorTraits); } } OMPUsesAllocatorsActionTy UsesAllocatorAction(Allocators); CodeGen.setAction(UsesAllocatorAction); emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry, CodeGen); } void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator, const Expr *AllocatorTraits) { llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc()); ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true); // Use default memspace handle. 
llvm::Value *MemSpaceHandle = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); llvm::Value *NumTraits = llvm::ConstantInt::get( CGF.IntTy, cast( AllocatorTraits->getType()->getAsArrayTypeUnsafe()) ->getSize() .getLimitedValue()); LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits); Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy); AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy, AllocatorTraitsLVal.getBaseInfo(), AllocatorTraitsLVal.getTBAAInfo()); llvm::Value *Traits = CGF.EmitLoadOfScalar(AllocatorTraitsLVal, AllocatorTraits->getExprLoc()); llvm::Value *AllocatorVal = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_init_allocator), {ThreadId, MemSpaceHandle, NumTraits, Traits}); // Store to allocator. CGF.EmitVarDecl(*cast( cast(Allocator->IgnoreParenImpCasts())->getDecl())); LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts()); AllocatorVal = CGF.EmitScalarConversion(AllocatorVal, CGF.getContext().VoidPtrTy, Allocator->getType(), Allocator->getExprLoc()); CGF.EmitStoreOfScalar(AllocatorVal, AllocatorLVal); } void CGOpenMPRuntime::emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator) { llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc()); ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true); LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts()); llvm::Value *AllocatorVal = CGF.EmitLoadOfScalar(AllocatorLVal, Allocator->getExprLoc()); AllocatorVal = CGF.EmitScalarConversion(AllocatorVal, Allocator->getType(), CGF.getContext().VoidPtrTy, Allocator->getExprLoc()); (void)CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), OMPRTL___kmpc_destroy_allocator), {ThreadId, AllocatorVal}); } void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper( const OMPExecutableDirective &D, StringRef ParentName, llvm::Function 
*&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) { // Create a unique name for the entry function using the source location // information of the current target region. The name will be something like: // // __omp_offloading_DD_FFFF_PP_lBB // // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the // mangled name of the function that encloses the target region and BB is the // line number of the target region. unsigned DeviceID; unsigned FileID; unsigned Line; getTargetEntryUniqueInfo(CGM.getContext(), D.getBeginLoc(), DeviceID, FileID, Line); SmallString<64> EntryFnName; { llvm::raw_svector_ostream OS(EntryFnName); OS << "__omp_offloading" << llvm::format("_%x", DeviceID) << llvm::format("_%x_", FileID) << ParentName << "_l" << Line; } const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target); CodeGenFunction CGF(CGM, true); CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName); CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo); OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS, D.getBeginLoc()); // If this target outline function is not an offload entry, we don't need to // register it. if (!IsOffloadEntry) return; // The target region ID is used by the runtime library to identify the current // target region, so it only has to be unique and not necessarily point to // anything. It could be the pointer to the outlined function that implements // the target region, but we aren't using that so that the compiler doesn't // need to keep that, and could therefore inline the host function if proven // worthwhile during optimization. In the other hand, if emitting code for the // device, the ID has to be the function address so that it can retrieved from // the offloading entry and launched by the runtime library. 
We also mark the // outlined function to have external linkage in case we are emitting code for // the device, because these functions will be entry points to the device. if (CGM.getLangOpts().OpenMPIsDevice) { OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy); OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage); OutlinedFn->setDSOLocal(false); if (CGM.getTriple().isAMDGCN()) OutlinedFn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL); } else { std::string Name = getName({EntryFnName, "region_id"}); OutlinedFnID = new llvm::GlobalVariable( CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::WeakAnyLinkage, llvm::Constant::getNullValue(CGM.Int8Ty), Name); } // Register the information for the entry associated with this target region. OffloadEntriesInfoManager.registerTargetRegionEntryInfo( DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion); // Add NumTeams and ThreadLimit attributes to the outlined GPU function int32_t DefaultValTeams = -1; getNumTeamsExprForTargetDirective(CGF, D, DefaultValTeams); if (DefaultValTeams > 0) { OutlinedFn->addFnAttr("omp_target_num_teams", std::to_string(DefaultValTeams)); } int32_t DefaultValThreads = -1; getNumThreadsExprForTargetDirective(CGF, D, DefaultValThreads); if (DefaultValThreads > 0) { OutlinedFn->addFnAttr("omp_target_thread_limit", std::to_string(DefaultValThreads)); } } /// Checks if the expression is constant or does not have non-trivial function /// calls. static bool isTrivial(ASTContext &Ctx, const Expr * E) { // We can skip constant expressions. // We can skip expressions with trivial calls or simple expressions. 
return (E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) || !E->hasNonTrivialCall(Ctx)) && !E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/true); } const Stmt *CGOpenMPRuntime::getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body) { const Stmt *Child = Body->IgnoreContainers(); while (const auto *C = dyn_cast_or_null(Child)) { Child = nullptr; for (const Stmt *S : C->body()) { if (const auto *E = dyn_cast(S)) { if (isTrivial(Ctx, E)) continue; } // Some of the statements can be ignored. if (isa(S) || isa(S) || isa(S) || isa(S) || isa(S)) continue; // Analyze declarations. if (const auto *DS = dyn_cast(S)) { if (llvm::all_of(DS->decls(), [](const Decl *D) { if (isa(D) || isa(D) || isa(D) || isa(D) || isa(D) || isa(D) || isa(D) || isa(D) || isa(D) || isa(D)) return true; const auto *VD = dyn_cast(D); if (!VD) return false; return VD->hasGlobalStorage() || !VD->isUsed(); })) continue; } // Found multiple children - cannot get the one child only. if (Child) return nullptr; Child = S; } if (Child) Child = Child->IgnoreContainers(); } return Child; } const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective( CodeGenFunction &CGF, const OMPExecutableDirective &D, int32_t &DefaultVal) { OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind(); assert(isOpenMPTargetExecutionDirective(DirectiveKind) && "Expected target-based executable directive."); switch (DirectiveKind) { case OMPD_target: { const auto *CS = D.getInnermostCapturedStmt(); const auto *Body = CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true); const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(CGF.getContext(), Body); if (const auto *NestedDir = dyn_cast_or_null(ChildStmt)) { if (isOpenMPTeamsDirective(NestedDir->getDirectiveKind())) { if (NestedDir->hasClausesOfKind()) { const Expr *NumTeams = NestedDir->getSingleClause()->getNumTeams(); if (NumTeams->isIntegerConstantExpr(CGF.getContext())) if (auto Constant = NumTeams->getIntegerConstantExpr(CGF.getContext())) 
DefaultVal = Constant->getExtValue(); return NumTeams; } DefaultVal = 0; return nullptr; } if (isOpenMPParallelDirective(NestedDir->getDirectiveKind()) || isOpenMPSimdDirective(NestedDir->getDirectiveKind())) { DefaultVal = 1; return nullptr; } DefaultVal = 1; return nullptr; } // A value of -1 is used to check if we need to emit no teams region DefaultVal = -1; return nullptr; } case OMPD_target_teams: case OMPD_target_teams_distribute: case OMPD_target_teams_distribute_simd: case OMPD_target_teams_distribute_parallel_for: case OMPD_target_teams_distribute_parallel_for_simd: { if (D.hasClausesOfKind()) { const Expr *NumTeams = D.getSingleClause()->getNumTeams(); if (NumTeams->isIntegerConstantExpr(CGF.getContext())) if (auto Constant = NumTeams->getIntegerConstantExpr(CGF.getContext())) DefaultVal = Constant->getExtValue(); return NumTeams; } DefaultVal = 0; return nullptr; } case OMPD_target_parallel: case OMPD_target_parallel_for: case OMPD_target_parallel_for_simd: case OMPD_target_simd: DefaultVal = 1; return nullptr; case OMPD_parallel: case OMPD_for: case OMPD_parallel_for: case OMPD_parallel_master: case OMPD_parallel_sections: case OMPD_for_simd: case OMPD_parallel_for_simd: case OMPD_cancel: case OMPD_cancellation_point: case OMPD_ordered: case OMPD_threadprivate: case OMPD_allocate: case OMPD_task: case OMPD_simd: case OMPD_tile: case OMPD_unroll: case OMPD_sections: case OMPD_section: case OMPD_single: case OMPD_master: case OMPD_critical: case OMPD_taskyield: case OMPD_barrier: case OMPD_taskwait: case OMPD_taskgroup: case OMPD_atomic: case OMPD_flush: case OMPD_depobj: case OMPD_scan: case OMPD_teams: case OMPD_target_data: case OMPD_target_exit_data: case OMPD_target_enter_data: case OMPD_distribute: case OMPD_distribute_simd: case OMPD_distribute_parallel_for: case OMPD_distribute_parallel_for_simd: case OMPD_teams_distribute: case OMPD_teams_distribute_simd: case OMPD_teams_distribute_parallel_for: case OMPD_teams_distribute_parallel_for_simd: case 
OMPD_target_update: case OMPD_declare_simd: case OMPD_declare_variant: case OMPD_begin_declare_variant: case OMPD_end_declare_variant: case OMPD_declare_target: case OMPD_end_declare_target: case OMPD_declare_reduction: case OMPD_declare_mapper: case OMPD_taskloop: case OMPD_taskloop_simd: case OMPD_master_taskloop: case OMPD_master_taskloop_simd: case OMPD_parallel_master_taskloop: case OMPD_parallel_master_taskloop_simd: case OMPD_requires: case OMPD_unknown: break; default: break; } llvm_unreachable("Unexpected directive kind."); } llvm::Value *CGOpenMPRuntime::emitNumTeamsForTargetDirective( CodeGenFunction &CGF, const OMPExecutableDirective &D) { assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the teams directive expected to be emitted " "only for the host!"); CGBuilderTy &Bld = CGF.Builder; int32_t DefaultNT = -1; const Expr *NumTeams = getNumTeamsExprForTargetDirective(CGF, D, DefaultNT); if (NumTeams != nullptr) { OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind(); switch (DirectiveKind) { case OMPD_target: { const auto *CS = D.getInnermostCapturedStmt(); CGOpenMPInnerExprInfo CGInfo(CGF, *CS); CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo); llvm::Value *NumTeamsVal = CGF.EmitScalarExpr(NumTeams, /*IgnoreResultAssign*/ true); return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty, /*isSigned=*/true); } case OMPD_target_teams: case OMPD_target_teams_distribute: case OMPD_target_teams_distribute_simd: case OMPD_target_teams_distribute_parallel_for: case OMPD_target_teams_distribute_parallel_for_simd: { CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF); llvm::Value *NumTeamsVal = CGF.EmitScalarExpr(NumTeams, /*IgnoreResultAssign*/ true); return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty, /*isSigned=*/true); } default: break; } } else if (DefaultNT == -1) { return nullptr; } return Bld.getInt32(DefaultNT); } static llvm::Value *getNumThreads(CodeGenFunction &CGF, const CapturedStmt *CS, llvm::Value *DefaultThreadLimitVal) 
{ const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild( CGF.getContext(), CS->getCapturedStmt()); if (const auto *Dir = dyn_cast_or_null(Child)) { if (isOpenMPParallelDirective(Dir->getDirectiveKind())) { llvm::Value *NumThreads = nullptr; llvm::Value *CondVal = nullptr; // Handle if clause. If if clause present, the number of threads is // calculated as ? ( ? : 0 ) : 1. if (Dir->hasClausesOfKind()) { CGOpenMPInnerExprInfo CGInfo(CGF, *CS); CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo); const OMPIfClause *IfClause = nullptr; for (const auto *C : Dir->getClausesOfKind()) { if (C->getNameModifier() == OMPD_unknown || C->getNameModifier() == OMPD_parallel) { IfClause = C; break; } } if (IfClause) { const Expr *Cond = IfClause->getCondition(); bool Result; if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) { if (!Result) return CGF.Builder.getInt32(1); } else { CodeGenFunction::LexicalScope Scope(CGF, Cond->getSourceRange()); if (const auto *PreInit = cast_or_null(IfClause->getPreInitStmt())) { for (const auto *I : PreInit->decls()) { if (!I->hasAttr()) { CGF.EmitVarDecl(cast(*I)); } else { CodeGenFunction::AutoVarEmission Emission = CGF.EmitAutoVarAlloca(cast(*I)); CGF.EmitAutoVarCleanups(Emission); } } } CondVal = CGF.EvaluateExprAsBool(Cond); } } } // Check the value of num_threads clause iff if clause was not specified // or is not evaluated to false. 
if (Dir->hasClausesOfKind()) { CGOpenMPInnerExprInfo CGInfo(CGF, *CS); CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo); const auto *NumThreadsClause = Dir->getSingleClause(); CodeGenFunction::LexicalScope Scope( CGF, NumThreadsClause->getNumThreads()->getSourceRange()); if (const auto *PreInit = cast_or_null(NumThreadsClause->getPreInitStmt())) { for (const auto *I : PreInit->decls()) { if (!I->hasAttr()) { CGF.EmitVarDecl(cast(*I)); } else { CodeGenFunction::AutoVarEmission Emission = CGF.EmitAutoVarAlloca(cast(*I)); CGF.EmitAutoVarCleanups(Emission); } } } NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads()); NumThreads = CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned=*/false); if (DefaultThreadLimitVal) NumThreads = CGF.Builder.CreateSelect( CGF.Builder.CreateICmpULT(DefaultThreadLimitVal, NumThreads), DefaultThreadLimitVal, NumThreads); } else { NumThreads = DefaultThreadLimitVal ? DefaultThreadLimitVal : CGF.Builder.getInt32(0); } // Process condition of the if clause. if (CondVal) { NumThreads = CGF.Builder.CreateSelect(CondVal, NumThreads, CGF.Builder.getInt32(1)); } return NumThreads; } if (isOpenMPSimdDirective(Dir->getDirectiveKind())) return CGF.Builder.getInt32(1); return DefaultThreadLimitVal; } return DefaultThreadLimitVal ? 
DefaultThreadLimitVal : CGF.Builder.getInt32(0); } const Expr *CGOpenMPRuntime::getNumThreadsExprForTargetDirective( CodeGenFunction &CGF, const OMPExecutableDirective &D, int32_t &DefaultVal) { OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind(); assert(isOpenMPTargetExecutionDirective(DirectiveKind) && "Expected target-based executable directive."); switch (DirectiveKind) { case OMPD_target: // Teams have no clause thread_limit return nullptr; case OMPD_target_teams: case OMPD_target_teams_distribute: if (D.hasClausesOfKind()) { const auto *ThreadLimitClause = D.getSingleClause(); const Expr *ThreadLimit = ThreadLimitClause->getThreadLimit(); if (ThreadLimit->isIntegerConstantExpr(CGF.getContext())) if (auto Constant = ThreadLimit->getIntegerConstantExpr(CGF.getContext())) DefaultVal = Constant->getExtValue(); return ThreadLimit; } return nullptr; case OMPD_target_parallel: case OMPD_target_parallel_for: case OMPD_target_parallel_for_simd: case OMPD_target_teams_distribute_parallel_for: case OMPD_target_teams_distribute_parallel_for_simd: { Expr *ThreadLimit = nullptr; Expr *NumThreads = nullptr; if (D.hasClausesOfKind()) { const auto *ThreadLimitClause = D.getSingleClause(); ThreadLimit = ThreadLimitClause->getThreadLimit(); if (ThreadLimit->isIntegerConstantExpr(CGF.getContext())) if (auto Constant = ThreadLimit->getIntegerConstantExpr(CGF.getContext())) DefaultVal = Constant->getExtValue(); } if (D.hasClausesOfKind()) { const auto *NumThreadsClause = D.getSingleClause(); NumThreads = NumThreadsClause->getNumThreads(); if (NumThreads->isIntegerConstantExpr(CGF.getContext())) { if (auto Constant = NumThreads->getIntegerConstantExpr(CGF.getContext())) { if (Constant->getExtValue() < DefaultVal) { DefaultVal = Constant->getExtValue(); ThreadLimit = NumThreads; } } } } return ThreadLimit; } case OMPD_target_teams_distribute_simd: case OMPD_target_simd: DefaultVal = 1; return nullptr; case OMPD_parallel: case OMPD_for: case OMPD_parallel_for: case 
OMPD_parallel_master: case OMPD_parallel_sections: case OMPD_for_simd: case OMPD_parallel_for_simd: case OMPD_cancel: case OMPD_cancellation_point: case OMPD_ordered: case OMPD_threadprivate: case OMPD_allocate: case OMPD_task: case OMPD_simd: case OMPD_tile: case OMPD_unroll: case OMPD_sections: case OMPD_section: case OMPD_single: case OMPD_master: case OMPD_critical: case OMPD_taskyield: case OMPD_barrier: case OMPD_taskwait: case OMPD_taskgroup: case OMPD_atomic: case OMPD_flush: case OMPD_depobj: case OMPD_scan: case OMPD_teams: case OMPD_target_data: case OMPD_target_exit_data: case OMPD_target_enter_data: case OMPD_distribute: case OMPD_distribute_simd: case OMPD_distribute_parallel_for: case OMPD_distribute_parallel_for_simd: case OMPD_teams_distribute: case OMPD_teams_distribute_simd: case OMPD_teams_distribute_parallel_for: case OMPD_teams_distribute_parallel_for_simd: case OMPD_target_update: case OMPD_declare_simd: case OMPD_declare_variant: case OMPD_begin_declare_variant: case OMPD_end_declare_variant: case OMPD_declare_target: case OMPD_end_declare_target: case OMPD_declare_reduction: case OMPD_declare_mapper: case OMPD_taskloop: case OMPD_taskloop_simd: case OMPD_master_taskloop: case OMPD_master_taskloop_simd: case OMPD_parallel_master_taskloop: case OMPD_parallel_master_taskloop_simd: case OMPD_requires: case OMPD_unknown: break; default: break; } llvm_unreachable("Unsupported directive kind."); } llvm::Value *CGOpenMPRuntime::emitNumThreadsForTargetDirective( CodeGenFunction &CGF, const OMPExecutableDirective &D) { assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the teams directive expected to be emitted " "only for the host!"); OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind(); assert(isOpenMPTargetExecutionDirective(DirectiveKind) && "Expected target-based executable directive."); CGBuilderTy &Bld = CGF.Builder; llvm::Value *ThreadLimitVal = nullptr; llvm::Value *NumThreadsVal = nullptr; switch (DirectiveKind) { case 
OMPD_target: { const CapturedStmt *CS = D.getInnermostCapturedStmt(); if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal)) return NumThreads; const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild( CGF.getContext(), CS->getCapturedStmt()); if (const auto *Dir = dyn_cast_or_null(Child)) { if (Dir->hasClausesOfKind()) { CGOpenMPInnerExprInfo CGInfo(CGF, *CS); CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo); const auto *ThreadLimitClause = Dir->getSingleClause(); CodeGenFunction::LexicalScope Scope( CGF, ThreadLimitClause->getThreadLimit()->getSourceRange()); if (const auto *PreInit = cast_or_null(ThreadLimitClause->getPreInitStmt())) { for (const auto *I : PreInit->decls()) { if (!I->hasAttr()) { CGF.EmitVarDecl(cast(*I)); } else { CodeGenFunction::AutoVarEmission Emission = CGF.EmitAutoVarAlloca(cast(*I)); CGF.EmitAutoVarCleanups(Emission); } } } llvm::Value *ThreadLimit = CGF.EmitScalarExpr( ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true); ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false); } if (isOpenMPTeamsDirective(Dir->getDirectiveKind()) && !isOpenMPDistributeDirective(Dir->getDirectiveKind())) { CS = Dir->getInnermostCapturedStmt(); const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild( CGF.getContext(), CS->getCapturedStmt()); Dir = dyn_cast_or_null(Child); } if (Dir && isOpenMPDistributeDirective(Dir->getDirectiveKind()) && !isOpenMPSimdDirective(Dir->getDirectiveKind())) { CS = Dir->getInnermostCapturedStmt(); if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal)) return NumThreads; } if (Dir && isOpenMPSimdDirective(Dir->getDirectiveKind())) return Bld.getInt32(1); } return ThreadLimitVal ? 
ThreadLimitVal : Bld.getInt32(0); } case OMPD_target_teams: { if (D.hasClausesOfKind()) { CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF); const auto *ThreadLimitClause = D.getSingleClause(); llvm::Value *ThreadLimit = CGF.EmitScalarExpr( ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true); ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false); } const CapturedStmt *CS = D.getInnermostCapturedStmt(); if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal)) return NumThreads; const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild( CGF.getContext(), CS->getCapturedStmt()); if (const auto *Dir = dyn_cast_or_null(Child)) { if (Dir->getDirectiveKind() == OMPD_distribute) { CS = Dir->getInnermostCapturedStmt(); if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal)) return NumThreads; } } return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0); } case OMPD_target_teams_distribute: if (D.hasClausesOfKind()) { CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF); const auto *ThreadLimitClause = D.getSingleClause(); llvm::Value *ThreadLimit = CGF.EmitScalarExpr( ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true); ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false); } return getNumThreads(CGF, D.getInnermostCapturedStmt(), ThreadLimitVal); case OMPD_target_parallel: case OMPD_target_parallel_for: case OMPD_target_parallel_for_simd: case OMPD_target_teams_distribute_parallel_for: case OMPD_target_teams_distribute_parallel_for_simd: { llvm::Value *CondVal = nullptr; // Handle if clause. If if clause present, the number of threads is // calculated as ? ( ? : 0 ) : 1. 
if (D.hasClausesOfKind()) { const OMPIfClause *IfClause = nullptr; for (const auto *C : D.getClausesOfKind()) { if (C->getNameModifier() == OMPD_unknown || C->getNameModifier() == OMPD_parallel) { IfClause = C; break; } } if (IfClause) { const Expr *Cond = IfClause->getCondition(); bool Result; if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) { if (!Result) return Bld.getInt32(1); } else { CodeGenFunction::RunCleanupsScope Scope(CGF); CondVal = CGF.EvaluateExprAsBool(Cond); } } } if (D.hasClausesOfKind()) { CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF); const auto *ThreadLimitClause = D.getSingleClause(); llvm::Value *ThreadLimit = CGF.EmitScalarExpr( ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true); ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false); } if (D.hasClausesOfKind()) { CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); const auto *NumThreadsClause = D.getSingleClause(); llvm::Value *NumThreads = CGF.EmitScalarExpr( NumThreadsClause->getNumThreads(), /*IgnoreResultAssign=*/true); NumThreadsVal = Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned=*/false); ThreadLimitVal = ThreadLimitVal ? 
Bld.CreateSelect(Bld.CreateICmpULT(NumThreadsVal, ThreadLimitVal), NumThreadsVal, ThreadLimitVal) : NumThreadsVal; } if (!ThreadLimitVal) ThreadLimitVal = Bld.getInt32(0); if (CondVal) return Bld.CreateSelect(CondVal, ThreadLimitVal, Bld.getInt32(1)); return ThreadLimitVal; } case OMPD_target_teams_distribute_simd: case OMPD_target_simd: return Bld.getInt32(1); case OMPD_parallel: case OMPD_for: case OMPD_parallel_for: case OMPD_parallel_master: case OMPD_parallel_sections: case OMPD_for_simd: case OMPD_parallel_for_simd: case OMPD_cancel: case OMPD_cancellation_point: case OMPD_ordered: case OMPD_threadprivate: case OMPD_allocate: case OMPD_task: case OMPD_simd: case OMPD_tile: case OMPD_unroll: case OMPD_sections: case OMPD_section: case OMPD_single: case OMPD_master: case OMPD_critical: case OMPD_taskyield: case OMPD_barrier: case OMPD_taskwait: case OMPD_taskgroup: case OMPD_atomic: case OMPD_flush: case OMPD_depobj: case OMPD_scan: case OMPD_teams: case OMPD_target_data: case OMPD_target_exit_data: case OMPD_target_enter_data: case OMPD_distribute: case OMPD_distribute_simd: case OMPD_distribute_parallel_for: case OMPD_distribute_parallel_for_simd: case OMPD_teams_distribute: case OMPD_teams_distribute_simd: case OMPD_teams_distribute_parallel_for: case OMPD_teams_distribute_parallel_for_simd: case OMPD_target_update: case OMPD_declare_simd: case OMPD_declare_variant: case OMPD_begin_declare_variant: case OMPD_end_declare_variant: case OMPD_declare_target: case OMPD_end_declare_target: case OMPD_declare_reduction: case OMPD_declare_mapper: case OMPD_taskloop: case OMPD_taskloop_simd: case OMPD_master_taskloop: case OMPD_master_taskloop_simd: case OMPD_parallel_master_taskloop: case OMPD_parallel_master_taskloop_simd: case OMPD_requires: case OMPD_unknown: break; default: break; } llvm_unreachable("Unsupported directive kind."); } namespace { LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE(); // Utility to handle information from clauses associated with a given // 
construct that use mappable expressions (e.g. 'map' clause, 'to' clause). // It provides a convenient interface to obtain the information and generate // code for that information. class MappableExprsHandler { public: /// Values for bit flags used to specify the mapping type for /// offloading. enum OpenMPOffloadMappingFlags : uint64_t { /// No flags OMP_MAP_NONE = 0x0, /// Allocate memory on the device and move data from host to device. OMP_MAP_TO = 0x01, /// Allocate memory on the device and move data from device to host. OMP_MAP_FROM = 0x02, /// Always perform the requested mapping action on the element, even /// if it was already mapped before. OMP_MAP_ALWAYS = 0x04, /// Delete the element from the device environment, ignoring the /// current reference count associated with the element. OMP_MAP_DELETE = 0x08, /// The element being mapped is a pointer-pointee pair; both the /// pointer and the pointee should be mapped. OMP_MAP_PTR_AND_OBJ = 0x10, /// This flags signals that the base address of an entry should be /// passed to the target kernel as an argument. OMP_MAP_TARGET_PARAM = 0x20, /// Signal that the runtime library has to return the device pointer /// in the current position for the data being mapped. Used when we have the /// use_device_ptr or use_device_addr clause. OMP_MAP_RETURN_PARAM = 0x40, /// This flag signals that the reference being passed is a pointer to /// private data. OMP_MAP_PRIVATE = 0x80, /// Pass the element to the device by value. OMP_MAP_LITERAL = 0x100, /// Implicit map OMP_MAP_IMPLICIT = 0x200, /// Close is a hint to the runtime to allocate memory close to /// the target device. OMP_MAP_CLOSE = 0x400, /// 0x800 is reserved for compatibility with XLC. /// Produce a runtime error if the data is not already allocated. OMP_MAP_PRESENT = 0x1000, /// Signal that the runtime library should use args as an array of /// descriptor_dim pointers and use args_size as dims. 
Used when we have /// non-contiguous list items in target update directive OMP_MAP_NON_CONTIG = 0x100000000000, /// The 16 MSBs of the flags indicate whether the entry is member of some /// struct/class. OMP_MAP_MEMBER_OF = 0xffff000000000000, LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ OMP_MAP_MEMBER_OF), }; /// Get the offset of the OMP_MAP_MEMBER_OF field. static unsigned getFlagMemberOffset() { unsigned Offset = 0; for (uint64_t Remain = OMP_MAP_MEMBER_OF; !(Remain & 1); Remain = Remain >> 1) Offset++; return Offset; } /// Class that holds debugging information for a data mapping to be passed to /// the runtime library. class MappingExprInfo { /// The variable declaration used for the data mapping. const ValueDecl *MapDecl = nullptr; /// The original expression used in the map clause, or null if there is /// none. const Expr *MapExpr = nullptr; public: MappingExprInfo(const ValueDecl *MapDecl, const Expr *MapExpr = nullptr) : MapDecl(MapDecl), MapExpr(MapExpr) {} const ValueDecl *getMapDecl() const { return MapDecl; } const Expr *getMapExpr() const { return MapExpr; } }; /// Class that associates information with a base pointer to be passed to the /// runtime library. class BasePointerInfo { /// The base pointer. llvm::Value *Ptr = nullptr; /// The base declaration that refers to this device pointer, or null if /// there is none. 
const ValueDecl *DevPtrDecl = nullptr; public: BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr) : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {} llvm::Value *operator*() const { return Ptr; } const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; } void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; } }; using MapExprsArrayTy = SmallVector; using MapBaseValuesArrayTy = SmallVector; using MapValuesArrayTy = SmallVector; using MapFlagsArrayTy = SmallVector; using MapMappersArrayTy = SmallVector; using MapDimArrayTy = SmallVector; using MapNonContiguousArrayTy = SmallVector; /// This structure contains combined information generated for mappable /// clauses, including base pointers, pointers, sizes, map types, user-defined /// mappers, and non-contiguous information. struct MapCombinedInfoTy { struct StructNonContiguousInfo { bool IsNonContiguous = false; MapDimArrayTy Dims; MapNonContiguousArrayTy Offsets; MapNonContiguousArrayTy Counts; MapNonContiguousArrayTy Strides; }; MapExprsArrayTy Exprs; MapBaseValuesArrayTy BasePointers; MapValuesArrayTy Pointers; MapValuesArrayTy Sizes; MapFlagsArrayTy Types; MapMappersArrayTy Mappers; StructNonContiguousInfo NonContigInfo; /// Append arrays in \a CurInfo. 
void append(MapCombinedInfoTy &CurInfo) { Exprs.append(CurInfo.Exprs.begin(), CurInfo.Exprs.end()); BasePointers.append(CurInfo.BasePointers.begin(), CurInfo.BasePointers.end()); Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end()); Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end()); Types.append(CurInfo.Types.begin(), CurInfo.Types.end()); Mappers.append(CurInfo.Mappers.begin(), CurInfo.Mappers.end()); NonContigInfo.Dims.append(CurInfo.NonContigInfo.Dims.begin(), CurInfo.NonContigInfo.Dims.end()); NonContigInfo.Offsets.append(CurInfo.NonContigInfo.Offsets.begin(), CurInfo.NonContigInfo.Offsets.end()); NonContigInfo.Counts.append(CurInfo.NonContigInfo.Counts.begin(), CurInfo.NonContigInfo.Counts.end()); NonContigInfo.Strides.append(CurInfo.NonContigInfo.Strides.begin(), CurInfo.NonContigInfo.Strides.end()); } }; /// Map between a struct and the its lowest & highest elements which have been /// mapped. /// [ValueDecl *] --> {LE(FieldIndex, Pointer), /// HE(FieldIndex, Pointer)} struct StructRangeInfoTy { MapCombinedInfoTy PreliminaryMapData; std::pair LowestElem = { 0, Address::invalid()}; std::pair HighestElem = { 0, Address::invalid()}; Address Base = Address::invalid(); Address LB = Address::invalid(); bool IsArraySection = false; bool HasCompleteRecord = false; }; private: /// Kind that defines how a device pointer has to be returned. 
struct MapInfo { OMPClauseMappableExprCommon::MappableExprComponentListRef Components; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; ArrayRef MapModifiers; ArrayRef MotionModifiers; bool ReturnDevicePointer = false; bool IsImplicit = false; const ValueDecl *Mapper = nullptr; const Expr *VarRef = nullptr; bool ForDeviceAddr = false; MapInfo() = default; MapInfo( OMPClauseMappableExprCommon::MappableExprComponentListRef Components, OpenMPMapClauseKind MapType, ArrayRef MapModifiers, ArrayRef MotionModifiers, bool ReturnDevicePointer, bool IsImplicit, const ValueDecl *Mapper = nullptr, const Expr *VarRef = nullptr, bool ForDeviceAddr = false) : Components(Components), MapType(MapType), MapModifiers(MapModifiers), MotionModifiers(MotionModifiers), ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit), Mapper(Mapper), VarRef(VarRef), ForDeviceAddr(ForDeviceAddr) {} }; /// If use_device_ptr or use_device_addr is used on a decl which is a struct /// member and there is no map information about it, then emission of that /// entry is deferred until the whole struct has been processed. struct DeferredDevicePtrEntryTy { const Expr *IE = nullptr; const ValueDecl *VD = nullptr; bool ForDeviceAddr = false; DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD, bool ForDeviceAddr) : IE(IE), VD(VD), ForDeviceAddr(ForDeviceAddr) {} }; /// The target directive from where the mappable clauses were extracted. It /// is either a executable directive or a user-defined mapper directive. llvm::PointerUnion CurDir; /// Function the directive is being generated for. CodeGenFunction &CGF; /// Set of all first private variables in the current directive. /// bool data is set to true if the variable is implicitly marked as /// firstprivate, false otherwise. llvm::DenseMap, bool> FirstPrivateDecls; /// Map between device pointer declarations and their expression components. /// The key value for declarations in 'this' is null. 
llvm::DenseMap< const ValueDecl *, SmallVector> DevPointersMap; llvm::Value *getExprTypeSize(const Expr *E) const { QualType ExprTy = E->getType().getCanonicalType(); // Calculate the size for array shaping expression. if (const auto *OAE = dyn_cast(E)) { llvm::Value *Size = CGF.getTypeSize(OAE->getBase()->getType()->getPointeeType()); for (const Expr *SE : OAE->getDimensions()) { llvm::Value *Sz = CGF.EmitScalarExpr(SE); Sz = CGF.EmitScalarConversion(Sz, SE->getType(), CGF.getContext().getSizeType(), SE->getExprLoc()); Size = CGF.Builder.CreateNUWMul(Size, Sz); } return Size; } // Reference types are ignored for mapping purposes. if (const auto *RefTy = ExprTy->getAs()) ExprTy = RefTy->getPointeeType().getCanonicalType(); // Given that an array section is considered a built-in type, we need to // do the calculation based on the length of the section instead of relying // on CGF.getTypeSize(E->getType()). if (const auto *OAE = dyn_cast(E)) { QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType( OAE->getBase()->IgnoreParenImpCasts()) .getCanonicalType(); // If there is no length associated with the expression and lower bound is // not specified too, that means we are using the whole length of the // base. if (!OAE->getLength() && OAE->getColonLocFirst().isValid() && !OAE->getLowerBound()) return CGF.getTypeSize(BaseTy); llvm::Value *ElemSize; if (const auto *PTy = BaseTy->getAs()) { ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType()); } else { const auto *ATy = cast(BaseTy.getTypePtr()); assert(ATy && "Expecting array type if not a pointer type."); ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType()); } // If we don't have a length at this point, that is because we have an // array section with a single element. 
if (!OAE->getLength() && OAE->getColonLocFirst().isInvalid()) return ElemSize; if (const Expr *LenExpr = OAE->getLength()) { llvm::Value *LengthVal = CGF.EmitScalarExpr(LenExpr); LengthVal = CGF.EmitScalarConversion(LengthVal, LenExpr->getType(), CGF.getContext().getSizeType(), LenExpr->getExprLoc()); return CGF.Builder.CreateNUWMul(LengthVal, ElemSize); } assert(!OAE->getLength() && OAE->getColonLocFirst().isValid() && OAE->getLowerBound() && "expected array_section[lb:]."); // Size = sizetype - lb * elemtype; llvm::Value *LengthVal = CGF.getTypeSize(BaseTy); llvm::Value *LBVal = CGF.EmitScalarExpr(OAE->getLowerBound()); LBVal = CGF.EmitScalarConversion(LBVal, OAE->getLowerBound()->getType(), CGF.getContext().getSizeType(), OAE->getLowerBound()->getExprLoc()); LBVal = CGF.Builder.CreateNUWMul(LBVal, ElemSize); llvm::Value *Cmp = CGF.Builder.CreateICmpUGT(LengthVal, LBVal); llvm::Value *TrueVal = CGF.Builder.CreateNUWSub(LengthVal, LBVal); LengthVal = CGF.Builder.CreateSelect( Cmp, TrueVal, llvm::ConstantInt::get(CGF.SizeTy, 0)); return LengthVal; } return CGF.getTypeSize(ExprTy); } /// Return the corresponding bits for a given map clause modifier. Add /// a flag marking the map as a pointer if requested. Add a flag marking the /// map as the first one of a series of maps that relate to the same map /// expression. OpenMPOffloadMappingFlags getMapTypeBits( OpenMPMapClauseKind MapType, ArrayRef MapModifiers, ArrayRef MotionModifiers, bool IsImplicit, bool AddPtrFlag, bool AddIsTargetParamFlag, bool IsNonContiguous) const { OpenMPOffloadMappingFlags Bits = IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE; switch (MapType) { case OMPC_MAP_alloc: case OMPC_MAP_release: // alloc and release is the default behavior in the runtime library, i.e. // if we don't pass any bits alloc/release that is what the runtime is // going to do. Therefore, we don't need to signal anything for these two // type modifiers. 
break; case OMPC_MAP_to: Bits |= OMP_MAP_TO; break; case OMPC_MAP_from: Bits |= OMP_MAP_FROM; break; case OMPC_MAP_tofrom: Bits |= OMP_MAP_TO | OMP_MAP_FROM; break; case OMPC_MAP_delete: Bits |= OMP_MAP_DELETE; break; case OMPC_MAP_unknown: llvm_unreachable("Unexpected map type!"); } if (AddPtrFlag) Bits |= OMP_MAP_PTR_AND_OBJ; if (AddIsTargetParamFlag) Bits |= OMP_MAP_TARGET_PARAM; if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_always) != MapModifiers.end()) Bits |= OMP_MAP_ALWAYS; if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_close) != MapModifiers.end()) Bits |= OMP_MAP_CLOSE; if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_present) != MapModifiers.end() || llvm::find(MotionModifiers, OMPC_MOTION_MODIFIER_present) != MotionModifiers.end()) Bits |= OMP_MAP_PRESENT; if (IsNonContiguous) Bits |= OMP_MAP_NON_CONTIG; return Bits; } /// Return true if the provided expression is a final array section. A /// final array section, is one whose length can't be proved to be one. bool isFinalArraySectionExpression(const Expr *E) const { const auto *OASE = dyn_cast(E); // It is not an array section and therefore not a unity-size one. if (!OASE) return false; // An array section with no colon always refer to a single element. if (OASE->getColonLocFirst().isInvalid()) return false; const Expr *Length = OASE->getLength(); // If we don't have a length we have to check if the array has size 1 // for this dimension. Also, we should always expect a length if the // base type is pointer. if (!Length) { QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType( OASE->getBase()->IgnoreParenImpCasts()) .getCanonicalType(); if (const auto *ATy = dyn_cast(BaseQTy.getTypePtr())) return ATy->getSize().getSExtValue() != 1; // If we don't have a constant dimension length, we have to consider // the current section as having any size, so it is not necessarily // unitary. If it happen to be unity size, that's user fault. return true; } // Check if the length evaluates to 1. 
Expr::EvalResult Result; if (!Length->EvaluateAsInt(Result, CGF.getContext())) return true; // Can have more that size 1. llvm::APSInt ConstLength = Result.Val.getInt(); return ConstLength.getSExtValue() != 1; } /// Generate the base pointers, section pointers, sizes, map type bits, and /// user-defined mappers (all included in \a CombinedInfo) for the provided /// map type, map or motion modifiers, and expression components. /// \a IsFirstComponent should be set to true if the provided set of /// components is the first associated with a capture. void generateInfoForComponentList( OpenMPMapClauseKind MapType, ArrayRef MapModifiers, ArrayRef MotionModifiers, OMPClauseMappableExprCommon::MappableExprComponentListRef Components, MapCombinedInfoTy &CombinedInfo, StructRangeInfoTy &PartialStruct, bool IsFirstComponentList, bool IsImplicit, const ValueDecl *Mapper = nullptr, bool ForDeviceAddr = false, const ValueDecl *BaseDecl = nullptr, const Expr *MapExpr = nullptr, ArrayRef OverlappedElements = llvm::None) const { // The following summarizes what has to be generated for each map and the // types below. The generated information is expressed in this order: // base pointer, section pointer, size, flags // (to add to the ones that come from the map type and modifier). 
// // double d; // int i[100]; // float *p; // // struct S1 { // int i; // float f[50]; // } // struct S2 { // int i; // float f[50]; // S1 s; // double *p; // struct S2 *ps; // int &ref; // } // S2 s; // S2 *ps; // // map(d) // &d, &d, sizeof(double), TARGET_PARAM | TO | FROM // // map(i) // &i, &i, 100*sizeof(int), TARGET_PARAM | TO | FROM // // map(i[1:23]) // &i(=&i[0]), &i[1], 23*sizeof(int), TARGET_PARAM | TO | FROM // // map(p) // &p, &p, sizeof(float*), TARGET_PARAM | TO | FROM // // map(p[1:24]) // &p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM | PTR_AND_OBJ // in unified shared memory mode or for local pointers // p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM // // map(s) // &s, &s, sizeof(S2), TARGET_PARAM | TO | FROM // // map(s.i) // &s, &(s.i), sizeof(int), TARGET_PARAM | TO | FROM // // map(s.s.f) // &s, &(s.s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM // // map(s.p) // &s, &(s.p), sizeof(double*), TARGET_PARAM | TO | FROM // // map(to: s.p[:22]) // &s, &(s.p), sizeof(double*), TARGET_PARAM (*) // &s, &(s.p), sizeof(double*), MEMBER_OF(1) (**) // &(s.p), &(s.p[0]), 22*sizeof(double), // MEMBER_OF(1) | PTR_AND_OBJ | TO (***) // (*) alloc space for struct members, only this is a target parameter // (**) map the pointer (nothing to be mapped in this example) (the compiler // optimizes this entry out, same in the examples below) // (***) map the pointee (map: to) // // map(to: s.ref) // &s, &(s.ref), sizeof(int*), TARGET_PARAM (*) // &s, &(s.ref), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | TO (***) // (*) alloc space for struct members, only this is a target parameter // (**) map the pointer (nothing to be mapped in this example) (the compiler // optimizes this entry out, same in the examples below) // (***) map the pointee (map: to) // // map(s.ps) // &s, &(s.ps), sizeof(S2*), TARGET_PARAM | TO | FROM // // map(from: s.ps->s.i) // &s, &(s.ps), sizeof(S2*), TARGET_PARAM // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1) // &(s.ps), 
&(s.ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM // // map(to: s.ps->ps) // &s, &(s.ps), sizeof(S2*), TARGET_PARAM // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1) // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | TO // // map(s.ps->ps->ps) // &s, &(s.ps), sizeof(S2*), TARGET_PARAM // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1) // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM // // map(to: s.ps->ps->s.f[:22]) // &s, &(s.ps), sizeof(S2*), TARGET_PARAM // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1) // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO // // map(ps) // &ps, &ps, sizeof(S2*), TARGET_PARAM | TO | FROM // // map(ps->i) // ps, &(ps->i), sizeof(int), TARGET_PARAM | TO | FROM // // map(ps->s.f) // ps, &(ps->s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM // // map(from: ps->p) // ps, &(ps->p), sizeof(double*), TARGET_PARAM | FROM // // map(to: ps->p[:22]) // ps, &(ps->p), sizeof(double*), TARGET_PARAM // ps, &(ps->p), sizeof(double*), MEMBER_OF(1) // &(ps->p), &(ps->p[0]), 22*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | TO // // map(ps->ps) // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM | TO | FROM // // map(from: ps->ps->s.i) // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1) // &(ps->ps), &(ps->ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM // // map(from: ps->ps->ps) // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1) // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | FROM // // map(ps->ps->ps->ps) // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1) // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM // // map(to: ps->ps->ps->s.f[:22]) // ps, 
&(ps->ps), sizeof(S2*), TARGET_PARAM // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1) // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO // // map(to: s.f[:22]) map(from: s.p[:33]) // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1) + // sizeof(double*) (**), TARGET_PARAM // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | TO // &s, &(s.p), sizeof(double*), MEMBER_OF(1) // &(s.p), &(s.p[0]), 33*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | FROM // (*) allocate contiguous space needed to fit all mapped members even if // we allocate space for members not mapped (in this example, // s.f[22..49] and s.s are not mapped, yet we must allocate space for // them as well because they fall between &s.f[0] and &s.p) // // map(from: s.f[:22]) map(to: ps->p[:33]) // &s, &(s.f[0]), 22*sizeof(float), TARGET_PARAM | FROM // ps, &(ps->p), sizeof(S2*), TARGET_PARAM // ps, &(ps->p), sizeof(double*), MEMBER_OF(2) (*) // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(2) | PTR_AND_OBJ | TO // (*) the struct this entry pertains to is the 2nd element in the list of // arguments, hence MEMBER_OF(2) // // map(from: s.f[:22], s.s) map(to: ps->p[:33]) // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1), TARGET_PARAM // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | FROM // &s, &(s.s), sizeof(struct S1), MEMBER_OF(1) | FROM // ps, &(ps->p), sizeof(S2*), TARGET_PARAM // ps, &(ps->p), sizeof(double*), MEMBER_OF(4) (*) // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(4) | PTR_AND_OBJ | TO // (*) the struct this entry pertains to is the 4th element in the list // of arguments, hence MEMBER_OF(4) // Track if the map information being generated is the first for a capture. bool IsCaptureFirstInfo = IsFirstComponentList; // When the variable is on a declare target link or in a to clause with // unified memory, a reference is needed to hold the host/device address // of the variable. 
bool RequiresReference = false; // Scan the components from the base to the complete expression. auto CI = Components.rbegin(); auto CE = Components.rend(); auto I = CI; // Track if the map information being generated is the first for a list of // components. bool IsExpressionFirstInfo = true; bool FirstPointerInComplexData = false; Address BP = Address::invalid(); const Expr *AssocExpr = I->getAssociatedExpression(); const auto *AE = dyn_cast(AssocExpr); const auto *OASE = dyn_cast(AssocExpr); const auto *OAShE = dyn_cast(AssocExpr); if (isa(AssocExpr)) { // The base is the 'this' pointer. The content of the pointer is going // to be the base of the field being mapped. BP = CGF.LoadCXXThisAddress(); } else if ((AE && isa(AE->getBase()->IgnoreParenImpCasts())) || (OASE && isa(OASE->getBase()->IgnoreParenImpCasts()))) { BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF); } else if (OAShE && isa(OAShE->getBase()->IgnoreParenCasts())) { BP = Address( CGF.EmitScalarExpr(OAShE->getBase()), CGF.getContext().getTypeAlignInChars(OAShE->getBase()->getType())); } else { // The base is the reference to the variable. // BP = &Var. BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF); if (const auto *VD = dyn_cast_or_null(I->getAssociatedDeclaration())) { if (llvm::Optional Res = OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) { if ((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To && CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) { RequiresReference = true; BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD); } } } // If the variable is a pointer and is being dereferenced (i.e. is not // the last component), the base has to be the pointer itself, not its // reference. References are ignored for mapping purposes. 
QualType Ty = I->getAssociatedDeclaration()->getType().getNonReferenceType(); if (Ty->isAnyPointerType() && std::next(I) != CE) { // No need to generate individual map information for the pointer, it // can be associated with the combined storage if shared memory mode is // active or the base declaration is not global variable. const auto *VD = dyn_cast(I->getAssociatedDeclaration()); if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() || !VD || VD->hasLocalStorage()) BP = CGF.EmitLoadOfPointer(BP, Ty->castAs()); else FirstPointerInComplexData = true; ++I; } } // Track whether a component of the list should be marked as MEMBER_OF some // combined entry (for partial structs). Only the first PTR_AND_OBJ entry // in a component list should be marked as MEMBER_OF, all subsequent entries // do not belong to the base struct. E.g. // struct S2 s; // s.ps->ps->ps->f[:] // (1) (2) (3) (4) // ps(1) is a member pointer, ps(2) is a pointee of ps(1), so it is a // PTR_AND_OBJ entry; the PTR is ps(1), so MEMBER_OF the base struct. ps(3) // is the pointee of ps(2) which is not member of struct s, so it should not // be marked as such (it is still PTR_AND_OBJ). // The variable is initialized to false so that PTR_AND_OBJ entries which // are not struct members are not considered (e.g. array of pointers to // data). bool ShouldBeMemberOf = false; // Variable keeping track of whether or not we have encountered a component // in the component list which is a member expression. Useful when we have a // pointer or a final array section, in which case it is the previous // component in the list which tells us whether we have a member expression. // E.g. X.f[:] // While processing the final array section "[:]" it is "f" which tells us // whether we are dealing with a member of a declared struct. const MemberExpr *EncounteredME = nullptr; // Track for the total number of dimension. Start from one for the dummy // dimension. 
uint64_t DimSize = 1; bool IsNonContiguous = CombinedInfo.NonContigInfo.IsNonContiguous; bool IsPrevMemberReference = false; for (; I != CE; ++I) { // If the current component is member of a struct (parent struct) mark it. if (!EncounteredME) { EncounteredME = dyn_cast(I->getAssociatedExpression()); // If we encounter a PTR_AND_OBJ entry from now on it should be marked // as MEMBER_OF the parent struct. if (EncounteredME) { ShouldBeMemberOf = true; // Do not emit as complex pointer if this is actually not array-like // expression. if (FirstPointerInComplexData) { QualType Ty = std::prev(I) ->getAssociatedDeclaration() ->getType() .getNonReferenceType(); BP = CGF.EmitLoadOfPointer(BP, Ty->castAs()); FirstPointerInComplexData = false; } } } auto Next = std::next(I); // We need to generate the addresses and sizes if this is the last // component, if the component is a pointer or if it is an array section // whose length can't be proved to be one. If this is a pointer, it // becomes the base address for the following components. // A final array section, is one whose length can't be proved to be one. // If the map item is non-contiguous then we don't treat any array section // as final array section. bool IsFinalArraySection = !IsNonContiguous && isFinalArraySectionExpression(I->getAssociatedExpression()); // If we have a declaration for the mapping use that, otherwise use // the base declaration of the map clause. const ValueDecl *MapDecl = (I->getAssociatedDeclaration()) ? I->getAssociatedDeclaration() : BaseDecl; MapExpr = (I->getAssociatedExpression()) ? I->getAssociatedExpression() : MapExpr; // Get information on whether the element is a pointer. Have to do a // special treatment for array sections given that they are built-in // types. 
const auto *OASE = dyn_cast(I->getAssociatedExpression()); const auto *OAShE = dyn_cast(I->getAssociatedExpression()); const auto *UO = dyn_cast(I->getAssociatedExpression()); const auto *BO = dyn_cast(I->getAssociatedExpression()); bool IsPointer = OAShE || (OASE && OMPArraySectionExpr::getBaseOriginalType(OASE) .getCanonicalType() ->isAnyPointerType()) || I->getAssociatedExpression()->getType()->isAnyPointerType(); bool IsMemberReference = isa(I->getAssociatedExpression()) && MapDecl && MapDecl->getType()->isLValueReferenceType(); bool IsNonDerefPointer = IsPointer && !UO && !BO && !IsNonContiguous; if (OASE) ++DimSize; if (Next == CE || IsMemberReference || IsNonDerefPointer || IsFinalArraySection) { // If this is not the last component, we expect the pointer to be // associated with an array expression or member expression. assert((Next == CE || isa(Next->getAssociatedExpression()) || isa(Next->getAssociatedExpression()) || isa(Next->getAssociatedExpression()) || isa(Next->getAssociatedExpression()) || isa(Next->getAssociatedExpression()) || isa(Next->getAssociatedExpression())) && "Unexpected expression"); Address LB = Address::invalid(); Address LowestElem = Address::invalid(); auto &&EmitMemberExprBase = [](CodeGenFunction &CGF, const MemberExpr *E) { const Expr *BaseExpr = E->getBase(); // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a // scalar. 
LValue BaseLV; if (E->isArrow()) { LValueBaseInfo BaseInfo; TBAAAccessInfo TBAAInfo; Address Addr = CGF.EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo); QualType PtrTy = BaseExpr->getType()->getPointeeType(); BaseLV = CGF.MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo); } else { BaseLV = CGF.EmitOMPSharedLValue(BaseExpr); } return BaseLV; }; if (OAShE) { LowestElem = LB = Address(CGF.EmitScalarExpr(OAShE->getBase()), CGF.getContext().getTypeAlignInChars( OAShE->getBase()->getType())); } else if (IsMemberReference) { const auto *ME = cast(I->getAssociatedExpression()); LValue BaseLVal = EmitMemberExprBase(CGF, ME); LowestElem = CGF.EmitLValueForFieldInitialization( BaseLVal, cast(MapDecl)) .getAddress(CGF); LB = CGF.EmitLoadOfReferenceLValue(LowestElem, MapDecl->getType()) .getAddress(CGF); } else { LowestElem = LB = CGF.EmitOMPSharedLValue(I->getAssociatedExpression()) .getAddress(CGF); } // If this component is a pointer inside the base struct then we don't // need to create any entry for it - it will be combined with the object // it is pointing to into a single PTR_AND_OBJ entry. bool IsMemberPointerOrAddr = EncounteredME && (((IsPointer || ForDeviceAddr) && I->getAssociatedExpression() == EncounteredME) || (IsPrevMemberReference && !IsPointer) || (IsMemberReference && Next != CE && !Next->getAssociatedExpression()->getType()->isPointerType())); if (!OverlappedElements.empty() && Next == CE) { // Handle base element with the info for overlapped elements. assert(!PartialStruct.Base.isValid() && "The base element is set."); assert(!IsPointer && "Unexpected base element with the pointer type."); // Mark the whole struct as the struct that requires allocation on the // device. 
PartialStruct.LowestElem = {0, LowestElem}; CharUnits TypeSize = CGF.getContext().getTypeSizeInChars( I->getAssociatedExpression()->getType()); Address HB = CGF.Builder.CreateConstGEP( CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(LowestElem, CGF.VoidPtrTy), TypeSize.getQuantity() - 1); PartialStruct.HighestElem = { std::numeric_limits::max(), HB}; PartialStruct.Base = BP; PartialStruct.LB = LB; assert( PartialStruct.PreliminaryMapData.BasePointers.empty() && "Overlapped elements must be used only once for the variable."); std::swap(PartialStruct.PreliminaryMapData, CombinedInfo); // Emit data for non-overlapped data. OpenMPOffloadMappingFlags Flags = OMP_MAP_MEMBER_OF | getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit, /*AddPtrFlag=*/false, /*AddIsTargetParamFlag=*/false, IsNonContiguous); llvm::Value *Size = nullptr; // Do bitcopy of all non-overlapped structure elements. for (OMPClauseMappableExprCommon::MappableExprComponentListRef Component : OverlappedElements) { Address ComponentLB = Address::invalid(); for (const OMPClauseMappableExprCommon::MappableComponent &MC : Component) { if (const ValueDecl *VD = MC.getAssociatedDeclaration()) { const auto *FD = dyn_cast(VD); if (FD && FD->getType()->isLValueReferenceType()) { const auto *ME = cast(MC.getAssociatedExpression()); LValue BaseLVal = EmitMemberExprBase(CGF, ME); ComponentLB = CGF.EmitLValueForFieldInitialization(BaseLVal, FD) .getAddress(CGF); } else { ComponentLB = CGF.EmitOMPSharedLValue(MC.getAssociatedExpression()) .getAddress(CGF); } Size = CGF.Builder.CreatePtrDiff( CGF.EmitCastToVoidPtr(ComponentLB.getPointer()), CGF.EmitCastToVoidPtr(LB.getPointer())); break; } } assert(Size && "Failed to determine structure size"); CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr); CombinedInfo.BasePointers.push_back(BP.getPointer()); CombinedInfo.Pointers.push_back(LB.getPointer()); CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast( Size, CGF.Int64Ty, /*isSigned=*/true)); 
CombinedInfo.Types.push_back(Flags); CombinedInfo.Mappers.push_back(nullptr); CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize : 1); LB = CGF.Builder.CreateConstGEP(ComponentLB, 1); } CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr); CombinedInfo.BasePointers.push_back(BP.getPointer()); CombinedInfo.Pointers.push_back(LB.getPointer()); Size = CGF.Builder.CreatePtrDiff( CGF.Builder.CreateConstGEP(HB, 1).getPointer(), CGF.EmitCastToVoidPtr(LB.getPointer())); CombinedInfo.Sizes.push_back( CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true)); CombinedInfo.Types.push_back(Flags); CombinedInfo.Mappers.push_back(nullptr); CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize : 1); break; } llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression()); if (!IsMemberPointerOrAddr || (Next == CE && MapType != OMPC_MAP_unknown)) { CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr); CombinedInfo.BasePointers.push_back(BP.getPointer()); CombinedInfo.Pointers.push_back(LB.getPointer()); CombinedInfo.Sizes.push_back( CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true)); CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize : 1); // If Mapper is valid, the last component inherits the mapper. bool HasMapper = Mapper && Next == CE; CombinedInfo.Mappers.push_back(HasMapper ? Mapper : nullptr); // We need to add a pointer flag for each map that comes from the // same expression except for the first one. We also need to signal // this map is the first one that relates with the current capture // (there is a set of entries for each capture). 
OpenMPOffloadMappingFlags Flags = getMapTypeBits( MapType, MapModifiers, MotionModifiers, IsImplicit, !IsExpressionFirstInfo || RequiresReference || FirstPointerInComplexData || IsMemberReference, IsCaptureFirstInfo && !RequiresReference, IsNonContiguous); if (!IsExpressionFirstInfo || IsMemberReference) { // If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well, // then we reset the TO/FROM/ALWAYS/DELETE/CLOSE flags. if (IsPointer || (IsMemberReference && Next != CE)) Flags &= ~(OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS | OMP_MAP_DELETE | OMP_MAP_CLOSE); if (ShouldBeMemberOf) { // Set placeholder value MEMBER_OF=FFFF to indicate that the flag // should be later updated with the correct value of MEMBER_OF. Flags |= OMP_MAP_MEMBER_OF; // From now on, all subsequent PTR_AND_OBJ entries should not be // marked as MEMBER_OF. ShouldBeMemberOf = false; } } CombinedInfo.Types.push_back(Flags); } // If we have encountered a member expression so far, keep track of the // mapped member. If the parent is "*this", then the value declaration // is nullptr. if (EncounteredME) { const auto *FD = cast(EncounteredME->getMemberDecl()); unsigned FieldIndex = FD->getFieldIndex(); // Update info about the lowest and highest elements for this struct if (!PartialStruct.Base.isValid()) { PartialStruct.LowestElem = {FieldIndex, LowestElem}; if (IsFinalArraySection) { Address HB = CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false) .getAddress(CGF); PartialStruct.HighestElem = {FieldIndex, HB}; } else { PartialStruct.HighestElem = {FieldIndex, LowestElem}; } PartialStruct.Base = BP; PartialStruct.LB = BP; } else if (FieldIndex < PartialStruct.LowestElem.first) { PartialStruct.LowestElem = {FieldIndex, LowestElem}; } else if (FieldIndex > PartialStruct.HighestElem.first) { PartialStruct.HighestElem = {FieldIndex, LowestElem}; } } // Need to emit combined struct for array sections. 
if (IsFinalArraySection || IsNonContiguous) PartialStruct.IsArraySection = true; // If we have a final array section, we are done with this expression. if (IsFinalArraySection) break; // The pointer becomes the base for the next element. if (Next != CE) BP = IsMemberReference ? LowestElem : LB; IsExpressionFirstInfo = false; IsCaptureFirstInfo = false; FirstPointerInComplexData = false; IsPrevMemberReference = IsMemberReference; } else if (FirstPointerInComplexData) { QualType Ty = Components.rbegin() ->getAssociatedDeclaration() ->getType() .getNonReferenceType(); BP = CGF.EmitLoadOfPointer(BP, Ty->castAs()); FirstPointerInComplexData = false; } } // If ran into the whole component - allocate the space for the whole // record. if (!EncounteredME) PartialStruct.HasCompleteRecord = true; if (!IsNonContiguous) return; const ASTContext &Context = CGF.getContext(); // For supporting stride in array section, we need to initialize the first // dimension size as 1, first offset as 0, and first count as 1 MapValuesArrayTy CurOffsets = {llvm::ConstantInt::get(CGF.CGM.Int64Ty, 0)}; MapValuesArrayTy CurCounts = {llvm::ConstantInt::get(CGF.CGM.Int64Ty, 1)}; MapValuesArrayTy CurStrides; MapValuesArrayTy DimSizes{llvm::ConstantInt::get(CGF.CGM.Int64Ty, 1)}; uint64_t ElementTypeSize; // Collect Size information for each dimension and get the element size as // the first Stride. For example, for `int arr[10][10]`, the DimSizes // should be [10, 10] and the first stride is 4 btyes. for (const OMPClauseMappableExprCommon::MappableComponent &Component : Components) { const Expr *AssocExpr = Component.getAssociatedExpression(); const auto *OASE = dyn_cast(AssocExpr); if (!OASE) continue; QualType Ty = OMPArraySectionExpr::getBaseOriginalType(OASE->getBase()); auto *CAT = Context.getAsConstantArrayType(Ty); auto *VAT = Context.getAsVariableArrayType(Ty); // We need all the dimension size except for the last dimension. 
assert((VAT || CAT || &Component == &*Components.begin()) && "Should be either ConstantArray or VariableArray if not the " "first Component"); // Get element size if CurStrides is empty. if (CurStrides.empty()) { const Type *ElementType = nullptr; if (CAT) ElementType = CAT->getElementType().getTypePtr(); else if (VAT) ElementType = VAT->getElementType().getTypePtr(); else assert(&Component == &*Components.begin() && "Only expect pointer (non CAT or VAT) when this is the " "first Component"); // If ElementType is null, then it means the base is a pointer // (neither CAT nor VAT) and we'll attempt to get ElementType again // for next iteration. if (ElementType) { // For the case that having pointer as base, we need to remove one // level of indirection. if (&Component != &*Components.begin()) ElementType = ElementType->getPointeeOrArrayElementType(); ElementTypeSize = Context.getTypeSizeInChars(ElementType).getQuantity(); CurStrides.push_back( llvm::ConstantInt::get(CGF.Int64Ty, ElementTypeSize)); } } // Get dimension value except for the last dimension since we don't need // it. if (DimSizes.size() < Components.size() - 1) { if (CAT) DimSizes.push_back(llvm::ConstantInt::get( CGF.Int64Ty, CAT->getSize().getZExtValue())); else if (VAT) DimSizes.push_back(CGF.Builder.CreateIntCast( CGF.EmitScalarExpr(VAT->getSizeExpr()), CGF.Int64Ty, /*IsSigned=*/false)); } } // Skip the dummy dimension since we have already have its information. auto DI = DimSizes.begin() + 1; // Product of dimension. llvm::Value *DimProd = llvm::ConstantInt::get(CGF.CGM.Int64Ty, ElementTypeSize); // Collect info for non-contiguous. Notice that offset, count, and stride // are only meaningful for array-section, so we insert a null for anything // other than array-section. // Also, the size of offset, count, and stride are not the same as // pointers, base_pointers, sizes, or dims. 
Instead, the size of offset, // count, and stride are the same as the number of non-contiguous // declaration in target update to/from clause. for (const OMPClauseMappableExprCommon::MappableComponent &Component : Components) { const Expr *AssocExpr = Component.getAssociatedExpression(); if (const auto *AE = dyn_cast(AssocExpr)) { llvm::Value *Offset = CGF.Builder.CreateIntCast( CGF.EmitScalarExpr(AE->getIdx()), CGF.Int64Ty, /*isSigned=*/false); CurOffsets.push_back(Offset); CurCounts.push_back(llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/1)); CurStrides.push_back(CurStrides.back()); continue; } const auto *OASE = dyn_cast(AssocExpr); if (!OASE) continue; // Offset const Expr *OffsetExpr = OASE->getLowerBound(); llvm::Value *Offset = nullptr; if (!OffsetExpr) { // If offset is absent, then we just set it to zero. Offset = llvm::ConstantInt::get(CGF.Int64Ty, 0); } else { Offset = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(OffsetExpr), CGF.Int64Ty, /*isSigned=*/false); } CurOffsets.push_back(Offset); // Count const Expr *CountExpr = OASE->getLength(); llvm::Value *Count = nullptr; if (!CountExpr) { // In Clang, once a high dimension is an array section, we construct all // the lower dimension as array section, however, for case like // arr[0:2][2], Clang construct the inner dimension as an array section // but it actually is not in an array section form according to spec. if (!OASE->getColonLocFirst().isValid() && !OASE->getColonLocSecond().isValid()) { Count = llvm::ConstantInt::get(CGF.Int64Ty, 1); } else { // OpenMP 5.0, 2.1.5 Array Sections, Description. // When the length is absent it defaults to ⌈(size − // lower-bound)/stride⌉, where size is the size of the array // dimension. const Expr *StrideExpr = OASE->getStride(); llvm::Value *Stride = StrideExpr ? 
CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(StrideExpr), CGF.Int64Ty, /*isSigned=*/false) : nullptr; if (Stride) Count = CGF.Builder.CreateUDiv( CGF.Builder.CreateNUWSub(*DI, Offset), Stride); else Count = CGF.Builder.CreateNUWSub(*DI, Offset); } } else { Count = CGF.EmitScalarExpr(CountExpr); } Count = CGF.Builder.CreateIntCast(Count, CGF.Int64Ty, /*isSigned=*/false); CurCounts.push_back(Count); // Stride_n' = Stride_n * (D_0 * D_1 ... * D_n-1) * Unit size // Take `int arr[5][5][5]` and `arr[0:2:2][1:2:1][0:2:2]` as an example: // Offset Count Stride // D0 0 1 4 (int) <- dummy dimension // D1 0 2 8 (2 * (1) * 4) // D2 1 2 20 (1 * (1 * 5) * 4) // D3 0 2 200 (2 * (1 * 5 * 4) * 4) const Expr *StrideExpr = OASE->getStride(); llvm::Value *Stride = StrideExpr ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(StrideExpr), CGF.Int64Ty, /*isSigned=*/false) : nullptr; DimProd = CGF.Builder.CreateNUWMul(DimProd, *(DI - 1)); if (Stride) CurStrides.push_back(CGF.Builder.CreateNUWMul(DimProd, Stride)); else CurStrides.push_back(DimProd); if (DI != DimSizes.end()) ++DI; } CombinedInfo.NonContigInfo.Offsets.push_back(CurOffsets); CombinedInfo.NonContigInfo.Counts.push_back(CurCounts); CombinedInfo.NonContigInfo.Strides.push_back(CurStrides); } /// Return the adjusted map modifiers if the declaration a capture refers to /// appears in a first-private clause. This is expected to be used only with /// directives that start with 'target'. MappableExprsHandler::OpenMPOffloadMappingFlags getMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap) const { assert(Cap.capturesVariable() && "Expected capture by reference only!"); // A first private variable captured by reference will use only the // 'private ptr' and 'map to' flag. Return the right flags if the captured // declaration is known as first-private in this handler. 
if (FirstPrivateDecls.count(Cap.getCapturedVar())) { if (Cap.getCapturedVar()->getType()->isAnyPointerType()) return MappableExprsHandler::OMP_MAP_TO | MappableExprsHandler::OMP_MAP_PTR_AND_OBJ; return MappableExprsHandler::OMP_MAP_PRIVATE | MappableExprsHandler::OMP_MAP_TO; } return MappableExprsHandler::OMP_MAP_TO | MappableExprsHandler::OMP_MAP_FROM; } static OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position) { // Rotate by getFlagMemberOffset() bits. return static_cast(((uint64_t)Position + 1) << getFlagMemberOffset()); } static void setCorrectMemberOfFlag(OpenMPOffloadMappingFlags &Flags, OpenMPOffloadMappingFlags MemberOfFlag) { // If the entry is PTR_AND_OBJ but has not been marked with the special // placeholder value 0xFFFF in the MEMBER_OF field, then it should not be // marked as MEMBER_OF. if ((Flags & OMP_MAP_PTR_AND_OBJ) && ((Flags & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF)) return; // Reset the placeholder value to prepare the flag for the assignment of the // proper MEMBER_OF value. Flags &= ~OMP_MAP_MEMBER_OF; Flags |= MemberOfFlag; } void getPlainLayout(const CXXRecordDecl *RD, llvm::SmallVectorImpl &Layout, bool AsBase) const { const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(RD); llvm::StructType *St = AsBase ? RL.getBaseSubobjectLLVMType() : RL.getLLVMType(); unsigned NumElements = St->getNumElements(); llvm::SmallVector< llvm::PointerUnion, 4> RecordLayout(NumElements); // Fill bases. for (const auto &I : RD->bases()) { if (I.isVirtual()) continue; const auto *Base = I.getType()->getAsCXXRecordDecl(); // Ignore empty bases. if (Base->isEmpty() || CGF.getContext() .getASTRecordLayout(Base) .getNonVirtualSize() .isZero()) continue; unsigned FieldIndex = RL.getNonVirtualBaseLLVMFieldNo(Base); RecordLayout[FieldIndex] = Base; } // Fill in virtual bases. for (const auto &I : RD->vbases()) { const auto *Base = I.getType()->getAsCXXRecordDecl(); // Ignore empty bases. 
if (Base->isEmpty()) continue; unsigned FieldIndex = RL.getVirtualBaseIndex(Base); if (RecordLayout[FieldIndex]) continue; RecordLayout[FieldIndex] = Base; } // Fill in all the fields. assert(!RD->isUnion() && "Unexpected union."); for (const auto *Field : RD->fields()) { // Fill in non-bitfields. (Bitfields always use a zero pattern, which we // will fill in later.) if (!Field->isBitField() && !Field->isZeroSize(CGF.getContext())) { unsigned FieldIndex = RL.getLLVMFieldNo(Field); RecordLayout[FieldIndex] = Field; } } for (const llvm::PointerUnion &Data : RecordLayout) { if (Data.isNull()) continue; if (const auto *Base = Data.dyn_cast()) getPlainLayout(Base, Layout, /*AsBase=*/true); else Layout.push_back(Data.get()); } } /// Generate all the base pointers, section pointers, sizes, map types, and /// mappers for the extracted mappable expressions (all included in \a /// CombinedInfo). Also, for each item that relates with a device pointer, a /// pair of the relevant declaration and index where it occurs is appended to /// the device pointers info array. void generateAllInfoForClauses( ArrayRef Clauses, MapCombinedInfoTy &CombinedInfo, const llvm::DenseSet> &SkipVarSet = llvm::DenseSet>()) const { // We have to process the component lists that relate with the same // declaration in a single chunk so that we can generate the map flags // correctly. Therefore, we organize all lists in a map. enum MapKind { Present, Allocs, Other, Total }; llvm::MapVector, SmallVector, 4>> Info; // Helper function to fill the information map for the different supported // clauses. 
auto &&InfoGen = [&Info, &SkipVarSet]( const ValueDecl *D, MapKind Kind, OMPClauseMappableExprCommon::MappableExprComponentListRef L, OpenMPMapClauseKind MapType, ArrayRef MapModifiers, ArrayRef MotionModifiers, bool ReturnDevicePointer, bool IsImplicit, const ValueDecl *Mapper, const Expr *VarRef = nullptr, bool ForDeviceAddr = false) { if (SkipVarSet.contains(D)) return; auto It = Info.find(D); if (It == Info.end()) It = Info .insert(std::make_pair( D, SmallVector, 4>(Total))) .first; It->second[Kind].emplace_back( L, MapType, MapModifiers, MotionModifiers, ReturnDevicePointer, IsImplicit, Mapper, VarRef, ForDeviceAddr); }; for (const auto *Cl : Clauses) { const auto *C = dyn_cast(Cl); if (!C) continue; MapKind Kind = Other; if (!C->getMapTypeModifiers().empty() && llvm::any_of(C->getMapTypeModifiers(), [](OpenMPMapModifierKind K) { return K == OMPC_MAP_MODIFIER_present; })) Kind = Present; else if (C->getMapType() == OMPC_MAP_alloc) Kind = Allocs; const auto *EI = C->getVarRefs().begin(); for (const auto L : C->component_lists()) { const Expr *E = (C->getMapLoc().isValid()) ? 
*EI : nullptr; InfoGen(std::get<0>(L), Kind, std::get<1>(L), C->getMapType(), C->getMapTypeModifiers(), llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L), E); ++EI; } } for (const auto *Cl : Clauses) { const auto *C = dyn_cast(Cl); if (!C) continue; MapKind Kind = Other; if (!C->getMotionModifiers().empty() && llvm::any_of(C->getMotionModifiers(), [](OpenMPMotionModifierKind K) { return K == OMPC_MOTION_MODIFIER_present; })) Kind = Present; const auto *EI = C->getVarRefs().begin(); for (const auto L : C->component_lists()) { InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_to, llvm::None, C->getMotionModifiers(), /*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L), *EI); ++EI; } } for (const auto *Cl : Clauses) { const auto *C = dyn_cast(Cl); if (!C) continue; MapKind Kind = Other; if (!C->getMotionModifiers().empty() && llvm::any_of(C->getMotionModifiers(), [](OpenMPMotionModifierKind K) { return K == OMPC_MOTION_MODIFIER_present; })) Kind = Present; const auto *EI = C->getVarRefs().begin(); for (const auto L : C->component_lists()) { InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_from, llvm::None, C->getMotionModifiers(), /*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L), *EI); ++EI; } } // Look at the use_device_ptr clause information and mark the existing map // entries as such. If there is no map information for an entry in the // use_device_ptr list, we create one with map type 'alloc' and zero size // section. It is the user fault if that was not mapped before. If there is // no map information and the pointer is a struct member, then we defer the // emission of that entry until the whole struct has been processed. 
llvm::MapVector, SmallVector> DeferredInfo; MapCombinedInfoTy UseDevicePtrCombinedInfo; for (const auto *Cl : Clauses) { const auto *C = dyn_cast(Cl); if (!C) continue; for (const auto L : C->component_lists()) { OMPClauseMappableExprCommon::MappableExprComponentListRef Components = std::get<1>(L); assert(!Components.empty() && "Not expecting empty list of components!"); const ValueDecl *VD = Components.back().getAssociatedDeclaration(); VD = cast(VD->getCanonicalDecl()); const Expr *IE = Components.back().getAssociatedExpression(); // If the first component is a member expression, we have to look into // 'this', which maps to null in the map of map information. Otherwise // look directly for the information. auto It = Info.find(isa(IE) ? nullptr : VD); // We potentially have map information for this declaration already. // Look for the first set of components that refer to it. if (It != Info.end()) { bool Found = false; for (auto &Data : It->second) { auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) { return MI.Components.back().getAssociatedDeclaration() == VD; }); // If we found a map entry, signal that the pointer has to be // returned and move on to the next declaration. Exclude cases where // the base pointer is mapped as array subscript, array section or // array shaping. The base address is passed as a pointer to base in // this case and cannot be used as a base for use_device_ptr list // item. 
if (CI != Data.end()) { auto PrevCI = std::next(CI->Components.rbegin()); const auto *VarD = dyn_cast(VD); if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() || isa(IE) || !VD->getType().getNonReferenceType()->isPointerType() || PrevCI == CI->Components.rend() || isa(PrevCI->getAssociatedExpression()) || !VarD || VarD->hasLocalStorage()) { CI->ReturnDevicePointer = true; Found = true; break; } } } if (Found) continue; } // We didn't find any match in our map information - generate a zero // size array section - if the pointer is a struct member we defer this // action until the whole struct has been processed. if (isa(IE)) { // Insert the pointer into Info to be processed by // generateInfoForComponentList. Because it is a member pointer // without a pointee, no entry will be generated for it, therefore // we need to generate one after the whole struct has been processed. // Nonetheless, generateInfoForComponentList must be called to take // the pointer into account for the calculation of the range of the // partial struct. InfoGen(nullptr, Other, Components, OMPC_MAP_unknown, llvm::None, llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(), nullptr); DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/false); } else { llvm::Value *Ptr = CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc()); UseDevicePtrCombinedInfo.Exprs.push_back(VD); UseDevicePtrCombinedInfo.BasePointers.emplace_back(Ptr, VD); UseDevicePtrCombinedInfo.Pointers.push_back(Ptr); UseDevicePtrCombinedInfo.Sizes.push_back( llvm::Constant::getNullValue(CGF.Int64Ty)); UseDevicePtrCombinedInfo.Types.push_back(OMP_MAP_RETURN_PARAM); UseDevicePtrCombinedInfo.Mappers.push_back(nullptr); } } } // Look at the use_device_addr clause information and mark the existing map // entries as such. If there is no map information for an entry in the // use_device_addr list, we create one with map type 'alloc' and zero size // section. It is the user fault if that was not mapped before. 
If there is // no map information and the pointer is a struct member, then we defer the // emission of that entry until the whole struct has been processed. llvm::SmallDenseSet, 4> Processed; for (const auto *Cl : Clauses) { const auto *C = dyn_cast(Cl); if (!C) continue; for (const auto L : C->component_lists()) { assert(!std::get<1>(L).empty() && "Not expecting empty list of components!"); const ValueDecl *VD = std::get<1>(L).back().getAssociatedDeclaration(); if (!Processed.insert(VD).second) continue; VD = cast(VD->getCanonicalDecl()); const Expr *IE = std::get<1>(L).back().getAssociatedExpression(); // If the first component is a member expression, we have to look into // 'this', which maps to null in the map of map information. Otherwise // look directly for the information. auto It = Info.find(isa(IE) ? nullptr : VD); // We potentially have map information for this declaration already. // Look for the first set of components that refer to it. if (It != Info.end()) { bool Found = false; for (auto &Data : It->second) { auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) { return MI.Components.back().getAssociatedDeclaration() == VD; }); // If we found a map entry, signal that the pointer has to be // returned and move on to the next declaration. if (CI != Data.end()) { CI->ReturnDevicePointer = true; Found = true; break; } } if (Found) continue; } // We didn't find any match in our map information - generate a zero // size array section - if the pointer is a struct member we defer this // action until the whole struct has been processed. if (isa(IE)) { // Insert the pointer into Info to be processed by // generateInfoForComponentList. Because it is a member pointer // without a pointee, no entry will be generated for it, therefore // we need to generate one after the whole struct has been processed. // Nonetheless, generateInfoForComponentList must be called to take // the pointer into account for the calculation of the range of the // partial struct. 
InfoGen(nullptr, Other, std::get<1>(L), OMPC_MAP_unknown, llvm::None, llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(), nullptr, nullptr, /*ForDeviceAddr=*/true); DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/true); } else { llvm::Value *Ptr; if (IE->isGLValue()) Ptr = CGF.EmitLValue(IE).getPointer(CGF); else Ptr = CGF.EmitScalarExpr(IE); CombinedInfo.Exprs.push_back(VD); CombinedInfo.BasePointers.emplace_back(Ptr, VD); CombinedInfo.Pointers.push_back(Ptr); CombinedInfo.Sizes.push_back( llvm::Constant::getNullValue(CGF.Int64Ty)); CombinedInfo.Types.push_back(OMP_MAP_RETURN_PARAM); CombinedInfo.Mappers.push_back(nullptr); } } } for (const auto &Data : Info) { StructRangeInfoTy PartialStruct; // Temporary generated information. MapCombinedInfoTy CurInfo; const Decl *D = Data.first; const ValueDecl *VD = cast_or_null(D); for (const auto &M : Data.second) { for (const MapInfo &L : M) { assert(!L.Components.empty() && "Not expecting declaration with no component lists."); // Remember the current base pointer index. unsigned CurrentBasePointersIdx = CurInfo.BasePointers.size(); CurInfo.NonContigInfo.IsNonContiguous = L.Components.back().isNonContiguous(); generateInfoForComponentList( L.MapType, L.MapModifiers, L.MotionModifiers, L.Components, CurInfo, PartialStruct, /*IsFirstComponentList=*/false, L.IsImplicit, L.Mapper, L.ForDeviceAddr, VD, L.VarRef); // If this entry relates with a device pointer, set the relevant // declaration and add the 'return pointer' flag. 
if (L.ReturnDevicePointer) { assert(CurInfo.BasePointers.size() > CurrentBasePointersIdx && "Unexpected number of mapped base pointers."); const ValueDecl *RelevantVD = L.Components.back().getAssociatedDeclaration(); assert(RelevantVD && "No relevant declaration related with device pointer??"); CurInfo.BasePointers[CurrentBasePointersIdx].setDevicePtrDecl( RelevantVD); CurInfo.Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM; } } } // Append any pending zero-length pointers which are struct members and // used with use_device_ptr or use_device_addr. auto CI = DeferredInfo.find(Data.first); if (CI != DeferredInfo.end()) { for (const DeferredDevicePtrEntryTy &L : CI->second) { llvm::Value *BasePtr; llvm::Value *Ptr; if (L.ForDeviceAddr) { if (L.IE->isGLValue()) Ptr = this->CGF.EmitLValue(L.IE).getPointer(CGF); else Ptr = this->CGF.EmitScalarExpr(L.IE); BasePtr = Ptr; // Entry is RETURN_PARAM. Also, set the placeholder value // MEMBER_OF=FFFF so that the entry is later updated with the // correct value of MEMBER_OF. CurInfo.Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_MEMBER_OF); } else { BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF); Ptr = this->CGF.EmitLoadOfScalar(this->CGF.EmitLValue(L.IE), L.IE->getExprLoc()); // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the // placeholder value MEMBER_OF=FFFF so that the entry is later // updated with the correct value of MEMBER_OF. CurInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM | OMP_MAP_MEMBER_OF); } CurInfo.Exprs.push_back(L.VD); CurInfo.BasePointers.emplace_back(BasePtr, L.VD); CurInfo.Pointers.push_back(Ptr); CurInfo.Sizes.push_back( llvm::Constant::getNullValue(this->CGF.Int64Ty)); CurInfo.Mappers.push_back(nullptr); } } // If there is an entry in PartialStruct it means we have a struct with // individual members mapped. Emit an extra combined entry. 
if (PartialStruct.Base.isValid()) { CurInfo.NonContigInfo.Dims.push_back(0); emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct, VD); } // We need to append the results of this capture to what we already // have. CombinedInfo.append(CurInfo); } // Append data for use_device_ptr clauses. CombinedInfo.append(UseDevicePtrCombinedInfo); } public: MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF) : CurDir(&Dir), CGF(CGF) { // Extract firstprivate clause information. for (const auto *C : Dir.getClausesOfKind()) for (const auto *D : C->varlists()) FirstPrivateDecls.try_emplace( cast(cast(D)->getDecl()), C->isImplicit()); // Extract implicit firstprivates from uses_allocators clauses. for (const auto *C : Dir.getClausesOfKind()) { for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) { OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I); if (const auto *DRE = dyn_cast_or_null(D.AllocatorTraits)) FirstPrivateDecls.try_emplace(cast(DRE->getDecl()), /*Implicit=*/true); else if (const auto *VD = dyn_cast( cast(D.Allocator->IgnoreParenImpCasts()) ->getDecl())) FirstPrivateDecls.try_emplace(VD, /*Implicit=*/true); } } // Extract device pointer clause information. for (const auto *C : Dir.getClausesOfKind()) for (auto L : C->component_lists()) DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L)); } /// Constructor for the declare mapper directive. MappableExprsHandler(const OMPDeclareMapperDecl &Dir, CodeGenFunction &CGF) : CurDir(&Dir), CGF(CGF) {} /// Generate code for the combined entry if we have a partially mapped struct /// and take care of the mapping flags of the arguments corresponding to /// individual struct members. 
void emitCombinedEntry(MapCombinedInfoTy &CombinedInfo, MapFlagsArrayTy &CurTypes, const StructRangeInfoTy &PartialStruct, const ValueDecl *VD = nullptr, bool NotTargetParams = true) const { if (CurTypes.size() == 1 && ((CurTypes.back() & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF) && !PartialStruct.IsArraySection) return; Address LBAddr = PartialStruct.LowestElem.second; Address HBAddr = PartialStruct.HighestElem.second; if (PartialStruct.HasCompleteRecord) { LBAddr = PartialStruct.LB; HBAddr = PartialStruct.LB; } CombinedInfo.Exprs.push_back(VD); // Base is the base of the struct CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer()); // Pointer is the address of the lowest element llvm::Value *LB = LBAddr.getPointer(); CombinedInfo.Pointers.push_back(LB); // There should not be a mapper for a combined entry. CombinedInfo.Mappers.push_back(nullptr); // Size is (addr of {highest+1} element) - (addr of lowest element) llvm::Value *HB = HBAddr.getPointer(); llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(HBAddr.getElementType(), HB, /*Idx0=*/1); llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy); llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy); llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr); llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty, /*isSigned=*/false); CombinedInfo.Sizes.push_back(Size); // Map type is always TARGET_PARAM, if generate info for captures. CombinedInfo.Types.push_back(NotTargetParams ? OMP_MAP_NONE : OMP_MAP_TARGET_PARAM); // If any element has the present modifier, then make sure the runtime // doesn't attempt to allocate the struct. 
if (CurTypes.end() != llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) { return Type & OMP_MAP_PRESENT; })) CombinedInfo.Types.back() |= OMP_MAP_PRESENT; // Remove TARGET_PARAM flag from the first element (*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM; // All other current entries will be MEMBER_OF the combined entry // (except for PTR_AND_OBJ entries which do not have a placeholder value // 0xFFFF in the MEMBER_OF field). OpenMPOffloadMappingFlags MemberOfFlag = getMemberOfFlag(CombinedInfo.BasePointers.size() - 1); for (auto &M : CurTypes) setCorrectMemberOfFlag(M, MemberOfFlag); } /// Generate all the base pointers, section pointers, sizes, map types, and /// mappers for the extracted mappable expressions (all included in \a /// CombinedInfo). Also, for each item that relates with a device pointer, a /// pair of the relevant declaration and index where it occurs is appended to /// the device pointers info array. void generateAllInfo( MapCombinedInfoTy &CombinedInfo, const llvm::DenseSet> &SkipVarSet = llvm::DenseSet>()) const { assert(CurDir.is() && "Expect a executable directive"); const auto *CurExecDir = CurDir.get(); generateAllInfoForClauses(CurExecDir->clauses(), CombinedInfo, SkipVarSet); } /// Generate all the base pointers, section pointers, sizes, map types, and /// mappers for the extracted map clauses of user-defined mapper (all included /// in \a CombinedInfo). void generateAllInfoForMapper(MapCombinedInfoTy &CombinedInfo) const { assert(CurDir.is() && "Expect a declare mapper directive"); const auto *CurMapperDir = CurDir.get(); generateAllInfoForClauses(CurMapperDir->clauses(), CombinedInfo); } /// Emit capture info for lambdas for variables captured by reference. 
void generateInfoForLambdaCaptures( const ValueDecl *VD, llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo, llvm::DenseMap &LambdaPointers) const { const auto *RD = VD->getType() .getCanonicalType() .getNonReferenceType() ->getAsCXXRecordDecl(); if (!RD || !RD->isLambda()) return; Address VDAddr = Address(Arg, CGF.getContext().getDeclAlign(VD)); LValue VDLVal = CGF.MakeAddrLValue( VDAddr, VD->getType().getCanonicalType().getNonReferenceType()); llvm::DenseMap Captures; FieldDecl *ThisCapture = nullptr; RD->getCaptureFields(Captures, ThisCapture); if (ThisCapture) { LValue ThisLVal = CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture); LValue ThisLValVal = CGF.EmitLValueForField(VDLVal, ThisCapture); LambdaPointers.try_emplace(ThisLVal.getPointer(CGF), VDLVal.getPointer(CGF)); CombinedInfo.Exprs.push_back(VD); CombinedInfo.BasePointers.push_back(ThisLVal.getPointer(CGF)); CombinedInfo.Pointers.push_back(ThisLValVal.getPointer(CGF)); CombinedInfo.Sizes.push_back( CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy), CGF.Int64Ty, /*isSigned=*/true)); CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL | OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT); CombinedInfo.Mappers.push_back(nullptr); } for (const LambdaCapture &LC : RD->captures()) { if (!LC.capturesVariable()) continue; const VarDecl *VD = LC.getCapturedVar(); if (LC.getCaptureKind() != LCK_ByRef && !VD->getType()->isPointerType()) continue; auto It = Captures.find(VD); assert(It != Captures.end() && "Found lambda capture without field."); LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second); if (LC.getCaptureKind() == LCK_ByRef) { LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second); LambdaPointers.try_emplace(VarLVal.getPointer(CGF), VDLVal.getPointer(CGF)); CombinedInfo.Exprs.push_back(VD); CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF)); CombinedInfo.Pointers.push_back(VarLValVal.getPointer(CGF)); 
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast( CGF.getTypeSize( VD->getType().getCanonicalType().getNonReferenceType()), CGF.Int64Ty, /*isSigned=*/true)); } else { RValue VarRVal = CGF.EmitLoadOfLValue(VarLVal, RD->getLocation()); LambdaPointers.try_emplace(VarLVal.getPointer(CGF), VDLVal.getPointer(CGF)); CombinedInfo.Exprs.push_back(VD); CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF)); CombinedInfo.Pointers.push_back(VarRVal.getScalarVal()); CombinedInfo.Sizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0)); } CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL | OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT); CombinedInfo.Mappers.push_back(nullptr); } } /// Set correct indices for lambdas captures. void adjustMemberOfForLambdaCaptures( const llvm::DenseMap &LambdaPointers, MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers, MapFlagsArrayTy &Types) const { for (unsigned I = 0, E = Types.size(); I < E; ++I) { // Set correct member_of idx for all implicit lambda captures. if (Types[I] != (OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL | OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT)) continue; llvm::Value *BasePtr = LambdaPointers.lookup(*BasePointers[I]); assert(BasePtr && "Unable to find base lambda address."); int TgtIdx = -1; for (unsigned J = I; J > 0; --J) { unsigned Idx = J - 1; if (Pointers[Idx] != BasePtr) continue; TgtIdx = Idx; break; } assert(TgtIdx != -1 && "Unable to find parent lambda."); // All other current entries will be MEMBER_OF the combined entry // (except for PTR_AND_OBJ entries which do not have a placeholder value // 0xFFFF in the MEMBER_OF field). OpenMPOffloadMappingFlags MemberOfFlag = getMemberOfFlag(TgtIdx); setCorrectMemberOfFlag(Types[I], MemberOfFlag); } } /// Generate the base pointers, section pointers, sizes, map types, and /// mappers associated to a given capture (all included in \a CombinedInfo). 
void generateInfoForCapture(const CapturedStmt::Capture *Cap, llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo, StructRangeInfoTy &PartialStruct) const { assert(!Cap->capturesVariableArrayType() && "Not expecting to generate map info for a variable array type!"); // We need to know when we generating information for the first component const ValueDecl *VD = Cap->capturesThis() ? nullptr : Cap->getCapturedVar()->getCanonicalDecl(); // If this declaration appears in a is_device_ptr clause we just have to // pass the pointer by value. If it is a reference to a declaration, we just // pass its value. if (DevPointersMap.count(VD)) { CombinedInfo.Exprs.push_back(VD); CombinedInfo.BasePointers.emplace_back(Arg, VD); CombinedInfo.Pointers.push_back(Arg); CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast( CGF.getTypeSize(CGF.getContext().VoidPtrTy), CGF.Int64Ty, /*isSigned=*/true)); CombinedInfo.Types.push_back( (Cap->capturesVariable() ? OMP_MAP_TO : OMP_MAP_LITERAL) | OMP_MAP_TARGET_PARAM); CombinedInfo.Mappers.push_back(nullptr); return; } using MapData = std::tuple, bool, const ValueDecl *, const Expr *>; SmallVector DeclComponentLists; assert(CurDir.is() && "Expect a executable directive"); const auto *CurExecDir = CurDir.get(); for (const auto *C : CurExecDir->getClausesOfKind()) { const auto *EI = C->getVarRefs().begin(); for (const auto L : C->decl_component_lists(VD)) { const ValueDecl *VDecl, *Mapper; // The Expression is not correct if the mapping is implicit const Expr *E = (C->getMapLoc().isValid()) ? 
*EI : nullptr; OMPClauseMappableExprCommon::MappableExprComponentListRef Components; std::tie(VDecl, Components, Mapper) = L; assert(VDecl == VD && "We got information for the wrong declaration??"); assert(!Components.empty() && "Not expecting declaration with no component lists."); DeclComponentLists.emplace_back(Components, C->getMapType(), C->getMapTypeModifiers(), C->isImplicit(), Mapper, E); ++EI; } } llvm::stable_sort(DeclComponentLists, [](const MapData &LHS, const MapData &RHS) { ArrayRef MapModifiers = std::get<2>(LHS); OpenMPMapClauseKind MapType = std::get<1>(RHS); bool HasPresent = !MapModifiers.empty() && llvm::any_of(MapModifiers, [](OpenMPMapModifierKind K) { return K == clang::OMPC_MAP_MODIFIER_present; }); bool HasAllocs = MapType == OMPC_MAP_alloc; MapModifiers = std::get<2>(RHS); MapType = std::get<1>(LHS); bool HasPresentR = !MapModifiers.empty() && llvm::any_of(MapModifiers, [](OpenMPMapModifierKind K) { return K == clang::OMPC_MAP_MODIFIER_present; }); bool HasAllocsR = MapType == OMPC_MAP_alloc; return (HasPresent && !HasPresentR) || (HasAllocs && !HasAllocsR); }); // Find overlapping elements (including the offset from the base element). 
llvm::SmallDenseMap< const MapData *, llvm::SmallVector< OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>, 4> OverlappedData; size_t Count = 0; for (const MapData &L : DeclComponentLists) { OMPClauseMappableExprCommon::MappableExprComponentListRef Components; OpenMPMapClauseKind MapType; ArrayRef MapModifiers; bool IsImplicit; const ValueDecl *Mapper; const Expr *VarRef; std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) = L; ++Count; for (const MapData &L1 : makeArrayRef(DeclComponentLists).slice(Count)) { OMPClauseMappableExprCommon::MappableExprComponentListRef Components1; std::tie(Components1, MapType, MapModifiers, IsImplicit, Mapper, VarRef) = L1; auto CI = Components.rbegin(); auto CE = Components.rend(); auto SI = Components1.rbegin(); auto SE = Components1.rend(); for (; CI != CE && SI != SE; ++CI, ++SI) { if (CI->getAssociatedExpression()->getStmtClass() != SI->getAssociatedExpression()->getStmtClass()) break; // Are we dealing with different variables/fields? if (CI->getAssociatedDeclaration() != SI->getAssociatedDeclaration()) break; } // Found overlapping if, at least for one component, reached the head // of the components list. if (CI == CE || SI == SE) { // Ignore it if it is the same component. if (CI == CE && SI == SE) continue; const auto It = (SI == SE) ? CI : SI; // If one component is a pointer and another one is a kind of // dereference of this pointer (array subscript, section, dereference, // etc.), it is not an overlapping. // Same, if one component is a base and another component is a // dereferenced pointer memberexpr with the same base. if (!isa(It->getAssociatedExpression()) || (std::prev(It)->getAssociatedDeclaration() && std::prev(It) ->getAssociatedDeclaration() ->getType() ->isPointerType()) || (It->getAssociatedDeclaration() && It->getAssociatedDeclaration()->getType()->isPointerType() && std::next(It) != CE && std::next(It) != SE)) continue; const MapData &BaseData = CI == CE ? 
L : L1; OMPClauseMappableExprCommon::MappableExprComponentListRef SubData = SI == SE ? Components : Components1; auto &OverlappedElements = OverlappedData.FindAndConstruct(&BaseData); OverlappedElements.getSecond().push_back(SubData); } } } // Sort the overlapped elements for each item. llvm::SmallVector Layout; if (!OverlappedData.empty()) { const Type *BaseType = VD->getType().getCanonicalType().getTypePtr(); const Type *OrigType = BaseType->getPointeeOrArrayElementType(); while (BaseType != OrigType) { BaseType = OrigType->getCanonicalTypeInternal().getTypePtr(); OrigType = BaseType->getPointeeOrArrayElementType(); } if (const auto *CRD = BaseType->getAsCXXRecordDecl()) getPlainLayout(CRD, Layout, /*AsBase=*/false); else { const auto *RD = BaseType->getAsRecordDecl(); Layout.append(RD->field_begin(), RD->field_end()); } } for (auto &Pair : OverlappedData) { llvm::stable_sort( Pair.getSecond(), [&Layout]( OMPClauseMappableExprCommon::MappableExprComponentListRef First, OMPClauseMappableExprCommon::MappableExprComponentListRef Second) { auto CI = First.rbegin(); auto CE = First.rend(); auto SI = Second.rbegin(); auto SE = Second.rend(); for (; CI != CE && SI != SE; ++CI, ++SI) { if (CI->getAssociatedExpression()->getStmtClass() != SI->getAssociatedExpression()->getStmtClass()) break; // Are we dealing with different variables/fields? if (CI->getAssociatedDeclaration() != SI->getAssociatedDeclaration()) break; } // Lists contain the same elements. if (CI == CE && SI == SE) return false; // List with less elements is less than list with more elements. 
if (CI == CE || SI == SE) return CI == CE; const auto *FD1 = cast(CI->getAssociatedDeclaration()); const auto *FD2 = cast(SI->getAssociatedDeclaration()); if (FD1->getParent() == FD2->getParent()) return FD1->getFieldIndex() < FD2->getFieldIndex(); const auto *It = llvm::find_if(Layout, [FD1, FD2](const FieldDecl *FD) { return FD == FD1 || FD == FD2; }); return *It == FD1; }); } // Associated with a capture, because the mapping flags depend on it. // Go through all of the elements with the overlapped elements. bool IsFirstComponentList = true; for (const auto &Pair : OverlappedData) { const MapData &L = *Pair.getFirst(); OMPClauseMappableExprCommon::MappableExprComponentListRef Components; OpenMPMapClauseKind MapType; ArrayRef MapModifiers; bool IsImplicit; const ValueDecl *Mapper; const Expr *VarRef; std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) = L; ArrayRef OverlappedComponents = Pair.getSecond(); generateInfoForComponentList( MapType, MapModifiers, llvm::None, Components, CombinedInfo, PartialStruct, IsFirstComponentList, IsImplicit, Mapper, /*ForDeviceAddr=*/false, VD, VarRef, OverlappedComponents); IsFirstComponentList = false; } // Go through other elements without overlapped elements. for (const MapData &L : DeclComponentLists) { OMPClauseMappableExprCommon::MappableExprComponentListRef Components; OpenMPMapClauseKind MapType; ArrayRef MapModifiers; bool IsImplicit; const ValueDecl *Mapper; const Expr *VarRef; std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) = L; auto It = OverlappedData.find(&L); if (It == OverlappedData.end()) generateInfoForComponentList(MapType, MapModifiers, llvm::None, Components, CombinedInfo, PartialStruct, IsFirstComponentList, IsImplicit, Mapper, /*ForDeviceAddr=*/false, VD, VarRef); IsFirstComponentList = false; } } /// Generate the default map information for a given capture \a CI, /// record field declaration \a RI and captured value \a CV. 
void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
                            const FieldDecl &RI, llvm::Value *CV,
                            MapCombinedInfoTy &CombinedInfo) const {
  // NOTE(review): angle-bracket template arguments appear to have been lost
  // in extraction below (e.g. cast(...) is presumably cast<PointerType>(...))
  // — confirm against upstream before compiling.
  bool IsImplicit = true;
  // Do the default mapping.
  if (CI.capturesThis()) {
    // 'this' is mapped as a raw pointer with the size of the pointee class.
    CombinedInfo.Exprs.push_back(nullptr);
    CombinedInfo.BasePointers.push_back(CV);
    CombinedInfo.Pointers.push_back(CV);
    const auto *PtrTy = cast(RI.getType().getTypePtr());
    CombinedInfo.Sizes.push_back(
        CGF.Builder.CreateIntCast(CGF.getTypeSize(PtrTy->getPointeeType()),
                                  CGF.Int64Ty, /*isSigned=*/true));
    // Default map type.
    CombinedInfo.Types.push_back(OMP_MAP_TO | OMP_MAP_FROM);
  } else if (CI.capturesVariableByCopy()) {
    const VarDecl *VD = CI.getCapturedVar();
    CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
    CombinedInfo.BasePointers.push_back(CV);
    CombinedInfo.Pointers.push_back(CV);
    if (!RI.getType()->isAnyPointerType()) {
      // We have to signal to the runtime captures passed by value that are
      // not pointers.
      CombinedInfo.Types.push_back(OMP_MAP_LITERAL);
      CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
          CGF.getTypeSize(RI.getType()), CGF.Int64Ty, /*isSigned=*/true));
    } else {
      // Pointers are implicitly mapped with a zero size and no flags
      // (other than first map that is added for all implicit maps).
      CombinedInfo.Types.push_back(OMP_MAP_NONE);
      CombinedInfo.Sizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
    }
    auto I = FirstPrivateDecls.find(VD);
    if (I != FirstPrivateDecls.end())
      IsImplicit = I->getSecond();
  } else {
    assert(CI.capturesVariable() && "Expected captured reference.");
    const auto *PtrTy = cast(RI.getType().getTypePtr());
    QualType ElementType = PtrTy->getPointeeType();
    CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
        CGF.getTypeSize(ElementType), CGF.Int64Ty, /*isSigned=*/true));
    // The default map type for a scalar/complex type is 'to' because by
    // default the value doesn't have to be retrieved. For an aggregate
    // type, the default is 'tofrom'.
    CombinedInfo.Types.push_back(getMapModifiersForPrivateClauses(CI));
    const VarDecl *VD = CI.getCapturedVar();
    auto I = FirstPrivateDecls.find(VD);
    CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
    CombinedInfo.BasePointers.push_back(CV);
    if (I != FirstPrivateDecls.end() && ElementType->isAnyPointerType()) {
      // Firstprivate pointer captured by reference: dereference the capture
      // to obtain the actual pointer value to map.
      Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
          CV, ElementType, CGF.getContext().getDeclAlign(VD),
          AlignmentSource::Decl));
      CombinedInfo.Pointers.push_back(PtrAddr.getPointer());
    } else {
      CombinedInfo.Pointers.push_back(CV);
    }
    if (I != FirstPrivateDecls.end())
      IsImplicit = I->getSecond();
  }
  // Every default map produces a single argument which is a target parameter.
  CombinedInfo.Types.back() |= OMP_MAP_TARGET_PARAM;

  // Add flag stating this is an implicit map.
  if (IsImplicit)
    CombinedInfo.Types.back() |= OMP_MAP_IMPLICIT;

  // No user-defined mapper for default mapping.
  CombinedInfo.Mappers.push_back(nullptr);
}
};
} // anonymous namespace

// Emits the per-dimension offset/count/stride descriptors used by the
// offloading runtime for non-contiguous (strided) array-section transfers,
// and stores pointers to them into the offload pointers array.
static void emitNonContiguousDescriptor(
    CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
    CGOpenMPRuntime::TargetDataInfo &Info) {
  CodeGenModule &CGM = CGF.CGM;
  MappableExprsHandler::MapCombinedInfoTy::StructNonContiguousInfo
      &NonContigInfo = CombinedInfo.NonContigInfo;

  // Build an array of struct descriptor_dim and then assign it to
  // offload_args.
  //
  // struct descriptor_dim {
  //  uint64_t offset;
  //  uint64_t count;
  //  uint64_t stride
  // };
  ASTContext &C = CGF.getContext();
  QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
  RecordDecl *RD;
  RD = C.buildImplicitRecord("descriptor_dim");
  RD->startDefinition();
  // Three uint64_t fields: offset, count, stride (indexed by the enum below).
  addFieldToRecordDecl(C, RD, Int64Ty);
  addFieldToRecordDecl(C, RD, Int64Ty);
  addFieldToRecordDecl(C, RD, Int64Ty);
  RD->completeDefinition();
  QualType DimTy = C.getRecordType(RD);

  enum { OffsetFD = 0, CountFD, StrideFD };
  // We need two index variable here since the size of "Dims" is the same as
  // the size of Components, however, the size of offset, count, and stride is
  // equal to the size of base declaration that is non-contiguous.
  for (unsigned I = 0, L = 0, E = NonContigInfo.Dims.size(); I < E; ++I) {
    // Skip emitting ir if dimension size is 1 since it cannot be
    // non-contiguous.
    if (NonContigInfo.Dims[I] == 1)
      continue;
    llvm::APInt Size(/*numBits=*/32, NonContigInfo.Dims[I]);
    QualType ArrayTy =
        C.getConstantArrayType(DimTy, Size, nullptr, ArrayType::Normal, 0);
    Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
    for (unsigned II = 0, EE = NonContigInfo.Dims[I]; II < EE; ++II) {
      // Dimensions are stored in reverse order (RevIdx) relative to the
      // recorded Offsets/Counts/Strides lists.
      unsigned RevIdx = EE - II - 1;
      LValue DimsLVal = CGF.MakeAddrLValue(
          CGF.Builder.CreateConstArrayGEP(DimsAddr, II), DimTy);
      // Offset
      LValue OffsetLVal = CGF.EmitLValueForField(
          DimsLVal, *std::next(RD->field_begin(), OffsetFD));
      CGF.EmitStoreOfScalar(NonContigInfo.Offsets[L][RevIdx], OffsetLVal);
      // Count
      LValue CountLVal = CGF.EmitLValueForField(
          DimsLVal, *std::next(RD->field_begin(), CountFD));
      CGF.EmitStoreOfScalar(NonContigInfo.Counts[L][RevIdx], CountLVal);
      // Stride
      LValue StrideLVal = CGF.EmitLValueForField(
          DimsLVal, *std::next(RD->field_begin(), StrideFD));
      CGF.EmitStoreOfScalar(NonContigInfo.Strides[L][RevIdx], StrideLVal);
    }
    // args[I] = &dims
    Address DAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        DimsAddr, CGM.Int8PtrTy);
    llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
        Info.PointersArray, 0, I);
    Address PAddr(P, CGF.getPointerAlign());
    CGF.Builder.CreateStore(DAddr.getPointer(), PAddr);
    ++L;
  }
}

/// Emit a string constant containing the names of the values mapped to the
/// offloading runtime library.
llvm::Constant *
emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
                       MappableExprsHandler::MappingExprInfo &MapExprs) {
  llvm::Constant *SrcLocStr;
  if (!MapExprs.getMapDecl()) {
    // No declaration: fall back to the builder's default source-location
    // string.
    SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr();
  } else {
    std::string ExprName = "";
    if (MapExprs.getMapExpr()) {
      // Pretty-print the mapped expression itself when one is available.
      PrintingPolicy P(CGF.getContext().getLangOpts());
      llvm::raw_string_ostream OS(ExprName);
      MapExprs.getMapExpr()->printPretty(OS, nullptr, P);
      OS.flush();
    } else {
      ExprName = MapExprs.getMapDecl()->getNameAsString();
    }

    // Encode file/line/column of the mapped declaration into the string.
    SourceLocation Loc = MapExprs.getMapDecl()->getLocation();
    PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
    const char *FileName = PLoc.getFilename();
    unsigned Line = PLoc.getLine();
    unsigned Column = PLoc.getColumn();
    SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FileName, ExprName.c_str(),
                                                Line, Column);
  }
  return SrcLocStr;
}

/// Emit the arrays used to pass the captures and map information to the
/// offloading runtime library. If there is no map or capture information,
/// return nullptr by reference.
static void emitOffloadingArrays(
    CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
    CGOpenMPRuntime::TargetDataInfo &Info, llvm::OpenMPIRBuilder &OMPBuilder,
    bool IsNonContiguous = false) {
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGF.getContext();

  // Reset the array information.
  Info.clearArrayInfo();
  Info.NumberOfPtrs = CombinedInfo.BasePointers.size();

  if (Info.NumberOfPtrs) {
    // Detect if we have any capture size requiring runtime evaluation of the
    // size so that a constant array could be eventually used.
    // NOTE(review): template arguments appear stripped by extraction here
    // (e.g. isa(S), SmallVector declarations, cast(...)) — restore from
    // upstream before compiling.
    bool hasRuntimeEvaluationCaptureSize = false;
    for (llvm::Value *S : CombinedInfo.Sizes)
      if (!isa(S)) {
        hasRuntimeEvaluationCaptureSize = true;
        break;
      }

    // Allocate stack temporaries for the base-pointer, pointer, and mapper
    // arrays (one void* slot per mapped entity).
    llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
    QualType PointerArrayType = Ctx.getConstantArrayType(
        Ctx.VoidPtrTy, PointerNumAP, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);

    Info.BasePointersArray =
        CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
    Info.PointersArray =
        CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
    Address MappersArray =
        CGF.CreateMemTemp(PointerArrayType, ".offload_mappers");
    Info.MappersArray = MappersArray.getPointer();

    // If we don't have any VLA types or other types that require runtime
    // evaluation, we can use a constant array for the map sizes, otherwise we
    // need to fill up the arrays as we do for the pointers.
    QualType Int64Ty =
        Ctx.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
    if (hasRuntimeEvaluationCaptureSize) {
      QualType SizeArrayType = Ctx.getConstantArrayType(
          Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
          /*IndexTypeQuals=*/0);
      Info.SizesArray =
          CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
    } else {
      // We expect all the sizes to be constant, so we collect them to create
      // a constant array.
      SmallVector ConstSizes;
      for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
        if (IsNonContiguous && (CombinedInfo.Types[I] &
                                MappableExprsHandler::OMP_MAP_NON_CONTIG)) {
          // For non-contiguous entries the "size" slot carries the number of
          // dimensions instead of a byte count.
          ConstSizes.push_back(llvm::ConstantInt::get(
              CGF.Int64Ty, CombinedInfo.NonContigInfo.Dims[I]));
        } else {
          ConstSizes.push_back(cast(CombinedInfo.Sizes[I]));
        }
      }
      // Emit the sizes once as a private constant global.
      auto *SizesArrayInit = llvm::ConstantArray::get(
          llvm::ArrayType::get(CGM.Int64Ty, ConstSizes.size()), ConstSizes);
      std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
      auto *SizesArrayGbl = new llvm::GlobalVariable(
          CGM.getModule(), SizesArrayInit->getType(),
          /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
          SizesArrayInit, Name);
      SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      Info.SizesArray = SizesArrayGbl;
    }

    // The map types are always constant so we don't need to generate code to
    // fill arrays. Instead, we create an array constant.
    SmallVector Mapping(CombinedInfo.Types.size(), 0);
    llvm::copy(CombinedInfo.Types, Mapping.begin());
    std::string MaptypesName =
        CGM.getOpenMPRuntime().getName({"offload_maptypes"});
    auto *MapTypesArrayGbl =
        OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
    Info.MapTypesArray = MapTypesArrayGbl;

    // The information types are only built if there is debug information
    // requested.
    if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo) {
      // Without debug info the map-names array is a null i8*.
      Info.MapNamesArray = llvm::Constant::getNullValue(
          llvm::Type::getInt8Ty(CGF.Builder.getContext())->getPointerTo());
    } else {
      // Build one source-location/name string per mapped expression.
      auto fillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
        return emitMappingInformation(CGF, OMPBuilder, MapExpr);
      };
      SmallVector InfoMap(CombinedInfo.Exprs.size());
      llvm::transform(CombinedInfo.Exprs, InfoMap.begin(), fillInfoMap);

      std::string MapnamesName =
          CGM.getOpenMPRuntime().getName({"offload_mapnames"});
      auto *MapNamesArrayGbl =
          OMPBuilder.createOffloadMapnames(InfoMap, MapnamesName);
      Info.MapNamesArray = MapNamesArrayGbl;
    }

    // If there's a present map type modifier, it must not be applied to the
    // end of a region, so generate a separate map type array in that case.
    if (Info.separateBeginEndCalls()) {
      bool EndMapTypesDiffer = false;
      for (uint64_t &Type : Mapping) {
        if (Type & MappableExprsHandler::OMP_MAP_PRESENT) {
          Type &= ~MappableExprsHandler::OMP_MAP_PRESENT;
          EndMapTypesDiffer = true;
        }
      }
      if (EndMapTypesDiffer) {
        MapTypesArrayGbl =
            OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
        Info.MapTypesArrayEnd = MapTypesArrayGbl;
      }
    }

    // Fill the base-pointer, pointer, (runtime) size, and mapper arrays,
    // one slot per mapped entity.
    for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
      llvm::Value *BPVal = *CombinedInfo.BasePointers[I];
      llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
          llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
          Info.BasePointersArray, 0, I);
      BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
      Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
      CGF.Builder.CreateStore(BPVal, BPAddr);

      // Record where use_device_ptr/addr declarations were materialized.
      if (Info.requiresDevicePointerInfo())
        if (const ValueDecl *DevVD =
                CombinedInfo.BasePointers[I].getDevicePtrDecl())
          Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);

      llvm::Value *PVal = CombinedInfo.Pointers[I];
      llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
          llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
          Info.PointersArray, 0, I);
      P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
      Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
      CGF.Builder.CreateStore(PVal, PAddr);

      if (hasRuntimeEvaluationCaptureSize) {
        // Sizes live in a stack array; store the (possibly runtime) value.
        llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
            llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
            Info.SizesArray,
            /*Idx0=*/0,
            /*Idx1=*/I);
        Address SAddr(S, Ctx.getTypeAlignInChars(Int64Ty));
        CGF.Builder.CreateStore(CGF.Builder.CreateIntCast(CombinedInfo.Sizes[I],
                                                          CGM.Int64Ty,
                                                          /*isSigned=*/true),
                                SAddr);
      }

      // Fill up the mapper array.
      llvm::Value *MFunc = llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
      if (CombinedInfo.Mappers[I]) {
        // NOTE(review): template argument of cast(...) appears stripped by
        // extraction — confirm against upstream.
        MFunc = CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
            cast(CombinedInfo.Mappers[I]));
        MFunc = CGF.Builder.CreatePointerCast(MFunc, CGM.VoidPtrTy);
        Info.HasMapper = true;
      }
      Address MAddr = CGF.Builder.CreateConstArrayGEP(MappersArray, I);
      CGF.Builder.CreateStore(MFunc, MAddr);
    }
  }

  if (!IsNonContiguous || CombinedInfo.NonContigInfo.Offsets.empty() ||
      Info.NumberOfPtrs == 0)
    return;

  emitNonContiguousDescriptor(CGF, CombinedInfo, Info);
}

namespace {
/// Additional arguments for emitOffloadingArraysArgument function.
struct ArgumentsOptions {
  bool ForEndCall = false;
  ArgumentsOptions() = default;
  ArgumentsOptions(bool ForEndCall) : ForEndCall(ForEndCall) {}
};
} // namespace

/// Emit the arguments to be passed to the runtime library based on the
/// arrays of base pointers, pointers, sizes, map types, and mappers. If
/// ForEndCall, emit map types to be passed for the end of the region instead
/// of the beginning.
static void emitOffloadingArraysArgument(
    CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
    llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
    llvm::Value *&MapTypesArrayArg, llvm::Value *&MapNamesArrayArg,
    llvm::Value *&MappersArrayArg, CGOpenMPRuntime::TargetDataInfo &Info,
    const ArgumentsOptions &Options = ArgumentsOptions()) {
  assert((!Options.ForEndCall || Info.separateBeginEndCalls()) &&
         "expected region end call to runtime only when end call is separate");
  CodeGenModule &CGM = CGF.CGM;
  if (Info.NumberOfPtrs) {
    // Decay each [N x T] array to a pointer to its first element.
    BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
        Info.BasePointersArray,
        /*Idx0=*/0, /*Idx1=*/0);
    PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
        Info.PointersArray,
        /*Idx0=*/0,
        /*Idx1=*/0);
    SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs), Info.SizesArray,
        /*Idx0=*/0, /*Idx1=*/0);
    // For an end call, use the end-specific map types (with PRESENT cleared)
    // when they were generated.
    MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
        Options.ForEndCall && Info.MapTypesArrayEnd ? Info.MapTypesArrayEnd
                                                    : Info.MapTypesArray,
        /*Idx0=*/0,
        /*Idx1=*/0);
    // Only emit the mapper information arrays if debug information is
    // requested.
    if (CGF.CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
      MapNamesArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
    else
      MapNamesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
          llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
          Info.MapNamesArray,
          /*Idx0=*/0,
          /*Idx1=*/0);
    // If there is no user-defined mapper, set the mapper array to nullptr to
    // avoid an unnecessary data privatization
    if (!Info.HasMapper)
      MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
    else
      MappersArrayArg =
          CGF.Builder.CreatePointerCast(Info.MappersArray, CGM.VoidPtrPtrTy);
  } else {
    // Nothing is mapped: every runtime argument is a typed null pointer.
    BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
    PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
    SizesArrayArg = llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
    MapTypesArrayArg =
        llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
    MapNamesArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
    MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
  }
}

/// Check for inner distribute directive.
// Returns the distribute directive nested inside a target/target-teams
// construct (looking through one level of 'teams'), or nullptr when the outer
// directive cannot contain one.
// NOTE(review): dyn_cast_or_null's template argument appears stripped by
// extraction (presumably <OMPExecutableDirective>) — confirm against upstream.
static const OMPExecutableDirective *
getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt =
      CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPDistributeDirective(DKind))
        return NestedDir;
      if (DKind == OMPD_teams) {
        // Look one level deeper: target { teams { distribute ... } }.
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return nullptr;
        ChildStmt = CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND = dyn_cast_or_null(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPDistributeDirective(DKind))
            return NND;
        }
      }
      return nullptr;
    case OMPD_target_teams:
      if (isOpenMPDistributeDirective(DKind))
        return NestedDir;
      return nullptr;
    case OMPD_target_parallel:
    case OMPD_target_simd:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
      return nullptr;
    // All remaining directive kinds can never reach here as the outer
    // directive of a target region with a nested distribute.
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_tile:
    case OMPD_unroll:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return nullptr;
}

/// Emit the user-defined mapper function. The code generation follows the
/// pattern in the example below.
/// \code
/// void .omp_mapper...(void *rt_mapper_handle,
///                                          void *base, void *begin,
///                                          int64_t size, int64_t type,
///                                          void *name = nullptr) {
///   // Allocate space for an array section first or add a base/begin for
///   // pointer dereference.
///   if ((size > 1 || (base != begin && maptype.IsPtrAndObj)) &&
///       !maptype.IsDelete)
///     __tgt_push_mapper_component(rt_mapper_handle, base, begin,
///                                 size*sizeof(Ty), clearToFromMember(type));
///   // Map members.
///   for (unsigned i = 0; i < size; i++) {
///     // For each component specified by this mapper:
///     for (auto c : begin[i]->all_components) {
///       if (c.hasMapper())
///         (*c.Mapper())(rt_mapper_handle, c.arg_base, c.arg_begin, c.arg_size,
///                       c.arg_type, c.arg_name);
///       else
///         __tgt_push_mapper_component(rt_mapper_handle, c.arg_base,
///                                     c.arg_begin, c.arg_size, c.arg_type,
///                                     c.arg_name);
///     }
///   }
///   // Delete the array section.
///   if (size > 1 && maptype.IsDelete)
///     __tgt_push_mapper_component(rt_mapper_handle, base, begin,
///                                 size*sizeof(Ty), clearToFromMember(type));
/// }
/// \endcode
void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
                                            CodeGenFunction *CGF) {
  // Only emit the mapper function once per declaration.
  if (UDMMap.count(D) > 0)
    return;
  ASTContext &C = CGM.getContext();
  QualType Ty = D->getType();
  QualType PtrTy = C.getPointerType(Ty).withRestrict();
  QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
  // NOTE(review): nested cast(...) template arguments appear stripped by
  // extraction — confirm against upstream.
  auto *MapperVarDecl =
      cast(cast(D->getMapperVarRef())->getDecl());
  SourceLocation Loc = D->getLocation();
  CharUnits ElementSize = C.getTypeSizeInChars(Ty);

  // Prepare mapper function arguments and attributes.
  ImplicitParamDecl HandleArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                              C.VoidPtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl BaseArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl BeginArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                             C.VoidPtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl SizeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl NameArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                            ImplicitParamDecl::Other);
  FunctionArgList Args;
  Args.push_back(&HandleArg);
  Args.push_back(&BaseArg);
  Args.push_back(&BeginArg);
  Args.push_back(&SizeArg);
  Args.push_back(&TypeArg);
  Args.push_back(&NameArg);
  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  // Name the mapper after the mangled mapped type and the mapper's own name.
  SmallString<64> TyStr;
  llvm::raw_svector_ostream Out(TyStr);
  CGM.getCXXABI().getMangleContext().mangleTypeName(Ty, Out);
  std::string Name = getName({"omp_mapper", TyStr, D->getName()});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
  // Start the mapper function code generation.
  CodeGenFunction MapperCGF(CGM);
  MapperCGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  // Compute the starting and end addresses of array elements.
  llvm::Value *Size = MapperCGF.EmitLoadOfScalar(
      MapperCGF.GetAddrOfLocalVar(&SizeArg), /*Volatile=*/false,
      C.getPointerType(Int64Ty), Loc);
  // Prepare common arguments for array initiation and deletion.
  llvm::Value *Handle = MapperCGF.EmitLoadOfScalar(
      MapperCGF.GetAddrOfLocalVar(&HandleArg),
      /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
  llvm::Value *BaseIn = MapperCGF.EmitLoadOfScalar(
      MapperCGF.GetAddrOfLocalVar(&BaseArg),
      /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
  llvm::Value *BeginIn = MapperCGF.EmitLoadOfScalar(
      MapperCGF.GetAddrOfLocalVar(&BeginArg),
      /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
  // Convert the size in bytes into the number of array elements.
  Size = MapperCGF.Builder.CreateExactUDiv(
      Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
  llvm::Value *PtrBegin = MapperCGF.Builder.CreateBitCast(
      BeginIn, CGM.getTypes().ConvertTypeForMem(PtrTy));
  llvm::Value *PtrEnd = MapperCGF.Builder.CreateGEP(
      PtrBegin->getType()->getPointerElementType(), PtrBegin, Size);
  llvm::Value *MapType = MapperCGF.EmitLoadOfScalar(
      MapperCGF.GetAddrOfLocalVar(&TypeArg), /*Volatile=*/false,
      C.getPointerType(Int64Ty), Loc);
  llvm::Value *MapName = MapperCGF.EmitLoadOfScalar(
      MapperCGF.GetAddrOfLocalVar(&NameArg),
      /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);

  // Emit array initiation if this is an array section and \p MapType indicates
  // that memory allocation is required.
  llvm::BasicBlock *HeadBB = MapperCGF.createBasicBlock("omp.arraymap.head");
  emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
                             MapName, ElementSize, HeadBB, /*IsInit=*/true);

  // Emit a for loop to iterate through SizeArg of elements and map all of
  // them.

  // Emit the loop header block.
  MapperCGF.EmitBlock(HeadBB);
  llvm::BasicBlock *BodyBB = MapperCGF.createBasicBlock("omp.arraymap.body");
  llvm::BasicBlock *DoneBB = MapperCGF.createBasicBlock("omp.done");
  // Evaluate whether the initial condition is satisfied.
  llvm::Value *IsEmpty =
      MapperCGF.Builder.CreateICmpEQ(PtrBegin, PtrEnd, "omp.arraymap.isempty");
  MapperCGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
  llvm::BasicBlock *EntryBB = MapperCGF.Builder.GetInsertBlock();

  // Emit the loop body block.
  MapperCGF.EmitBlock(BodyBB);
  llvm::BasicBlock *LastBB = BodyBB;
  // PHI over the current element pointer; second incoming edge is added at
  // the bottom of the loop.
  llvm::PHINode *PtrPHI = MapperCGF.Builder.CreatePHI(
      PtrBegin->getType(), 2, "omp.arraymap.ptrcurrent");
  PtrPHI->addIncoming(PtrBegin, EntryBB);
  Address PtrCurrent =
      Address(PtrPHI, MapperCGF.GetAddrOfLocalVar(&BeginArg)
                          .getAlignment()
                          .alignmentOfArrayElement(ElementSize));
  // Privatize the declared variable of mapper to be the current array element.
  CodeGenFunction::OMPPrivateScope Scope(MapperCGF);
  Scope.addPrivate(MapperVarDecl, [PtrCurrent]() { return PtrCurrent; });
  (void)Scope.Privatize();

  // Get map clause information. Fill up the arrays with all mapped variables.
  MappableExprsHandler::MapCombinedInfoTy Info;
  MappableExprsHandler MEHandler(*D, MapperCGF);
  MEHandler.generateAllInfoForMapper(Info);

  // Call the runtime API __tgt_mapper_num_components to get the number of
  // pre-existing components.
  llvm::Value *OffloadingArgs[] = {Handle};
  llvm::Value *PreviousSize = MapperCGF.EmitRuntimeCall(
      OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
                                            OMPRTL___tgt_mapper_num_components),
      OffloadingArgs);
  // Shift the count into the MEMBER_OF bit-field position of the map type.
  llvm::Value *ShiftedPreviousSize = MapperCGF.Builder.CreateShl(
      PreviousSize,
      MapperCGF.Builder.getInt64(MappableExprsHandler::getFlagMemberOffset()));

  // Fill up the runtime mapper handle for all components.
  for (unsigned I = 0; I < Info.BasePointers.size(); ++I) {
    llvm::Value *CurBaseArg = MapperCGF.Builder.CreateBitCast(
        *Info.BasePointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
    llvm::Value *CurBeginArg = MapperCGF.Builder.CreateBitCast(
        Info.Pointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
    llvm::Value *CurSizeArg = Info.Sizes[I];
    // Name strings are only materialized when debug info is requested.
    llvm::Value *CurNameArg =
        (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
            ? llvm::ConstantPointerNull::get(CGM.VoidPtrTy)
            : emitMappingInformation(MapperCGF, OMPBuilder, Info.Exprs[I]);

    // Extract the MEMBER_OF field from the map type.
    llvm::Value *OriMapType = MapperCGF.Builder.getInt64(Info.Types[I]);
    llvm::Value *MemberMapType =
        MapperCGF.Builder.CreateNUWAdd(OriMapType, ShiftedPreviousSize);

    // Combine the map type inherited from user-defined mapper with that
    // specified in the program. According to the OMP_MAP_TO and OMP_MAP_FROM
    // bits of the \a MapType, which is the input argument of the mapper
    // function, the following code will set the OMP_MAP_TO and OMP_MAP_FROM
    // bits of MemberMapType.
    // [OpenMP 5.0], 1.2.6. map-type decay.
    //        | alloc |  to   | from  | tofrom | release | delete
    // ----------------------------------------------------------
    // alloc  | alloc | alloc | alloc | alloc  | release | delete
    // to     | alloc |  to   | alloc |   to   | release | delete
    // from   | alloc | alloc | from  |  from  | release | delete
    // tofrom | alloc |  to   | from  | tofrom | release | delete
    llvm::Value *LeftToFrom = MapperCGF.Builder.CreateAnd(
        MapType,
        MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_TO |
                                   MappableExprsHandler::OMP_MAP_FROM));
    // Four-way dispatch on the incoming TO/FROM bits: alloc / to / from /
    // tofrom, merged back together at EndBB via a PHI.
    llvm::BasicBlock *AllocBB = MapperCGF.createBasicBlock("omp.type.alloc");
    llvm::BasicBlock *AllocElseBB =
        MapperCGF.createBasicBlock("omp.type.alloc.else");
    llvm::BasicBlock *ToBB = MapperCGF.createBasicBlock("omp.type.to");
    llvm::BasicBlock *ToElseBB = MapperCGF.createBasicBlock("omp.type.to.else");
    llvm::BasicBlock *FromBB = MapperCGF.createBasicBlock("omp.type.from");
    llvm::BasicBlock *EndBB = MapperCGF.createBasicBlock("omp.type.end");
    llvm::Value *IsAlloc = MapperCGF.Builder.CreateIsNull(LeftToFrom);
    MapperCGF.Builder.CreateCondBr(IsAlloc, AllocBB, AllocElseBB);
    // In case of alloc, clear OMP_MAP_TO and OMP_MAP_FROM.
    MapperCGF.EmitBlock(AllocBB);
    llvm::Value *AllocMapType = MapperCGF.Builder.CreateAnd(
        MemberMapType,
        MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
                                     MappableExprsHandler::OMP_MAP_FROM)));
    MapperCGF.Builder.CreateBr(EndBB);
    MapperCGF.EmitBlock(AllocElseBB);
    llvm::Value *IsTo = MapperCGF.Builder.CreateICmpEQ(
        LeftToFrom,
        MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_TO));
    MapperCGF.Builder.CreateCondBr(IsTo, ToBB, ToElseBB);
    // In case of to, clear OMP_MAP_FROM.
    MapperCGF.EmitBlock(ToBB);
    llvm::Value *ToMapType = MapperCGF.Builder.CreateAnd(
        MemberMapType,
        MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_FROM));
    MapperCGF.Builder.CreateBr(EndBB);
    MapperCGF.EmitBlock(ToElseBB);
    llvm::Value *IsFrom = MapperCGF.Builder.CreateICmpEQ(
        LeftToFrom,
        MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_FROM));
    MapperCGF.Builder.CreateCondBr(IsFrom, FromBB, EndBB);
    // In case of from, clear OMP_MAP_TO.
    MapperCGF.EmitBlock(FromBB);
    llvm::Value *FromMapType = MapperCGF.Builder.CreateAnd(
        MemberMapType,
        MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_TO));
    // In case of tofrom, do nothing.
    MapperCGF.EmitBlock(EndBB);
    LastBB = EndBB;
    llvm::PHINode *CurMapType =
        MapperCGF.Builder.CreatePHI(CGM.Int64Ty, 4, "omp.maptype");
    CurMapType->addIncoming(AllocMapType, AllocBB);
    CurMapType->addIncoming(ToMapType, ToBB);
    CurMapType->addIncoming(FromMapType, FromBB);
    // tofrom takes the untouched MemberMapType from the ToElse edge.
    CurMapType->addIncoming(MemberMapType, ToElseBB);

    llvm::Value *OffloadingArgs[] = {Handle,     CurBaseArg, CurBeginArg,
                                     CurSizeArg, CurMapType, CurNameArg};
    if (Info.Mappers[I]) {
      // Call the corresponding mapper function.
      // NOTE(review): cast(...) template argument appears stripped by
      // extraction — confirm against upstream.
      llvm::Function *MapperFunc = getOrCreateUserDefinedMapperFunc(
          cast(Info.Mappers[I]));
      assert(MapperFunc && "Expect a valid mapper function is available.");
      MapperCGF.EmitNounwindRuntimeCall(MapperFunc, OffloadingArgs);
    } else {
      // Call the runtime API __tgt_push_mapper_component to fill up the
      // runtime data structure.
      MapperCGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(
              CGM.getModule(), OMPRTL___tgt_push_mapper_component),
          OffloadingArgs);
    }
  }

  // Update the pointer to point to the next element that needs to be mapped,
  // and check whether we have mapped all elements.
  llvm::Type *ElemTy = PtrPHI->getType()->getPointerElementType();
  llvm::Value *PtrNext = MapperCGF.Builder.CreateConstGEP1_32(
      ElemTy, PtrPHI, /*Idx0=*/1, "omp.arraymap.next");
  // Close the loop: the PHI's second incoming value comes from the last block
  // of the body.
  PtrPHI->addIncoming(PtrNext, LastBB);
  llvm::Value *IsDone =
      MapperCGF.Builder.CreateICmpEQ(PtrNext, PtrEnd, "omp.arraymap.isdone");
  llvm::BasicBlock *ExitBB = MapperCGF.createBasicBlock("omp.arraymap.exit");
  MapperCGF.Builder.CreateCondBr(IsDone, ExitBB, BodyBB);

  MapperCGF.EmitBlock(ExitBB);
  // Emit array deletion if this is an array section and \p MapType indicates
  // that deletion is required.
  emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
                             MapName, ElementSize, DoneBB, /*IsInit=*/false);

  // Emit the function exit block.
  MapperCGF.EmitBlock(DoneBB, /*IsFinished=*/true);
  MapperCGF.FinishFunction();
  // Cache the emitted mapper and, when inside a function, remember it for
  // per-function bookkeeping.
  UDMMap.try_emplace(D, Fn);
  if (CGF) {
    auto &Decls = FunctionUDMMap.FindAndConstruct(CGF->CurFn);
    Decls.second.push_back(D);
  }
}

/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation. First, it evaluates whether an array section is mapped
/// and whether the \a MapType instructs to delete this section. If \a IsInit
/// is true, and \a MapType indicates to not delete this array, array
/// initialization code is generated. If \a IsInit is false, and \a MapType
/// indicates to not this array, array deletion code is generated.
void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
    CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *Base,
    llvm::Value *Begin, llvm::Value *Size, llvm::Value *MapType,
    llvm::Value *MapName, CharUnits ElementSize, llvm::BasicBlock *ExitBB,
    bool IsInit) {
  StringRef Prefix = IsInit ? ".init" : ".del";

  // Evaluate if this is an array section.
  llvm::BasicBlock *BodyBB =
      MapperCGF.createBasicBlock(getName({"omp.array", Prefix}));
  llvm::Value *IsArray = MapperCGF.Builder.CreateICmpSGT(
      Size, MapperCGF.Builder.getInt64(1), "omp.arrayinit.isarray");
  llvm::Value *DeleteBit = MapperCGF.Builder.CreateAnd(
      MapType,
      MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_DELETE));
  llvm::Value *DeleteCond;
  llvm::Value *Cond;
  if (IsInit) {
    // base != begin?
    llvm::Value *BaseIsBegin = MapperCGF.Builder.CreateIsNotNull(
        MapperCGF.Builder.CreatePtrDiff(Base, Begin));
    // IsPtrAndObj?
    llvm::Value *PtrAndObjBit = MapperCGF.Builder.CreateAnd(
        MapType,
        MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_PTR_AND_OBJ));
    PtrAndObjBit = MapperCGF.Builder.CreateIsNotNull(PtrAndObjBit);
    BaseIsBegin = MapperCGF.Builder.CreateAnd(BaseIsBegin, PtrAndObjBit);
    // Init runs when (array section) OR (pointer-and-object with distinct
    // base), AND the delete bit is NOT set.
    Cond = MapperCGF.Builder.CreateOr(IsArray, BaseIsBegin);
    DeleteCond = MapperCGF.Builder.CreateIsNull(
        DeleteBit, getName({"omp.array", Prefix, ".delete"}));
  } else {
    // Deletion runs only for an array section whose delete bit IS set.
    Cond = IsArray;
    DeleteCond = MapperCGF.Builder.CreateIsNotNull(
        DeleteBit, getName({"omp.array", Prefix, ".delete"}));
  }
  Cond = MapperCGF.Builder.CreateAnd(Cond, DeleteCond);
  MapperCGF.Builder.CreateCondBr(Cond, BodyBB, ExitBB);

  MapperCGF.EmitBlock(BodyBB);
  // Get the array size by multiplying element size and element number (i.e.,
  // \p Size).
  llvm::Value *ArraySize = MapperCGF.Builder.CreateNUWMul(
      Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
  // Remove OMP_MAP_TO and OMP_MAP_FROM from the map type, so that it achieves
  // memory allocation/deletion purpose only.
  llvm::Value *MapTypeArg = MapperCGF.Builder.CreateAnd(
      MapType,
      MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
                                   MappableExprsHandler::OMP_MAP_FROM)));
  MapTypeArg = MapperCGF.Builder.CreateOr(
      MapTypeArg,
      MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_IMPLICIT));

  // Call the runtime API __tgt_push_mapper_component to fill up the runtime
  // data structure.
llvm::Value *OffloadingArgs[] = {Handle, Base, Begin, ArraySize, MapTypeArg, MapName}; MapperCGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), OMPRTL___tgt_push_mapper_component), OffloadingArgs); } llvm::Function *CGOpenMPRuntime::getOrCreateUserDefinedMapperFunc( const OMPDeclareMapperDecl *D) { auto I = UDMMap.find(D); if (I != UDMMap.end()) return I->second; emitUserDefinedMapper(D); return UDMMap.lookup(D); } void CGOpenMPRuntime::emitTargetNumIterationsCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Value *DeviceID, llvm::function_ref SizeEmitter) { OpenMPDirectiveKind Kind = D.getDirectiveKind(); const OMPExecutableDirective *TD = &D; // Get nested teams distribute kind directive, if any. if (!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind)) TD = getNestedDistributeDirective(CGM.getContext(), D); if (!TD) return; const auto *LD = cast(TD); auto &&CodeGen = [LD, DeviceID, SizeEmitter, &D, this](CodeGenFunction &CGF, PrePostActionTy &) { if (llvm::Value *NumIterations = SizeEmitter(CGF, *LD)) { llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc()); llvm::Value *Args[] = {RTLoc, DeviceID, NumIterations}; CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_push_target_tripcount_mapper), Args); } }; emitInlinedDirective(CGF, OMPD_unknown, CodeGen); } void CGOpenMPRuntime::emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair Device, llvm::function_ref SizeEmitter) { if (!CGF.HaveInsertPoint()) return; assert(OutlinedFn && "Invalid outlined function!"); const bool RequiresOuterTask = D.hasClausesOfKind() || D.hasClausesOfKind(); llvm::SmallVector CapturedVars; const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target); auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF, PrePostActionTy &) { CGF.GenerateOpenMPCapturedVars(CS, 
// Interior of CGOpenMPRuntime::emitTargetCall (signature and the opening of
// the ArgsCodegen lambda are in the previous chunk).
                                   CapturedVars);
  };
  emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen);

  CodeGenFunction::OMPTargetDataInfo InputInfo;
  llvm::Value *MapTypesArray = nullptr;
  llvm::Value *MapNamesArray = nullptr;
  // Fill up the pointer arrays and transfer execution to the device.
  auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
                    &MapTypesArray, &MapNamesArray, &CS, RequiresOuterTask,
                    &CapturedVars,
                    SizeEmitter](CodeGenFunction &CGF, PrePostActionTy &) {
    if (Device.getInt() == OMPC_DEVICE_ancestor) {
      // Reverse offloading is not supported, so just execute on the host.
      if (RequiresOuterTask) {
        CapturedVars.clear();
        CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
      }
      emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
      return;
    }

    // On top of the arrays that were filled up, the target offloading call
    // takes as arguments the device id as well as the host pointer. The host
    // pointer is used by the runtime library to identify the current target
    // region, so it only has to be unique and not necessarily point to
    // anything. It could be the pointer to the outlined function that
    // implements the target region, but we aren't using that so that the
    // compiler doesn't need to keep that, and could therefore inline the host
    // function if proven worthwhile during optimization.

    // From this point on, we need to have an ID of the target region defined.
    assert(OutlinedFnID && "Invalid outlined function ID!");

    // Emit device ID if any.
    llvm::Value *DeviceID;
    if (Device.getPointer()) {
      assert((Device.getInt() == OMPC_DEVICE_unknown ||
              Device.getInt() == OMPC_DEVICE_device_num) &&
             "Expected device_num modifier.");
      llvm::Value *DevVal = CGF.EmitScalarExpr(Device.getPointer());
      DeviceID =
          CGF.Builder.CreateIntCast(DevVal, CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum =
        CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);

    // Return value of the runtime offloading call.
    llvm::Value *Return;

    llvm::Value *NumTeams = emitNumTeamsForTargetDirective(CGF, D);
    llvm::Value *NumThreads = emitNumThreadsForTargetDirective(CGF, D);

    // Source location for the ident struct
    llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());

    // Emit tripcount for the target loop-based directive.
    emitTargetNumIterationsCall(CGF, D, DeviceID, SizeEmitter);

    // NOTE(review): the clause kind checked here was stripped by extraction —
    // presumably the nowait clause; confirm upstream.
    bool HasNowait = D.hasClausesOfKind();
    // The target region is an outlined function launched by the runtime
    // via calls __tgt_target() or __tgt_target_teams().
    //
    // __tgt_target() launches a target region with one team and one thread,
    // executing a serial region. This master thread may in turn launch
    // more threads within its team upon encountering a parallel region,
    // however, no additional teams can be launched on the device.
    //
    // __tgt_target_teams() launches a target region with one or more teams,
    // each with one or more threads. This call is required for target
    // constructs such as:
    //  'target teams'
    //  'target' / 'teams'
    //  'target teams distribute parallel for'
    //  'target parallel'
    // and so on.
    //
    // Note that on the host and CPU targets, the runtime implementation of
    // these calls simply call the outlined function without forking threads.
    // The outlined functions themselves have runtime calls to
    // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
    // the compiler in emitTeamsCall() and emitParallelCall().
    //
    // In contrast, on the NVPTX target, the implementation of
    // __tgt_target_teams() launches a GPU kernel with the requested number
    // of teams and threads so no additional calls to the runtime are required.
    if (NumTeams) {
      // If we have NumTeams defined this means that we have an enclosed teams
      // region. Therefore we also expect to have NumThreads defined. These two
      // values should be defined in the presence of a teams directive,
      // regardless of having any clauses associated. If the user is using teams
      // but no clauses, these two values will be the default that should be
      // passed to the runtime library - a 32-bit integer with the value zero.
      assert(NumThreads && "Thread limit expression should be available along "
                           "with number of teams.");
      SmallVector OffloadingArgs = {
          RTLoc,
          DeviceID,
          OutlinedFnID,
          PointerNum,
          InputInfo.BasePointersArray.getPointer(),
          InputInfo.PointersArray.getPointer(),
          InputInfo.SizesArray.getPointer(),
          MapTypesArray,
          MapNamesArray,
          InputInfo.MappersArray.getPointer(),
          NumTeams,
          NumThreads};
      if (HasNowait) {
        // Add int32_t depNum = 0, void *depList = nullptr, int32_t
        // noAliasDepNum = 0, void *noAliasDepList = nullptr.
        OffloadingArgs.push_back(CGF.Builder.getInt32(0));
        OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
        OffloadingArgs.push_back(CGF.Builder.getInt32(0));
        OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
      }
      Return = CGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(
              CGM.getModule(),
              HasNowait ? OMPRTL___tgt_target_teams_nowait_mapper
                        : OMPRTL___tgt_target_teams_mapper),
          OffloadingArgs);
    } else {
      SmallVector OffloadingArgs = {
          RTLoc,
          DeviceID,
          OutlinedFnID,
          PointerNum,
          InputInfo.BasePointersArray.getPointer(),
          InputInfo.PointersArray.getPointer(),
          InputInfo.SizesArray.getPointer(),
          MapTypesArray,
          MapNamesArray,
          InputInfo.MappersArray.getPointer()};
      if (HasNowait) {
        // Add int32_t depNum = 0, void *depList = nullptr, int32_t
        // noAliasDepNum = 0, void *noAliasDepList = nullptr.
        OffloadingArgs.push_back(CGF.Builder.getInt32(0));
        OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
        OffloadingArgs.push_back(CGF.Builder.getInt32(0));
        OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
      }
      Return = CGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(
              CGM.getModule(), HasNowait ? OMPRTL___tgt_target_nowait_mapper
                                         : OMPRTL___tgt_target_mapper),
          OffloadingArgs);
    }

    // Check the error code and execute the host version if required.
    llvm::BasicBlock *OffloadFailedBlock =
        CGF.createBasicBlock("omp_offload.failed");
    llvm::BasicBlock *OffloadContBlock =
        CGF.createBasicBlock("omp_offload.cont");
    llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
    CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);

    CGF.EmitBlock(OffloadFailedBlock);
    if (RequiresOuterTask) {
      CapturedVars.clear();
      CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
    }
    emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
    CGF.EmitBranch(OffloadContBlock);

    CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
  };

  // Notify that the host version must be executed.
  auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars,
                    RequiresOuterTask](CodeGenFunction &CGF,
                                       PrePostActionTy &) {
    if (RequiresOuterTask) {
      CapturedVars.clear();
      CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
    }
    emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
  };

  auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
                          &MapNamesArray, &CapturedVars, RequiresOuterTask,
                          &CS](CodeGenFunction &CGF, PrePostActionTy &) {
    // Fill up the arrays with all the captured variables.
    MappableExprsHandler::MapCombinedInfoTy CombinedInfo;

    // Get mappable expression information.
    MappableExprsHandler MEHandler(D, CGF);
    llvm::DenseMap LambdaPointers;
    llvm::DenseSet> MappedVarSet;

    auto RI = CS.getCapturedRecordDecl()->field_begin();
    auto *CV = CapturedVars.begin();
    for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
                                              CE = CS.capture_end();
         CI != CE; ++CI, ++RI, ++CV) {
      MappableExprsHandler::MapCombinedInfoTy CurInfo;
      MappableExprsHandler::StructRangeInfoTy PartialStruct;

      // VLA sizes are passed to the outlined region by copy and do not have map
      // information associated.
      if (CI->capturesVariableArrayType()) {
        CurInfo.Exprs.push_back(nullptr);
        CurInfo.BasePointers.push_back(*CV);
        CurInfo.Pointers.push_back(*CV);
        CurInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
            CGF.getTypeSize(RI->getType()), CGF.Int64Ty, /*isSigned=*/true));
        // Copy to the device as an argument. No need to retrieve it.
        CurInfo.Types.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
                                MappableExprsHandler::OMP_MAP_TARGET_PARAM |
                                MappableExprsHandler::OMP_MAP_IMPLICIT);
        CurInfo.Mappers.push_back(nullptr);
      } else {
        // If we have any information in the map clause, we use it, otherwise we
        // just do a default mapping.
        MEHandler.generateInfoForCapture(CI, *CV, CurInfo, PartialStruct);
        if (!CI->capturesThis())
          MappedVarSet.insert(CI->getCapturedVar());
        else
          MappedVarSet.insert(nullptr);
        if (CurInfo.BasePointers.empty() && !PartialStruct.Base.isValid())
          MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurInfo);
        // Generate correct mapping for variables captured by reference in
        // lambdas.
        if (CI->capturesVariable())
          MEHandler.generateInfoForLambdaCaptures(CI->getCapturedVar(), *CV,
                                                  CurInfo, LambdaPointers);
      }
      // We expect to have at least an element of information for this capture.
      assert((!CurInfo.BasePointers.empty() || PartialStruct.Base.isValid()) &&
             "Non-existing map pointer for capture!");
      assert(CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
             CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
             CurInfo.BasePointers.size() == CurInfo.Types.size() &&
             CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
             "Inconsistent map information sizes!");

      // If there is an entry in PartialStruct it means we have a struct with
      // individual members mapped. Emit an extra combined entry.
      if (PartialStruct.Base.isValid()) {
        CombinedInfo.append(PartialStruct.PreliminaryMapData);
        MEHandler.emitCombinedEntry(
            CombinedInfo, CurInfo.Types, PartialStruct, nullptr,
            !PartialStruct.PreliminaryMapData.BasePointers.empty());
      }

      // We need to append the results of this capture to what we already have.
      CombinedInfo.append(CurInfo);
    }
    // Adjust MEMBER_OF flags for the lambdas captures.
    MEHandler.adjustMemberOfForLambdaCaptures(
        LambdaPointers, CombinedInfo.BasePointers, CombinedInfo.Pointers,
        CombinedInfo.Types);
    // Map any list items in a map clause that were not captures because they
    // weren't referenced within the construct.
    MEHandler.generateAllInfo(CombinedInfo, MappedVarSet);

    TargetDataInfo Info;
    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder);
    emitOffloadingArraysArgument(
        CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
        Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
        {/*ForEndTask=*/false});
    InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
    InputInfo.BasePointersArray =
        Address(Info.BasePointersArray, CGM.getPointerAlign());
    InputInfo.PointersArray =
        Address(Info.PointersArray, CGM.getPointerAlign());
    InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
    InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
    MapTypesArray = Info.MapTypesArray;
    MapNamesArray = Info.MapNamesArray;
    if (RequiresOuterTask)
      CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
    else
      emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
  };

  auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
                             CodeGenFunction &CGF, PrePostActionTy &) {
    if (RequiresOuterTask) {
      CodeGenFunction::OMPTargetDataInfo InputInfo;
      CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
    } else {
      emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
    }
  };

  // If we have a target function ID it means that we need to support
  // offloading; otherwise we just execute on the host.
// End of CGOpenMPRuntime::emitTargetCall: we need to execute on the host
// regardless of the conditional in the if clause if, e.g., the user did not
// specify target triples.
  if (OutlinedFnID) {
    if (IfCond) {
      emitIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
    } else {
      RegionCodeGenTy ThenRCG(TargetThenGen);
      ThenRCG(CGF);
    }
  } else {
    RegionCodeGenTy ElseRCG(TargetElseGen);
    ElseRCG(CGF);
  }
}

/// Recursively scan \p S for target-executable directives and emit the device
/// function for each one that is registered as an offload entry point.
/// NOTE(review): template arguments were stripped from this extraction (the
/// isa/cast/dyn_cast calls below lack their <...> arguments) — restore from
/// upstream before compiling.
void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
                                                    StringRef ParentName) {
  if (!S)
    return;

  // Codegen OMP target directives that offload compute to the device.
  bool RequiresDeviceCodegen =
      isa(S) &&
      isOpenMPTargetExecutionDirective(
          cast(S)->getDirectiveKind());

  if (RequiresDeviceCodegen) {
    const auto &E = *cast(S);
    unsigned DeviceID;
    unsigned FileID;
    unsigned Line;
    getTargetEntryUniqueInfo(CGM.getContext(), E.getBeginLoc(), DeviceID,
                             FileID, Line);

    // Is this a target region that should not be emitted as an entry point? If
    // so just signal we are done with this target region.
    if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID,
                                                            ParentName, Line))
      return;

    // Dispatch to the device-function emitter matching the directive kind.
    switch (E.getDirectiveKind()) {
    case OMPD_target:
      CodeGenFunction::EmitOMPTargetDeviceFunction(CGM, ParentName,
                                                   cast(E));
      break;
    case OMPD_target_parallel:
      CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
          CGM, ParentName, cast(E));
      break;
    case OMPD_target_teams:
      CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
          CGM, ParentName, cast(E));
      break;
    case OMPD_target_teams_distribute:
      CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
          CGM, ParentName, cast(E));
      break;
    case OMPD_target_teams_distribute_simd:
      CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
          CGM, ParentName, cast(E));
      break;
    case OMPD_target_parallel_for:
      CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
          CGM, ParentName, cast(E));
      break;
    case OMPD_target_parallel_for_simd:
      CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
          CGM, ParentName, cast(E));
      break;
    case OMPD_target_simd:
      CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
          CGM, ParentName, cast(E));
      break;
    case OMPD_target_teams_distribute_parallel_for:
      CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
          CGM, ParentName, cast(E));
      break;
    case OMPD_target_teams_distribute_parallel_for_simd:
      CodeGenFunction::
          EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
              CGM, ParentName, cast(E));
      break;
    // All remaining directive kinds cannot appear as a target entry point.
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_tile:
    case OMPD_unroll:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unknown target directive for OpenMP device codegen.");
    }
    return;
  }

  if (const auto *E = dyn_cast(S)) {
    if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
      return;

    scanForTargetRegionsFunctions(E->getRawStmt(), ParentName);
    return;
  }

  // If this is a lambda function, look into its body.
  if (const auto *L = dyn_cast(S))
    S = L->getBody();

  // Keep looking for target regions recursively.
  for (const Stmt *II : S->children())
    scanForTargetRegionsFunctions(II, ParentName);
}

/// Returns true when \p VD carries a declare-target device_type that excludes
/// the current compilation side (host vs. device).
static bool isAssumedToBeNotEmitted(const ValueDecl *VD, bool IsDevice) {
  Optional DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
  if (!DevTy)
    return false;
  // Do not emit device_type(nohost) functions for the host.
  if (!IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
    return true;
  // Do not emit device_type(host) functions for the device.
  if (IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_Host)
    return true;
  return false;
}

/// Decide whether normal codegen for function \p GD must be suppressed; as a
/// side effect, scans the function body for target regions on the device side.
bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
  // If emitting code for the host, we do not process FD here. Instead we do
  // the normal code generation.
  if (!CGM.getLangOpts().OpenMPIsDevice) {
    if (const auto *FD = dyn_cast(GD.getDecl()))
      if (isAssumedToBeNotEmitted(cast(FD),
                                  CGM.getLangOpts().OpenMPIsDevice))
        return true;
    return false;
  }

  const ValueDecl *VD = cast(GD.getDecl());
  // Try to detect target regions in the function.
  if (const auto *FD = dyn_cast(VD)) {
    StringRef Name = CGM.getMangledName(GD);
    scanForTargetRegionsFunctions(FD->getBody(), Name);
    if (isAssumedToBeNotEmitted(cast(FD),
                                CGM.getLangOpts().OpenMPIsDevice))
      return true;
  }

  // Do not emit function if it is not marked as declare target.
  return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
         AlreadyEmittedTargetDecls.count(VD) == 0;
}

/// Decide whether normal codegen for global variable \p GD must be suppressed
/// on the current compilation side.
bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
  if (isAssumedToBeNotEmitted(cast(GD.getDecl()),
                              CGM.getLangOpts().OpenMPIsDevice))
    return true;

  if (!CGM.getLangOpts().OpenMPIsDevice)
    return false;

  // Check if there are Ctors/Dtors in this declaration and look for target
  // regions in it. We use the complete variant to produce the kernel name
  // mangling.
// Interior of CGOpenMPRuntime::emitTargetGlobalVariable (signature in the
// previous chunk): scan the variable's ctors/dtors for target regions, then
// decide whether emission must be deferred.
  QualType RDTy = cast(GD.getDecl())->getType();
  if (const auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
    for (const CXXConstructorDecl *Ctor : RD->ctors()) {
      StringRef ParentName =
          CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
      scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
    }
    if (const CXXDestructorDecl *Dtor = RD->getDestructor()) {
      StringRef ParentName =
          CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
      scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
    }
  }

  // Do not emit variable if it is not marked as declare target.
  llvm::Optional Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
          cast(GD.getDecl()));
  if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
      (*Res == OMPDeclareTargetDeclAttr::MT_To &&
       HasRequiresUnifiedSharedMemory)) {
    // Link-clause variables (and to-clause under unified shared memory) are
    // emitted later via emitDeferredTargetDecls().
    DeferredGlobalVariables.insert(cast(GD.getDecl()));
    return true;
  }
  return false;
}

/// Register \p VD as a declare-target global with the offload entries table so
/// the device runtime can associate host and device copies of the variable.
void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
                                                   llvm::Constant *Addr) {
  if (CGM.getLangOpts().OMPTargetTriples.empty() &&
      !CGM.getLangOpts().OpenMPIsDevice)
    return;

  // If we have host/nohost variables, they do not need to be registered.
  Optional DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
  if (DevTy && DevTy.getValue() != OMPDeclareTargetDeclAttr::DT_Any)
    return;

  llvm::Optional Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  if (!Res) {
    if (CGM.getLangOpts().OpenMPIsDevice) {
      // Register non-target variables being emitted in device code (debug info
      // may cause this).
      StringRef VarName = CGM.getMangledName(VD);
      EmittedNonTargetVariables.try_emplace(VarName, Addr);
    }
    return;
  }
  // Register declare target variables.
  OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
  StringRef VarName;
  CharUnits VarSize;
  llvm::GlobalValue::LinkageTypes Linkage;

  if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
      !HasRequiresUnifiedSharedMemory) {
    Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
    VarName = CGM.getMangledName(VD);
    if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
      VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
      assert(!VarSize.isZero() && "Expected non-zero size of the variable");
    } else {
      VarSize = CharUnits::Zero();
    }
    Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
    // Temp solution to prevent optimizations of the internal variables.
    if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
      // Do not create a "ref-variable" if the original is not also available
      // on the host.
      if (!OffloadEntriesInfoManager.hasDeviceGlobalVarEntryInfo(VarName))
        return;
      std::string RefName = getName({VarName, "ref"});
      if (!CGM.GetGlobalValue(RefName)) {
        llvm::Constant *AddrRef =
            getOrCreateInternalVariable(Addr->getType(), RefName);
        auto *GVAddrRef = cast(AddrRef);
        GVAddrRef->setConstant(/*Val=*/true);
        GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
        GVAddrRef->setInitializer(Addr);
        CGM.addCompilerUsedGlobal(GVAddrRef);
      }
    }
  } else {
    assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
            (*Res == OMPDeclareTargetDeclAttr::MT_To &&
             HasRequiresUnifiedSharedMemory)) &&
           "Declare target attribute must link or to with unified memory.");
    if (*Res == OMPDeclareTargetDeclAttr::MT_Link)
      Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
    else
      Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;

    if (CGM.getLangOpts().OpenMPIsDevice) {
      VarName = Addr->getName();
      Addr = nullptr;
    } else {
      VarName = getAddrOfDeclareTargetVar(VD).getName();
      Addr = cast(getAddrOfDeclareTargetVar(VD).getPointer());
    }
    VarSize = CGM.getPointerSize();
    Linkage = llvm::GlobalValue::WeakAnyLinkage;
  }

  OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
      VarName, Addr, VarSize, Flags, Linkage);
}

/// Route a global declaration to the function or variable handler above.
bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
  if (isa(GD.getDecl()) ||
      isa(GD.getDecl()))
    return emitTargetFunctions(GD);

  return emitTargetGlobalVariable(GD);
}

/// Emit the declare-target globals whose emission was deferred earlier
/// (link clause, or to clause under unified shared memory).
void CGOpenMPRuntime::emitDeferredTargetDecls() const {
  for (const VarDecl *VD : DeferredGlobalVariables) {
    llvm::Optional Res =
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
    if (!Res)
      continue;
    if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
        !HasRequiresUnifiedSharedMemory) {
      CGM.EmitGlobal(VD);
    } else {
      assert((*Res == OMPDeclareTargetDeclAttr::MT_Link ||
              (*Res == OMPDeclareTargetDeclAttr::MT_To &&
               HasRequiresUnifiedSharedMemory)) &&
             "Expected link clause or to clause with unified memory.");
      (void)CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
    }
  }
}

/// Generic CGOpenMPRuntime only validates the directive; GPU subclasses
/// override this to adjust lambda-related mappings.
void CGOpenMPRuntime::adjustTargetSpecificDataForLambdas(
    CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
  assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
         " Expected target-based directive.");
}

/// Record the effects of a 'requires' directive: unified shared memory and the
/// default atomic memory ordering.
void CGOpenMPRuntime::processRequiresDirective(const OMPRequiresDecl *D) {
  for (const OMPClause *Clause : D->clauselists()) {
    if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
      HasRequiresUnifiedSharedMemory = true;
    } else if (const auto *AC = dyn_cast(Clause)) {
      switch (AC->getAtomicDefaultMemOrderKind()) {
      case OMPC_ATOMIC_DEFAULT_MEM_ORDER_acq_rel:
        RequiresAtomicOrdering = llvm::AtomicOrdering::AcquireRelease;
        break;
      case OMPC_ATOMIC_DEFAULT_MEM_ORDER_seq_cst:
        RequiresAtomicOrdering = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      case OMPC_ATOMIC_DEFAULT_MEM_ORDER_relaxed:
        RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
        break;
      case OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown:
        break;
      }
    }
  }
}

/// Return the atomic ordering selected by 'requires atomic_default_mem_order'.
llvm::AtomicOrdering CGOpenMPRuntime::getDefaultMemoryOrdering() const {
  return RequiresAtomicOrdering;
}

/// If \p VD has an allocate attribute, report the address space its allocator
/// implies via \p AS and return true.
bool CGOpenMPRuntime::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
                                                       LangAS &AS) {
  if (!VD || !VD->hasAttr())
    return false;
  const auto *A = VD->getAttr();
  switch (A->getAllocatorType()) {
  case OMPAllocateDeclAttr::OMPNullMemAlloc:
  case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
    // Not supported, fallback to the default mem space.
  case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
  case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
  case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
  case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
  case OMPAllocateDeclAttr::OMPThreadMemAlloc:
  case OMPAllocateDeclAttr::OMPConstMemAlloc:
  case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
    AS = LangAS::Default;
    return true;
  case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
    llvm_unreachable("Expected predefined allocator for the variables with the "
                     "static storage.");
  }
  return false;
}

/// True when a 'requires unified_shared_memory' directive was seen.
bool CGOpenMPRuntime::hasRequiresUnifiedSharedMemory() const {
  return HasRequiresUnifiedSharedMemory;
}

// RAII that temporarily disables automatic declare-target marking while
// compiling for the device; restores the previous state on destruction.
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
    CodeGenModule &CGM)
    : CGM(CGM) {
  if (CGM.getLangOpts().OpenMPIsDevice) {
    SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
    CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
  }
}

CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
  if (CGM.getLangOpts().OpenMPIsDevice)
    CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
}

/// On the device side, decide whether \p GD should be emitted as a global;
/// records first-time decisions in AlreadyEmittedTargetDecls.
bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
  if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
    return true;

  const auto *D = cast(GD.getDecl());
  // Do not emit function if it is marked as declare target as it was already
  // emitted.
// Tail of CGOpenMPRuntime::markAsGlobalTarget (signature in the previous
// chunk).
  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
    if (D->hasBody() && AlreadyEmittedTargetDecls.count(D) == 0) {
      // NOTE(review): the template argument of this dyn_cast_or_null was
      // stripped by extraction (upstream casts to llvm::Function) — restore.
      if (auto *F = dyn_cast_or_null(
              CGM.GetGlobalValue(CGM.getMangledName(GD))))
        return !F->isDeclaration();
      return false;
    }
    return true;
  }

  return !AlreadyEmittedTargetDecls.insert(D).second;
}

/// Create the constructor-like function that registers this translation unit's
/// 'requires' flags with the runtime (__tgt_register_requires); returns
/// nullptr when no registration is needed.
llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
  // If we don't have entries or if we are emitting code for the device, we
  // don't need to do anything.
  if (CGM.getLangOpts().OMPTargetTriples.empty() ||
      CGM.getLangOpts().OpenMPSimd || CGM.getLangOpts().OpenMPIsDevice ||
      (OffloadEntriesInfoManager.empty() && !HasEmittedDeclareTargetRegion &&
       !HasEmittedTargetRegion))
    return nullptr;

  // Create and register the function that handles the requires directives.
  ASTContext &C = CGM.getContext();

  llvm::Function *RequiresRegFn;
  {
    CodeGenFunction CGF(CGM);
    const auto &FI = CGM.getTypes().arrangeNullaryFunction();
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
    std::string ReqName = getName({"omp_offloading", "requires_reg"});
    RequiresRegFn = CGM.CreateGlobalInitOrCleanUpFunction(FTy, ReqName, FI);
    CGF.StartFunction(GlobalDecl(), C.VoidTy, RequiresRegFn, FI, {});
    OpenMPOffloadingRequiresDirFlags Flags = OMP_REQ_NONE;
    // TODO: check for other requires clauses.
    // The requires directive takes effect only when a target region is
    // present in the compilation unit. Otherwise it is ignored and not
    // passed to the runtime. This avoids the runtime from throwing an error
    // for mismatching requires clauses across compilation units that don't
    // contain at least 1 target region.
    assert((HasEmittedTargetRegion || HasEmittedDeclareTargetRegion ||
            !OffloadEntriesInfoManager.empty()) &&
           "Target or declare target region expected.");
    if (HasRequiresUnifiedSharedMemory)
      Flags = OMP_REQ_UNIFIED_SHARED_MEMORY;
    CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
                                              OMPRTL___tgt_register_requires),
        llvm::ConstantInt::get(CGM.Int64Ty, Flags));
    CGF.FinishFunction();
  }
  return RequiresRegFn;
}

/// Emit a __kmpc_fork_teams call that launches \p OutlinedFn with the
/// variables captured for the teams region.
void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
                                    const OMPExecutableDirective &D,
                                    SourceLocation Loc,
                                    llvm::Function *OutlinedFn,
                                    ArrayRef CapturedVars) {
  if (!CGF.HaveInsertPoint())
    return;

  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  CodeGenFunction::RunCleanupsScope Scope(CGF);
  // Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn);
  llvm::Value *Args[] = {
      RTLoc,
      CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
      CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
  llvm::SmallVector RealArgs;
  RealArgs.append(std::begin(Args), std::end(Args));
  RealArgs.append(CapturedVars.begin(), CapturedVars.end());

  llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
      CGM.getModule(), OMPRTL___kmpc_fork_teams);
  CGF.EmitRuntimeCall(RTLFn, RealArgs);
}

/// Emit a __kmpc_push_num_teams call for the num_teams/thread_limit clauses;
/// absent expressions are passed to the runtime as zero (the default).
void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
                                         const Expr *NumTeams,
                                         const Expr *ThreadLimit,
                                         SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;

  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);

  llvm::Value *NumTeamsVal =
      NumTeams
          ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
                                      CGF.CGM.Int32Ty, /* isSigned = */ true)
          : CGF.Builder.getInt32(0);

  llvm::Value *ThreadLimitVal =
      ThreadLimit
          ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
                                      CGF.CGM.Int32Ty, /* isSigned = */ true)
          : CGF.Builder.getInt32(0);

  // Build call __kmpc_push_num_teams(&loc, global_tid, num_teams,
  // thread_limit)
  llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
                                     ThreadLimitVal};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_push_num_teams),
                      PushNumTeamsArgs);
}

/// Emit the begin/end runtime calls that open and close a 'target data'
/// environment, running \p CodeGen for the region body in between (duplicated
/// when device-pointer privatization requires it).
void CGOpenMPRuntime::emitTargetDataCalls(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
  if (!CGF.HaveInsertPoint())
    return;

  // Action used to replace the default codegen action and turn privatization
  // off.
  PrePostActionTy NoPrivAction;

  // Generate the code for the opening of the data environment. Capture all the
  // arguments of the runtime call by reference because they are used in the
  // closing of the region.
  auto &&BeginThenGen = [this, &D, Device, &Info,
                         &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
    // Fill up the arrays with all the mapped variables.
    MappableExprsHandler::MapCombinedInfoTy CombinedInfo;

    // Get map clause information.
    MappableExprsHandler MEHandler(D, CGF);
    MEHandler.generateAllInfo(CombinedInfo);

    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
                         /*IsNonContiguous=*/true);

    llvm::Value *BasePointersArrayArg = nullptr;
    llvm::Value *PointersArrayArg = nullptr;
    llvm::Value *SizesArrayArg = nullptr;
    llvm::Value *MapTypesArrayArg = nullptr;
    llvm::Value *MapNamesArrayArg = nullptr;
    llvm::Value *MappersArrayArg = nullptr;
    emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
                                 SizesArrayArg, MapTypesArrayArg,
                                 MapNamesArrayArg, MappersArrayArg, Info);

    // Emit device ID if any.
// Interior of CGOpenMPRuntime::emitTargetDataCalls (inside the BeginThenGen
// lambda; the function head is in the previous chunk).
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);

    // Source location for the ident struct
    llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());

    llvm::Value *OffloadingArgs[] = {RTLoc,
                                     DeviceID,
                                     PointerNum,
                                     BasePointersArrayArg,
                                     PointersArrayArg,
                                     SizesArrayArg,
                                     MapTypesArrayArg,
                                     MapNamesArrayArg,
                                     MappersArrayArg};
    CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___tgt_target_data_begin_mapper),
        OffloadingArgs);

    // If device pointer privatization is required, emit the body of the region
    // here. It will have to be duplicated: with and without privatization.
    if (!Info.CaptureDeviceAddrMap.empty())
      CodeGen(CGF);
  };

  // Generate code for the closing of the data region.
  auto &&EndThenGen = [this, Device, &Info, &D](CodeGenFunction &CGF,
                                                PrePostActionTy &) {
    assert(Info.isValid() && "Invalid data environment closing arguments.");

    llvm::Value *BasePointersArrayArg = nullptr;
    llvm::Value *PointersArrayArg = nullptr;
    llvm::Value *SizesArrayArg = nullptr;
    llvm::Value *MapTypesArrayArg = nullptr;
    llvm::Value *MapNamesArrayArg = nullptr;
    llvm::Value *MappersArrayArg = nullptr;
    emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
                                 SizesArrayArg, MapTypesArrayArg,
                                 MapNamesArrayArg, MappersArrayArg, Info,
                                 {/*ForEndCall=*/true});

    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);

    // Source location for the ident struct
    llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());

    llvm::Value *OffloadingArgs[] = {RTLoc,
                                     DeviceID,
                                     PointerNum,
                                     BasePointersArrayArg,
                                     PointersArrayArg,
                                     SizesArrayArg,
                                     MapTypesArrayArg,
                                     MapNamesArrayArg,
                                     MappersArrayArg};
    CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___tgt_target_data_end_mapper),
        OffloadingArgs);
  };

  // If we need device pointer privatization, we need to emit the body of the
  // region with no privatization in the 'else' branch of the conditional.
  // Otherwise, we don't have to do anything.
  auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
                                                         PrePostActionTy &) {
    if (!Info.CaptureDeviceAddrMap.empty()) {
      CodeGen.setAction(NoPrivAction);
      CodeGen(CGF);
    }
  };

  // We don't have to do anything to close the region if the if clause evaluates
  // to false.
  auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};

  if (IfCond) {
    emitIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
  } else {
    RegionCodeGenTy RCG(BeginThenGen);
    RCG(CGF);
  }

  // If we don't require privatization of device pointers, we emit the body in
  // between the runtime calls. This avoids duplicating the body code.
  if (Info.CaptureDeviceAddrMap.empty()) {
    CodeGen.setAction(NoPrivAction);
    CodeGen(CGF);
  }

  if (IfCond) {
    emitIfClause(CGF, IfCond, EndThenGen, EndElseGen);
  } else {
    RegionCodeGenTy RCG(EndThenGen);
    RCG(CGF);
  }
}

/// Emit the single runtime call that implements a standalone target data
/// directive (enter data / exit data / update), choosing the matching
/// __tgt_target_data_* entry point.
/// NOTE(review): template arguments were stripped from this extraction (the
/// isa<> checks in the assert and hasClausesOfKind below) — restore upstream.
void CGOpenMPRuntime::emitTargetDataStandAloneCall(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device) {
  if (!CGF.HaveInsertPoint())
    return;

  assert((isa(D) || isa(D) || isa(D)) &&
         "Expecting either target enter, exit data, or update directives.");

  CodeGenFunction::OMPTargetDataInfo InputInfo;
  llvm::Value *MapTypesArray = nullptr;
  llvm::Value *MapNamesArray = nullptr;
  // Generate the code for the opening of the data environment.
  auto &&ThenGen = [this, &D, Device, &InputInfo, &MapTypesArray,
                    &MapNamesArray](CodeGenFunction &CGF, PrePostActionTy &) {
    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Constant *PointerNum =
        CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);

    // Source location for the ident struct
    llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());

    llvm::Value *OffloadingArgs[] = {RTLoc,
                                     DeviceID,
                                     PointerNum,
                                     InputInfo.BasePointersArray.getPointer(),
                                     InputInfo.PointersArray.getPointer(),
                                     InputInfo.SizesArray.getPointer(),
                                     MapTypesArray,
                                     MapNamesArray,
                                     InputInfo.MappersArray.getPointer()};

    // Select the right runtime function call for each standalone
    // directive.
    const bool HasNowait = D.hasClausesOfKind();
    RuntimeFunction RTLFn;
    switch (D.getDirectiveKind()) {
    case OMPD_target_enter_data:
      RTLFn = HasNowait ? OMPRTL___tgt_target_data_begin_nowait_mapper
                        : OMPRTL___tgt_target_data_begin_mapper;
      break;
    case OMPD_target_exit_data:
      RTLFn = HasNowait ? OMPRTL___tgt_target_data_end_nowait_mapper
                        : OMPRTL___tgt_target_data_end_mapper;
      break;
    case OMPD_target_update:
      RTLFn = HasNowait ? OMPRTL___tgt_target_data_update_nowait_mapper
                        : OMPRTL___tgt_target_data_update_mapper;
      break;
    // Any other directive kind cannot reach this emitter.
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_tile:
    case OMPD_unroll:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_target:
    case OMPD_target_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected standalone target data directive.");
      break;
    }
    CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), RTLFn),
        OffloadingArgs);
  };

  auto &&TargetThenGen = [this,
&ThenGen, &D, &InputInfo, &MapTypesArray, &MapNamesArray](CodeGenFunction &CGF, PrePostActionTy &) { // Fill up the arrays with all the mapped variables. MappableExprsHandler::MapCombinedInfoTy CombinedInfo; // Get map clause information. MappableExprsHandler MEHandler(D, CGF); MEHandler.generateAllInfo(CombinedInfo); TargetDataInfo Info; // Fill up the arrays and create the arguments. emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder, /*IsNonContiguous=*/true); bool RequiresOuterTask = D.hasClausesOfKind() || D.hasClausesOfKind(); emitOffloadingArraysArgument( CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray, Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info, {/*ForEndTask=*/false}); InputInfo.NumberOfTargetItems = Info.NumberOfPtrs; InputInfo.BasePointersArray = Address(Info.BasePointersArray, CGM.getPointerAlign()); InputInfo.PointersArray = Address(Info.PointersArray, CGM.getPointerAlign()); InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign()); InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign()); MapTypesArray = Info.MapTypesArray; MapNamesArray = Info.MapNamesArray; if (RequiresOuterTask) CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo); else emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen); }; if (IfCond) { emitIfClause(CGF, IfCond, TargetThenGen, [](CodeGenFunction &CGF, PrePostActionTy &) {}); } else { RegionCodeGenTy ThenRCG(TargetThenGen); ThenRCG(CGF); } } namespace { /// Kind of parameter in a function with 'declare simd' directive. enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector }; /// Attribute set of the parameter. struct ParamAttrTy { ParamKindTy Kind = Vector; llvm::APSInt StrideOrArg; llvm::APSInt Alignment; }; } // namespace static unsigned evaluateCDTSize(const FunctionDecl *FD, ArrayRef ParamAttrs) { // Every vector variant of a SIMD-enabled function has a vector length (VLEN). 
// If OpenMP clause "simdlen" is used, the VLEN is the value of the argument
// of that clause. The VLEN value must be a power of 2.
// Otherwise the notion of the function's "characteristic data type" (CDT)
// is used to compute the vector length.
// CDT is defined in the following order:
//   a) For non-void function, the CDT is the return type.
//   b) If the function has any non-uniform, non-linear parameters, then the
//      CDT is the type of the first such parameter.
//   c) If the CDT determined by a) or b) above is struct, union, or class
//      type which is pass-by-value (except for the type that maps to the
//      built-in complex data type), the characteristic data type is int.
//   d) If none of the above three cases is applicable, the CDT is int.
// The VLEN is then determined based on the CDT and the size of vector
// register of that ISA for which current vector version is generated. The
// VLEN is computed using the formula below:
//   VLEN = sizeof(vector_register) / sizeof(CDT),
// where the vector register size is specified in section 3.2.1 Registers and
// the Stack Frame of the original AMD64 ABI document.
QualType RetType = FD->getReturnType(); if (RetType.isNull()) return 0; ASTContext &C = FD->getASTContext(); QualType CDT; if (!RetType.isNull() && !RetType->isVoidType()) { CDT = RetType; } else { unsigned Offset = 0; if (const auto *MD = dyn_cast(FD)) { if (ParamAttrs[Offset].Kind == Vector) CDT = C.getPointerType(C.getRecordType(MD->getParent())); ++Offset; } if (CDT.isNull()) { for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) { if (ParamAttrs[I + Offset].Kind == Vector) { CDT = FD->getParamDecl(I)->getType(); break; } } } } if (CDT.isNull()) CDT = C.IntTy; CDT = CDT->getCanonicalTypeUnqualified(); if (CDT->isRecordType() || CDT->isUnionType()) CDT = C.IntTy; return C.getTypeSize(CDT); } static void emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn, const llvm::APSInt &VLENVal, ArrayRef ParamAttrs, OMPDeclareSimdDeclAttr::BranchStateTy State) { struct ISADataTy { char ISA; unsigned VecRegSize; }; ISADataTy ISAData[] = { { 'b', 128 }, // SSE { 'c', 256 }, // AVX { 'd', 256 }, // AVX2 { 'e', 512 }, // AVX512 }; llvm::SmallVector Masked; switch (State) { case OMPDeclareSimdDeclAttr::BS_Undefined: Masked.push_back('N'); Masked.push_back('M'); break; case OMPDeclareSimdDeclAttr::BS_Notinbranch: Masked.push_back('N'); break; case OMPDeclareSimdDeclAttr::BS_Inbranch: Masked.push_back('M'); break; } for (char Mask : Masked) { for (const ISADataTy &Data : ISAData) { SmallString<256> Buffer; llvm::raw_svector_ostream Out(Buffer); Out << "_ZGV" << Data.ISA << Mask; if (!VLENVal) { unsigned NumElts = evaluateCDTSize(FD, ParamAttrs); assert(NumElts && "Non-zero simdlen/cdtsize expected"); Out << llvm::APSInt::getUnsigned(Data.VecRegSize / NumElts); } else { Out << VLENVal; } for (const ParamAttrTy &ParamAttr : ParamAttrs) { switch (ParamAttr.Kind){ case LinearWithVarStride: Out << 's' << ParamAttr.StrideOrArg; break; case Linear: Out << 'l'; if (ParamAttr.StrideOrArg != 1) Out << ParamAttr.StrideOrArg; break; case Uniform: Out << 'u'; break; case 
Vector: Out << 'v'; break; } if (!!ParamAttr.Alignment) Out << 'a' << ParamAttr.Alignment; } Out << '_' << Fn->getName(); Fn->addFnAttr(Out.str()); } } } // This are the Functions that are needed to mangle the name of the // vector functions generated by the compiler, according to the rules // defined in the "Vector Function ABI specifications for AArch64", // available at // https://developer.arm.com/products/software-development-tools/hpc/arm-compiler-for-hpc/vector-function-abi. /// Maps To Vector (MTV), as defined in 3.1.1 of the AAVFABI. /// /// TODO: Need to implement the behavior for reference marked with a /// var or no linear modifiers (1.b in the section). For this, we /// need to extend ParamKindTy to support the linear modifiers. static bool getAArch64MTV(QualType QT, ParamKindTy Kind) { QT = QT.getCanonicalType(); if (QT->isVoidType()) return false; if (Kind == ParamKindTy::Uniform) return false; if (Kind == ParamKindTy::Linear) return false; // TODO: Handle linear references with modifiers if (Kind == ParamKindTy::LinearWithVarStride) return false; return true; } /// Pass By Value (PBV), as defined in 3.1.2 of the AAVFABI. static bool getAArch64PBV(QualType QT, ASTContext &C) { QT = QT.getCanonicalType(); unsigned Size = C.getTypeSize(QT); // Only scalars and complex within 16 bytes wide set PVB to true. if (Size != 8 && Size != 16 && Size != 32 && Size != 64 && Size != 128) return false; if (QT->isFloatingType()) return true; if (QT->isIntegerType()) return true; if (QT->isPointerType()) return true; // TODO: Add support for complex types (section 3.1.2, item 2). return false; } /// Computes the lane size (LS) of a return type or of an input parameter, /// as defined by `LS(P)` in 3.2.1 of the AAVFABI. /// TODO: Add support for references, section 3.2.1, item 1. 
static unsigned getAArch64LS(QualType QT, ParamKindTy Kind, ASTContext &C) { if (!getAArch64MTV(QT, Kind) && QT.getCanonicalType()->isPointerType()) { QualType PTy = QT.getCanonicalType()->getPointeeType(); if (getAArch64PBV(PTy, C)) return C.getTypeSize(PTy); } if (getAArch64PBV(QT, C)) return C.getTypeSize(QT); return C.getTypeSize(C.getUIntPtrType()); } // Get Narrowest Data Size (NDS) and Widest Data Size (WDS) from the // signature of the scalar function, as defined in 3.2.2 of the // AAVFABI. static std::tuple getNDSWDS(const FunctionDecl *FD, ArrayRef ParamAttrs) { QualType RetType = FD->getReturnType().getCanonicalType(); ASTContext &C = FD->getASTContext(); bool OutputBecomesInput = false; llvm::SmallVector Sizes; if (!RetType->isVoidType()) { Sizes.push_back(getAArch64LS(RetType, ParamKindTy::Vector, C)); if (!getAArch64PBV(RetType, C) && getAArch64MTV(RetType, {})) OutputBecomesInput = true; } for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) { QualType QT = FD->getParamDecl(I)->getType().getCanonicalType(); Sizes.push_back(getAArch64LS(QT, ParamAttrs[I].Kind, C)); } assert(!Sizes.empty() && "Unable to determine NDS and WDS."); // The LS of a function parameter / return value can only be a power // of 2, starting from 8 bits, up to 128. assert(std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) { return Size == 8 || Size == 16 || Size == 32 || Size == 64 || Size == 128; }) && "Invalid size"); return std::make_tuple(*std::min_element(std::begin(Sizes), std::end(Sizes)), *std::max_element(std::begin(Sizes), std::end(Sizes)), OutputBecomesInput); } /// Mangle the parameter part of the vector function name according to /// their OpenMP classification. The mangling function is defined in /// section 3.5 of the AAVFABI. 
static std::string mangleVectorParameters(ArrayRef ParamAttrs) { SmallString<256> Buffer; llvm::raw_svector_ostream Out(Buffer); for (const auto &ParamAttr : ParamAttrs) { switch (ParamAttr.Kind) { case LinearWithVarStride: Out << "ls" << ParamAttr.StrideOrArg; break; case Linear: Out << 'l'; // Don't print the step value if it is not present or if it is // equal to 1. if (ParamAttr.StrideOrArg != 1) Out << ParamAttr.StrideOrArg; break; case Uniform: Out << 'u'; break; case Vector: Out << 'v'; break; } if (!!ParamAttr.Alignment) Out << 'a' << ParamAttr.Alignment; } return std::string(Out.str()); } // Function used to add the attribute. The parameter `VLEN` is // templated to allow the use of "x" when targeting scalable functions // for SVE. template static void addAArch64VectorName(T VLEN, StringRef LMask, StringRef Prefix, char ISA, StringRef ParSeq, StringRef MangledName, bool OutputBecomesInput, llvm::Function *Fn) { SmallString<256> Buffer; llvm::raw_svector_ostream Out(Buffer); Out << Prefix << ISA << LMask << VLEN; if (OutputBecomesInput) Out << "v"; Out << ParSeq << "_" << MangledName; Fn->addFnAttr(Out.str()); } // Helper function to generate the Advanced SIMD names depending on // the value of the NDS when simdlen is not present. 
static void addAArch64AdvSIMDNDSNames(unsigned NDS, StringRef Mask, StringRef Prefix, char ISA, StringRef ParSeq, StringRef MangledName, bool OutputBecomesInput, llvm::Function *Fn) { switch (NDS) { case 8: addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); addAArch64VectorName(16, Mask, Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); break; case 16: addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); break; case 32: addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); break; case 64: case 128: addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); break; default: llvm_unreachable("Scalar type is too wide."); } } /// Emit vector function attributes for AArch64, as defined in the AAVFABI. static void emitAArch64DeclareSimdFunction( CodeGenModule &CGM, const FunctionDecl *FD, unsigned UserVLEN, ArrayRef ParamAttrs, OMPDeclareSimdDeclAttr::BranchStateTy State, StringRef MangledName, char ISA, unsigned VecRegSize, llvm::Function *Fn, SourceLocation SLoc) { // Get basic data for building the vector signature. const auto Data = getNDSWDS(FD, ParamAttrs); const unsigned NDS = std::get<0>(Data); const unsigned WDS = std::get<1>(Data); const bool OutputBecomesInput = std::get<2>(Data); // Check the values provided via `simdlen` by the user. // 1. A `simdlen(1)` doesn't produce vector signatures, if (UserVLEN == 1) { unsigned DiagID = CGM.getDiags().getCustomDiagID( DiagnosticsEngine::Warning, "The clause simdlen(1) has no effect when targeting aarch64."); CGM.getDiags().Report(SLoc, DiagID); return; } // 2. Section 3.3.1, item 1: user input must be a power of 2 for // Advanced SIMD output. 
if (ISA == 'n' && UserVLEN && !llvm::isPowerOf2_32(UserVLEN)) { unsigned DiagID = CGM.getDiags().getCustomDiagID( DiagnosticsEngine::Warning, "The value specified in simdlen must be a " "power of 2 when targeting Advanced SIMD."); CGM.getDiags().Report(SLoc, DiagID); return; } // 3. Section 3.4.1. SVE fixed lengh must obey the architectural // limits. if (ISA == 's' && UserVLEN != 0) { if ((UserVLEN * WDS > 2048) || (UserVLEN * WDS % 128 != 0)) { unsigned DiagID = CGM.getDiags().getCustomDiagID( DiagnosticsEngine::Warning, "The clause simdlen must fit the %0-bit " "lanes in the architectural constraints " "for SVE (min is 128-bit, max is " "2048-bit, by steps of 128-bit)"); CGM.getDiags().Report(SLoc, DiagID) << WDS; return; } } // Sort out parameter sequence. const std::string ParSeq = mangleVectorParameters(ParamAttrs); StringRef Prefix = "_ZGV"; // Generate simdlen from user input (if any). if (UserVLEN) { if (ISA == 's') { // SVE generates only a masked function. addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); } else { assert(ISA == 'n' && "Expected ISA either 's' or 'n'."); // Advanced SIMD generates one or two functions, depending on // the `[not]inbranch` clause. switch (State) { case OMPDeclareSimdDeclAttr::BS_Undefined: addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); break; case OMPDeclareSimdDeclAttr::BS_Notinbranch: addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); break; case OMPDeclareSimdDeclAttr::BS_Inbranch: addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); break; } } } else { // If no user simdlen is provided, follow the AAVFABI rules for // generating the vector length. if (ISA == 's') { // SVE, section 3.4.1, item 1. 
addAArch64VectorName("x", "M", Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); } else { assert(ISA == 'n' && "Expected ISA either 's' or 'n'."); // Advanced SIMD, Section 3.3.1 of the AAVFABI, generates one or // two vector names depending on the use of the clause // `[not]inbranch`. switch (State) { case OMPDeclareSimdDeclAttr::BS_Undefined: addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); break; case OMPDeclareSimdDeclAttr::BS_Notinbranch: addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); break; case OMPDeclareSimdDeclAttr::BS_Inbranch: addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName, OutputBecomesInput, Fn); break; } } } } void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn) { ASTContext &C = CGM.getContext(); FD = FD->getMostRecentDecl(); // Map params to their positions in function decl. llvm::DenseMap ParamPositions; if (isa(FD)) ParamPositions.try_emplace(FD, 0); unsigned ParamPos = ParamPositions.size(); for (const ParmVarDecl *P : FD->parameters()) { ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos); ++ParamPos; } while (FD) { for (const auto *Attr : FD->specific_attrs()) { llvm::SmallVector ParamAttrs(ParamPositions.size()); // Mark uniform parameters. for (const Expr *E : Attr->uniforms()) { E = E->IgnoreParenImpCasts(); unsigned Pos; if (isa(E)) { Pos = ParamPositions[FD]; } else { const auto *PVD = cast(cast(E)->getDecl()) ->getCanonicalDecl(); Pos = ParamPositions[PVD]; } ParamAttrs[Pos].Kind = Uniform; } // Get alignment info. 
auto NI = Attr->alignments_begin(); for (const Expr *E : Attr->aligneds()) { E = E->IgnoreParenImpCasts(); unsigned Pos; QualType ParmTy; if (isa(E)) { Pos = ParamPositions[FD]; ParmTy = E->getType(); } else { const auto *PVD = cast(cast(E)->getDecl()) ->getCanonicalDecl(); Pos = ParamPositions[PVD]; ParmTy = PVD->getType(); } ParamAttrs[Pos].Alignment = (*NI) ? (*NI)->EvaluateKnownConstInt(C) : llvm::APSInt::getUnsigned( C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy)) .getQuantity()); ++NI; } // Mark linear parameters. auto SI = Attr->steps_begin(); auto MI = Attr->modifiers_begin(); for (const Expr *E : Attr->linears()) { E = E->IgnoreParenImpCasts(); unsigned Pos; // Rescaling factor needed to compute the linear parameter // value in the mangled name. unsigned PtrRescalingFactor = 1; if (isa(E)) { Pos = ParamPositions[FD]; } else { const auto *PVD = cast(cast(E)->getDecl()) ->getCanonicalDecl(); Pos = ParamPositions[PVD]; if (auto *P = dyn_cast(PVD->getType())) PtrRescalingFactor = CGM.getContext() .getTypeSizeInChars(P->getPointeeType()) .getQuantity(); } ParamAttrTy &ParamAttr = ParamAttrs[Pos]; ParamAttr.Kind = Linear; // Assuming a stride of 1, for `linear` without modifiers. ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(1); if (*SI) { Expr::EvalResult Result; if (!(*SI)->EvaluateAsInt(Result, C, Expr::SE_AllowSideEffects)) { if (const auto *DRE = cast((*SI)->IgnoreParenImpCasts())) { if (const auto *StridePVD = cast(DRE->getDecl())) { ParamAttr.Kind = LinearWithVarStride; ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned( ParamPositions[StridePVD->getCanonicalDecl()]); } } } else { ParamAttr.StrideOrArg = Result.Val.getInt(); } } // If we are using a linear clause on a pointer, we need to // rescale the value of linear_step with the byte size of the // pointee type. 
if (Linear == ParamAttr.Kind) ParamAttr.StrideOrArg = ParamAttr.StrideOrArg * PtrRescalingFactor; ++SI; ++MI; } llvm::APSInt VLENVal; SourceLocation ExprLoc; const Expr *VLENExpr = Attr->getSimdlen(); if (VLENExpr) { VLENVal = VLENExpr->EvaluateKnownConstInt(C); ExprLoc = VLENExpr->getExprLoc(); } OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState(); if (CGM.getTriple().isX86()) { emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State); } else if (CGM.getTriple().getArch() == llvm::Triple::aarch64) { unsigned VLEN = VLENVal.getExtValue(); StringRef MangledName = Fn->getName(); if (CGM.getTarget().hasFeature("sve")) emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State, MangledName, 's', 128, Fn, ExprLoc); if (CGM.getTarget().hasFeature("neon")) emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State, MangledName, 'n', 128, Fn, ExprLoc); } } FD = FD->getPreviousDecl(); } } namespace { /// Cleanup action for doacross support. class DoacrossCleanupTy final : public EHScopeStack::Cleanup { public: static const int DoacrossFinArgs = 2; private: llvm::FunctionCallee RTLFn; llvm::Value *Args[DoacrossFinArgs]; public: DoacrossCleanupTy(llvm::FunctionCallee RTLFn, ArrayRef CallArgs) : RTLFn(RTLFn) { assert(CallArgs.size() == DoacrossFinArgs); std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args)); } void Emit(CodeGenFunction &CGF, Flags /*flags*/) override { if (!CGF.HaveInsertPoint()) return; CGF.EmitRuntimeCall(RTLFn, Args); } }; } // namespace void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef NumIterations) { if (!CGF.HaveInsertPoint()) return; ASTContext &C = CGM.getContext(); QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true); RecordDecl *RD; if (KmpDimTy.isNull()) { // Build struct kmp_dim { // loop bounds info casted to kmp_int64 // kmp_int64 lo; // lower // kmp_int64 up; // upper // kmp_int64 st; // stride // }; RD = 
C.buildImplicitRecord("kmp_dim"); RD->startDefinition(); addFieldToRecordDecl(C, RD, Int64Ty); addFieldToRecordDecl(C, RD, Int64Ty); addFieldToRecordDecl(C, RD, Int64Ty); RD->completeDefinition(); KmpDimTy = C.getRecordType(RD); } else { RD = cast(KmpDimTy->getAsTagDecl()); } llvm::APInt Size(/*numBits=*/32, NumIterations.size()); QualType ArrayTy = C.getConstantArrayType(KmpDimTy, Size, nullptr, ArrayType::Normal, 0); Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims"); CGF.EmitNullInitialization(DimsAddr, ArrayTy); enum { LowerFD = 0, UpperFD, StrideFD }; // Fill dims with data. for (unsigned I = 0, E = NumIterations.size(); I < E; ++I) { LValue DimsLVal = CGF.MakeAddrLValue( CGF.Builder.CreateConstArrayGEP(DimsAddr, I), KmpDimTy); // dims.upper = num_iterations; LValue UpperLVal = CGF.EmitLValueForField( DimsLVal, *std::next(RD->field_begin(), UpperFD)); llvm::Value *NumIterVal = CGF.EmitScalarConversion( CGF.EmitScalarExpr(NumIterations[I]), NumIterations[I]->getType(), Int64Ty, NumIterations[I]->getExprLoc()); CGF.EmitStoreOfScalar(NumIterVal, UpperLVal); // dims.stride = 1; LValue StrideLVal = CGF.EmitLValueForField( DimsLVal, *std::next(RD->field_begin(), StrideFD)); CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1), StrideLVal); } // Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, // kmp_int32 num_dims, struct kmp_dim * dims); llvm::Value *Args[] = { emitUpdateLocation(CGF, D.getBeginLoc()), getThreadID(CGF, D.getBeginLoc()), llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()), CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).getPointer(), CGM.VoidPtrTy)}; llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_doacross_init); CGF.EmitRuntimeCall(RTLFn, Args); llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = { emitUpdateLocation(CGF, D.getEndLoc()), getThreadID(CGF, D.getEndLoc())}; llvm::FunctionCallee 
FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_doacross_fini); CGF.EHStack.pushCleanup(NormalAndEHCleanup, FiniRTLFn, llvm::makeArrayRef(FiniArgs)); } void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C) { QualType Int64Ty = CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1); llvm::APInt Size(/*numBits=*/32, C->getNumLoops()); QualType ArrayTy = CGM.getContext().getConstantArrayType( Int64Ty, Size, nullptr, ArrayType::Normal, 0); Address CntAddr = CGF.CreateMemTemp(ArrayTy, ".cnt.addr"); for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I) { const Expr *CounterVal = C->getLoopData(I); assert(CounterVal); llvm::Value *CntVal = CGF.EmitScalarConversion( CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty, CounterVal->getExprLoc()); CGF.EmitStoreOfScalar(CntVal, CGF.Builder.CreateConstArrayGEP(CntAddr, I), /*Volatile=*/false, Int64Ty); } llvm::Value *Args[] = { emitUpdateLocation(CGF, C->getBeginLoc()), getThreadID(CGF, C->getBeginLoc()), CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()}; llvm::FunctionCallee RTLFn; if (C->getDependencyKind() == OMPC_DEPEND_source) { RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), OMPRTL___kmpc_doacross_post); } else { assert(C->getDependencyKind() == OMPC_DEPEND_sink); RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), OMPRTL___kmpc_doacross_wait); } CGF.EmitRuntimeCall(RTLFn, Args); } void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee Callee, ArrayRef Args) const { assert(Loc.isValid() && "Outlined function call location must be valid."); auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc); if (auto *Fn = dyn_cast(Callee.getCallee())) { if (Fn->doesNotThrow()) { CGF.EmitNounwindRuntimeCall(Fn, Args); return; } } CGF.EmitRuntimeCall(Callee, Args); } void CGOpenMPRuntime::emitOutlinedFunctionCall( CodeGenFunction &CGF, SourceLocation Loc, 
llvm::FunctionCallee OutlinedFn, ArrayRef Args) const { emitCall(CGF, Loc, OutlinedFn, Args); } void CGOpenMPRuntime::emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) { if (const auto *FD = dyn_cast(D)) if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD)) HasEmittedDeclareTargetRegion = true; } Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const { return CGF.GetAddrOfLocalVar(NativeParam); } Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD) { if (!VD) return Address::invalid(); Address UntiedAddr = Address::invalid(); Address UntiedRealAddr = Address::invalid(); auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn); if (It != FunctionToUntiedTaskStackMap.end()) { const UntiedLocalVarsAddressesMap &UntiedData = UntiedLocalVarsStack[It->second]; auto I = UntiedData.find(VD); if (I != UntiedData.end()) { UntiedAddr = I->second.first; UntiedRealAddr = I->second.second; } } const VarDecl *CVD = VD->getCanonicalDecl(); if (CVD->hasAttr()) { // Use the default allocation. 
if (!isAllocatableDecl(VD)) return UntiedAddr; llvm::Value *Size; CharUnits Align = CGM.getContext().getDeclAlign(CVD); if (CVD->getType()->isVariablyModifiedType()) { Size = CGF.getTypeSize(CVD->getType()); // Align the size: ((size + align - 1) / align) * align Size = CGF.Builder.CreateNUWAdd( Size, CGM.getSize(Align - CharUnits::fromQuantity(1))); Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align)); Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align)); } else { CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType()); Size = CGM.getSize(Sz.alignTo(Align)); } llvm::Value *ThreadID = getThreadID(CGF, CVD->getBeginLoc()); const auto *AA = CVD->getAttr(); assert(AA->getAllocator() && "Expected allocator expression for non-default allocator."); llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator()); // According to the standard, the original allocator type is a enum // (integer). Convert to pointer type, if required. Allocator = CGF.EmitScalarConversion( Allocator, AA->getAllocator()->getType(), CGF.getContext().VoidPtrTy, AA->getAllocator()->getExprLoc()); llvm::Value *Args[] = {ThreadID, Size, Allocator}; llvm::Value *Addr = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_alloc), Args, getName({CVD->getName(), ".void.addr"})); llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction( CGM.getModule(), OMPRTL___kmpc_free); QualType Ty = CGM.getContext().getPointerType(CVD->getType()); Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( Addr, CGF.ConvertTypeForMem(Ty), getName({CVD->getName(), ".addr"})); if (UntiedAddr.isValid()) CGF.EmitStoreOfScalar(Addr, UntiedAddr, /*Volatile=*/false, Ty); // Cleanup action for allocate support. 
class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup { llvm::FunctionCallee RTLFn; SourceLocation::UIntTy LocEncoding; Address Addr; const Expr *Allocator; public: OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn, SourceLocation::UIntTy LocEncoding, Address Addr, const Expr *Allocator) : RTLFn(RTLFn), LocEncoding(LocEncoding), Addr(Addr), Allocator(Allocator) {} void Emit(CodeGenFunction &CGF, Flags /*flags*/) override { if (!CGF.HaveInsertPoint()) return; llvm::Value *Args[3]; Args[0] = CGF.CGM.getOpenMPRuntime().getThreadID( CGF, SourceLocation::getFromRawEncoding(LocEncoding)); Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( Addr.getPointer(), CGF.VoidPtrTy); llvm::Value *AllocVal = CGF.EmitScalarExpr(Allocator); // According to the standard, the original allocator type is a enum // (integer). Convert to pointer type, if required. AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(), CGF.getContext().VoidPtrTy, Allocator->getExprLoc()); Args[2] = AllocVal; CGF.EmitRuntimeCall(RTLFn, Args); } }; Address VDAddr = UntiedRealAddr.isValid() ? 
UntiedRealAddr : Address(Addr, Align); CGF.EHStack.pushCleanup( NormalAndEHCleanup, FiniRTLFn, CVD->getLocation().getRawEncoding(), VDAddr, AA->getAllocator()); if (UntiedRealAddr.isValid()) if (auto *Region = dyn_cast_or_null(CGF.CapturedStmtInfo)) Region->emitUntiedSwitch(CGF); return VDAddr; } return UntiedAddr; } bool CGOpenMPRuntime::isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const { auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn); if (It == FunctionToUntiedTaskStackMap.end()) return false; return UntiedLocalVarsStack[It->second].count(VD) > 0; } CGOpenMPRuntime::NontemporalDeclsRAII::NontemporalDeclsRAII( CodeGenModule &CGM, const OMPLoopDirective &S) : CGM(CGM), NeedToPush(S.hasClausesOfKind()) { assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode."); if (!NeedToPush) return; NontemporalDeclsSet &DS = CGM.getOpenMPRuntime().NontemporalDeclsStack.emplace_back(); for (const auto *C : S.getClausesOfKind()) { for (const Stmt *Ref : C->private_refs()) { const auto *SimpleRefExpr = cast(Ref)->IgnoreParenImpCasts(); const ValueDecl *VD; if (const auto *DRE = dyn_cast(SimpleRefExpr)) { VD = DRE->getDecl(); } else { const auto *ME = cast(SimpleRefExpr); assert((ME->isImplicitCXXThis() || isa(ME->getBase()->IgnoreParenImpCasts())) && "Expected member of current class."); VD = ME->getMemberDecl(); } DS.insert(VD); } } } CGOpenMPRuntime::NontemporalDeclsRAII::~NontemporalDeclsRAII() { if (!NeedToPush) return; CGM.getOpenMPRuntime().NontemporalDeclsStack.pop_back(); } CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::UntiedTaskLocalDeclsRAII( CodeGenFunction &CGF, const llvm::MapVector, std::pair> &LocalVars) : CGM(CGF.CGM), NeedToPush(!LocalVars.empty()) { if (!NeedToPush) return; CGM.getOpenMPRuntime().FunctionToUntiedTaskStackMap.try_emplace( CGF.CurFn, CGM.getOpenMPRuntime().UntiedLocalVarsStack.size()); CGM.getOpenMPRuntime().UntiedLocalVarsStack.push_back(LocalVars); } CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::~UntiedTaskLocalDeclsRAII() { 
if (!NeedToPush) return; CGM.getOpenMPRuntime().UntiedLocalVarsStack.pop_back(); } bool CGOpenMPRuntime::isNontemporalDecl(const ValueDecl *VD) const { assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode."); return llvm::any_of( CGM.getOpenMPRuntime().NontemporalDeclsStack, [VD](const NontemporalDeclsSet &Set) { return Set.count(VD) > 0; }); } void CGOpenMPRuntime::LastprivateConditionalRAII::tryToDisableInnerAnalysis( const OMPExecutableDirective &S, llvm::DenseSet> &NeedToAddForLPCsAsDisabled) const { llvm::DenseSet> NeedToCheckForLPCs; // Vars in target/task regions must be excluded completely. if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()) || isOpenMPTaskingDirective(S.getDirectiveKind())) { SmallVector CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, S.getDirectiveKind()); const CapturedStmt *CS = S.getCapturedStmt(CaptureRegions.front()); for (const CapturedStmt::Capture &Cap : CS->captures()) { if (Cap.capturesVariable() || Cap.capturesVariableByCopy()) NeedToCheckForLPCs.insert(Cap.getCapturedVar()); } } // Exclude vars in private clauses. 
for (const auto *C : S.getClausesOfKind()) { for (const Expr *Ref : C->varlists()) { if (!Ref->getType()->isScalarType()) continue; const auto *DRE = dyn_cast(Ref->IgnoreParenImpCasts()); if (!DRE) continue; NeedToCheckForLPCs.insert(DRE->getDecl()); } } for (const auto *C : S.getClausesOfKind()) { for (const Expr *Ref : C->varlists()) { if (!Ref->getType()->isScalarType()) continue; const auto *DRE = dyn_cast(Ref->IgnoreParenImpCasts()); if (!DRE) continue; NeedToCheckForLPCs.insert(DRE->getDecl()); } } for (const auto *C : S.getClausesOfKind()) { for (const Expr *Ref : C->varlists()) { if (!Ref->getType()->isScalarType()) continue; const auto *DRE = dyn_cast(Ref->IgnoreParenImpCasts()); if (!DRE) continue; NeedToCheckForLPCs.insert(DRE->getDecl()); } } for (const auto *C : S.getClausesOfKind()) { for (const Expr *Ref : C->varlists()) { if (!Ref->getType()->isScalarType()) continue; const auto *DRE = dyn_cast(Ref->IgnoreParenImpCasts()); if (!DRE) continue; NeedToCheckForLPCs.insert(DRE->getDecl()); } } for (const auto *C : S.getClausesOfKind()) { for (const Expr *Ref : C->varlists()) { if (!Ref->getType()->isScalarType()) continue; const auto *DRE = dyn_cast(Ref->IgnoreParenImpCasts()); if (!DRE) continue; NeedToCheckForLPCs.insert(DRE->getDecl()); } } for (const Decl *VD : NeedToCheckForLPCs) { for (const LastprivateConditionalData &Data : llvm::reverse(CGM.getOpenMPRuntime().LastprivateConditionalStack)) { if (Data.DeclToUniqueName.count(VD) > 0) { if (!Data.Disabled) NeedToAddForLPCsAsDisabled.insert(VD); break; } } } } CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII( CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal) : CGM(CGF.CGM), Action((CGM.getLangOpts().OpenMP >= 50 && llvm::any_of(S.getClausesOfKind(), [](const OMPLastprivateClause *C) { return C->getKind() == OMPC_LASTPRIVATE_conditional; })) ? 
ActionToDo::PushAsLastprivateConditional : ActionToDo::DoNotPush) { assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode."); if (CGM.getLangOpts().OpenMP < 50 || Action == ActionToDo::DoNotPush) return; assert(Action == ActionToDo::PushAsLastprivateConditional && "Expected a push action."); LastprivateConditionalData &Data = CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back(); for (const auto *C : S.getClausesOfKind()) { if (C->getKind() != OMPC_LASTPRIVATE_conditional) continue; for (const Expr *Ref : C->varlists()) { Data.DeclToUniqueName.insert(std::make_pair( cast(Ref->IgnoreParenImpCasts())->getDecl(), SmallString<16>(generateUniqueName(CGM, "pl_cond", Ref)))); } } Data.IVLVal = IVLVal; Data.Fn = CGF.CurFn; } CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII( CodeGenFunction &CGF, const OMPExecutableDirective &S) : CGM(CGF.CGM), Action(ActionToDo::DoNotPush) { assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode."); if (CGM.getLangOpts().OpenMP < 50) return; llvm::DenseSet> NeedToAddForLPCsAsDisabled; tryToDisableInnerAnalysis(S, NeedToAddForLPCsAsDisabled); if (!NeedToAddForLPCsAsDisabled.empty()) { Action = ActionToDo::DisableLastprivateConditional; LastprivateConditionalData &Data = CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back(); for (const Decl *VD : NeedToAddForLPCsAsDisabled) Data.DeclToUniqueName.insert(std::make_pair(VD, SmallString<16>())); Data.Fn = CGF.CurFn; Data.Disabled = true; } } CGOpenMPRuntime::LastprivateConditionalRAII CGOpenMPRuntime::LastprivateConditionalRAII::disable( CodeGenFunction &CGF, const OMPExecutableDirective &S) { return LastprivateConditionalRAII(CGF, S); } CGOpenMPRuntime::LastprivateConditionalRAII::~LastprivateConditionalRAII() { if (CGM.getLangOpts().OpenMP < 50) return; if (Action == ActionToDo::DisableLastprivateConditional) { assert(CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled && "Expected list of disabled private vars."); 
CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back(); } if (Action == ActionToDo::PushAsLastprivateConditional) { assert( !CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled && "Expected list of lastprivate conditional vars."); CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back(); } } Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF, const VarDecl *VD) { ASTContext &C = CGM.getContext(); auto I = LastprivateConditionalToTypes.find(CGF.CurFn); if (I == LastprivateConditionalToTypes.end()) I = LastprivateConditionalToTypes.try_emplace(CGF.CurFn).first; QualType NewType; const FieldDecl *VDField; const FieldDecl *FiredField; LValue BaseLVal; auto VI = I->getSecond().find(VD); if (VI == I->getSecond().end()) { RecordDecl *RD = C.buildImplicitRecord("lasprivate.conditional"); RD->startDefinition(); VDField = addFieldToRecordDecl(C, RD, VD->getType().getNonReferenceType()); FiredField = addFieldToRecordDecl(C, RD, C.CharTy); RD->completeDefinition(); NewType = C.getRecordType(RD); Address Addr = CGF.CreateMemTemp(NewType, C.getDeclAlign(VD), VD->getName()); BaseLVal = CGF.MakeAddrLValue(Addr, NewType, AlignmentSource::Decl); I->getSecond().try_emplace(VD, NewType, VDField, FiredField, BaseLVal); } else { NewType = std::get<0>(VI->getSecond()); VDField = std::get<1>(VI->getSecond()); FiredField = std::get<2>(VI->getSecond()); BaseLVal = std::get<3>(VI->getSecond()); } LValue FiredLVal = CGF.EmitLValueForField(BaseLVal, FiredField); CGF.EmitStoreOfScalar( llvm::ConstantInt::getNullValue(CGF.ConvertTypeForMem(C.CharTy)), FiredLVal); return CGF.EmitLValueForField(BaseLVal, VDField).getAddress(CGF); } namespace { /// Checks if the lastprivate conditional variable is referenced in LHS. 
class LastprivateConditionalRefChecker final : public ConstStmtVisitor { ArrayRef LPM; const Expr *FoundE = nullptr; const Decl *FoundD = nullptr; StringRef UniqueDeclName; LValue IVLVal; llvm::Function *FoundFn = nullptr; SourceLocation Loc; public: bool VisitDeclRefExpr(const DeclRefExpr *E) { for (const CGOpenMPRuntime::LastprivateConditionalData &D : llvm::reverse(LPM)) { auto It = D.DeclToUniqueName.find(E->getDecl()); if (It == D.DeclToUniqueName.end()) continue; if (D.Disabled) return false; FoundE = E; FoundD = E->getDecl()->getCanonicalDecl(); UniqueDeclName = It->second; IVLVal = D.IVLVal; FoundFn = D.Fn; break; } return FoundE == E; } bool VisitMemberExpr(const MemberExpr *E) { if (!CodeGenFunction::IsWrappedCXXThis(E->getBase())) return false; for (const CGOpenMPRuntime::LastprivateConditionalData &D : llvm::reverse(LPM)) { auto It = D.DeclToUniqueName.find(E->getMemberDecl()); if (It == D.DeclToUniqueName.end()) continue; if (D.Disabled) return false; FoundE = E; FoundD = E->getMemberDecl()->getCanonicalDecl(); UniqueDeclName = It->second; IVLVal = D.IVLVal; FoundFn = D.Fn; break; } return FoundE == E; } bool VisitStmt(const Stmt *S) { for (const Stmt *Child : S->children()) { if (!Child) continue; if (const auto *E = dyn_cast(Child)) if (!E->isGLValue()) continue; if (Visit(Child)) return true; } return false; } explicit LastprivateConditionalRefChecker( ArrayRef LPM) : LPM(LPM) {} std::tuple getFoundData() const { return std::make_tuple(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn); } }; } // namespace void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal, StringRef UniqueDeclName, LValue LVal, SourceLocation Loc) { // Last updated loop counter for the lastprivate conditional var. 
// int last_iv = 0; llvm::Type *LLIVTy = CGF.ConvertTypeForMem(IVLVal.getType()); llvm::Constant *LastIV = getOrCreateInternalVariable(LLIVTy, getName({UniqueDeclName, "iv"})); cast(LastIV)->setAlignment( IVLVal.getAlignment().getAsAlign()); LValue LastIVLVal = CGF.MakeNaturalAlignAddrLValue(LastIV, IVLVal.getType()); // Last value of the lastprivate conditional. // decltype(priv_a) last_a; llvm::Constant *Last = getOrCreateInternalVariable( CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName); cast(Last)->setAlignment( LVal.getAlignment().getAsAlign()); LValue LastLVal = CGF.MakeAddrLValue(Last, LVal.getType(), LVal.getAlignment()); // Global loop counter. Required to handle inner parallel-for regions. // iv llvm::Value *IVVal = CGF.EmitLoadOfScalar(IVLVal, Loc); // #pragma omp critical(a) // if (last_iv <= iv) { // last_iv = iv; // last_a = priv_a; // } auto &&CodeGen = [&LastIVLVal, &IVLVal, IVVal, &LVal, &LastLVal, Loc](CodeGenFunction &CGF, PrePostActionTy &Action) { Action.Enter(CGF); llvm::Value *LastIVVal = CGF.EmitLoadOfScalar(LastIVLVal, Loc); // (last_iv <= iv) ? Check if the variable is updated and store new // value in global var. 
llvm::Value *CmpRes; if (IVLVal.getType()->isSignedIntegerType()) { CmpRes = CGF.Builder.CreateICmpSLE(LastIVVal, IVVal); } else { assert(IVLVal.getType()->isUnsignedIntegerType() && "Loop iteration variable must be integer."); CmpRes = CGF.Builder.CreateICmpULE(LastIVVal, IVVal); } llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lp_cond_then"); llvm::BasicBlock *ExitBB = CGF.createBasicBlock("lp_cond_exit"); CGF.Builder.CreateCondBr(CmpRes, ThenBB, ExitBB); // { CGF.EmitBlock(ThenBB); // last_iv = iv; CGF.EmitStoreOfScalar(IVVal, LastIVLVal); // last_a = priv_a; switch (CGF.getEvaluationKind(LVal.getType())) { case TEK_Scalar: { llvm::Value *PrivVal = CGF.EmitLoadOfScalar(LVal, Loc); CGF.EmitStoreOfScalar(PrivVal, LastLVal); break; } case TEK_Complex: { CodeGenFunction::ComplexPairTy PrivVal = CGF.EmitLoadOfComplex(LVal, Loc); CGF.EmitStoreOfComplex(PrivVal, LastLVal, /*isInit=*/false); break; } case TEK_Aggregate: llvm_unreachable( "Aggregates are not supported in lastprivate conditional."); } // } CGF.EmitBranch(ExitBB); // There is no need to emit line number for unconditional branch. (void)ApplyDebugLocation::CreateEmpty(CGF); CGF.EmitBlock(ExitBB, /*IsFinished=*/true); }; if (CGM.getLangOpts().OpenMPSimd) { // Do not emit as a critical region as no parallel region could be emitted. RegionCodeGenTy ThenRCG(CodeGen); ThenRCG(CGF); } else { emitCriticalRegion(CGF, UniqueDeclName, CodeGen, Loc); } } void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS) { if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty()) return; LastprivateConditionalRefChecker Checker(LastprivateConditionalStack); if (!Checker.Visit(LHS)) return; const Expr *FoundE; const Decl *FoundD; StringRef UniqueDeclName; LValue IVLVal; llvm::Function *FoundFn; std::tie(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn) = Checker.getFoundData(); if (FoundFn != CGF.CurFn) { // Special codegen for inner parallel regions. 
// ((struct.lastprivate.conditional*)&priv_a)->Fired = 1; auto It = LastprivateConditionalToTypes[FoundFn].find(FoundD); assert(It != LastprivateConditionalToTypes[FoundFn].end() && "Lastprivate conditional is not found in outer region."); QualType StructTy = std::get<0>(It->getSecond()); const FieldDecl* FiredDecl = std::get<2>(It->getSecond()); LValue PrivLVal = CGF.EmitLValue(FoundE); Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( PrivLVal.getAddress(CGF), CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy))); LValue BaseLVal = CGF.MakeAddrLValue(StructAddr, StructTy, AlignmentSource::Decl); LValue FiredLVal = CGF.EmitLValueForField(BaseLVal, FiredDecl); CGF.EmitAtomicStore(RValue::get(llvm::ConstantInt::get( CGF.ConvertTypeForMem(FiredDecl->getType()), 1)), FiredLVal, llvm::AtomicOrdering::Unordered, /*IsVolatile=*/true, /*isInit=*/false); return; } // Private address of the lastprivate conditional in the current context. // priv_a LValue LVal = CGF.EmitLValue(FoundE); emitLastprivateConditionalUpdate(CGF, IVLVal, UniqueDeclName, LVal, FoundE->getExprLoc()); } void CGOpenMPRuntime::checkAndEmitSharedLastprivateConditional( CodeGenFunction &CGF, const OMPExecutableDirective &D, const llvm::DenseSet> &IgnoredDecls) { if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty()) return; auto Range = llvm::reverse(LastprivateConditionalStack); auto It = llvm::find_if( Range, [](const LastprivateConditionalData &D) { return !D.Disabled; }); if (It == Range.end() || It->Fn != CGF.CurFn) return; auto LPCI = LastprivateConditionalToTypes.find(It->Fn); assert(LPCI != LastprivateConditionalToTypes.end() && "Lastprivates must be registered already."); SmallVector CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind()); const CapturedStmt *CS = D.getCapturedStmt(CaptureRegions.back()); for (const auto &Pair : It->DeclToUniqueName) { const auto *VD = cast(Pair.first->getCanonicalDecl()); if 
(!CS->capturesVariable(VD) || IgnoredDecls.count(VD) > 0) continue; auto I = LPCI->getSecond().find(Pair.first); assert(I != LPCI->getSecond().end() && "Lastprivate must be rehistered already."); // bool Cmp = priv_a.Fired != 0; LValue BaseLVal = std::get<3>(I->getSecond()); LValue FiredLVal = CGF.EmitLValueForField(BaseLVal, std::get<2>(I->getSecond())); llvm::Value *Res = CGF.EmitLoadOfScalar(FiredLVal, D.getBeginLoc()); llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Res); llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lpc.then"); llvm::BasicBlock *DoneBB = CGF.createBasicBlock("lpc.done"); // if (Cmp) { CGF.Builder.CreateCondBr(Cmp, ThenBB, DoneBB); CGF.EmitBlock(ThenBB); Address Addr = CGF.GetAddrOfLocalVar(VD); LValue LVal; if (VD->getType()->isReferenceType()) LVal = CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(), AlignmentSource::Decl); else LVal = CGF.MakeAddrLValue(Addr, VD->getType().getNonReferenceType(), AlignmentSource::Decl); emitLastprivateConditionalUpdate(CGF, It->IVLVal, Pair.second, LVal, D.getBeginLoc()); auto AL = ApplyDebugLocation::CreateArtificial(CGF); CGF.EmitBlock(DoneBB, /*IsFinal=*/true); // } } } void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate( CodeGenFunction &CGF, LValue PrivLVal, const VarDecl *VD, SourceLocation Loc) { if (CGF.getLangOpts().OpenMP < 50) return; auto It = LastprivateConditionalStack.back().DeclToUniqueName.find(VD); assert(It != LastprivateConditionalStack.back().DeclToUniqueName.end() && "Unknown lastprivate conditional variable."); StringRef UniqueName = It->second; llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(UniqueName); // The variable was not updated in the region - exit. 
if (!GV) return; LValue LPLVal = CGF.MakeAddrLValue( GV, PrivLVal.getType().getNonReferenceType(), PrivLVal.getAlignment()); llvm::Value *Res = CGF.EmitLoadOfScalar(LPLVal, Loc); CGF.EmitStoreOfScalar(Res, PrivLVal); } llvm::Function *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) { llvm_unreachable("Not supported in SIMD-only mode"); } llvm::Function *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) { llvm_unreachable("Not supported in SIMD-only mode"); } llvm::Function *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef CapturedVars, const Expr *IfCond) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitCriticalRegion( CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitMaskedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc, const Expr *Filter) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) { llvm_unreachable("Not supported in 
SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitTaskgroupRegion( CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitSingleRegion( CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef CopyprivateVars, ArrayRef DestExprs, ArrayRef SrcExprs, ArrayRef AssignmentOps) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks, bool ForceSimpleCall) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitForDispatchInit( CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitForStaticInit( CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitDistributeStaticInit( CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind) { llvm_unreachable("Not supported in SIMD-only mode"); } llvm::Value 
*CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF, ProcBindKind ProcBind, SourceLocation Loc) { llvm_unreachable("Not supported in SIMD-only mode"); } Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc) { llvm_unreachable("Not supported in SIMD-only mode"); } llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition( const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF) { llvm_unreachable("Not supported in SIMD-only mode"); } Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate( CodeGenFunction &CGF, QualType VarType, StringRef Name) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef Vars, SourceLocation Loc, llvm::AtomicOrdering AO) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitTaskLoopCall( CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitReduction( CodeGenFunction &CGF, SourceLocation Loc, ArrayRef Privates, ArrayRef LHSExprs, 
ArrayRef RHSExprs, ArrayRef ReductionOps, ReductionOptionsTy Options) { assert(Options.SimpleReduction && "Only simple reduction is expected."); CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs, ReductionOps, Options); } llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit( CodeGenFunction &CGF, SourceLocation Loc, ArrayRef LHSExprs, ArrayRef RHSExprs, const OMPTaskDataTy &Data) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc, bool IsWorksharingReduction) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N) { llvm_unreachable("Not supported in SIMD-only mode"); } Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitCancellationPointCall( CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction( const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, 
llvm::PointerIntPair Device, llvm::function_ref SizeEmitter) { llvm_unreachable("Not supported in SIMD-only mode"); } bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) { llvm_unreachable("Not supported in SIMD-only mode"); } bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) { llvm_unreachable("Not supported in SIMD-only mode"); } bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) { return false; } void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef CapturedVars) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitTargetDataCalls( CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef NumIterations) { llvm_unreachable("Not supported in SIMD-only mode"); } void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C) { llvm_unreachable("Not supported in SIMD-only mode"); } const VarDecl * CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const { llvm_unreachable("Not supported in SIMD-only mode"); } Address CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const { llvm_unreachable("Not supported in SIMD-only mode"); } diff 
--git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp index 54e6c7d38e7d..11dc661abc24 100644 --- a/clang/lib/Format/TokenAnnotator.cpp +++ b/clang/lib/Format/TokenAnnotator.cpp @@ -1,4291 +1,4298 @@ //===--- TokenAnnotator.cpp - Format C++ code -----------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// /// \file /// This file implements a token annotator, i.e. creates /// \c AnnotatedTokens out of \c FormatTokens with required extra information. /// //===----------------------------------------------------------------------===// #include "TokenAnnotator.h" #include "FormatToken.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TokenKinds.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Support/Debug.h" #define DEBUG_TYPE "format-token-annotator" namespace clang { namespace format { namespace { /// Returns \c true if the token can be used as an identifier in /// an Objective-C \c \@selector, \c false otherwise. /// /// Because getFormattingLangOpts() always lexes source code as /// Objective-C++, C++ keywords like \c new and \c delete are /// lexed as tok::kw_*, not tok::identifier, even for Objective-C. /// /// For Objective-C and Objective-C++, both identifiers and keywords /// are valid inside @selector(...) (or a macro which /// invokes @selector(...)). So, we allow treat any identifier or /// keyword as a potential Objective-C selector component. static bool canBeObjCSelectorComponent(const FormatToken &Tok) { return Tok.Tok.getIdentifierInfo() != nullptr; } /// With `Left` being '(', check if we're at either `[...](` or /// `[...]<...>(`, where the [ opens a lambda capture list. 
static bool isLambdaParameterList(const FormatToken *Left) {
  // Skip an explicit template parameter list <...> if present (generic
  // lambda with explicit template parameters), so `Left` ends up right
  // after the capture list.
  if (Left->Previous && Left->Previous->is(tok::greater) &&
      Left->Previous->MatchingParen &&
      Left->Previous->MatchingParen->is(TT_TemplateOpener))
    Left = Left->Previous->MatchingParen;

  // Check for `[...]`: the '(' qualifies only if the preceding ']' closes a
  // square that was classified as a lambda introducer (TT_LambdaLSquare).
  return Left->Previous && Left->Previous->is(tok::r_square) &&
         Left->Previous->MatchingParen &&
         Left->Previous->MatchingParen->is(TT_LambdaLSquare);
}

/// Returns \c true if the token is followed by a boolean condition, \c false
/// otherwise.
static bool isKeywordWithCondition(const FormatToken &Tok) {
  return Tok.isOneOf(tok::kw_if, tok::kw_for, tok::kw_while, tok::kw_switch,
                     tok::kw_constexpr, tok::kw_catch);
}

/// A parser that gathers additional information about tokens.
///
/// The \c TokenAnnotator tries to match parentheses and square brackets and
/// stores parenthesis levels. It also tries to resolve matching "<" and ">"
/// into template parameter lists.
class AnnotatingParser {
public:
  AnnotatingParser(const FormatStyle &Style, AnnotatedLine &Line,
                   const AdditionalKeywords &Keywords)
      : Style(Style), Line(Line), CurrentToken(Line.First), AutoFound(false),
        Keywords(Keywords) {
    // Seed the context stack with a sentinel context; the parse* members
    // push and pop further contexts as bracketed constructs are entered
    // and left.
    Contexts.push_back(Context(tok::unknown, 1, /*IsExpression=*/false));
    resetTokenMetadata(CurrentToken);
  }

private:
  // Consumes tokens after a '<', deciding whether that '<' opens a template
  // argument/parameter list; returns true iff it does.
  bool parseAngle() {
    if (!CurrentToken || !CurrentToken->Previous)
      return false;
    // A '<' previously recorded as definitely not a template opener is
    // rejected immediately.
    if (NonTemplateLess.count(CurrentToken->Previous))
      return false;
    const FormatToken &Previous = *CurrentToken->Previous; // The '<'.
if (Previous.Previous) { if (Previous.Previous->Tok.isLiteral()) return false; if (Previous.Previous->is(tok::r_paren) && Contexts.size() > 1 && (!Previous.Previous->MatchingParen || !Previous.Previous->MatchingParen->is(TT_OverloadedOperatorLParen))) return false; } FormatToken *Left = CurrentToken->Previous; Left->ParentBracket = Contexts.back().ContextKind; ScopedContextCreator ContextCreator(*this, tok::less, 12); // If this angle is in the context of an expression, we need to be more // hesitant to detect it as opening template parameters. bool InExprContext = Contexts.back().IsExpression; Contexts.back().IsExpression = false; // If there's a template keyword before the opening angle bracket, this is a // template parameter, not an argument. Contexts.back().InTemplateArgument = Left->Previous && Left->Previous->Tok.isNot(tok::kw_template); if (Style.Language == FormatStyle::LK_Java && CurrentToken->is(tok::question)) next(); while (CurrentToken) { if (CurrentToken->is(tok::greater)) { // Try to do a better job at looking for ">>" within the condition of // a statement. Conservatively insert spaces between consecutive ">" // tokens to prevent splitting right bitshift operators and potentially // altering program semantics. This check is overly conservative and // will prevent spaces from being inserted in select nested template // parameter cases, but should not alter program semantics. if (CurrentToken->Next && CurrentToken->Next->is(tok::greater) && Left->ParentBracket != tok::less && (isKeywordWithCondition(*Line.First) || CurrentToken->getStartOfNonWhitespace() == CurrentToken->Next->getStartOfNonWhitespace().getLocWithOffset( -1))) return false; Left->MatchingParen = CurrentToken; CurrentToken->MatchingParen = Left; // In TT_Proto, we must distignuish between: // map // msg < item: data > // msg: < item: data > // In TT_TextProto, map does not occur. 
if (Style.Language == FormatStyle::LK_TextProto || (Style.Language == FormatStyle::LK_Proto && Left->Previous && Left->Previous->isOneOf(TT_SelectorName, TT_DictLiteral))) CurrentToken->setType(TT_DictLiteral); else CurrentToken->setType(TT_TemplateCloser); next(); return true; } if (CurrentToken->is(tok::question) && Style.Language == FormatStyle::LK_Java) { next(); continue; } if (CurrentToken->isOneOf(tok::r_paren, tok::r_square, tok::r_brace) || (CurrentToken->isOneOf(tok::colon, tok::question) && InExprContext && !Style.isCSharp() && Style.Language != FormatStyle::LK_Proto && Style.Language != FormatStyle::LK_TextProto)) return false; // If a && or || is found and interpreted as a binary operator, this set // of angles is likely part of something like "a < b && c > d". If the // angles are inside an expression, the ||/&& might also be a binary // operator that was misinterpreted because we are parsing template // parameters. // FIXME: This is getting out of hand, write a decent parser. 
if (CurrentToken->Previous->isOneOf(tok::pipepipe, tok::ampamp) && CurrentToken->Previous->is(TT_BinaryOperator) && Contexts[Contexts.size() - 2].IsExpression && !Line.startsWith(tok::kw_template)) return false; updateParameterCount(Left, CurrentToken); if (Style.Language == FormatStyle::LK_Proto) { if (FormatToken *Previous = CurrentToken->getPreviousNonComment()) { if (CurrentToken->is(tok::colon) || (CurrentToken->isOneOf(tok::l_brace, tok::less) && Previous->isNot(tok::colon))) Previous->setType(TT_SelectorName); } } if (!consumeToken()) return false; } return false; } bool parseUntouchableParens() { while (CurrentToken) { CurrentToken->Finalized = true; switch (CurrentToken->Tok.getKind()) { case tok::l_paren: next(); if (!parseUntouchableParens()) return false; continue; case tok::r_paren: next(); return true; default: // no-op break; } next(); } return false; } bool parseParens(bool LookForDecls = false) { if (!CurrentToken) return false; FormatToken *Left = CurrentToken->Previous; assert(Left && "Unknown previous token"); FormatToken *PrevNonComment = Left->getPreviousNonComment(); Left->ParentBracket = Contexts.back().ContextKind; ScopedContextCreator ContextCreator(*this, tok::l_paren, 1); // FIXME: This is a bit of a hack. Do better. Contexts.back().ColonIsForRangeExpr = Contexts.size() == 2 && Contexts[0].ColonIsForRangeExpr; if (Left->Previous && Left->Previous->is(TT_UntouchableMacroFunc)) { Left->Finalized = true; return parseUntouchableParens(); } bool StartsObjCMethodExpr = false; if (FormatToken *MaybeSel = Left->Previous) { // @selector( starts a selector. if (MaybeSel->isObjCAtKeyword(tok::objc_selector) && MaybeSel->Previous && MaybeSel->Previous->is(tok::at)) { StartsObjCMethodExpr = true; } } if (Left->is(TT_OverloadedOperatorLParen)) { // Find the previous kw_operator token. 
FormatToken *Prev = Left; while (!Prev->is(tok::kw_operator)) { Prev = Prev->Previous; assert(Prev && "Expect a kw_operator prior to the OperatorLParen!"); } // If faced with "a.operator*(argument)" or "a->operator*(argument)", // i.e. the operator is called as a member function, // then the argument must be an expression. bool OperatorCalledAsMemberFunction = Prev->Previous && Prev->Previous->isOneOf(tok::period, tok::arrow); Contexts.back().IsExpression = OperatorCalledAsMemberFunction; } else if (Style.Language == FormatStyle::LK_JavaScript && (Line.startsWith(Keywords.kw_type, tok::identifier) || Line.startsWith(tok::kw_export, Keywords.kw_type, tok::identifier))) { // type X = (...); // export type X = (...); Contexts.back().IsExpression = false; } else if (Left->Previous && (Left->Previous->isOneOf(tok::kw_static_assert, tok::kw_while, tok::l_paren, tok::comma) || Left->Previous->isIf() || Left->Previous->is(TT_BinaryOperator))) { // static_assert, if and while usually contain expressions. Contexts.back().IsExpression = true; } else if (Style.Language == FormatStyle::LK_JavaScript && Left->Previous && (Left->Previous->is(Keywords.kw_function) || (Left->Previous->endsSequence(tok::identifier, Keywords.kw_function)))) { // function(...) or function f(...) Contexts.back().IsExpression = false; } else if (Style.Language == FormatStyle::LK_JavaScript && Left->Previous && Left->Previous->is(TT_JsTypeColon)) { // let x: (SomeType); Contexts.back().IsExpression = false; } else if (isLambdaParameterList(Left)) { // This is a parameter list of a lambda expression. Contexts.back().IsExpression = false; } else if (Line.InPPDirective && (!Left->Previous || !Left->Previous->is(tok::identifier))) { Contexts.back().IsExpression = true; } else if (Contexts[Contexts.size() - 2].CaretFound) { // This is the parameter list of an ObjC block. 
Contexts.back().IsExpression = false; } else if (Left->Previous && Left->Previous->is(TT_ForEachMacro)) { // The first argument to a foreach macro is a declaration. Contexts.back().IsForEachMacro = true; Contexts.back().IsExpression = false; } else if (Left->Previous && Left->Previous->MatchingParen && Left->Previous->MatchingParen->is(TT_ObjCBlockLParen)) { Contexts.back().IsExpression = false; } else if (!Line.MustBeDeclaration && !Line.InPPDirective) { bool IsForOrCatch = Left->Previous && Left->Previous->isOneOf(tok::kw_for, tok::kw_catch); Contexts.back().IsExpression = !IsForOrCatch; } // Infer the role of the l_paren based on the previous token if we haven't // detected one one yet. if (PrevNonComment && Left->is(TT_Unknown)) { if (PrevNonComment->is(tok::kw___attribute)) { Left->setType(TT_AttributeParen); } else if (PrevNonComment->isOneOf(TT_TypenameMacro, tok::kw_decltype, tok::kw_typeof, tok::kw__Atomic, tok::kw___underlying_type)) { Left->setType(TT_TypeDeclarationParen); // decltype() and typeof() usually contain expressions. if (PrevNonComment->isOneOf(tok::kw_decltype, tok::kw_typeof)) Contexts.back().IsExpression = true; } } if (StartsObjCMethodExpr) { Contexts.back().ColonIsObjCMethodExpr = true; Left->setType(TT_ObjCMethodExpr); } // MightBeFunctionType and ProbablyFunctionType are used for // function pointer and reference types as well as Objective-C // block types: // // void (*FunctionPointer)(void); // void (&FunctionReference)(void); // void (^ObjCBlock)(void); bool MightBeFunctionType = !Contexts[Contexts.size() - 2].IsExpression; bool ProbablyFunctionType = CurrentToken->isOneOf(tok::star, tok::amp, tok::caret); bool HasMultipleLines = false; bool HasMultipleParametersOnALine = false; bool MightBeObjCForRangeLoop = Left->Previous && Left->Previous->is(tok::kw_for); FormatToken *PossibleObjCForInToken = nullptr; while (CurrentToken) { // LookForDecls is set when "if (" has been seen. 
Check for // 'identifier' '*' 'identifier' followed by not '=' -- this // '*' has to be a binary operator but determineStarAmpUsage() will // categorize it as an unary operator, so set the right type here. if (LookForDecls && CurrentToken->Next) { FormatToken *Prev = CurrentToken->getPreviousNonComment(); if (Prev) { FormatToken *PrevPrev = Prev->getPreviousNonComment(); FormatToken *Next = CurrentToken->Next; if (PrevPrev && PrevPrev->is(tok::identifier) && Prev->isOneOf(tok::star, tok::amp, tok::ampamp) && CurrentToken->is(tok::identifier) && Next->isNot(tok::equal)) { Prev->setType(TT_BinaryOperator); LookForDecls = false; } } } if (CurrentToken->Previous->is(TT_PointerOrReference) && CurrentToken->Previous->Previous->isOneOf(tok::l_paren, tok::coloncolon)) ProbablyFunctionType = true; if (CurrentToken->is(tok::comma)) MightBeFunctionType = false; if (CurrentToken->Previous->is(TT_BinaryOperator)) Contexts.back().IsExpression = true; if (CurrentToken->is(tok::r_paren)) { if (MightBeFunctionType && ProbablyFunctionType && CurrentToken->Next && (CurrentToken->Next->is(tok::l_paren) || (CurrentToken->Next->is(tok::l_square) && Line.MustBeDeclaration))) Left->setType(Left->Next->is(tok::caret) ? TT_ObjCBlockLParen : TT_FunctionTypeLParen); Left->MatchingParen = CurrentToken; CurrentToken->MatchingParen = Left; if (CurrentToken->Next && CurrentToken->Next->is(tok::l_brace) && Left->Previous && Left->Previous->is(tok::l_paren)) { // Detect the case where macros are used to generate lambdas or // function bodies, e.g.: // auto my_lambda = MACRO((Type *type, int i) { .. body .. 
}); for (FormatToken *Tok = Left; Tok != CurrentToken; Tok = Tok->Next) { if (Tok->is(TT_BinaryOperator) && Tok->isOneOf(tok::star, tok::amp, tok::ampamp)) Tok->setType(TT_PointerOrReference); } } if (StartsObjCMethodExpr) { CurrentToken->setType(TT_ObjCMethodExpr); if (Contexts.back().FirstObjCSelectorName) { Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName = Contexts.back().LongestObjCSelectorName; } } if (Left->is(TT_AttributeParen)) CurrentToken->setType(TT_AttributeParen); if (Left->is(TT_TypeDeclarationParen)) CurrentToken->setType(TT_TypeDeclarationParen); if (Left->Previous && Left->Previous->is(TT_JavaAnnotation)) CurrentToken->setType(TT_JavaAnnotation); if (Left->Previous && Left->Previous->is(TT_LeadingJavaAnnotation)) CurrentToken->setType(TT_LeadingJavaAnnotation); if (Left->Previous && Left->Previous->is(TT_AttributeSquare)) CurrentToken->setType(TT_AttributeSquare); if (!HasMultipleLines) Left->setPackingKind(PPK_Inconclusive); else if (HasMultipleParametersOnALine) Left->setPackingKind(PPK_BinPacked); else Left->setPackingKind(PPK_OnePerLine); next(); return true; } if (CurrentToken->isOneOf(tok::r_square, tok::r_brace)) return false; if (CurrentToken->is(tok::l_brace)) Left->setType(TT_Unknown); // Not TT_ObjCBlockLParen if (CurrentToken->is(tok::comma) && CurrentToken->Next && !CurrentToken->Next->HasUnescapedNewline && !CurrentToken->Next->isTrailingComment()) HasMultipleParametersOnALine = true; bool ProbablyFunctionTypeLParen = (CurrentToken->is(tok::l_paren) && CurrentToken->Next && CurrentToken->Next->isOneOf(tok::star, tok::amp, tok::caret)); if ((CurrentToken->Previous->isOneOf(tok::kw_const, tok::kw_auto) || CurrentToken->Previous->isSimpleTypeSpecifier()) && !(CurrentToken->is(tok::l_brace) || (CurrentToken->is(tok::l_paren) && !ProbablyFunctionTypeLParen))) Contexts.back().IsExpression = false; if (CurrentToken->isOneOf(tok::semi, tok::colon)) { MightBeObjCForRangeLoop = false; if (PossibleObjCForInToken) { 
PossibleObjCForInToken->setType(TT_Unknown); PossibleObjCForInToken = nullptr; } } if (MightBeObjCForRangeLoop && CurrentToken->is(Keywords.kw_in)) { PossibleObjCForInToken = CurrentToken; PossibleObjCForInToken->setType(TT_ObjCForIn); } // When we discover a 'new', we set CanBeExpression to 'false' in order to // parse the type correctly. Reset that after a comma. if (CurrentToken->is(tok::comma)) Contexts.back().CanBeExpression = true; FormatToken *Tok = CurrentToken; if (!consumeToken()) return false; updateParameterCount(Left, Tok); if (CurrentToken && CurrentToken->HasUnescapedNewline) HasMultipleLines = true; } return false; } bool isCSharpAttributeSpecifier(const FormatToken &Tok) { if (!Style.isCSharp()) return false; // `identifier[i]` is not an attribute. if (Tok.Previous && Tok.Previous->is(tok::identifier)) return false; // Chains of [] in `identifier[i][j][k]` are not attributes. if (Tok.Previous && Tok.Previous->is(tok::r_square)) { auto *MatchingParen = Tok.Previous->MatchingParen; if (!MatchingParen || MatchingParen->is(TT_ArraySubscriptLSquare)) return false; } const FormatToken *AttrTok = Tok.Next; if (!AttrTok) return false; // Just an empty declaration e.g. string []. if (AttrTok->is(tok::r_square)) return false; // Move along the tokens inbetween the '[' and ']' e.g. [STAThread]. while (AttrTok && AttrTok->isNot(tok::r_square)) { AttrTok = AttrTok->Next; } if (!AttrTok) return false; // Allow an attribute to be the only content of a file. AttrTok = AttrTok->Next; if (!AttrTok) return true; // Limit this to being an access modifier that follows. if (AttrTok->isOneOf(tok::kw_public, tok::kw_private, tok::kw_protected, tok::comment, tok::kw_class, tok::kw_static, tok::l_square, Keywords.kw_internal)) { return true; } // incase its a [XXX] retval func(.... 
    // NOTE(review): tail of isCSharpAttributeSpecifier() — its head lies
    // before this span. Accept `[XXX] retval func(` shaped sequences.
    if (AttrTok->Next &&
        AttrTok->Next->startsSequence(tok::identifier, tok::l_paren))
      return true;

    return false;
  }

  // Returns true if `Tok` starts a C++11 attribute specifier `[[...]]`,
  // distinguishing it from ObjC constructs that also begin with '['.
  bool isCpp11AttributeSpecifier(const FormatToken &Tok) {
    if (!Style.isCpp() || !Tok.startsSequence(tok::l_square, tok::l_square))
      return false;
    // The first square bracket is part of an ObjC array literal.
    if (Tok.Previous && Tok.Previous->is(tok::at)) {
      return false;
    }
    const FormatToken *AttrTok = Tok.Next->Next;
    if (!AttrTok)
      return false;
    // C++17 '[[using ns: foo, bar(baz, blech)]]'
    // We assume nobody will name an ObjC variable 'using'.
    if (AttrTok->startsSequence(tok::kw_using, tok::identifier, tok::colon))
      return true;
    if (AttrTok->isNot(tok::identifier))
      return false;
    while (AttrTok && !AttrTok->startsSequence(tok::r_square, tok::r_square)) {
      // ObjC message send. We assume nobody will use : in a C++11 attribute
      // specifier parameter, although this is technically valid:
      // [[foo(:)]].
      if (AttrTok->is(tok::colon) ||
          AttrTok->startsSequence(tok::identifier, tok::identifier) ||
          AttrTok->startsSequence(tok::r_paren, tok::identifier))
        return false;
      if (AttrTok->is(tok::ellipsis))
        return true;
      AttrTok = AttrTok->Next;
    }
    return AttrTok && AttrTok->startsSequence(tok::r_square, tok::r_square);
  }

  // Classifies and consumes the bracketed construct opened by the '[' just
  // before CurrentToken.
  bool parseSquare() {
    if (!CurrentToken)
      return false;

    // A '[' could be an index subscript (after an identifier or after
    // ')' or ']'), it could be the start of an Objective-C method
    // expression, it could be the start of an Objective-C array literal,
    // or it could be a C++ attribute specifier [[foo::bar]].
    FormatToken *Left = CurrentToken->Previous;
    Left->ParentBracket = Contexts.back().ContextKind;
    FormatToken *Parent = Left->getPreviousNonComment();

    // Cases where '>' is followed by '['.
    // In C++, this can happen either in array of templates (foo<int>[10])
    // or when array is a nested template type (unique_ptr<X<Y>>[]).
    // (The bracketed examples were garbled by extraction — reconstructed.)
bool CppArrayTemplates = Style.isCpp() && Parent && Parent->is(TT_TemplateCloser) && (Contexts.back().CanBeExpression || Contexts.back().IsExpression || Contexts.back().InTemplateArgument); bool IsCpp11AttributeSpecifier = isCpp11AttributeSpecifier(*Left) || Contexts.back().InCpp11AttributeSpecifier; // Treat C# Attributes [STAThread] much like C++ attributes [[...]]. bool IsCSharpAttributeSpecifier = isCSharpAttributeSpecifier(*Left) || Contexts.back().InCSharpAttributeSpecifier; bool InsideInlineASM = Line.startsWith(tok::kw_asm); bool IsCppStructuredBinding = Left->isCppStructuredBinding(Style); bool StartsObjCMethodExpr = !IsCppStructuredBinding && !InsideInlineASM && !CppArrayTemplates && Style.isCpp() && !IsCpp11AttributeSpecifier && !IsCSharpAttributeSpecifier && Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) && !CurrentToken->isOneOf(tok::l_brace, tok::r_square) && (!Parent || Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren, tok::kw_return, tok::kw_throw) || Parent->isUnaryOperator() || // FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen. 
Parent->isOneOf(TT_ObjCForIn, TT_CastRParen) || (getBinOpPrecedence(Parent->Tok.getKind(), true, true) > prec::Unknown)); bool ColonFound = false; unsigned BindingIncrease = 1; if (IsCppStructuredBinding) { Left->setType(TT_StructuredBindingLSquare); } else if (Left->is(TT_Unknown)) { if (StartsObjCMethodExpr) { Left->setType(TT_ObjCMethodExpr); } else if (InsideInlineASM) { Left->setType(TT_InlineASMSymbolicNameLSquare); } else if (IsCpp11AttributeSpecifier) { Left->setType(TT_AttributeSquare); } else if (Style.Language == FormatStyle::LK_JavaScript && Parent && Contexts.back().ContextKind == tok::l_brace && Parent->isOneOf(tok::l_brace, tok::comma)) { Left->setType(TT_JsComputedPropertyName); } else if (Style.isCpp() && Contexts.back().ContextKind == tok::l_brace && Parent && Parent->isOneOf(tok::l_brace, tok::comma)) { Left->setType(TT_DesignatedInitializerLSquare); } else if (IsCSharpAttributeSpecifier) { Left->setType(TT_AttributeSquare); } else if (CurrentToken->is(tok::r_square) && Parent && Parent->is(TT_TemplateCloser)) { Left->setType(TT_ArraySubscriptLSquare); } else if (Style.Language == FormatStyle::LK_Proto || Style.Language == FormatStyle::LK_TextProto) { // Square braces in LK_Proto can either be message field attributes: // // optional Aaa aaa = 1 [ // (aaa) = aaa // ]; // // extensions 123 [ // (aaa) = aaa // ]; // // or text proto extensions (in options): // // option (Aaa.options) = { // [type.type/type] { // key: value // } // } // // or repeated fields (in options): // // option (Aaa.options) = { // keys: [ 1, 2, 3 ] // } // // In the first and the third case we want to spread the contents inside // the square braces; in the second we want to keep them inline. 
Left->setType(TT_ArrayInitializerLSquare); if (!Left->endsSequence(tok::l_square, tok::numeric_constant, tok::equal) && !Left->endsSequence(tok::l_square, tok::numeric_constant, tok::identifier) && !Left->endsSequence(tok::l_square, tok::colon, TT_SelectorName)) { Left->setType(TT_ProtoExtensionLSquare); BindingIncrease = 10; } } else if (!CppArrayTemplates && Parent && Parent->isOneOf(TT_BinaryOperator, TT_TemplateCloser, tok::at, tok::comma, tok::l_paren, tok::l_square, tok::question, tok::colon, tok::kw_return, // Should only be relevant to JavaScript: tok::kw_default)) { Left->setType(TT_ArrayInitializerLSquare); } else { BindingIncrease = 10; Left->setType(TT_ArraySubscriptLSquare); } } ScopedContextCreator ContextCreator(*this, tok::l_square, BindingIncrease); Contexts.back().IsExpression = true; if (Style.Language == FormatStyle::LK_JavaScript && Parent && Parent->is(TT_JsTypeColon)) Contexts.back().IsExpression = false; Contexts.back().ColonIsObjCMethodExpr = StartsObjCMethodExpr; Contexts.back().InCpp11AttributeSpecifier = IsCpp11AttributeSpecifier; Contexts.back().InCSharpAttributeSpecifier = IsCSharpAttributeSpecifier; while (CurrentToken) { if (CurrentToken->is(tok::r_square)) { if (IsCpp11AttributeSpecifier) CurrentToken->setType(TT_AttributeSquare); if (IsCSharpAttributeSpecifier) CurrentToken->setType(TT_AttributeSquare); else if (((CurrentToken->Next && CurrentToken->Next->is(tok::l_paren)) || (CurrentToken->Previous && CurrentToken->Previous->Previous == Left)) && Left->is(TT_ObjCMethodExpr)) { // An ObjC method call is rarely followed by an open parenthesis. It // also can't be composed of just one token, unless it's a macro that // will be expanded to more tokens. // FIXME: Do we incorrectly label ":" with this? 
StartsObjCMethodExpr = false; Left->setType(TT_Unknown); } if (StartsObjCMethodExpr && CurrentToken->Previous != Left) { CurrentToken->setType(TT_ObjCMethodExpr); // If we haven't seen a colon yet, make sure the last identifier // before the r_square is tagged as a selector name component. if (!ColonFound && CurrentToken->Previous && CurrentToken->Previous->is(TT_Unknown) && canBeObjCSelectorComponent(*CurrentToken->Previous)) CurrentToken->Previous->setType(TT_SelectorName); // determineStarAmpUsage() thinks that '*' '[' is allocating an // array of pointers, but if '[' starts a selector then '*' is a // binary operator. if (Parent && Parent->is(TT_PointerOrReference)) Parent->setType(TT_BinaryOperator); } // An arrow after an ObjC method expression is not a lambda arrow. if (CurrentToken->getType() == TT_ObjCMethodExpr && CurrentToken->Next && CurrentToken->Next->is(TT_LambdaArrow)) CurrentToken->Next->setType(TT_Unknown); Left->MatchingParen = CurrentToken; CurrentToken->MatchingParen = Left; // FirstObjCSelectorName is set when a colon is found. This does // not work, however, when the method has no parameters. // Here, we set FirstObjCSelectorName when the end of the method call is // reached, in case it was not set already. 
if (!Contexts.back().FirstObjCSelectorName) { FormatToken *Previous = CurrentToken->getPreviousNonComment(); if (Previous && Previous->is(TT_SelectorName)) { Previous->ObjCSelectorNameParts = 1; Contexts.back().FirstObjCSelectorName = Previous; } } else { Left->ParameterCount = Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts; } if (Contexts.back().FirstObjCSelectorName) { Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName = Contexts.back().LongestObjCSelectorName; if (Left->BlockParameterCount > 1) Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName = 0; } next(); return true; } if (CurrentToken->isOneOf(tok::r_paren, tok::r_brace)) return false; if (CurrentToken->is(tok::colon)) { if (IsCpp11AttributeSpecifier && CurrentToken->endsSequence(tok::colon, tok::identifier, tok::kw_using)) { // Remember that this is a [[using ns: foo]] C++ attribute, so we // don't add a space before the colon (unlike other colons). CurrentToken->setType(TT_AttributeColon); } else if (Left->isOneOf(TT_ArraySubscriptLSquare, TT_DesignatedInitializerLSquare)) { Left->setType(TT_ObjCMethodExpr); StartsObjCMethodExpr = true; Contexts.back().ColonIsObjCMethodExpr = true; if (Parent && Parent->is(tok::r_paren)) // FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen. Parent->setType(TT_CastRParen); } ColonFound = true; } if (CurrentToken->is(tok::comma) && Left->is(TT_ObjCMethodExpr) && !ColonFound) Left->setType(TT_ArrayInitializerLSquare); FormatToken *Tok = CurrentToken; if (!consumeToken()) return false; updateParameterCount(Left, Tok); } return false; } bool couldBeInStructArrayInitializer() const { if (Contexts.size() < 2) return false; // We want to back up no more then 2 context levels i.e. // . 
{ { <- const auto End = std::next(Contexts.rbegin(), 2); auto Last = Contexts.rbegin(); unsigned Depth = 0; for (; Last != End; ++Last) { if (Last->ContextKind == tok::l_brace) ++Depth; } return Depth == 2 && Last->ContextKind != tok::l_brace; } bool parseBrace() { if (CurrentToken) { FormatToken *Left = CurrentToken->Previous; Left->ParentBracket = Contexts.back().ContextKind; if (Contexts.back().CaretFound) Left->setType(TT_ObjCBlockLBrace); Contexts.back().CaretFound = false; ScopedContextCreator ContextCreator(*this, tok::l_brace, 1); Contexts.back().ColonIsDictLiteral = true; if (Left->is(BK_BracedInit)) Contexts.back().IsExpression = true; if (Style.Language == FormatStyle::LK_JavaScript && Left->Previous && Left->Previous->is(TT_JsTypeColon)) Contexts.back().IsExpression = false; unsigned CommaCount = 0; while (CurrentToken) { if (CurrentToken->is(tok::r_brace)) { Left->MatchingParen = CurrentToken; CurrentToken->MatchingParen = Left; if (Style.AlignArrayOfStructures != FormatStyle::AIAS_None) { if (Left->ParentBracket == tok::l_brace && couldBeInStructArrayInitializer() && CommaCount > 0) { Contexts.back().InStructArrayInitializer = true; } } next(); return true; } if (CurrentToken->isOneOf(tok::r_paren, tok::r_square)) return false; updateParameterCount(Left, CurrentToken); if (CurrentToken->isOneOf(tok::colon, tok::l_brace, tok::less)) { FormatToken *Previous = CurrentToken->getPreviousNonComment(); if (Previous->is(TT_JsTypeOptionalQuestion)) Previous = Previous->getPreviousNonComment(); if ((CurrentToken->is(tok::colon) && (!Contexts.back().ColonIsDictLiteral || !Style.isCpp())) || Style.Language == FormatStyle::LK_Proto || Style.Language == FormatStyle::LK_TextProto) { Left->setType(TT_DictLiteral); if (Previous->Tok.getIdentifierInfo() || Previous->is(tok::string_literal)) Previous->setType(TT_SelectorName); } if (CurrentToken->is(tok::colon) || Style.Language == FormatStyle::LK_JavaScript) Left->setType(TT_DictLiteral); } if 
    // NOTE(review): tail of parseBrace() — the enclosing `if` keyword lies
    // just before this span.
    (CurrentToken->is(tok::comma)) {
          // In JS, a comma inside braces indicates a dict literal.
          if (Style.Language == FormatStyle::LK_JavaScript)
            Left->setType(TT_DictLiteral);
          ++CommaCount;
        }
        if (!consumeToken())
          return false;
      }
    }
    return true;
  }

  // Maintains `Left`'s ParameterCount/BlockParameterCount while scanning the
  // tokens of the bracketed scope that `Left` opens.
  void updateParameterCount(FormatToken *Left, FormatToken *Current) {
    // For ObjC methods, the number of parameters is calculated differently as
    // method declarations have a different structure (the parameters are not
    // inside a bracket scope).
    if (Current->is(tok::l_brace) && Current->is(BK_Block))
      ++Left->BlockParameterCount;
    if (Current->is(tok::comma)) {
      ++Left->ParameterCount;
      if (!Left->Role)
        Left->Role.reset(new CommaSeparatedList(Style));
      Left->Role->CommaFound(Current);
    } else if (Left->ParameterCount == 0 && Current->isNot(tok::comment)) {
      // The first non-comment token means the scope holds at least one
      // parameter.
      Left->ParameterCount = 1;
    }
  }

  // Consumes tokens up to (and including) the ':' of a conditional
  // expression, marking that colon TT_ConditionalExpr.
  bool parseConditional() {
    while (CurrentToken) {
      if (CurrentToken->is(tok::colon)) {
        CurrentToken->setType(TT_ConditionalExpr);
        next();
        return true;
      }
      if (!consumeToken())
        return false;
    }
    return false;
  }

  // Parses `template <...>`, marking the closing '>' as ending a template
  // declaration header.
  bool parseTemplateDeclaration() {
    if (CurrentToken && CurrentToken->is(tok::less)) {
      CurrentToken->setType(TT_TemplateOpener);
      next();
      if (!parseAngle())
        return false;
      if (CurrentToken)
        CurrentToken->Previous->ClosesTemplateDeclaration = true;
      return true;
    }
    return false;
  }

  // Central dispatcher: annotates the current token and recurses into the
  // matching parse* helper for bracket-like tokens.
  // NOTE(review): this function continues past the end of the visible chunk.
  bool consumeToken() {
    FormatToken *Tok = CurrentToken;
    next();
    switch (Tok->Tok.getKind()) {
    case tok::plus:
    case tok::minus:
      // A leading '+'/'-' on a declaration line is an ObjC method specifier.
      if (!Tok->Previous && Line.MustBeDeclaration)
        Tok->setType(TT_ObjCMethodSpecifier);
      break;
    case tok::colon:
      if (!Tok->Previous)
        return false;
      // Colons from ?: are handled in parseConditional().
if (Style.Language == FormatStyle::LK_JavaScript) { if (Contexts.back().ColonIsForRangeExpr || // colon in for loop (Contexts.size() == 1 && // switch/case labels !Line.First->isOneOf(tok::kw_enum, tok::kw_case)) || Contexts.back().ContextKind == tok::l_paren || // function params Contexts.back().ContextKind == tok::l_square || // array type (!Contexts.back().IsExpression && Contexts.back().ContextKind == tok::l_brace) || // object type (Contexts.size() == 1 && Line.MustBeDeclaration)) { // method/property declaration Contexts.back().IsExpression = false; Tok->setType(TT_JsTypeColon); break; } } else if (Style.isCSharp()) { if (Contexts.back().InCSharpAttributeSpecifier) { Tok->setType(TT_AttributeColon); break; } if (Contexts.back().ContextKind == tok::l_paren) { Tok->setType(TT_CSharpNamedArgumentColon); break; } } if (Contexts.back().ColonIsDictLiteral || Style.Language == FormatStyle::LK_Proto || Style.Language == FormatStyle::LK_TextProto) { Tok->setType(TT_DictLiteral); if (Style.Language == FormatStyle::LK_TextProto) { if (FormatToken *Previous = Tok->getPreviousNonComment()) Previous->setType(TT_SelectorName); } } else if (Contexts.back().ColonIsObjCMethodExpr || Line.startsWith(TT_ObjCMethodSpecifier)) { Tok->setType(TT_ObjCMethodExpr); const FormatToken *BeforePrevious = Tok->Previous->Previous; // Ensure we tag all identifiers in method declarations as // TT_SelectorName. bool UnknownIdentifierInMethodDeclaration = Line.startsWith(TT_ObjCMethodSpecifier) && Tok->Previous->is(tok::identifier) && Tok->Previous->is(TT_Unknown); if (!BeforePrevious || // FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen. 
!(BeforePrevious->is(TT_CastRParen) || (BeforePrevious->is(TT_ObjCMethodExpr) && BeforePrevious->is(tok::colon))) || BeforePrevious->is(tok::r_square) || Contexts.back().LongestObjCSelectorName == 0 || UnknownIdentifierInMethodDeclaration) { Tok->Previous->setType(TT_SelectorName); if (!Contexts.back().FirstObjCSelectorName) Contexts.back().FirstObjCSelectorName = Tok->Previous; else if (Tok->Previous->ColumnWidth > Contexts.back().LongestObjCSelectorName) Contexts.back().LongestObjCSelectorName = Tok->Previous->ColumnWidth; Tok->Previous->ParameterIndex = Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts; ++Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts; } } else if (Contexts.back().ColonIsForRangeExpr) { Tok->setType(TT_RangeBasedForLoopColon); } else if (CurrentToken && CurrentToken->is(tok::numeric_constant)) { Tok->setType(TT_BitFieldColon); } else if (Contexts.size() == 1 && !Line.First->isOneOf(tok::kw_enum, tok::kw_case, tok::kw_default)) { FormatToken *Prev = Tok->getPreviousNonComment(); if (Prev->isOneOf(tok::r_paren, tok::kw_noexcept)) Tok->setType(TT_CtorInitializerColon); else if (Prev->is(tok::kw_try)) { // Member initializer list within function try block. FormatToken *PrevPrev = Prev->getPreviousNonComment(); if (PrevPrev && PrevPrev->isOneOf(tok::r_paren, tok::kw_noexcept)) Tok->setType(TT_CtorInitializerColon); } else Tok->setType(TT_InheritanceColon); } else if (canBeObjCSelectorComponent(*Tok->Previous) && Tok->Next && (Tok->Next->isOneOf(tok::r_paren, tok::comma) || (canBeObjCSelectorComponent(*Tok->Next) && Tok->Next->Next && Tok->Next->Next->is(tok::colon)))) { // This handles a special macro in ObjC code where selectors including // the colon are passed as macro arguments. 
Tok->setType(TT_ObjCMethodExpr); } else if (Contexts.back().ContextKind == tok::l_paren) { Tok->setType(TT_InlineASMColon); } break; case tok::pipe: case tok::amp: // | and & in declarations/type expressions represent union and // intersection types, respectively. if (Style.Language == FormatStyle::LK_JavaScript && !Contexts.back().IsExpression) Tok->setType(TT_JsTypeOperator); break; case tok::kw_if: case tok::kw_while: if (Tok->is(tok::kw_if) && CurrentToken && CurrentToken->isOneOf(tok::kw_constexpr, tok::identifier)) next(); if (CurrentToken && CurrentToken->is(tok::l_paren)) { next(); if (!parseParens(/*LookForDecls=*/true)) return false; } break; case tok::kw_for: if (Style.Language == FormatStyle::LK_JavaScript) { // x.for and {for: ...} if ((Tok->Previous && Tok->Previous->is(tok::period)) || (Tok->Next && Tok->Next->is(tok::colon))) break; // JS' for await ( ... if (CurrentToken && CurrentToken->is(Keywords.kw_await)) next(); } Contexts.back().ColonIsForRangeExpr = true; next(); if (!parseParens()) return false; break; case tok::l_paren: // When faced with 'operator()()', the kw_operator handler incorrectly // marks the first l_paren as a OverloadedOperatorLParen. Here, we make // the first two parens OverloadedOperators and the second l_paren an // OverloadedOperatorLParen. 
if (Tok->Previous && Tok->Previous->is(tok::r_paren) && Tok->Previous->MatchingParen && Tok->Previous->MatchingParen->is(TT_OverloadedOperatorLParen)) { Tok->Previous->setType(TT_OverloadedOperator); Tok->Previous->MatchingParen->setType(TT_OverloadedOperator); Tok->setType(TT_OverloadedOperatorLParen); } if (!parseParens()) return false; if (Line.MustBeDeclaration && Contexts.size() == 1 && !Contexts.back().IsExpression && !Line.startsWith(TT_ObjCProperty) && !Tok->is(TT_TypeDeclarationParen) && (!Tok->Previous || !Tok->Previous->isOneOf(tok::kw___attribute, TT_LeadingJavaAnnotation))) Line.MightBeFunctionDecl = true; break; case tok::l_square: if (!parseSquare()) return false; break; case tok::l_brace: if (Style.Language == FormatStyle::LK_TextProto) { FormatToken *Previous = Tok->getPreviousNonComment(); if (Previous && Previous->getType() != TT_DictLiteral) Previous->setType(TT_SelectorName); } if (!parseBrace()) return false; break; case tok::less: if (parseAngle()) { Tok->setType(TT_TemplateOpener); // In TT_Proto, we must distignuish between: // map // msg < item: data > // msg: < item: data > // In TT_TextProto, map does not occur. if (Style.Language == FormatStyle::LK_TextProto || (Style.Language == FormatStyle::LK_Proto && Tok->Previous && Tok->Previous->isOneOf(TT_SelectorName, TT_DictLiteral))) { Tok->setType(TT_DictLiteral); FormatToken *Previous = Tok->getPreviousNonComment(); if (Previous && Previous->getType() != TT_DictLiteral) Previous->setType(TT_SelectorName); } } else { Tok->setType(TT_BinaryOperator); NonTemplateLess.insert(Tok); CurrentToken = Tok; next(); } break; case tok::r_paren: case tok::r_square: return false; case tok::r_brace: // Lines can start with '}'. 
if (Tok->Previous) return false; break; case tok::greater: if (Style.Language != FormatStyle::LK_TextProto) Tok->setType(TT_BinaryOperator); if (Tok->Previous && Tok->Previous->is(TT_TemplateCloser)) Tok->SpacesRequiredBefore = 1; break; case tok::kw_operator: if (Style.Language == FormatStyle::LK_TextProto || Style.Language == FormatStyle::LK_Proto) break; while (CurrentToken && !CurrentToken->isOneOf(tok::l_paren, tok::semi, tok::r_paren)) { if (CurrentToken->isOneOf(tok::star, tok::amp)) CurrentToken->setType(TT_PointerOrReference); consumeToken(); if (CurrentToken && CurrentToken->is(tok::comma) && CurrentToken->Previous->isNot(tok::kw_operator)) break; if (CurrentToken && CurrentToken->Previous->isOneOf( TT_BinaryOperator, TT_UnaryOperator, tok::comma, tok::star, tok::arrow, tok::amp, tok::ampamp)) CurrentToken->Previous->setType(TT_OverloadedOperator); } if (CurrentToken && CurrentToken->is(tok::l_paren)) CurrentToken->setType(TT_OverloadedOperatorLParen); if (CurrentToken && CurrentToken->Previous->is(TT_BinaryOperator)) CurrentToken->Previous->setType(TT_OverloadedOperator); break; case tok::question: if (Style.Language == FormatStyle::LK_JavaScript && Tok->Next && Tok->Next->isOneOf(tok::semi, tok::comma, tok::colon, tok::r_paren, tok::r_brace)) { // Question marks before semicolons, colons, etc. indicate optional // types (fields, parameters), e.g. // function(x?: string, y?) {...} // class X { y?; } Tok->setType(TT_JsTypeOptionalQuestion); break; } // Declarations cannot be conditional expressions, this can only be part // of a type declaration. if (Line.MustBeDeclaration && !Contexts.back().IsExpression && Style.Language == FormatStyle::LK_JavaScript) break; if (Style.isCSharp()) { // `Type?)`, `Type?>`, `Type? name;` and `Type? name =` can only be // nullable types. // Line.MustBeDeclaration will be true for `Type? name;`. 
if ((!Contexts.back().IsExpression && Line.MustBeDeclaration) || (Tok->Next && Tok->Next->isOneOf(tok::r_paren, tok::greater)) || (Tok->Next && Tok->Next->is(tok::identifier) && Tok->Next->Next && Tok->Next->Next->is(tok::equal))) { Tok->setType(TT_CSharpNullable); break; } } parseConditional(); break; case tok::kw_template: parseTemplateDeclaration(); break; case tok::comma: if (Contexts.back().InCtorInitializer) Tok->setType(TT_CtorInitializerComma); else if (Contexts.back().InInheritanceList) Tok->setType(TT_InheritanceComma); else if (Contexts.back().FirstStartOfName && (Contexts.size() == 1 || Line.startsWith(tok::kw_for))) { Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true; Line.IsMultiVariableDeclStmt = true; } if (Contexts.back().IsForEachMacro) Contexts.back().IsExpression = true; break; case tok::identifier: if (Tok->isOneOf(Keywords.kw___has_include, Keywords.kw___has_include_next)) { parseHasInclude(); } if (Style.isCSharp() && Tok->is(Keywords.kw_where) && Tok->Next && Tok->Next->isNot(tok::l_paren)) { Tok->setType(TT_CSharpGenericTypeConstraint); parseCSharpGenericTypeConstraint(); } break; default: break; } return true; } void parseCSharpGenericTypeConstraint() { int OpenAngleBracketsCount = 0; while (CurrentToken) { if (CurrentToken->is(tok::less)) { // parseAngle is too greedy and will consume the whole line. CurrentToken->setType(TT_TemplateOpener); ++OpenAngleBracketsCount; next(); } else if (CurrentToken->is(tok::greater)) { CurrentToken->setType(TT_TemplateCloser); --OpenAngleBracketsCount; next(); } else if (CurrentToken->is(tok::comma) && OpenAngleBracketsCount == 0) { // We allow line breaks after GenericTypeConstraintComma's // so do not flag commas in Generics as GenericTypeConstraintComma's. 
CurrentToken->setType(TT_CSharpGenericTypeConstraintComma); next(); } else if (CurrentToken->is(Keywords.kw_where)) { CurrentToken->setType(TT_CSharpGenericTypeConstraint); next(); } else if (CurrentToken->is(tok::colon)) { CurrentToken->setType(TT_CSharpGenericTypeConstraintColon); next(); } else { next(); } } } void parseIncludeDirective() { if (CurrentToken && CurrentToken->is(tok::less)) { next(); while (CurrentToken) { // Mark tokens up to the trailing line comments as implicit string // literals. if (CurrentToken->isNot(tok::comment) && !CurrentToken->TokenText.startswith("//")) CurrentToken->setType(TT_ImplicitStringLiteral); next(); } } } void parseWarningOrError() { next(); // We still want to format the whitespace left of the first token of the // warning or error. next(); while (CurrentToken) { CurrentToken->setType(TT_ImplicitStringLiteral); next(); } } void parsePragma() { next(); // Consume "pragma". if (CurrentToken && CurrentToken->isOneOf(Keywords.kw_mark, Keywords.kw_option)) { bool IsMark = CurrentToken->is(Keywords.kw_mark); next(); // Consume "mark". next(); // Consume first token (so we fix leading whitespace). while (CurrentToken) { if (IsMark || CurrentToken->Previous->is(TT_BinaryOperator)) CurrentToken->setType(TT_ImplicitStringLiteral); next(); } } } void parseHasInclude() { if (!CurrentToken || !CurrentToken->is(tok::l_paren)) return; next(); // '(' parseIncludeDirective(); next(); // ')' } LineType parsePreprocessorDirective() { bool IsFirstToken = CurrentToken->IsFirst; LineType Type = LT_PreprocessorDirective; next(); if (!CurrentToken) return Type; if (Style.Language == FormatStyle::LK_JavaScript && IsFirstToken) { // JavaScript files can contain shebang lines of the form: // #!/usr/bin/env node // Treat these like C++ #include directives. while (CurrentToken) { // Tokens cannot be comments here. 
CurrentToken->setType(TT_ImplicitStringLiteral); next(); } return LT_ImportStatement; } if (CurrentToken->Tok.is(tok::numeric_constant)) { CurrentToken->SpacesRequiredBefore = 1; return Type; } // Hashes in the middle of a line can lead to any strange token // sequence. if (!CurrentToken->Tok.getIdentifierInfo()) return Type; switch (CurrentToken->Tok.getIdentifierInfo()->getPPKeywordID()) { case tok::pp_include: case tok::pp_include_next: case tok::pp_import: next(); parseIncludeDirective(); Type = LT_ImportStatement; break; case tok::pp_error: case tok::pp_warning: parseWarningOrError(); break; case tok::pp_pragma: parsePragma(); break; case tok::pp_if: case tok::pp_elif: Contexts.back().IsExpression = true; next(); parseLine(); break; default: break; } while (CurrentToken) { FormatToken *Tok = CurrentToken; next(); if (Tok->is(tok::l_paren)) parseParens(); else if (Tok->isOneOf(Keywords.kw___has_include, Keywords.kw___has_include_next)) parseHasInclude(); } return Type; } public: LineType parseLine() { if (!CurrentToken) return LT_Invalid; NonTemplateLess.clear(); if (CurrentToken->is(tok::hash)) return parsePreprocessorDirective(); // Directly allow to 'import ' to support protocol buffer // definitions (github.com/google/protobuf) or missing "#" (either way we // should not break the line). IdentifierInfo *Info = CurrentToken->Tok.getIdentifierInfo(); if ((Style.Language == FormatStyle::LK_Java && CurrentToken->is(Keywords.kw_package)) || (Info && Info->getPPKeywordID() == tok::pp_import && CurrentToken->Next && CurrentToken->Next->isOneOf(tok::string_literal, tok::identifier, tok::kw_static))) { next(); parseIncludeDirective(); return LT_ImportStatement; } // If this line starts and ends in '<' and '>', respectively, it is likely // part of "#define ". 
if (CurrentToken->is(tok::less) && Line.Last->is(tok::greater)) { parseIncludeDirective(); return LT_ImportStatement; } // In .proto files, top-level options and package statements are very // similar to import statements and should not be line-wrapped. if (Style.Language == FormatStyle::LK_Proto && Line.Level == 0 && CurrentToken->isOneOf(Keywords.kw_option, Keywords.kw_package)) { next(); if (CurrentToken && CurrentToken->is(tok::identifier)) { while (CurrentToken) next(); return LT_ImportStatement; } } bool KeywordVirtualFound = false; bool ImportStatement = false; // import {...} from '...'; if (Style.Language == FormatStyle::LK_JavaScript && CurrentToken->is(Keywords.kw_import)) ImportStatement = true; while (CurrentToken) { if (CurrentToken->is(tok::kw_virtual)) KeywordVirtualFound = true; if (Style.Language == FormatStyle::LK_JavaScript) { // export {...} from '...'; // An export followed by "from 'some string';" is a re-export from // another module identified by a URI and is treated as a // LT_ImportStatement (i.e. prevent wraps on it for long URIs). // Just "export {...};" or "export class ..." should not be treated as // an import in this sense. 
if (Line.First->is(tok::kw_export) && CurrentToken->is(Keywords.kw_from) && CurrentToken->Next && CurrentToken->Next->isStringLiteral()) ImportStatement = true; if (isClosureImportStatement(*CurrentToken)) ImportStatement = true; } if (!consumeToken()) return LT_Invalid; } if (KeywordVirtualFound) return LT_VirtualFunctionDecl; if (ImportStatement) return LT_ImportStatement; if (Line.startsWith(TT_ObjCMethodSpecifier)) { if (Contexts.back().FirstObjCSelectorName) Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName = Contexts.back().LongestObjCSelectorName; return LT_ObjCMethodDecl; } for (const auto &ctx : Contexts) { if (ctx.InStructArrayInitializer) { return LT_ArrayOfStructInitializer; } } return LT_Other; } private: bool isClosureImportStatement(const FormatToken &Tok) { // FIXME: Closure-library specific stuff should not be hard-coded but be // configurable. return Tok.TokenText == "goog" && Tok.Next && Tok.Next->is(tok::period) && Tok.Next->Next && (Tok.Next->Next->TokenText == "module" || Tok.Next->Next->TokenText == "provide" || Tok.Next->Next->TokenText == "require" || Tok.Next->Next->TokenText == "requireType" || Tok.Next->Next->TokenText == "forwardDeclare") && Tok.Next->Next->Next && Tok.Next->Next->Next->is(tok::l_paren); } void resetTokenMetadata(FormatToken *Token) { if (!Token) return; // Reset token type in case we have already looked at it and then // recovered from an error (e.g. failure to find the matching >). 
if (!CurrentToken->isOneOf( TT_LambdaLSquare, TT_LambdaLBrace, TT_AttributeMacro, TT_IfMacro, TT_ForEachMacro, TT_TypenameMacro, TT_FunctionLBrace, TT_ImplicitStringLiteral, TT_InlineASMBrace, TT_FatArrow, TT_LambdaArrow, TT_NamespaceMacro, TT_OverloadedOperator, TT_RegexLiteral, TT_TemplateString, TT_ObjCStringLiteral, TT_UntouchableMacroFunc, TT_ConstraintJunctions, TT_StatementAttributeLikeMacro)) CurrentToken->setType(TT_Unknown); CurrentToken->Role.reset(); CurrentToken->MatchingParen = nullptr; CurrentToken->FakeLParens.clear(); CurrentToken->FakeRParens = 0; } void next() { if (CurrentToken) { CurrentToken->NestingLevel = Contexts.size() - 1; CurrentToken->BindingStrength = Contexts.back().BindingStrength; modifyContext(*CurrentToken); determineTokenType(*CurrentToken); CurrentToken = CurrentToken->Next; } resetTokenMetadata(CurrentToken); } /// A struct to hold information valid in a specific context, e.g. /// a pair of parenthesis. struct Context { Context(tok::TokenKind ContextKind, unsigned BindingStrength, bool IsExpression) : ContextKind(ContextKind), BindingStrength(BindingStrength), IsExpression(IsExpression) {} tok::TokenKind ContextKind; unsigned BindingStrength; bool IsExpression; unsigned LongestObjCSelectorName = 0; bool ColonIsForRangeExpr = false; bool ColonIsDictLiteral = false; bool ColonIsObjCMethodExpr = false; FormatToken *FirstObjCSelectorName = nullptr; FormatToken *FirstStartOfName = nullptr; bool CanBeExpression = true; bool InTemplateArgument = false; bool InCtorInitializer = false; bool InInheritanceList = false; bool CaretFound = false; bool IsForEachMacro = false; bool InCpp11AttributeSpecifier = false; bool InCSharpAttributeSpecifier = false; bool InStructArrayInitializer = false; }; /// Puts a new \c Context onto the stack \c Contexts for the lifetime /// of each instance. 
struct ScopedContextCreator { AnnotatingParser &P; ScopedContextCreator(AnnotatingParser &P, tok::TokenKind ContextKind, unsigned Increase) : P(P) { P.Contexts.push_back(Context(ContextKind, P.Contexts.back().BindingStrength + Increase, P.Contexts.back().IsExpression)); } ~ScopedContextCreator() { if (P.Style.AlignArrayOfStructures != FormatStyle::AIAS_None) { if (P.Contexts.back().InStructArrayInitializer) { P.Contexts.pop_back(); P.Contexts.back().InStructArrayInitializer = true; return; } } P.Contexts.pop_back(); } }; void modifyContext(const FormatToken &Current) { if (Current.getPrecedence() == prec::Assignment && !Line.First->isOneOf(tok::kw_template, tok::kw_using, tok::kw_return) && // Type aliases use `type X = ...;` in TypeScript and can be exported // using `export type ...`. !(Style.Language == FormatStyle::LK_JavaScript && (Line.startsWith(Keywords.kw_type, tok::identifier) || Line.startsWith(tok::kw_export, Keywords.kw_type, tok::identifier))) && (!Current.Previous || Current.Previous->isNot(tok::kw_operator))) { Contexts.back().IsExpression = true; if (!Line.startsWith(TT_UnaryOperator)) { for (FormatToken *Previous = Current.Previous; Previous && Previous->Previous && !Previous->Previous->isOneOf(tok::comma, tok::semi); Previous = Previous->Previous) { if (Previous->isOneOf(tok::r_square, tok::r_paren)) { Previous = Previous->MatchingParen; if (!Previous) break; } if (Previous->opensScope()) break; if (Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator) && Previous->isOneOf(tok::star, tok::amp, tok::ampamp) && Previous->Previous && Previous->Previous->isNot(tok::equal)) Previous->setType(TT_PointerOrReference); } } } else if (Current.is(tok::lessless) && (!Current.Previous || !Current.Previous->is(tok::kw_operator))) { Contexts.back().IsExpression = true; } else if (Current.isOneOf(tok::kw_return, tok::kw_throw)) { Contexts.back().IsExpression = true; } else if (Current.is(TT_TrailingReturnArrow)) { Contexts.back().IsExpression = false; } else if 
(Current.is(TT_LambdaArrow) || Current.is(Keywords.kw_assert)) { Contexts.back().IsExpression = Style.Language == FormatStyle::LK_Java; } else if (Current.Previous && Current.Previous->is(TT_CtorInitializerColon)) { Contexts.back().IsExpression = true; Contexts.back().InCtorInitializer = true; } else if (Current.Previous && Current.Previous->is(TT_InheritanceColon)) { Contexts.back().InInheritanceList = true; } else if (Current.isOneOf(tok::r_paren, tok::greater, tok::comma)) { for (FormatToken *Previous = Current.Previous; Previous && Previous->isOneOf(tok::star, tok::amp); Previous = Previous->Previous) Previous->setType(TT_PointerOrReference); if (Line.MustBeDeclaration && !Contexts.front().InCtorInitializer) Contexts.back().IsExpression = false; } else if (Current.is(tok::kw_new)) { Contexts.back().CanBeExpression = false; } else if (Current.is(tok::semi) || (Current.is(tok::exclaim) && Current.Previous && !Current.Previous->is(tok::kw_operator))) { // This should be the condition or increment in a for-loop. // But not operator !() (can't use TT_OverloadedOperator here as its not // been annotated yet). Contexts.back().IsExpression = true; } } static FormatToken *untilMatchingParen(FormatToken *Current) { // Used when `MatchingParen` is not yet established. int ParenLevel = 0; while (Current) { if (Current->is(tok::l_paren)) ParenLevel++; if (Current->is(tok::r_paren)) ParenLevel--; if (ParenLevel < 1) break; Current = Current->Next; } return Current; } static bool isDeductionGuide(FormatToken &Current) { // Look for a deduction guide template A(...) -> A<...>; if (Current.Previous && Current.Previous->is(tok::r_paren) && Current.startsSequence(tok::arrow, tok::identifier, tok::less)) { // Find the TemplateCloser. 
FormatToken *TemplateCloser = Current.Next->Next; int NestingLevel = 0; while (TemplateCloser) { // Skip over an expressions in parens A<(3 < 2)>; if (TemplateCloser->is(tok::l_paren)) { // No Matching Paren yet so skip to matching paren TemplateCloser = untilMatchingParen(TemplateCloser); } if (TemplateCloser->is(tok::less)) NestingLevel++; if (TemplateCloser->is(tok::greater)) NestingLevel--; if (NestingLevel < 1) break; TemplateCloser = TemplateCloser->Next; } // Assuming we have found the end of the template ensure its followed // with a semi-colon. if (TemplateCloser && TemplateCloser->Next && TemplateCloser->Next->is(tok::semi) && Current.Previous->MatchingParen) { // Determine if the identifier `A` prior to the A<..>; is the same as // prior to the A(..) FormatToken *LeadingIdentifier = Current.Previous->MatchingParen->Previous; // Differentiate a deduction guide by seeing the // > of the template prior to the leading identifier. if (LeadingIdentifier) { FormatToken *PriorLeadingIdentifier = LeadingIdentifier->Previous; // Skip back past explicit decoration if (PriorLeadingIdentifier && PriorLeadingIdentifier->is(tok::kw_explicit)) PriorLeadingIdentifier = PriorLeadingIdentifier->Previous; return (PriorLeadingIdentifier && PriorLeadingIdentifier->is(TT_TemplateCloser) && LeadingIdentifier->TokenText == Current.Next->TokenText); } } } return false; } void determineTokenType(FormatToken &Current) { if (!Current.is(TT_Unknown)) // The token type is already known. return; if ((Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) && Current.is(tok::exclaim)) { if (Current.Previous) { bool IsIdentifier = Style.Language == FormatStyle::LK_JavaScript ? 
Keywords.IsJavaScriptIdentifier( *Current.Previous, /* AcceptIdentifierName= */ true) : Current.Previous->is(tok::identifier); if (IsIdentifier || Current.Previous->isOneOf( tok::kw_namespace, tok::r_paren, tok::r_square, tok::r_brace, tok::kw_false, tok::kw_true, Keywords.kw_type, Keywords.kw_get, Keywords.kw_set) || Current.Previous->Tok.isLiteral()) { Current.setType(TT_NonNullAssertion); return; } } if (Current.Next && Current.Next->isOneOf(TT_BinaryOperator, Keywords.kw_as)) { Current.setType(TT_NonNullAssertion); return; } } // Line.MightBeFunctionDecl can only be true after the parentheses of a // function declaration have been found. In this case, 'Current' is a // trailing token of this declaration and thus cannot be a name. if (Current.is(Keywords.kw_instanceof)) { Current.setType(TT_BinaryOperator); } else if (isStartOfName(Current) && (!Line.MightBeFunctionDecl || Current.NestingLevel != 0)) { Contexts.back().FirstStartOfName = &Current; Current.setType(TT_StartOfName); } else if (Current.is(tok::semi)) { // Reset FirstStartOfName after finding a semicolon so that a for loop // with multiple increment statements is not confused with a for loop // having multiple variable declarations. Contexts.back().FirstStartOfName = nullptr; } else if (Current.isOneOf(tok::kw_auto, tok::kw___auto_type)) { AutoFound = true; } else if (Current.is(tok::arrow) && Style.Language == FormatStyle::LK_Java) { Current.setType(TT_LambdaArrow); } else if (Current.is(tok::arrow) && AutoFound && Line.MustBeDeclaration && Current.NestingLevel == 0 && !Current.Previous->is(tok::kw_operator)) { // not auto operator->() -> xxx; Current.setType(TT_TrailingReturnArrow); } else if (Current.is(tok::arrow) && Current.Previous && Current.Previous->is(tok::r_brace)) { // Concept implicit conversion contraint needs to be treated like // a trailing return type ... } -> . Current.setType(TT_TrailingReturnArrow); } else if (isDeductionGuide(Current)) { // Deduction guides trailing arrow " A(...) 
-> A;". Current.setType(TT_TrailingReturnArrow); } else if (Current.isOneOf(tok::star, tok::amp, tok::ampamp)) { Current.setType(determineStarAmpUsage( Current, Contexts.back().CanBeExpression && Contexts.back().IsExpression, Contexts.back().InTemplateArgument)); } else if (Current.isOneOf(tok::minus, tok::plus, tok::caret)) { Current.setType(determinePlusMinusCaretUsage(Current)); if (Current.is(TT_UnaryOperator) && Current.is(tok::caret)) Contexts.back().CaretFound = true; } else if (Current.isOneOf(tok::minusminus, tok::plusplus)) { Current.setType(determineIncrementUsage(Current)); } else if (Current.isOneOf(tok::exclaim, tok::tilde)) { Current.setType(TT_UnaryOperator); } else if (Current.is(tok::question)) { if (Style.Language == FormatStyle::LK_JavaScript && Line.MustBeDeclaration && !Contexts.back().IsExpression) { // In JavaScript, `interface X { foo?(): bar; }` is an optional method // on the interface, not a ternary expression. Current.setType(TT_JsTypeOptionalQuestion); } else { Current.setType(TT_ConditionalExpr); } } else if (Current.isBinaryOperator() && (!Current.Previous || Current.Previous->isNot(tok::l_square)) && (!Current.is(tok::greater) && Style.Language != FormatStyle::LK_TextProto)) { Current.setType(TT_BinaryOperator); } else if (Current.is(tok::comment)) { if (Current.TokenText.startswith("/*")) { if (Current.TokenText.endswith("*/")) Current.setType(TT_BlockComment); else // The lexer has for some reason determined a comment here. But we // cannot really handle it, if it isn't properly terminated. 
Current.Tok.setKind(tok::unknown); } else { Current.setType(TT_LineComment); } } else if (Current.is(tok::r_paren)) { if (rParenEndsCast(Current)) Current.setType(TT_CastRParen); if (Current.MatchingParen && Current.Next && !Current.Next->isBinaryOperator() && !Current.Next->isOneOf(tok::semi, tok::colon, tok::l_brace, tok::comma, tok::period, tok::arrow, tok::coloncolon)) if (FormatToken *AfterParen = Current.MatchingParen->Next) { // Make sure this isn't the return type of an Obj-C block declaration if (AfterParen->Tok.isNot(tok::caret)) { if (FormatToken *BeforeParen = Current.MatchingParen->Previous) if (BeforeParen->is(tok::identifier) && !BeforeParen->is(TT_TypenameMacro) && BeforeParen->TokenText == BeforeParen->TokenText.upper() && (!BeforeParen->Previous || BeforeParen->Previous->ClosesTemplateDeclaration)) Current.setType(TT_FunctionAnnotationRParen); } } } else if (Current.is(tok::at) && Current.Next && Style.Language != FormatStyle::LK_JavaScript && Style.Language != FormatStyle::LK_Java) { // In Java & JavaScript, "@..." is a decorator or annotation. In ObjC, it // marks declarations and properties that need special formatting. switch (Current.Next->Tok.getObjCKeywordID()) { case tok::objc_interface: case tok::objc_implementation: case tok::objc_protocol: Current.setType(TT_ObjCDecl); break; case tok::objc_property: Current.setType(TT_ObjCProperty); break; default: break; } } else if (Current.is(tok::period)) { FormatToken *PreviousNoComment = Current.getPreviousNonComment(); if (PreviousNoComment && PreviousNoComment->isOneOf(tok::comma, tok::l_brace)) Current.setType(TT_DesignatedInitializerPeriod); else if (Style.Language == FormatStyle::LK_Java && Current.Previous && Current.Previous->isOneOf(TT_JavaAnnotation, TT_LeadingJavaAnnotation)) { Current.setType(Current.Previous->getType()); } } else if (canBeObjCSelectorComponent(Current) && // FIXME(bug 36976): ObjC return types shouldn't use // TT_CastRParen. 
Current.Previous && Current.Previous->is(TT_CastRParen) && Current.Previous->MatchingParen && Current.Previous->MatchingParen->Previous && Current.Previous->MatchingParen->Previous->is( TT_ObjCMethodSpecifier)) { // This is the first part of an Objective-C selector name. (If there's no // colon after this, this is the only place which annotates the identifier // as a selector.) Current.setType(TT_SelectorName); } else if (Current.isOneOf(tok::identifier, tok::kw_const, tok::kw_noexcept, tok::kw_requires) && Current.Previous && !Current.Previous->isOneOf(tok::equal, tok::at) && Line.MightBeFunctionDecl && Contexts.size() == 1) { // Line.MightBeFunctionDecl can only be true after the parentheses of a // function declaration have been found. Current.setType(TT_TrailingAnnotation); } else if ((Style.Language == FormatStyle::LK_Java || Style.Language == FormatStyle::LK_JavaScript) && Current.Previous) { if (Current.Previous->is(tok::at) && Current.isNot(Keywords.kw_interface)) { const FormatToken &AtToken = *Current.Previous; const FormatToken *Previous = AtToken.getPreviousNonComment(); if (!Previous || Previous->is(TT_LeadingJavaAnnotation)) Current.setType(TT_LeadingJavaAnnotation); else Current.setType(TT_JavaAnnotation); } else if (Current.Previous->is(tok::period) && Current.Previous->isOneOf(TT_JavaAnnotation, TT_LeadingJavaAnnotation)) { Current.setType(Current.Previous->getType()); } } } /// Take a guess at whether \p Tok starts a name of a function or /// variable declaration. /// /// This is a heuristic based on whether \p Tok is an identifier following /// something that is likely a type. 
bool isStartOfName(const FormatToken &Tok) {
  // A declarator name must be an identifier with something before it.
  if (Tok.isNot(tok::identifier) || !Tok.Previous)
    return false;

  // After an annotation, `instanceof`, or `as`, the identifier is a type
  // use, not a declared name.
  if (Tok.Previous->isOneOf(TT_LeadingJavaAnnotation, Keywords.kw_instanceof,
                            Keywords.kw_as))
    return false;

  // In JavaScript `for (x in y)`, the identifier after `in` is not a name.
  if (Style.Language == FormatStyle::LK_JavaScript &&
      Tok.Previous->is(Keywords.kw_in))
    return false;

  // Skip "const" as it does not have an influence on whether this is a name.
  FormatToken *PreviousNotConst = Tok.getPreviousNonComment();
  while (PreviousNotConst && PreviousNotConst->is(tok::kw_const))
    PreviousNotConst = PreviousNotConst->getPreviousNonComment();

  if (!PreviousNotConst)
    return false;

  // An identifier directly preceded by '#' is a preprocessor keyword
  // (e.g. `#define NAME ...`), not a type preceding a name.
  bool IsPPKeyword = PreviousNotConst->is(tok::identifier) &&
                     PreviousNotConst->Previous &&
                     PreviousNotConst->Previous->is(tok::hash);

  // After a template closer (e.g. `vector<int> x`), this is a name unless
  // the '<...>' follows a member access period or the `template` keyword.
  if (PreviousNotConst->is(TT_TemplateCloser))
    return PreviousNotConst && PreviousNotConst->MatchingParen &&
           PreviousNotConst->MatchingParen->Previous &&
           PreviousNotConst->MatchingParen->Previous->isNot(tok::period) &&
           PreviousNotConst->MatchingParen->Previous->isNot(tok::kw_template);

  // After a type-declaration paren, e.g. `decltype(a) x`.
  if (PreviousNotConst->is(tok::r_paren) &&
      PreviousNotConst->is(TT_TypeDeclarationParen))
    return true;

  return (!IsPPKeyword &&
          PreviousNotConst->isOneOf(tok::identifier, tok::kw_auto)) ||
         PreviousNotConst->is(TT_PointerOrReference) ||
         PreviousNotConst->isSimpleTypeSpecifier();
}

/// Determine whether ')' is ending a cast.
bool rParenEndsCast(const FormatToken &Tok) {
  // C-style casts are only used in C++, C# and Java.
  if (!Style.isCSharp() && !Style.isCpp() &&
      Style.Language != FormatStyle::LK_Java)
    return false;

  // Empty parens aren't casts and there are no casts at the end of the line.
  if (Tok.Previous == Tok.MatchingParen || !Tok.Next || !Tok.MatchingParen)
    return false;

  FormatToken *LeftOfParens = Tok.MatchingParen->getPreviousNonComment();
  if (LeftOfParens) {
    // If there is a closing parenthesis left of the current parentheses,
    // look past it as these might be chained casts.
    if (LeftOfParens->is(tok::r_paren)) {
      if (!LeftOfParens->MatchingParen ||
          !LeftOfParens->MatchingParen->Previous)
        return false;
      LeftOfParens = LeftOfParens->MatchingParen->Previous;
    }

    // If there is an identifier (or with a few exceptions a keyword) right
    // before the parentheses, this is unlikely to be a cast.
    if (LeftOfParens->Tok.getIdentifierInfo() &&
        !LeftOfParens->isOneOf(Keywords.kw_in, tok::kw_return, tok::kw_case,
                               tok::kw_delete))
      return false;

    // Certain other tokens right before the parentheses are also signals
    // that this cannot be a cast.
    if (LeftOfParens->isOneOf(tok::at, tok::r_square, TT_OverloadedOperator,
                              TT_TemplateCloser, tok::ellipsis))
      return false;
  }

  // `(...)?` starts a conditional expression, not a cast.
  if (Tok.Next->is(tok::question))
    return false;

  // `foreach((A a, B b) in someList)` should not be seen as a cast.
  if (Tok.Next->is(Keywords.kw_in) && Style.isCSharp())
    return false;

  // Functions which end with decorations like volatile, noexcept are
  // unlikely to be casts.
  if (Tok.Next->isOneOf(tok::kw_noexcept, tok::kw_volatile, tok::kw_const,
                        tok::kw_requires, tok::kw_throw, tok::arrow,
                        Keywords.kw_override, Keywords.kw_final) ||
      isCpp11AttributeSpecifier(*Tok.Next))
    return false;

  // As Java has no function types, a "(" after the ")" likely means that
  // this is a cast.
  if (Style.Language == FormatStyle::LK_Java && Tok.Next->is(tok::l_paren))
    return true;

  // If a (non-string) literal follows, this is likely a cast.
  if (Tok.Next->isNot(tok::string_literal) &&
      (Tok.Next->Tok.isLiteral() ||
       Tok.Next->isOneOf(tok::kw_sizeof, tok::kw_alignof)))
    return true;

  // Heuristically try to determine whether the parentheses contain a type.
  auto IsQualifiedPointerOrReference = [](FormatToken *T) {
    // This is used to handle cases such as x = (foo *const)&y;
    assert(!T->isSimpleTypeSpecifier() && "Should have already been checked");
    // Strip trailing qualifiers such as const or volatile when checking
    // whether the parens could be a cast to a pointer/reference type.
    while (T) {
      if (T->is(TT_AttributeParen)) {
        // Handle `x = (foo *__attribute__((foo)))&v;`:
        if (T->MatchingParen && T->MatchingParen->Previous &&
            T->MatchingParen->Previous->is(tok::kw___attribute)) {
          T = T->MatchingParen->Previous->Previous;
          continue;
        }
      } else if (T->is(TT_AttributeSquare)) {
        // Handle `x = (foo *[[clang::foo]])&v;`:
        if (T->MatchingParen && T->MatchingParen->Previous) {
          T = T->MatchingParen->Previous;
          continue;
        }
      } else if (T->canBePointerOrReferenceQualifier()) {
        T = T->Previous;
        continue;
      }
      break;
    }
    // After stripping qualifiers/attributes, a cast-to-pointer/reference
    // must end in a token already annotated TT_PointerOrReference.
    return T && T->is(TT_PointerOrReference);
  };
  bool ParensAreType =
      !Tok.Previous ||
      Tok.Previous->isOneOf(TT_TemplateCloser, TT_TypeDeclarationParen) ||
      Tok.Previous->isSimpleTypeSpecifier() ||
      IsQualifiedPointerOrReference(Tok.Previous);
  bool ParensCouldEndDecl =
      Tok.Next->isOneOf(tok::equal, tok::semi, tok::l_brace, tok::greater);
  if (ParensAreType && !ParensCouldEndDecl)
    return true;

  // At this point, we heuristically assume that there are no casts at the
  // start of the line. We assume that we have found most cases where there
  // are by the logic above, e.g. "(void)x;".
  if (!LeftOfParens)
    return false;

  // Certain token types inside the parentheses mean that this can't be a
  // cast.
  for (const FormatToken *Token = Tok.MatchingParen->Next; Token != &Tok;
       Token = Token->Next)
    if (Token->is(TT_BinaryOperator))
      return false;

  // If the following token is an identifier or 'this', this is a cast. All
  // cases where this can be something else are handled above.
  if (Tok.Next->isOneOf(tok::identifier, tok::kw_this))
    return true;

  // Look for a cast `( x ) (`.
  if (Tok.Next->is(tok::l_paren) && Tok.Previous && Tok.Previous->Previous) {
    if (Tok.Previous->is(tok::identifier) &&
        Tok.Previous->Previous->is(tok::l_paren))
      return true;
  }

  if (!Tok.Next->Next)
    return false;

  // If the next token after the parenthesis is a unary operator, assume
  // that this is cast, unless there are unexpected tokens inside the
  // parenthesis.
  bool NextIsUnary =
      Tok.Next->isUnaryOperator() || Tok.Next->isOneOf(tok::amp, tok::star);
  if (!NextIsUnary || Tok.Next->is(tok::plus) ||
      !Tok.Next->Next->isOneOf(tok::identifier, tok::numeric_constant))
    return false;

  // Search for unexpected tokens.
  for (FormatToken *Prev = Tok.Previous; Prev != Tok.MatchingParen;
       Prev = Prev->Previous) {
    if (!Prev->isOneOf(tok::kw_const, tok::identifier, tok::coloncolon))
      return false;
  }
  return true;
}

/// Return the type of the given token assuming it is * or &.
TokenType determineStarAmpUsage(const FormatToken &Tok, bool IsExpression,
                                bool InTemplateArgument) {
  // In JavaScript this annotator always treats * and & as binary operators.
  if (Style.Language == FormatStyle::LK_JavaScript)
    return TT_BinaryOperator;

  // && in C# must be a binary operator.
  if (Style.isCSharp() && Tok.is(tok::ampamp))
    return TT_BinaryOperator;

  const FormatToken *PrevToken = Tok.getPreviousNonComment();
  if (!PrevToken)
    return TT_UnaryOperator;

  const FormatToken *NextToken = Tok.getNextNonComment();
  if (!NextToken ||
      NextToken->isOneOf(tok::arrow, tok::equal, tok::kw_noexcept) ||
      NextToken->canBePointerOrReferenceQualifier() ||
      (NextToken->is(tok::l_brace) && !NextToken->getNextNonComment()))
    return TT_PointerOrReference;

  if (PrevToken->is(tok::coloncolon))
    return TT_PointerOrReference;

  if (PrevToken->is(tok::r_paren) && PrevToken->is(TT_TypeDeclarationParen))
    return TT_PointerOrReference;

  // After opening punctuation, statement keywords, or another operator,
  // * and & are unary (dereference / address-of).
  if (PrevToken->isOneOf(tok::l_paren, tok::l_square, tok::l_brace,
                         tok::comma, tok::semi, tok::kw_return, tok::colon,
                         tok::kw_co_return, tok::kw_co_await,
                         tok::kw_co_yield, tok::equal, tok::kw_delete,
                         tok::kw_sizeof, tok::kw_throw) ||
      PrevToken->isOneOf(TT_BinaryOperator, TT_ConditionalExpr,
                         TT_UnaryOperator, TT_CastRParen))
    return TT_UnaryOperator;

  if (NextToken->is(tok::l_square) && NextToken->isNot(TT_LambdaLSquare))
    return TT_PointerOrReference;
  if (NextToken->is(tok::kw_operator) && !IsExpression)
    return TT_PointerOrReference;
  if (NextToken->isOneOf(tok::comma, tok::semi))
    return TT_PointerOrReference;

  if (PrevToken->Tok.isLiteral() ||
      PrevToken->isOneOf(tok::r_paren, tok::r_square, tok::kw_true,
                         tok::kw_false, tok::r_brace) ||
      NextToken->Tok.isLiteral() ||
      NextToken->isOneOf(tok::kw_true, tok::kw_false) ||
      NextToken->isUnaryOperator() ||
      // If we know we're in a template argument, there are no named
      // declarations. Thus, having an identifier on the right-hand side
      // indicates a binary operator.
      (InTemplateArgument && NextToken->Tok.isAnyIdentifier()))
    return TT_BinaryOperator;

  // "&&(" is quite unlikely to be two successive unary "&".
  if (Tok.is(tok::ampamp) && NextToken->is(tok::l_paren))
    return TT_BinaryOperator;

  // This catches some cases where evaluation order is used as control flow:
  //   aaa && aaa->f();
  if (NextToken->Tok.isAnyIdentifier()) {
    const FormatToken *NextNextToken = NextToken->getNextNonComment();
    if (NextNextToken && NextNextToken->is(tok::arrow))
      return TT_BinaryOperator;
  }

  // It is very unlikely that we are going to find a pointer or reference
  // type definition on the RHS of an assignment.
  if (IsExpression && !Contexts.back().CaretFound)
    return TT_BinaryOperator;
  return TT_PointerOrReference;
}

/// Return the type (unary vs. binary operator) of the given token assuming
/// it is +, - or ^.
TokenType determinePlusMinusCaretUsage(const FormatToken &Tok) {
  const FormatToken *PrevToken = Tok.getPreviousNonComment();
  if (!PrevToken)
    return TT_UnaryOperator;

  if (PrevToken->isOneOf(TT_CastRParen, TT_UnaryOperator))
    // This must be a sequence of leading unary operators.
    return TT_UnaryOperator;

  // Use heuristics to recognize unary operators.
  if (PrevToken->isOneOf(tok::equal, tok::l_paren, tok::comma, tok::l_square,
                         tok::question, tok::colon, tok::kw_return,
                         tok::kw_case, tok::at, tok::l_brace, tok::kw_throw,
                         tok::kw_co_return, tok::kw_co_yield))
    return TT_UnaryOperator;

  // There can't be two consecutive binary operators.
  if (PrevToken->is(TT_BinaryOperator))
    return TT_UnaryOperator;

  // Fall back to marking the token as binary operator.
  return TT_BinaryOperator;
}

/// Determine whether ++/-- are pre- or post-increments/-decrements.
TokenType determineIncrementUsage(const FormatToken &Tok) { const FormatToken *PrevToken = Tok.getPreviousNonComment(); if (!PrevToken || PrevToken->is(TT_CastRParen)) return TT_UnaryOperator; if (PrevToken->isOneOf(tok::r_paren, tok::r_square, tok::identifier)) return TT_TrailingUnaryOperator; return TT_UnaryOperator; } SmallVector Contexts; const FormatStyle &Style; AnnotatedLine &Line; FormatToken *CurrentToken; bool AutoFound; const AdditionalKeywords &Keywords; // Set of "<" tokens that do not open a template parameter list. If parseAngle // determines that a specific token can't be a template opener, it will make // same decision irrespective of the decisions for tokens leading up to it. // Store this information to prevent this from causing exponential runtime. llvm::SmallPtrSet NonTemplateLess; }; static const int PrecedenceUnaryOperator = prec::PointerToMember + 1; static const int PrecedenceArrowAndPeriod = prec::PointerToMember + 2; /// Parses binary expressions by inserting fake parenthesis based on /// operator precedence. class ExpressionParser { public: ExpressionParser(const FormatStyle &Style, const AdditionalKeywords &Keywords, AnnotatedLine &Line) : Style(Style), Keywords(Keywords), Current(Line.First) {} /// Parse expressions with the given operator precedence. void parse(int Precedence = 0) { // Skip 'return' and ObjC selector colons as they are not part of a binary // expression. while (Current && (Current->is(tok::kw_return) || (Current->is(tok::colon) && Current->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral)))) next(); if (!Current || Precedence > PrecedenceArrowAndPeriod) return; // Conditional expressions need to be parsed separately for proper nesting. if (Precedence == prec::Conditional) { parseConditionalExpr(); return; } // Parse unary operators, which all have a higher precedence than binary // operators. 
if (Precedence == PrecedenceUnaryOperator) { parseUnaryOperator(); return; } FormatToken *Start = Current; FormatToken *LatestOperator = nullptr; unsigned OperatorIndex = 0; while (Current) { // Consume operators with higher precedence. parse(Precedence + 1); int CurrentPrecedence = getCurrentPrecedence(); if (Current && Current->is(TT_SelectorName) && Precedence == CurrentPrecedence) { if (LatestOperator) addFakeParenthesis(Start, prec::Level(Precedence)); Start = Current; } // At the end of the line or when an operator with higher precedence is // found, insert fake parenthesis and return. if (!Current || (Current->closesScope() && (Current->MatchingParen || Current->is(TT_TemplateString))) || (CurrentPrecedence != -1 && CurrentPrecedence < Precedence) || (CurrentPrecedence == prec::Conditional && Precedence == prec::Assignment && Current->is(tok::colon))) { break; } // Consume scopes: (), [], <> and {} if (Current->opensScope()) { // In fragment of a JavaScript template string can look like '}..${' and // thus close a scope and open a new one at the same time. while (Current && (!Current->closesScope() || Current->opensScope())) { next(); parse(); } next(); } else { // Operator found. if (CurrentPrecedence == Precedence) { if (LatestOperator) LatestOperator->NextOperator = Current; LatestOperator = Current; Current->OperatorIndex = OperatorIndex; ++OperatorIndex; } next(/*SkipPastLeadingComments=*/Precedence > 0); } } if (LatestOperator && (Current || Precedence > 0)) { // LatestOperator->LastOperator = true; if (Precedence == PrecedenceArrowAndPeriod) { // Call expressions don't have a binary operator precedence. addFakeParenthesis(Start, prec::Unknown); } else { addFakeParenthesis(Start, prec::Level(Precedence)); } } } private: /// Gets the precedence (+1) of the given token for binary operators /// and other tokens that we treat like binary operators. 
int getCurrentPrecedence() { if (Current) { const FormatToken *NextNonComment = Current->getNextNonComment(); if (Current->is(TT_ConditionalExpr)) return prec::Conditional; if (NextNonComment && Current->is(TT_SelectorName) && (NextNonComment->isOneOf(TT_DictLiteral, TT_JsTypeColon) || ((Style.Language == FormatStyle::LK_Proto || Style.Language == FormatStyle::LK_TextProto) && NextNonComment->is(tok::less)))) return prec::Assignment; if (Current->is(TT_JsComputedPropertyName)) return prec::Assignment; if (Current->is(TT_LambdaArrow)) return prec::Comma; if (Current->is(TT_FatArrow)) return prec::Assignment; if (Current->isOneOf(tok::semi, TT_InlineASMColon, TT_SelectorName) || (Current->is(tok::comment) && NextNonComment && NextNonComment->is(TT_SelectorName))) return 0; if (Current->is(TT_RangeBasedForLoopColon)) return prec::Comma; if ((Style.Language == FormatStyle::LK_Java || Style.Language == FormatStyle::LK_JavaScript) && Current->is(Keywords.kw_instanceof)) return prec::Relational; if (Style.Language == FormatStyle::LK_JavaScript && Current->isOneOf(Keywords.kw_in, Keywords.kw_as)) return prec::Relational; if (Current->is(TT_BinaryOperator) || Current->is(tok::comma)) return Current->getPrecedence(); if (Current->isOneOf(tok::period, tok::arrow)) return PrecedenceArrowAndPeriod; if ((Style.Language == FormatStyle::LK_Java || Style.Language == FormatStyle::LK_JavaScript) && Current->isOneOf(Keywords.kw_extends, Keywords.kw_implements, Keywords.kw_throws)) return 0; } return -1; } void addFakeParenthesis(FormatToken *Start, prec::Level Precedence) { Start->FakeLParens.push_back(Precedence); if (Precedence > prec::Unknown) Start->StartsBinaryExpression = true; if (Current) { FormatToken *Previous = Current->Previous; while (Previous->is(tok::comment) && Previous->Previous) Previous = Previous->Previous; ++Previous->FakeRParens; if (Precedence > prec::Unknown) Previous->EndsBinaryExpression = true; } } /// Parse unary operator expressions and surround them with 
fake /// parentheses if appropriate. void parseUnaryOperator() { llvm::SmallVector Tokens; while (Current && Current->is(TT_UnaryOperator)) { Tokens.push_back(Current); next(); } parse(PrecedenceArrowAndPeriod); for (FormatToken *Token : llvm::reverse(Tokens)) // The actual precedence doesn't matter. addFakeParenthesis(Token, prec::Unknown); } void parseConditionalExpr() { while (Current && Current->isTrailingComment()) { next(); } FormatToken *Start = Current; parse(prec::LogicalOr); if (!Current || !Current->is(tok::question)) return; next(); parse(prec::Assignment); if (!Current || Current->isNot(TT_ConditionalExpr)) return; next(); parse(prec::Assignment); addFakeParenthesis(Start, prec::Conditional); } void next(bool SkipPastLeadingComments = true) { if (Current) Current = Current->Next; while (Current && (Current->NewlinesBefore == 0 || SkipPastLeadingComments) && Current->isTrailingComment()) Current = Current->Next; } const FormatStyle &Style; const AdditionalKeywords &Keywords; FormatToken *Current; }; } // end anonymous namespace void TokenAnnotator::setCommentLineLevels( SmallVectorImpl &Lines) { const AnnotatedLine *NextNonCommentLine = nullptr; for (SmallVectorImpl::reverse_iterator I = Lines.rbegin(), E = Lines.rend(); I != E; ++I) { bool CommentLine = true; for (const FormatToken *Tok = (*I)->First; Tok; Tok = Tok->Next) { if (!Tok->is(tok::comment)) { CommentLine = false; break; } } // If the comment is currently aligned with the line immediately following // it, that's probably intentional and we should keep it. if (NextNonCommentLine && CommentLine && NextNonCommentLine->First->NewlinesBefore <= 1 && NextNonCommentLine->First->OriginalColumn == (*I)->First->OriginalColumn) { // Align comments for preprocessor lines with the # in column 0 if // preprocessor lines are not indented. Otherwise, align with the next // line. 
(*I)->Level = (Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash && (NextNonCommentLine->Type == LT_PreprocessorDirective || NextNonCommentLine->Type == LT_ImportStatement)) ? 0 : NextNonCommentLine->Level; } else { NextNonCommentLine = (*I)->First->isNot(tok::r_brace) ? (*I) : nullptr; } setCommentLineLevels((*I)->Children); } } static unsigned maxNestingDepth(const AnnotatedLine &Line) { unsigned Result = 0; for (const auto *Tok = Line.First; Tok != nullptr; Tok = Tok->Next) Result = std::max(Result, Tok->NestingLevel); return Result; } void TokenAnnotator::annotate(AnnotatedLine &Line) { for (SmallVectorImpl::iterator I = Line.Children.begin(), E = Line.Children.end(); I != E; ++I) { annotate(**I); } AnnotatingParser Parser(Style, Line, Keywords); Line.Type = Parser.parseLine(); // With very deep nesting, ExpressionParser uses lots of stack and the // formatting algorithm is very slow. We're not going to do a good job here // anyway - it's probably generated code being formatted by mistake. // Just skip the whole line. if (maxNestingDepth(Line) > 50) Line.Type = LT_Invalid; if (Line.Type == LT_Invalid) return; ExpressionParser ExprParser(Style, Keywords, Line); ExprParser.parse(); if (Line.startsWith(TT_ObjCMethodSpecifier)) Line.Type = LT_ObjCMethodDecl; else if (Line.startsWith(TT_ObjCDecl)) Line.Type = LT_ObjCDecl; else if (Line.startsWith(TT_ObjCProperty)) Line.Type = LT_ObjCProperty; Line.First->SpacesRequiredBefore = 1; Line.First->CanBreakBefore = Line.First->MustBreakBefore; } // This function heuristically determines whether 'Current' starts the name of a // function declaration. 
-static bool isFunctionDeclarationName(const FormatToken &Current, +static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current, const AnnotatedLine &Line) { auto skipOperatorName = [](const FormatToken *Next) -> const FormatToken * { for (; Next; Next = Next->Next) { if (Next->is(TT_OverloadedOperatorLParen)) return Next; if (Next->is(TT_OverloadedOperator)) continue; if (Next->isOneOf(tok::kw_new, tok::kw_delete)) { // For 'new[]' and 'delete[]'. if (Next->Next && Next->Next->startsSequence(tok::l_square, tok::r_square)) Next = Next->Next->Next; continue; } if (Next->startsSequence(tok::l_square, tok::r_square)) { // For operator[](). Next = Next->Next; continue; } if ((Next->isSimpleTypeSpecifier() || Next->is(tok::identifier)) && Next->Next && Next->Next->isOneOf(tok::star, tok::amp, tok::ampamp)) { // For operator void*(), operator char*(), operator Foo*(). Next = Next->Next; continue; } if (Next->is(TT_TemplateOpener) && Next->MatchingParen) { Next = Next->MatchingParen; continue; } break; } return nullptr; }; // Find parentheses of parameter list. const FormatToken *Next = Current.Next; if (Current.is(tok::kw_operator)) { if (Current.Previous && Current.Previous->is(tok::coloncolon)) return false; Next = skipOperatorName(Next); } else { if (!Current.is(TT_StartOfName) || Current.NestingLevel != 0) return false; for (; Next; Next = Next->Next) { if (Next->is(TT_TemplateOpener)) { Next = Next->MatchingParen; } else if (Next->is(tok::coloncolon)) { Next = Next->Next; if (!Next) return false; if (Next->is(tok::kw_operator)) { Next = skipOperatorName(Next->Next); break; } if (!Next->is(tok::identifier)) return false; } else if (Next->is(tok::l_paren)) { break; } else { return false; } } } // Check whether parameter list can belong to a function declaration. if (!Next || !Next->is(tok::l_paren) || !Next->MatchingParen) return false; // If the lines ends with "{", this is likely an function definition. 
if (Line.Last->is(tok::l_brace)) return true; if (Next->Next == Next->MatchingParen) return true; // Empty parentheses. // If there is an &/&& after the r_paren, this is likely a function. if (Next->MatchingParen->Next && Next->MatchingParen->Next->is(TT_PointerOrReference)) return true; - // Check for K&R C function definitions, e.g.: + + // Check for K&R C function definitions (and C++ function definitions with + // unnamed parameters), e.g.: // int f(i) // { // return i + 1; // } - if (Next->Next && Next->Next->is(tok::identifier) && - !(Next->MatchingParen->Next && Next->MatchingParen->Next->is(tok::semi))) + // bool g(size_t = 0, bool b = false) + // { + // return !b; + // } + if (IsCpp && Next->Next && Next->Next->is(tok::identifier) && + !Line.endsWith(tok::semi)) return true; + for (const FormatToken *Tok = Next->Next; Tok && Tok != Next->MatchingParen; Tok = Tok->Next) { if (Tok->is(TT_TypeDeclarationParen)) return true; if (Tok->isOneOf(tok::l_paren, TT_TemplateOpener) && Tok->MatchingParen) { Tok = Tok->MatchingParen; continue; } if (Tok->is(tok::kw_const) || Tok->isSimpleTypeSpecifier() || Tok->isOneOf(TT_PointerOrReference, TT_StartOfName, tok::ellipsis)) return true; if (Tok->isOneOf(tok::l_brace, tok::string_literal, TT_ObjCMethodExpr) || Tok->Tok.isLiteral()) return false; } return false; } bool TokenAnnotator::mustBreakForReturnType(const AnnotatedLine &Line) const { assert(Line.MightBeFunctionDecl); if ((Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_TopLevel || Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_TopLevelDefinitions) && Line.Level > 0) return false; switch (Style.AlwaysBreakAfterReturnType) { case FormatStyle::RTBS_None: return false; case FormatStyle::RTBS_All: case FormatStyle::RTBS_TopLevel: return true; case FormatStyle::RTBS_AllDefinitions: case FormatStyle::RTBS_TopLevelDefinitions: return Line.mightBeFunctionDefinition(); } return false; } void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) { 
for (SmallVectorImpl::iterator I = Line.Children.begin(), E = Line.Children.end(); I != E; ++I) { calculateFormattingInformation(**I); } Line.First->TotalLength = Line.First->IsMultiline ? Style.ColumnLimit : Line.FirstStartColumn + Line.First->ColumnWidth; FormatToken *Current = Line.First->Next; bool InFunctionDecl = Line.MightBeFunctionDecl; bool AlignArrayOfStructures = (Style.AlignArrayOfStructures != FormatStyle::AIAS_None && Line.Type == LT_ArrayOfStructInitializer); if (AlignArrayOfStructures) calculateArrayInitializerColumnList(Line); while (Current) { - if (isFunctionDeclarationName(*Current, Line)) + if (isFunctionDeclarationName(Style.isCpp(), *Current, Line)) Current->setType(TT_FunctionDeclarationName); if (Current->is(TT_LineComment)) { if (Current->Previous->is(BK_BracedInit) && Current->Previous->opensScope()) Current->SpacesRequiredBefore = (Style.Cpp11BracedListStyle && !Style.SpacesInParentheses) ? 0 : 1; else Current->SpacesRequiredBefore = Style.SpacesBeforeTrailingComments; // If we find a trailing comment, iterate backwards to determine whether // it seems to relate to a specific parameter. If so, break before that // parameter to avoid changing the comment's meaning. E.g. 
don't move 'b' // to the previous line in: // SomeFunction(a, // b, // comment // c); if (!Current->HasUnescapedNewline) { for (FormatToken *Parameter = Current->Previous; Parameter; Parameter = Parameter->Previous) { if (Parameter->isOneOf(tok::comment, tok::r_brace)) break; if (Parameter->Previous && Parameter->Previous->is(tok::comma)) { if (!Parameter->Previous->is(TT_CtorInitializerComma) && Parameter->HasUnescapedNewline) Parameter->MustBreakBefore = true; break; } } } } else if (Current->SpacesRequiredBefore == 0 && spaceRequiredBefore(Line, *Current)) { Current->SpacesRequiredBefore = 1; } Current->MustBreakBefore = Current->MustBreakBefore || mustBreakBefore(Line, *Current); if (!Current->MustBreakBefore && InFunctionDecl && Current->is(TT_FunctionDeclarationName)) Current->MustBreakBefore = mustBreakForReturnType(Line); Current->CanBreakBefore = Current->MustBreakBefore || canBreakBefore(Line, *Current); unsigned ChildSize = 0; if (Current->Previous->Children.size() == 1) { FormatToken &LastOfChild = *Current->Previous->Children[0]->Last; ChildSize = LastOfChild.isTrailingComment() ? Style.ColumnLimit : LastOfChild.TotalLength + 1; } const FormatToken *Prev = Current->Previous; if (Current->MustBreakBefore || Prev->Children.size() > 1 || (Prev->Children.size() == 1 && Prev->Children[0]->First->MustBreakBefore) || Current->IsMultiline) Current->TotalLength = Prev->TotalLength + Style.ColumnLimit; else Current->TotalLength = Prev->TotalLength + Current->ColumnWidth + ChildSize + Current->SpacesRequiredBefore; if (Current->is(TT_CtorInitializerColon)) InFunctionDecl = false; // FIXME: Only calculate this if CanBreakBefore is true once static // initializers etc. are sorted out. // FIXME: Move magic numbers to a better place. // Reduce penalty for aligning ObjC method arguments using the colon // alignment as this is the canonical way (still prefer fitting everything // into one line if possible). 
Trying to fit a whole expression into one // line should not force other line breaks (e.g. when ObjC method // expression is a part of other expression). Current->SplitPenalty = splitPenalty(Line, *Current, InFunctionDecl); if (Style.Language == FormatStyle::LK_ObjC && Current->is(TT_SelectorName) && Current->ParameterIndex > 0) { if (Current->ParameterIndex == 1) Current->SplitPenalty += 5 * Current->BindingStrength; } else { Current->SplitPenalty += 20 * Current->BindingStrength; } Current = Current->Next; } calculateUnbreakableTailLengths(Line); unsigned IndentLevel = Line.Level; for (Current = Line.First; Current != nullptr; Current = Current->Next) { if (Current->Role) Current->Role->precomputeFormattingInfos(Current); if (Current->MatchingParen && Current->MatchingParen->opensBlockOrBlockTypeList(Style)) { assert(IndentLevel > 0); --IndentLevel; } Current->IndentLevel = IndentLevel; if (Current->opensBlockOrBlockTypeList(Style)) ++IndentLevel; } LLVM_DEBUG({ printDebugInfo(Line); }); } void TokenAnnotator::calculateUnbreakableTailLengths(AnnotatedLine &Line) { unsigned UnbreakableTailLength = 0; FormatToken *Current = Line.Last; while (Current) { Current->UnbreakableTailLength = UnbreakableTailLength; if (Current->CanBreakBefore || Current->isOneOf(tok::comment, tok::string_literal)) { UnbreakableTailLength = 0; } else { UnbreakableTailLength += Current->ColumnWidth + Current->SpacesRequiredBefore; } Current = Current->Previous; } } void TokenAnnotator::calculateArrayInitializerColumnList(AnnotatedLine &Line) { if (Line.First == Line.Last) { return; } auto *CurrentToken = Line.First; CurrentToken->ArrayInitializerLineStart = true; unsigned Depth = 0; while (CurrentToken != nullptr && CurrentToken != Line.Last) { if (CurrentToken->is(tok::l_brace)) { CurrentToken->IsArrayInitializer = true; if (CurrentToken->Next != nullptr) CurrentToken->Next->MustBreakBefore = true; CurrentToken = calculateInitializerColumnList(Line, CurrentToken->Next, Depth + 1); } else { 
      CurrentToken = CurrentToken->Next;
    }
  }
}

FormatToken *TokenAnnotator::calculateInitializerColumnList(
    AnnotatedLine &Line, FormatToken *CurrentToken, unsigned Depth) {
  while (CurrentToken != nullptr && CurrentToken != Line.Last) {
    if (CurrentToken->is(tok::l_brace))
      ++Depth;
    else if (CurrentToken->is(tok::r_brace))
      --Depth;
    // At depth 2 ('{' of a row element or a ',' between elements), the next
    // token starts a new column of the array-of-struct initializer.
    if (Depth == 2 && CurrentToken->isOneOf(tok::l_brace, tok::comma)) {
      CurrentToken = CurrentToken->Next;
      if (CurrentToken == nullptr)
        break;
      CurrentToken->StartsColumn = true;
      CurrentToken = CurrentToken->Previous;
    }
    CurrentToken = CurrentToken->Next;
  }
  return CurrentToken;
}

// Computes the penalty for breaking the line before 'Tok'; larger values make
// a break at this position less likely. The constants below are tuned
// heuristics — do not reorder the checks, their order is load-bearing.
unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
                                      const FormatToken &Tok,
                                      bool InFunctionDecl) {
  const FormatToken &Left = *Tok.Previous;
  const FormatToken &Right = Tok;

  if (Left.is(tok::semi))
    return 0;

  if (Style.Language == FormatStyle::LK_Java) {
    if (Right.isOneOf(Keywords.kw_extends, Keywords.kw_throws))
      return 1;
    if (Right.is(Keywords.kw_implements))
      return 2;
    if (Left.is(tok::comma) && Left.NestingLevel == 0)
      return 3;
  } else if (Style.Language == FormatStyle::LK_JavaScript) {
    if (Right.is(Keywords.kw_function) && Left.isNot(tok::comma))
      return 100;
    if (Left.is(TT_JsTypeColon))
      return 35;
    if ((Left.is(TT_TemplateString) && Left.TokenText.endswith("${")) ||
        (Right.is(TT_TemplateString) && Right.TokenText.startswith("}")))
      return 100;
    // Prefer breaking call chains (".foo") over empty "{}", "[]" or "()".
    if (Left.opensScope() && Right.closesScope())
      return 200;
  }

  if (Right.is(tok::identifier) && Right.Next && Right.Next->is(TT_DictLiteral))
    return 1;
  if (Right.is(tok::l_square)) {
    if (Style.Language == FormatStyle::LK_Proto)
      return 1;
    if (Left.is(tok::r_square))
      return 200;
    // Slightly prefer formatting local lambda definitions like functions.
    if (Right.is(TT_LambdaLSquare) && Left.is(tok::equal))
      return 35;
    if (!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
                       TT_ArrayInitializerLSquare,
                       TT_DesignatedInitializerLSquare, TT_AttributeSquare))
      return 500;
  }

  if (Left.is(tok::coloncolon) ||
      (Right.is(tok::period) && Style.Language == FormatStyle::LK_Proto))
    return 500;
  if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
      Right.is(tok::kw_operator)) {
    if (Line.startsWith(tok::kw_for) && Right.PartOfMultiVariableDeclStmt)
      return 3;
    if (Left.is(TT_StartOfName))
      return 110;
    if (InFunctionDecl && Right.NestingLevel == 0)
      return Style.PenaltyReturnTypeOnItsOwnLine;
    return 200;
  }
  if (Right.is(TT_PointerOrReference))
    return 190;
  if (Right.is(TT_LambdaArrow))
    return 110;
  if (Left.is(tok::equal) && Right.is(tok::l_brace))
    return 160;
  if (Left.is(TT_CastRParen))
    return 100;
  if (Left.isOneOf(tok::kw_class, tok::kw_struct))
    return 5000;
  if (Left.is(tok::comment))
    return 1000;

  if (Left.isOneOf(TT_RangeBasedForLoopColon, TT_InheritanceColon,
                   TT_CtorInitializerColon))
    return 2;

  if (Right.isMemberAccess()) {
    // Breaking before the "./->" of a chained call/member access is reasonably
    // cheap, as formatting those with one call per line is generally
    // desirable. In particular, it should be cheaper to break before the call
    // than it is to break inside a call's parameters, which could lead to weird
    // "hanging" indents. The exception is the very last "./->" to support this
    // frequent pattern:
    //
    //   aaaaaaaa.aaaaaaaa.bbbbbbb().ccccccccccccccccccccc(
    //       dddddddd);
    //
    // which might otherwise be blown up onto many lines. Here, clang-format
    // won't produce "hanging" indents anyway as there is no other trailing
    // call.
    //
    // Also apply higher penalty is not a call as that might lead to a wrapping
    // like:
    //
    //   aaaaaaa
    //       .aaaaaaaaa.bbbbbbbb(cccccccc);
    return !Right.NextOperator || !Right.NextOperator->Previous->closesScope()
               ? 150
               : 35;
  }

  if (Right.is(TT_TrailingAnnotation) &&
      (!Right.Next || Right.Next->isNot(tok::l_paren))) {
    // Moving trailing annotations to the next line is fine for ObjC method
    // declarations.
    if (Line.startsWith(TT_ObjCMethodSpecifier))
      return 10;
    // Generally, breaking before a trailing annotation is bad unless it is
    // function-like. It seems to be especially preferable to keep standard
    // annotations (i.e. "const", "final" and "override") on the same line.
    // Use a slightly higher penalty after ")" so that annotations like
    // "const override" are kept together.
    bool is_short_annotation = Right.TokenText.size() < 10;
    return (Left.is(tok::r_paren) ? 100 : 120) + (is_short_annotation ? 50 : 0);
  }

  // In for-loops, prefer breaking at ',' and ';'.
  if (Line.startsWith(tok::kw_for) && Left.is(tok::equal))
    return 4;

  // In Objective-C method expressions, prefer breaking before "param:" over
  // breaking after it.
  if (Right.is(TT_SelectorName))
    return 0;
  if (Left.is(tok::colon) && Left.is(TT_ObjCMethodExpr))
    return Line.MightBeFunctionDecl ? 50 : 500;

  // In Objective-C type declarations, avoid breaking after the category's
  // open paren (we'll prefer breaking after the protocol list's opening
  // angle bracket, if present).
  if (Line.Type == LT_ObjCDecl && Left.is(tok::l_paren) && Left.Previous &&
      Left.Previous->isOneOf(tok::identifier, tok::greater))
    return 500;

  if (Left.is(tok::l_paren) && InFunctionDecl &&
      Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign)
    return 100;
  if (Left.is(tok::l_paren) && Left.Previous &&
      (Left.Previous->is(tok::kw_for) || Left.Previous->isIf()))
    return 1000;
  if (Left.is(tok::equal) && InFunctionDecl)
    return 110;
  if (Right.is(tok::r_brace))
    return 1;
  if (Left.is(TT_TemplateOpener))
    return 100;
  if (Left.opensScope()) {
    // If we aren't aligning after opening parens/braces we can always break
    // here unless the style does not want us to place all arguments on the
    // next line.
    if (Style.AlignAfterOpenBracket == FormatStyle::BAS_DontAlign &&
        (Left.ParameterCount <= 1 || Style.AllowAllArgumentsOnNextLine))
      return 0;
    if (Left.is(tok::l_brace) && !Style.Cpp11BracedListStyle)
      return 19;
    return Left.ParameterCount > 1 ? Style.PenaltyBreakBeforeFirstCallParameter
                                   : 19;
  }
  if (Left.is(TT_JavaAnnotation))
    return 50;

  if (Left.is(TT_UnaryOperator))
    return 60;
  if (Left.isOneOf(tok::plus, tok::comma) && Left.Previous &&
      Left.Previous->isLabelString() &&
      (Left.NextOperator || Left.OperatorIndex != 0))
    return 50;
  if (Right.is(tok::plus) && Left.isLabelString() &&
      (Right.NextOperator || Right.OperatorIndex != 0))
    return 25;
  if (Left.is(tok::comma))
    return 1;
  if (Right.is(tok::lessless) && Left.isLabelString() &&
      (Right.NextOperator || Right.OperatorIndex != 1))
    return 25;
  if (Right.is(tok::lessless)) {
    // Breaking at a << is really cheap.
    if (!Left.is(tok::r_paren) || Right.OperatorIndex > 0)
      // Slightly prefer to break before the first one in log-like statements.
      return 2;
    return 1;
  }
  if (Left.ClosesTemplateDeclaration)
    return Style.PenaltyBreakTemplateDeclaration;
  if (Left.is(TT_ConditionalExpr))
    return prec::Conditional;
  // Fall back to operator precedence: breaking at lower-precedence operators
  // is cheaper.
  prec::Level Level = Left.getPrecedence();
  if (Level == prec::Unknown)
    Level = Right.getPrecedence();
  if (Level == prec::Assignment)
    return Style.PenaltyBreakAssignment;
  if (Level != prec::Unknown)
    return Level;

  return 3;
}

bool TokenAnnotator::spaceRequiredBeforeParens(const FormatToken &Right) const {
  return Style.SpaceBeforeParens == FormatStyle::SBPO_Always ||
         (Style.SpaceBeforeParens == FormatStyle::SBPO_NonEmptyParentheses &&
          Right.ParameterCount > 0);
}

// Decides whether a space is required between 'Left' and 'Right'.
// NOTE(review): this function continues past the end of this excerpt.
bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
                                          const FormatToken &Left,
                                          const FormatToken &Right) {
  if (Left.is(tok::kw_return) && Right.isNot(tok::semi))
    return true;
  if (Style.isJson() && Left.is(tok::string_literal) && Right.is(tok::colon))
    return false;
  if (Left.is(Keywords.kw_assert) && Style.Language == FormatStyle::LK_Java)
    return true;
  if
      // (continuation of the 'if' started on the previous excerpt line)
      (Style.ObjCSpaceAfterProperty && Line.Type == LT_ObjCProperty &&
       Left.Tok.getObjCKeywordID() == tok::objc_property)
    return true;
  if (Right.is(tok::hashhash))
    return Left.is(tok::hash);
  if (Left.isOneOf(tok::hashhash, tok::hash))
    return Right.is(tok::hash);
  if ((Left.is(tok::l_paren) && Right.is(tok::r_paren)) ||
      (Left.is(tok::l_brace) && Left.isNot(BK_Block) &&
       Right.is(tok::r_brace) && Right.isNot(BK_Block)))
    return Style.SpaceInEmptyParentheses;
  if (Style.SpacesInConditionalStatement) {
    if (Left.is(tok::l_paren) && Left.Previous &&
        isKeywordWithCondition(*Left.Previous))
      return true;
    if (Right.is(tok::r_paren) && Right.MatchingParen &&
        Right.MatchingParen->Previous &&
        isKeywordWithCondition(*Right.MatchingParen->Previous))
      return true;
  }

  // requires ( or requires(
  if (Right.is(tok::l_paren) && Left.is(tok::kw_requires))
    return spaceRequiredBeforeParens(Right);
  // requires clause Concept1 && Concept2
  if (Left.is(TT_ConstraintJunctions) && Right.is(tok::identifier))
    return true;

  if (Left.is(tok::l_paren) || Right.is(tok::r_paren))
    return (Right.is(TT_CastRParen) ||
            (Left.MatchingParen && Left.MatchingParen->is(TT_CastRParen)))
               ? Style.SpacesInCStyleCastParentheses
               : Style.SpacesInParentheses;
  if (Right.isOneOf(tok::semi, tok::comma))
    return false;
  if (Right.is(tok::less) && Line.Type == LT_ObjCDecl) {
    bool IsLightweightGeneric = Right.MatchingParen &&
                                Right.MatchingParen->Next &&
                                Right.MatchingParen->Next->is(tok::colon);
    return !IsLightweightGeneric && Style.ObjCSpaceBeforeProtocolList;
  }
  if (Right.is(tok::less) && Left.is(tok::kw_template))
    return Style.SpaceAfterTemplateKeyword;
  if (Left.isOneOf(tok::exclaim, tok::tilde))
    return false;
  if (Left.is(tok::at) &&
      Right.isOneOf(tok::identifier, tok::string_literal, tok::char_constant,
                    tok::numeric_constant, tok::l_paren, tok::l_brace,
                    tok::kw_true, tok::kw_false))
    return false;
  if (Left.is(tok::colon))
    return !Left.is(TT_ObjCMethodExpr);
  if (Left.is(tok::coloncolon))
    return false;
  if (Left.is(tok::less) || Right.isOneOf(tok::greater, tok::less)) {
    if (Style.Language == FormatStyle::LK_TextProto ||
        (Style.Language == FormatStyle::LK_Proto &&
         (Left.is(TT_DictLiteral) || Right.is(TT_DictLiteral)))) {
      // Format empty list as `<>`.
      if (Left.is(tok::less) && Right.is(tok::greater))
        return false;
      return !Style.Cpp11BracedListStyle;
    }
    return false;
  }
  if (Right.is(tok::ellipsis))
    return Left.Tok.isLiteral() || (Left.is(tok::identifier) && Left.Previous &&
                                    Left.Previous->is(tok::kw_case));
  if (Left.is(tok::l_square) && Right.is(tok::amp))
    return Style.SpacesInSquareBrackets;
  if (Right.is(TT_PointerOrReference)) {
    if (Left.is(tok::r_paren) && Line.MightBeFunctionDecl) {
      if (!Left.MatchingParen)
        return true;
      FormatToken *TokenBeforeMatchingParen =
          Left.MatchingParen->getPreviousNonComment();
      if (!TokenBeforeMatchingParen || !Left.is(TT_TypeDeclarationParen))
        return true;
    }
    // Add a space if the previous token is a pointer qualifer or the closing
    // parenthesis of __attribute__(()) expression and the style requires spaces
    // after pointer qualifiers.
    if ((Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_After ||
         Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both) &&
        (Left.is(TT_AttributeParen) || Left.canBePointerOrReferenceQualifier()))
      return true;
    return (Left.Tok.isLiteral() ||
            (!Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
             (getTokenPointerOrReferenceAlignment(Right) !=
                  FormatStyle::PAS_Left ||
              (Line.IsMultiVariableDeclStmt &&
               (Left.NestingLevel == 0 ||
                (Left.NestingLevel == 1 && Line.First->is(tok::kw_for)))))));
  }
  if (Right.is(TT_FunctionTypeLParen) && Left.isNot(tok::l_paren) &&
      (!Left.is(TT_PointerOrReference) ||
       (getTokenPointerOrReferenceAlignment(Left) != FormatStyle::PAS_Right &&
        !Line.IsMultiVariableDeclStmt)))
    return true;
  if (Left.is(TT_PointerOrReference)) {
    // Add a space if the next token is a pointer qualifer and the style
    // requires spaces before pointer qualifiers.
    if ((Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Before ||
         Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both) &&
        Right.canBePointerOrReferenceQualifier())
      return true;
    return Right.Tok.isLiteral() || Right.is(TT_BlockComment) ||
           (Right.isOneOf(Keywords.kw_override, Keywords.kw_final) &&
            !Right.is(TT_StartOfName)) ||
           (Right.is(tok::l_brace) && Right.is(BK_Block)) ||
           (!Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare,
                           tok::l_paren) &&
            (getTokenPointerOrReferenceAlignment(Left) !=
                 FormatStyle::PAS_Right &&
             !Line.IsMultiVariableDeclStmt) &&
            Left.Previous &&
            !Left.Previous->isOneOf(tok::l_paren, tok::coloncolon,
                                    tok::l_square));
  }
  // Ensure right pointer alignement with ellipsis e.g. int *...P
  if (Left.is(tok::ellipsis) && Left.Previous &&
      Left.Previous->isOneOf(tok::star, tok::amp, tok::ampamp))
    return Style.PointerAlignment != FormatStyle::PAS_Right;

  if (Right.is(tok::star) && Left.is(tok::l_paren))
    return false;
  if (Left.is(tok::star) && Right.isOneOf(tok::star, tok::amp, tok::ampamp))
    return false;
  if (Right.isOneOf(tok::star, tok::amp, tok::ampamp)) {
    // Walk left over a (possibly qualified) type name to see whether this is
    // a conversion-operator declaration like 'operator Foo*()'.
    const FormatToken *Previous = &Left;
    while (Previous && !Previous->is(tok::kw_operator)) {
      if (Previous->is(tok::identifier) || Previous->isSimpleTypeSpecifier()) {
        Previous = Previous->getPreviousNonComment();
        continue;
      }
      if (Previous->is(TT_TemplateCloser) && Previous->MatchingParen) {
        Previous = Previous->MatchingParen->getPreviousNonComment();
        continue;
      }
      if (Previous->is(tok::coloncolon)) {
        Previous = Previous->getPreviousNonComment();
        continue;
      }
      break;
    }
    // Space between the type and the * in:
    //   operator void*()
    //   operator char*()
    //   operator void const*()
    //   operator void volatile*()
    //   operator /*comment*/ const char*()
    //   operator volatile /*comment*/ char*()
    //   operator Foo*()
    //   operator C*()
    //   operator std::Foo*()
    //   operator C::D*()
    // dependent on PointerAlignment style.
    if (Previous) {
      if (Previous->endsSequence(tok::kw_operator))
        return (Style.PointerAlignment != FormatStyle::PAS_Left);
      if (Previous->is(tok::kw_const) || Previous->is(tok::kw_volatile))
        return (Style.PointerAlignment != FormatStyle::PAS_Left) ||
               (Style.SpaceAroundPointerQualifiers ==
                FormatStyle::SAPQ_After) ||
               (Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both);
    }
  }
  const auto SpaceRequiredForArrayInitializerLSquare =
      [](const FormatToken &LSquareTok, const FormatStyle &Style) {
        return Style.SpacesInContainerLiterals ||
               ((Style.Language == FormatStyle::LK_Proto ||
                 Style.Language == FormatStyle::LK_TextProto) &&
                !Style.Cpp11BracedListStyle &&
                LSquareTok.endsSequence(tok::l_square, tok::colon,
                                        TT_SelectorName));
      };
  if (Left.is(tok::l_square))
    return (Left.is(TT_ArrayInitializerLSquare) && Right.isNot(tok::r_square) &&
            SpaceRequiredForArrayInitializerLSquare(Left, Style)) ||
           (Left.isOneOf(TT_ArraySubscriptLSquare, TT_StructuredBindingLSquare,
                         TT_LambdaLSquare) &&
            Style.SpacesInSquareBrackets && Right.isNot(tok::r_square));
  if (Right.is(tok::r_square))
    return Right.MatchingParen &&
           ((Right.MatchingParen->is(TT_ArrayInitializerLSquare) &&
             SpaceRequiredForArrayInitializerLSquare(*Right.MatchingParen,
                                                     Style)) ||
            (Style.SpacesInSquareBrackets &&
             Right.MatchingParen->isOneOf(TT_ArraySubscriptLSquare,
                                          TT_StructuredBindingLSquare,
                                          TT_LambdaLSquare)) ||
            Right.MatchingParen->is(TT_AttributeParen));
  if (Right.is(tok::l_square) &&
      !Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
                     TT_DesignatedInitializerLSquare,
                     TT_StructuredBindingLSquare, TT_AttributeSquare) &&
      !Left.isOneOf(tok::numeric_constant, TT_DictLiteral) &&
      !(!Left.is(tok::r_square) && Style.SpaceBeforeSquareBrackets &&
        Right.is(TT_ArraySubscriptLSquare)))
    return false;
  if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
    return !Left.Children.empty(); // No spaces in "{}".
if ((Left.is(tok::l_brace) && Left.isNot(BK_Block)) || (Right.is(tok::r_brace) && Right.MatchingParen && Right.MatchingParen->isNot(BK_Block))) return Style.Cpp11BracedListStyle ? Style.SpacesInParentheses : true; if (Left.is(TT_BlockComment)) // No whitespace in x(/*foo=*/1), except for JavaScript. return Style.Language == FormatStyle::LK_JavaScript || !Left.TokenText.endswith("=*/"); // Space between template and attribute. // e.g. template [[nodiscard]] ... if (Left.is(TT_TemplateCloser) && Right.is(TT_AttributeSquare)) return true; if (Right.is(tok::l_paren)) { if ((Left.is(tok::r_paren) && Left.is(TT_AttributeParen)) || (Left.is(tok::r_square) && Left.is(TT_AttributeSquare))) return true; if (Style.SpaceBeforeParens == FormatStyle::SBPO_ControlStatementsExceptControlMacros && Left.is(TT_ForEachMacro)) return false; if (Style.SpaceBeforeParens == FormatStyle::SBPO_ControlStatementsExceptControlMacros && Left.is(TT_IfMacro)) return false; return Line.Type == LT_ObjCDecl || Left.is(tok::semi) || (Style.SpaceBeforeParens != FormatStyle::SBPO_Never && (Left.isOneOf(tok::pp_elif, tok::kw_for, tok::kw_while, tok::kw_switch, tok::kw_case, TT_ForEachMacro, TT_ObjCForIn) || Left.isIf(Line.Type != LT_PreprocessorDirective) || (Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch, tok::kw_new, tok::kw_delete) && (!Left.Previous || Left.Previous->isNot(tok::period))))) || (spaceRequiredBeforeParens(Right) && (Left.is(tok::identifier) || Left.isFunctionLikeKeyword() || Left.is(tok::r_paren) || Left.isSimpleTypeSpecifier() || (Left.is(tok::r_square) && Left.MatchingParen && Left.MatchingParen->is(TT_LambdaLSquare))) && Line.Type != LT_PreprocessorDirective); } if (Left.is(tok::at) && Right.Tok.getObjCKeywordID() != tok::objc_not_keyword) return false; if (Right.is(TT_UnaryOperator)) return !Left.isOneOf(tok::l_paren, tok::l_square, tok::at) && (Left.isNot(tok::colon) || Left.isNot(TT_ObjCMethodExpr)); if ((Left.isOneOf(tok::identifier, tok::greater, tok::r_square, 
tok::r_paren) ||
     Left.isSimpleTypeSpecifier()) &&
    Right.is(tok::l_brace) && Right.getNextNonComment() &&
    Right.isNot(BK_Block))
  return false;
if (Left.is(tok::period) || Right.is(tok::period))
  return false;
// Wide-string literal prefix 'L' followed by '#' (stringizing) stays glued.
if (Right.is(tok::hash) && Left.is(tok::identifier) && Left.TokenText == "L")
  return false;
if (Left.is(TT_TemplateCloser) && Left.MatchingParen &&
    Left.MatchingParen->Previous &&
    (Left.MatchingParen->Previous->is(tok::period) ||
     Left.MatchingParen->Previous->is(tok::coloncolon)))
  // Java call to generic function with explicit type:
  // A.<B<C<...>>>DoSomething();
  // A::<B<C<...>>>DoSomething(); // With a Java 8 method reference.
  return false;
if (Left.is(TT_TemplateCloser) && Right.is(tok::l_square))
  return false;
if (Left.is(tok::l_brace) && Left.endsSequence(TT_DictLiteral, tok::at))
  // Objective-C dictionary literal -> no space after opening brace.
  return false;
if (Right.is(tok::r_brace) && Right.MatchingParen &&
    Right.MatchingParen->endsSequence(TT_DictLiteral, tok::at))
  // Objective-C dictionary literal -> no space before closing brace.
  return false;
if (Right.getType() == TT_TrailingAnnotation &&
    Right.isOneOf(tok::amp, tok::ampamp) &&
    Left.isOneOf(tok::kw_const, tok::kw_volatile) &&
    (!Right.Next || Right.Next->is(tok::semi)))
  // Match const and volatile ref-qualifiers without any additional
  // qualifiers such as
  // void Fn() const &;
  return getTokenReferenceAlignment(Right) != FormatStyle::PAS_Left;
// Default: keep a space between the two tokens.
return true;
}

// Decides whether whitespace is required before 'Right'. Language-specific
// rules (C++, Proto/TextProto, JSON, C#, JavaScript, Java, Objective-C) are
// applied first; generic cases are delegated to spaceRequiredBetween() at the
// bottom.
// NOTE(review): the order of these checks is significant — earlier rules
// deliberately shadow later ones; do not reorder.
bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
                                         const FormatToken &Right) {
  const FormatToken &Left = *Right.Previous;
  // True if the user already wrote whitespace before 'Right'; used by the
  // "keep what the user wrote" style options below.
  auto HasExistingWhitespace = [&Right]() {
    return Right.WhitespaceRange.getBegin() != Right.WhitespaceRange.getEnd();
  };
  if (Right.Tok.getIdentifierInfo() && Left.Tok.getIdentifierInfo())
    return true; // Never ever merge two identifiers.
  if (Style.isCpp()) {
    if (Left.is(tok::kw_operator))
      return Right.is(tok::coloncolon);
    if (Right.is(tok::l_brace) && Right.is(BK_BracedInit) &&
        !Left.opensScope() && Style.SpaceBeforeCpp11BracedList)
      return true;
  } else if (Style.Language == FormatStyle::LK_Proto ||
             Style.Language == FormatStyle::LK_TextProto) {
    if (Right.is(tok::period) &&
        Left.isOneOf(Keywords.kw_optional, Keywords.kw_required,
                     Keywords.kw_repeated, Keywords.kw_extend))
      return true;
    if (Right.is(tok::l_paren) &&
        Left.isOneOf(Keywords.kw_returns, Keywords.kw_option))
      return true;
    if (Right.isOneOf(tok::l_brace, tok::less) && Left.is(TT_SelectorName))
      return true;
    // Slashes occur in text protocol extension syntax: [type/type] { ... }.
    if (Left.is(tok::slash) || Right.is(tok::slash))
      return false;
    if (Left.MatchingParen &&
        Left.MatchingParen->is(TT_ProtoExtensionLSquare) &&
        Right.isOneOf(tok::l_brace, tok::less))
      return !Style.Cpp11BracedListStyle;
    // A percent is probably part of a formatting specification, such as %lld.
    if (Left.is(tok::percent))
      return false;
    // Preserve the existence of a space before a percent for cases like 0x%04x
    // and "%d %d"
    if (Left.is(tok::numeric_constant) && Right.is(tok::percent))
      return HasExistingWhitespace();
  } else if (Style.isJson()) {
    if (Right.is(tok::colon))
      return false;
  } else if (Style.isCSharp()) {
    // Require spaces around '{' and before '}' unless they appear in
    // interpolated strings. Interpolated strings are merged into a single token
    // so cannot have spaces inserted by this function.

    // No space between 'this' and '['
    if (Left.is(tok::kw_this) && Right.is(tok::l_square))
      return false;

    // No space between 'new' and '('
    if (Left.is(tok::kw_new) && Right.is(tok::l_paren))
      return false;

    // Space before { (including space within '{ {').
    if (Right.is(tok::l_brace))
      return true;

    // Spaces inside braces.
    if (Left.is(tok::l_brace) && Right.isNot(tok::r_brace))
      return true;

    if (Left.isNot(tok::l_brace) && Right.is(tok::r_brace))
      return true;

    // Spaces around '=>'.
    if (Left.is(TT_FatArrow) || Right.is(TT_FatArrow))
      return true;

    // No spaces around attribute target colons
    if (Left.is(TT_AttributeColon) || Right.is(TT_AttributeColon))
      return false;

    // space between type and variable e.g. Dictionary<string, string> foo;
    if (Left.is(TT_TemplateCloser) && Right.is(TT_StartOfName))
      return true;

    // spaces inside square brackets.
    if (Left.is(tok::l_square) || Right.is(tok::r_square))
      return Style.SpacesInSquareBrackets;

    // No space before ? in nullable types.
    if (Right.is(TT_CSharpNullable))
      return false;

    // No space before null forgiving '!'.
    if (Right.is(TT_NonNullAssertion))
      return false;

    // No space between consecutive commas '[,,]'.
    if (Left.is(tok::comma) && Right.is(tok::comma))
      return false;

    // space after var in `var (key, value)`
    if (Left.is(Keywords.kw_var) && Right.is(tok::l_paren))
      return true;

    // space between keywords and paren e.g. "using ("
    if (Right.is(tok::l_paren))
      if (Left.isOneOf(tok::kw_using, Keywords.kw_async, Keywords.kw_when,
                       Keywords.kw_lock))
        return Style.SpaceBeforeParens == FormatStyle::SBPO_ControlStatements ||
               spaceRequiredBeforeParens(Right);

    // space between method modifier and opening parenthesis of a tuple return
    // type
    if (Left.isOneOf(tok::kw_public, tok::kw_private, tok::kw_protected,
                     tok::kw_virtual, tok::kw_extern, tok::kw_static,
                     Keywords.kw_internal, Keywords.kw_abstract,
                     Keywords.kw_sealed, Keywords.kw_override,
                     Keywords.kw_async, Keywords.kw_unsafe) &&
        Right.is(tok::l_paren))
      return true;
  } else if (Style.Language == FormatStyle::LK_JavaScript) {
    if (Left.is(TT_FatArrow))
      return true;
    // for await ( ...
    if (Right.is(tok::l_paren) && Left.is(Keywords.kw_await) && Left.Previous &&
        Left.Previous->is(tok::kw_for))
      return true;
    if (Left.is(Keywords.kw_async) && Right.is(tok::l_paren) &&
        Right.MatchingParen) {
      const FormatToken *Next = Right.MatchingParen->getNextNonComment();
      // An async arrow function, for example: `x = async () => foo();`,
      // as opposed to calling a function called async: `x = async();`
      if (Next && Next->is(TT_FatArrow))
        return true;
    }
    if ((Left.is(TT_TemplateString) && Left.TokenText.endswith("${")) ||
        (Right.is(TT_TemplateString) && Right.TokenText.startswith("}")))
      return false;
    // In tagged template literals ("html`bar baz`"), there is no space between
    // the tag identifier and the template string.
    if (Keywords.IsJavaScriptIdentifier(Left,
                                        /* AcceptIdentifierName= */ false) &&
        Right.is(TT_TemplateString))
      return false;
    if (Right.is(tok::star) &&
        Left.isOneOf(Keywords.kw_function, Keywords.kw_yield))
      return false;
    if (Right.isOneOf(tok::l_brace, tok::l_square) &&
        Left.isOneOf(Keywords.kw_function, Keywords.kw_yield,
                     Keywords.kw_extends, Keywords.kw_implements))
      return true;
    if (Right.is(tok::l_paren)) {
      // JS methods can use some keywords as names (e.g. `delete()`).
      if (Line.MustBeDeclaration && Left.Tok.getIdentifierInfo())
        return false;
      // Valid JS method names can include keywords, e.g. `foo.delete()` or
      // `bar.instanceof()`. Recognize call positions by preceding period.
      if (Left.Previous && Left.Previous->is(tok::period) &&
          Left.Tok.getIdentifierInfo())
        return false;
      // Additional unary JavaScript operators that need a space after.
      if (Left.isOneOf(tok::kw_throw, Keywords.kw_await, Keywords.kw_typeof,
                       tok::kw_void))
        return true;
    }
    // `foo as const;` casts into a const type.
    if (Left.endsSequence(tok::kw_const, Keywords.kw_as)) {
      return false;
    }
    if ((Left.isOneOf(Keywords.kw_let, Keywords.kw_var, Keywords.kw_in,
                      tok::kw_const) ||
         // "of" is only a keyword if it appears after another identifier
         // (e.g. as "const x of y" in a for loop), or after a destructuring
         // operation (const [x, y] of z, const {a, b} of c).
         (Left.is(Keywords.kw_of) && Left.Previous &&
          (Left.Previous->Tok.is(tok::identifier) ||
           Left.Previous->isOneOf(tok::r_square, tok::r_brace)))) &&
        (!Left.Previous || !Left.Previous->is(tok::period)))
      return true;
    if (Left.isOneOf(tok::kw_for, Keywords.kw_as) && Left.Previous &&
        Left.Previous->is(tok::period) && Right.is(tok::l_paren))
      return false;
    if (Left.is(Keywords.kw_as) &&
        Right.isOneOf(tok::l_square, tok::l_brace, tok::l_paren))
      return true;
    if (Left.is(tok::kw_default) && Left.Previous &&
        Left.Previous->is(tok::kw_export))
      return true;
    if (Left.is(Keywords.kw_is) && Right.is(tok::l_brace))
      return true;
    if (Right.isOneOf(TT_JsTypeColon, TT_JsTypeOptionalQuestion))
      return false;
    if (Left.is(TT_JsTypeOperator) || Right.is(TT_JsTypeOperator))
      return false;
    if ((Left.is(tok::l_brace) || Right.is(tok::r_brace)) &&
        Line.First->isOneOf(Keywords.kw_import, tok::kw_export))
      return false;
    if (Left.is(tok::ellipsis))
      return false;
    if (Left.is(TT_TemplateCloser) &&
        !Right.isOneOf(tok::equal, tok::l_brace, tok::comma, tok::l_square,
                       Keywords.kw_implements, Keywords.kw_extends))
      // Type assertions ('<type>expr') are not followed by whitespace. Other
      // locations that should have whitespace following are identified by the
      // above set of follower tokens.
      return false;
    if (Right.is(TT_NonNullAssertion))
      return false;
    if (Left.is(TT_NonNullAssertion) &&
        Right.isOneOf(Keywords.kw_as, Keywords.kw_in))
      return true; // "x! as string", "x! in y"
  } else if (Style.Language == FormatStyle::LK_Java) {
    if (Left.is(tok::r_square) && Right.is(tok::l_brace))
      return true;
    if (Left.is(Keywords.kw_synchronized) && Right.is(tok::l_paren))
      return Style.SpaceBeforeParens != FormatStyle::SBPO_Never;
    if ((Left.isOneOf(tok::kw_static, tok::kw_public, tok::kw_private,
                      tok::kw_protected) ||
         Left.isOneOf(Keywords.kw_final, Keywords.kw_abstract,
                      Keywords.kw_native)) &&
        Right.is(TT_TemplateOpener))
      return true;
  }
  if (Left.is(TT_ImplicitStringLiteral))
    return HasExistingWhitespace();
  if (Line.Type == LT_ObjCMethodDecl) {
    if (Left.is(TT_ObjCMethodSpecifier))
      return true;
    if (Left.is(tok::r_paren) && canBeObjCSelectorComponent(Right))
      // Don't space between ')' and <id> or ')' and 'new'. 'new' is not a
      // keyword in Objective-C, and '+ (instancetype)new;' is a standard class
      // method declaration.
      return false;
  }
  if (Line.Type == LT_ObjCProperty &&
      (Right.is(tok::equal) || Left.is(tok::equal)))
    return false;

  if (Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow) ||
      Left.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow))
    return true;
  if (Right.is(TT_OverloadedOperatorLParen))
    return spaceRequiredBeforeParens(Right);
  if (Left.is(tok::comma))
    return true;
  if (Right.is(tok::comma))
    return false;
  if (Right.is(TT_ObjCBlockLParen))
    return true;
  if (Right.is(TT_CtorInitializerColon))
    return Style.SpaceBeforeCtorInitializerColon;
  if (Right.is(TT_InheritanceColon) && !Style.SpaceBeforeInheritanceColon)
    return false;
  if (Right.is(TT_RangeBasedForLoopColon) &&
      !Style.SpaceBeforeRangeBasedForLoopColon)
    return false;
  if (Left.is(TT_BitFieldColon))
    return Style.BitFieldColonSpacing == FormatStyle::BFCS_Both ||
           Style.BitFieldColonSpacing == FormatStyle::BFCS_After;
  if (Right.is(tok::colon)) {
    if (Line.First->isOneOf(tok::kw_default, tok::kw_case))
      return Style.SpaceBeforeCaseColon;
    if (!Right.getNextNonComment() || Right.getNextNonComment()->is(tok::semi))
      return false;
    if (Right.is(TT_ObjCMethodExpr))
      return false;
    if (Left.is(tok::question))
      return false;
    if (Right.is(TT_InlineASMColon) && Left.is(tok::coloncolon))
      return false;
    if (Right.is(TT_DictLiteral))
      return Style.SpacesInContainerLiterals;
    if (Right.is(TT_AttributeColon))
      return false;
    if (Right.is(TT_CSharpNamedArgumentColon))
      return false;
    if (Right.is(TT_BitFieldColon))
      return Style.BitFieldColonSpacing == FormatStyle::BFCS_Both ||
             Style.BitFieldColonSpacing == FormatStyle::BFCS_Before;
    return true;
  }
  // Do not merge "- -" into "--".
  if ((Left.isOneOf(tok::minus, tok::minusminus) &&
       Right.isOneOf(tok::minus, tok::minusminus)) ||
      (Left.isOneOf(tok::plus, tok::plusplus) &&
       Right.isOneOf(tok::plus, tok::plusplus)))
    return true;
  if (Left.is(TT_UnaryOperator)) {
    if (!Right.is(tok::l_paren)) {
      // The alternative operators for ~ and ! are "compl" and "not".
      // If they are used instead, we do not want to combine them with
      // the token to the right, unless that is a left paren.
      if (Left.is(tok::exclaim) && Left.TokenText == "not")
        return true;
      if (Left.is(tok::tilde) && Left.TokenText == "compl")
        return true;
      // Lambda captures allow for a lone &, so "&]" needs to be properly
      // handled.
      if (Left.is(tok::amp) && Right.is(tok::r_square))
        return Style.SpacesInSquareBrackets;
    }
    return (Style.SpaceAfterLogicalNot && Left.is(tok::exclaim)) ||
           Right.is(TT_BinaryOperator);
  }

  // If the next token is a binary operator or a selector name, we have
  // incorrectly classified the parenthesis as a cast. FIXME: Detect correctly.
  if (Left.is(TT_CastRParen))
    return Style.SpaceAfterCStyleCast ||
           Right.isOneOf(TT_BinaryOperator, TT_SelectorName);

  auto ShouldAddSpacesInAngles = [this, &HasExistingWhitespace]() {
    if (this->Style.SpacesInAngles == FormatStyle::SIAS_Always)
      return true;
    if (this->Style.SpacesInAngles == FormatStyle::SIAS_Leave)
      return HasExistingWhitespace();
    return false;
  };

  if (Left.is(tok::greater) && Right.is(tok::greater)) {
    if (Style.Language == FormatStyle::LK_TextProto ||
        (Style.Language == FormatStyle::LK_Proto && Left.is(TT_DictLiteral)))
      return !Style.Cpp11BracedListStyle;
    return Right.is(TT_TemplateCloser) && Left.is(TT_TemplateCloser) &&
           ((Style.Standard < FormatStyle::LS_Cpp11) ||
            ShouldAddSpacesInAngles());
  }
  if (Right.isOneOf(tok::arrow, tok::arrowstar, tok::periodstar) ||
      Left.isOneOf(tok::arrow, tok::period, tok::arrowstar, tok::periodstar) ||
      (Right.is(tok::period) && Right.isNot(TT_DesignatedInitializerPeriod)))
    return false;
  if (!Style.SpaceBeforeAssignmentOperators && Left.isNot(TT_TemplateCloser) &&
      Right.getPrecedence() == prec::Assignment)
    return false;
  if (Style.Language == FormatStyle::LK_Java && Right.is(tok::coloncolon) &&
      (Left.is(tok::identifier) || Left.is(tok::kw_this)))
    return false;
  if (Right.is(tok::coloncolon) && Left.is(tok::identifier))
    // Generally don't remove existing spaces between an identifier and "::".
    // The identifier might actually be a macro name such as ALWAYS_INLINE. If
    // this turns out to be too lenient, add analysis of the identifier itself.
    return HasExistingWhitespace();
  if (Right.is(tok::coloncolon) &&
      !Left.isOneOf(tok::l_brace, tok::comment, tok::l_paren))
    // Put a space between < and :: in vector< ::std::string >
    return (Left.is(TT_TemplateOpener) &&
            ((Style.Standard < FormatStyle::LS_Cpp11) ||
             ShouldAddSpacesInAngles())) ||
           !(Left.isOneOf(tok::l_paren, tok::r_paren, tok::l_square,
                          tok::kw___super, TT_TemplateOpener,
                          TT_TemplateCloser)) ||
           (Left.is(tok::l_paren) && Style.SpacesInParentheses);
  if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
    return ShouldAddSpacesInAngles();
  // Space before TT_StructuredBindingLSquare.
  if (Right.is(TT_StructuredBindingLSquare))
    return !Left.isOneOf(tok::amp, tok::ampamp) ||
           getTokenReferenceAlignment(Left) != FormatStyle::PAS_Right;
  // Space before & or && following a TT_StructuredBindingLSquare.
  if (Right.Next && Right.Next->is(TT_StructuredBindingLSquare) &&
      Right.isOneOf(tok::amp, tok::ampamp))
    return getTokenReferenceAlignment(Right) != FormatStyle::PAS_Left;
  if ((Right.is(TT_BinaryOperator) && !Left.is(tok::l_paren)) ||
      (Left.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) &&
       !Right.is(tok::r_paren)))
    return true;
  if (Left.is(TT_TemplateCloser) && Right.is(tok::l_paren) &&
      Right.isNot(TT_FunctionTypeLParen))
    return spaceRequiredBeforeParens(Right);
  if (Right.is(TT_TemplateOpener) && Left.is(tok::r_paren) &&
      Left.MatchingParen && Left.MatchingParen->is(TT_OverloadedOperatorLParen))
    return false;
  if (Right.is(tok::less) && Left.isNot(tok::l_paren) &&
      Line.startsWith(tok::hash))
    return true;
  if (Right.is(TT_TrailingUnaryOperator))
    return false;
  if (Left.is(TT_RegexLiteral))
    return false;
  // Fall through to the language-independent rules.
  return spaceRequiredBetween(Line, Left, Right);
}

// Returns 'true' if 'Tok' is a brace we'd want to break before in Allman style.
static bool isAllmanBrace(const FormatToken &Tok) {
  return Tok.is(tok::l_brace) && Tok.is(BK_Block) &&
         !Tok.isOneOf(TT_ObjCBlockLBrace, TT_LambdaLBrace, TT_DictLiteral);
}

// Returns 'true' if 'Tok' is a function argument.
static bool IsFunctionArgument(const FormatToken &Tok) { return Tok.MatchingParen && Tok.MatchingParen->Next && Tok.MatchingParen->Next->isOneOf(tok::comma, tok::r_paren); } static bool isItAnEmptyLambdaAllowed(const FormatToken &Tok, FormatStyle::ShortLambdaStyle ShortLambdaOption) { return Tok.Children.empty() && ShortLambdaOption != FormatStyle::SLS_None; } static bool isAllmanLambdaBrace(const FormatToken &Tok) { return (Tok.is(tok::l_brace) && Tok.is(BK_Block) && !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral)); } bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line, const FormatToken &Right) { const FormatToken &Left = *Right.Previous; if (Right.NewlinesBefore > 1 && Style.MaxEmptyLinesToKeep > 0) return true; if (Style.isCSharp()) { if (Right.is(TT_CSharpNamedArgumentColon) || Left.is(TT_CSharpNamedArgumentColon)) return false; if (Right.is(TT_CSharpGenericTypeConstraint)) return true; // Break after C# [...] and before public/protected/private/internal. if (Left.is(TT_AttributeSquare) && Left.is(tok::r_square) && (Right.isAccessSpecifier(/*ColonRequired=*/false) || Right.is(Keywords.kw_internal))) return true; // Break between ] and [ but only when there are really 2 attributes. if (Left.is(TT_AttributeSquare) && Right.is(TT_AttributeSquare) && Left.is(tok::r_square) && Right.is(tok::l_square)) return true; } else if (Style.Language == FormatStyle::LK_JavaScript) { // FIXME: This might apply to other languages and token kinds. if (Right.is(tok::string_literal) && Left.is(tok::plus) && Left.Previous && Left.Previous->is(tok::string_literal)) return true; if (Left.is(TT_DictLiteral) && Left.is(tok::l_brace) && Line.Level == 0 && Left.Previous && Left.Previous->is(tok::equal) && Line.First->isOneOf(tok::identifier, Keywords.kw_import, tok::kw_export, tok::kw_const) && // kw_var/kw_let are pseudo-tokens that are tok::identifier, so match // above. 
!Line.First->isOneOf(Keywords.kw_var, Keywords.kw_let)) // Object literals on the top level of a file are treated as "enum-style". // Each key/value pair is put on a separate line, instead of bin-packing. return true; if (Left.is(tok::l_brace) && Line.Level == 0 && (Line.startsWith(tok::kw_enum) || Line.startsWith(tok::kw_const, tok::kw_enum) || Line.startsWith(tok::kw_export, tok::kw_enum) || Line.startsWith(tok::kw_export, tok::kw_const, tok::kw_enum))) // JavaScript top-level enum key/value pairs are put on separate lines // instead of bin-packing. return true; if (Right.is(tok::r_brace) && Left.is(tok::l_brace) && Left.Previous && Left.Previous->is(TT_FatArrow)) { // JS arrow function (=> {...}). switch (Style.AllowShortLambdasOnASingleLine) { case FormatStyle::SLS_All: return false; case FormatStyle::SLS_None: return true; case FormatStyle::SLS_Empty: return !Left.Children.empty(); case FormatStyle::SLS_Inline: // allow one-lining inline (e.g. in function call args) and empty arrow // functions. return (Left.NestingLevel == 0 && Line.Level == 0) && !Left.Children.empty(); } llvm_unreachable("Unknown FormatStyle::ShortLambdaStyle enum"); } if (Right.is(tok::r_brace) && Left.is(tok::l_brace) && !Left.Children.empty()) // Support AllowShortFunctionsOnASingleLine for JavaScript. 
return Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_None || Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_Empty || (Left.NestingLevel == 0 && Line.Level == 0 && Style.AllowShortFunctionsOnASingleLine & FormatStyle::SFS_InlineOnly); } else if (Style.Language == FormatStyle::LK_Java) { if (Right.is(tok::plus) && Left.is(tok::string_literal) && Right.Next && Right.Next->is(tok::string_literal)) return true; } else if (Style.Language == FormatStyle::LK_Cpp || Style.Language == FormatStyle::LK_ObjC || Style.Language == FormatStyle::LK_Proto || Style.Language == FormatStyle::LK_TableGen || Style.Language == FormatStyle::LK_TextProto) { if (Left.isStringLiteral() && Right.isStringLiteral()) return true; } // Basic JSON newline processing. if (Style.isJson()) { // Always break after a JSON record opener. // { // } if (Left.is(TT_DictLiteral) && Left.is(tok::l_brace)) return true; // Always break after a JSON array opener. // [ // ] if (Left.is(TT_ArrayInitializerLSquare) && Left.is(tok::l_square) && !Right.is(tok::r_square)) return true; // Always break afer successive entries. // 1, // 2 if (Left.is(tok::comma)) return true; } // If the last token before a '}', ']', or ')' is a comma or a trailing // comment, the intention is to insert a line break after it in order to make // shuffling around entries easier. Import statements, especially in // JavaScript, can be an exception to this rule. 
if (Style.JavaScriptWrapImports || Line.Type != LT_ImportStatement) { const FormatToken *BeforeClosingBrace = nullptr; if ((Left.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) || (Style.Language == FormatStyle::LK_JavaScript && Left.is(tok::l_paren))) && Left.isNot(BK_Block) && Left.MatchingParen) BeforeClosingBrace = Left.MatchingParen->Previous; else if (Right.MatchingParen && (Right.MatchingParen->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) || (Style.Language == FormatStyle::LK_JavaScript && Right.MatchingParen->is(tok::l_paren)))) BeforeClosingBrace = &Left; if (BeforeClosingBrace && (BeforeClosingBrace->is(tok::comma) || BeforeClosingBrace->isTrailingComment())) return true; } if (Right.is(tok::comment)) return Left.isNot(BK_BracedInit) && Left.isNot(TT_CtorInitializerColon) && (Right.NewlinesBefore > 0 && Right.HasUnescapedNewline); if (Left.isTrailingComment()) return true; if (Right.Previous->IsUnterminatedLiteral) return true; if (Right.is(tok::lessless) && Right.Next && Right.Previous->is(tok::string_literal) && Right.Next->is(tok::string_literal)) return true; // Can break after template<> declaration if (Right.Previous->ClosesTemplateDeclaration && Right.Previous->MatchingParen && Right.Previous->MatchingParen->NestingLevel == 0) { // Put concepts on the next line e.g. // template // concept ... if (Right.is(tok::kw_concept)) return Style.BreakBeforeConceptDeclarations; return (Style.AlwaysBreakTemplateDeclarations == FormatStyle::BTDS_Yes); } if (Right.is(TT_CtorInitializerComma) && Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma && !Style.ConstructorInitializerAllOnOneLineOrOnePerLine) return true; if (Right.is(TT_CtorInitializerColon) && Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma && !Style.ConstructorInitializerAllOnOneLineOrOnePerLine) return true; // Break only if we have multiple inheritance. 
if (Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma && Right.is(TT_InheritanceComma)) return true; if (Style.BreakInheritanceList == FormatStyle::BILS_AfterComma && Left.is(TT_InheritanceComma)) return true; if (Right.is(tok::string_literal) && Right.TokenText.startswith("R\"")) // Multiline raw string literals are special wrt. line breaks. The author // has made a deliberate choice and might have aligned the contents of the // string literal accordingly. Thus, we try keep existing line breaks. return Right.IsMultiline && Right.NewlinesBefore > 0; if ((Right.Previous->is(tok::l_brace) || (Right.Previous->is(tok::less) && Right.Previous->Previous && Right.Previous->Previous->is(tok::equal))) && Right.NestingLevel == 1 && Style.Language == FormatStyle::LK_Proto) { // Don't put enums or option definitions onto single lines in protocol // buffers. return true; } if (Right.is(TT_InlineASMBrace)) return Right.HasUnescapedNewline; if (isAllmanBrace(Left) || isAllmanBrace(Right)) return (Line.startsWith(tok::kw_enum) && Style.BraceWrapping.AfterEnum) || (Line.startsWith(tok::kw_typedef, tok::kw_enum) && Style.BraceWrapping.AfterEnum) || (Line.startsWith(tok::kw_class) && Style.BraceWrapping.AfterClass) || (Line.startsWith(tok::kw_struct) && Style.BraceWrapping.AfterStruct); if (Left.is(TT_ObjCBlockLBrace) && Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never) return true; if (Left.is(TT_LambdaLBrace)) { if (IsFunctionArgument(Left) && Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Inline) return false; if (Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_None || Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Inline || (!Left.Children.empty() && Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Empty)) return true; } if (Style.BraceWrapping.BeforeLambdaBody && Right.is(TT_LambdaLBrace) && Left.isOneOf(tok::star, tok::amp, tok::ampamp, TT_TemplateCloser)) { return true; } // Put multiple Java annotation on a new line. 
if ((Style.Language == FormatStyle::LK_Java || Style.Language == FormatStyle::LK_JavaScript) && Left.is(TT_LeadingJavaAnnotation) && Right.isNot(TT_LeadingJavaAnnotation) && Right.isNot(tok::l_paren) && (Line.Last->is(tok::l_brace) || Style.BreakAfterJavaFieldAnnotations)) return true; if (Right.is(TT_ProtoExtensionLSquare)) return true; // In text proto instances if a submessage contains at least 2 entries and at // least one of them is a submessage, like A { ... B { ... } ... }, // put all of the entries of A on separate lines by forcing the selector of // the submessage B to be put on a newline. // // Example: these can stay on one line: // a { scalar_1: 1 scalar_2: 2 } // a { b { key: value } } // // and these entries need to be on a new line even if putting them all in one // line is under the column limit: // a { // scalar: 1 // b { key: value } // } // // We enforce this by breaking before a submessage field that has previous // siblings, *and* breaking before a field that follows a submessage field. // // Be careful to exclude the case [proto.ext] { ... } since the `]` is // the TT_SelectorName there, but we don't want to break inside the brackets. // // Another edge case is @submessage { key: value }, which is a common // substitution placeholder. In this case we want to keep `@` and `submessage` // together. // // We ensure elsewhere that extensions are always on their own line. if ((Style.Language == FormatStyle::LK_Proto || Style.Language == FormatStyle::LK_TextProto) && Right.is(TT_SelectorName) && !Right.is(tok::r_square) && Right.Next) { // Keep `@submessage` together in: // @submessage { key: value } if (Right.Previous && Right.Previous->is(tok::at)) return false; // Look for the scope opener after selector in cases like: // selector { ... // selector: { ... // selector: @base { ... 
FormatToken *LBrace = Right.Next; if (LBrace && LBrace->is(tok::colon)) { LBrace = LBrace->Next; if (LBrace && LBrace->is(tok::at)) { LBrace = LBrace->Next; if (LBrace) LBrace = LBrace->Next; } } if (LBrace && // The scope opener is one of {, [, <: // selector { ... } // selector [ ... ] // selector < ... > // // In case of selector { ... }, the l_brace is TT_DictLiteral. // In case of an empty selector {}, the l_brace is not TT_DictLiteral, // so we check for immediately following r_brace. ((LBrace->is(tok::l_brace) && (LBrace->is(TT_DictLiteral) || (LBrace->Next && LBrace->Next->is(tok::r_brace)))) || LBrace->is(TT_ArrayInitializerLSquare) || LBrace->is(tok::less))) { // If Left.ParameterCount is 0, then this submessage entry is not the // first in its parent submessage, and we want to break before this entry. // If Left.ParameterCount is greater than 0, then its parent submessage // might contain 1 or more entries and we want to break before this entry // if it contains at least 2 entries. We deal with this case later by // detecting and breaking before the next entry in the parent submessage. if (Left.ParameterCount == 0) return true; // However, if this submessage is the first entry in its parent // submessage, Left.ParameterCount might be 1 in some cases. // We deal with this case later by detecting an entry // following a closing paren of this submessage. } // If this is an entry immediately following a submessage, it will be // preceded by a closing paren of that submessage, like in: // left---. .---right // v v // sub: { ... } key: value // If there was a comment between `}` an `key` above, then `key` would be // put on a new line anyways. if (Left.isOneOf(tok::r_brace, tok::greater, tok::r_square)) return true; } // Deal with lambda arguments in C++ - we want consistent line breaks whether // they happen to be at arg0, arg1 or argN. The selection is a bit nuanced // as aggressive line breaks are placed when the lambda is not the last arg. 
  // Deal with lambda arguments in C++ / ObjC: force consistent breaks whether
  // the lambda is argument 0, 1 or N (tail of mustBreakBefore).
  if ((Style.Language == FormatStyle::LK_Cpp ||
       Style.Language == FormatStyle::LK_ObjC) &&
      Left.is(tok::l_paren) && Left.BlockParameterCount > 0 &&
      !Right.isOneOf(tok::l_paren, TT_LambdaLSquare)) {
    // Multiple lambdas in the same function call force line breaks.
    if (Left.BlockParameterCount > 1)
      return true;

    // A lambda followed by another arg forces a line break.
    if (!Left.Role)
      return false;
    auto Comma = Left.Role->lastComma();
    if (!Comma)
      return false;
    auto Next = Comma->getNextNonComment();
    if (!Next)
      return false;
    if (!Next->isOneOf(TT_LambdaLSquare, tok::l_brace, tok::caret))
      return true;
  }

  return false;
}

// Decides whether a line break is *permitted* (not required) between \p Left
// (Right.Previous) and \p Right. The checks are ordered: earlier rules win,
// so reordering them changes formatting behavior.
bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
                                    const FormatToken &Right) {
  const FormatToken &Left = *Right.Previous;
  // Language-specific stuff.
  if (Style.isCSharp()) {
    if (Left.isOneOf(TT_CSharpNamedArgumentColon, TT_AttributeColon) ||
        Right.isOneOf(TT_CSharpNamedArgumentColon, TT_AttributeColon))
      return false;
    // Only break after commas for generic type constraints.
    if (Line.First->is(TT_CSharpGenericTypeConstraint))
      return Left.is(TT_CSharpGenericTypeConstraintComma);
    // Keep nullable operators attached to their identifiers.
    if (Right.is(TT_CSharpNullable)) {
      return false;
    }
  } else if (Style.Language == FormatStyle::LK_Java) {
    if (Left.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
                     Keywords.kw_implements))
      return false;
    if (Right.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
                      Keywords.kw_implements))
      return true;
  } else if (Style.Language == FormatStyle::LK_JavaScript) {
    const FormatToken *NonComment = Right.getPreviousNonComment();
    if (NonComment &&
        NonComment->isOneOf(
            tok::kw_return, Keywords.kw_yield, tok::kw_continue, tok::kw_break,
            tok::kw_throw, Keywords.kw_interface, Keywords.kw_type,
            tok::kw_static, tok::kw_public, tok::kw_private, tok::kw_protected,
            Keywords.kw_readonly, Keywords.kw_abstract, Keywords.kw_get,
            Keywords.kw_set, Keywords.kw_async, Keywords.kw_await))
      return false; // Otherwise automatic semicolon insertion would trigger.
    if (Right.NestingLevel == 0 &&
        (Left.Tok.getIdentifierInfo() ||
         Left.isOneOf(tok::r_square, tok::r_paren)) &&
        Right.isOneOf(tok::l_square, tok::l_paren))
      return false; // Otherwise automatic semicolon insertion would trigger.
    if (NonComment && NonComment->is(tok::identifier) &&
        NonComment->TokenText == "asserts")
      return false;
    if (Left.is(TT_FatArrow) && Right.is(tok::l_brace))
      return false;
    if (Left.is(TT_JsTypeColon))
      return true;
    // Don't wrap between ":" and "!" of a strict prop init ("field!: type;").
    if (Left.is(tok::exclaim) && Right.is(tok::colon))
      return false;
    // Look for is type annotations like:
    // function f(): a is B { ... }
    // Do not break before is in these cases.
    if (Right.is(Keywords.kw_is)) {
      const FormatToken *Next = Right.getNextNonComment();
      // If `is` is followed by a colon, it's likely that it's a dict key, so
      // ignore it for this check.
      // For example this is common in Polymer:
      // Polymer({
      //   is: 'name',
      //   ...
      // });
      if (!Next || !Next->is(tok::colon))
        return false;
    }
    if (Left.is(Keywords.kw_in))
      return Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None;
    if (Right.is(Keywords.kw_in))
      return Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None;
    if (Right.is(Keywords.kw_as))
      return false; // must not break before as in 'x as type' casts
    if (Right.isOneOf(Keywords.kw_extends, Keywords.kw_infer)) {
      // extends and infer can appear as keywords in conditional types:
      // https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-8.html#conditional-types
      // do not break before them, as the expressions are subject to ASI.
      return false;
    }
    if (Left.is(Keywords.kw_as))
      return true;
    if (Left.is(TT_NonNullAssertion))
      return true;
    if (Left.is(Keywords.kw_declare) &&
        Right.isOneOf(Keywords.kw_module, tok::kw_namespace,
                      Keywords.kw_function, tok::kw_class, tok::kw_enum,
                      Keywords.kw_interface, Keywords.kw_type, Keywords.kw_var,
                      Keywords.kw_let, tok::kw_const))
      // See grammar for 'declare' statements at:
      // https://github.com/Microsoft/TypeScript/blob/master/doc/spec.md#A.10
      return false;
    if (Left.isOneOf(Keywords.kw_module, tok::kw_namespace) &&
        Right.isOneOf(tok::identifier, tok::string_literal))
      return false; // must not break in "module foo { ...}"
    if (Right.is(TT_TemplateString) && Right.closesScope())
      return false;
    // Don't split tagged template literal so there is a break between the tag
    // identifier and template string.
    if (Left.is(tok::identifier) && Right.is(TT_TemplateString)) {
      return false;
    }
    if (Left.is(TT_TemplateString) && Left.opensScope())
      return true;
  }

  if (Left.is(tok::at))
    return false;
  if (Left.Tok.getObjCKeywordID() == tok::objc_interface)
    return false;
  if (Left.isOneOf(TT_JavaAnnotation, TT_LeadingJavaAnnotation))
    return !Right.is(tok::l_paren);
  if (Right.is(TT_PointerOrReference))
    return Line.IsMultiVariableDeclStmt ||
           (getTokenPointerOrReferenceAlignment(Right) ==
                FormatStyle::PAS_Right &&
            (!Right.Next || Right.Next->isNot(TT_FunctionDeclarationName)));
  if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
      Right.is(tok::kw_operator))
    return true;
  if (Left.is(TT_PointerOrReference))
    return false;
  if (Right.isTrailingComment())
    // We rely on MustBreakBefore being set correctly here as we should not
    // change the "binding" behavior of a comment.
    // The first comment in a braced lists is always interpreted as belonging to
    // the first list element. Otherwise, it should be placed outside of the
    // list.
    return Left.is(BK_BracedInit) ||
           (Left.is(TT_CtorInitializerColon) &&
            Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon);
  if (Left.is(tok::question) && Right.is(tok::colon))
    return false;
  if (Right.is(TT_ConditionalExpr) || Right.is(tok::question))
    return Style.BreakBeforeTernaryOperators;
  if (Left.is(TT_ConditionalExpr) || Left.is(tok::question))
    return !Style.BreakBeforeTernaryOperators;
  if (Left.is(TT_InheritanceColon))
    return Style.BreakInheritanceList == FormatStyle::BILS_AfterColon;
  if (Right.is(TT_InheritanceColon))
    return Style.BreakInheritanceList != FormatStyle::BILS_AfterColon;
  if (Right.is(TT_ObjCMethodExpr) && !Right.is(tok::r_square) &&
      Left.isNot(TT_SelectorName))
    return true;

  if (Right.is(tok::colon) &&
      !Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon))
    return false;
  if (Left.is(tok::colon) && Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
    if (Style.Language == FormatStyle::LK_Proto ||
        Style.Language == FormatStyle::LK_TextProto) {
      if (!Style.AlwaysBreakBeforeMultilineStrings && Right.isStringLiteral())
        return false;
      // Prevent cases like:
      //
      //   submessage:
      //       { key: valueeeeeeeeeeee }
      //
      // when the snippet does not fit into one line.
      // Prefer:
      //
      //   submessage: {
      //     key: valueeeeeeeeeeee
      //   }
      //
      // instead, even if it is longer by one line.
      //
      // Note that this allows the "{" to go over the column limit
      // when the column limit is just between ":" and "{", but that does
      // not happen too often and alternative formattings in this case are
      // not much better.
      //
      // The code covers the cases:
      //
      //   submessage: { ... }
      //   submessage: < ... >
      //   repeated: [ ... ]
      if (((Right.is(tok::l_brace) || Right.is(tok::less)) &&
           Right.is(TT_DictLiteral)) ||
          Right.is(TT_ArrayInitializerLSquare))
        return false;
    }
    return true;
  }
  if (Right.is(tok::r_square) && Right.MatchingParen &&
      Right.MatchingParen->is(TT_ProtoExtensionLSquare))
    return false;
  if (Right.is(TT_SelectorName) || (Right.is(tok::identifier) && Right.Next &&
                                    Right.Next->is(TT_ObjCMethodExpr)))
    return Left.isNot(tok::period); // FIXME: Properly parse ObjC calls.
  if (Left.is(tok::r_paren) && Line.Type == LT_ObjCProperty)
    return true;
  if (Left.ClosesTemplateDeclaration || Left.is(TT_FunctionAnnotationRParen))
    return true;
  if (Right.isOneOf(TT_RangeBasedForLoopColon, TT_OverloadedOperatorLParen,
                    TT_OverloadedOperator))
    return false;
  if (Left.is(TT_RangeBasedForLoopColon))
    return true;
  if (Right.is(TT_RangeBasedForLoopColon))
    return false;
  if (Left.is(TT_TemplateCloser) && Right.is(TT_TemplateOpener))
    return true;
  if (Left.isOneOf(TT_TemplateCloser, TT_UnaryOperator) ||
      Left.is(tok::kw_operator))
    return false;
  if (Left.is(tok::equal) && !Right.isOneOf(tok::kw_default, tok::kw_delete) &&
      Line.Type == LT_VirtualFunctionDecl && Left.NestingLevel == 0)
    return false;
  if (Left.is(tok::equal) && Right.is(tok::l_brace) &&
      !Style.Cpp11BracedListStyle)
    return false;
  if (Left.is(tok::l_paren) &&
      Left.isOneOf(TT_AttributeParen, TT_TypeDeclarationParen))
    return false;
  if (Left.is(tok::l_paren) && Left.Previous &&
      (Left.Previous->isOneOf(TT_BinaryOperator, TT_CastRParen)))
    return false;
  if (Right.is(TT_ImplicitStringLiteral))
    return false;

  if (Right.is(tok::r_paren) || Right.is(TT_TemplateCloser))
    return false;
  if (Right.is(tok::r_square) && Right.MatchingParen &&
      Right.MatchingParen->is(TT_LambdaLSquare))
    return false;

  // We only break before r_brace if there was a corresponding break before
  // the l_brace, which is tracked by BreakBeforeClosingBrace.
  if (Right.is(tok::r_brace))
    return Right.MatchingParen && Right.MatchingParen->is(BK_Block);

  // Allow breaking after a trailing annotation, e.g. after a method
  // declaration.
  if (Left.is(TT_TrailingAnnotation))
    return !Right.isOneOf(tok::l_brace, tok::semi, tok::equal, tok::l_paren,
                          tok::less, tok::coloncolon);

  if (Right.is(tok::kw___attribute) ||
      (Right.is(tok::l_square) && Right.is(TT_AttributeSquare)))
    return !Left.is(TT_AttributeSquare);

  if (Left.is(tok::identifier) && Right.is(tok::string_literal))
    return true;

  if (Right.is(tok::identifier) && Right.Next && Right.Next->is(TT_DictLiteral))
    return true;

  if (Left.is(TT_CtorInitializerColon))
    return Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon;
  if (Right.is(TT_CtorInitializerColon))
    return Style.BreakConstructorInitializers != FormatStyle::BCIS_AfterColon;
  if (Left.is(TT_CtorInitializerComma) &&
      Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma)
    return false;
  if (Right.is(TT_CtorInitializerComma) &&
      Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma)
    return true;
  if (Left.is(TT_InheritanceComma) &&
      Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma)
    return false;
  if (Right.is(TT_InheritanceComma) &&
      Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma)
    return true;
  // Never split ">>" or "<<" that were written as two separate tokens.
  if ((Left.is(tok::greater) && Right.is(tok::greater)) ||
      (Left.is(tok::less) && Right.is(tok::less)))
    return false;
  if (Right.is(TT_BinaryOperator) &&
      Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None &&
      (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_All ||
       Right.getPrecedence() != prec::Assignment))
    return true;
  if (Left.is(TT_ArrayInitializerLSquare))
    return true;
  if (Right.is(tok::kw_typename) && Left.isNot(tok::kw_const))
    return true;
  if ((Left.isBinaryOperator() || Left.is(TT_BinaryOperator)) &&
      !Left.isOneOf(tok::arrowstar, tok::lessless) &&
      Style.BreakBeforeBinaryOperators != FormatStyle::BOS_All &&
      (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None ||
       Left.getPrecedence() == prec::Assignment))
    return true;
  if ((Left.is(TT_AttributeSquare) && Right.is(tok::l_square)) ||
      (Left.is(tok::r_square) && Right.is(TT_AttributeSquare)))
    return false;

  auto ShortLambdaOption = Style.AllowShortLambdasOnASingleLine;
  if (Style.BraceWrapping.BeforeLambdaBody && Right.is(TT_LambdaLBrace)) {
    if (isAllmanLambdaBrace(Left))
      return !isItAnEmptyLambdaAllowed(Left, ShortLambdaOption);
    if (isAllmanLambdaBrace(Right))
      return !isItAnEmptyLambdaAllowed(Right, ShortLambdaOption);
  }

  // Fallback: generic token classes after which / before which breaking is OK.
  return Left.isOneOf(tok::comma, tok::coloncolon, tok::semi, tok::l_brace,
                      tok::kw_class, tok::kw_struct, tok::comment) ||
         Right.isMemberAccess() ||
         Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow, tok::lessless,
                       tok::colon, tok::l_square, tok::at) ||
         (Left.is(tok::r_paren) &&
          Right.isOneOf(tok::identifier, tok::kw_const)) ||
         (Left.is(tok::l_paren) && !Right.is(tok::r_paren)) ||
         (Left.is(TT_TemplateOpener) && !Right.is(TT_TemplateCloser));
}

// Dumps per-token annotation state of \p Line to stderr (debugging aid only).
void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
  llvm::errs() << "AnnotatedTokens(L=" << Line.Level << "):\n";
  const FormatToken *Tok = Line.First;
  while (Tok) {
    llvm::errs() << " M=" << Tok->MustBreakBefore
                 << " C=" << Tok->CanBreakBefore
                 << " T=" << getTokenTypeName(Tok->getType())
                 << " S=" << Tok->SpacesRequiredBefore
                 << " F=" << Tok->Finalized << " B=" << Tok->BlockParameterCount
                 << " BK=" << Tok->getBlockKind() << " P=" << Tok->SplitPenalty
                 << " Name=" << Tok->Tok.getName() << " L=" << Tok->TotalLength
                 << " PPK=" << Tok->getPackingKind() << " FakeLParens=";
    for (unsigned i = 0, e = Tok->FakeLParens.size(); i != e; ++i)
      llvm::errs() << Tok->FakeLParens[i] << "/";
    llvm::errs() << " FakeRParens=" << Tok->FakeRParens;
    llvm::errs() << " II=" << Tok->Tok.getIdentifierInfo();
    llvm::errs() << " Text='" << Tok->TokenText << "'\n";
    if (!Tok->Next)
      assert(Tok == Line.Last);
    Tok = Tok->Next;
  }
  llvm::errs() << "----\n";
}

// Maps Style.ReferenceAlignment onto a PointerAlignmentStyle for a reference
// token (RAS_Pointer means "follow PointerAlignment").
FormatStyle::PointerAlignmentStyle
TokenAnnotator::getTokenReferenceAlignment(const
FormatToken &Reference) { assert(Reference.isOneOf(tok::amp, tok::ampamp)); switch (Style.ReferenceAlignment) { case FormatStyle::RAS_Pointer: return Style.PointerAlignment; case FormatStyle::RAS_Left: return FormatStyle::PAS_Left; case FormatStyle::RAS_Right: return FormatStyle::PAS_Right; case FormatStyle::RAS_Middle: return FormatStyle::PAS_Middle; } assert(0); //"Unhandled value of ReferenceAlignment" return Style.PointerAlignment; } FormatStyle::PointerAlignmentStyle TokenAnnotator::getTokenPointerOrReferenceAlignment( const FormatToken &PointerOrReference) { if (PointerOrReference.isOneOf(tok::amp, tok::ampamp)) { switch (Style.ReferenceAlignment) { case FormatStyle::RAS_Pointer: return Style.PointerAlignment; case FormatStyle::RAS_Left: return FormatStyle::PAS_Left; case FormatStyle::RAS_Right: return FormatStyle::PAS_Right; case FormatStyle::RAS_Middle: return FormatStyle::PAS_Middle; } } assert(PointerOrReference.is(tok::star)); return Style.PointerAlignment; } } // namespace format } // namespace clang diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp index 103e3559b120..673986d16af2 100644 --- a/clang/lib/Format/UnwrappedLineParser.cpp +++ b/clang/lib/Format/UnwrappedLineParser.cpp @@ -1,3336 +1,3353 @@ //===--- UnwrappedLineParser.cpp - Format C++ code ------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// /// \file /// This file contains the implementation of the UnwrappedLineParser, /// which turns a stream of tokens into UnwrappedLines. 
/// //===----------------------------------------------------------------------===// #include "UnwrappedLineParser.h" #include "FormatToken.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include #define DEBUG_TYPE "format-parser" namespace clang { namespace format { class FormatTokenSource { public: virtual ~FormatTokenSource() {} virtual FormatToken *getNextToken() = 0; virtual unsigned getPosition() = 0; virtual FormatToken *setPosition(unsigned Position) = 0; }; namespace { class ScopedDeclarationState { public: ScopedDeclarationState(UnwrappedLine &Line, std::vector &Stack, bool MustBeDeclaration) : Line(Line), Stack(Stack) { Line.MustBeDeclaration = MustBeDeclaration; Stack.push_back(MustBeDeclaration); } ~ScopedDeclarationState() { Stack.pop_back(); if (!Stack.empty()) Line.MustBeDeclaration = Stack.back(); else Line.MustBeDeclaration = true; } private: UnwrappedLine &Line; std::vector &Stack; }; static bool isLineComment(const FormatToken &FormatTok) { return FormatTok.is(tok::comment) && !FormatTok.TokenText.startswith("/*"); } // Checks if \p FormatTok is a line comment that continues the line comment // \p Previous. The original column of \p MinColumnToken is used to determine // whether \p FormatTok is indented enough to the right to continue \p Previous. static bool continuesLineComment(const FormatToken &FormatTok, const FormatToken *Previous, const FormatToken *MinColumnToken) { if (!Previous || !MinColumnToken) return false; unsigned MinContinueColumn = MinColumnToken->OriginalColumn + (isLineComment(*MinColumnToken) ? 
0 : 1); return isLineComment(FormatTok) && FormatTok.NewlinesBefore == 1 && isLineComment(*Previous) && FormatTok.OriginalColumn >= MinContinueColumn; } class ScopedMacroState : public FormatTokenSource { public: ScopedMacroState(UnwrappedLine &Line, FormatTokenSource *&TokenSource, FormatToken *&ResetToken) : Line(Line), TokenSource(TokenSource), ResetToken(ResetToken), PreviousLineLevel(Line.Level), PreviousTokenSource(TokenSource), Token(nullptr), PreviousToken(nullptr) { FakeEOF.Tok.startToken(); FakeEOF.Tok.setKind(tok::eof); TokenSource = this; Line.Level = 0; Line.InPPDirective = true; } ~ScopedMacroState() override { TokenSource = PreviousTokenSource; ResetToken = Token; Line.InPPDirective = false; Line.Level = PreviousLineLevel; } FormatToken *getNextToken() override { // The \c UnwrappedLineParser guards against this by never calling // \c getNextToken() after it has encountered the first eof token. assert(!eof()); PreviousToken = Token; Token = PreviousTokenSource->getNextToken(); if (eof()) return &FakeEOF; return Token; } unsigned getPosition() override { return PreviousTokenSource->getPosition(); } FormatToken *setPosition(unsigned Position) override { PreviousToken = nullptr; Token = PreviousTokenSource->setPosition(Position); return Token; } private: bool eof() { return Token && Token->HasUnescapedNewline && !continuesLineComment(*Token, PreviousToken, /*MinColumnToken=*/PreviousToken); } FormatToken FakeEOF; UnwrappedLine &Line; FormatTokenSource *&TokenSource; FormatToken *&ResetToken; unsigned PreviousLineLevel; FormatTokenSource *PreviousTokenSource; FormatToken *Token; FormatToken *PreviousToken; }; } // end anonymous namespace class ScopedLineState { public: ScopedLineState(UnwrappedLineParser &Parser, bool SwitchToPreprocessorLines = false) : Parser(Parser), OriginalLines(Parser.CurrentLines) { if (SwitchToPreprocessorLines) Parser.CurrentLines = &Parser.PreprocessorDirectives; else if (!Parser.Line->Tokens.empty()) Parser.CurrentLines = 
&Parser.Line->Tokens.back().Children; PreBlockLine = std::move(Parser.Line); Parser.Line = std::make_unique(); Parser.Line->Level = PreBlockLine->Level; Parser.Line->InPPDirective = PreBlockLine->InPPDirective; } ~ScopedLineState() { if (!Parser.Line->Tokens.empty()) { Parser.addUnwrappedLine(); } assert(Parser.Line->Tokens.empty()); Parser.Line = std::move(PreBlockLine); if (Parser.CurrentLines == &Parser.PreprocessorDirectives) Parser.MustBreakBeforeNextToken = true; Parser.CurrentLines = OriginalLines; } private: UnwrappedLineParser &Parser; std::unique_ptr PreBlockLine; SmallVectorImpl *OriginalLines; }; class CompoundStatementIndenter { public: CompoundStatementIndenter(UnwrappedLineParser *Parser, const FormatStyle &Style, unsigned &LineLevel) : CompoundStatementIndenter(Parser, LineLevel, Style.BraceWrapping.AfterControlStatement, Style.BraceWrapping.IndentBraces) {} CompoundStatementIndenter(UnwrappedLineParser *Parser, unsigned &LineLevel, bool WrapBrace, bool IndentBrace) : LineLevel(LineLevel), OldLineLevel(LineLevel) { if (WrapBrace) Parser->addUnwrappedLine(); if (IndentBrace) ++LineLevel; } ~CompoundStatementIndenter() { LineLevel = OldLineLevel; } private: unsigned &LineLevel; unsigned OldLineLevel; }; namespace { class IndexedTokenSource : public FormatTokenSource { public: IndexedTokenSource(ArrayRef Tokens) : Tokens(Tokens), Position(-1) {} FormatToken *getNextToken() override { ++Position; return Tokens[Position]; } unsigned getPosition() override { assert(Position >= 0); return Position; } FormatToken *setPosition(unsigned P) override { Position = P; return Tokens[Position]; } void reset() { Position = -1; } private: ArrayRef Tokens; int Position; }; } // end anonymous namespace UnwrappedLineParser::UnwrappedLineParser(const FormatStyle &Style, const AdditionalKeywords &Keywords, unsigned FirstStartColumn, ArrayRef Tokens, UnwrappedLineConsumer &Callback) : Line(new UnwrappedLine), MustBreakBeforeNextToken(false), CurrentLines(&Lines), 
Style(Style), Keywords(Keywords), CommentPragmasRegex(Style.CommentPragmas), Tokens(nullptr), Callback(Callback), AllTokens(Tokens), PPBranchLevel(-1), IncludeGuard(Style.IndentPPDirectives == FormatStyle::PPDIS_None ? IG_Rejected : IG_Inited), IncludeGuardToken(nullptr), FirstStartColumn(FirstStartColumn) {} void UnwrappedLineParser::reset() { PPBranchLevel = -1; IncludeGuard = Style.IndentPPDirectives == FormatStyle::PPDIS_None ? IG_Rejected : IG_Inited; IncludeGuardToken = nullptr; Line.reset(new UnwrappedLine); CommentsBeforeNextToken.clear(); FormatTok = nullptr; MustBreakBeforeNextToken = false; PreprocessorDirectives.clear(); CurrentLines = &Lines; DeclarationScopeStack.clear(); PPStack.clear(); Line->FirstStartColumn = FirstStartColumn; } void UnwrappedLineParser::parse() { IndexedTokenSource TokenSource(AllTokens); Line->FirstStartColumn = FirstStartColumn; do { LLVM_DEBUG(llvm::dbgs() << "----\n"); reset(); Tokens = &TokenSource; TokenSource.reset(); readToken(); parseFile(); // If we found an include guard then all preprocessor directives (other than // the guard) are over-indented by one. if (IncludeGuard == IG_Found) for (auto &Line : Lines) if (Line.InPPDirective && Line.Level > 0) --Line.Level; // Create line with eof token. 
pushToken(FormatTok); addUnwrappedLine(); for (SmallVectorImpl::iterator I = Lines.begin(), E = Lines.end(); I != E; ++I) { Callback.consumeUnwrappedLine(*I); } Callback.finishRun(); Lines.clear(); while (!PPLevelBranchIndex.empty() && PPLevelBranchIndex.back() + 1 >= PPLevelBranchCount.back()) { PPLevelBranchIndex.resize(PPLevelBranchIndex.size() - 1); PPLevelBranchCount.resize(PPLevelBranchCount.size() - 1); } if (!PPLevelBranchIndex.empty()) { ++PPLevelBranchIndex.back(); assert(PPLevelBranchIndex.size() == PPLevelBranchCount.size()); assert(PPLevelBranchIndex.back() <= PPLevelBranchCount.back()); } } while (!PPLevelBranchIndex.empty()); } void UnwrappedLineParser::parseFile() { // The top-level context in a file always has declarations, except for pre- // processor directives and JavaScript files. bool MustBeDeclaration = !Line->InPPDirective && Style.Language != FormatStyle::LK_JavaScript; ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack, MustBeDeclaration); if (Style.Language == FormatStyle::LK_TextProto) parseBracedList(); else parseLevel(/*HasOpeningBrace=*/false); // Make sure to format the remaining tokens. // // LK_TextProto is special since its top-level is parsed as the body of a // braced list, which does not necessarily have natural line separators such // as a semicolon. Comments after the last entry that have been determined to // not belong to that line, as in: // key: value // // endfile comment // do not have a chance to be put on a line of their own until this point. // Here we add this newline before end-of-file comments. 
if (Style.Language == FormatStyle::LK_TextProto && !CommentsBeforeNextToken.empty()) addUnwrappedLine(); flushComments(true); addUnwrappedLine(); } void UnwrappedLineParser::parseCSharpGenericTypeConstraint() { do { switch (FormatTok->Tok.getKind()) { case tok::l_brace: return; default: if (FormatTok->is(Keywords.kw_where)) { addUnwrappedLine(); nextToken(); parseCSharpGenericTypeConstraint(); break; } nextToken(); break; } } while (!eof()); } void UnwrappedLineParser::parseCSharpAttribute() { int UnpairedSquareBrackets = 1; do { switch (FormatTok->Tok.getKind()) { case tok::r_square: nextToken(); --UnpairedSquareBrackets; if (UnpairedSquareBrackets == 0) { addUnwrappedLine(); return; } break; case tok::l_square: ++UnpairedSquareBrackets; nextToken(); break; default: nextToken(); break; } } while (!eof()); } void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) { bool SwitchLabelEncountered = false; do { tok::TokenKind kind = FormatTok->Tok.getKind(); if (FormatTok->getType() == TT_MacroBlockBegin) { kind = tok::l_brace; } else if (FormatTok->getType() == TT_MacroBlockEnd) { kind = tok::r_brace; } switch (kind) { case tok::comment: nextToken(); addUnwrappedLine(); break; case tok::l_brace: // FIXME: Add parameter whether this can happen - if this happens, we must // be in a non-declaration context. if (!FormatTok->is(TT_MacroBlockBegin) && tryToParseBracedList()) continue; parseBlock(/*MustBeDeclaration=*/false); addUnwrappedLine(); break; case tok::r_brace: if (HasOpeningBrace) return; nextToken(); addUnwrappedLine(); break; case tok::kw_default: { unsigned StoredPosition = Tokens->getPosition(); FormatToken *Next; do { Next = Tokens->getNextToken(); } while (Next && Next->is(tok::comment)); FormatTok = Tokens->setPosition(StoredPosition); if (Next && Next->isNot(tok::colon)) { // default not followed by ':' is not a case label; treat it like // an identifier. parseStructuralElement(); break; } // Else, if it is 'default:', fall through to the case handling. 
LLVM_FALLTHROUGH; } case tok::kw_case: if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration) { // A 'case: string' style field declaration. parseStructuralElement(); break; } if (!SwitchLabelEncountered && (Style.IndentCaseLabels || (Line->InPPDirective && Line->Level == 1))) ++Line->Level; SwitchLabelEncountered = true; parseStructuralElement(); break; case tok::l_square: if (Style.isCSharp()) { nextToken(); parseCSharpAttribute(); break; } LLVM_FALLTHROUGH; default: parseStructuralElement(/*IsTopLevel=*/true); break; } } while (!eof()); } void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) { // We'll parse forward through the tokens until we hit // a closing brace or eof - note that getNextToken() will // parse macros, so this will magically work inside macro // definitions, too. unsigned StoredPosition = Tokens->getPosition(); FormatToken *Tok = FormatTok; const FormatToken *PrevTok = Tok->Previous; // Keep a stack of positions of lbrace tokens. We will // update information about whether an lbrace starts a // braced init list or a different block during the loop. SmallVector LBraceStack; assert(Tok->Tok.is(tok::l_brace)); do { // Get next non-comment token. FormatToken *NextTok; unsigned ReadTokens = 0; do { NextTok = Tokens->getNextToken(); ++ReadTokens; } while (NextTok->is(tok::comment)); switch (Tok->Tok.getKind()) { case tok::l_brace: if (Style.Language == FormatStyle::LK_JavaScript && PrevTok) { if (PrevTok->isOneOf(tok::colon, tok::less)) // A ':' indicates this code is in a type, or a braced list // following a label in an object literal ({a: {b: 1}}). // A '<' could be an object used in a comparison, but that is nonsense // code (can never return true), so more likely it is a generic type // argument (`X<{a: string; b: number}>`). // The code below could be confused by semicolons between the // individual members in a type member list, which would normally // trigger BK_Block. 
In both cases, this must be parsed as an inline // braced init. Tok->setBlockKind(BK_BracedInit); else if (PrevTok->is(tok::r_paren)) // `) { }` can only occur in function or method declarations in JS. Tok->setBlockKind(BK_Block); } else { Tok->setBlockKind(BK_Unknown); } LBraceStack.push_back(Tok); break; case tok::r_brace: if (LBraceStack.empty()) break; if (LBraceStack.back()->is(BK_Unknown)) { bool ProbablyBracedList = false; if (Style.Language == FormatStyle::LK_Proto) { ProbablyBracedList = NextTok->isOneOf(tok::comma, tok::r_square); } else { // Using OriginalColumn to distinguish between ObjC methods and // binary operators is a bit hacky. bool NextIsObjCMethod = NextTok->isOneOf(tok::plus, tok::minus) && NextTok->OriginalColumn == 0; // If there is a comma, semicolon or right paren after the closing // brace, we assume this is a braced initializer list. Note that // regardless how we mark inner braces here, we will overwrite the // BlockKind later if we parse a braced list (where all blocks // inside are by default braced lists), or when we explicitly detect // blocks (for example while parsing lambdas). // FIXME: Some of these do not apply to JS, e.g. "} {" can never be a // braced list in JS. ProbablyBracedList = (Style.Language == FormatStyle::LK_JavaScript && NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in, Keywords.kw_as)) || (Style.isCpp() && NextTok->is(tok::l_paren)) || NextTok->isOneOf(tok::comma, tok::period, tok::colon, tok::r_paren, tok::r_square, tok::l_brace, tok::ellipsis) || (NextTok->is(tok::identifier) && !PrevTok->isOneOf(tok::semi, tok::r_brace, tok::l_brace)) || (NextTok->is(tok::semi) && (!ExpectClassBody || LBraceStack.size() != 1)) || (NextTok->isBinaryOperator() && !NextIsObjCMethod); if (!Style.isCSharp() && NextTok->is(tok::l_square)) { // We can have an array subscript after a braced init // list, but C++11 attributes are expected after blocks. 
NextTok = Tokens->getNextToken(); ++ReadTokens; ProbablyBracedList = NextTok->isNot(tok::l_square); } } if (ProbablyBracedList) { Tok->setBlockKind(BK_BracedInit); LBraceStack.back()->setBlockKind(BK_BracedInit); } else { Tok->setBlockKind(BK_Block); LBraceStack.back()->setBlockKind(BK_Block); } } LBraceStack.pop_back(); break; case tok::identifier: if (!Tok->is(TT_StatementMacro)) break; LLVM_FALLTHROUGH; case tok::at: case tok::semi: case tok::kw_if: case tok::kw_while: case tok::kw_for: case tok::kw_switch: case tok::kw_try: case tok::kw___try: if (!LBraceStack.empty() && LBraceStack.back()->is(BK_Unknown)) LBraceStack.back()->setBlockKind(BK_Block); break; default: break; } PrevTok = Tok; Tok = NextTok; } while (Tok->Tok.isNot(tok::eof) && !LBraceStack.empty()); // Assume other blocks for all unclosed opening braces. for (unsigned i = 0, e = LBraceStack.size(); i != e; ++i) { if (LBraceStack[i]->is(BK_Unknown)) LBraceStack[i]->setBlockKind(BK_Block); } FormatTok = Tokens->setPosition(StoredPosition); } template static inline void hash_combine(std::size_t &seed, const T &v) { std::hash hasher; seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); } size_t UnwrappedLineParser::computePPHash() const { size_t h = 0; for (const auto &i : PPStack) { hash_combine(h, size_t(i.Kind)); hash_combine(h, i.Line); } return h; } void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels, bool MunchSemi, bool UnindentWhitesmithsBraces) { assert(FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) && "'{' or macro block token expected"); const bool MacroBlock = FormatTok->is(TT_MacroBlockBegin); FormatTok->setBlockKind(BK_Block); // For Whitesmiths mode, jump to the next level prior to skipping over the // braces. 
if (AddLevels > 0 && Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths) ++Line->Level; size_t PPStartHash = computePPHash(); unsigned InitialLevel = Line->Level; nextToken(/*LevelDifference=*/AddLevels); if (MacroBlock && FormatTok->is(tok::l_paren)) parseParens(); size_t NbPreprocessorDirectives = CurrentLines == &Lines ? PreprocessorDirectives.size() : 0; addUnwrappedLine(); size_t OpeningLineIndex = CurrentLines->empty() ? (UnwrappedLine::kInvalidIndex) : (CurrentLines->size() - 1 - NbPreprocessorDirectives); // Whitesmiths is weird here. The brace needs to be indented for the namespace // block, but the block itself may not be indented depending on the style // settings. This allows the format to back up one level in those cases. if (UnindentWhitesmithsBraces) --Line->Level; ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack, MustBeDeclaration); if (AddLevels > 0u && Style.BreakBeforeBraces != FormatStyle::BS_Whitesmiths) Line->Level += AddLevels; parseLevel(/*HasOpeningBrace=*/true); if (eof()) return; if (MacroBlock ? !FormatTok->is(TT_MacroBlockEnd) : !FormatTok->is(tok::r_brace)) { Line->Level = InitialLevel; FormatTok->setBlockKind(BK_Block); return; } size_t PPEndHash = computePPHash(); // Munch the closing brace. nextToken(/*LevelDifference=*/-AddLevels); if (MacroBlock && FormatTok->is(tok::l_paren)) parseParens(); if (FormatTok->is(tok::arrow)) { // Following the } we can find a trailing return type arrow // as part of an implicit conversion constraint. 
    nextToken();
    parseStructuralElement();
  }

  if (MunchSemi && FormatTok->Tok.is(tok::semi))
    nextToken();

  Line->Level = InitialLevel;

  // Only link opening and closing block lines when both braces were seen in
  // the same preprocessor-conditional state.
  if (PPStartHash == PPEndHash) {
    Line->MatchingOpeningBlockLineIndex = OpeningLineIndex;
    if (OpeningLineIndex != UnwrappedLine::kInvalidIndex) {
      // Update the opening line to add the forward reference as well
      (*CurrentLines)[OpeningLineIndex].MatchingClosingBlockLineIndex =
          CurrentLines->size() - 1;
    }
  }
}

// Returns true if \p Line starts with "goog.scope(" (Closure-library scope).
static bool isGoogScope(const UnwrappedLine &Line) {
  // FIXME: Closure-library specific stuff should not be hard-coded but be
  // configurable.
  if (Line.Tokens.size() < 4)
    return false;
  auto I = Line.Tokens.begin();
  if (I->Tok->TokenText != "goog")
    return false;
  ++I;
  if (I->Tok->isNot(tok::period))
    return false;
  ++I;
  if (I->Tok->TokenText != "scope")
    return false;
  ++I;
  return I->Tok->is(tok::l_paren);
}

static bool isIIFE(const UnwrappedLine &Line,
                   const AdditionalKeywords &Keywords) {
  // Look for the start of an immediately invoked anonymous function.
  // https://en.wikipedia.org/wiki/Immediately-invoked_function_expression
  // This is commonly done in JavaScript to create a new, anonymous scope.
  // Example: (function() { ... })()
  if (Line.Tokens.size() < 3)
    return false;
  auto I = Line.Tokens.begin();
  if (I->Tok->isNot(tok::l_paren))
    return false;
  ++I;
  if (I->Tok->isNot(Keywords.kw_function))
    return false;
  ++I;
  return I->Tok->is(tok::l_paren);
}

// Returns whether the configured brace-wrapping style requests a break before
// the opening brace of the record/namespace introduced by \p InitialToken.
static bool ShouldBreakBeforeBrace(const FormatStyle &Style,
                                   const FormatToken &InitialToken) {
  if (InitialToken.isOneOf(tok::kw_namespace, TT_NamespaceMacro))
    return Style.BraceWrapping.AfterNamespace;
  if (InitialToken.is(tok::kw_class))
    return Style.BraceWrapping.AfterClass;
  if (InitialToken.is(tok::kw_union))
    return Style.BraceWrapping.AfterUnion;
  if (InitialToken.is(tok::kw_struct))
    return Style.BraceWrapping.AfterStruct;
  return false;
}

// Parses a braced block that appears inside an expression, on a separate
// ScopedLineState. goog.scope bodies and IIFEs keep the outer indentation.
void UnwrappedLineParser::parseChildBlock() {
  FormatTok->setBlockKind(BK_Block);
  nextToken();
  {
    bool SkipIndent = (Style.Language == FormatStyle::LK_JavaScript &&
                       (isGoogScope(*Line) || isIIFE(*Line, Keywords)));
    ScopedLineState LineState(*this);
    ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
                                            /*MustBeDeclaration=*/false);
    Line->Level += SkipIndent ? 0 : 1;
    parseLevel(/*HasOpeningBrace=*/true);
    flushComments(isOnNewLine(*FormatTok));
    Line->Level -= SkipIndent ? 0 : 1;
  }
  nextToken();
}

// Dispatches on the preprocessor keyword following '#'.
void UnwrappedLineParser::parsePPDirective() {
  assert(FormatTok->Tok.is(tok::hash) && "'#' expected");
  ScopedMacroState MacroState(*Line, Tokens, FormatTok);

  nextToken();

  if (!FormatTok->Tok.getIdentifierInfo()) {
    parsePPUnknown();
    return;
  }

  switch (FormatTok->Tok.getIdentifierInfo()->getPPKeywordID()) {
  case tok::pp_define:
    parsePPDefine();
    return;
  case tok::pp_if:
    parsePPIf(/*IfDef=*/false);
    break;
  case tok::pp_ifdef:
  case tok::pp_ifndef:
    parsePPIf(/*IfDef=*/true);
    break;
  case tok::pp_else:
    parsePPElse();
    break;
  case tok::pp_elifdef:
  case tok::pp_elifndef:
  case tok::pp_elif:
    parsePPElIf();
    break;
  case tok::pp_endif:
    parsePPEndIf();
    break;
  default:
    parsePPUnknown();
    break;
  }
}

// Pushes a PP branch record; a branch nested inside an unreachable branch is
// itself unreachable.
void UnwrappedLineParser::conditionalCompilationCondition(bool Unreachable) {
  size_t Line = CurrentLines->size();
  if (CurrentLines == &PreprocessorDirectives)
    Line += Lines.size();

  if (Unreachable ||
      (!PPStack.empty() && PPStack.back().Kind == PP_Unreachable))
    PPStack.push_back({PP_Unreachable, Line});
  else
    PPStack.push_back({PP_Conditional, Line});
}

// Opens a new #if level; only the branch selected by PPLevelBranchIndex is
// treated as reachable on this pass.
void UnwrappedLineParser::conditionalCompilationStart(bool Unreachable) {
  ++PPBranchLevel;
  assert(PPBranchLevel >= 0 && PPBranchLevel <= (int)PPLevelBranchIndex.size());
  if (PPBranchLevel == (int)PPLevelBranchIndex.size()) {
    PPLevelBranchIndex.push_back(0);
    PPLevelBranchCount.push_back(0);
  }
  PPChainBranchIndex.push(0);
  bool Skip = PPLevelBranchIndex[PPBranchLevel] > 0;
  conditionalCompilationCondition(Unreachable || Skip);
}

// Handles #else / #elif: moves to the next branch of the current chain.
void UnwrappedLineParser::conditionalCompilationAlternative() {
  if (!PPStack.empty())
    PPStack.pop_back();
  assert(PPBranchLevel < (int)PPLevelBranchIndex.size());
  if (!PPChainBranchIndex.empty())
    ++PPChainBranchIndex.top();
  conditionalCompilationCondition(
      PPBranchLevel >= 0 && !PPChainBranchIndex.empty() &&
      PPLevelBranchIndex[PPBranchLevel] != PPChainBranchIndex.top());
}

// Handles #endif: records how many branches this chain had and pops a level.
void UnwrappedLineParser::conditionalCompilationEnd() {
  assert(PPBranchLevel < (int)PPLevelBranchIndex.size());
  if (PPBranchLevel >= 0 && !PPChainBranchIndex.empty()) {
    if (PPChainBranchIndex.top() + 1 > PPLevelBranchCount[PPBranchLevel]) {
      PPLevelBranchCount[PPBranchLevel] = PPChainBranchIndex.top() + 1;
    }
  }
  // Guard against #endif's without #if.
  if (PPBranchLevel > -1)
    --PPBranchLevel;
  if (!PPChainBranchIndex.empty())
    PPChainBranchIndex.pop();
  if (!PPStack.empty())
    PPStack.pop_back();
}

void UnwrappedLineParser::parsePPIf(bool IfDef) {
  bool IfNDef = FormatTok->is(tok::pp_ifndef);
  nextToken();
  bool Unreachable = false;
  // "#if 0"/"#if false" and "#ifdef SWIG" bodies are treated as unreachable.
  if (!IfDef && (FormatTok->is(tok::kw_false) || FormatTok->TokenText == "0"))
    Unreachable = true;
  if (IfDef && !IfNDef && FormatTok->TokenText == "SWIG")
    Unreachable = true;
  conditionalCompilationStart(Unreachable);
  FormatToken *IfCondition = FormatTok;
  // If there's a #ifndef on the first line, and the only lines before it are
  // comments, it could be an include guard.
  bool MaybeIncludeGuard = IfNDef;
  if (IncludeGuard == IG_Inited && MaybeIncludeGuard)
    for (auto &Line : Lines) {
      if (!Line.Tokens.front().Tok->is(tok::comment)) {
        MaybeIncludeGuard = false;
        IncludeGuard = IG_Rejected;
        break;
      }
    }
  --PPBranchLevel;
  parsePPUnknown();
  ++PPBranchLevel;
  if (IncludeGuard == IG_Inited && MaybeIncludeGuard) {
    IncludeGuard = IG_IfNdefed;
    IncludeGuardToken = IfCondition;
  }
}

void UnwrappedLineParser::parsePPElse() {
  // If a potential include guard has an #else, it's not an include guard.
  if (IncludeGuard == IG_Defined && PPBranchLevel == 0)
    IncludeGuard = IG_Rejected;
  conditionalCompilationAlternative();
  if (PPBranchLevel > -1)
    --PPBranchLevel;
  parsePPUnknown();
  ++PPBranchLevel;
}

void UnwrappedLineParser::parsePPElIf() { parsePPElse(); }

void UnwrappedLineParser::parsePPEndIf() {
  conditionalCompilationEnd();
  parsePPUnknown();
  // If the #endif of a potential include guard is the last thing in the file,
  // then we found an include guard.
  unsigned TokenPosition = Tokens->getPosition();
  FormatToken *PeekNext = AllTokens[TokenPosition];
  if (IncludeGuard == IG_Defined && PPBranchLevel == -1 &&
      PeekNext->is(tok::eof) &&
      Style.IndentPPDirectives != FormatStyle::PPDIS_None)
    IncludeGuard = IG_Found;
}

// Parses a #define; tracks include-guard candidates and re-enters parseFile()
// for the macro body.
void UnwrappedLineParser::parsePPDefine() {
  nextToken();

  if (!FormatTok->Tok.getIdentifierInfo()) {
    IncludeGuard = IG_Rejected;
    IncludeGuardToken = nullptr;
    parsePPUnknown();
    return;
  }

  // A #define of the name matching an earlier #ifndef confirms the guard
  // pattern, as long as only comments and directives precede it.
  if (IncludeGuard == IG_IfNdefed &&
      IncludeGuardToken->TokenText == FormatTok->TokenText) {
    IncludeGuard = IG_Defined;
    IncludeGuardToken = nullptr;
    for (auto &Line : Lines) {
      if (!Line.Tokens.front().Tok->isOneOf(tok::comment, tok::hash)) {
        IncludeGuard = IG_Rejected;
        break;
      }
    }
  }

  nextToken();
  // A '(' glued to the macro name (empty whitespace range) is a function-like
  // macro parameter list.
  if (FormatTok->Tok.getKind() == tok::l_paren &&
      FormatTok->WhitespaceRange.getBegin() ==
          FormatTok->WhitespaceRange.getEnd()) {
    parseParens();
  }
  if (Style.IndentPPDirectives != FormatStyle::PPDIS_None)
    Line->Level += PPBranchLevel + 1;
  addUnwrappedLine();
  ++Line->Level;

  // Errors during a preprocessor directive can only affect the layout of the
  // preprocessor directive, and thus we ignore them. An alternative approach
  // would be to use the same approach we use on the file level (no
  // re-indentation if there was a structural error) within the macro
  // definition.
  parseFile();
}

// Consumes the rest of an unrecognized directive as a single unwrapped line.
void UnwrappedLineParser::parsePPUnknown() {
  do {
    nextToken();
  } while (!eof());
  if (Style.IndentPPDirectives != FormatStyle::PPDIS_None)
    Line->Level += PPBranchLevel + 1;
  addUnwrappedLine();
}

// Here we exclude certain tokens that are not usually the first token in an
// unwrapped line. This is used in attempt to distinguish macro calls without
// trailing semicolons from other constructs split to several lines.
static bool tokenCanStartNewLine(const FormatToken &Tok) {
  // Semicolon can be a null-statement, l_square can be a start of a macro or
  // a C++11 attribute, but this doesn't seem to be common.
  return Tok.isNot(tok::semi) && Tok.isNot(tok::l_brace) &&
         Tok.isNot(TT_AttributeSquare) &&
         // Tokens that can only be used as binary operators and a part of
         // overloaded operator names.
         Tok.isNot(tok::period) && Tok.isNot(tok::periodstar) &&
         Tok.isNot(tok::arrow) && Tok.isNot(tok::arrowstar) &&
         Tok.isNot(tok::less) && Tok.isNot(tok::greater) &&
         Tok.isNot(tok::slash) && Tok.isNot(tok::percent) &&
         Tok.isNot(tok::lessless) && Tok.isNot(tok::greatergreater) &&
         Tok.isNot(tok::equal) && Tok.isNot(tok::plusequal) &&
         Tok.isNot(tok::minusequal) && Tok.isNot(tok::starequal) &&
         Tok.isNot(tok::slashequal) && Tok.isNot(tok::percentequal) &&
         Tok.isNot(tok::ampequal) && Tok.isNot(tok::pipeequal) &&
         Tok.isNot(tok::caretequal) && Tok.isNot(tok::greatergreaterequal) &&
         Tok.isNot(tok::lesslessequal) &&
         // Colon is used in labels, base class lists, initializer lists,
         // range-based for loops, ternary operator, but should never be the
         // first token in an unwrapped line.
         Tok.isNot(tok::colon) &&
         // 'noexcept' is a trailing annotation.
         Tok.isNot(tok::kw_noexcept);
  // Any token not excluded above is assumed to be able to start a new line.
}

// Returns true if the token must be a JavaScript identifier (an identifier
// that is not one of the JS pseudo-keywords listed below).
static bool mustBeJSIdent(const AdditionalKeywords &Keywords,
                          const FormatToken *FormatTok) {
  // FIXME: This returns true for C/C++ keywords like 'struct'.
return FormatTok->is(tok::identifier) && (FormatTok->Tok.getIdentifierInfo() == nullptr || !FormatTok->isOneOf( Keywords.kw_in, Keywords.kw_of, Keywords.kw_as, Keywords.kw_async, Keywords.kw_await, Keywords.kw_yield, Keywords.kw_finally, Keywords.kw_function, Keywords.kw_import, Keywords.kw_is, Keywords.kw_let, Keywords.kw_var, tok::kw_const, Keywords.kw_abstract, Keywords.kw_extends, Keywords.kw_implements, Keywords.kw_instanceof, Keywords.kw_interface, Keywords.kw_throws, Keywords.kw_from)); } static bool mustBeJSIdentOrValue(const AdditionalKeywords &Keywords, const FormatToken *FormatTok) { return FormatTok->Tok.isLiteral() || FormatTok->isOneOf(tok::kw_true, tok::kw_false) || mustBeJSIdent(Keywords, FormatTok); } // isJSDeclOrStmt returns true if |FormatTok| starts a declaration or statement // when encountered after a value (see mustBeJSIdentOrValue). static bool isJSDeclOrStmt(const AdditionalKeywords &Keywords, const FormatToken *FormatTok) { return FormatTok->isOneOf( tok::kw_return, Keywords.kw_yield, // conditionals tok::kw_if, tok::kw_else, // loops tok::kw_for, tok::kw_while, tok::kw_do, tok::kw_continue, tok::kw_break, // switch/case tok::kw_switch, tok::kw_case, // exceptions tok::kw_throw, tok::kw_try, tok::kw_catch, Keywords.kw_finally, // declaration tok::kw_const, tok::kw_class, Keywords.kw_var, Keywords.kw_let, Keywords.kw_async, Keywords.kw_function, // import/export Keywords.kw_import, tok::kw_export); } +// Checks whether a token is a type in K&R C (aka C78). 
+static bool isC78Type(const FormatToken &Tok) { + return Tok.isOneOf(tok::kw_char, tok::kw_short, tok::kw_int, tok::kw_long, + tok::kw_unsigned, tok::kw_float, tok::kw_double, + tok::identifier); +} + // This function checks whether a token starts the first parameter declaration // in a K&R C (aka C78) function definition, e.g.: // int f(a, b) // short a, b; // { // return a + b; // } -static bool isC78ParameterDecl(const FormatToken *Tok) { - if (!Tok) +static bool isC78ParameterDecl(const FormatToken *Tok, const FormatToken *Next, + const FormatToken *FuncName) { + assert(Tok); + assert(Next); + assert(FuncName); + + if (FuncName->isNot(tok::identifier)) return false; - if (!Tok->isOneOf(tok::kw_int, tok::kw_char, tok::kw_float, tok::kw_double, - tok::kw_struct, tok::kw_union, tok::kw_long, tok::kw_short, - tok::kw_unsigned, tok::kw_register, tok::identifier)) + const FormatToken *Prev = FuncName->Previous; + if (!Prev || (Prev->isNot(tok::star) && !isC78Type(*Prev))) + return false; + + if (!isC78Type(*Tok) && + !Tok->isOneOf(tok::kw_register, tok::kw_struct, tok::kw_union)) + return false; + + if (Next->isNot(tok::star) && !Next->Tok.getIdentifierInfo()) return false; Tok = Tok->Previous; if (!Tok || Tok->isNot(tok::r_paren)) return false; Tok = Tok->Previous; if (!Tok || Tok->isNot(tok::identifier)) return false; return Tok->Previous && Tok->Previous->isOneOf(tok::l_paren, tok::comma); } // readTokenWithJavaScriptASI reads the next token and terminates the current // line if JavaScript Automatic Semicolon Insertion must // happen between the current token and the next token. // // This method is conservative - it cannot cover all edge cases of JavaScript, // but only aims to correctly handle certain well known cases. It *must not* // return true in speculative cases. void UnwrappedLineParser::readTokenWithJavaScriptASI() { FormatToken *Previous = FormatTok; readToken(); FormatToken *Next = FormatTok; bool IsOnSameLine = CommentsBeforeNextToken.empty() ? 
Next->NewlinesBefore == 0 : CommentsBeforeNextToken.front()->NewlinesBefore == 0; if (IsOnSameLine) return; bool PreviousMustBeValue = mustBeJSIdentOrValue(Keywords, Previous); bool PreviousStartsTemplateExpr = Previous->is(TT_TemplateString) && Previous->TokenText.endswith("${"); if (PreviousMustBeValue || Previous->is(tok::r_paren)) { // If the line contains an '@' sign, the previous token might be an // annotation, which can precede another identifier/value. bool HasAt = std::find_if(Line->Tokens.begin(), Line->Tokens.end(), [](UnwrappedLineNode &LineNode) { return LineNode.Tok->is(tok::at); }) != Line->Tokens.end(); if (HasAt) return; } if (Next->is(tok::exclaim) && PreviousMustBeValue) return addUnwrappedLine(); bool NextMustBeValue = mustBeJSIdentOrValue(Keywords, Next); bool NextEndsTemplateExpr = Next->is(TT_TemplateString) && Next->TokenText.startswith("}"); if (NextMustBeValue && !NextEndsTemplateExpr && !PreviousStartsTemplateExpr && (PreviousMustBeValue || Previous->isOneOf(tok::r_square, tok::r_paren, tok::plusplus, tok::minusminus))) return addUnwrappedLine(); if ((PreviousMustBeValue || Previous->is(tok::r_paren)) && isJSDeclOrStmt(Keywords, Next)) return addUnwrappedLine(); } void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) { assert(!FormatTok->is(tok::l_brace)); if (Style.Language == FormatStyle::LK_TableGen && FormatTok->is(tok::pp_include)) { nextToken(); if (FormatTok->is(tok::string_literal)) nextToken(); addUnwrappedLine(); return; } switch (FormatTok->Tok.getKind()) { case tok::kw_asm: nextToken(); if (FormatTok->is(tok::l_brace)) { FormatTok->setType(TT_InlineASMBrace); nextToken(); while (FormatTok && FormatTok->isNot(tok::eof)) { if (FormatTok->is(tok::r_brace)) { FormatTok->setType(TT_InlineASMBrace); nextToken(); addUnwrappedLine(); break; } FormatTok->Finalized = true; nextToken(); } } break; case tok::kw_namespace: parseNamespace(); return; case tok::kw_public: case tok::kw_protected: case tok::kw_private: if 
(Style.Language == FormatStyle::LK_Java || Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) nextToken(); else parseAccessSpecifier(); return; case tok::kw_if: if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration) // field/method declaration. break; parseIfThenElse(); return; case tok::kw_for: case tok::kw_while: if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration) // field/method declaration. break; parseForOrWhileLoop(); return; case tok::kw_do: if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration) // field/method declaration. break; parseDoWhile(); return; case tok::kw_switch: if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration) // 'switch: string' field declaration. break; parseSwitch(); return; case tok::kw_default: if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration) // 'default: string' field declaration. break; nextToken(); if (FormatTok->is(tok::colon)) { parseLabel(); return; } // e.g. "default void f() {}" in a Java interface. break; case tok::kw_case: if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration) // 'case: string' field declaration. break; parseCaseLabel(); return; case tok::kw_try: case tok::kw___try: if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration) // field/method declaration. break; parseTryCatch(); return; case tok::kw_extern: nextToken(); if (FormatTok->Tok.is(tok::string_literal)) { nextToken(); if (FormatTok->Tok.is(tok::l_brace)) { if (!Style.IndentExternBlock) { if (Style.BraceWrapping.AfterExternBlock) { addUnwrappedLine(); } unsigned AddLevels = Style.BraceWrapping.AfterExternBlock ? 1u : 0u; parseBlock(/*MustBeDeclaration=*/true, AddLevels); } else { unsigned AddLevels = Style.IndentExternBlock == FormatStyle::IEBS_Indent ? 
1u : 0u; parseBlock(/*MustBeDeclaration=*/true, AddLevels); } addUnwrappedLine(); return; } } break; case tok::kw_export: if (Style.Language == FormatStyle::LK_JavaScript) { parseJavaScriptEs6ImportExport(); return; } if (!Style.isCpp()) break; // Handle C++ "(inline|export) namespace". LLVM_FALLTHROUGH; case tok::kw_inline: nextToken(); if (FormatTok->Tok.is(tok::kw_namespace)) { parseNamespace(); return; } break; case tok::identifier: if (FormatTok->is(TT_ForEachMacro)) { parseForOrWhileLoop(); return; } if (FormatTok->is(TT_MacroBlockBegin)) { parseBlock(/*MustBeDeclaration=*/false, /*AddLevels=*/1u, /*MunchSemi=*/false); return; } if (FormatTok->is(Keywords.kw_import)) { if (Style.Language == FormatStyle::LK_JavaScript) { parseJavaScriptEs6ImportExport(); return; } if (Style.Language == FormatStyle::LK_Proto) { nextToken(); if (FormatTok->is(tok::kw_public)) nextToken(); if (!FormatTok->is(tok::string_literal)) return; nextToken(); if (FormatTok->is(tok::semi)) nextToken(); addUnwrappedLine(); return; } } if (Style.isCpp() && FormatTok->isOneOf(Keywords.kw_signals, Keywords.kw_qsignals, Keywords.kw_slots, Keywords.kw_qslots)) { nextToken(); if (FormatTok->is(tok::colon)) { nextToken(); addUnwrappedLine(); return; } } if (Style.isCpp() && FormatTok->is(TT_StatementMacro)) { parseStatementMacro(); return; } if (Style.isCpp() && FormatTok->is(TT_NamespaceMacro)) { parseNamespace(); return; } // In all other cases, parse the declaration. 
    break;
  default:
    break;
  }
  // Consume tokens in this loop until something terminates the structural
  // element (semicolon, closing brace, block, etc.).
  do {
    const FormatToken *Previous = FormatTok->Previous;
    switch (FormatTok->Tok.getKind()) {
    case tok::at:
      nextToken();
      if (FormatTok->Tok.is(tok::l_brace)) {
        nextToken();
        parseBracedList();
        break;
      } else if (Style.Language == FormatStyle::LK_Java &&
                 FormatTok->is(Keywords.kw_interface)) {
        nextToken();
        break;
      }
      switch (FormatTok->Tok.getObjCKeywordID()) {
      case tok::objc_public:
      case tok::objc_protected:
      case tok::objc_package:
      case tok::objc_private:
        return parseAccessSpecifier();
      case tok::objc_interface:
      case tok::objc_implementation:
        return parseObjCInterfaceOrImplementation();
      case tok::objc_protocol:
        if (parseObjCProtocol())
          return;
        break;
      case tok::objc_end:
        return; // Handled by the caller.
      case tok::objc_optional:
      case tok::objc_required:
        nextToken();
        addUnwrappedLine();
        return;
      case tok::objc_autoreleasepool:
        nextToken();
        if (FormatTok->Tok.is(tok::l_brace)) {
          if (Style.BraceWrapping.AfterControlStatement ==
              FormatStyle::BWACS_Always)
            addUnwrappedLine();
          parseBlock(/*MustBeDeclaration=*/false);
        }
        addUnwrappedLine();
        return;
      case tok::objc_synchronized:
        nextToken();
        if (FormatTok->Tok.is(tok::l_paren))
          // Skip synchronization object
          parseParens();
        if (FormatTok->Tok.is(tok::l_brace)) {
          if (Style.BraceWrapping.AfterControlStatement ==
              FormatStyle::BWACS_Always)
            addUnwrappedLine();
          parseBlock(/*MustBeDeclaration=*/false);
        }
        addUnwrappedLine();
        return;
      case tok::objc_try:
        // This branch isn't strictly necessary (the kw_try case below would
        // do this too after the tok::at is parsed above). But be explicit.
        parseTryCatch();
        return;
      default:
        break;
      }
      break;
    case tok::kw_concept:
      parseConcept();
      break;
    case tok::kw_requires:
      parseRequires();
      break;
    case tok::kw_enum:
      // Ignore if this is part of "template <enum ..." template parameters.
      if (Previous && Previous->is(tok::less)) {
        nextToken();
        break;
      }

      // parseEnum falls through and does not yet add an unwrapped line as an
      // enum definition can start a structural element.
      if (!parseEnum())
        break;
      // This only applies for C++.
if (!Style.isCpp()) { addUnwrappedLine(); return; } break; case tok::kw_typedef: nextToken(); if (FormatTok->isOneOf(Keywords.kw_NS_ENUM, Keywords.kw_NS_OPTIONS, Keywords.kw_CF_ENUM, Keywords.kw_CF_OPTIONS, Keywords.kw_CF_CLOSED_ENUM, Keywords.kw_NS_CLOSED_ENUM)) parseEnum(); break; case tok::kw_struct: case tok::kw_union: case tok::kw_class: if (parseStructLike()) { return; } break; case tok::period: nextToken(); // In Java, classes have an implicit static member "class". if (Style.Language == FormatStyle::LK_Java && FormatTok && FormatTok->is(tok::kw_class)) nextToken(); if (Style.Language == FormatStyle::LK_JavaScript && FormatTok && FormatTok->Tok.getIdentifierInfo()) // JavaScript only has pseudo keywords, all keywords are allowed to // appear in "IdentifierName" positions. See http://es5.github.io/#x7.6 nextToken(); break; case tok::semi: nextToken(); addUnwrappedLine(); return; case tok::r_brace: addUnwrappedLine(); return; - case tok::l_paren: + case tok::l_paren: { parseParens(); // Break the unwrapped line if a K&R C function definition has a parameter // declaration. 
- if (!IsTopLevel || !Style.isCpp()) - break; - if (!Previous || Previous->isNot(tok::identifier)) + if (!IsTopLevel || !Style.isCpp() || !Previous || FormatTok->is(tok::eof)) break; - if (Previous->Previous && Previous->Previous->is(tok::at)) - break; - if (isC78ParameterDecl(FormatTok)) { + const unsigned Position = Tokens->getPosition() + 1; + assert(Position < AllTokens.size()); + if (isC78ParameterDecl(FormatTok, AllTokens[Position], Previous)) { addUnwrappedLine(); return; } break; + } case tok::kw_operator: nextToken(); if (FormatTok->isBinaryOperator()) nextToken(); break; case tok::caret: nextToken(); if (FormatTok->Tok.isAnyIdentifier() || FormatTok->isSimpleTypeSpecifier()) nextToken(); if (FormatTok->is(tok::l_paren)) parseParens(); if (FormatTok->is(tok::l_brace)) parseChildBlock(); break; case tok::l_brace: if (!tryToParsePropertyAccessor() && !tryToParseBracedList()) { // A block outside of parentheses must be the last part of a // structural element. // FIXME: Figure out cases where this is not true, and add projections // for them (the one we know is missing are lambdas). if (Style.BraceWrapping.AfterFunction) addUnwrappedLine(); FormatTok->setType(TT_FunctionLBrace); parseBlock(/*MustBeDeclaration=*/false); addUnwrappedLine(); return; } // Otherwise this was a braced init list, and the structural // element continues. break; case tok::kw_try: if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration) { // field/method declaration. nextToken(); break; } // We arrive here when parsing function-try blocks. 
      if (Style.BraceWrapping.AfterFunction)
        addUnwrappedLine();
      parseTryCatch();
      return;
    case tok::identifier: {
      if (Style.isCSharp() && FormatTok->is(Keywords.kw_where) &&
          Line->MustBeDeclaration) {
        addUnwrappedLine();
        parseCSharpGenericTypeConstraint();
        break;
      }
      if (FormatTok->is(TT_MacroBlockEnd)) {
        addUnwrappedLine();
        return;
      }

      // Function declarations (as opposed to function expressions) are parsed
      // on their own unwrapped line by continuing this loop. Function
      // expressions (functions that are not on their own line) must not create
      // a new unwrapped line, so they are special cased below.
      size_t TokenCount = Line->Tokens.size();
      if (Style.Language == FormatStyle::LK_JavaScript &&
          FormatTok->is(Keywords.kw_function) &&
          (TokenCount > 1 || (TokenCount == 1 && !Line->Tokens.front().Tok->is(
                                                     Keywords.kw_async)))) {
        tryToParseJSFunction();
        break;
      }
      if ((Style.Language == FormatStyle::LK_JavaScript ||
           Style.Language == FormatStyle::LK_Java) &&
          FormatTok->is(Keywords.kw_interface)) {
        if (Style.Language == FormatStyle::LK_JavaScript) {
          // In JavaScript/TypeScript, "interface" can be used as a standalone
          // identifier, e.g. in `var interface = 1;`. If "interface" is
          // followed by another identifier, it is very like to be an actual
          // interface declaration.
          unsigned StoredPosition = Tokens->getPosition();
          FormatToken *Next = Tokens->getNextToken();
          FormatTok = Tokens->setPosition(StoredPosition);
          if (Next && !mustBeJSIdent(Keywords, Next)) {
            nextToken();
            break;
          }
        }
        parseRecord();
        addUnwrappedLine();
        return;
      }
      if (FormatTok->is(Keywords.kw_interface)) {
        if (parseStructLike()) {
          return;
        }
        break;
      }
      if (Style.isCpp() && FormatTok->is(TT_StatementMacro)) {
        parseStatementMacro();
        return;
      }

      // See if the following token should start a new unwrapped line.
      StringRef Text = FormatTok->TokenText;
      nextToken();

      // JS doesn't have macros, and within classes colons indicate fields, not
      // labels.
      if (Style.Language == FormatStyle::LK_JavaScript)
        break;

      TokenCount = Line->Tokens.size();
      if (TokenCount == 1 ||
          (TokenCount == 2 && Line->Tokens.front().Tok->is(tok::comment))) {
        if (FormatTok->Tok.is(tok::colon) && !Line->MustBeDeclaration) {
          Line->Tokens.begin()->Tok->MustBreakBefore = true;
          parseLabel(!Style.IndentGotoLabels);
          return;
        }
        // Recognize function-like macro usages without trailing semicolon as
        // well as free-standing macros like Q_OBJECT.
        bool FunctionLike = FormatTok->is(tok::l_paren);
        if (FunctionLike)
          parseParens();

        bool FollowedByNewline =
            CommentsBeforeNextToken.empty()
                ? FormatTok->NewlinesBefore > 0
                : CommentsBeforeNextToken.front()->NewlinesBefore > 0;

        // Heuristic: an all-uppercase identifier followed by a newline is
        // treated as a statement macro and ends the unwrapped line here.
        if (FollowedByNewline && (Text.size() >= 5 || FunctionLike) &&
            tokenCanStartNewLine(*FormatTok) && Text == Text.upper()) {
          addUnwrappedLine();
          return;
        }
      }
      break;
    }
    case tok::equal:
      // Fat arrows (=>) have tok::TokenKind tok::equal but TokenType
      // TT_FatArrow. They always start an expression or a child block if
      // followed by a curly brace.
      if (FormatTok->is(TT_FatArrow)) {
        nextToken();
        if (FormatTok->is(tok::l_brace)) {
          // C# may break after => if the next character is a newline.
          if (Style.isCSharp() && Style.BraceWrapping.AfterFunction == true) {
            // calling `addUnwrappedLine()` here causes odd parsing errors.
            FormatTok->MustBreakBefore = true;
          }
          parseChildBlock();
        }
        break;
      }

      nextToken();
      if (FormatTok->Tok.is(tok::l_brace)) {
        // Block kind should probably be set to BK_BracedInit for any language.
        // C# needs this change to ensure that array initialisers and object
        // initialisers are indented the same way.
        if (Style.isCSharp())
          FormatTok->setBlockKind(BK_BracedInit);
        nextToken();
        parseBracedList();
      } else if (Style.Language == FormatStyle::LK_Proto &&
                 FormatTok->Tok.is(tok::less)) {
        // Proto text-format options use '<' ... '>' as braced lists.
        nextToken();
        parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
                        /*ClosingBraceKind=*/tok::greater);
      }
      break;
    case tok::l_square:
      parseSquare();
      break;
    case tok::kw_new:
      parseNew();
      break;
    default:
      nextToken();
      break;
    }
  } while (!eof());
}

// Attempts to parse a C# property accessor block ("{ get; set; }" and
// friends); returns false (with the token position restored) if the '{' does
// not open one.
bool UnwrappedLineParser::tryToParsePropertyAccessor() {
  assert(FormatTok->is(tok::l_brace));
  if (!Style.isCSharp())
    return false;
  // See if it's a property accessor.
  if (FormatTok->Previous->isNot(tok::identifier))
    return false;

  // See if we are inside a property accessor.
  //
  // Record the current tokenPosition so that we can advance and
  // reset the current token. `Next` is not set yet so we need
  // another way to advance along the token stream.
  unsigned int StoredPosition = Tokens->getPosition();
  FormatToken *Tok = Tokens->getNextToken();

  // A trivial property accessor is of the form:
  // { [ACCESS_SPECIFIER] [get]; [ACCESS_SPECIFIER] [set] }
  // Track these as they do not require line breaks to be introduced.
bool HasGetOrSet = false;
  // A "trivial" accessor body contains only access specifiers and bare
  // get/set plus semicolons; it may stay contracted on one line.
  bool IsTrivialPropertyAccessor = true;
  while (!eof()) {
    if (Tok->isOneOf(tok::semi, tok::kw_public, tok::kw_private,
                     tok::kw_protected, Keywords.kw_internal, Keywords.kw_get,
                     Keywords.kw_set)) {
      if (Tok->isOneOf(Keywords.kw_get, Keywords.kw_set))
        HasGetOrSet = true;
      Tok = Tokens->getNextToken();
      continue;
    }
    if (Tok->isNot(tok::r_brace))
      IsTrivialPropertyAccessor = false;
    break;
  }

  if (!HasGetOrSet) {
    // No get/set keyword seen: not a property accessor. Rewind to where we
    // started peeking.
    Tokens->setPosition(StoredPosition);
    return false;
  }

  // Try to parse the property accessor:
  // https://docs.microsoft.com/en-us/dotnet/csharp/programming-guide/classes-and-structs/properties
  Tokens->setPosition(StoredPosition);
  if (!IsTrivialPropertyAccessor && Style.BraceWrapping.AfterFunction == true)
    addUnwrappedLine();
  nextToken();
  do {
    switch (FormatTok->Tok.getKind()) {
    case tok::r_brace:
      nextToken();
      // An '=' after the closing brace is a property initializer; consume
      // through the terminating semicolon.
      if (FormatTok->is(tok::equal)) {
        while (!eof() && FormatTok->isNot(tok::semi))
          nextToken();
        nextToken();
      }
      addUnwrappedLine();
      return true;
    case tok::l_brace:
      // Accessor with a block body.
      ++Line->Level;
      parseBlock(/*MustBeDeclaration=*/true);
      addUnwrappedLine();
      --Line->Level;
      break;
    case tok::equal:
      // Expression-bodied accessor: `get => expr;` — consume up to the semi.
      if (FormatTok->is(TT_FatArrow)) {
        ++Line->Level;
        do {
          nextToken();
        } while (!eof() && FormatTok->isNot(tok::semi));
        nextToken();
        addUnwrappedLine();
        --Line->Level;
        break;
      }
      nextToken();
      break;
    default:
      if (FormatTok->isOneOf(Keywords.kw_get, Keywords.kw_set) &&
          !IsTrivialPropertyAccessor) {
        // Non-trivial get/set needs to be on its own line.
        addUnwrappedLine();
      }
      nextToken();
    }
  } while (!eof());

  // Unreachable for well-formed code (paired '{' and '}').
  return true;
}

// Attempts to parse a C++ lambda starting at the current '['. Returns true
// when a lambda (introducer, optional parameters/specifiers/return type, and
// body) was consumed; returns false after consuming one token when the '['
// cannot start a lambda.
bool UnwrappedLineParser::tryToParseLambda() {
  if (!Style.isCpp()) {
    nextToken();
    return false;
  }
  assert(FormatTok->is(tok::l_square));
  FormatToken &LSquare = *FormatTok;
  if (!tryToParseLambdaIntroducer())
    return false;

  bool SeenArrow = false;

  // Consume everything between the introducer and the body's '{'.
  while (FormatTok->isNot(tok::l_brace)) {
    if (FormatTok->isSimpleTypeSpecifier()) {
      nextToken();
      continue;
    }
    switch (FormatTok->Tok.getKind()) {
    case tok::l_brace:
      break;
    case tok::l_paren:
      parseParens();
      break;
    case tok::amp:
    case tok::star:
    case tok::kw_const:
    case tok::comma:
    case tok::less:
    case tok::greater:
    case tok::identifier:
    case tok::numeric_constant:
    case tok::coloncolon:
    case tok::kw_class:
    case tok::kw_mutable:
    case tok::kw_noexcept:
    case tok::kw_template:
    case tok::kw_typename:
      nextToken();
      break;
    // Specialization of a template with an integer parameter can contain
    // arithmetic, logical, comparison and ternary operators.
    //
    // FIXME: This also accepts sequences of operators that are not in the
    // scope of a template argument list.
    //
    // In a C++ lambda a template type can only occur after an arrow. We use
    // this as an heuristic to distinguish between Objective-C expressions
    // followed by an `a->b` expression, such as:
    // ([obj func:arg] + a->b)
    // Otherwise the code below would parse as a lambda.
    //
    // FIXME: This heuristic is incorrect for C++20 generic lambdas with
    // explicit template lists: [](U &&u){}
    case tok::plus:
    case tok::minus:
    case tok::exclaim:
    case tok::tilde:
    case tok::slash:
    case tok::percent:
    case tok::lessless:
    case tok::pipe:
    case tok::pipepipe:
    case tok::ampamp:
    case tok::caret:
    case tok::equalequal:
    case tok::exclaimequal:
    case tok::greaterequal:
    case tok::lessequal:
    case tok::question:
    case tok::colon:
    case tok::ellipsis:
    case tok::kw_true:
    case tok::kw_false:
      if (SeenArrow) {
        nextToken();
        break;
      }
      return true;
    case tok::arrow:
      // This might or might not actually be a lambda arrow (this could be an
      // ObjC method invocation followed by a dereferencing arrow).
// We might reset this back to TT_Unknown in TokenAnnotator.
      FormatTok->setType(TT_LambdaArrow);
      SeenArrow = true;
      nextToken();
      break;
    default:
      return true;
    }
  }
  FormatTok->setType(TT_LambdaLBrace);
  LSquare.setType(TT_LambdaLSquare);
  parseChildBlock();
  return true;
}

// Parses a lambda introducer '[...]'. Returns false when the current '[' is
// more likely a subscript, ObjC message send, attribute, or structured
// binding than a capture list.
bool UnwrappedLineParser::tryToParseLambdaIntroducer() {
  const FormatToken *Previous = FormatTok->Previous;
  if (Previous &&
      (Previous->isOneOf(tok::identifier, tok::kw_operator, tok::kw_new,
                         tok::kw_delete, tok::l_square) ||
       FormatTok->isCppStructuredBinding(Style) || Previous->closesScope() ||
       Previous->isSimpleTypeSpecifier())) {
    nextToken();
    return false;
  }
  nextToken();
  if (FormatTok->is(tok::l_square)) {
    return false;
  }
  parseSquare(/*LambdaIntroducer=*/true);
  return true;
}

// Consumes a JavaScript function declaration/expression: optional 'async',
// 'function', generator '*', optional name, parameter list, optional return
// type annotation, and the body (unless it is a bodiless declaration).
void UnwrappedLineParser::tryToParseJSFunction() {
  assert(FormatTok->is(Keywords.kw_function) ||
         FormatTok->startsSequence(Keywords.kw_async, Keywords.kw_function));
  if (FormatTok->is(Keywords.kw_async))
    nextToken();
  // Consume "function".
  nextToken();

  // Consume * (generator function). Treat it like C++'s overloaded operators.
  if (FormatTok->is(tok::star)) {
    FormatTok->setType(TT_OverloadedOperator);
    nextToken();
  }

  // Consume function name.
  if (FormatTok->is(tok::identifier))
    nextToken();

  if (FormatTok->isNot(tok::l_paren))
    return;

  // Parse formal parameter list.
  parseParens();

  if (FormatTok->is(tok::colon)) {
    // Parse a type definition.
    nextToken();

    // Eat the type declaration. For braced inline object types, balance
    // braces, otherwise just parse until finding an l_brace for the function
    // body.
    if (FormatTok->is(tok::l_brace))
      tryToParseBracedList();
    else
      while (!FormatTok->isOneOf(tok::l_brace, tok::semi) && !eof())
        nextToken();
  }

  if (FormatTok->is(tok::semi))
    return;

  parseChildBlock();
}

// Parses a braced list at the current '{' when it is an initializer-style
// list (BK_BracedInit); returns false without consuming when the brace opens
// a block.
bool UnwrappedLineParser::tryToParseBracedList() {
  if (FormatTok->is(BK_Unknown))
    calculateBraceTypes();
  assert(FormatTok->isNot(BK_Unknown));
  if (FormatTok->is(BK_Block))
    return false;
  nextToken();
  parseBracedList();
  return true;
}

// Parses the contents of a braced (or, for proto, angle-bracketed) list up to
// ClosingBraceKind. Returns true iff no parse error was seen.
bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
                                          bool IsEnum,
                                          tok::TokenKind ClosingBraceKind) {
  bool HasError = false;

  // FIXME: Once we have an expression parser in the UnwrappedLineParser,
  // replace this by using parseAssignmentExpression() inside.
  do {
    if (Style.isCSharp()) {
      // Fat arrows (=>) have tok::TokenKind tok::equal but TokenType
      // TT_FatArrow. They always start an expression or a child block if
      // followed by a curly brace.
      if (FormatTok->is(TT_FatArrow)) {
        nextToken();
        if (FormatTok->is(tok::l_brace)) {
          // C# may break after => if the next character is a newline.
          if (Style.isCSharp() && Style.BraceWrapping.AfterFunction == true) {
            // calling `addUnwrappedLine()` here causes odd parsing errors.
            FormatTok->MustBreakBefore = true;
          }
          parseChildBlock();
          continue;
        }
      }
    }
    if (Style.Language == FormatStyle::LK_JavaScript) {
      if (FormatTok->is(Keywords.kw_function) ||
          FormatTok->startsSequence(Keywords.kw_async, Keywords.kw_function)) {
        tryToParseJSFunction();
        continue;
      }
      if (FormatTok->is(TT_FatArrow)) {
        nextToken();
        // Fat arrows can be followed by simple expressions or by child blocks
        // in curly braces.
        if (FormatTok->is(tok::l_brace)) {
          parseChildBlock();
          continue;
        }
      }
      if (FormatTok->is(tok::l_brace)) {
        // Could be a method inside of a braced list `{a() { return 1; }}`.
if (tryToParseBracedList())
          continue;
        parseChildBlock();
      }
    }
    if (FormatTok->Tok.getKind() == ClosingBraceKind) {
      if (IsEnum && !Style.AllowShortEnumsOnASingleLine)
        addUnwrappedLine();
      nextToken();
      return !HasError;
    }
    switch (FormatTok->Tok.getKind()) {
    case tok::caret:
      // ObjC block literal: '^ { ... }'.
      nextToken();
      if (FormatTok->is(tok::l_brace)) {
        parseChildBlock();
      }
      break;
    case tok::l_square:
      if (Style.isCSharp())
        parseSquare();
      else
        tryToParseLambda();
      break;
    case tok::l_paren:
      parseParens();
      // JavaScript can just have free standing methods and getters/setters in
      // object literals. Detect them by a "{" following ")".
      if (Style.Language == FormatStyle::LK_JavaScript) {
        if (FormatTok->is(tok::l_brace))
          parseChildBlock();
        break;
      }
      break;
    case tok::l_brace:
      // Assume there are no blocks inside a braced init list apart
      // from the ones we explicitly parse out (like lambdas).
      FormatTok->setBlockKind(BK_BracedInit);
      nextToken();
      parseBracedList();
      break;
    case tok::less:
      if (Style.Language == FormatStyle::LK_Proto) {
        // Proto message literals use '<...>'; recurse with '>' as closer.
        nextToken();
        parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
                        /*ClosingBraceKind=*/tok::greater);
      } else {
        nextToken();
      }
      break;
    case tok::semi:
      // JavaScript (or more precisely TypeScript) can have semicolons in
      // braced lists (in so-called TypeMemberLists). Thus, the semicolon
      // cannot be used for error recovery if we have otherwise determined
      // that this is a braced list.
      if (Style.Language == FormatStyle::LK_JavaScript) {
        nextToken();
        break;
      }
      HasError = true;
      if (!ContinueOnSemicolons)
        return !HasError;
      nextToken();
      break;
    case tok::comma:
      nextToken();
      if (IsEnum && !Style.AllowShortEnumsOnASingleLine)
        addUnwrappedLine();
      break;
    default:
      nextToken();
      break;
    }
  } while (!eof());
  // Ran out of tokens before seeing the closing brace.
  return false;
}

// Consumes a balanced parenthesized expression starting at the current '(',
// recursing into nested parens, lambdas, braced lists and child blocks.
void UnwrappedLineParser::parseParens() {
  assert(FormatTok->Tok.is(tok::l_paren) && "'(' expected.");
  nextToken();
  do {
    switch (FormatTok->Tok.getKind()) {
    case tok::l_paren:
      parseParens();
      if (Style.Language == FormatStyle::LK_Java &&
          FormatTok->is(tok::l_brace))
        parseChildBlock();
      break;
    case tok::r_paren:
      nextToken();
      return;
    case tok::r_brace:
      // A "}" inside parenthesis is an error if there wasn't a matching "{".
      return;
    case tok::l_square:
      tryToParseLambda();
      break;
    case tok::l_brace:
      if (!tryToParseBracedList())
        parseChildBlock();
      break;
    case tok::at:
      // ObjC literal, e.g. '@{...}'.
      nextToken();
      if (FormatTok->Tok.is(tok::l_brace)) {
        nextToken();
        parseBracedList();
      }
      break;
    case tok::equal:
      if (Style.isCSharp() && FormatTok->is(TT_FatArrow))
        parseStructuralElement();
      else
        nextToken();
      break;
    case tok::kw_class:
      if (Style.Language == FormatStyle::LK_JavaScript)
        parseRecord(/*ParseAsExpr=*/true);
      else
        nextToken();
      break;
    case tok::identifier:
      if (Style.Language == FormatStyle::LK_JavaScript &&
          (FormatTok->is(Keywords.kw_function) ||
           FormatTok->startsSequence(Keywords.kw_async, Keywords.kw_function)))
        tryToParseJSFunction();
      else
        nextToken();
      break;
    default:
      nextToken();
      break;
    }
  } while (!eof());
}

// Consumes a balanced '[...]'. When LambdaIntroducer is false, first tries to
// parse the '[' as a lambda.
void UnwrappedLineParser::parseSquare(bool LambdaIntroducer) {
  if (!LambdaIntroducer) {
    assert(FormatTok->Tok.is(tok::l_square) && "'[' expected.");
    if (tryToParseLambda())
      return;
  }
  do {
    switch (FormatTok->Tok.getKind()) {
    case tok::l_paren:
      parseParens();
      break;
    case tok::r_square:
      nextToken();
      return;
    case tok::r_brace:
      // A "}" inside parenthesis is an error if there wasn't a matching "{".
return;
    case tok::l_square:
      parseSquare();
      break;
    case tok::l_brace: {
      if (!tryToParseBracedList())
        parseChildBlock();
      break;
    }
    case tok::at:
      nextToken();
      if (FormatTok->Tok.is(tok::l_brace)) {
        nextToken();
        parseBracedList();
      }
      break;
    default:
      nextToken();
      break;
    }
  } while (!eof());
}

// Parses an if/else chain, recursing into 'else if' and balancing line levels
// for braced and unbraced bodies.
void UnwrappedLineParser::parseIfThenElse() {
  assert(FormatTok->Tok.is(tok::kw_if) && "'if' expected");
  nextToken();
  // 'if constexpr' or an if-macro identifier.
  if (FormatTok->Tok.isOneOf(tok::kw_constexpr, tok::identifier))
    nextToken();
  if (FormatTok->Tok.is(tok::l_paren))
    parseParens();
  // handle [[likely]] / [[unlikely]]
  if (FormatTok->is(tok::l_square) && tryToParseSimpleAttribute())
    parseSquare();
  bool NeedsUnwrappedLine = false;
  if (FormatTok->Tok.is(tok::l_brace)) {
    CompoundStatementIndenter Indenter(this, Style, Line->Level);
    parseBlock(/*MustBeDeclaration=*/false);
    if (Style.BraceWrapping.BeforeElse)
      addUnwrappedLine();
    else
      NeedsUnwrappedLine = true;
  } else {
    // Unbraced then-branch: single indented structural element.
    addUnwrappedLine();
    ++Line->Level;
    parseStructuralElement();
    --Line->Level;
  }
  if (FormatTok->Tok.is(tok::kw_else)) {
    nextToken();
    // handle [[likely]] / [[unlikely]]
    if (FormatTok->Tok.is(tok::l_square) && tryToParseSimpleAttribute())
      parseSquare();
    if (FormatTok->Tok.is(tok::l_brace)) {
      CompoundStatementIndenter Indenter(this, Style, Line->Level);
      parseBlock(/*MustBeDeclaration=*/false);
      addUnwrappedLine();
    } else if (FormatTok->Tok.is(tok::kw_if)) {
      // 'else if': a comment between 'else' and 'if' forces the nested if
      // onto its own (indented) line.
      FormatToken *Previous = AllTokens[Tokens->getPosition() - 1];
      bool PrecededByComment = Previous->is(tok::comment);
      if (PrecededByComment) {
        addUnwrappedLine();
        ++Line->Level;
      }
      parseIfThenElse();
      if (PrecededByComment)
        --Line->Level;
    } else {
      addUnwrappedLine();
      ++Line->Level;
      parseStructuralElement();
      if (FormatTok->is(tok::eof))
        addUnwrappedLine();
      --Line->Level;
    }
  } else if (NeedsUnwrappedLine) {
    addUnwrappedLine();
  }
}

// Parses try/catch (C++, Java, JS, ObjC, and MSVC __try/__except/__finally),
// including function-try-blocks with initializer lists.
void UnwrappedLineParser::parseTryCatch() {
  assert(FormatTok->isOneOf(tok::kw_try, tok::kw___try) && "'try' expected");
  nextToken();
  bool NeedsUnwrappedLine = false;
  if (FormatTok->is(tok::colon)) {
    // We are in a function try block, what comes is an initializer list.
    nextToken();

    // In case identifiers were removed by clang-tidy, what might follow is
    // multiple commas in sequence - before the first identifier.
    while (FormatTok->is(tok::comma))
      nextToken();

    while (FormatTok->is(tok::identifier)) {
      nextToken();
      if (FormatTok->is(tok::l_paren))
        parseParens();
      if (FormatTok->Previous && FormatTok->Previous->is(tok::identifier) &&
          FormatTok->is(tok::l_brace)) {
        // Brace-initialized member in the init list: skip to its '}'.
        do {
          nextToken();
        } while (!FormatTok->is(tok::r_brace));
        nextToken();
      }

      // In case identifiers were removed by clang-tidy, what might follow is
      // multiple commas in sequence - after the first identifier.
      while (FormatTok->is(tok::comma))
        nextToken();
    }
  }
  // Parse try with resource.
  if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_paren)) {
    parseParens();
  }
  if (FormatTok->is(tok::l_brace)) {
    CompoundStatementIndenter Indenter(this, Style, Line->Level);
    parseBlock(/*MustBeDeclaration=*/false);
    if (Style.BraceWrapping.BeforeCatch) {
      addUnwrappedLine();
    } else {
      NeedsUnwrappedLine = true;
    }
  } else if (!FormatTok->is(tok::kw_catch)) {
    // The C++ standard requires a compound-statement after a try.
    // If there's none, we try to assume there's a structuralElement
    // and try to continue.
addUnwrappedLine();
    ++Line->Level;
    parseStructuralElement();
    --Line->Level;
  }
  // Consume any number of catch/finally-style handlers.
  while (1) {
    if (FormatTok->is(tok::at))
      nextToken();
    if (!(FormatTok->isOneOf(tok::kw_catch, Keywords.kw___except,
                             tok::kw___finally) ||
          ((Style.Language == FormatStyle::LK_Java ||
            Style.Language == FormatStyle::LK_JavaScript) &&
           FormatTok->is(Keywords.kw_finally)) ||
          (FormatTok->Tok.isObjCAtKeyword(tok::objc_catch) ||
           FormatTok->Tok.isObjCAtKeyword(tok::objc_finally))))
      break;
    nextToken();
    // Skip the exception-declaration up to the handler's '{'.
    while (FormatTok->isNot(tok::l_brace)) {
      if (FormatTok->is(tok::l_paren)) {
        parseParens();
        continue;
      }
      if (FormatTok->isOneOf(tok::semi, tok::r_brace, tok::eof))
        return;
      nextToken();
    }
    NeedsUnwrappedLine = false;
    CompoundStatementIndenter Indenter(this, Style, Line->Level);
    parseBlock(/*MustBeDeclaration=*/false);
    if (Style.BraceWrapping.BeforeCatch)
      addUnwrappedLine();
    else
      NeedsUnwrappedLine = true;
  }
  if (NeedsUnwrappedLine)
    addUnwrappedLine();
}

// Parses a namespace (or NAMESPACE-macro) declaration and its block,
// honoring NamespaceIndentation and Whitesmiths brace handling.
void UnwrappedLineParser::parseNamespace() {
  assert(FormatTok->isOneOf(tok::kw_namespace, TT_NamespaceMacro) &&
         "'namespace' expected");

  const FormatToken &InitialToken = *FormatTok;
  nextToken();
  if (InitialToken.is(TT_NamespaceMacro)) {
    parseParens();
  } else {
    // Consume nested-name-specifier, 'inline', and attributes before '{'.
    while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::kw_inline,
                              tok::l_square)) {
      if (FormatTok->is(tok::l_square))
        parseSquare();
      else
        nextToken();
    }
  }
  if (FormatTok->Tok.is(tok::l_brace)) {
    if (ShouldBreakBeforeBrace(Style, InitialToken))
      addUnwrappedLine();

    unsigned AddLevels =
        Style.NamespaceIndentation == FormatStyle::NI_All ||
                (Style.NamespaceIndentation == FormatStyle::NI_Inner &&
                 DeclarationScopeStack.size() > 1)
            ? 1u
            : 0u;

    bool ManageWhitesmithsBraces =
        AddLevels == 0u &&
        Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths;

    // If we're in Whitesmiths mode, indent the brace if we're not indenting
    // the whole block.
    if (ManageWhitesmithsBraces)
      ++Line->Level;

    parseBlock(/*MustBeDeclaration=*/true, AddLevels,
               /*MunchSemi=*/true,
               /*UnindentWhitesmithsBraces=*/ManageWhitesmithsBraces);

    // Munch the semicolon after a namespace. This is more common than one
    // would think. Putting the semicolon into its own line is very ugly.
    if (FormatTok->Tok.is(tok::semi))
      nextToken();

    addUnwrappedLine(AddLevels > 0 ? LineLevel::Remove : LineLevel::Keep);

    if (ManageWhitesmithsBraces)
      --Line->Level;
  }
  // FIXME: Add error handling.
}

// Parses a 'new' expression in C# (object/collection initializers) and Java
// (anonymous class bodies); for other languages only 'new' is consumed.
void UnwrappedLineParser::parseNew() {
  assert(FormatTok->is(tok::kw_new) && "'new' expected");
  nextToken();

  if (Style.isCSharp()) {
    do {
      if (FormatTok->is(tok::l_brace))
        parseBracedList();

      if (FormatTok->isOneOf(tok::semi, tok::comma))
        return;

      nextToken();
    } while (!eof());
  }

  if (Style.Language != FormatStyle::LK_Java)
    return;

  // In Java, we can parse everything up to the parens, which aren't optional.
  do {
    // There should not be a ;, { or } before the new's open paren.
    if (FormatTok->isOneOf(tok::semi, tok::l_brace, tok::r_brace))
      return;

    // Consume the parens.
    if (FormatTok->is(tok::l_paren)) {
      parseParens();

      // If there is a class body of an anonymous class, consume that as
      // child.
      if (FormatTok->is(tok::l_brace))
        parseChildBlock();
      return;
    }
    nextToken();
  } while (!eof());
}

// Parses a for/while loop (or a foreach macro) header and body.
void UnwrappedLineParser::parseForOrWhileLoop() {
  assert(FormatTok->isOneOf(tok::kw_for, tok::kw_while, TT_ForEachMacro) &&
         "'for', 'while' or foreach macro expected");
  nextToken();
  // JS' for await ( ...
if (Style.Language == FormatStyle::LK_JavaScript &&
      FormatTok->is(Keywords.kw_await))
    nextToken();
  if (FormatTok->Tok.is(tok::l_paren))
    parseParens();
  if (FormatTok->Tok.is(tok::l_brace)) {
    CompoundStatementIndenter Indenter(this, Style, Line->Level);
    parseBlock(/*MustBeDeclaration=*/false);
    addUnwrappedLine();
  } else {
    // Unbraced loop body: single indented structural element.
    addUnwrappedLine();
    ++Line->Level;
    parseStructuralElement();
    --Line->Level;
  }
}

// Parses a do-while loop; the trailing 'while (...)' is handled as a
// structural element so the condition stays attached.
void UnwrappedLineParser::parseDoWhile() {
  assert(FormatTok->Tok.is(tok::kw_do) && "'do' expected");
  nextToken();
  if (FormatTok->Tok.is(tok::l_brace)) {
    CompoundStatementIndenter Indenter(this, Style, Line->Level);
    parseBlock(/*MustBeDeclaration=*/false);
    if (Style.BraceWrapping.BeforeWhile)
      addUnwrappedLine();
  } else {
    addUnwrappedLine();
    ++Line->Level;
    parseStructuralElement();
    --Line->Level;
  }

  // FIXME: Add error handling.
  if (!FormatTok->Tok.is(tok::kw_while)) {
    addUnwrappedLine();
    return;
  }

  // If in Whitesmiths mode, the line with the while() needs to be indented
  // to the same level as the block.
  if (Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths)
    ++Line->Level;

  nextToken();
  parseStructuralElement();
}

// Parses a goto/case label (the colon was already consumed by the caller) and
// whatever statement or block follows it on the label's level.
void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
  nextToken();
  unsigned OldLineLevel = Line->Level;
  if (Line->Level > 1 || (!Line->InPPDirective && Line->Level > 0))
    --Line->Level;
  if (LeftAlignLabel)
    Line->Level = 0;
  if (!Style.IndentCaseBlocks && CommentsBeforeNextToken.empty() &&
      FormatTok->Tok.is(tok::l_brace)) {
    CompoundStatementIndenter Indenter(this, Line->Level,
                                       Style.BraceWrapping.AfterCaseLabel,
                                       Style.BraceWrapping.IndentBraces);
    parseBlock(/*MustBeDeclaration=*/false);
    if (FormatTok->Tok.is(tok::kw_break)) {
      if (Style.BraceWrapping.AfterControlStatement ==
          FormatStyle::BWACS_Always) {
        addUnwrappedLine();
        if (!Style.IndentCaseBlocks &&
            Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths) {
          Line->Level++;
        }
      }
      parseStructuralElement();
    }
    addUnwrappedLine();
  } else {
    if (FormatTok->is(tok::semi))
      nextToken();
    addUnwrappedLine();
  }
  Line->Level = OldLineLevel;
  if (FormatTok->isNot(tok::l_brace)) {
    parseStructuralElement();
    addUnwrappedLine();
  }
}

// Parses a 'case <expr>:' label; the expression is consumed token-by-token.
void UnwrappedLineParser::parseCaseLabel() {
  assert(FormatTok->Tok.is(tok::kw_case) && "'case' expected");

  // FIXME: fix handling of complex expressions here.
  do {
    nextToken();
  } while (!eof() && !FormatTok->Tok.is(tok::colon));
  parseLabel();
}

// Parses a switch statement header and its (braced or unbraced) body.
void UnwrappedLineParser::parseSwitch() {
  assert(FormatTok->Tok.is(tok::kw_switch) && "'switch' expected");
  nextToken();
  if (FormatTok->Tok.is(tok::l_paren))
    parseParens();
  if (FormatTok->Tok.is(tok::l_brace)) {
    CompoundStatementIndenter Indenter(this, Style, Line->Level);
    parseBlock(/*MustBeDeclaration=*/false);
    addUnwrappedLine();
  } else {
    addUnwrappedLine();
    ++Line->Level;
    parseStructuralElement();
    --Line->Level;
  }
}

// Parses an access specifier (public/private/..., plus Qt slots) and its
// trailing colon, ending the unwrapped line.
void UnwrappedLineParser::parseAccessSpecifier() {
  nextToken();
  // Understand Qt's slots.
if (FormatTok->isOneOf(Keywords.kw_slots, Keywords.kw_qslots))
    nextToken();
  // Otherwise, we don't know what it is, and we'd better keep the next token.
  if (FormatTok->Tok.is(tok::colon))
    nextToken();
  addUnwrappedLine();
}

// Parses 'concept <name> = <constraint-expression>;'. Returns early (leaving
// the rest to structural-element parsing) when the expected tokens are
// missing.
void UnwrappedLineParser::parseConcept() {
  assert(FormatTok->Tok.is(tok::kw_concept) && "'concept' expected");
  nextToken();
  if (!FormatTok->Tok.is(tok::identifier))
    return;
  nextToken();
  if (!FormatTok->Tok.is(tok::equal))
    return;
  nextToken();
  if (FormatTok->Tok.is(tok::kw_requires)) {
    nextToken();
    parseRequiresExpression(Line->Level);
  } else {
    parseConstraintExpression(Line->Level);
  }
}

// Parses a requires-expression: optional parameter list, then either a braced
// requirement body or a constraint expression. OriginalLevel is the line
// level at the enclosing 'requires', used to unwind IndentRequires indent.
void UnwrappedLineParser::parseRequiresExpression(unsigned int OriginalLevel) {
  // requires (R range)
  if (FormatTok->Tok.is(tok::l_paren)) {
    parseParens();
    if (Style.IndentRequires && OriginalLevel != Line->Level) {
      addUnwrappedLine();
      --Line->Level;
    }
  }

  if (FormatTok->Tok.is(tok::l_brace)) {
    if (Style.BraceWrapping.AfterFunction)
      addUnwrappedLine();
    FormatTok->setType(TT_FunctionLBrace);
    parseBlock(/*MustBeDeclaration=*/false);
    addUnwrappedLine();
  } else {
    parseConstraintExpression(OriginalLevel);
  }
}

// Parses a constraint expression of '&&'/'||'-joined atomic constraints,
// possibly with nested requires-expressions and template argument lists.
void UnwrappedLineParser::parseConstraintExpression(
    unsigned int OriginalLevel) {
  // requires Id && Id || Id
  while (
      FormatTok->isOneOf(tok::identifier, tok::kw_requires, tok::coloncolon)) {
    nextToken();
    while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::less,
                              tok::greater, tok::comma, tok::ellipsis)) {
      if (FormatTok->Tok.is(tok::less)) {
        // Template argument list: reuse braced-list parsing with '>' closer.
        parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
                        /*ClosingBraceKind=*/tok::greater);
        continue;
      }
      nextToken();
    }
    if (FormatTok->Tok.is(tok::kw_requires)) {
      parseRequiresExpression(OriginalLevel);
    }
    if (FormatTok->Tok.is(tok::less)) {
      parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
                      /*ClosingBraceKind=*/tok::greater);
    }

    if (FormatTok->Tok.is(tok::l_paren)) {
      parseParens();
    }
    if (FormatTok->Tok.is(tok::l_brace)) {
      if (Style.BraceWrapping.AfterFunction)
        addUnwrappedLine();
      FormatTok->setType(TT_FunctionLBrace);
      parseBlock(/*MustBeDeclaration=*/false);
    }
    if (FormatTok->Tok.is(tok::semi)) {
      // Eat any trailing semi.
      nextToken();
      addUnwrappedLine();
    }
    if (FormatTok->Tok.is(tok::colon)) {
      return;
    }
    if (!FormatTok->Tok.isOneOf(tok::ampamp, tok::pipepipe)) {
      // No further conjunction/disjunction: the constraint expression ends.
      if (FormatTok->Previous &&
          !FormatTok->Previous->isOneOf(tok::identifier, tok::kw_requires,
                                        tok::coloncolon)) {
        addUnwrappedLine();
      }
      if (Style.IndentRequires && OriginalLevel != Line->Level) {
        --Line->Level;
      }
      break;
    } else {
      FormatTok->setType(TT_ConstraintJunctions);
    }

    nextToken();
  }
}

// Parses a requires-clause ('requires' after a template parameter list or on
// a declaration) and the constraint that follows it.
void UnwrappedLineParser::parseRequires() {
  assert(FormatTok->Tok.is(tok::kw_requires) && "'requires' expected");

  unsigned OriginalLevel = Line->Level;
  if (FormatTok->Previous && FormatTok->Previous->is(tok::greater)) {
    // 'requires' directly after 'template <...>' goes on its own line.
    addUnwrappedLine();
    if (Style.IndentRequires) {
      Line->Level++;
    }
  }
  nextToken();

  parseRequiresExpression(OriginalLevel);
}

// Parses an enum declaration/definition header and body. Returns true when a
// declaration (no body) or a complete enum was consumed; returns false when
// the tokens turn out not to be an enum after all.
bool UnwrappedLineParser::parseEnum() {
  // Won't be 'enum' for NS_ENUMs.
  if (FormatTok->Tok.is(tok::kw_enum))
    nextToken();

  // In TypeScript, "enum" can also be used as property name, e.g. in
  // interface declarations. An "enum" keyword followed by a colon would be a
  // syntax error and thus assume it is just an identifier.
  if (Style.Language == FormatStyle::LK_JavaScript &&
      FormatTok->isOneOf(tok::colon, tok::question))
    return false;

  // In protobuf, "enum" can be used as a field name.
  if (Style.Language == FormatStyle::LK_Proto && FormatTok->is(tok::equal))
    return false;

  // Eat up enum class ...
  if (FormatTok->Tok.is(tok::kw_class) || FormatTok->Tok.is(tok::kw_struct))
    nextToken();

  while (FormatTok->Tok.getIdentifierInfo() ||
         FormatTok->isOneOf(tok::colon, tok::coloncolon, tok::less,
                            tok::greater, tok::comma, tok::question)) {
    nextToken();
    // We can have macros or attributes in between 'enum' and the enum name.
if (FormatTok->is(tok::l_paren))
      parseParens();
    if (FormatTok->is(tok::identifier)) {
      nextToken();
      // If there are two identifiers in a row, this is likely an elaborate
      // return type. In Java, this can be "implements", etc.
      if (Style.isCpp() && FormatTok->is(tok::identifier))
        return false;
    }
  }

  // Just a declaration or something is wrong.
  if (FormatTok->isNot(tok::l_brace))
    return true;
  FormatTok->setBlockKind(BK_Block);

  if (Style.Language == FormatStyle::LK_Java) {
    // Java enums are different.
    parseJavaEnumBody();
    return true;
  }
  if (Style.Language == FormatStyle::LK_Proto) {
    parseBlock(/*MustBeDeclaration=*/true);
    return true;
  }

  if (!Style.AllowShortEnumsOnASingleLine)
    addUnwrappedLine();
  // Parse enum body.
  nextToken();
  if (!Style.AllowShortEnumsOnASingleLine) {
    addUnwrappedLine();
    Line->Level += 1;
  }
  bool HasError = !parseBracedList(/*ContinueOnSemicolons=*/true,
                                   /*IsEnum=*/true);
  if (!Style.AllowShortEnumsOnASingleLine)
    Line->Level -= 1;
  if (HasError) {
    if (FormatTok->is(tok::semi))
      nextToken();
    addUnwrappedLine();
  }
  return true;

  // There is no addUnwrappedLine() here so that we fall through to parsing a
  // structural element afterwards. Thus, in "enum A {} n, m;",
  // "} n, m;" will end up in one unwrapped line.
}

// Parses a record (class/struct/interface) and, for Java/JS/C#, finishes the
// line; in C++ it falls through so trailing declarators join the line.
bool UnwrappedLineParser::parseStructLike() {
  // parseRecord falls through and does not yet add an unwrapped line as a
  // record declaration or definition can start a structural element.
  parseRecord();
  // This does not apply to Java, JavaScript and C#.
  if (Style.Language == FormatStyle::LK_Java ||
      Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) {
    if (FormatTok->is(tok::semi))
      nextToken();
    addUnwrappedLine();
    return true;
  }
  return false;
}

namespace {
// A class used to set and restore the Token position when peeking
// ahead in the token source.
class ScopedTokenPosition {
  unsigned StoredPosition;
  FormatTokenSource *Tokens;

public:
  ScopedTokenPosition(FormatTokenSource *Tokens) : Tokens(Tokens) {
    assert(Tokens && "Tokens expected to not be null");
    StoredPosition = Tokens->getPosition();
  }

  // Rewind on scope exit (RAII): the token source is restored to where
  // peeking began.
  ~ScopedTokenPosition() { Tokens->setPosition(StoredPosition); }
};
} // namespace

// Look to see if we have [[ by looking ahead, if
// its not then rewind to the original position.
bool UnwrappedLineParser::tryToParseSimpleAttribute() {
  ScopedTokenPosition AutoPosition(Tokens);
  FormatToken *Tok = Tokens->getNextToken();
  // We already read the first [ check for the second.
  if (Tok && !Tok->is(tok::l_square)) {
    return false;
  }
  // Double check that the attribute is just something
  // fairly simple.
  while (Tok) {
    if (Tok->is(tok::r_square)) {
      break;
    }
    Tok = Tokens->getNextToken();
  }
  Tok = Tokens->getNextToken();
  if (Tok && !Tok->is(tok::r_square)) {
    return false;
  }
  Tok = Tokens->getNextToken();
  if (Tok && Tok->is(tok::semi)) {
    return false;
  }
  return true;
}

// Parses the body of a Java enum, which may contain constants with arguments
// and class bodies, and a class body after the constants' ';'.
void UnwrappedLineParser::parseJavaEnumBody() {
  // Determine whether the enum is simple, i.e. does not have a semicolon or
  // constants with class bodies. Simple enums can be formatted like braced
  // lists, contracted to a single line, etc.
  unsigned StoredPosition = Tokens->getPosition();
  bool IsSimple = true;
  FormatToken *Tok = Tokens->getNextToken();
  while (Tok) {
    if (Tok->is(tok::r_brace))
      break;
    if (Tok->isOneOf(tok::l_brace, tok::semi)) {
      IsSimple = false;
      break;
    }
    // FIXME: This will also mark enums with braces in the arguments to enum
    // constants as "not simple". This is probably fine in practice, though.
    Tok = Tokens->getNextToken();
  }
  FormatTok = Tokens->setPosition(StoredPosition);

  if (IsSimple) {
    nextToken();
    parseBracedList();
    addUnwrappedLine();
    return;
  }

  // Parse the body of a more complex enum.
  // First add a line for everything up to the "{".
  nextToken();
  addUnwrappedLine();
  ++Line->Level;

  // Parse the enum constants.
while (FormatTok) {
    if (FormatTok->is(tok::l_brace)) {
      // Parse the constant's class body.
      parseBlock(/*MustBeDeclaration=*/true, /*AddLevels=*/1u,
                 /*MunchSemi=*/false);
    } else if (FormatTok->is(tok::l_paren)) {
      parseParens();
    } else if (FormatTok->is(tok::comma)) {
      nextToken();
      addUnwrappedLine();
    } else if (FormatTok->is(tok::semi)) {
      // ';' ends the constant list; a class body may follow.
      nextToken();
      addUnwrappedLine();
      break;
    } else if (FormatTok->is(tok::r_brace)) {
      addUnwrappedLine();
      break;
    } else {
      nextToken();
    }
  }

  // Parse the class body after the enum's ";" if any.
  parseLevel(/*HasOpeningBrace=*/true);
  nextToken();
  --Line->Level;
  addUnwrappedLine();
}

// Parses a record declaration/definition (class/struct/etc., possibly via
// macro), its name, base-class/template-argument region, and body.
void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
  const FormatToken &InitialToken = *FormatTok;
  nextToken();

  // The actual identifier can be a nested name specifier, and in macros
  // it is often token-pasted.
  // An [[attribute]] can be before the identifier.
  while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::hashhash,
                            tok::kw___attribute, tok::kw___declspec,
                            tok::kw_alignas, tok::l_square, tok::r_square) ||
         ((Style.Language == FormatStyle::LK_Java ||
           Style.Language == FormatStyle::LK_JavaScript) &&
          FormatTok->isOneOf(tok::period, tok::comma))) {
    if (Style.Language == FormatStyle::LK_JavaScript &&
        FormatTok->isOneOf(Keywords.kw_extends, Keywords.kw_implements)) {
      // JavaScript/TypeScript supports inline object types in
      // extends/implements positions:
      // class Foo implements {bar: number} { }
      nextToken();
      if (FormatTok->is(tok::l_brace)) {
        tryToParseBracedList();
        continue;
      }
    }
    // Heuristic: an all-caps identifier is treated as a macro, which may be
    // followed by parenthesized arguments or an attribute.
    bool IsNonMacroIdentifier =
        FormatTok->is(tok::identifier) &&
        FormatTok->TokenText != FormatTok->TokenText.upper();
    nextToken();
    // We can have macros or attributes in between 'class' and the class name.
    if (!IsNonMacroIdentifier) {
      if (FormatTok->Tok.is(tok::l_paren)) {
        parseParens();
      } else if (FormatTok->is(TT_AttributeSquare)) {
        parseSquare();
        // Consume the closing TT_AttributeSquare.
        if (FormatTok->Next && FormatTok->is(TT_AttributeSquare))
          nextToken();
      }
    }
  }

  // Note that parsing away template declarations here leads to incorrectly
  // accepting function declarations as record declarations.
  // In general, we cannot solve this problem. Consider:
  // class A B() {}
  // which can be a function definition or a class definition when B() is a
  // macro. If we find enough real-world cases where this is a problem, we
  // can parse for the 'template' keyword in the beginning of the statement,
  // and thus rule out the record production in case there is no template
  // (this would still leave us with an ambiguity between template function
  // and class declarations).
  if (FormatTok->isOneOf(tok::colon, tok::less)) {
    while (!eof()) {
      if (FormatTok->is(tok::l_brace)) {
        calculateBraceTypes(/*ExpectClassBody=*/true);
        if (!tryToParseBracedList())
          break;
      }
      if (FormatTok->Tok.is(tok::semi))
        return;
      if (Style.isCSharp() && FormatTok->is(Keywords.kw_where)) {
        // C# generic type constraints get their own line(s).
        addUnwrappedLine();
        nextToken();
        parseCSharpGenericTypeConstraint();
        break;
      }
      nextToken();
    }
  }
  if (FormatTok->Tok.is(tok::l_brace)) {
    if (ParseAsExpr) {
      parseChildBlock();
    } else {
      if (ShouldBreakBeforeBrace(Style, InitialToken))
        addUnwrappedLine();

      unsigned AddLevels = Style.IndentAccessModifiers ? 2u : 1u;
      parseBlock(/*MustBeDeclaration=*/true, AddLevels, /*MunchSemi=*/false);
    }
  }
  // There is no addUnwrappedLine() here so that we fall through to parsing a
  // structural element afterwards. Thus, in "class A {} n, m;",
  // "} n, m;" will end up in one unwrapped line.
}

// Parses an Objective-C method declaration/definition; the caller has already
// consumed the leading '-' or '+'.
void UnwrappedLineParser::parseObjCMethod() {
  assert(FormatTok->Tok.isOneOf(tok::l_paren, tok::identifier) &&
         "'(' or identifier expected.");
  do {
    if (FormatTok->Tok.is(tok::semi)) {
      // Declaration only.
      nextToken();
      addUnwrappedLine();
      return;
    } else if (FormatTok->Tok.is(tok::l_brace)) {
      // Definition with a body.
      if (Style.BraceWrapping.AfterFunction)
        addUnwrappedLine();
      parseBlock(/*MustBeDeclaration=*/false);
      addUnwrappedLine();
      return;
    } else {
      nextToken();
    }
  } while (!eof());
}

// Consumes an ObjC protocol conformance list '<...>' (no nested angles).
void UnwrappedLineParser::parseObjCProtocolList() {
  assert(FormatTok->Tok.is(tok::less) && "'<' expected.");
  do {
    nextToken();
    // Early exit in case someone forgot a close angle.
    if (FormatTok->isOneOf(tok::semi, tok::l_brace) ||
        FormatTok->Tok.isObjCAtKeyword(tok::objc_end))
      return;
  } while (!eof() && FormatTok->Tok.isNot(tok::greater));
  nextToken(); // Skip '>'.
}

// Parses the members of an @interface/@implementation/@protocol up to and
// including '@end'.
void UnwrappedLineParser::parseObjCUntilAtEnd() {
  do {
    if (FormatTok->Tok.isObjCAtKeyword(tok::objc_end)) {
      nextToken();
      addUnwrappedLine();
      break;
    }
    if (FormatTok->is(tok::l_brace)) {
      parseBlock(/*MustBeDeclaration=*/false);
      // In ObjC interfaces, nothing should be following the "}".
      addUnwrappedLine();
    } else if (FormatTok->is(tok::r_brace)) {
      // Ignore stray "}". parseStructuralElement doesn't consume them.
      nextToken();
      addUnwrappedLine();
    } else if (FormatTok->isOneOf(tok::minus, tok::plus)) {
      nextToken();
      parseObjCMethod();
    } else {
      parseStructuralElement();
    }
  } while (!eof());
}

// Parses @interface/@implementation: name, lightweight generics, base class
// or category, protocol list, instance-variable block, and members.
void UnwrappedLineParser::parseObjCInterfaceOrImplementation() {
  assert(FormatTok->Tok.getObjCKeywordID() == tok::objc_interface ||
         FormatTok->Tok.getObjCKeywordID() == tok::objc_implementation);
  nextToken();
  nextToken(); // interface name

  // @interface can be followed by a lightweight generic
  // specialization list, then either a base class or a category.
  if (FormatTok->Tok.is(tok::less)) {
    parseObjCLightweightGenerics();
  }
  if (FormatTok->Tok.is(tok::colon)) {
    nextToken();
    nextToken(); // base class name
    // The base class can also have lightweight generics applied to it.
    if (FormatTok->Tok.is(tok::less)) {
      parseObjCLightweightGenerics();
    }
  } else if (FormatTok->Tok.is(tok::l_paren))
    // Skip category, if present.
    parseParens();

  if (FormatTok->Tok.is(tok::less))
    parseObjCProtocolList();

  if (FormatTok->Tok.is(tok::l_brace)) {
    if (Style.BraceWrapping.AfterObjCDeclaration)
      addUnwrappedLine();
    parseBlock(/*MustBeDeclaration=*/true);
  }

  // With instance variables, this puts '}' on its own line. Without instance
  // variables, this ends the @interface line.
  addUnwrappedLine();

  parseObjCUntilAtEnd();
}

// Consumes an ObjC lightweight-generics specialization '<...>', tracking
// nesting depth since these lists, unlike protocol lists, may nest.
void UnwrappedLineParser::parseObjCLightweightGenerics() {
  assert(FormatTok->Tok.is(tok::less));
  // Unlike protocol lists, generic parameterizations support
  // nested angles, e.g.
  //
  // @interface Foo<KeyType : id<NSCopying>> : NSObject
  //
  // (exact example garbled in extraction — angle contents reconstructed)
  // so we need to count how many open angles we have left.
  unsigned NumOpenAngles = 1;
  do {
    nextToken();
    // Early exit in case someone forgot a close angle.
    if (FormatTok->isOneOf(tok::semi, tok::l_brace) ||
        FormatTok->Tok.isObjCAtKeyword(tok::objc_end))
      break;
    if (FormatTok->Tok.is(tok::less))
      ++NumOpenAngles;
    else if (FormatTok->Tok.is(tok::greater)) {
      assert(NumOpenAngles > 0 && "'>' makes NumOpenAngles negative");
      --NumOpenAngles;
    }
  } while (!eof() && NumOpenAngles != 0);
  nextToken(); // Skip '>'.
}

// Returns true for the declaration/definition form of @protocol,
// false for the expression form.
bool UnwrappedLineParser::parseObjCProtocol() {
  assert(FormatTok->Tok.getObjCKeywordID() == tok::objc_protocol);
  nextToken();

  if (FormatTok->is(tok::l_paren))
    // The expression form of @protocol, e.g. "Protocol* p = @protocol(foo);".
    return false;

  // The definition/declaration form,
  // @protocol Foo
  // - (int)someMethod;
  // @end
  nextToken(); // protocol name

  if (FormatTok->Tok.is(tok::less))
    parseObjCProtocolList();

  // Check for protocol declaration.
  if (FormatTok->Tok.is(tok::semi)) {
    nextToken();
    addUnwrappedLine();
    return true;
  }

  addUnwrappedLine();
  parseObjCUntilAtEnd();
  return true;
}

// Parses ES6 import/export statements, consuming through the terminating ';'
// for imports and re-export forms, and leaving 'export default <decl>' for
// structural-element parsing.
void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
  bool IsImport = FormatTok->is(Keywords.kw_import);
  assert(IsImport || FormatTok->is(tok::kw_export));
  nextToken();

  // Consume the "default" in "export default class/function".
  if (FormatTok->is(tok::kw_default))
    nextToken();

  // Consume "async function", "function" and "default function", so that
  // these get parsed as free-standing JS functions, i.e. do not require a
  // trailing semicolon.
  if (FormatTok->is(Keywords.kw_async))
    nextToken();
  if (FormatTok->is(Keywords.kw_function)) {
    nextToken();
    return;
  }

  // For imports, `export *`, `export {...}`, consume the rest of the line up
  // to the terminating `;`. For everything else, just return and continue
  // parsing the structural element, i.e. the declaration or expression for
  // `export default`.
  if (!IsImport && !FormatTok->isOneOf(tok::l_brace, tok::star) &&
      !FormatTok->isStringLiteral())
    return;

  while (!eof()) {
    if (FormatTok->is(tok::semi))
      return;
    if (Line->Tokens.empty()) {
      // Common issue: Automatic Semicolon Insertion wrapped the line, so the
      // import statement should terminate.
      return;
    }
    if (FormatTok->is(tok::l_brace)) {
      FormatTok->setBlockKind(BK_Block);
      nextToken();
      parseBracedList();
    } else {
      nextToken();
    }
  }
}

// Parses a configured statement macro: the macro name (already current),
// optional parenthesized arguments, and an optional trailing ';'.
void UnwrappedLineParser::parseStatementMacro() {
  nextToken();
  if (FormatTok->is(tok::l_paren))
    parseParens();
  if (FormatTok->is(tok::semi))
    nextToken();
  addUnwrappedLine();
}

LLVM_ATTRIBUTE_UNUSED
static void printDebugInfo(const UnwrappedLine &Line, StringRef Prefix = "") {
  llvm::dbgs() << Prefix << "Line(" << Line.Level
               << ", FSC=" << Line.FirstStartColumn << ")"
               << (Line.InPPDirective ?
" MACRO" : "") << ": ";
  for (std::list<UnwrappedLineNode>::const_iterator I = Line.Tokens.begin(),
                                                    E = Line.Tokens.end();
       I != E; ++I) {
    llvm::dbgs() << I->Tok->Tok.getName() << "["
                 << "T=" << (unsigned)I->Tok->getType()
                 << ", OC=" << I->Tok->OriginalColumn << "] ";
  }
  for (std::list<UnwrappedLineNode>::const_iterator I = Line.Tokens.begin(),
                                                    E = Line.Tokens.end();
       I != E; ++I) {
    const UnwrappedLineNode &Node = *I;
    for (SmallVectorImpl<UnwrappedLine>::const_iterator
             I = Node.Children.begin(),
             E = Node.Children.end();
         I != E; ++I) {
      printDebugInfo(*I, "\nChild: ");
    }
  }
  llvm::dbgs() << "\n";
}

void UnwrappedLineParser::addUnwrappedLine(LineLevel AdjustLevel) {
  if (Line->Tokens.empty())
    return;
  LLVM_DEBUG({
    if (CurrentLines == &Lines)
      printDebugInfo(*Line);
  });

  // If this line closes a block when in Whitesmiths mode, remember that
  // information so that the level can be decreased after the line is added.
  // This has to happen after the addition of the line since the line itself
  // needs to be indented.
  bool ClosesWhitesmithsBlock =
      Line->MatchingOpeningBlockLineIndex != UnwrappedLine::kInvalidIndex &&
      Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths;

  CurrentLines->push_back(std::move(*Line));
  Line->Tokens.clear();
  Line->MatchingOpeningBlockLineIndex = UnwrappedLine::kInvalidIndex;
  Line->FirstStartColumn = 0;

  if (ClosesWhitesmithsBlock && AdjustLevel == LineLevel::Remove)
    --Line->Level;
  if (CurrentLines == &Lines && !PreprocessorDirectives.empty()) {
    CurrentLines->append(
        std::make_move_iterator(PreprocessorDirectives.begin()),
        std::make_move_iterator(PreprocessorDirectives.end()));
    PreprocessorDirectives.clear();
  }
  // Disconnect the current token from the last token on the previous line.
FormatTok->Previous = nullptr; } bool UnwrappedLineParser::eof() const { return FormatTok->Tok.is(tok::eof); } bool UnwrappedLineParser::isOnNewLine(const FormatToken &FormatTok) { return (Line->InPPDirective || FormatTok.HasUnescapedNewline) && FormatTok.NewlinesBefore > 0; } // Checks if \p FormatTok is a line comment that continues the line comment // section on \p Line. static bool continuesLineCommentSection(const FormatToken &FormatTok, const UnwrappedLine &Line, const llvm::Regex &CommentPragmasRegex) { if (Line.Tokens.empty()) return false; StringRef IndentContent = FormatTok.TokenText; if (FormatTok.TokenText.startswith("//") || FormatTok.TokenText.startswith("/*")) IndentContent = FormatTok.TokenText.substr(2); if (CommentPragmasRegex.match(IndentContent)) return false; // If Line starts with a line comment, then FormatTok continues the comment // section if its original column is greater or equal to the original start // column of the line. // // Define the min column token of a line as follows: if a line ends in '{' or // contains a '{' followed by a line comment, then the min column token is // that '{'. Otherwise, the min column token of the line is the first token of // the line. // // If Line starts with a token other than a line comment, then FormatTok // continues the comment section if its original column is greater than the // original start column of the min column token of the line. 
// // For example, the second line comment continues the first in these cases: // // // first line // // second line // // and: // // // first line // // second line // // and: // // int i; // first line // // second line // // and: // // do { // first line // // second line // int i; // } while (true); // // and: // // enum { // a, // first line // // second line // b // }; // // The second line comment doesn't continue the first in these cases: // // // first line // // second line // // and: // // int i; // first line // // second line // // and: // // do { // first line // // second line // int i; // } while (true); // // and: // // enum { // a, // first line // // second line // }; const FormatToken *MinColumnToken = Line.Tokens.front().Tok; // Scan for '{//'. If found, use the column of '{' as a min column for line // comment section continuation. const FormatToken *PreviousToken = nullptr; for (const UnwrappedLineNode &Node : Line.Tokens) { if (PreviousToken && PreviousToken->is(tok::l_brace) && isLineComment(*Node.Tok)) { MinColumnToken = PreviousToken; break; } PreviousToken = Node.Tok; // Grab the last newline preceding a token in this unwrapped line. if (Node.Tok->NewlinesBefore > 0) { MinColumnToken = Node.Tok; } } if (PreviousToken && PreviousToken->is(tok::l_brace)) { MinColumnToken = PreviousToken; } return continuesLineComment(FormatTok, /*Previous=*/Line.Tokens.back().Tok, MinColumnToken); } void UnwrappedLineParser::flushComments(bool NewlineBeforeNext) { bool JustComments = Line->Tokens.empty(); for (SmallVectorImpl::const_iterator I = CommentsBeforeNextToken.begin(), E = CommentsBeforeNextToken.end(); I != E; ++I) { // Line comments that belong to the same line comment section are put on the // same line since later we might want to reflow content between them. 
// Additional fine-grained breaking of line comment sections is controlled // by the class BreakableLineCommentSection in case it is desirable to keep // several line comment sections in the same unwrapped line. // // FIXME: Consider putting separate line comment sections as children to the // unwrapped line instead. (*I)->ContinuesLineCommentSection = continuesLineCommentSection(**I, *Line, CommentPragmasRegex); if (isOnNewLine(**I) && JustComments && !(*I)->ContinuesLineCommentSection) addUnwrappedLine(); pushToken(*I); } if (NewlineBeforeNext && JustComments) addUnwrappedLine(); CommentsBeforeNextToken.clear(); } void UnwrappedLineParser::nextToken(int LevelDifference) { if (eof()) return; flushComments(isOnNewLine(*FormatTok)); pushToken(FormatTok); FormatToken *Previous = FormatTok; if (Style.Language != FormatStyle::LK_JavaScript) readToken(LevelDifference); else readTokenWithJavaScriptASI(); FormatTok->Previous = Previous; } void UnwrappedLineParser::distributeComments( const SmallVectorImpl &Comments, const FormatToken *NextTok) { // Whether or not a line comment token continues a line is controlled by // the method continuesLineCommentSection, with the following caveat: // // Define a trail of Comments to be a nonempty proper postfix of Comments such // that each comment line from the trail is aligned with the next token, if // the next token exists. If a trail exists, the beginning of the maximal // trail is marked as a start of a new comment section. // // For example in this code: // // int a; // line about a // // line 1 about b // // line 2 about b // int b; // // the two lines about b form a maximal trail, so there are two sections, the // first one consisting of the single comment "// line about a" and the // second one consisting of the next two comments. 
if (Comments.empty()) return; bool ShouldPushCommentsInCurrentLine = true; bool HasTrailAlignedWithNextToken = false; unsigned StartOfTrailAlignedWithNextToken = 0; if (NextTok) { // We are skipping the first element intentionally. for (unsigned i = Comments.size() - 1; i > 0; --i) { if (Comments[i]->OriginalColumn == NextTok->OriginalColumn) { HasTrailAlignedWithNextToken = true; StartOfTrailAlignedWithNextToken = i; } } } for (unsigned i = 0, e = Comments.size(); i < e; ++i) { FormatToken *FormatTok = Comments[i]; if (HasTrailAlignedWithNextToken && i == StartOfTrailAlignedWithNextToken) { FormatTok->ContinuesLineCommentSection = false; } else { FormatTok->ContinuesLineCommentSection = continuesLineCommentSection(*FormatTok, *Line, CommentPragmasRegex); } if (!FormatTok->ContinuesLineCommentSection && (isOnNewLine(*FormatTok) || FormatTok->IsFirst)) { ShouldPushCommentsInCurrentLine = false; } if (ShouldPushCommentsInCurrentLine) { pushToken(FormatTok); } else { CommentsBeforeNextToken.push_back(FormatTok); } } } void UnwrappedLineParser::readToken(int LevelDifference) { SmallVector Comments; do { FormatTok = Tokens->getNextToken(); assert(FormatTok); while (!Line->InPPDirective && FormatTok->Tok.is(tok::hash) && (FormatTok->HasUnescapedNewline || FormatTok->IsFirst)) { distributeComments(Comments, FormatTok); Comments.clear(); // If there is an unfinished unwrapped line, we flush the preprocessor // directives only after that unwrapped line was finished later. bool SwitchToPreprocessorLines = !Line->Tokens.empty(); ScopedLineState BlockState(*this, SwitchToPreprocessorLines); assert((LevelDifference >= 0 || static_cast(-LevelDifference) <= Line->Level) && "LevelDifference makes Line->Level negative"); Line->Level += LevelDifference; // Comments stored before the preprocessor directive need to be output // before the preprocessor directive, at the same level as the // preprocessor directive, as we consider them to apply to the directive. 
if (Style.IndentPPDirectives == FormatStyle::PPDIS_BeforeHash && PPBranchLevel > 0) Line->Level += PPBranchLevel; flushComments(isOnNewLine(*FormatTok)); parsePPDirective(); } while (FormatTok->getType() == TT_ConflictStart || FormatTok->getType() == TT_ConflictEnd || FormatTok->getType() == TT_ConflictAlternative) { if (FormatTok->getType() == TT_ConflictStart) { conditionalCompilationStart(/*Unreachable=*/false); } else if (FormatTok->getType() == TT_ConflictAlternative) { conditionalCompilationAlternative(); } else if (FormatTok->getType() == TT_ConflictEnd) { conditionalCompilationEnd(); } FormatTok = Tokens->getNextToken(); FormatTok->MustBreakBefore = true; } if (!PPStack.empty() && (PPStack.back().Kind == PP_Unreachable) && !Line->InPPDirective) { continue; } if (!FormatTok->Tok.is(tok::comment)) { distributeComments(Comments, FormatTok); Comments.clear(); return; } Comments.push_back(FormatTok); } while (!eof()); distributeComments(Comments, nullptr); Comments.clear(); } void UnwrappedLineParser::pushToken(FormatToken *Tok) { Line->Tokens.push_back(UnwrappedLineNode(Tok)); if (MustBreakBeforeNextToken) { Line->Tokens.back().Tok->MustBreakBefore = true; MustBreakBeforeNextToken = false; } } } // end namespace format } // end namespace clang diff --git a/clang/lib/Headers/openmp_wrappers/complex b/clang/lib/Headers/openmp_wrappers/complex index dfd6193c97cb..eb1ead207d58 100644 --- a/clang/lib/Headers/openmp_wrappers/complex +++ b/clang/lib/Headers/openmp_wrappers/complex @@ -1,46 +1,46 @@ /*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -=== * * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. * See https://llvm.org/LICENSE.txt for license information. 
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __CLANG_OPENMP_COMPLEX__
#define __CLANG_OPENMP_COMPLEX__

#ifndef _OPENMP
#error "This file is for OpenMP compilation only."
#endif

// We require std::math functions in the complex builtins below.
#include <cmath>

#define __OPENMP_NVPTX__
#include <__clang_cuda_complex_builtins.h>
#undef __OPENMP_NVPTX__

#endif

// Grab the host header too.
#include_next <complex>

// If we are compiling against libc++, the macro _LIBCPP_STD_VER should be set
// after including <complex> above. Since the complex header we use is a
// simplified version of the libc++, we don't need it in this case. If we
// compile against libstdc++, or any other standard library, we will overload
// the (hopefully template) functions in the <complex> header with the ones we
// got from libc++ which decomposes math functions, like `std::sin`, into
// arithmetic and calls to non-complex functions, all of which we can then
// handle.
#ifndef _LIBCPP_STD_VER

#pragma omp begin declare variant match(                                       \
-    device = {arch(nvptx, nvptx64)},                                          \
+    device = {arch(amdgcn, nvptx, nvptx64)},                                  \
     implementation = {extension(match_any, allow_templates)})

#include <complex_cmath.h>

#pragma omp end declare variant

#endif
diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp
index 3baccec2d7bb..f7e4110e6110 100644
--- a/clang/lib/Sema/SemaStmt.cpp
+++ b/clang/lib/Sema/SemaStmt.cpp
@@ -1,4761 +1,4762 @@
//===--- SemaStmt.cpp - Semantic Analysis for Statements ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis for statements.
// //===----------------------------------------------------------------------===// #include "clang/AST/ASTContext.h" #include "clang/AST/ASTDiagnostic.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/CharUnits.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/IgnoreExpr.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaInternal.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" using namespace clang; using namespace sema; StmtResult Sema::ActOnExprStmt(ExprResult FE, bool DiscardedValue) { if (FE.isInvalid()) return StmtError(); FE = ActOnFinishFullExpr(FE.get(), FE.get()->getExprLoc(), DiscardedValue); if (FE.isInvalid()) return StmtError(); // C99 6.8.3p2: The expression in an expression statement is evaluated as a // void expression for its side effects. Conversion to void allows any // operand, even incomplete types. // Same thing in for stmt first clause (when expr) and third clause. 
  return StmtResult(FE.getAs<Stmt>());
}

StmtResult Sema::ActOnExprStmtError() {
  DiscardCleanupsInEvaluationContext();
  return StmtError();
}

StmtResult Sema::ActOnNullStmt(SourceLocation SemiLoc,
                               bool HasLeadingEmptyMacro) {
  return new (Context) NullStmt(SemiLoc, HasLeadingEmptyMacro);
}

StmtResult Sema::ActOnDeclStmt(DeclGroupPtrTy dg, SourceLocation StartLoc,
                               SourceLocation EndLoc) {
  DeclGroupRef DG = dg.get();

  // If we have an invalid decl, just return an error.
  if (DG.isNull())
    return StmtError();

  return new (Context) DeclStmt(DG, StartLoc, EndLoc);
}

void Sema::ActOnForEachDeclStmt(DeclGroupPtrTy dg) {
  DeclGroupRef DG = dg.get();

  // If we don't have a declaration, or we have an invalid declaration,
  // just return.
  if (DG.isNull() || !DG.isSingleDecl())
    return;

  Decl *decl = DG.getSingleDecl();
  if (!decl || decl->isInvalidDecl())
    return;

  // Only variable declarations are permitted.
  VarDecl *var = dyn_cast<VarDecl>(decl);
  if (!var) {
    Diag(decl->getLocation(), diag::err_non_variable_decl_in_for);
    decl->setInvalidDecl();
    return;
  }

  // foreach variables are never actually initialized in the way that
  // the parser came up with.
  var->setInit(nullptr);

  // In ARC, we don't need to retain the iteration variable of a fast
  // enumeration loop.  Rather than actually trying to catch that
  // during declaration processing, we remove the consequences here.
  if (getLangOpts().ObjCAutoRefCount) {
    QualType type = var->getType();

    // Only do this if we inferred the lifetime.  Inferred lifetime
    // will show up as a local qualifier because explicit lifetime
    // should have shown up as an AttributedType instead.
    if (type.getLocalQualifiers().getObjCLifetime() == Qualifiers::OCL_Strong) {
      // Add 'const' and mark the variable as pseudo-strong.
      var->setType(type.withConst());
      var->setARCPseudoStrong(true);
    }
  }
}

/// Diagnose unused comparisons, both builtin and overloaded operators.
/// For '==' and '!=', suggest fixits for '=' or '|='.
/// /// Adding a cast to void (or other expression wrappers) will prevent the /// warning from firing. static bool DiagnoseUnusedComparison(Sema &S, const Expr *E) { SourceLocation Loc; bool CanAssign; enum { Equality, Inequality, Relational, ThreeWay } Kind; if (const BinaryOperator *Op = dyn_cast(E)) { if (!Op->isComparisonOp()) return false; if (Op->getOpcode() == BO_EQ) Kind = Equality; else if (Op->getOpcode() == BO_NE) Kind = Inequality; else if (Op->getOpcode() == BO_Cmp) Kind = ThreeWay; else { assert(Op->isRelationalOp()); Kind = Relational; } Loc = Op->getOperatorLoc(); CanAssign = Op->getLHS()->IgnoreParenImpCasts()->isLValue(); } else if (const CXXOperatorCallExpr *Op = dyn_cast(E)) { switch (Op->getOperator()) { case OO_EqualEqual: Kind = Equality; break; case OO_ExclaimEqual: Kind = Inequality; break; case OO_Less: case OO_Greater: case OO_GreaterEqual: case OO_LessEqual: Kind = Relational; break; case OO_Spaceship: Kind = ThreeWay; break; default: return false; } Loc = Op->getOperatorLoc(); CanAssign = Op->getArg(0)->IgnoreParenImpCasts()->isLValue(); } else { // Not a typo-prone comparison. return false; } // Suppress warnings when the operator, suspicious as it may be, comes from // a macro expansion. if (S.SourceMgr.isMacroBodyExpansion(Loc)) return false; S.Diag(Loc, diag::warn_unused_comparison) << (unsigned)Kind << E->getSourceRange(); // If the LHS is a plausible entity to assign to, provide a fixit hint to // correct common typos. 
if (CanAssign) { if (Kind == Inequality) S.Diag(Loc, diag::note_inequality_comparison_to_or_assign) << FixItHint::CreateReplacement(Loc, "|="); else if (Kind == Equality) S.Diag(Loc, diag::note_equality_comparison_to_assign) << FixItHint::CreateReplacement(Loc, "="); } return true; } static bool DiagnoseNoDiscard(Sema &S, const WarnUnusedResultAttr *A, SourceLocation Loc, SourceRange R1, SourceRange R2, bool IsCtor) { if (!A) return false; StringRef Msg = A->getMessage(); if (Msg.empty()) { if (IsCtor) return S.Diag(Loc, diag::warn_unused_constructor) << A << R1 << R2; return S.Diag(Loc, diag::warn_unused_result) << A << R1 << R2; } if (IsCtor) return S.Diag(Loc, diag::warn_unused_constructor_msg) << A << Msg << R1 << R2; return S.Diag(Loc, diag::warn_unused_result_msg) << A << Msg << R1 << R2; } void Sema::DiagnoseUnusedExprResult(const Stmt *S) { if (const LabelStmt *Label = dyn_cast_or_null(S)) return DiagnoseUnusedExprResult(Label->getSubStmt()); const Expr *E = dyn_cast_or_null(S); if (!E) return; // If we are in an unevaluated expression context, then there can be no unused // results because the results aren't expected to be used in the first place. if (isUnevaluatedContext()) return; SourceLocation ExprLoc = E->IgnoreParenImpCasts()->getExprLoc(); // In most cases, we don't want to warn if the expression is written in a // macro body, or if the macro comes from a system header. If the offending // expression is a call to a function with the warn_unused_result attribute, // we warn no matter the location. Because of the order in which the various // checks need to happen, we factor out the macro-related test here. 
bool ShouldSuppress = SourceMgr.isMacroBodyExpansion(ExprLoc) || SourceMgr.isInSystemMacro(ExprLoc); const Expr *WarnExpr; SourceLocation Loc; SourceRange R1, R2; if (!E->isUnusedResultAWarning(WarnExpr, Loc, R1, R2, Context)) return; // If this is a GNU statement expression expanded from a macro, it is probably // unused because it is a function-like macro that can be used as either an // expression or statement. Don't warn, because it is almost certainly a // false positive. if (isa(E) && Loc.isMacroID()) return; // Check if this is the UNREFERENCED_PARAMETER from the Microsoft headers. // That macro is frequently used to suppress "unused parameter" warnings, // but its implementation makes clang's -Wunused-value fire. Prevent this. if (isa(E->IgnoreImpCasts()) && Loc.isMacroID()) { SourceLocation SpellLoc = Loc; if (findMacroSpelling(SpellLoc, "UNREFERENCED_PARAMETER")) return; } // Okay, we have an unused result. Depending on what the base expression is, // we might want to make a more specific diagnostic. Check for one of these // cases now. unsigned DiagID = diag::warn_unused_expr; if (const FullExpr *Temps = dyn_cast(E)) E = Temps->getSubExpr(); if (const CXXBindTemporaryExpr *TempExpr = dyn_cast(E)) E = TempExpr->getSubExpr(); if (DiagnoseUnusedComparison(*this, E)) return; E = WarnExpr; if (const auto *Cast = dyn_cast(E)) if (Cast->getCastKind() == CK_NoOp || Cast->getCastKind() == CK_ConstructorConversion) E = Cast->getSubExpr()->IgnoreImpCasts(); if (const CallExpr *CE = dyn_cast(E)) { if (E->getType()->isVoidType()) return; if (DiagnoseNoDiscard(*this, cast_or_null( CE->getUnusedResultAttr(Context)), Loc, R1, R2, /*isCtor=*/false)) return; // If the callee has attribute pure, const, or warn_unused_result, warn with // a more specific message to make it clear what is happening. If the call // is written in a macro body, only warn if it has the warn_unused_result // attribute. 
if (const Decl *FD = CE->getCalleeDecl()) { if (ShouldSuppress) return; if (FD->hasAttr()) { Diag(Loc, diag::warn_unused_call) << R1 << R2 << "pure"; return; } if (FD->hasAttr()) { Diag(Loc, diag::warn_unused_call) << R1 << R2 << "const"; return; } } } else if (const auto *CE = dyn_cast(E)) { if (const CXXConstructorDecl *Ctor = CE->getConstructor()) { const auto *A = Ctor->getAttr(); A = A ? A : Ctor->getParent()->getAttr(); if (DiagnoseNoDiscard(*this, A, Loc, R1, R2, /*isCtor=*/true)) return; } } else if (const auto *ILE = dyn_cast(E)) { if (const TagDecl *TD = ILE->getType()->getAsTagDecl()) { if (DiagnoseNoDiscard(*this, TD->getAttr(), Loc, R1, R2, /*isCtor=*/false)) return; } } else if (ShouldSuppress) return; E = WarnExpr; if (const ObjCMessageExpr *ME = dyn_cast(E)) { if (getLangOpts().ObjCAutoRefCount && ME->isDelegateInitCall()) { Diag(Loc, diag::err_arc_unused_init_message) << R1; return; } const ObjCMethodDecl *MD = ME->getMethodDecl(); if (MD) { if (DiagnoseNoDiscard(*this, MD->getAttr(), Loc, R1, R2, /*isCtor=*/false)) return; } } else if (const PseudoObjectExpr *POE = dyn_cast(E)) { const Expr *Source = POE->getSyntacticForm(); // Handle the actually selected call of an OpenMP specialized call. if (LangOpts.OpenMP && isa(Source) && POE->getNumSemanticExprs() == 1 && isa(POE->getSemanticExpr(0))) return DiagnoseUnusedExprResult(POE->getSemanticExpr(0)); if (isa(Source)) DiagID = diag::warn_unused_container_subscript_expr; else DiagID = diag::warn_unused_property_expr; } else if (const CXXFunctionalCastExpr *FC = dyn_cast(E)) { const Expr *E = FC->getSubExpr(); if (const CXXBindTemporaryExpr *TE = dyn_cast(E)) E = TE->getSubExpr(); if (isa(E)) return; if (const CXXConstructExpr *CE = dyn_cast(E)) if (const CXXRecordDecl *RD = CE->getType()->getAsCXXRecordDecl()) if (!RD->getAttr()) return; } // Diagnose "(void*) blah" as a typo for "(void) blah". 
else if (const CStyleCastExpr *CE = dyn_cast(E)) { TypeSourceInfo *TI = CE->getTypeInfoAsWritten(); QualType T = TI->getType(); // We really do want to use the non-canonical type here. if (T == Context.VoidPtrTy) { PointerTypeLoc TL = TI->getTypeLoc().castAs(); Diag(Loc, diag::warn_unused_voidptr) << FixItHint::CreateRemoval(TL.getStarLoc()); return; } } // Tell the user to assign it into a variable to force a volatile load if this // isn't an array. if (E->isGLValue() && E->getType().isVolatileQualified() && !E->getType()->isArrayType()) { Diag(Loc, diag::warn_unused_volatile) << R1 << R2; return; } DiagRuntimeBehavior(Loc, nullptr, PDiag(DiagID) << R1 << R2); } void Sema::ActOnStartOfCompoundStmt(bool IsStmtExpr) { PushCompoundScope(IsStmtExpr); } void Sema::ActOnAfterCompoundStatementLeadingPragmas() { if (getCurFPFeatures().isFPConstrained()) { FunctionScopeInfo *FSI = getCurFunction(); assert(FSI); FSI->setUsesFPIntrin(); } } void Sema::ActOnFinishOfCompoundStmt() { PopCompoundScope(); } sema::CompoundScopeInfo &Sema::getCurCompoundScope() const { return getCurFunction()->CompoundScopes.back(); } StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef Elts, bool isStmtExpr) { const unsigned NumElts = Elts.size(); // If we're in C89 mode, check that we don't have any decls after stmts. If // so, emit an extension diagnostic. if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) { // Note that __extension__ can be around a decl. unsigned i = 0; // Skip over all declarations. for (; i != NumElts && isa(Elts[i]); ++i) /*empty*/; // We found the end of the list or a statement. Scan for another declstmt. for (; i != NumElts && !isa(Elts[i]); ++i) /*empty*/; if (i != NumElts) { Decl *D = *cast(Elts[i])->decl_begin(); Diag(D->getLocation(), diag::ext_mixed_decls_code); } } // Check for suspicious empty body (null statement) in `for' and `while' // statements. Don't do anything for template instantiations, this just adds // noise. 
if (NumElts != 0 && !CurrentInstantiationScope && getCurCompoundScope().HasEmptyLoopBodies) { for (unsigned i = 0; i != NumElts - 1; ++i) DiagnoseEmptyLoopBody(Elts[i], Elts[i + 1]); } return CompoundStmt::Create(Context, Elts, L, R); } ExprResult Sema::ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val) { if (!Val.get()) return Val; if (DiagnoseUnexpandedParameterPack(Val.get())) return ExprError(); // If we're not inside a switch, let the 'case' statement handling diagnose // this. Just clean up after the expression as best we can. if (getCurFunction()->SwitchStack.empty()) return ActOnFinishFullExpr(Val.get(), Val.get()->getExprLoc(), false, getLangOpts().CPlusPlus11); Expr *CondExpr = getCurFunction()->SwitchStack.back().getPointer()->getCond(); if (!CondExpr) return ExprError(); QualType CondType = CondExpr->getType(); auto CheckAndFinish = [&](Expr *E) { if (CondType->isDependentType() || E->isTypeDependent()) return ExprResult(E); if (getLangOpts().CPlusPlus11) { // C++11 [stmt.switch]p2: the constant-expression shall be a converted // constant expression of the promoted type of the switch condition. 
llvm::APSInt TempVal; return CheckConvertedConstantExpression(E, CondType, TempVal, CCEK_CaseValue); } ExprResult ER = E; if (!E->isValueDependent()) ER = VerifyIntegerConstantExpression(E, AllowFold); if (!ER.isInvalid()) ER = DefaultLvalueConversion(ER.get()); if (!ER.isInvalid()) ER = ImpCastExprToType(ER.get(), CondType, CK_IntegralCast); if (!ER.isInvalid()) ER = ActOnFinishFullExpr(ER.get(), ER.get()->getExprLoc(), false); return ER; }; ExprResult Converted = CorrectDelayedTyposInExpr( Val, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false, CheckAndFinish); if (Converted.get() == Val.get()) Converted = CheckAndFinish(Val.get()); return Converted; } StmtResult Sema::ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHSVal, SourceLocation DotDotDotLoc, ExprResult RHSVal, SourceLocation ColonLoc) { assert((LHSVal.isInvalid() || LHSVal.get()) && "missing LHS value"); assert((DotDotDotLoc.isInvalid() ? RHSVal.isUnset() : RHSVal.isInvalid() || RHSVal.get()) && "missing RHS value"); if (getCurFunction()->SwitchStack.empty()) { Diag(CaseLoc, diag::err_case_not_in_switch); return StmtError(); } if (LHSVal.isInvalid() || RHSVal.isInvalid()) { getCurFunction()->SwitchStack.back().setInt(true); return StmtError(); } auto *CS = CaseStmt::Create(Context, LHSVal.get(), RHSVal.get(), CaseLoc, DotDotDotLoc, ColonLoc); getCurFunction()->SwitchStack.back().getPointer()->addSwitchCase(CS); return CS; } /// ActOnCaseStmtBody - This installs a statement as the body of a case. 
void Sema::ActOnCaseStmtBody(Stmt *S, Stmt *SubStmt) { cast(S)->setSubStmt(SubStmt); } StmtResult Sema::ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope) { if (getCurFunction()->SwitchStack.empty()) { Diag(DefaultLoc, diag::err_default_not_in_switch); return SubStmt; } DefaultStmt *DS = new (Context) DefaultStmt(DefaultLoc, ColonLoc, SubStmt); getCurFunction()->SwitchStack.back().getPointer()->addSwitchCase(DS); return DS; } StmtResult Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt) { // If the label was multiply defined, reject it now. if (TheDecl->getStmt()) { Diag(IdentLoc, diag::err_redefinition_of_label) << TheDecl->getDeclName(); Diag(TheDecl->getLocation(), diag::note_previous_definition); return SubStmt; } ReservedIdentifierStatus Status = TheDecl->isReserved(getLangOpts()); if (Status != ReservedIdentifierStatus::NotReserved && !Context.getSourceManager().isInSystemHeader(IdentLoc)) Diag(IdentLoc, diag::warn_reserved_extern_symbol) << TheDecl << static_cast(Status); // Otherwise, things are good. Fill in the declaration and return it. LabelStmt *LS = new (Context) LabelStmt(IdentLoc, TheDecl, SubStmt); TheDecl->setStmt(LS); if (!TheDecl->isGnuLocal()) { TheDecl->setLocStart(IdentLoc); if (!TheDecl->isMSAsmLabel()) { // Don't update the location of MS ASM labels. These will result in // a diagnostic, and changing the location here will mess that up. TheDecl->setLocation(IdentLoc); } } return LS; } StmtResult Sema::BuildAttributedStmt(SourceLocation AttrsLoc, ArrayRef Attrs, Stmt *SubStmt) { // FIXME: this code should move when a planned refactoring around statement // attributes lands. 
for (const auto *A : Attrs) { if (A->getKind() == attr::MustTail) { if (!checkAndRewriteMustTailAttr(SubStmt, *A)) { return SubStmt; } setFunctionHasMustTail(); } } return AttributedStmt::Create(Context, AttrsLoc, Attrs, SubStmt); } StmtResult Sema::ActOnAttributedStmt(const ParsedAttributesWithRange &Attrs, Stmt *SubStmt) { SmallVector SemanticAttrs; ProcessStmtAttributes(SubStmt, Attrs, SemanticAttrs); if (!SemanticAttrs.empty()) return BuildAttributedStmt(Attrs.Range.getBegin(), SemanticAttrs, SubStmt); // If none of the attributes applied, that's fine, we can recover by // returning the substatement directly instead of making an AttributedStmt // with no attributes on it. return SubStmt; } bool Sema::checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA) { ReturnStmt *R = cast(St); Expr *E = R->getRetValue(); if (CurContext->isDependentContext() || (E && E->isInstantiationDependent())) // We have to suspend our check until template instantiation time. return true; if (!checkMustTailAttr(St, MTA)) return false; // FIXME: Replace Expr::IgnoreImplicitAsWritten() with this function. // Currently it does not skip implicit constructors in an initialization // context. auto IgnoreImplicitAsWritten = [](Expr *E) -> Expr * { return IgnoreExprNodes(E, IgnoreImplicitAsWrittenSingleStep, IgnoreElidableImplicitConstructorSingleStep); }; // Now that we have verified that 'musttail' is valid here, rewrite the // return value to remove all implicit nodes, but retain parentheses. R->setRetValue(IgnoreImplicitAsWritten(E)); return true; } bool Sema::checkMustTailAttr(const Stmt *St, const Attr &MTA) { assert(!CurContext->isDependentContext() && "musttail cannot be checked from a dependent context"); // FIXME: Add Expr::IgnoreParenImplicitAsWritten() with this definition. 
auto IgnoreParenImplicitAsWritten = [](const Expr *E) -> const Expr * { return IgnoreExprNodes(const_cast(E), IgnoreParensSingleStep, IgnoreImplicitAsWrittenSingleStep, IgnoreElidableImplicitConstructorSingleStep); }; const Expr *E = cast(St)->getRetValue(); const auto *CE = dyn_cast_or_null(IgnoreParenImplicitAsWritten(E)); if (!CE) { Diag(St->getBeginLoc(), diag::err_musttail_needs_call) << &MTA; return false; } if (const auto *EWC = dyn_cast(E)) { if (EWC->cleanupsHaveSideEffects()) { Diag(St->getBeginLoc(), diag::err_musttail_needs_trivial_args) << &MTA; return false; } } // We need to determine the full function type (including "this" type, if any) // for both caller and callee. struct FuncType { enum { ft_non_member, ft_static_member, ft_non_static_member, ft_pointer_to_member, } MemberType = ft_non_member; QualType This; const FunctionProtoType *Func; const CXXMethodDecl *Method = nullptr; } CallerType, CalleeType; auto GetMethodType = [this, St, MTA](const CXXMethodDecl *CMD, FuncType &Type, bool IsCallee) -> bool { if (isa(CMD)) { Diag(St->getBeginLoc(), diag::err_musttail_structors_forbidden) << IsCallee << isa(CMD); if (IsCallee) Diag(CMD->getBeginLoc(), diag::note_musttail_structors_forbidden) << isa(CMD); Diag(MTA.getLocation(), diag::note_tail_call_required) << &MTA; return false; } if (CMD->isStatic()) Type.MemberType = FuncType::ft_static_member; else { Type.This = CMD->getThisType()->getPointeeType(); Type.MemberType = FuncType::ft_non_static_member; } Type.Func = CMD->getType()->castAs(); return true; }; const auto *CallerDecl = dyn_cast(CurContext); // Find caller function signature. if (!CallerDecl) { int ContextType; if (isa(CurContext)) ContextType = 0; else if (isa(CurContext)) ContextType = 1; else ContextType = 2; Diag(St->getBeginLoc(), diag::err_musttail_forbidden_from_this_context) << &MTA << ContextType; return false; } else if (const auto *CMD = dyn_cast(CurContext)) { // Caller is a class/struct method. 
if (!GetMethodType(CMD, CallerType, false)) return false; } else { // Caller is a non-method function. CallerType.Func = CallerDecl->getType()->getAs(); } const Expr *CalleeExpr = CE->getCallee()->IgnoreParens(); const auto *CalleeBinOp = dyn_cast(CalleeExpr); SourceLocation CalleeLoc = CE->getCalleeDecl() ? CE->getCalleeDecl()->getBeginLoc() : St->getBeginLoc(); // Find callee function signature. if (const CXXMethodDecl *CMD = dyn_cast_or_null(CE->getCalleeDecl())) { // Call is: obj.method(), obj->method(), functor(), etc. if (!GetMethodType(CMD, CalleeType, true)) return false; } else if (CalleeBinOp && CalleeBinOp->isPtrMemOp()) { // Call is: obj->*method_ptr or obj.*method_ptr const auto *MPT = CalleeBinOp->getRHS()->getType()->castAs(); CalleeType.This = QualType(MPT->getClass(), 0); CalleeType.Func = MPT->getPointeeType()->castAs(); CalleeType.MemberType = FuncType::ft_pointer_to_member; } else if (isa(CalleeExpr)) { Diag(St->getBeginLoc(), diag::err_musttail_structors_forbidden) << /* IsCallee = */ 1 << /* IsDestructor = */ 1; Diag(MTA.getLocation(), diag::note_tail_call_required) << &MTA; return false; } else { // Non-method function. CalleeType.Func = CalleeExpr->getType()->getPointeeType()->getAs(); } // Both caller and callee must have a prototype (no K&R declarations). if (!CalleeType.Func || !CallerType.Func) { Diag(St->getBeginLoc(), diag::err_musttail_needs_prototype) << &MTA; if (!CalleeType.Func && CE->getDirectCallee()) { Diag(CE->getDirectCallee()->getBeginLoc(), diag::note_musttail_fix_non_prototype); } if (!CallerType.Func) Diag(CallerDecl->getBeginLoc(), diag::note_musttail_fix_non_prototype); return false; } // Caller and callee must have matching calling conventions. // // Some calling conventions are physically capable of supporting tail calls // even if the function types don't perfectly match. 
LLVM is currently too // strict to allow this, but if LLVM added support for this in the future, we // could exit early here and skip the remaining checks if the functions are // using such a calling convention. if (CallerType.Func->getCallConv() != CalleeType.Func->getCallConv()) { if (const auto *ND = dyn_cast_or_null(CE->getCalleeDecl())) Diag(St->getBeginLoc(), diag::err_musttail_callconv_mismatch) << true << ND->getDeclName(); else Diag(St->getBeginLoc(), diag::err_musttail_callconv_mismatch) << false; Diag(CalleeLoc, diag::note_musttail_callconv_mismatch) << FunctionType::getNameForCallConv(CallerType.Func->getCallConv()) << FunctionType::getNameForCallConv(CalleeType.Func->getCallConv()); Diag(MTA.getLocation(), diag::note_tail_call_required) << &MTA; return false; } if (CalleeType.Func->isVariadic() || CallerType.Func->isVariadic()) { Diag(St->getBeginLoc(), diag::err_musttail_no_variadic) << &MTA; return false; } // Caller and callee must match in whether they have a "this" parameter. 
if (CallerType.This.isNull() != CalleeType.This.isNull()) { if (const auto *ND = dyn_cast_or_null(CE->getCalleeDecl())) { Diag(St->getBeginLoc(), diag::err_musttail_member_mismatch) << CallerType.MemberType << CalleeType.MemberType << true << ND->getDeclName(); Diag(CalleeLoc, diag::note_musttail_callee_defined_here) << ND->getDeclName(); } else Diag(St->getBeginLoc(), diag::err_musttail_member_mismatch) << CallerType.MemberType << CalleeType.MemberType << false; Diag(MTA.getLocation(), diag::note_tail_call_required) << &MTA; return false; } auto CheckTypesMatch = [this](FuncType CallerType, FuncType CalleeType, PartialDiagnostic &PD) -> bool { enum { ft_different_class, ft_parameter_arity, ft_parameter_mismatch, ft_return_type, }; auto DoTypesMatch = [this, &PD](QualType A, QualType B, unsigned Select) -> bool { if (!Context.hasSimilarType(A, B)) { PD << Select << A.getUnqualifiedType() << B.getUnqualifiedType(); return false; } return true; }; if (!CallerType.This.isNull() && !DoTypesMatch(CallerType.This, CalleeType.This, ft_different_class)) return false; if (!DoTypesMatch(CallerType.Func->getReturnType(), CalleeType.Func->getReturnType(), ft_return_type)) return false; if (CallerType.Func->getNumParams() != CalleeType.Func->getNumParams()) { PD << ft_parameter_arity << CallerType.Func->getNumParams() << CalleeType.Func->getNumParams(); return false; } ArrayRef CalleeParams = CalleeType.Func->getParamTypes(); ArrayRef CallerParams = CallerType.Func->getParamTypes(); size_t N = CallerType.Func->getNumParams(); for (size_t I = 0; I < N; I++) { if (!DoTypesMatch(CalleeParams[I], CallerParams[I], ft_parameter_mismatch)) { PD << static_cast(I) + 1; return false; } } return true; }; PartialDiagnostic PD = PDiag(diag::note_musttail_mismatch); if (!CheckTypesMatch(CallerType, CalleeType, PD)) { if (const auto *ND = dyn_cast_or_null(CE->getCalleeDecl())) Diag(St->getBeginLoc(), diag::err_musttail_mismatch) << true << ND->getDeclName(); else Diag(St->getBeginLoc(), 
diag::err_musttail_mismatch) << false; Diag(CalleeLoc, PD); Diag(MTA.getLocation(), diag::note_tail_call_required) << &MTA; return false; } return true; } namespace { class CommaVisitor : public EvaluatedExprVisitor { typedef EvaluatedExprVisitor Inherited; Sema &SemaRef; public: CommaVisitor(Sema &SemaRef) : Inherited(SemaRef.Context), SemaRef(SemaRef) {} void VisitBinaryOperator(BinaryOperator *E) { if (E->getOpcode() == BO_Comma) SemaRef.DiagnoseCommaOperator(E->getLHS(), E->getExprLoc()); EvaluatedExprVisitor::VisitBinaryOperator(E); } }; } StmtResult Sema::ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *thenStmt, SourceLocation ElseLoc, Stmt *elseStmt) { if (Cond.isInvalid()) Cond = ConditionResult( *this, nullptr, MakeFullExpr(new (Context) OpaqueValueExpr(SourceLocation(), Context.BoolTy, VK_PRValue), IfLoc), false); Expr *CondExpr = Cond.get().second; // Only call the CommaVisitor when not C89 due to differences in scope flags. 
if ((getLangOpts().C99 || getLangOpts().CPlusPlus) && !Diags.isIgnored(diag::warn_comma_operator, CondExpr->getExprLoc())) CommaVisitor(*this).Visit(CondExpr); if (!elseStmt) DiagnoseEmptyStmtBody(CondExpr->getEndLoc(), thenStmt, diag::warn_empty_if_body); if (IsConstexpr) { auto DiagnoseLikelihood = [&](const Stmt *S) { if (const Attr *A = Stmt::getLikelihoodAttr(S)) { Diags.Report(A->getLocation(), diag::warn_attribute_has_no_effect_on_if_constexpr) << A << A->getRange(); Diags.Report(IfLoc, diag::note_attribute_has_no_effect_on_if_constexpr_here) << SourceRange(IfLoc, LParenLoc.getLocWithOffset(-1)); } }; DiagnoseLikelihood(thenStmt); DiagnoseLikelihood(elseStmt); } else { std::tuple LHC = Stmt::determineLikelihoodConflict(thenStmt, elseStmt); if (std::get<0>(LHC)) { const Attr *ThenAttr = std::get<1>(LHC); const Attr *ElseAttr = std::get<2>(LHC); Diags.Report(ThenAttr->getLocation(), diag::warn_attributes_likelihood_ifstmt_conflict) << ThenAttr << ThenAttr->getRange(); Diags.Report(ElseAttr->getLocation(), diag::note_conflicting_attribute) << ElseAttr << ElseAttr->getRange(); } } return BuildIfStmt(IfLoc, IsConstexpr, LParenLoc, InitStmt, Cond, RParenLoc, thenStmt, ElseLoc, elseStmt); } StmtResult Sema::BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *thenStmt, SourceLocation ElseLoc, Stmt *elseStmt) { if (Cond.isInvalid()) return StmtError(); if (IsConstexpr || isa(Cond.get().second)) setFunctionHasBranchProtectedScope(); return IfStmt::Create(Context, IfLoc, IsConstexpr, InitStmt, Cond.get().first, Cond.get().second, LParenLoc, RParenLoc, thenStmt, ElseLoc, elseStmt); } namespace { struct CaseCompareFunctor { bool operator()(const std::pair &LHS, const llvm::APSInt &RHS) { return LHS.first < RHS; } bool operator()(const std::pair &LHS, const std::pair &RHS) { return LHS.first < RHS.first; } bool operator()(const llvm::APSInt &LHS, const std::pair &RHS) { return 
LHS < RHS.first; } }; } /// CmpCaseVals - Comparison predicate for sorting case values. /// static bool CmpCaseVals(const std::pair& lhs, const std::pair& rhs) { if (lhs.first < rhs.first) return true; if (lhs.first == rhs.first && lhs.second->getCaseLoc() < rhs.second->getCaseLoc()) return true; return false; } /// CmpEnumVals - Comparison predicate for sorting enumeration values. /// static bool CmpEnumVals(const std::pair& lhs, const std::pair& rhs) { return lhs.first < rhs.first; } /// EqEnumVals - Comparison preficate for uniqing enumeration values. /// static bool EqEnumVals(const std::pair& lhs, const std::pair& rhs) { return lhs.first == rhs.first; } /// GetTypeBeforeIntegralPromotion - Returns the pre-promotion type of /// potentially integral-promoted expression @p expr. static QualType GetTypeBeforeIntegralPromotion(const Expr *&E) { if (const auto *FE = dyn_cast(E)) E = FE->getSubExpr(); while (const auto *ImpCast = dyn_cast(E)) { if (ImpCast->getCastKind() != CK_IntegralCast) break; E = ImpCast->getSubExpr(); } return E->getType(); } ExprResult Sema::CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond) { class SwitchConvertDiagnoser : public ICEConvertDiagnoser { Expr *Cond; public: SwitchConvertDiagnoser(Expr *Cond) : ICEConvertDiagnoser(/*AllowScopedEnumerations*/true, false, true), Cond(Cond) {} SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_typecheck_statement_requires_integer) << T; } SemaDiagnosticBuilder diagnoseIncomplete( Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_switch_incomplete_class_type) << T << Cond->getSourceRange(); } SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { return S.Diag(Loc, diag::err_switch_explicit_conversion) << T << ConvTy; } SemaDiagnosticBuilder noteExplicitConv( Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return 
S.Diag(Conv->getLocation(), diag::note_switch_conversion) << ConvTy->isEnumeralType() << ConvTy; } SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_switch_multiple_conversions) << T; } SemaDiagnosticBuilder noteAmbiguous( Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return S.Diag(Conv->getLocation(), diag::note_switch_conversion) << ConvTy->isEnumeralType() << ConvTy; } SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { llvm_unreachable("conversion functions are permitted"); } } SwitchDiagnoser(Cond); ExprResult CondResult = PerformContextualImplicitConversion(SwitchLoc, Cond, SwitchDiagnoser); if (CondResult.isInvalid()) return ExprError(); // FIXME: PerformContextualImplicitConversion doesn't always tell us if it // failed and produced a diagnostic. Cond = CondResult.get(); if (!Cond->isTypeDependent() && !Cond->getType()->isIntegralOrEnumerationType()) return ExprError(); // C99 6.8.4.2p5 - Integer promotions are performed on the controlling expr. return UsualUnaryConversions(Cond); } StmtResult Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc) { Expr *CondExpr = Cond.get().second; assert((Cond.isInvalid() || CondExpr) && "switch with no condition"); if (CondExpr && !CondExpr->isTypeDependent()) { // We have already converted the expression to an integral or enumeration // type, when we parsed the switch condition. There are cases where we don't // have an appropriate type, e.g. a typo-expr Cond was corrected to an // inappropriate-type expr, we just return an error. if (!CondExpr->getType()->isIntegralOrEnumerationType()) return StmtError(); if (CondExpr->isKnownToHaveBooleanValue()) { // switch(bool_expr) {...} is often a programmer error, e.g. // switch(n && mask) { ... } // Doh - should be "n & mask". 
// One can always use an if statement instead of switch(bool_expr). Diag(SwitchLoc, diag::warn_bool_switch_condition) << CondExpr->getSourceRange(); } } setFunctionHasBranchIntoScope(); auto *SS = SwitchStmt::Create(Context, InitStmt, Cond.get().first, CondExpr, LParenLoc, RParenLoc); getCurFunction()->SwitchStack.push_back( FunctionScopeInfo::SwitchInfo(SS, false)); return SS; } static void AdjustAPSInt(llvm::APSInt &Val, unsigned BitWidth, bool IsSigned) { Val = Val.extOrTrunc(BitWidth); Val.setIsSigned(IsSigned); } /// Check the specified case value is in range for the given unpromoted switch /// type. static void checkCaseValue(Sema &S, SourceLocation Loc, const llvm::APSInt &Val, unsigned UnpromotedWidth, bool UnpromotedSign) { // In C++11 onwards, this is checked by the language rules. if (S.getLangOpts().CPlusPlus11) return; // If the case value was signed and negative and the switch expression is // unsigned, don't bother to warn: this is implementation-defined behavior. // FIXME: Introduce a second, default-ignored warning for this case? if (UnpromotedWidth < Val.getBitWidth()) { llvm::APSInt ConvVal(Val); AdjustAPSInt(ConvVal, UnpromotedWidth, UnpromotedSign); AdjustAPSInt(ConvVal, Val.getBitWidth(), Val.isSigned()); // FIXME: Use different diagnostics for overflow in conversion to promoted // type versus "switch expression cannot have this value". Use proper // IntRange checking rather than just looking at the unpromoted type here. if (ConvVal != Val) S.Diag(Loc, diag::warn_case_value_overflow) << toString(Val, 10) << toString(ConvVal, 10); } } typedef SmallVector, 64> EnumValsTy; /// Returns true if we should emit a diagnostic about this case expression not /// being a part of the enum used in the switch controlling expression. 
static bool ShouldDiagnoseSwitchCaseNotInEnum(const Sema &S, const EnumDecl *ED, const Expr *CaseExpr, EnumValsTy::iterator &EI, EnumValsTy::iterator &EIEnd, const llvm::APSInt &Val) { if (!ED->isClosed()) return false; if (const DeclRefExpr *DRE = dyn_cast(CaseExpr->IgnoreParenImpCasts())) { if (const VarDecl *VD = dyn_cast(DRE->getDecl())) { QualType VarType = VD->getType(); QualType EnumType = S.Context.getTypeDeclType(ED); if (VD->hasGlobalStorage() && VarType.isConstQualified() && S.Context.hasSameUnqualifiedType(EnumType, VarType)) return false; } } if (ED->hasAttr()) return !S.IsValueInFlagEnum(ED, Val, false); while (EI != EIEnd && EI->first < Val) EI++; if (EI != EIEnd && EI->first == Val) return false; return true; } static void checkEnumTypesInSwitchStmt(Sema &S, const Expr *Cond, const Expr *Case) { QualType CondType = Cond->getType(); QualType CaseType = Case->getType(); const EnumType *CondEnumType = CondType->getAs(); const EnumType *CaseEnumType = CaseType->getAs(); if (!CondEnumType || !CaseEnumType) return; // Ignore anonymous enums. 
if (!CondEnumType->getDecl()->getIdentifier() && !CondEnumType->getDecl()->getTypedefNameForAnonDecl()) return; if (!CaseEnumType->getDecl()->getIdentifier() && !CaseEnumType->getDecl()->getTypedefNameForAnonDecl()) return; if (S.Context.hasSameUnqualifiedType(CondType, CaseType)) return; S.Diag(Case->getExprLoc(), diag::warn_comparison_of_mixed_enum_types_switch) << CondType << CaseType << Cond->getSourceRange() << Case->getSourceRange(); } StmtResult Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *BodyStmt) { SwitchStmt *SS = cast(Switch); bool CaseListIsIncomplete = getCurFunction()->SwitchStack.back().getInt(); assert(SS == getCurFunction()->SwitchStack.back().getPointer() && "switch stack missing push/pop!"); getCurFunction()->SwitchStack.pop_back(); if (!BodyStmt) return StmtError(); SS->setBody(BodyStmt, SwitchLoc); Expr *CondExpr = SS->getCond(); if (!CondExpr) return StmtError(); QualType CondType = CondExpr->getType(); // C++ 6.4.2.p2: // Integral promotions are performed (on the switch condition). // // A case value unrepresentable by the original switch condition // type (before the promotion) doesn't make sense, even when it can // be represented by the promoted type. Therefore we need to find // the pre-promotion type of the switch condition. const Expr *CondExprBeforePromotion = CondExpr; QualType CondTypeBeforePromotion = GetTypeBeforeIntegralPromotion(CondExprBeforePromotion); // Get the bitwidth of the switched-on value after promotions. We must // convert the integer case values to this width before comparison. bool HasDependentValue = CondExpr->isTypeDependent() || CondExpr->isValueDependent(); unsigned CondWidth = HasDependentValue ? 0 : Context.getIntWidth(CondType); bool CondIsSigned = CondType->isSignedIntegerOrEnumerationType(); // Get the width and signedness that the condition might actually have, for // warning purposes. // FIXME: Grab an IntRange for the condition rather than using the unpromoted // type. 
unsigned CondWidthBeforePromotion = HasDependentValue ? 0 : Context.getIntWidth(CondTypeBeforePromotion); bool CondIsSignedBeforePromotion = CondTypeBeforePromotion->isSignedIntegerOrEnumerationType(); // Accumulate all of the case values in a vector so that we can sort them // and detect duplicates. This vector contains the APInt for the case after // it has been converted to the condition type. typedef SmallVector, 64> CaseValsTy; CaseValsTy CaseVals; // Keep track of any GNU case ranges we see. The APSInt is the low value. typedef std::vector > CaseRangesTy; CaseRangesTy CaseRanges; DefaultStmt *TheDefaultStmt = nullptr; bool CaseListIsErroneous = false; for (SwitchCase *SC = SS->getSwitchCaseList(); SC && !HasDependentValue; SC = SC->getNextSwitchCase()) { if (DefaultStmt *DS = dyn_cast(SC)) { if (TheDefaultStmt) { Diag(DS->getDefaultLoc(), diag::err_multiple_default_labels_defined); Diag(TheDefaultStmt->getDefaultLoc(), diag::note_duplicate_case_prev); // FIXME: Remove the default statement from the switch block so that // we'll return a valid AST. This requires recursing down the AST and // finding it, not something we are set up to do right now. For now, // just lop the entire switch stmt out of the AST. CaseListIsErroneous = true; } TheDefaultStmt = DS; } else { CaseStmt *CS = cast(SC); Expr *Lo = CS->getLHS(); if (Lo->isValueDependent()) { HasDependentValue = true; break; } // We already verified that the expression has a constant value; // get that value (prior to conversions). const Expr *LoBeforePromotion = Lo; GetTypeBeforeIntegralPromotion(LoBeforePromotion); llvm::APSInt LoVal = LoBeforePromotion->EvaluateKnownConstInt(Context); // Check the unconverted value is within the range of possible values of // the switch expression. checkCaseValue(*this, Lo->getBeginLoc(), LoVal, CondWidthBeforePromotion, CondIsSignedBeforePromotion); // FIXME: This duplicates the check performed for warn_not_in_enum below. 
checkEnumTypesInSwitchStmt(*this, CondExprBeforePromotion, LoBeforePromotion); // Convert the value to the same width/sign as the condition. AdjustAPSInt(LoVal, CondWidth, CondIsSigned); // If this is a case range, remember it in CaseRanges, otherwise CaseVals. if (CS->getRHS()) { if (CS->getRHS()->isValueDependent()) { HasDependentValue = true; break; } CaseRanges.push_back(std::make_pair(LoVal, CS)); } else CaseVals.push_back(std::make_pair(LoVal, CS)); } } if (!HasDependentValue) { // If we don't have a default statement, check whether the // condition is constant. llvm::APSInt ConstantCondValue; bool HasConstantCond = false; if (!TheDefaultStmt) { Expr::EvalResult Result; HasConstantCond = CondExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects); if (Result.Val.isInt()) ConstantCondValue = Result.Val.getInt(); assert(!HasConstantCond || (ConstantCondValue.getBitWidth() == CondWidth && ConstantCondValue.isSigned() == CondIsSigned)); } bool ShouldCheckConstantCond = HasConstantCond; // Sort all the scalar case values so we can easily detect duplicates. llvm::stable_sort(CaseVals, CmpCaseVals); if (!CaseVals.empty()) { for (unsigned i = 0, e = CaseVals.size(); i != e; ++i) { if (ShouldCheckConstantCond && CaseVals[i].first == ConstantCondValue) ShouldCheckConstantCond = false; if (i != 0 && CaseVals[i].first == CaseVals[i-1].first) { // If we have a duplicate, report it. 
// First, determine if either case value has a name StringRef PrevString, CurrString; Expr *PrevCase = CaseVals[i-1].second->getLHS()->IgnoreParenCasts(); Expr *CurrCase = CaseVals[i].second->getLHS()->IgnoreParenCasts(); if (DeclRefExpr *DeclRef = dyn_cast(PrevCase)) { PrevString = DeclRef->getDecl()->getName(); } if (DeclRefExpr *DeclRef = dyn_cast(CurrCase)) { CurrString = DeclRef->getDecl()->getName(); } SmallString<16> CaseValStr; CaseVals[i-1].first.toString(CaseValStr); if (PrevString == CurrString) Diag(CaseVals[i].second->getLHS()->getBeginLoc(), diag::err_duplicate_case) << (PrevString.empty() ? CaseValStr.str() : PrevString); else Diag(CaseVals[i].second->getLHS()->getBeginLoc(), diag::err_duplicate_case_differing_expr) << (PrevString.empty() ? CaseValStr.str() : PrevString) << (CurrString.empty() ? CaseValStr.str() : CurrString) << CaseValStr; Diag(CaseVals[i - 1].second->getLHS()->getBeginLoc(), diag::note_duplicate_case_prev); // FIXME: We really want to remove the bogus case stmt from the // substmt, but we have no way to do this right now. CaseListIsErroneous = true; } } } // Detect duplicate case ranges, which usually don't exist at all in // the first place. if (!CaseRanges.empty()) { // Sort all the case ranges by their low value so we can easily detect // overlaps between ranges. llvm::stable_sort(CaseRanges); // Scan the ranges, computing the high values and removing empty ranges. std::vector HiVals; for (unsigned i = 0, e = CaseRanges.size(); i != e; ++i) { llvm::APSInt &LoVal = CaseRanges[i].first; CaseStmt *CR = CaseRanges[i].second; Expr *Hi = CR->getRHS(); const Expr *HiBeforePromotion = Hi; GetTypeBeforeIntegralPromotion(HiBeforePromotion); llvm::APSInt HiVal = HiBeforePromotion->EvaluateKnownConstInt(Context); // Check the unconverted value is within the range of possible values of // the switch expression. 
checkCaseValue(*this, Hi->getBeginLoc(), HiVal, CondWidthBeforePromotion, CondIsSignedBeforePromotion); // Convert the value to the same width/sign as the condition. AdjustAPSInt(HiVal, CondWidth, CondIsSigned); // If the low value is bigger than the high value, the case is empty. if (LoVal > HiVal) { Diag(CR->getLHS()->getBeginLoc(), diag::warn_case_empty_range) << SourceRange(CR->getLHS()->getBeginLoc(), Hi->getEndLoc()); CaseRanges.erase(CaseRanges.begin()+i); --i; --e; continue; } if (ShouldCheckConstantCond && LoVal <= ConstantCondValue && ConstantCondValue <= HiVal) ShouldCheckConstantCond = false; HiVals.push_back(HiVal); } // Rescan the ranges, looking for overlap with singleton values and other // ranges. Since the range list is sorted, we only need to compare case // ranges with their neighbors. for (unsigned i = 0, e = CaseRanges.size(); i != e; ++i) { llvm::APSInt &CRLo = CaseRanges[i].first; llvm::APSInt &CRHi = HiVals[i]; CaseStmt *CR = CaseRanges[i].second; // Check to see whether the case range overlaps with any // singleton cases. CaseStmt *OverlapStmt = nullptr; llvm::APSInt OverlapVal(32); // Find the smallest value >= the lower bound. If I is in the // case range, then we have overlap. CaseValsTy::iterator I = llvm::lower_bound(CaseVals, CRLo, CaseCompareFunctor()); if (I != CaseVals.end() && I->first < CRHi) { OverlapVal = I->first; // Found overlap with scalar. OverlapStmt = I->second; } // Find the smallest value bigger than the upper bound. I = std::upper_bound(I, CaseVals.end(), CRHi, CaseCompareFunctor()); if (I != CaseVals.begin() && (I-1)->first >= CRLo) { OverlapVal = (I-1)->first; // Found overlap with scalar. OverlapStmt = (I-1)->second; } // Check to see if this case stmt overlaps with the subsequent // case range. if (i && CRLo <= HiVals[i-1]) { OverlapVal = HiVals[i-1]; // Found overlap with range. OverlapStmt = CaseRanges[i-1].second; } if (OverlapStmt) { // If we have a duplicate, report it. 
Diag(CR->getLHS()->getBeginLoc(), diag::err_duplicate_case) << toString(OverlapVal, 10); Diag(OverlapStmt->getLHS()->getBeginLoc(), diag::note_duplicate_case_prev); // FIXME: We really want to remove the bogus case stmt from the // substmt, but we have no way to do this right now. CaseListIsErroneous = true; } } } // Complain if we have a constant condition and we didn't find a match. if (!CaseListIsErroneous && !CaseListIsIncomplete && ShouldCheckConstantCond) { // TODO: it would be nice if we printed enums as enums, chars as // chars, etc. Diag(CondExpr->getExprLoc(), diag::warn_missing_case_for_condition) << toString(ConstantCondValue, 10) << CondExpr->getSourceRange(); } // Check to see if switch is over an Enum and handles all of its // values. We only issue a warning if there is not 'default:', but // we still do the analysis to preserve this information in the AST // (which can be used by flow-based analyes). // const EnumType *ET = CondTypeBeforePromotion->getAs(); // If switch has default case, then ignore it. if (!CaseListIsErroneous && !CaseListIsIncomplete && !HasConstantCond && ET && ET->getDecl()->isCompleteDefinition() && !empty(ET->getDecl()->enumerators())) { const EnumDecl *ED = ET->getDecl(); EnumValsTy EnumVals; // Gather all enum values, set their type and sort them, // allowing easier comparison with CaseVals. for (auto *EDI : ED->enumerators()) { llvm::APSInt Val = EDI->getInitVal(); AdjustAPSInt(Val, CondWidth, CondIsSigned); EnumVals.push_back(std::make_pair(Val, EDI)); } llvm::stable_sort(EnumVals, CmpEnumVals); auto EI = EnumVals.begin(), EIEnd = std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals); // See which case values aren't in enum. 
for (CaseValsTy::const_iterator CI = CaseVals.begin(); CI != CaseVals.end(); CI++) { Expr *CaseExpr = CI->second->getLHS(); if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd, CI->first)) Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum) << CondTypeBeforePromotion; } // See which of case ranges aren't in enum EI = EnumVals.begin(); for (CaseRangesTy::const_iterator RI = CaseRanges.begin(); RI != CaseRanges.end(); RI++) { Expr *CaseExpr = RI->second->getLHS(); if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd, RI->first)) Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum) << CondTypeBeforePromotion; llvm::APSInt Hi = RI->second->getRHS()->EvaluateKnownConstInt(Context); AdjustAPSInt(Hi, CondWidth, CondIsSigned); CaseExpr = RI->second->getRHS(); if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd, Hi)) Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum) << CondTypeBeforePromotion; } // Check which enum vals aren't in switch auto CI = CaseVals.begin(); auto RI = CaseRanges.begin(); bool hasCasesNotInSwitch = false; SmallVector UnhandledNames; for (EI = EnumVals.begin(); EI != EIEnd; EI++) { // Don't warn about omitted unavailable EnumConstantDecls. switch (EI->second->getAvailability()) { case AR_Deprecated: // Omitting a deprecated constant is ok; it should never materialize. case AR_Unavailable: continue; case AR_NotYetIntroduced: // Partially available enum constants should be present. Note that we // suppress -Wunguarded-availability diagnostics for such uses. 
case AR_Available: break; } if (EI->second->hasAttr()) continue; // Drop unneeded case values while (CI != CaseVals.end() && CI->first < EI->first) CI++; if (CI != CaseVals.end() && CI->first == EI->first) continue; // Drop unneeded case ranges for (; RI != CaseRanges.end(); RI++) { llvm::APSInt Hi = RI->second->getRHS()->EvaluateKnownConstInt(Context); AdjustAPSInt(Hi, CondWidth, CondIsSigned); if (EI->first <= Hi) break; } if (RI == CaseRanges.end() || EI->first < RI->first) { hasCasesNotInSwitch = true; UnhandledNames.push_back(EI->second->getDeclName()); } } if (TheDefaultStmt && UnhandledNames.empty() && ED->isClosedNonFlag()) Diag(TheDefaultStmt->getDefaultLoc(), diag::warn_unreachable_default); // Produce a nice diagnostic if multiple values aren't handled. if (!UnhandledNames.empty()) { auto DB = Diag(CondExpr->getExprLoc(), TheDefaultStmt ? diag::warn_def_missing_case : diag::warn_missing_case) << (int)UnhandledNames.size(); for (size_t I = 0, E = std::min(UnhandledNames.size(), (size_t)3); I != E; ++I) DB << UnhandledNames[I]; } if (!hasCasesNotInSwitch) SS->setAllEnumCasesCovered(); } } if (BodyStmt) DiagnoseEmptyStmtBody(CondExpr->getEndLoc(), BodyStmt, diag::warn_empty_switch_body); // FIXME: If the case list was broken is some way, we don't have a good system // to patch it up. Instead, just return the whole substmt as broken. if (CaseListIsErroneous) return StmtError(); return SS; } void Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr) { if (Diags.isIgnored(diag::warn_not_in_enum_assignment, SrcExpr->getExprLoc())) return; if (const EnumType *ET = DstType->getAs()) if (!Context.hasSameUnqualifiedType(SrcType, DstType) && SrcType->isIntegerType()) { if (!SrcExpr->isTypeDependent() && !SrcExpr->isValueDependent() && SrcExpr->isIntegerConstantExpr(Context)) { // Get the bitwidth of the enum value before promotions. 
unsigned DstWidth = Context.getIntWidth(DstType); bool DstIsSigned = DstType->isSignedIntegerOrEnumerationType(); llvm::APSInt RhsVal = SrcExpr->EvaluateKnownConstInt(Context); AdjustAPSInt(RhsVal, DstWidth, DstIsSigned); const EnumDecl *ED = ET->getDecl(); if (!ED->isClosed()) return; if (ED->hasAttr()) { if (!IsValueInFlagEnum(ED, RhsVal, true)) Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment) << DstType.getUnqualifiedType(); } else { typedef SmallVector, 64> EnumValsTy; EnumValsTy EnumVals; // Gather all enum values, set their type and sort them, // allowing easier comparison with rhs constant. for (auto *EDI : ED->enumerators()) { llvm::APSInt Val = EDI->getInitVal(); AdjustAPSInt(Val, DstWidth, DstIsSigned); EnumVals.push_back(std::make_pair(Val, EDI)); } if (EnumVals.empty()) return; llvm::stable_sort(EnumVals, CmpEnumVals); EnumValsTy::iterator EIend = std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals); // See which values aren't in the enum. EnumValsTy::const_iterator EI = EnumVals.begin(); while (EI != EIend && EI->first < RhsVal) EI++; if (EI == EIend || EI->first != RhsVal) { Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment) << DstType.getUnqualifiedType(); } } } } } StmtResult Sema::ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc, ConditionResult Cond, SourceLocation RParenLoc, Stmt *Body) { if (Cond.isInvalid()) return StmtError(); auto CondVal = Cond.get(); CheckBreakContinueBinding(CondVal.second); if (CondVal.second && !Diags.isIgnored(diag::warn_comma_operator, CondVal.second->getExprLoc())) CommaVisitor(*this).Visit(CondVal.second); if (isa(Body)) getCurCompoundScope().setHasEmptyLoopBodies(); return WhileStmt::Create(Context, CondVal.first, CondVal.second, Body, WhileLoc, LParenLoc, RParenLoc); } StmtResult Sema::ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen) { assert(Cond && "ActOnDoStmt(): missing 
expression"); CheckBreakContinueBinding(Cond); ExprResult CondResult = CheckBooleanCondition(DoLoc, Cond); if (CondResult.isInvalid()) return StmtError(); Cond = CondResult.get(); CondResult = ActOnFinishFullExpr(Cond, DoLoc, /*DiscardedValue*/ false); if (CondResult.isInvalid()) return StmtError(); Cond = CondResult.get(); // Only call the CommaVisitor for C89 due to differences in scope flags. if (Cond && !getLangOpts().C99 && !getLangOpts().CPlusPlus && !Diags.isIgnored(diag::warn_comma_operator, Cond->getExprLoc())) CommaVisitor(*this).Visit(Cond); return new (Context) DoStmt(Body, Cond, DoLoc, WhileLoc, CondRParen); } namespace { // Use SetVector since the diagnostic cares about the ordering of the Decl's. using DeclSetVector = llvm::SetVector, llvm::SmallPtrSet>; // This visitor will traverse a conditional statement and store all // the evaluated decls into a vector. Simple is set to true if none // of the excluded constructs are used. class DeclExtractor : public EvaluatedExprVisitor { DeclSetVector &Decls; SmallVectorImpl &Ranges; bool Simple; public: typedef EvaluatedExprVisitor Inherited; DeclExtractor(Sema &S, DeclSetVector &Decls, SmallVectorImpl &Ranges) : Inherited(S.Context), Decls(Decls), Ranges(Ranges), Simple(true) {} bool isSimple() { return Simple; } // Replaces the method in EvaluatedExprVisitor. void VisitMemberExpr(MemberExpr* E) { Simple = false; } // Any Stmt not explicitly listed will cause the condition to be marked // complex. void VisitStmt(Stmt *S) { Simple = false; } void VisitBinaryOperator(BinaryOperator *E) { Visit(E->getLHS()); Visit(E->getRHS()); } void VisitCastExpr(CastExpr *E) { Visit(E->getSubExpr()); } void VisitUnaryOperator(UnaryOperator *E) { // Skip checking conditionals with derefernces. 
if (E->getOpcode() == UO_Deref) Simple = false; else Visit(E->getSubExpr()); } void VisitConditionalOperator(ConditionalOperator *E) { Visit(E->getCond()); Visit(E->getTrueExpr()); Visit(E->getFalseExpr()); } void VisitParenExpr(ParenExpr *E) { Visit(E->getSubExpr()); } void VisitBinaryConditionalOperator(BinaryConditionalOperator *E) { Visit(E->getOpaqueValue()->getSourceExpr()); Visit(E->getFalseExpr()); } void VisitIntegerLiteral(IntegerLiteral *E) { } void VisitFloatingLiteral(FloatingLiteral *E) { } void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) { } void VisitCharacterLiteral(CharacterLiteral *E) { } void VisitGNUNullExpr(GNUNullExpr *E) { } void VisitImaginaryLiteral(ImaginaryLiteral *E) { } void VisitDeclRefExpr(DeclRefExpr *E) { VarDecl *VD = dyn_cast(E->getDecl()); if (!VD) { // Don't allow unhandled Decl types. Simple = false; return; } Ranges.push_back(E->getSourceRange()); Decls.insert(VD); } }; // end class DeclExtractor // DeclMatcher checks to see if the decls are used in a non-evaluated // context. 
class DeclMatcher : public EvaluatedExprVisitor { DeclSetVector &Decls; bool FoundDecl; public: typedef EvaluatedExprVisitor Inherited; DeclMatcher(Sema &S, DeclSetVector &Decls, Stmt *Statement) : Inherited(S.Context), Decls(Decls), FoundDecl(false) { if (!Statement) return; Visit(Statement); } void VisitReturnStmt(ReturnStmt *S) { FoundDecl = true; } void VisitBreakStmt(BreakStmt *S) { FoundDecl = true; } void VisitGotoStmt(GotoStmt *S) { FoundDecl = true; } void VisitCastExpr(CastExpr *E) { if (E->getCastKind() == CK_LValueToRValue) CheckLValueToRValueCast(E->getSubExpr()); else Visit(E->getSubExpr()); } void CheckLValueToRValueCast(Expr *E) { E = E->IgnoreParenImpCasts(); if (isa(E)) { return; } if (ConditionalOperator *CO = dyn_cast(E)) { Visit(CO->getCond()); CheckLValueToRValueCast(CO->getTrueExpr()); CheckLValueToRValueCast(CO->getFalseExpr()); return; } if (BinaryConditionalOperator *BCO = dyn_cast(E)) { CheckLValueToRValueCast(BCO->getOpaqueValue()->getSourceExpr()); CheckLValueToRValueCast(BCO->getFalseExpr()); return; } Visit(E); } void VisitDeclRefExpr(DeclRefExpr *E) { if (VarDecl *VD = dyn_cast(E->getDecl())) if (Decls.count(VD)) FoundDecl = true; } void VisitPseudoObjectExpr(PseudoObjectExpr *POE) { // Only need to visit the semantics for POE. // SyntaticForm doesn't really use the Decal. for (auto *S : POE->semantics()) { if (auto *OVE = dyn_cast(S)) // Look past the OVE into the expression it binds. Visit(OVE->getSourceExpr()); else Visit(S); } } bool FoundDeclInUse() { return FoundDecl; } }; // end class DeclMatcher void CheckForLoopConditionalStatement(Sema &S, Expr *Second, Expr *Third, Stmt *Body) { // Condition is empty if (!Second) return; if (S.Diags.isIgnored(diag::warn_variables_not_in_loop_body, Second->getBeginLoc())) return; PartialDiagnostic PDiag = S.PDiag(diag::warn_variables_not_in_loop_body); DeclSetVector Decls; SmallVector Ranges; DeclExtractor DE(S, Decls, Ranges); DE.Visit(Second); // Don't analyze complex conditionals. 
if (!DE.isSimple()) return; // No decls found. if (Decls.size() == 0) return; // Don't warn on volatile, static, or global variables. for (auto *VD : Decls) if (VD->getType().isVolatileQualified() || VD->hasGlobalStorage()) return; if (DeclMatcher(S, Decls, Second).FoundDeclInUse() || DeclMatcher(S, Decls, Third).FoundDeclInUse() || DeclMatcher(S, Decls, Body).FoundDeclInUse()) return; // Load decl names into diagnostic. if (Decls.size() > 4) { PDiag << 0; } else { PDiag << (unsigned)Decls.size(); for (auto *VD : Decls) PDiag << VD->getDeclName(); } for (auto Range : Ranges) PDiag << Range; S.Diag(Ranges.begin()->getBegin(), PDiag); } // If Statement is an incemement or decrement, return true and sets the // variables Increment and DRE. bool ProcessIterationStmt(Sema &S, Stmt* Statement, bool &Increment, DeclRefExpr *&DRE) { if (auto Cleanups = dyn_cast(Statement)) if (!Cleanups->cleanupsHaveSideEffects()) Statement = Cleanups->getSubExpr(); if (UnaryOperator *UO = dyn_cast(Statement)) { switch (UO->getOpcode()) { default: return false; case UO_PostInc: case UO_PreInc: Increment = true; break; case UO_PostDec: case UO_PreDec: Increment = false; break; } DRE = dyn_cast(UO->getSubExpr()); return DRE; } if (CXXOperatorCallExpr *Call = dyn_cast(Statement)) { FunctionDecl *FD = Call->getDirectCallee(); if (!FD || !FD->isOverloadedOperator()) return false; switch (FD->getOverloadedOperator()) { default: return false; case OO_PlusPlus: Increment = true; break; case OO_MinusMinus: Increment = false; break; } DRE = dyn_cast(Call->getArg(0)); return DRE; } return false; } // A visitor to determine if a continue or break statement is a // subexpression. 
class BreakContinueFinder : public ConstEvaluatedExprVisitor { SourceLocation BreakLoc; SourceLocation ContinueLoc; bool InSwitch = false; public: BreakContinueFinder(Sema &S, const Stmt* Body) : Inherited(S.Context) { Visit(Body); } typedef ConstEvaluatedExprVisitor Inherited; void VisitContinueStmt(const ContinueStmt* E) { ContinueLoc = E->getContinueLoc(); } void VisitBreakStmt(const BreakStmt* E) { if (!InSwitch) BreakLoc = E->getBreakLoc(); } void VisitSwitchStmt(const SwitchStmt* S) { if (const Stmt *Init = S->getInit()) Visit(Init); if (const Stmt *CondVar = S->getConditionVariableDeclStmt()) Visit(CondVar); if (const Stmt *Cond = S->getCond()) Visit(Cond); // Don't return break statements from the body of a switch. InSwitch = true; if (const Stmt *Body = S->getBody()) Visit(Body); InSwitch = false; } void VisitForStmt(const ForStmt *S) { // Only visit the init statement of a for loop; the body // has a different break/continue scope. if (const Stmt *Init = S->getInit()) Visit(Init); } void VisitWhileStmt(const WhileStmt *) { // Do nothing; the children of a while loop have a different // break/continue scope. } void VisitDoStmt(const DoStmt *) { // Do nothing; the children of a while loop have a different // break/continue scope. } void VisitCXXForRangeStmt(const CXXForRangeStmt *S) { // Only visit the initialization of a for loop; the body // has a different break/continue scope. if (const Stmt *Init = S->getInit()) Visit(Init); if (const Stmt *Range = S->getRangeStmt()) Visit(Range); if (const Stmt *Begin = S->getBeginStmt()) Visit(Begin); if (const Stmt *End = S->getEndStmt()) Visit(End); } void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) { // Only visit the initialization of a for loop; the body // has a different break/continue scope. 
if (const Stmt *Element = S->getElement()) Visit(Element); if (const Stmt *Collection = S->getCollection()) Visit(Collection); } bool ContinueFound() { return ContinueLoc.isValid(); } bool BreakFound() { return BreakLoc.isValid(); } SourceLocation GetContinueLoc() { return ContinueLoc; } SourceLocation GetBreakLoc() { return BreakLoc; } }; // end class BreakContinueFinder // Emit a warning when a loop increment/decrement appears twice per loop // iteration. The conditions which trigger this warning are: // 1) The last statement in the loop body and the third expression in the // for loop are both increment or both decrement of the same variable // 2) No continue statements in the loop body. void CheckForRedundantIteration(Sema &S, Expr *Third, Stmt *Body) { // Return when there is nothing to check. if (!Body || !Third) return; if (S.Diags.isIgnored(diag::warn_redundant_loop_iteration, Third->getBeginLoc())) return; // Get the last statement from the loop body. CompoundStmt *CS = dyn_cast(Body); if (!CS || CS->body_empty()) return; Stmt *LastStmt = CS->body_back(); if (!LastStmt) return; bool LoopIncrement, LastIncrement; DeclRefExpr *LoopDRE, *LastDRE; if (!ProcessIterationStmt(S, Third, LoopIncrement, LoopDRE)) return; if (!ProcessIterationStmt(S, LastStmt, LastIncrement, LastDRE)) return; // Check that the two statements are both increments or both decrements // on the same variable. 
if (LoopIncrement != LastIncrement || LoopDRE->getDecl() != LastDRE->getDecl()) return; if (BreakContinueFinder(S, Body).ContinueFound()) return; S.Diag(LastDRE->getLocation(), diag::warn_redundant_loop_iteration) << LastDRE->getDecl() << LastIncrement; S.Diag(LoopDRE->getLocation(), diag::note_loop_iteration_here) << LoopIncrement; } } // end namespace void Sema::CheckBreakContinueBinding(Expr *E) { if (!E || getLangOpts().CPlusPlus) return; BreakContinueFinder BCFinder(*this, E); Scope *BreakParent = CurScope->getBreakParent(); if (BCFinder.BreakFound() && BreakParent) { if (BreakParent->getFlags() & Scope::SwitchScope) { Diag(BCFinder.GetBreakLoc(), diag::warn_break_binds_to_switch); } else { Diag(BCFinder.GetBreakLoc(), diag::warn_loop_ctrl_binds_to_inner) << "break"; } } else if (BCFinder.ContinueFound() && CurScope->getContinueParent()) { Diag(BCFinder.GetContinueLoc(), diag::warn_loop_ctrl_binds_to_inner) << "continue"; } } StmtResult Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg third, SourceLocation RParenLoc, Stmt *Body) { if (Second.isInvalid()) return StmtError(); if (!getLangOpts().CPlusPlus) { if (DeclStmt *DS = dyn_cast_or_null(First)) { // C99 6.8.5p3: The declaration part of a 'for' statement shall only // declare identifiers for objects having storage class 'auto' or // 'register'. const Decl *NonVarSeen = nullptr; bool VarDeclSeen = false; for (auto *DI : DS->decls()) { if (VarDecl *VD = dyn_cast(DI)) { VarDeclSeen = true; if (VD->isLocalVarDecl() && !VD->hasLocalStorage()) { Diag(DI->getLocation(), diag::err_non_local_variable_decl_in_for); DI->setInvalidDecl(); } } else if (!NonVarSeen) { // Keep track of the first non-variable declaration we saw so that // we can diagnose if we don't see any variable declarations. This // covers a case like declaring a typedef, function, or structure // type rather than a variable. 
NonVarSeen = DI; } } // Diagnose if we saw a non-variable declaration but no variable // declarations. if (NonVarSeen && !VarDeclSeen) Diag(NonVarSeen->getLocation(), diag::err_non_variable_decl_in_for); } } CheckBreakContinueBinding(Second.get().second); CheckBreakContinueBinding(third.get()); if (!Second.get().first) CheckForLoopConditionalStatement(*this, Second.get().second, third.get(), Body); CheckForRedundantIteration(*this, third.get(), Body); if (Second.get().second && !Diags.isIgnored(diag::warn_comma_operator, Second.get().second->getExprLoc())) CommaVisitor(*this).Visit(Second.get().second); Expr *Third = third.release().getAs(); if (isa(Body)) getCurCompoundScope().setHasEmptyLoopBodies(); return new (Context) ForStmt(Context, First, Second.get().second, Second.get().first, Third, Body, ForLoc, LParenLoc, RParenLoc); } /// In an Objective C collection iteration statement: /// for (x in y) /// x can be an arbitrary l-value expression. Bind it up as a /// full-expression. StmtResult Sema::ActOnForEachLValueExpr(Expr *E) { // Reduce placeholder expressions here. Note that this rejects the // use of pseudo-object l-values in this position. ExprResult result = CheckPlaceholderExpr(E); if (result.isInvalid()) return StmtError(); E = result.get(); ExprResult FullExpr = ActOnFinishFullExpr(E, /*DiscardedValue*/ false); if (FullExpr.isInvalid()) return StmtError(); return StmtResult(static_cast(FullExpr.get())); } ExprResult Sema::CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) { if (!collection) return ExprError(); ExprResult result = CorrectDelayedTyposInExpr(collection); if (!result.isUsable()) return ExprError(); collection = result.get(); // Bail out early if we've got a type-dependent expression. if (collection->isTypeDependent()) return collection; // Perform normal l-value conversion. 
result = DefaultFunctionArrayLvalueConversion(collection); if (result.isInvalid()) return ExprError(); collection = result.get(); // The operand needs to have object-pointer type. // TODO: should we do a contextual conversion? const ObjCObjectPointerType *pointerType = collection->getType()->getAs(); if (!pointerType) return Diag(forLoc, diag::err_collection_expr_type) << collection->getType() << collection->getSourceRange(); // Check that the operand provides // - countByEnumeratingWithState:objects:count: const ObjCObjectType *objectType = pointerType->getObjectType(); ObjCInterfaceDecl *iface = objectType->getInterface(); // If we have a forward-declared type, we can't do this check. // Under ARC, it is an error not to have a forward-declared class. if (iface && (getLangOpts().ObjCAutoRefCount ? RequireCompleteType(forLoc, QualType(objectType, 0), diag::err_arc_collection_forward, collection) : !isCompleteType(forLoc, QualType(objectType, 0)))) { // Otherwise, if we have any useful type information, check that // the type declares the appropriate method. } else if (iface || !objectType->qual_empty()) { IdentifierInfo *selectorIdents[] = { &Context.Idents.get("countByEnumeratingWithState"), &Context.Idents.get("objects"), &Context.Idents.get("count") }; Selector selector = Context.Selectors.getSelector(3, &selectorIdents[0]); ObjCMethodDecl *method = nullptr; // If there's an interface, look in both the public and private APIs. if (iface) { method = iface->lookupInstanceMethod(selector); if (!method) method = iface->lookupPrivateMethod(selector); } // Also check protocol qualifiers. if (!method) method = LookupMethodInQualifiedType(selector, pointerType, /*instance*/ true); // If we didn't find it anywhere, give up. if (!method) { Diag(forLoc, diag::warn_collection_expr_type) << collection->getType() << selector << collection->getSourceRange(); } // TODO: check for an incompatible signature? } // Wrap up any cleanups in the expression. 
return collection; } StmtResult Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc) { setFunctionHasBranchProtectedScope(); ExprResult CollectionExprResult = CheckObjCForCollectionOperand(ForLoc, collection); if (First) { QualType FirstType; if (DeclStmt *DS = dyn_cast(First)) { if (!DS->isSingleDecl()) return StmtError(Diag((*DS->decl_begin())->getLocation(), diag::err_toomany_element_decls)); VarDecl *D = dyn_cast(DS->getSingleDecl()); if (!D || D->isInvalidDecl()) return StmtError(); FirstType = D->getType(); // C99 6.8.5p3: The declaration part of a 'for' statement shall only // declare identifiers for objects having storage class 'auto' or // 'register'. if (!D->hasLocalStorage()) return StmtError(Diag(D->getLocation(), diag::err_non_local_variable_decl_in_for)); // If the type contained 'auto', deduce the 'auto' to 'id'. if (FirstType->getContainedAutoType()) { OpaqueValueExpr OpaqueId(D->getLocation(), Context.getObjCIdType(), VK_PRValue); Expr *DeducedInit = &OpaqueId; if (DeduceAutoType(D->getTypeSourceInfo(), DeducedInit, FirstType) == DAR_Failed) DiagnoseAutoDeductionFailure(D, DeducedInit); if (FirstType.isNull()) { D->setInvalidDecl(); return StmtError(); } D->setType(FirstType); if (!inTemplateInstantiation()) { SourceLocation Loc = D->getTypeSourceInfo()->getTypeLoc().getBeginLoc(); Diag(Loc, diag::warn_auto_var_is_id) << D->getDeclName(); } } } else { Expr *FirstE = cast(First); if (!FirstE->isTypeDependent() && !FirstE->isLValue()) return StmtError( Diag(First->getBeginLoc(), diag::err_selector_element_not_lvalue) << First->getSourceRange()); FirstType = static_cast(First)->getType(); if (FirstType.isConstQualified()) Diag(ForLoc, diag::err_selector_element_const_type) << FirstType << First->getSourceRange(); } if (!FirstType->isDependentType() && !FirstType->isObjCObjectPointerType() && !FirstType->isBlockPointerType()) return StmtError(Diag(ForLoc, diag::err_selector_element_type) << 
FirstType << First->getSourceRange()); } if (CollectionExprResult.isInvalid()) return StmtError(); CollectionExprResult = ActOnFinishFullExpr(CollectionExprResult.get(), /*DiscardedValue*/ false); if (CollectionExprResult.isInvalid()) return StmtError(); return new (Context) ObjCForCollectionStmt(First, CollectionExprResult.get(), nullptr, ForLoc, RParenLoc); } /// Finish building a variable declaration for a for-range statement. /// \return true if an error occurs. static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init, SourceLocation Loc, int DiagID) { if (Decl->getType()->isUndeducedType()) { ExprResult Res = SemaRef.CorrectDelayedTyposInExpr(Init); if (!Res.isUsable()) { Decl->setInvalidDecl(); return true; } Init = Res.get(); } // Deduce the type for the iterator variable now rather than leaving it to // AddInitializerToDecl, so we can produce a more suitable diagnostic. QualType InitType; if ((!isa(Init) && Init->getType()->isVoidType()) || SemaRef.DeduceAutoType(Decl->getTypeSourceInfo(), Init, InitType) == Sema::DAR_Failed) SemaRef.Diag(Loc, DiagID) << Init->getType(); if (InitType.isNull()) { Decl->setInvalidDecl(); return true; } Decl->setType(InitType); // In ARC, infer lifetime. // FIXME: ARC may want to turn this into 'const __unsafe_unretained' if // we're doing the equivalent of fast iteration. if (SemaRef.getLangOpts().ObjCAutoRefCount && SemaRef.inferObjCARCLifetime(Decl)) Decl->setInvalidDecl(); SemaRef.AddInitializerToDecl(Decl, Init, /*DirectInit=*/false); SemaRef.FinalizeDeclaration(Decl); SemaRef.CurContext->addHiddenDecl(Decl); return false; } namespace { // An enum to represent whether something is dealing with a call to begin() // or a call to end() in a range-based for loop. enum BeginEndFunction { BEF_begin, BEF_end }; /// Produce a note indicating which begin/end function was implicitly called /// by a C++11 for-range statement. 
This is often not obvious from the code, /// nor from the diagnostics produced when analysing the implicit expressions /// required in a for-range statement. void NoteForRangeBeginEndFunction(Sema &SemaRef, Expr *E, BeginEndFunction BEF) { CallExpr *CE = dyn_cast(E); if (!CE) return; FunctionDecl *D = dyn_cast(CE->getCalleeDecl()); if (!D) return; SourceLocation Loc = D->getLocation(); std::string Description; bool IsTemplate = false; if (FunctionTemplateDecl *FunTmpl = D->getPrimaryTemplate()) { Description = SemaRef.getTemplateArgumentBindingsText( FunTmpl->getTemplateParameters(), *D->getTemplateSpecializationArgs()); IsTemplate = true; } SemaRef.Diag(Loc, diag::note_for_range_begin_end) << BEF << IsTemplate << Description << E->getType(); } /// Build a variable declaration for a for-range statement. VarDecl *BuildForRangeVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type, StringRef Name) { DeclContext *DC = SemaRef.CurContext; IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name); TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc); VarDecl *Decl = VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None); Decl->setImplicit(); return Decl; } } static bool ObjCEnumerationCollection(Expr *Collection) { return !Collection->isTypeDependent() && Collection->getType()->getAs() != nullptr; } /// ActOnCXXForRangeStmt - Check and build a C++11 for-range statement. /// /// C++11 [stmt.ranged]: /// A range-based for statement is equivalent to /// /// { /// auto && __range = range-init; /// for ( auto __begin = begin-expr, /// __end = end-expr; /// __begin != __end; /// ++__begin ) { /// for-range-declaration = *__begin; /// statement /// } /// } /// /// The body of the loop is not available yet, since it cannot be analysed until /// we have determined the type of the for-range-declaration. 
StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *First, SourceLocation ColonLoc, Expr *Range, SourceLocation RParenLoc, BuildForRangeKind Kind) { if (!First) return StmtError(); if (Range && ObjCEnumerationCollection(Range)) { // FIXME: Support init-statements in Objective-C++20 ranged for statement. if (InitStmt) return Diag(InitStmt->getBeginLoc(), diag::err_objc_for_range_init_stmt) << InitStmt->getSourceRange(); return ActOnObjCForCollectionStmt(ForLoc, First, Range, RParenLoc); } DeclStmt *DS = dyn_cast(First); assert(DS && "first part of for range not a decl stmt"); if (!DS->isSingleDecl()) { Diag(DS->getBeginLoc(), diag::err_type_defined_in_for_range); return StmtError(); } // This function is responsible for attaching an initializer to LoopVar. We // must call ActOnInitializerError if we fail to do so. Decl *LoopVar = DS->getSingleDecl(); if (LoopVar->isInvalidDecl() || !Range || DiagnoseUnexpandedParameterPack(Range, UPPC_Expression)) { ActOnInitializerError(LoopVar); return StmtError(); } // Build the coroutine state immediately and not later during template // instantiation if (!CoawaitLoc.isInvalid()) { if (!ActOnCoroutineBodyStart(S, CoawaitLoc, "co_await")) { ActOnInitializerError(LoopVar); return StmtError(); } } // Build auto && __range = range-init // Divide by 2, since the variables are in the inner scope (loop body). const auto DepthStr = std::to_string(S->getDepth() / 2); SourceLocation RangeLoc = Range->getBeginLoc(); VarDecl *RangeVar = BuildForRangeVarDecl(*this, RangeLoc, Context.getAutoRRefDeductType(), std::string("__range") + DepthStr); if (FinishForRangeVarDecl(*this, RangeVar, Range, RangeLoc, diag::err_for_range_deduction_failure)) { ActOnInitializerError(LoopVar); return StmtError(); } // Claim the type doesn't contain auto: we've already done the checking. 
DeclGroupPtrTy RangeGroup = BuildDeclaratorGroup(MutableArrayRef((Decl **)&RangeVar, 1)); StmtResult RangeDecl = ActOnDeclStmt(RangeGroup, RangeLoc, RangeLoc); if (RangeDecl.isInvalid()) { ActOnInitializerError(LoopVar); return StmtError(); } StmtResult R = BuildCXXForRangeStmt( ForLoc, CoawaitLoc, InitStmt, ColonLoc, RangeDecl.get(), /*BeginStmt=*/nullptr, /*EndStmt=*/nullptr, /*Cond=*/nullptr, /*Inc=*/nullptr, DS, RParenLoc, Kind); if (R.isInvalid()) { ActOnInitializerError(LoopVar); return StmtError(); } return R; } /// Create the initialization, compare, and increment steps for /// the range-based for loop expression. /// This function does not handle array-based for loops, /// which are created in Sema::BuildCXXForRangeStmt. /// /// \returns a ForRangeStatus indicating success or what kind of error occurred. /// BeginExpr and EndExpr are set and FRS_Success is returned on success; /// CandidateSet and BEF are set and some non-success value is returned on /// failure. static Sema::ForRangeStatus BuildNonArrayForRange(Sema &SemaRef, Expr *BeginRange, Expr *EndRange, QualType RangeType, VarDecl *BeginVar, VarDecl *EndVar, SourceLocation ColonLoc, SourceLocation CoawaitLoc, OverloadCandidateSet *CandidateSet, ExprResult *BeginExpr, ExprResult *EndExpr, BeginEndFunction *BEF) { DeclarationNameInfo BeginNameInfo( &SemaRef.PP.getIdentifierTable().get("begin"), ColonLoc); DeclarationNameInfo EndNameInfo(&SemaRef.PP.getIdentifierTable().get("end"), ColonLoc); LookupResult BeginMemberLookup(SemaRef, BeginNameInfo, Sema::LookupMemberName); LookupResult EndMemberLookup(SemaRef, EndNameInfo, Sema::LookupMemberName); auto BuildBegin = [&] { *BEF = BEF_begin; Sema::ForRangeStatus RangeStatus = SemaRef.BuildForRangeBeginEndCall(ColonLoc, ColonLoc, BeginNameInfo, BeginMemberLookup, CandidateSet, BeginRange, BeginExpr); if (RangeStatus != Sema::FRS_Success) { if (RangeStatus == Sema::FRS_DiagnosticIssued) SemaRef.Diag(BeginRange->getBeginLoc(), diag::note_in_for_range) << 
ColonLoc << BEF_begin << BeginRange->getType(); return RangeStatus; } if (!CoawaitLoc.isInvalid()) { // FIXME: getCurScope() should not be used during template instantiation. // We should pick up the set of unqualified lookup results for operator // co_await during the initial parse. *BeginExpr = SemaRef.ActOnCoawaitExpr(SemaRef.getCurScope(), ColonLoc, BeginExpr->get()); if (BeginExpr->isInvalid()) return Sema::FRS_DiagnosticIssued; } if (FinishForRangeVarDecl(SemaRef, BeginVar, BeginExpr->get(), ColonLoc, diag::err_for_range_iter_deduction_failure)) { NoteForRangeBeginEndFunction(SemaRef, BeginExpr->get(), *BEF); return Sema::FRS_DiagnosticIssued; } return Sema::FRS_Success; }; auto BuildEnd = [&] { *BEF = BEF_end; Sema::ForRangeStatus RangeStatus = SemaRef.BuildForRangeBeginEndCall(ColonLoc, ColonLoc, EndNameInfo, EndMemberLookup, CandidateSet, EndRange, EndExpr); if (RangeStatus != Sema::FRS_Success) { if (RangeStatus == Sema::FRS_DiagnosticIssued) SemaRef.Diag(EndRange->getBeginLoc(), diag::note_in_for_range) << ColonLoc << BEF_end << EndRange->getType(); return RangeStatus; } if (FinishForRangeVarDecl(SemaRef, EndVar, EndExpr->get(), ColonLoc, diag::err_for_range_iter_deduction_failure)) { NoteForRangeBeginEndFunction(SemaRef, EndExpr->get(), *BEF); return Sema::FRS_DiagnosticIssued; } return Sema::FRS_Success; }; if (CXXRecordDecl *D = RangeType->getAsCXXRecordDecl()) { // - if _RangeT is a class type, the unqualified-ids begin and end are // looked up in the scope of class _RangeT as if by class member access // lookup (3.4.5), and if either (or both) finds at least one // declaration, begin-expr and end-expr are __range.begin() and // __range.end(), respectively; SemaRef.LookupQualifiedName(BeginMemberLookup, D); if (BeginMemberLookup.isAmbiguous()) return Sema::FRS_DiagnosticIssued; SemaRef.LookupQualifiedName(EndMemberLookup, D); if (EndMemberLookup.isAmbiguous()) return Sema::FRS_DiagnosticIssued; if (BeginMemberLookup.empty() != 
EndMemberLookup.empty()) { // Look up the non-member form of the member we didn't find, first. // This way we prefer a "no viable 'end'" diagnostic over a "i found // a 'begin' but ignored it because there was no member 'end'" // diagnostic. auto BuildNonmember = [&]( BeginEndFunction BEFFound, LookupResult &Found, llvm::function_ref BuildFound, llvm::function_ref BuildNotFound) { LookupResult OldFound = std::move(Found); Found.clear(); if (Sema::ForRangeStatus Result = BuildNotFound()) return Result; switch (BuildFound()) { case Sema::FRS_Success: return Sema::FRS_Success; case Sema::FRS_NoViableFunction: CandidateSet->NoteCandidates( PartialDiagnosticAt(BeginRange->getBeginLoc(), SemaRef.PDiag(diag::err_for_range_invalid) << BeginRange->getType() << BEFFound), SemaRef, OCD_AllCandidates, BeginRange); LLVM_FALLTHROUGH; case Sema::FRS_DiagnosticIssued: for (NamedDecl *D : OldFound) { SemaRef.Diag(D->getLocation(), diag::note_for_range_member_begin_end_ignored) << BeginRange->getType() << BEFFound; } return Sema::FRS_DiagnosticIssued; } llvm_unreachable("unexpected ForRangeStatus"); }; if (BeginMemberLookup.empty()) return BuildNonmember(BEF_end, EndMemberLookup, BuildEnd, BuildBegin); return BuildNonmember(BEF_begin, BeginMemberLookup, BuildBegin, BuildEnd); } } else { // - otherwise, begin-expr and end-expr are begin(__range) and // end(__range), respectively, where begin and end are looked up with // argument-dependent lookup (3.4.2). For the purposes of this name // lookup, namespace std is an associated namespace. } if (Sema::ForRangeStatus Result = BuildBegin()) return Result; return BuildEnd(); } /// Speculatively attempt to dereference an invalid range expression. /// If the attempt fails, this function will return a valid, null StmtResult /// and emit no diagnostics. 
static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVarDecl, SourceLocation ColonLoc, Expr *Range, SourceLocation RangeLoc, SourceLocation RParenLoc) { // Determine whether we can rebuild the for-range statement with a // dereferenced range expression. ExprResult AdjustedRange; { Sema::SFINAETrap Trap(SemaRef); AdjustedRange = SemaRef.BuildUnaryOp(S, RangeLoc, UO_Deref, Range); if (AdjustedRange.isInvalid()) return StmtResult(); StmtResult SR = SemaRef.ActOnCXXForRangeStmt( S, ForLoc, CoawaitLoc, InitStmt, LoopVarDecl, ColonLoc, AdjustedRange.get(), RParenLoc, Sema::BFRK_Check); if (SR.isInvalid()) return StmtResult(); } // The attempt to dereference worked well enough that it could produce a valid // loop. Produce a fixit, and rebuild the loop with diagnostics enabled, in // case there are any other (non-fatal) problems with it. SemaRef.Diag(RangeLoc, diag::err_for_range_dereference) << Range->getType() << FixItHint::CreateInsertion(RangeLoc, "*"); return SemaRef.ActOnCXXForRangeStmt( S, ForLoc, CoawaitLoc, InitStmt, LoopVarDecl, ColonLoc, AdjustedRange.get(), RParenLoc, Sema::BFRK_Rebuild); } /// BuildCXXForRangeStmt - Build or instantiate a C++11 for-range statement. StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind) { // FIXME: This should not be used during template instantiation. We should // pick up the set of unqualified lookup results for the != and + operators // in the initial parse. 
// // Testcase (accepts-invalid): // template void f() { for (auto x : T()) {} } // namespace N { struct X { X begin(); X end(); int operator*(); }; } // bool operator!=(N::X, N::X); void operator++(N::X); // void g() { f(); } Scope *S = getCurScope(); DeclStmt *RangeDS = cast(RangeDecl); VarDecl *RangeVar = cast(RangeDS->getSingleDecl()); QualType RangeVarType = RangeVar->getType(); DeclStmt *LoopVarDS = cast(LoopVarDecl); VarDecl *LoopVar = cast(LoopVarDS->getSingleDecl()); StmtResult BeginDeclStmt = Begin; StmtResult EndDeclStmt = End; ExprResult NotEqExpr = Cond, IncrExpr = Inc; if (RangeVarType->isDependentType()) { // The range is implicitly used as a placeholder when it is dependent. RangeVar->markUsed(Context); // Deduce any 'auto's in the loop variable as 'DependentTy'. We'll fill // them in properly when we instantiate the loop. if (!LoopVar->isInvalidDecl() && Kind != BFRK_Check) { if (auto *DD = dyn_cast(LoopVar)) for (auto *Binding : DD->bindings()) Binding->setType(Context.DependentTy); LoopVar->setType(SubstAutoType(LoopVar->getType(), Context.DependentTy)); } } else if (!BeginDeclStmt.get()) { SourceLocation RangeLoc = RangeVar->getLocation(); const QualType RangeVarNonRefType = RangeVarType.getNonReferenceType(); ExprResult BeginRangeRef = BuildDeclRefExpr(RangeVar, RangeVarNonRefType, VK_LValue, ColonLoc); if (BeginRangeRef.isInvalid()) return StmtError(); ExprResult EndRangeRef = BuildDeclRefExpr(RangeVar, RangeVarNonRefType, VK_LValue, ColonLoc); if (EndRangeRef.isInvalid()) return StmtError(); QualType AutoType = Context.getAutoDeductType(); Expr *Range = RangeVar->getInit(); if (!Range) return StmtError(); QualType RangeType = Range->getType(); if (RequireCompleteType(RangeLoc, RangeType, diag::err_for_range_incomplete_type)) return StmtError(); // Build auto __begin = begin-expr, __end = end-expr. // Divide by 2, since the variables are in the inner scope (loop body). 
    // Suffix the synthesized variables with the loop depth so nested
    // range-for loops get distinct __begin/__end names.
    const auto DepthStr = std::to_string(S->getDepth() / 2);
    VarDecl *BeginVar = BuildForRangeVarDecl(*this, ColonLoc, AutoType,
                                             std::string("__begin") + DepthStr);
    VarDecl *EndVar = BuildForRangeVarDecl(*this, ColonLoc, AutoType,
                                           std::string("__end") + DepthStr);

    // Build begin-expr and end-expr and attach to __begin and __end variables.
    ExprResult BeginExpr, EndExpr;
    if (const ArrayType *UnqAT = RangeType->getAsArrayTypeUnsafe()) {
      // - if _RangeT is an array type, begin-expr and end-expr are __range and
      //   __range + __bound, respectively, where __bound is the array bound.
      //   If _RangeT is an array of unknown size or an array of incomplete
      //   type, the program is ill-formed;

      // begin-expr is __range.
      BeginExpr = BeginRangeRef;
      if (!CoawaitLoc.isInvalid()) {
        // In a co_await-qualified loop, begin-expr is awaited.
        BeginExpr = ActOnCoawaitExpr(S, ColonLoc, BeginExpr.get());
        if (BeginExpr.isInvalid())
          return StmtError();
      }
      if (FinishForRangeVarDecl(*this, BeginVar, BeginRangeRef.get(), ColonLoc,
                                diag::err_for_range_iter_deduction_failure)) {
        NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
        return StmtError();
      }

      // Find the array bound.
      ExprResult BoundExpr;
      if (const ConstantArrayType *CAT = dyn_cast(UnqAT))
        BoundExpr = IntegerLiteral::Create(
            Context, CAT->getSize(), Context.getPointerDiffType(), RangeLoc);
      else if (const VariableArrayType *VAT = dyn_cast(UnqAT)) {
        // For a variably modified type we can't just use the expression within
        // the array bounds, since we don't want that to be re-evaluated here.
        // Rather, we need to determine what it was when the array was first
        // created - so we resort to using sizeof(vla)/sizeof(element).
        // For e.g.
        //  void f(int b) {
        //    int vla[b];
        //    b = -1;   <-- This should not affect the num of iterations below
        //    for (int &c : vla) { ..
        //    }
        //  }

        // FIXME: This results in codegen generating IR that recalculates the
        // run-time number of elements (as opposed to just using the IR Value
        // that corresponds to the run-time value of each bound that was
        // generated when the array was created.) If this proves too
        // embarrassing even for unoptimized IR, consider passing a
        // magic-value/cookie to codegen that then knows to simply use that
        // initial llvm::Value (that corresponds to the bound at time of array
        // creation) within getelementptr. But be prepared to pay the price of
        // increasing a customized form of coupling between the two components
        // - which could be hard to maintain as the codebase evolves.

        // sizeof(vla) — evaluated against the VLA type as it was at creation.
        ExprResult SizeOfVLAExprR = ActOnUnaryExprOrTypeTraitExpr(
            EndVar->getLocation(), UETT_SizeOf,
            /*IsType=*/true,
            CreateParsedType(VAT->desugar(), Context.getTrivialTypeSourceInfo(
                                                 VAT->desugar(), RangeLoc))
                .getAsOpaquePtr(),
            EndVar->getSourceRange());
        if (SizeOfVLAExprR.isInvalid())
          return StmtError();

        // sizeof(element) — the divisor for the element count.
        ExprResult SizeOfEachElementExprR = ActOnUnaryExprOrTypeTraitExpr(
            EndVar->getLocation(), UETT_SizeOf,
            /*IsType=*/true,
            CreateParsedType(VAT->desugar(),
                             Context.getTrivialTypeSourceInfo(
                                 VAT->getElementType(), RangeLoc))
                .getAsOpaquePtr(),
            EndVar->getSourceRange());
        if (SizeOfEachElementExprR.isInvalid())
          return StmtError();

        // __bound = sizeof(vla) / sizeof(element).
        BoundExpr =
            ActOnBinOp(S, EndVar->getLocation(), tok::slash,
                       SizeOfVLAExprR.get(), SizeOfEachElementExprR.get());
        if (BoundExpr.isInvalid())
          return StmtError();

      } else {
        // Can't be a DependentSizedArrayType or an IncompleteArrayType since
        // UnqAT is not incomplete and Range is not type-dependent.
        llvm_unreachable("Unexpected array type in for-range");
      }

      // end-expr is __range + __bound.
      EndExpr = ActOnBinOp(S, ColonLoc, tok::plus, EndRangeRef.get(),
                           BoundExpr.get());
      if (EndExpr.isInvalid())
        return StmtError();
      if (FinishForRangeVarDecl(*this, EndVar, EndExpr.get(), ColonLoc,
                                diag::err_for_range_iter_deduction_failure)) {
        NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
        return StmtError();
      }
    } else {
      // Non-array range: resolve begin()/end() via member lookup or ADL.
      OverloadCandidateSet CandidateSet(RangeLoc,
                                        OverloadCandidateSet::CSK_Normal);
      BeginEndFunction BEFFailure;
      ForRangeStatus RangeStatus = BuildNonArrayForRange(
          *this, BeginRangeRef.get(), EndRangeRef.get(), RangeType, BeginVar,
          EndVar, ColonLoc, CoawaitLoc, &CandidateSet, &BeginExpr, &EndExpr,
          &BEFFailure);

      if (Kind == BFRK_Build && RangeStatus == FRS_NoViableFunction &&
          BEFFailure == BEF_begin) {
        // If the range is being built from an array parameter, emit a
        // a diagnostic that it is being treated as a pointer.
        if (DeclRefExpr *DRE = dyn_cast(Range)) {
          if (ParmVarDecl *PVD = dyn_cast(DRE->getDecl())) {
            QualType ArrayTy = PVD->getOriginalType();
            QualType PointerTy = PVD->getType();
            if (PointerTy->isPointerType() && ArrayTy->isArrayType()) {
              Diag(Range->getBeginLoc(), diag::err_range_on_array_parameter)
                  << RangeLoc << PVD << ArrayTy << PointerTy;
              Diag(PVD->getLocation(), diag::note_declared_at);
              return StmtError();
            }
          }
        }

        // If building the range failed, try dereferencing the range expression
        // unless a diagnostic was issued or the end function is problematic.
        StmtResult SR = RebuildForRangeWithDereference(
            *this, S, ForLoc, CoawaitLoc, InitStmt, LoopVarDecl, ColonLoc,
            Range, RangeLoc, RParenLoc);
        if (SR.isInvalid() || SR.isUsable())
          return SR;
      }

      // Otherwise, emit diagnostics if we haven't already.
      if (RangeStatus == FRS_NoViableFunction) {
        // BEFFailure tells us whether begin() or end() was the one that
        // failed; diagnose against the corresponding range reference.
        Expr *Range = BEFFailure ? EndRangeRef.get() : BeginRangeRef.get();
        CandidateSet.NoteCandidates(
            PartialDiagnosticAt(Range->getBeginLoc(),
                                PDiag(diag::err_for_range_invalid)
                                    << RangeLoc << Range->getType()
                                    << BEFFailure),
            *this, OCD_AllCandidates, Range);
      }
      // Return an error if no fix was discovered.
      if (RangeStatus != FRS_Success)
        return StmtError();
    }

    assert(!BeginExpr.isInvalid() && !EndExpr.isInvalid() &&
           "invalid range expression in for loop");

    // C++11 [dcl.spec.auto]p7: BeginType and EndType must be the same.
    // C++1z removes this restriction.
    QualType BeginType = BeginVar->getType(), EndType = EndVar->getType();
    if (!Context.hasSameType(BeginType, EndType)) {
      // Warning in C++17 and later, extension diagnostic before that.
      Diag(RangeLoc, getLangOpts().CPlusPlus17
                         ? diag::warn_for_range_begin_end_types_differ
                         : diag::ext_for_range_begin_end_types_differ)
          << BeginType << EndType;
      NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
      NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
    }

    BeginDeclStmt =
        ActOnDeclStmt(ConvertDeclToDeclGroup(BeginVar), ColonLoc, ColonLoc);
    EndDeclStmt =
        ActOnDeclStmt(ConvertDeclToDeclGroup(EndVar), ColonLoc, ColonLoc);

    const QualType BeginRefNonRefType = BeginType.getNonReferenceType();
    ExprResult BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType,
                                           VK_LValue, ColonLoc);
    if (BeginRef.isInvalid())
      return StmtError();

    ExprResult EndRef = BuildDeclRefExpr(EndVar, EndType.getNonReferenceType(),
                                         VK_LValue, ColonLoc);
    if (EndRef.isInvalid())
      return StmtError();

    // Build and check __begin != __end expression.
    NotEqExpr = ActOnBinOp(S, ColonLoc, tok::exclaimequal, BeginRef.get(),
                           EndRef.get());
    if (!NotEqExpr.isInvalid())
      NotEqExpr = CheckBooleanCondition(ColonLoc, NotEqExpr.get());
    if (!NotEqExpr.isInvalid())
      NotEqExpr =
          ActOnFinishFullExpr(NotEqExpr.get(), /*DiscardedValue*/ false);
    if (NotEqExpr.isInvalid()) {
      // Diagnostic selector 0 == the "!=" comparison failed.
      Diag(RangeLoc, diag::note_for_range_invalid_iterator)
          << RangeLoc << 0 << BeginRangeRef.get()->getType();
      NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
      if (!Context.hasSameType(BeginType, EndType))
        NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
      return StmtError();
    }

    // Build and check ++__begin expression.
    BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType, VK_LValue,
                                ColonLoc);
    if (BeginRef.isInvalid())
      return StmtError();

    IncrExpr = ActOnUnaryOp(S, ColonLoc, tok::plusplus, BeginRef.get());
    if (!IncrExpr.isInvalid() && CoawaitLoc.isValid())
      // FIXME: getCurScope() should not be used during template instantiation.
      // We should pick up the set of unqualified lookup results for operator
      // co_await during the initial parse.
      IncrExpr = ActOnCoawaitExpr(S, CoawaitLoc, IncrExpr.get());
    if (!IncrExpr.isInvalid())
      IncrExpr = ActOnFinishFullExpr(IncrExpr.get(), /*DiscardedValue*/ false);
    if (IncrExpr.isInvalid()) {
      // Diagnostic selector 2 == the "++" increment failed.
      Diag(RangeLoc, diag::note_for_range_invalid_iterator)
          << RangeLoc << 2 << BeginRangeRef.get()->getType() ;
      NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
      return StmtError();
    }

    // Build and check *__begin expression.
    BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType, VK_LValue,
                                ColonLoc);
    if (BeginRef.isInvalid())
      return StmtError();

    ExprResult DerefExpr = ActOnUnaryOp(S, ColonLoc, tok::star, BeginRef.get());
    if (DerefExpr.isInvalid()) {
      // Diagnostic selector 1 == the "*" dereference failed.
      Diag(RangeLoc, diag::note_for_range_invalid_iterator)
          << RangeLoc << 1 << BeginRangeRef.get()->getType();
      NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
      return StmtError();
    }

    // Attach *__begin as initializer for VD. Don't touch it if we're just
    // trying to determine whether this would be a valid range.
    if (!LoopVar->isInvalidDecl() && Kind != BFRK_Check) {
      AddInitializerToDecl(LoopVar, DerefExpr.get(), /*DirectInit=*/false);
      if (LoopVar->isInvalidDecl() ||
          (LoopVar->getInit() && LoopVar->getInit()->containsErrors()))
        NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
    }
  }

  // Don't bother to actually allocate the result if we're just trying to
  // determine whether it would be valid.
  if (Kind == BFRK_Check)
    return StmtResult();

  // In OpenMP loop region loop control variable must be private. Perform
  // analysis of first part (if any).
  if (getLangOpts().OpenMP >= 50 && BeginDeclStmt.isUsable())
    ActOnOpenMPLoopInitialization(ForLoc, BeginDeclStmt.get());

  // Body is attached later by FinishCXXForRangeStmt.
  return new (Context) CXXForRangeStmt(
      InitStmt, RangeDS, cast_or_null(BeginDeclStmt.get()),
      cast_or_null(EndDeclStmt.get()), NotEqExpr.get(), IncrExpr.get(),
      LoopVarDS, /*Body=*/nullptr, ForLoc, CoawaitLoc, ColonLoc, RParenLoc);
}

/// FinishObjCForCollectionStmt - Attach the body to a objective-C foreach
/// statement.
StmtResult Sema::FinishObjCForCollectionStmt(Stmt *S, Stmt *B) {
  if (!S || !B)
    return StmtError();
  ObjCForCollectionStmt * ForStmt = cast(S);

  ForStmt->setBody(B);
  return S;
}

// Warn when the loop variable is a const reference that creates a copy.
// Suggest using the non-reference type for copies.  If a copy can be prevented
// suggest the const reference type that would do so.
// For instance, given "for (const &Foo : Range)", suggest
// "for (const Foo : Range)" to denote a copy is made for the loop.  If
// possible, also suggest "for (const &Bar : Range)" if this type prevents
// the copy altogether.
static void DiagnoseForRangeReferenceVariableCopies(Sema &SemaRef,
                                                    const VarDecl *VD,
                                                    QualType RangeInitType) {
  const Expr *InitExpr = VD->getInit();
  if (!InitExpr)
    return;

  QualType VariableType = VD->getType();

  // Look through cleanups that have no side effects to find the real
  // initializer.
  if (auto Cleanups = dyn_cast(InitExpr))
    if (!Cleanups->cleanupsHaveSideEffects())
      InitExpr = Cleanups->getSubExpr();

  const MaterializeTemporaryExpr *MTE = dyn_cast(InitExpr);

  // No copy made.
  if (!MTE)
    return;

  const Expr *E = MTE->getSubExpr()->IgnoreImpCasts();

  // Searching for either UnaryOperator for dereference of a pointer or
  // CXXOperatorCallExpr for handling iterators.
  // Walk down through constructor calls, member calls and materialized
  // temporaries until we reach the dereference (or operator call) that
  // produced the value being copied.
  while (!isa(E) && !isa(E)) {
    if (const CXXConstructExpr *CCE = dyn_cast(E)) {
      E = CCE->getArg(0);
    } else if (const CXXMemberCallExpr *Call = dyn_cast(E)) {
      const MemberExpr *ME = cast(Call->getCallee());
      E = ME->getBase();
    } else {
      const MaterializeTemporaryExpr *MTE = cast(E);
      E = MTE->getSubExpr();
    }
    E = E->IgnoreImpCasts();
  }

  // Determine the reference type the loop variable could have bound to
  // without making a copy, if any.
  QualType ReferenceReturnType;
  if (isa(E)) {
    // Pointer dereference yields an lvalue of the pointee type.
    ReferenceReturnType = SemaRef.Context.getLValueReferenceType(E->getType());
  } else {
    const CXXOperatorCallExpr *Call = cast(E);
    const FunctionDecl *FD = Call->getDirectCallee();
    QualType ReturnType = FD->getReturnType();
    if (ReturnType->isReferenceType())
      ReferenceReturnType = ReturnType;
  }

  if (!ReferenceReturnType.isNull()) {
    // Loop variable creates a temporary.  Suggest either to go with
    // non-reference loop variable to indicate a copy is made, or
    // the correct type to bind a const reference.
    SemaRef.Diag(VD->getLocation(),
                 diag::warn_for_range_const_ref_binds_temp_built_from_ref)
        << VD << VariableType << ReferenceReturnType;
    QualType NonReferenceType = VariableType.getNonReferenceType();
    NonReferenceType.removeLocalConst();
    QualType NewReferenceType =
        SemaRef.Context.getLValueReferenceType(E->getType().withConst());
    SemaRef.Diag(VD->getBeginLoc(), diag::note_use_type_or_non_reference)
        << NonReferenceType << NewReferenceType << VD->getSourceRange()
        << FixItHint::CreateRemoval(VD->getTypeSpecEndLoc());
  } else if (!VariableType->isRValueReferenceType()) {
    // The range always returns a copy, so a temporary is always created.
    // Suggest removing the reference from the loop variable.
    // If the type is a rvalue reference do not warn since that changes the
    // semantic of the code.
    SemaRef.Diag(VD->getLocation(), diag::warn_for_range_ref_binds_ret_temp)
        << VD << RangeInitType;
    QualType NonReferenceType = VariableType.getNonReferenceType();
    NonReferenceType.removeLocalConst();
    SemaRef.Diag(VD->getBeginLoc(), diag::note_use_non_reference_type)
        << NonReferenceType << VD->getSourceRange()
        << FixItHint::CreateRemoval(VD->getTypeSpecEndLoc());
  }
}

/// Determines whether the @p VariableType's declaration is a record with the
/// clang::trivial_abi attribute.
static bool hasTrivialABIAttr(QualType VariableType) {
  if (CXXRecordDecl *RD = VariableType->getAsCXXRecordDecl())
    return RD->hasAttr();

  return false;
}

// Warns when the loop variable can be changed to a reference type to
// prevent a copy.  For instance, if given "for (const Foo x : Range)" suggest
// "for (const Foo &x : Range)" if this form does not make a copy.
static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef,
                                                const VarDecl *VD) {
  const Expr *InitExpr = VD->getInit();
  if (!InitExpr)
    return;

  QualType VariableType = VD->getType();

  // Only warn when the initializer is clearly a copy: a copy construction or
  // an lvalue-to-rvalue load. Anything else is not this diagnostic's concern.
  if (const CXXConstructExpr *CE = dyn_cast(InitExpr)) {
    if (!CE->getConstructor()->isCopyConstructor())
      return;
  } else if (const CastExpr *CE = dyn_cast(InitExpr)) {
    if (CE->getCastKind() != CK_LValueToRValue)
      return;
  } else {
    return;
  }

  // Small trivially copyable types are cheap to copy. Do not emit the
  // diagnostic for these instances. 64 bytes is a common size of a cache line.
  // (The function `getTypeSize` returns the size in bits.)
  ASTContext &Ctx = SemaRef.Context;
  if (Ctx.getTypeSize(VariableType) <= 64 * 8 &&
      (VariableType.isTriviallyCopyableType(Ctx) ||
       hasTrivialABIAttr(VariableType)))
    return;

  // Suggest changing from a const variable to a const reference variable
  // if doing so will prevent a copy.
  SemaRef.Diag(VD->getLocation(), diag::warn_for_range_copy)
      << VD << VariableType;
  SemaRef.Diag(VD->getBeginLoc(), diag::note_use_reference_type)
      << SemaRef.Context.getLValueReferenceType(VariableType)
      << VD->getSourceRange()
      << FixItHint::CreateInsertion(VD->getLocation(), "&");
}

/// DiagnoseForRangeVariableCopies - Diagnose three cases and fixes for them.
/// 1) for (const foo &x : foos) where foos only returns a copy.  Suggest
///    using "const foo x" to show that a copy is made
/// 2) for (const bar &x : foos) where bar is a temporary initialized by bar.
///    Suggest either "const bar x" to keep the copying or "const foo& x" to
///    prevent the copy.
/// 3) for (const foo x : foos) where x is constructed from a reference foo.
///    Suggest "const foo &x" to prevent the copy.
static void DiagnoseForRangeVariableCopies(Sema &SemaRef,
                                           const CXXForRangeStmt *ForStmt) {
  // Don't warn in instantiations; the primary template already got the warning.
  if (SemaRef.inTemplateInstantiation())
    return;

  // Cheap early exit when all three warnings are disabled at this location.
  if (SemaRef.Diags.isIgnored(
          diag::warn_for_range_const_ref_binds_temp_built_from_ref,
          ForStmt->getBeginLoc()) &&
      SemaRef.Diags.isIgnored(diag::warn_for_range_ref_binds_ret_temp,
                              ForStmt->getBeginLoc()) &&
      SemaRef.Diags.isIgnored(diag::warn_for_range_copy,
                              ForStmt->getBeginLoc())) {
    return;
  }

  const VarDecl *VD = ForStmt->getLoopVariable();
  if (!VD)
    return;

  QualType VariableType = VD->getType();

  if (VariableType->isIncompleteType())
    return;

  const Expr *InitExpr = VD->getInit();
  if (!InitExpr)
    return;

  // Suppress the warning for macro-generated loop variables.
  if (InitExpr->getExprLoc().isMacroID())
    return;

  if (VariableType->isReferenceType()) {
    DiagnoseForRangeReferenceVariableCopies(SemaRef, VD,
                                            ForStmt->getRangeInit()->getType());
  } else if (VariableType.isConstQualified()) {
    DiagnoseForRangeConstVariableCopies(SemaRef, VD);
  }
}

/// FinishCXXForRangeStmt - Attach the body to a C++0x for-range statement.
/// This is a separate step from ActOnCXXForRangeStmt because analysis of the
/// body cannot be performed until after the type of the range variable is
/// determined.
StmtResult Sema::FinishCXXForRangeStmt(Stmt *S, Stmt *B) {
  if (!S || !B)
    return StmtError();

  // An Objective-C for-in statement may reach here; forward it.
  if (isa(S))
    return FinishObjCForCollectionStmt(S, B);

  CXXForRangeStmt *ForStmt = cast(S);
  ForStmt->setBody(B);

  DiagnoseEmptyStmtBody(ForStmt->getRParenLoc(), B,
                        diag::warn_empty_range_based_for_body);

  DiagnoseForRangeVariableCopies(*this, ForStmt);

  return S;
}

StmtResult Sema::ActOnGotoStmt(SourceLocation GotoLoc,
                               SourceLocation LabelLoc,
                               LabelDecl *TheDecl) {
  // Record that this function contains a branch into scope, for later
  // jump-diagnostics checking.
  setFunctionHasBranchIntoScope();
  TheDecl->markUsed(Context);
  return new (Context) GotoStmt(TheDecl, GotoLoc, LabelLoc);
}

StmtResult Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc,
                                       SourceLocation StarLoc, Expr *E) {
  // Convert operand to void*
  if (!E->isTypeDependent()) {
    QualType ETy = E->getType();
    QualType DestTy = Context.getPointerType(Context.VoidTy.withConst());
    ExprResult ExprRes = E;
    AssignConvertType ConvTy =
        CheckSingleAssignmentConstraints(DestTy, ExprRes);
    if (ExprRes.isInvalid())
      return StmtError();
    E = ExprRes.get();
    if (DiagnoseAssignmentResult(ConvTy, StarLoc, DestTy, ETy, E, AA_Passing))
      return StmtError();
  }

  ExprResult ExprRes = ActOnFinishFullExpr(E, /*DiscardedValue*/ false);
  if (ExprRes.isInvalid())
    return StmtError();
  E = ExprRes.get();

  setFunctionHasIndirectGoto();

  return new (Context) IndirectGotoStmt(GotoLoc, StarLoc, E);
}

/// Warn when a jump at \p Loc would leave an enclosing SEH __finally block.
static void CheckJumpOutOfSEHFinally(Sema &S, SourceLocation Loc,
                                     const Scope &DestScope) {
  if (!S.CurrentSEHFinally.empty() &&
      DestScope.Contains(*S.CurrentSEHFinally.back())) {
    S.Diag(Loc, diag::warn_jump_out_of_seh_finally);
  }
}

StmtResult Sema::ActOnContinueStmt(SourceLocation ContinueLoc,
                                   Scope *CurScope) {
  Scope *S = CurScope->getContinueParent();
  if (!S) {
    // C99 6.8.6.2p1: A break shall appear only in or as a loop body.
    return StmtError(Diag(ContinueLoc, diag::err_continue_not_in_loop));
  }
  if (S->getFlags() & Scope::ConditionVarScope) {
    // We cannot 'continue;' from within a statement expression in the
    // initializer of a condition variable because we would jump past the
    // initialization of that variable.
    return StmtError(Diag(ContinueLoc, diag::err_continue_from_cond_var_init));
  }
  CheckJumpOutOfSEHFinally(*this, ContinueLoc, *S);

  return new (Context) ContinueStmt(ContinueLoc);
}

StmtResult Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
  Scope *S = CurScope->getBreakParent();
  if (!S) {
    // C99 6.8.6.3p1: A break shall appear only in or as a switch/loop body.
    return StmtError(Diag(BreakLoc, diag::err_break_not_in_loop_or_switch));
  }
  // 'break' may not be used to leave an OpenMP loop region.
  if (S->isOpenMPLoopScope())
    return StmtError(Diag(BreakLoc, diag::err_omp_loop_cannot_use_stmt)
                     << "break");
  CheckJumpOutOfSEHFinally(*this, BreakLoc, *S);

  return new (Context) BreakStmt(BreakLoc);
}

/// Determine whether the given expression might be move-eligible or
/// copy-elidable in either a (co_)return statement or throw expression,
/// without considering function return type, if applicable.
///
/// \param E The expression being returned from the function or block,
/// being thrown, or being co_returned from a coroutine. This expression
/// might be modified by the implementation.
///
/// \param Mode Controls whether the C++2b "simpler implicit move" rules are
/// forced on or off, overriding detection of the current language mode.
///
/// \returns An aggregate which contains the Candidate and isMoveEligible
/// and isCopyElidable methods. If Candidate is non-null, it means
/// isMoveEligible() would be true under the most permissive language standard.
Sema::NamedReturnInfo Sema::getNamedReturnInfo(Expr *&E,
                                               SimplerImplicitMoveMode Mode) {
  if (!E)
    return NamedReturnInfo();
  // - in a return statement in a function [where] ...
  // ... the expression is the name of a non-volatile automatic object ...
  const auto *DR = dyn_cast(E->IgnoreParens());
  if (!DR || DR->refersToEnclosingVariableOrCapture())
    return NamedReturnInfo();
  const auto *VD = dyn_cast(DR->getDecl());
  if (!VD)
    return NamedReturnInfo();
  NamedReturnInfo Res = getNamedReturnInfo(VD);
  // Under the C++2b rules (or when forced on), rewrite the returned lvalue as
  // an xvalue so overload resolution prefers the move.
  if (Res.Candidate && !E->isXValue() &&
      (Mode == SimplerImplicitMoveMode::ForceOn ||
       (Mode != SimplerImplicitMoveMode::ForceOff &&
        getLangOpts().CPlusPlus2b))) {
    E = ImplicitCastExpr::Create(Context, VD->getType().getNonReferenceType(),
                                 CK_NoOp, E, nullptr, VK_XValue,
                                 FPOptionsOverride());
  }
  return Res;
}

/// Determine whether the given NRVO candidate variable is move-eligible or
/// copy-elidable, without considering function return type.
///
/// \param VD The NRVO candidate variable.
///
/// \returns An aggregate which contains the Candidate and isMoveEligible
/// and isCopyElidable methods. If Candidate is non-null, it means
/// isMoveEligible() would be true under the most permissive language standard.
Sema::NamedReturnInfo Sema::getNamedReturnInfo(const VarDecl *VD) {
  NamedReturnInfo Info{VD, NamedReturnInfo::MoveEligibleAndCopyElidable};

  // C++20 [class.copy.elision]p3:
  // - in a return statement in a function with ...
  // (other than a function ... parameter)
  if (VD->getKind() == Decl::ParmVar)
    Info.S = NamedReturnInfo::MoveEligible;
  else if (VD->getKind() != Decl::Var)
    return NamedReturnInfo();

  // (other than ... a catch-clause parameter)
  if (VD->isExceptionVariable())
    Info.S = NamedReturnInfo::MoveEligible;

  // ...automatic...
  if (!VD->hasLocalStorage())
    return NamedReturnInfo();

  // We don't want to implicitly move out of a __block variable during a return
  // because we cannot assume the variable will no longer be used.
  if (VD->hasAttr())
    return NamedReturnInfo();

  QualType VDType = VD->getType();
  if (VDType->isObjectType()) {
    // C++17 [class.copy.elision]p3:
    // ...non-volatile automatic object...
    if (VDType.isVolatileQualified())
      return NamedReturnInfo();
  } else if (VDType->isRValueReferenceType()) {
    // C++20 [class.copy.elision]p3:
    // ...either a non-volatile object or an rvalue reference to a non-volatile
    // object type...
    QualType VDReferencedType = VDType.getNonReferenceType();
    if (VDReferencedType.isVolatileQualified() ||
        !VDReferencedType->isObjectType())
      return NamedReturnInfo();
    Info.S = NamedReturnInfo::MoveEligible;
  } else {
    // Neither an object nor an rvalue reference: not a candidate at all.
    return NamedReturnInfo();
  }

  // Variables with higher required alignment than their type's ABI
  // alignment cannot use NRVO.
  if (!VD->hasDependentAlignment() &&
      Context.getDeclAlign(VD) > Context.getTypeAlignInChars(VDType))
    Info.S = NamedReturnInfo::MoveEligible;

  return Info;
}

/// Updates given NamedReturnInfo's move-eligible and
/// copy-elidable statuses, considering the function
/// return type criteria as applicable to return statements.
///
/// \param Info The NamedReturnInfo object to update.
///
/// \param ReturnType This is the return type of the function.
/// \returns The copy elision candidate, in case the initial return expression
/// was copy elidable, or nullptr otherwise.
const VarDecl *Sema::getCopyElisionCandidate(NamedReturnInfo &Info,
                                             QualType ReturnType) {
  if (!Info.Candidate)
    return nullptr;

  // Helper: demote to "no candidate" and return nullptr.
  auto invalidNRVO = [&] {
    Info = NamedReturnInfo();
    return nullptr;
  };

  // If we got a non-deduced auto ReturnType, we are in a dependent context and
  // there is no point in allowing copy elision since we won't have it deduced
  // by the point the VardDecl is instantiated, which is the last chance we have
  // of deciding if the candidate is really copy elidable.
  if ((ReturnType->getTypeClass() == Type::TypeClass::Auto &&
       ReturnType->isCanonicalUnqualified()) ||
      ReturnType->isSpecificBuiltinType(BuiltinType::Dependent))
    return invalidNRVO();

  if (!ReturnType->isDependentType()) {
    // - in a return statement in a function with ...
    // ... a class return type ...
    if (!ReturnType->isRecordType())
      return invalidNRVO();
    QualType VDType = Info.Candidate->getType();
    // ... the same cv-unqualified type as the function return type ...
    // When considering moving this expression out, allow dissimilar types.
    if (!VDType->isDependentType() &&
        !Context.hasSameUnqualifiedType(ReturnType, VDType))
      Info.S = NamedReturnInfo::MoveEligible;
  }
  return Info.isCopyElidable() ? Info.Candidate : nullptr;
}

/// Verify that the initialization sequence that was picked for the
/// first overload resolution is permissible under C++98.
///
/// Reject (possibly converting) contructors not taking an rvalue reference,
/// or user conversion operators which are not ref-qualified.
static bool
VerifyInitializationSequenceCXX98(const Sema &S,
                                  const InitializationSequence &Seq) {
  // Find the first constructor-initialization or user-conversion step, if any.
  const auto *Step = llvm::find_if(Seq.steps(), [](const auto &Step) {
    return Step.Kind == InitializationSequence::SK_ConstructorInitialization ||
           Step.Kind == InitializationSequence::SK_UserConversion;
  });
  if (Step != Seq.step_end()) {
    const auto *FD = Step->Function.Function;
    // A constructor must take an rvalue reference; a conversion operator must
    // be ref-qualified.
    if (isa(FD)
            ? !FD->getParamDecl(0)->getType()->isRValueReferenceType()
            : cast(FD)->getRefQualifier() == RQ_None)
      return false;
  }
  return true;
}

/// Perform the initialization of a potentially-movable value, which
/// is the result of return value.
///
/// This routine implements C++20 [class.copy.elision]p3, which attempts to
/// treat returned lvalues as rvalues in certain cases (to prefer move
/// construction), then falls back to treating them as lvalues if that failed.
ExprResult Sema::PerformMoveOrCopyInitialization( const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value, bool SupressSimplerImplicitMoves) { - if ((!getLangOpts().CPlusPlus2b || SupressSimplerImplicitMoves) && + if (getLangOpts().CPlusPlus && + (!getLangOpts().CPlusPlus2b || SupressSimplerImplicitMoves) && NRInfo.isMoveEligible()) { ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack, Value->getType(), CK_NoOp, Value, VK_XValue, FPOptionsOverride()); Expr *InitExpr = &AsRvalue; auto Kind = InitializationKind::CreateCopy(Value->getBeginLoc(), Value->getBeginLoc()); InitializationSequence Seq(*this, Entity, Kind, InitExpr); auto Res = Seq.getFailedOverloadResult(); if ((Res == OR_Success || Res == OR_Deleted) && (getLangOpts().CPlusPlus11 || VerifyInitializationSequenceCXX98(*this, Seq))) { // Promote "AsRvalue" to the heap, since we now need this // expression node to persist. Value = ImplicitCastExpr::Create(Context, Value->getType(), CK_NoOp, Value, nullptr, VK_XValue, FPOptionsOverride()); // Complete type-checking the initialization of the return type // using the constructor we found. return Seq.Perform(*this, Entity, Kind, Value); } } // Either we didn't meet the criteria for treating an lvalue as an rvalue, // above, or overload resolution failed. Either way, we need to try // (again) now with the return value expression as written. return PerformCopyInitialization(Entity, SourceLocation(), Value); } /// Determine whether the declared return type of the specified function /// contains 'auto'. static bool hasDeducedReturnType(FunctionDecl *FD) { const FunctionProtoType *FPT = FD->getTypeSourceInfo()->getType()->castAs(); return FPT->getReturnType()->isUndeducedType(); } /// ActOnCapScopeReturnStmt - Utility routine to type-check return statements /// for capturing scopes. 
/// StmtResult Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, NamedReturnInfo &NRInfo, bool SupressSimplerImplicitMoves) { // If this is the first return we've seen, infer the return type. // [expr.prim.lambda]p4 in C++11; block literals follow the same rules. CapturingScopeInfo *CurCap = cast(getCurFunction()); QualType FnRetType = CurCap->ReturnType; LambdaScopeInfo *CurLambda = dyn_cast(CurCap); bool HasDeducedReturnType = CurLambda && hasDeducedReturnType(CurLambda->CallOperator); if (ExprEvalContexts.back().Context == ExpressionEvaluationContext::DiscardedStatement && (HasDeducedReturnType || CurCap->HasImplicitReturnType)) { if (RetValExp) { ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false); if (ER.isInvalid()) return StmtError(); RetValExp = ER.get(); } return ReturnStmt::Create(Context, ReturnLoc, RetValExp, /* NRVOCandidate=*/nullptr); } if (HasDeducedReturnType) { FunctionDecl *FD = CurLambda->CallOperator; // If we've already decided this lambda is invalid, e.g. because // we saw a `return` whose expression had an error, don't keep // trying to deduce its return type. if (FD->isInvalidDecl()) return StmtError(); // In C++1y, the return type may involve 'auto'. // FIXME: Blocks might have a return type of 'auto' explicitly specified. if (CurCap->ReturnType.isNull()) CurCap->ReturnType = FD->getReturnType(); AutoType *AT = CurCap->ReturnType->getContainedAutoType(); assert(AT && "lost auto type from lambda return type"); if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) { FD->setInvalidDecl(); // FIXME: preserve the ill-formed return expression. return StmtError(); } CurCap->ReturnType = FnRetType = FD->getReturnType(); } else if (CurCap->HasImplicitReturnType) { // For blocks/lambdas with implicit return types, we check each return // statement individually, and deduce the common return type when the block // or lambda is completed. 
// FIXME: Fold this into the 'auto' codepath above. if (RetValExp && !isa(RetValExp)) { ExprResult Result = DefaultFunctionArrayLvalueConversion(RetValExp); if (Result.isInvalid()) return StmtError(); RetValExp = Result.get(); // DR1048: even prior to C++14, we should use the 'auto' deduction rules // when deducing a return type for a lambda-expression (or by extension // for a block). These rules differ from the stated C++11 rules only in // that they remove top-level cv-qualifiers. if (!CurContext->isDependentContext()) FnRetType = RetValExp->getType().getUnqualifiedType(); else FnRetType = CurCap->ReturnType = Context.DependentTy; } else { if (RetValExp) { // C++11 [expr.lambda.prim]p4 bans inferring the result from an // initializer list, because it is not an expression (even // though we represent it as one). We still deduce 'void'. Diag(ReturnLoc, diag::err_lambda_return_init_list) << RetValExp->getSourceRange(); } FnRetType = Context.VoidTy; } // Although we'll properly infer the type of the block once it's completed, // make sure we provide a return type now for better error recovery. if (CurCap->ReturnType.isNull()) CurCap->ReturnType = FnRetType; } const VarDecl *NRVOCandidate = getCopyElisionCandidate(NRInfo, FnRetType); if (auto *CurBlock = dyn_cast(CurCap)) { if (CurBlock->FunctionType->castAs()->getNoReturnAttr()) { Diag(ReturnLoc, diag::err_noreturn_block_has_return_expr); return StmtError(); } } else if (auto *CurRegion = dyn_cast(CurCap)) { Diag(ReturnLoc, diag::err_return_in_captured_stmt) << CurRegion->getRegionName(); return StmtError(); } else { assert(CurLambda && "unknown kind of captured scope"); if (CurLambda->CallOperator->getType() ->castAs() ->getNoReturnAttr()) { Diag(ReturnLoc, diag::err_noreturn_lambda_has_return_expr); return StmtError(); } } // Otherwise, verify that this result type matches the previous one. We are // pickier with blocks than for normal functions because we don't have GCC // compatibility to worry about here. 
if (FnRetType->isDependentType()) { // Delay processing for now. TODO: there are lots of dependent // types we can conclusively prove aren't void. } else if (FnRetType->isVoidType()) { if (RetValExp && !isa(RetValExp) && !(getLangOpts().CPlusPlus && (RetValExp->isTypeDependent() || RetValExp->getType()->isVoidType()))) { if (!getLangOpts().CPlusPlus && RetValExp->getType()->isVoidType()) Diag(ReturnLoc, diag::ext_return_has_void_expr) << "literal" << 2; else { Diag(ReturnLoc, diag::err_return_block_has_expr); RetValExp = nullptr; } } } else if (!RetValExp) { return StmtError(Diag(ReturnLoc, diag::err_block_return_missing_expr)); } else if (!RetValExp->isTypeDependent()) { // we have a non-void block with an expression, continue checking // C99 6.8.6.4p3(136): The return statement is not an assignment. The // overlap restriction of subclause 6.5.16.1 does not apply to the case of // function return. // In C++ the return statement is handled via a copy initialization. // the C version of which boils down to CheckSingleAssignmentConstraints. InitializedEntity Entity = InitializedEntity::InitializeResult( ReturnLoc, FnRetType, NRVOCandidate != nullptr); ExprResult Res = PerformMoveOrCopyInitialization( Entity, NRInfo, RetValExp, SupressSimplerImplicitMoves); if (Res.isInvalid()) { // FIXME: Cleanup temporaries here, anyway? return StmtError(); } RetValExp = Res.get(); CheckReturnValExpr(RetValExp, FnRetType, ReturnLoc); } if (RetValExp) { ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false); if (ER.isInvalid()) return StmtError(); RetValExp = ER.get(); } auto *Result = ReturnStmt::Create(Context, ReturnLoc, RetValExp, NRVOCandidate); // If we need to check for the named return value optimization, // or if we need to infer the return type, // save the return statement in our scope for later processing. 
if (CurCap->HasImplicitReturnType || NRVOCandidate) FunctionScopes.back()->Returns.push_back(Result); if (FunctionScopes.back()->FirstReturnLoc.isInvalid()) FunctionScopes.back()->FirstReturnLoc = ReturnLoc; return Result; } namespace { /// Marks all typedefs in all local classes in a type referenced. /// /// In a function like /// auto f() { /// struct S { typedef int a; }; /// return S(); /// } /// /// the local type escapes and could be referenced in some TUs but not in /// others. Pretend that all local typedefs are always referenced, to not warn /// on this. This isn't necessary if f has internal linkage, or the typedef /// is private. class LocalTypedefNameReferencer : public RecursiveASTVisitor { public: LocalTypedefNameReferencer(Sema &S) : S(S) {} bool VisitRecordType(const RecordType *RT); private: Sema &S; }; bool LocalTypedefNameReferencer::VisitRecordType(const RecordType *RT) { auto *R = dyn_cast(RT->getDecl()); if (!R || !R->isLocalClass() || !R->isLocalClass()->isExternallyVisible() || R->isDependentType()) return true; for (auto *TmpD : R->decls()) if (auto *T = dyn_cast(TmpD)) if (T->getAccess() != AS_private || R->hasFriends()) S.MarkAnyDeclReferenced(T->getLocation(), T, /*OdrUse=*/false); return true; } } TypeLoc Sema::getReturnTypeLoc(FunctionDecl *FD) const { return FD->getTypeSourceInfo() ->getTypeLoc() .getAsAdjusted() .getReturnLoc(); } /// Deduce the return type for a function from a returned expression, per /// C++1y [dcl.spec.auto]p6. bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT) { // If this is the conversion function for a lambda, we choose to deduce it // type from the corresponding call operator, not from the synthesized return // statement within it. See Sema::DeduceReturnType. 
if (isLambdaConversionOperator(FD)) return false; TypeLoc OrigResultType = getReturnTypeLoc(FD); QualType Deduced; if (RetExpr && isa(RetExpr)) { // If the deduction is for a return statement and the initializer is // a braced-init-list, the program is ill-formed. Diag(RetExpr->getExprLoc(), getCurLambda() ? diag::err_lambda_return_init_list : diag::err_auto_fn_return_init_list) << RetExpr->getSourceRange(); return true; } if (FD->isDependentContext()) { // C++1y [dcl.spec.auto]p12: // Return type deduction [...] occurs when the definition is // instantiated even if the function body contains a return // statement with a non-type-dependent operand. assert(AT->isDeduced() && "should have deduced to dependent type"); return false; } if (RetExpr) { // Otherwise, [...] deduce a value for U using the rules of template // argument deduction. DeduceAutoResult DAR = DeduceAutoType(OrigResultType, RetExpr, Deduced); if (DAR == DAR_Failed && !FD->isInvalidDecl()) Diag(RetExpr->getExprLoc(), diag::err_auto_fn_deduction_failure) << OrigResultType.getType() << RetExpr->getType(); if (DAR != DAR_Succeeded) return true; // If a local type is part of the returned type, mark its fields as // referenced. LocalTypedefNameReferencer Referencer(*this); Referencer.TraverseType(RetExpr->getType()); } else { // In the case of a return with no operand, the initializer is considered // to be void(). // // Deduction here can only succeed if the return type is exactly 'cv auto' // or 'decltype(auto)', so just check for that case directly. if (!OrigResultType.getType()->getAs()) { Diag(ReturnLoc, diag::err_auto_fn_return_void_but_not_auto) << OrigResultType.getType(); return true; } // We always deduce U = void in this case. Deduced = SubstAutoType(OrigResultType.getType(), Context.VoidTy); if (Deduced.isNull()) return true; } // CUDA: Kernel function must have 'void' return type. 
if (getLangOpts().CUDA) if (FD->hasAttr() && !Deduced->isVoidType()) { Diag(FD->getLocation(), diag::err_kern_type_not_void_return) << FD->getType() << FD->getSourceRange(); return true; } // If a function with a declared return type that contains a placeholder type // has multiple return statements, the return type is deduced for each return // statement. [...] if the type deduced is not the same in each deduction, // the program is ill-formed. QualType DeducedT = AT->getDeducedType(); if (!DeducedT.isNull() && !FD->isInvalidDecl()) { AutoType *NewAT = Deduced->getContainedAutoType(); // It is possible that NewAT->getDeducedType() is null. When that happens, // we should not crash, instead we ignore this deduction. if (NewAT->getDeducedType().isNull()) return false; CanQualType OldDeducedType = Context.getCanonicalFunctionResultType( DeducedT); CanQualType NewDeducedType = Context.getCanonicalFunctionResultType( NewAT->getDeducedType()); if (!FD->isDependentContext() && OldDeducedType != NewDeducedType) { const LambdaScopeInfo *LambdaSI = getCurLambda(); if (LambdaSI && LambdaSI->HasImplicitReturnType) { Diag(ReturnLoc, diag::err_typecheck_missing_return_type_incompatible) << NewAT->getDeducedType() << DeducedT << true /*IsLambda*/; } else { Diag(ReturnLoc, diag::err_auto_fn_different_deductions) << (AT->isDecltypeAuto() ? 1 : 0) << NewAT->getDeducedType() << DeducedT; } return true; } } else if (!FD->isInvalidDecl()) { // Update all declarations of the function to have the deduced return type. Context.adjustDeducedFunctionResultType(FD, Deduced); } return false; } StmtResult Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope) { // Correct typos, in case the containing function returns 'auto' and // RetValExp should determine the deduced type. 
ExprResult RetVal = CorrectDelayedTyposInExpr( RetValExp, nullptr, /*RecoverUncorrectedTypos=*/true); if (RetVal.isInvalid()) return StmtError(); StmtResult R = BuildReturnStmt(ReturnLoc, RetVal.get()); if (R.isInvalid() || ExprEvalContexts.back().Context == ExpressionEvaluationContext::DiscardedStatement) return R; if (VarDecl *VD = const_cast(cast(R.get())->getNRVOCandidate())) { CurScope->addNRVOCandidate(VD); } else { CurScope->setNoNRVO(); } CheckJumpOutOfSEHFinally(*this, ReturnLoc, *CurScope->getFnParent()); return R; } static bool CheckSimplerImplicitMovesMSVCWorkaround(const Sema &S, const Expr *E) { if (!E || !S.getLangOpts().CPlusPlus2b || !S.getLangOpts().MSVCCompat) return false; const Decl *D = E->getReferencedDeclOfCallee(); if (!D || !S.SourceMgr.isInSystemHeader(D->getLocation())) return false; for (const DeclContext *DC = D->getDeclContext(); DC; DC = DC->getParent()) { if (DC->isStdNamespace()) return true; } return false; } StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) { // Check for unexpanded parameter packs. if (RetValExp && DiagnoseUnexpandedParameterPack(RetValExp)) return StmtError(); // HACK: We supress simpler implicit move here in msvc compatibility mode // just as a temporary work around, as the MSVC STL has issues with // this change. bool SupressSimplerImplicitMoves = CheckSimplerImplicitMovesMSVCWorkaround(*this, RetValExp); NamedReturnInfo NRInfo = getNamedReturnInfo( RetValExp, SupressSimplerImplicitMoves ? 
SimplerImplicitMoveMode::ForceOff : SimplerImplicitMoveMode::Normal); if (isa(getCurFunction())) return ActOnCapScopeReturnStmt(ReturnLoc, RetValExp, NRInfo, SupressSimplerImplicitMoves); QualType FnRetType; QualType RelatedRetType; const AttrVec *Attrs = nullptr; bool isObjCMethod = false; if (const FunctionDecl *FD = getCurFunctionDecl()) { FnRetType = FD->getReturnType(); if (FD->hasAttrs()) Attrs = &FD->getAttrs(); if (FD->isNoReturn()) Diag(ReturnLoc, diag::warn_noreturn_function_has_return_expr) << FD; if (FD->isMain() && RetValExp) if (isa(RetValExp)) Diag(ReturnLoc, diag::warn_main_returns_bool_literal) << RetValExp->getSourceRange(); if (FD->hasAttr() && RetValExp) { if (const auto *RT = dyn_cast(FnRetType.getCanonicalType())) { if (RT->getDecl()->isOrContainsUnion()) Diag(RetValExp->getBeginLoc(), diag::warn_cmse_nonsecure_union) << 1; } } } else if (ObjCMethodDecl *MD = getCurMethodDecl()) { FnRetType = MD->getReturnType(); isObjCMethod = true; if (MD->hasAttrs()) Attrs = &MD->getAttrs(); if (MD->hasRelatedResultType() && MD->getClassInterface()) { // In the implementation of a method with a related return type, the // type used to type-check the validity of return statements within the // method body is a pointer to the type of the class being implemented. RelatedRetType = Context.getObjCInterfaceType(MD->getClassInterface()); RelatedRetType = Context.getObjCObjectPointerType(RelatedRetType); } } else // If we don't have a function/method context, bail. return StmtError(); // C++1z: discarded return statements are not considered when deducing a // return type. 
if (ExprEvalContexts.back().Context == ExpressionEvaluationContext::DiscardedStatement && FnRetType->getContainedAutoType()) { if (RetValExp) { ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false); if (ER.isInvalid()) return StmtError(); RetValExp = ER.get(); } return ReturnStmt::Create(Context, ReturnLoc, RetValExp, /* NRVOCandidate=*/nullptr); } // FIXME: Add a flag to the ScopeInfo to indicate whether we're performing // deduction. if (getLangOpts().CPlusPlus14) { if (AutoType *AT = FnRetType->getContainedAutoType()) { FunctionDecl *FD = cast(CurContext); // If we've already decided this function is invalid, e.g. because // we saw a `return` whose expression had an error, don't keep // trying to deduce its return type. if (FD->isInvalidDecl()) return StmtError(); if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) { FD->setInvalidDecl(); return StmtError(); } else { FnRetType = FD->getReturnType(); } } } const VarDecl *NRVOCandidate = getCopyElisionCandidate(NRInfo, FnRetType); bool HasDependentReturnType = FnRetType->isDependentType(); ReturnStmt *Result = nullptr; if (FnRetType->isVoidType()) { if (RetValExp) { if (isa(RetValExp)) { // We simply never allow init lists as the return value of void // functions. This is compatible because this was never allowed before, // so there's no legacy code to deal with. NamedDecl *CurDecl = getCurFunctionOrMethodDecl(); int FunctionKind = 0; if (isa(CurDecl)) FunctionKind = 1; else if (isa(CurDecl)) FunctionKind = 2; else if (isa(CurDecl)) FunctionKind = 3; Diag(ReturnLoc, diag::err_return_init_list) << CurDecl << FunctionKind << RetValExp->getSourceRange(); // Drop the expression. 
RetValExp = nullptr; } else if (!RetValExp->isTypeDependent()) { // C99 6.8.6.4p1 (ext_ since GCC warns) unsigned D = diag::ext_return_has_expr; if (RetValExp->getType()->isVoidType()) { NamedDecl *CurDecl = getCurFunctionOrMethodDecl(); if (isa(CurDecl) || isa(CurDecl)) D = diag::err_ctor_dtor_returns_void; else D = diag::ext_return_has_void_expr; } else { ExprResult Result = RetValExp; Result = IgnoredValueConversions(Result.get()); if (Result.isInvalid()) return StmtError(); RetValExp = Result.get(); RetValExp = ImpCastExprToType(RetValExp, Context.VoidTy, CK_ToVoid).get(); } // return of void in constructor/destructor is illegal in C++. if (D == diag::err_ctor_dtor_returns_void) { NamedDecl *CurDecl = getCurFunctionOrMethodDecl(); Diag(ReturnLoc, D) << CurDecl << isa(CurDecl) << RetValExp->getSourceRange(); } // return (some void expression); is legal in C++. else if (D != diag::ext_return_has_void_expr || !getLangOpts().CPlusPlus) { NamedDecl *CurDecl = getCurFunctionOrMethodDecl(); int FunctionKind = 0; if (isa(CurDecl)) FunctionKind = 1; else if (isa(CurDecl)) FunctionKind = 2; else if (isa(CurDecl)) FunctionKind = 3; Diag(ReturnLoc, D) << CurDecl << FunctionKind << RetValExp->getSourceRange(); } } if (RetValExp) { ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false); if (ER.isInvalid()) return StmtError(); RetValExp = ER.get(); } } Result = ReturnStmt::Create(Context, ReturnLoc, RetValExp, /* NRVOCandidate=*/nullptr); } else if (!RetValExp && !HasDependentReturnType) { FunctionDecl *FD = getCurFunctionDecl(); if (getLangOpts().CPlusPlus11 && FD && FD->isConstexpr()) { // C++11 [stmt.return]p2 Diag(ReturnLoc, diag::err_constexpr_return_missing_expr) << FD << FD->isConsteval(); FD->setInvalidDecl(); } else { // C99 6.8.6.4p1 (ext_ since GCC warns) // C90 6.6.6.4p4 unsigned DiagID = getLangOpts().C99 ? 
diag::ext_return_missing_expr : diag::warn_return_missing_expr; // Note that at this point one of getCurFunctionDecl() or // getCurMethodDecl() must be non-null (see above). assert((getCurFunctionDecl() || getCurMethodDecl()) && "Not in a FunctionDecl or ObjCMethodDecl?"); bool IsMethod = FD == nullptr; const NamedDecl *ND = IsMethod ? cast(getCurMethodDecl()) : cast(FD); Diag(ReturnLoc, DiagID) << ND << IsMethod; } Result = ReturnStmt::Create(Context, ReturnLoc, /* RetExpr=*/nullptr, /* NRVOCandidate=*/nullptr); } else { assert(RetValExp || HasDependentReturnType); QualType RetType = RelatedRetType.isNull() ? FnRetType : RelatedRetType; // C99 6.8.6.4p3(136): The return statement is not an assignment. The // overlap restriction of subclause 6.5.16.1 does not apply to the case of // function return. // In C++ the return statement is handled via a copy initialization, // the C version of which boils down to CheckSingleAssignmentConstraints. if (!HasDependentReturnType && !RetValExp->isTypeDependent()) { // we have a non-void function with an expression, continue checking InitializedEntity Entity = InitializedEntity::InitializeResult( ReturnLoc, RetType, NRVOCandidate != nullptr); ExprResult Res = PerformMoveOrCopyInitialization( Entity, NRInfo, RetValExp, SupressSimplerImplicitMoves); if (Res.isInvalid()) { // FIXME: Clean up temporaries here anyway? return StmtError(); } RetValExp = Res.getAs(); // If we have a related result type, we need to implicitly // convert back to the formal result type. We can't pretend to // initialize the result again --- we might end double-retaining // --- so instead we initialize a notional temporary. if (!RelatedRetType.isNull()) { Entity = InitializedEntity::InitializeRelatedResult(getCurMethodDecl(), FnRetType); Res = PerformCopyInitialization(Entity, ReturnLoc, RetValExp); if (Res.isInvalid()) { // FIXME: Clean up temporaries here anyway? 
return StmtError(); } RetValExp = Res.getAs(); } CheckReturnValExpr(RetValExp, FnRetType, ReturnLoc, isObjCMethod, Attrs, getCurFunctionDecl()); } if (RetValExp) { ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false); if (ER.isInvalid()) return StmtError(); RetValExp = ER.get(); } Result = ReturnStmt::Create(Context, ReturnLoc, RetValExp, NRVOCandidate); } // If we need to check for the named return value optimization, save the // return statement in our scope for later processing. if (Result->getNRVOCandidate()) FunctionScopes.back()->Returns.push_back(Result); if (FunctionScopes.back()->FirstReturnLoc.isInvalid()) FunctionScopes.back()->FirstReturnLoc = ReturnLoc; return Result; } StmtResult Sema::ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body) { VarDecl *Var = cast_or_null(Parm); if (Var && Var->isInvalidDecl()) return StmtError(); return new (Context) ObjCAtCatchStmt(AtLoc, RParen, Var, Body); } StmtResult Sema::ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body) { return new (Context) ObjCAtFinallyStmt(AtLoc, Body); } StmtResult Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg CatchStmts, Stmt *Finally) { if (!getLangOpts().ObjCExceptions) Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@try"; setFunctionHasBranchProtectedScope(); unsigned NumCatchStmts = CatchStmts.size(); return ObjCAtTryStmt::Create(Context, AtLoc, Try, CatchStmts.data(), NumCatchStmts, Finally); } StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw) { if (Throw) { ExprResult Result = DefaultLvalueConversion(Throw); if (Result.isInvalid()) return StmtError(); Result = ActOnFinishFullExpr(Result.get(), /*DiscardedValue*/ false); if (Result.isInvalid()) return StmtError(); Throw = Result.get(); QualType ThrowType = Throw->getType(); // Make sure the expression type is an ObjC pointer or "void *". 
if (!ThrowType->isDependentType() && !ThrowType->isObjCObjectPointerType()) { const PointerType *PT = ThrowType->getAs(); if (!PT || !PT->getPointeeType()->isVoidType()) return StmtError(Diag(AtLoc, diag::err_objc_throw_expects_object) << Throw->getType() << Throw->getSourceRange()); } } return new (Context) ObjCAtThrowStmt(AtLoc, Throw); } StmtResult Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope) { if (!getLangOpts().ObjCExceptions) Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@throw"; if (!Throw) { // @throw without an expression designates a rethrow (which must occur // in the context of an @catch clause). Scope *AtCatchParent = CurScope; while (AtCatchParent && !AtCatchParent->isAtCatchScope()) AtCatchParent = AtCatchParent->getParent(); if (!AtCatchParent) return StmtError(Diag(AtLoc, diag::err_rethrow_used_outside_catch)); } return BuildObjCAtThrowStmt(AtLoc, Throw); } ExprResult Sema::ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand) { ExprResult result = DefaultLvalueConversion(operand); if (result.isInvalid()) return ExprError(); operand = result.get(); // Make sure the expression type is an ObjC pointer or "void *". 
QualType type = operand->getType(); if (!type->isDependentType() && !type->isObjCObjectPointerType()) { const PointerType *pointerType = type->getAs(); if (!pointerType || !pointerType->getPointeeType()->isVoidType()) { if (getLangOpts().CPlusPlus) { if (RequireCompleteType(atLoc, type, diag::err_incomplete_receiver_type)) return Diag(atLoc, diag::err_objc_synchronized_expects_object) << type << operand->getSourceRange(); ExprResult result = PerformContextuallyConvertToObjCPointer(operand); if (result.isInvalid()) return ExprError(); if (!result.isUsable()) return Diag(atLoc, diag::err_objc_synchronized_expects_object) << type << operand->getSourceRange(); operand = result.get(); } else { return Diag(atLoc, diag::err_objc_synchronized_expects_object) << type << operand->getSourceRange(); } } } // The operand to @synchronized is a full-expression. return ActOnFinishFullExpr(operand, /*DiscardedValue*/ false); } StmtResult Sema::ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SyncExpr, Stmt *SyncBody) { // We can't jump into or indirect-jump out of a @synchronized block. setFunctionHasBranchProtectedScope(); return new (Context) ObjCAtSynchronizedStmt(AtLoc, SyncExpr, SyncBody); } /// ActOnCXXCatchBlock - Takes an exception declaration and a handler block /// and creates a proper catch handler from them. StmtResult Sema::ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock) { // There's nothing to test that ActOnExceptionDecl didn't already test. return new (Context) CXXCatchStmt(CatchLoc, cast_or_null(ExDecl), HandlerBlock); } StmtResult Sema::ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body) { setFunctionHasBranchProtectedScope(); return new (Context) ObjCAutoreleasePoolStmt(AtLoc, Body); } namespace { class CatchHandlerType { QualType QT; unsigned IsPointer : 1; // This is a special constructor to be used only with DenseMapInfo's // getEmptyKey() and getTombstoneKey() functions. 
friend struct llvm::DenseMapInfo; enum Unique { ForDenseMap }; CatchHandlerType(QualType QT, Unique) : QT(QT), IsPointer(false) {} public: /// Used when creating a CatchHandlerType from a handler type; will determine /// whether the type is a pointer or reference and will strip off the top /// level pointer and cv-qualifiers. CatchHandlerType(QualType Q) : QT(Q), IsPointer(false) { if (QT->isPointerType()) IsPointer = true; if (IsPointer || QT->isReferenceType()) QT = QT->getPointeeType(); QT = QT.getUnqualifiedType(); } /// Used when creating a CatchHandlerType from a base class type; pretends the /// type passed in had the pointer qualifier, does not need to get an /// unqualified type. CatchHandlerType(QualType QT, bool IsPointer) : QT(QT), IsPointer(IsPointer) {} QualType underlying() const { return QT; } bool isPointer() const { return IsPointer; } friend bool operator==(const CatchHandlerType &LHS, const CatchHandlerType &RHS) { // If the pointer qualification does not match, we can return early. if (LHS.IsPointer != RHS.IsPointer) return false; // Otherwise, check the underlying type without cv-qualifiers. 
return LHS.QT == RHS.QT; } }; } // namespace namespace llvm { template <> struct DenseMapInfo { static CatchHandlerType getEmptyKey() { return CatchHandlerType(DenseMapInfo::getEmptyKey(), CatchHandlerType::ForDenseMap); } static CatchHandlerType getTombstoneKey() { return CatchHandlerType(DenseMapInfo::getTombstoneKey(), CatchHandlerType::ForDenseMap); } static unsigned getHashValue(const CatchHandlerType &Base) { return DenseMapInfo::getHashValue(Base.underlying()); } static bool isEqual(const CatchHandlerType &LHS, const CatchHandlerType &RHS) { return LHS == RHS; } }; } namespace { class CatchTypePublicBases { ASTContext &Ctx; const llvm::DenseMap &TypesToCheck; const bool CheckAgainstPointer; CXXCatchStmt *FoundHandler; CanQualType FoundHandlerType; public: CatchTypePublicBases( ASTContext &Ctx, const llvm::DenseMap &T, bool C) : Ctx(Ctx), TypesToCheck(T), CheckAgainstPointer(C), FoundHandler(nullptr) {} CXXCatchStmt *getFoundHandler() const { return FoundHandler; } CanQualType getFoundHandlerType() const { return FoundHandlerType; } bool operator()(const CXXBaseSpecifier *S, CXXBasePath &) { if (S->getAccessSpecifier() == AccessSpecifier::AS_public) { CatchHandlerType Check(S->getType(), CheckAgainstPointer); const auto &M = TypesToCheck; auto I = M.find(Check); if (I != M.end()) { FoundHandler = I->second; FoundHandlerType = Ctx.getCanonicalType(S->getType()); return true; } } return false; } }; } /// ActOnCXXTryBlock - Takes a try compound-statement and a number of /// handlers and creates a try statement from them. StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef Handlers) { // Don't report an error if 'try' is used in system headers. if (!getLangOpts().CXXExceptions && !getSourceManager().isInSystemHeader(TryLoc) && !getLangOpts().CUDA) { // Delay error emission for the OpenMP device code. targetDiag(TryLoc, diag::err_exceptions_disabled) << "try"; } // Exceptions aren't allowed in CUDA device code. 
if (getLangOpts().CUDA) CUDADiagIfDeviceCode(TryLoc, diag::err_cuda_device_exceptions) << "try" << CurrentCUDATarget(); if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope()) Diag(TryLoc, diag::err_omp_simd_region_cannot_use_stmt) << "try"; sema::FunctionScopeInfo *FSI = getCurFunction(); // C++ try is incompatible with SEH __try. if (!getLangOpts().Borland && FSI->FirstSEHTryLoc.isValid()) { Diag(TryLoc, diag::err_mixing_cxx_try_seh_try); Diag(FSI->FirstSEHTryLoc, diag::note_conflicting_try_here) << "'__try'"; } const unsigned NumHandlers = Handlers.size(); assert(!Handlers.empty() && "The parser shouldn't call this if there are no handlers."); llvm::DenseMap HandledTypes; for (unsigned i = 0; i < NumHandlers; ++i) { CXXCatchStmt *H = cast(Handlers[i]); // Diagnose when the handler is a catch-all handler, but it isn't the last // handler for the try block. [except.handle]p5. Also, skip exception // declarations that are invalid, since we can't usefully report on them. if (!H->getExceptionDecl()) { if (i < NumHandlers - 1) return StmtError(Diag(H->getBeginLoc(), diag::err_early_catch_all)); continue; } else if (H->getExceptionDecl()->isInvalidDecl()) continue; // Walk the type hierarchy to diagnose when this type has already been // handled (duplication), or cannot be handled (derivation inversion). We // ignore top-level cv-qualifiers, per [except.handle]p3 CatchHandlerType HandlerCHT = (QualType)Context.getCanonicalType(H->getCaughtType()); // We can ignore whether the type is a reference or a pointer; we need the // underlying declaration type in order to get at the underlying record // decl, if there is one. QualType Underlying = HandlerCHT.underlying(); if (auto *RD = Underlying->getAsCXXRecordDecl()) { if (!RD->hasDefinition()) continue; // Check that none of the public, unambiguous base classes are in the // map ([except.handle]p1). Give the base classes the same pointer // qualification as the original type we are basing off of. 
This allows // comparison against the handler type using the same top-level pointer // as the original type. CXXBasePaths Paths; Paths.setOrigin(RD); CatchTypePublicBases CTPB(Context, HandledTypes, HandlerCHT.isPointer()); if (RD->lookupInBases(CTPB, Paths)) { const CXXCatchStmt *Problem = CTPB.getFoundHandler(); if (!Paths.isAmbiguous(CTPB.getFoundHandlerType())) { Diag(H->getExceptionDecl()->getTypeSpecStartLoc(), diag::warn_exception_caught_by_earlier_handler) << H->getCaughtType(); Diag(Problem->getExceptionDecl()->getTypeSpecStartLoc(), diag::note_previous_exception_handler) << Problem->getCaughtType(); } } } // Add the type the list of ones we have handled; diagnose if we've already // handled it. auto R = HandledTypes.insert(std::make_pair(H->getCaughtType(), H)); if (!R.second) { const CXXCatchStmt *Problem = R.first->second; Diag(H->getExceptionDecl()->getTypeSpecStartLoc(), diag::warn_exception_caught_by_earlier_handler) << H->getCaughtType(); Diag(Problem->getExceptionDecl()->getTypeSpecStartLoc(), diag::note_previous_exception_handler) << Problem->getCaughtType(); } } FSI->setHasCXXTry(TryLoc); return CXXTryStmt::Create(Context, TryLoc, TryBlock, Handlers); } StmtResult Sema::ActOnSEHTryBlock(bool IsCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler) { assert(TryBlock && Handler); sema::FunctionScopeInfo *FSI = getCurFunction(); // SEH __try is incompatible with C++ try. Borland appears to support this, // however. if (!getLangOpts().Borland) { if (FSI->FirstCXXTryLoc.isValid()) { Diag(TryLoc, diag::err_mixing_cxx_try_seh_try); Diag(FSI->FirstCXXTryLoc, diag::note_conflicting_try_here) << "'try'"; } } FSI->setHasSEHTry(TryLoc); // Reject __try in Obj-C methods, blocks, and captured decls, since we don't // track if they use SEH. 
DeclContext *DC = CurContext; while (DC && !DC->isFunctionOrMethod()) DC = DC->getParent(); FunctionDecl *FD = dyn_cast_or_null(DC); if (FD) FD->setUsesSEHTry(true); else Diag(TryLoc, diag::err_seh_try_outside_functions); // Reject __try on unsupported targets. if (!Context.getTargetInfo().isSEHTrySupported()) Diag(TryLoc, diag::err_seh_try_unsupported); return SEHTryStmt::Create(Context, IsCXXTry, TryLoc, TryBlock, Handler); } StmtResult Sema::ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block) { assert(FilterExpr && Block); QualType FTy = FilterExpr->getType(); if (!FTy->isIntegerType() && !FTy->isDependentType()) { return StmtError( Diag(FilterExpr->getExprLoc(), diag::err_filter_expression_integral) << FTy); } return SEHExceptStmt::Create(Context, Loc, FilterExpr, Block); } void Sema::ActOnStartSEHFinallyBlock() { CurrentSEHFinally.push_back(CurScope); } void Sema::ActOnAbortSEHFinallyBlock() { CurrentSEHFinally.pop_back(); } StmtResult Sema::ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block) { assert(Block); CurrentSEHFinally.pop_back(); return SEHFinallyStmt::Create(Context, Loc, Block); } StmtResult Sema::ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope) { Scope *SEHTryParent = CurScope; while (SEHTryParent && !SEHTryParent->isSEHTryScope()) SEHTryParent = SEHTryParent->getParent(); if (!SEHTryParent) return StmtError(Diag(Loc, diag::err_ms___leave_not_in___try)); CheckJumpOutOfSEHFinally(*this, Loc, *SEHTryParent); return new (Context) SEHLeaveStmt(Loc); } StmtResult Sema::BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested) { return new (Context) MSDependentExistsStmt(KeywordLoc, IsIfExists, QualifierLoc, NameInfo, cast(Nested)); } StmtResult Sema::ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested) { return BuildMSDependentExistsStmt(KeywordLoc, 
IsIfExists, SS.getWithLocInContext(Context), GetNameFromUnqualifiedId(Name), Nested); } RecordDecl* Sema::CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams) { DeclContext *DC = CurContext; while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext())) DC = DC->getParent(); RecordDecl *RD = nullptr; if (getLangOpts().CPlusPlus) RD = CXXRecordDecl::Create(Context, TTK_Struct, DC, Loc, Loc, /*Id=*/nullptr); else RD = RecordDecl::Create(Context, TTK_Struct, DC, Loc, Loc, /*Id=*/nullptr); RD->setCapturedRecord(); DC->addDecl(RD); RD->setImplicit(); RD->startDefinition(); assert(NumParams > 0 && "CapturedStmt requires context parameter"); CD = CapturedDecl::Create(Context, CurContext, NumParams); DC->addDecl(CD); return RD; } static bool buildCapturedStmtCaptureList(Sema &S, CapturedRegionScopeInfo *RSI, SmallVectorImpl &Captures, SmallVectorImpl &CaptureInits) { for (const sema::Capture &Cap : RSI->Captures) { if (Cap.isInvalid()) continue; // Form the initializer for the capture. ExprResult Init = S.BuildCaptureInit(Cap, Cap.getLocation(), RSI->CapRegionKind == CR_OpenMP); // FIXME: Bail out now if the capture is not used and the initializer has // no side-effects. // Create a field for this capture. FieldDecl *Field = S.BuildCaptureField(RSI->TheRecordDecl, Cap); // Add the capture to our list of captures. if (Cap.isThisCapture()) { Captures.push_back(CapturedStmt::Capture(Cap.getLocation(), CapturedStmt::VCK_This)); } else if (Cap.isVLATypeCapture()) { Captures.push_back( CapturedStmt::Capture(Cap.getLocation(), CapturedStmt::VCK_VLAType)); } else { assert(Cap.isVariableCapture() && "unknown kind of capture"); if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP) S.setOpenMPCaptureKind(Field, Cap.getVariable(), RSI->OpenMPLevel); Captures.push_back(CapturedStmt::Capture(Cap.getLocation(), Cap.isReferenceCapture() ? 
CapturedStmt::VCK_ByRef : CapturedStmt::VCK_ByCopy, Cap.getVariable())); } CaptureInits.push_back(Init.get()); } return false; } void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams) { CapturedDecl *CD = nullptr; RecordDecl *RD = CreateCapturedStmtRecordDecl(CD, Loc, NumParams); // Build the context parameter DeclContext *DC = CapturedDecl::castToDeclContext(CD); IdentifierInfo *ParamName = &Context.Idents.get("__context"); QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD)); auto *Param = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType, ImplicitParamDecl::CapturedContext); DC->addDecl(Param); CD->setContextParam(0, Param); // Enter the capturing scope for this captured region. PushCapturedRegionScope(CurScope, CD, RD, Kind); if (CurScope) PushDeclContext(CurScope, CD); else CurContext = CD; PushExpressionEvaluationContext( ExpressionEvaluationContext::PotentiallyEvaluated); } void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef Params, unsigned OpenMPCaptureLevel) { CapturedDecl *CD = nullptr; RecordDecl *RD = CreateCapturedStmtRecordDecl(CD, Loc, Params.size()); // Build the context parameter DeclContext *DC = CapturedDecl::castToDeclContext(CD); bool ContextIsFound = false; unsigned ParamNum = 0; for (ArrayRef::iterator I = Params.begin(), E = Params.end(); I != E; ++I, ++ParamNum) { if (I->second.isNull()) { assert(!ContextIsFound && "null type has been found already for '__context' parameter"); IdentifierInfo *ParamName = &Context.Idents.get("__context"); QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD)) .withConst() .withRestrict(); auto *Param = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType, ImplicitParamDecl::CapturedContext); DC->addDecl(Param); CD->setContextParam(ParamNum, Param); ContextIsFound = true; } else { IdentifierInfo *ParamName = 
&Context.Idents.get(I->first); auto *Param = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, I->second, ImplicitParamDecl::CapturedContext); DC->addDecl(Param); CD->setParam(ParamNum, Param); } } assert(ContextIsFound && "no null type for '__context' parameter"); if (!ContextIsFound) { // Add __context implicitly if it is not specified. IdentifierInfo *ParamName = &Context.Idents.get("__context"); QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD)); auto *Param = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType, ImplicitParamDecl::CapturedContext); DC->addDecl(Param); CD->setContextParam(ParamNum, Param); } // Enter the capturing scope for this captured region. PushCapturedRegionScope(CurScope, CD, RD, Kind, OpenMPCaptureLevel); if (CurScope) PushDeclContext(CurScope, CD); else CurContext = CD; PushExpressionEvaluationContext( ExpressionEvaluationContext::PotentiallyEvaluated); } void Sema::ActOnCapturedRegionError() { DiscardCleanupsInEvaluationContext(); PopExpressionEvaluationContext(); PopDeclContext(); PoppedFunctionScopePtr ScopeRAII = PopFunctionScopeInfo(); CapturedRegionScopeInfo *RSI = cast(ScopeRAII.get()); RecordDecl *Record = RSI->TheRecordDecl; Record->setInvalidDecl(); SmallVector Fields(Record->fields()); ActOnFields(/*Scope=*/nullptr, Record->getLocation(), Record, Fields, SourceLocation(), SourceLocation(), ParsedAttributesView()); } StmtResult Sema::ActOnCapturedRegionEnd(Stmt *S) { // Leave the captured scope before we start creating captures in the // enclosing scope. 
DiscardCleanupsInEvaluationContext(); PopExpressionEvaluationContext(); PopDeclContext(); PoppedFunctionScopePtr ScopeRAII = PopFunctionScopeInfo(); CapturedRegionScopeInfo *RSI = cast(ScopeRAII.get()); SmallVector Captures; SmallVector CaptureInits; if (buildCapturedStmtCaptureList(*this, RSI, Captures, CaptureInits)) return StmtError(); CapturedDecl *CD = RSI->TheCapturedDecl; RecordDecl *RD = RSI->TheRecordDecl; CapturedStmt *Res = CapturedStmt::Create( getASTContext(), S, static_cast(RSI->CapRegionKind), Captures, CaptureInits, CD, RD); CD->setBody(Res->getCapturedStmt()); RD->completeDefinition(); return Res; } diff --git a/compiler-rt/lib/builtins/clear_cache.c b/compiler-rt/lib/builtins/clear_cache.c index 3c12b74e8fa6..da0715914b41 100644 --- a/compiler-rt/lib/builtins/clear_cache.c +++ b/compiler-rt/lib/builtins/clear_cache.c @@ -1,179 +1,186 @@ //===-- clear_cache.c - Implement __clear_cache ---------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "int_lib.h" #if defined(__linux__) #include #endif #include #if __APPLE__ #include #endif #if defined(_WIN32) // Forward declare Win32 APIs since the GCC mode driver does not handle the // newer SDKs as well as needed. 
uint32_t FlushInstructionCache(uintptr_t hProcess, void *lpBaseAddress, uintptr_t dwSize); uintptr_t GetCurrentProcess(void); #endif #if defined(__FreeBSD__) && defined(__arm__) // clang-format off #include #include // clang-format on #endif #if defined(__NetBSD__) && defined(__arm__) #include #endif -#if defined(__OpenBSD__) && (defined(__arm__) || defined(__mips__)) +#if defined(__OpenBSD__) && (defined(__arm__) || defined(__mips__) || defined(__riscv)) // clang-format off #include #include // clang-format on #endif #if defined(__linux__) && defined(__mips__) #include #include #include #endif #if defined(__linux__) && defined(__riscv) // to get platform-specific syscall definitions #include #endif // The compiler generates calls to __clear_cache() when creating // trampoline functions on the stack for use with nested functions. // It is expected to invalidate the instruction cache for the // specified range. void __clear_cache(void *start, void *end) { #if __i386__ || __x86_64__ || defined(_M_IX86) || defined(_M_X64) // Intel processors have a unified instruction and data cache // so there is nothing to do #elif defined(_WIN32) && (defined(__arm__) || defined(__aarch64__)) FlushInstructionCache(GetCurrentProcess(), start, end - start); #elif defined(__arm__) && !defined(__APPLE__) #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) struct arm_sync_icache_args arg; arg.addr = (uintptr_t)start; arg.len = (uintptr_t)end - (uintptr_t)start; sysarch(ARM_SYNC_ICACHE, &arg); #elif defined(__linux__) // We used to include asm/unistd.h for the __ARM_NR_cacheflush define, but // it also brought many other unused defines, as well as a dependency on // kernel headers to be installed. // // This value is stable at least since Linux 3.13 and should remain so for // compatibility reasons, warranting it's re-definition here. 
#define __ARM_NR_cacheflush 0x0f0002 register int start_reg __asm("r0") = (int)(intptr_t)start; const register int end_reg __asm("r1") = (int)(intptr_t)end; const register int flags __asm("r2") = 0; const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush; __asm __volatile("svc 0x0" : "=r"(start_reg) : "r"(syscall_nr), "r"(start_reg), "r"(end_reg), "r"(flags)); assert(start_reg == 0 && "Cache flush syscall failed."); #else compilerrt_abort(); #endif #elif defined(__linux__) && defined(__mips__) const uintptr_t start_int = (uintptr_t)start; const uintptr_t end_int = (uintptr_t)end; syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE); #elif defined(__mips__) && defined(__OpenBSD__) cacheflush(start, (uintptr_t)end - (uintptr_t)start, BCACHE); #elif defined(__aarch64__) && !defined(__APPLE__) uint64_t xstart = (uint64_t)(uintptr_t)start; uint64_t xend = (uint64_t)(uintptr_t)end; // Get Cache Type Info. static uint64_t ctr_el0 = 0; if (ctr_el0 == 0) __asm __volatile("mrs %0, ctr_el0" : "=r"(ctr_el0)); // The DC and IC instructions must use 64-bit registers so we don't use // uintptr_t in case this runs in an IPL32 environment. uint64_t addr; // If CTR_EL0.IDC is set, data cache cleaning to the point of unification // is not required for instruction to data coherence. if (((ctr_el0 >> 28) & 0x1) == 0x0) { const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15); for (addr = xstart & ~(dcache_line_size - 1); addr < xend; addr += dcache_line_size) __asm __volatile("dc cvau, %0" ::"r"(addr)); } __asm __volatile("dsb ish"); // If CTR_EL0.DIC is set, instruction cache invalidation to the point of // unification is not required for instruction to data coherence. 
if (((ctr_el0 >> 29) & 0x1) == 0x0) { const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15); for (addr = xstart & ~(icache_line_size - 1); addr < xend; addr += icache_line_size) __asm __volatile("ic ivau, %0" ::"r"(addr)); __asm __volatile("dsb ish"); } __asm __volatile("isb sy"); #elif defined(__powerpc64__) const size_t line_size = 32; const size_t len = (uintptr_t)end - (uintptr_t)start; const uintptr_t mask = ~(line_size - 1); const uintptr_t start_line = ((uintptr_t)start) & mask; const uintptr_t end_line = ((uintptr_t)start + len + line_size - 1) & mask; for (uintptr_t line = start_line; line < end_line; line += line_size) __asm__ volatile("dcbf 0, %0" : : "r"(line)); __asm__ volatile("sync"); for (uintptr_t line = start_line; line < end_line; line += line_size) __asm__ volatile("icbi 0, %0" : : "r"(line)); __asm__ volatile("isync"); #elif defined(__sparc__) const size_t dword_size = 8; const size_t len = (uintptr_t)end - (uintptr_t)start; const uintptr_t mask = ~(dword_size - 1); const uintptr_t start_dword = ((uintptr_t)start) & mask; const uintptr_t end_dword = ((uintptr_t)start + len + dword_size - 1) & mask; for (uintptr_t dword = start_dword; dword < end_dword; dword += dword_size) __asm__ volatile("flush %0" : : "r"(dword)); #elif defined(__riscv) && defined(__linux__) // See: arch/riscv/include/asm/cacheflush.h, arch/riscv/kernel/sys_riscv.c register void *start_reg __asm("a0") = start; const register void *end_reg __asm("a1") = end; // "0" means that we clear cache for all threads (SYS_RISCV_FLUSH_ICACHE_ALL) const register long flags __asm("a2") = 0; const register long syscall_nr __asm("a7") = __NR_riscv_flush_icache; __asm __volatile("ecall" : "=r"(start_reg) : "r"(start_reg), "r"(end_reg), "r"(flags), "r"(syscall_nr)); assert(start_reg == 0 && "Cache flush syscall failed."); +#elif defined(__riscv) && defined(__OpenBSD__) + struct riscv_sync_icache_args arg; + + arg.addr = (uintptr_t)start; + arg.len = (uintptr_t)end - (uintptr_t)start; + + 
sysarch(RISCV_SYNC_ICACHE, &arg); #else #if __APPLE__ // On Darwin, sys_icache_invalidate() provides this functionality sys_icache_invalidate(start, end - start); #elif defined(__ve__) __asm__ volatile("fencec 2"); #else compilerrt_abort(); #endif #endif } diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp index 1f9fa961fc26..7d952e9037f1 100644 --- a/lld/ELF/InputSection.cpp +++ b/lld/ELF/InputSection.cpp @@ -1,1499 +1,1492 @@ //===- InputSection.cpp ---------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "InputSection.h" #include "Config.h" #include "EhFrame.h" #include "InputFiles.h" #include "LinkerScript.h" #include "OutputSections.h" #include "Relocations.h" #include "SymbolTable.h" #include "Symbols.h" #include "SyntheticSections.h" #include "Target.h" #include "Thunks.h" #include "lld/Common/ErrorHandler.h" #include "lld/Common/Memory.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Compression.h" #include "llvm/Support/Endian.h" #include "llvm/Support/Threading.h" #include "llvm/Support/xxhash.h" #include #include #include #include #include using namespace llvm; using namespace llvm::ELF; using namespace llvm::object; using namespace llvm::support; using namespace llvm::support::endian; using namespace llvm::sys; using namespace lld; using namespace lld::elf; std::vector elf::inputSections; DenseSet> elf::ppc64noTocRelax; // Returns a string to construct an error message. 
std::string lld::toString(const InputSectionBase *sec) { return (toString(sec->file) + ":(" + sec->name + ")").str(); } template static ArrayRef getSectionContents(ObjFile &file, const typename ELFT::Shdr &hdr) { if (hdr.sh_type == SHT_NOBITS) return makeArrayRef(nullptr, hdr.sh_size); return check(file.getObj().getSectionContents(hdr)); } InputSectionBase::InputSectionBase(InputFile *file, uint64_t flags, uint32_t type, uint64_t entsize, uint32_t link, uint32_t info, uint32_t alignment, ArrayRef data, StringRef name, Kind sectionKind) : SectionBase(sectionKind, name, flags, entsize, alignment, type, info, link), file(file), rawData(data) { // In order to reduce memory allocation, we assume that mergeable // sections are smaller than 4 GiB, which is not an unreasonable // assumption as of 2017. if (sectionKind == SectionBase::Merge && rawData.size() > UINT32_MAX) error(toString(this) + ": section too large"); numRelocations = 0; areRelocsRela = false; // The ELF spec states that a value of 0 means the section has // no alignment constraints. uint32_t v = std::max(alignment, 1); if (!isPowerOf2_64(v)) fatal(toString(this) + ": sh_addralign is not a power of 2"); this->alignment = v; // In ELF, each section can be compressed by zlib, and if compressed, // section name may be mangled by appending "z" (e.g. ".zdebug_info"). // If that's the case, demangle section name so that we can handle a // section as if it weren't compressed. 
if ((flags & SHF_COMPRESSED) || name.startswith(".zdebug")) { if (!zlib::isAvailable()) error(toString(file) + ": contains a compressed section, " + "but zlib is not available"); - parseCompressedHeader(); + switch (config->ekind) { + case ELF32LEKind: + parseCompressedHeader(); + break; + case ELF32BEKind: + parseCompressedHeader(); + break; + case ELF64LEKind: + parseCompressedHeader(); + break; + case ELF64BEKind: + parseCompressedHeader(); + break; + default: + llvm_unreachable("unknown ELFT"); + } } } // Drop SHF_GROUP bit unless we are producing a re-linkable object file. // SHF_GROUP is a marker that a section belongs to some comdat group. // That flag doesn't make sense in an executable. static uint64_t getFlags(uint64_t flags) { flags &= ~(uint64_t)SHF_INFO_LINK; if (!config->relocatable) flags &= ~(uint64_t)SHF_GROUP; return flags; } // GNU assembler 2.24 and LLVM 4.0.0's MC (the newest release as of // March 2017) fail to infer section types for sections starting with // ".init_array." or ".fini_array.". They set SHT_PROGBITS instead of // SHF_INIT_ARRAY. As a result, the following assembler directive // creates ".init_array.100" with SHT_PROGBITS, for example. // // .section .init_array.100, "aw" // // This function forces SHT_{INIT,FINI}_ARRAY so that we can handle // incorrect inputs as if they were correct from the beginning. 
static uint64_t getType(uint64_t type, StringRef name) { if (type == SHT_PROGBITS && name.startswith(".init_array.")) return SHT_INIT_ARRAY; if (type == SHT_PROGBITS && name.startswith(".fini_array.")) return SHT_FINI_ARRAY; return type; } template InputSectionBase::InputSectionBase(ObjFile &file, const typename ELFT::Shdr &hdr, StringRef name, Kind sectionKind) : InputSectionBase(&file, getFlags(hdr.sh_flags), getType(hdr.sh_type, name), hdr.sh_entsize, hdr.sh_link, hdr.sh_info, hdr.sh_addralign, getSectionContents(file, hdr), name, sectionKind) { // We reject object files having insanely large alignments even though // they are allowed by the spec. I think 4GB is a reasonable limitation. // We might want to relax this in the future. if (hdr.sh_addralign > UINT32_MAX) fatal(toString(&file) + ": section sh_addralign is too large"); } size_t InputSectionBase::getSize() const { if (auto *s = dyn_cast(this)) return s->getSize(); if (uncompressedSize >= 0) return uncompressedSize; return rawData.size() - bytesDropped; } void InputSectionBase::uncompress() const { size_t size = uncompressedSize; char *uncompressedBuf; { static std::mutex mu; std::lock_guard lock(mu); uncompressedBuf = bAlloc.Allocate(size); } if (Error e = zlib::uncompress(toStringRef(rawData), uncompressedBuf, size)) fatal(toString(this) + ": uncompress failed: " + llvm::toString(std::move(e))); rawData = makeArrayRef((uint8_t *)uncompressedBuf, size); uncompressedSize = -1; } uint64_t InputSectionBase::getOffsetInFile() const { const uint8_t *fileStart = (const uint8_t *)file->mb.getBufferStart(); const uint8_t *secStart = data().begin(); return secStart - fileStart; } uint64_t SectionBase::getOffset(uint64_t offset) const { switch (kind()) { case Output: { auto *os = cast(this); // For output sections we treat offset -1 as the end of the section. return offset == uint64_t(-1) ? 
os->size : offset; } case Regular: case Synthetic: return cast(this)->getOffset(offset); case EHFrame: // The file crtbeginT.o has relocations pointing to the start of an empty // .eh_frame that is known to be the first in the link. It does that to // identify the start of the output .eh_frame. return offset; case Merge: const MergeInputSection *ms = cast(this); if (InputSection *isec = ms->getParent()) return isec->getOffset(ms->getParentOffset(offset)); return ms->getParentOffset(offset); } llvm_unreachable("invalid section kind"); } uint64_t SectionBase::getVA(uint64_t offset) const { const OutputSection *out = getOutputSection(); return (out ? out->addr : 0) + getOffset(offset); } OutputSection *SectionBase::getOutputSection() { InputSection *sec; if (auto *isec = dyn_cast(this)) sec = isec; else if (auto *ms = dyn_cast(this)) sec = ms->getParent(); else if (auto *eh = dyn_cast(this)) sec = eh->getParent(); else return cast(this); return sec ? sec->getParent() : nullptr; } // When a section is compressed, `rawData` consists with a header followed // by zlib-compressed data. This function parses a header to initialize // `uncompressedSize` member and remove the header from `rawData`. -void InputSectionBase::parseCompressedHeader() { - using Chdr64 = typename ELF64LE::Chdr; - using Chdr32 = typename ELF32LE::Chdr; - +template void InputSectionBase::parseCompressedHeader() { // Old-style header if (name.startswith(".zdebug")) { if (!toStringRef(rawData).startswith("ZLIB")) { error(toString(this) + ": corrupted compressed section header"); return; } rawData = rawData.slice(4); if (rawData.size() < 8) { error(toString(this) + ": corrupted compressed section header"); return; } uncompressedSize = read64be(rawData.data()); rawData = rawData.slice(8); // Restore the original section name. // (e.g. ".zdebug_info" -> ".debug_info") name = saver.save("." 
+ name.substr(2)); return; } assert(flags & SHF_COMPRESSED); flags &= ~(uint64_t)SHF_COMPRESSED; - // New-style 64-bit header - if (config->is64) { - if (rawData.size() < sizeof(Chdr64)) { - error(toString(this) + ": corrupted compressed section"); - return; - } - - auto *hdr = reinterpret_cast(rawData.data()); - if (hdr->ch_type != ELFCOMPRESS_ZLIB) { - error(toString(this) + ": unsupported compression type"); - return; - } - - uncompressedSize = hdr->ch_size; - alignment = std::max(hdr->ch_addralign, 1); - rawData = rawData.slice(sizeof(*hdr)); - return; - } - - // New-style 32-bit header - if (rawData.size() < sizeof(Chdr32)) { + // New-style header + if (rawData.size() < sizeof(typename ELFT::Chdr)) { error(toString(this) + ": corrupted compressed section"); return; } - auto *hdr = reinterpret_cast(rawData.data()); + auto *hdr = reinterpret_cast(rawData.data()); if (hdr->ch_type != ELFCOMPRESS_ZLIB) { error(toString(this) + ": unsupported compression type"); return; } uncompressedSize = hdr->ch_size; alignment = std::max(hdr->ch_addralign, 1); rawData = rawData.slice(sizeof(*hdr)); } InputSection *InputSectionBase::getLinkOrderDep() const { assert(flags & SHF_LINK_ORDER); if (!link) return nullptr; return cast(file->getSections()[link]); } // Find a function symbol that encloses a given location. template Defined *InputSectionBase::getEnclosingFunction(uint64_t offset) { for (Symbol *b : file->getSymbols()) if (Defined *d = dyn_cast(b)) if (d->section == this && d->type == STT_FUNC && d->value <= offset && offset < d->value + d->size) return d; return nullptr; } // Returns a source location string. Used to construct an error message. template std::string InputSectionBase::getLocation(uint64_t offset) { std::string secAndOffset = (name + "+0x" + utohexstr(offset)).str(); // We don't have file for synthetic sections. 
if (getFile() == nullptr) return (config->outputFile + ":(" + secAndOffset + ")") .str(); // First check if we can get desired values from debugging information. if (Optional info = getFile()->getDILineInfo(this, offset)) return info->FileName + ":" + std::to_string(info->Line) + ":(" + secAndOffset + ")"; // File->sourceFile contains STT_FILE symbol that contains a // source file name. If it's missing, we use an object file name. std::string srcFile = std::string(getFile()->sourceFile); if (srcFile.empty()) srcFile = toString(file); if (Defined *d = getEnclosingFunction(offset)) return srcFile + ":(function " + toString(*d) + ": " + secAndOffset + ")"; // If there's no symbol, print out the offset in the section. return (srcFile + ":(" + secAndOffset + ")"); } // This function is intended to be used for constructing an error message. // The returned message looks like this: // // foo.c:42 (/home/alice/possibly/very/long/path/foo.c:42) // // Returns an empty string if there's no way to get line info. std::string InputSectionBase::getSrcMsg(const Symbol &sym, uint64_t offset) { return file->getSrcMsg(sym, *this, offset); } // Returns a filename string along with an optional section name. This // function is intended to be used for constructing an error // message. The returned message looks like this: // // path/to/foo.o:(function bar) // // or // // path/to/foo.o:(function bar) in archive path/to/bar.a std::string InputSectionBase::getObjMsg(uint64_t off) { std::string filename = std::string(file->getName()); std::string archive; if (!file->archiveName.empty()) archive = " in archive " + file->archiveName; // Find a symbol that encloses a given location. for (Symbol *b : file->getSymbols()) if (auto *d = dyn_cast(b)) if (d->section == this && d->value <= off && off < d->value + d->size) return filename + ":(" + toString(*d) + ")" + archive; // If there's no symbol, print out the offset in the section. 
return (filename + ":(" + name + "+0x" + utohexstr(off) + ")" + archive) .str(); } InputSection InputSection::discarded(nullptr, 0, 0, 0, ArrayRef(), ""); InputSection::InputSection(InputFile *f, uint64_t flags, uint32_t type, uint32_t alignment, ArrayRef data, StringRef name, Kind k) : InputSectionBase(f, flags, type, /*Entsize*/ 0, /*Link*/ 0, /*Info*/ 0, alignment, data, name, k) {} template InputSection::InputSection(ObjFile &f, const typename ELFT::Shdr &header, StringRef name) : InputSectionBase(f, header, name, InputSectionBase::Regular) {} bool InputSection::classof(const SectionBase *s) { return s->kind() == SectionBase::Regular || s->kind() == SectionBase::Synthetic; } OutputSection *InputSection::getParent() const { return cast_or_null(parent); } // Copy SHT_GROUP section contents. Used only for the -r option. template void InputSection::copyShtGroup(uint8_t *buf) { // ELFT::Word is the 32-bit integral type in the target endianness. using u32 = typename ELFT::Word; ArrayRef from = getDataAs(); auto *to = reinterpret_cast(buf); // The first entry is not a section number but a flag. *to++ = from[0]; // Adjust section numbers because section numbers in an input object files are // different in the output. We also need to handle combined or discarded // members. ArrayRef sections = file->getSections(); std::unordered_set seen; for (uint32_t idx : from.slice(1)) { OutputSection *osec = sections[idx]->getOutputSection(); if (osec && seen.insert(osec->sectionIndex).second) *to++ = osec->sectionIndex; } } InputSectionBase *InputSection::getRelocatedSection() const { if (!file || (type != SHT_RELA && type != SHT_REL)) return nullptr; ArrayRef sections = file->getSections(); return sections[info]; } // This is used for -r and --emit-relocs. We can't use memcpy to copy // relocations because we need to update symbol table offset and section index // for each relocation. So we copy relocations one by one. 
template void InputSection::copyRelocations(uint8_t *buf, ArrayRef rels) { InputSectionBase *sec = getRelocatedSection(); for (const RelTy &rel : rels) { RelType type = rel.getType(config->isMips64EL); const ObjFile *file = getFile(); Symbol &sym = file->getRelocTargetSym(rel); auto *p = reinterpret_cast(buf); buf += sizeof(RelTy); if (RelTy::IsRela) p->r_addend = getAddend(rel); // Output section VA is zero for -r, so r_offset is an offset within the // section, but for --emit-relocs it is a virtual address. p->r_offset = sec->getVA(rel.r_offset); p->setSymbolAndType(in.symTab->getSymbolIndex(&sym), type, config->isMips64EL); if (sym.type == STT_SECTION) { // We combine multiple section symbols into only one per // section. This means we have to update the addend. That is // trivial for Elf_Rela, but for Elf_Rel we have to write to the // section data. We do that by adding to the Relocation vector. // .eh_frame is horribly special and can reference discarded sections. To // avoid having to parse and recreate .eh_frame, we just replace any // relocation in it pointing to discarded sections with R_*_NONE, which // hopefully creates a frame that is ignored at runtime. Also, don't warn // on .gcc_except_table and debug sections. 
// // See the comment in maybeReportUndefined for PPC32 .got2 and PPC64 .toc auto *d = dyn_cast(&sym); if (!d) { if (!isDebugSection(*sec) && sec->name != ".eh_frame" && sec->name != ".gcc_except_table" && sec->name != ".got2" && sec->name != ".toc") { uint32_t secIdx = cast(sym).discardedSecIdx; Elf_Shdr_Impl sec = CHECK(file->getObj().sections(), file)[secIdx]; warn("relocation refers to a discarded section: " + CHECK(file->getObj().getSectionName(sec), file) + "\n>>> referenced by " + getObjMsg(p->r_offset)); } p->setSymbolAndType(0, 0, false); continue; } SectionBase *section = d->section->repl; if (!section->isLive()) { p->setSymbolAndType(0, 0, false); continue; } int64_t addend = getAddend(rel); const uint8_t *bufLoc = sec->data().begin() + rel.r_offset; if (!RelTy::IsRela) addend = target->getImplicitAddend(bufLoc, type); if (config->emachine == EM_MIPS && target->getRelExpr(type, sym, bufLoc) == R_MIPS_GOTREL) { // Some MIPS relocations depend on "gp" value. By default, // this value has 0x7ff0 offset from a .got section. But // relocatable files produced by a compiler or a linker // might redefine this default value and we must use it // for a calculation of the relocation result. When we // generate EXE or DSO it's trivial. Generating a relocatable // output is more difficult case because the linker does // not calculate relocations in this mode and loses // individual "gp" values used by each input object file. // As a workaround we add the "gp" value to the relocation // addend and save it back to the file. addend += sec->getFile()->mipsGp0; } if (RelTy::IsRela) p->r_addend = sym.getVA(addend) - section->getOutputSection()->addr; else if (config->relocatable && type != target->noneRel) sec->relocations.push_back({R_ABS, type, rel.r_offset, addend, &sym}); } else if (config->emachine == EM_PPC && type == R_PPC_PLTREL24 && p->r_addend >= 0x8000) { // Similar to R_MIPS_GPREL{16,32}. 
If the addend of R_PPC_PLTREL24 // indicates that r30 is relative to the input section .got2 // (r_addend>=0x8000), after linking, r30 should be relative to the output // section .got2 . To compensate for the shift, adjust r_addend by // ppc32Got2OutSecOff. p->r_addend += sec->file->ppc32Got2OutSecOff; } } } // The ARM and AArch64 ABI handle pc-relative relocations to undefined weak // references specially. The general rule is that the value of the symbol in // this context is the address of the place P. A further special case is that // branch relocations to an undefined weak reference resolve to the next // instruction. static uint32_t getARMUndefinedRelativeWeakVA(RelType type, uint32_t a, uint32_t p) { switch (type) { // Unresolved branch relocations to weak references resolve to next // instruction, this will be either 2 or 4 bytes on from P. case R_ARM_THM_JUMP11: return p + 2 + a; case R_ARM_CALL: case R_ARM_JUMP24: case R_ARM_PC24: case R_ARM_PLT32: case R_ARM_PREL31: case R_ARM_THM_JUMP19: case R_ARM_THM_JUMP24: return p + 4 + a; case R_ARM_THM_CALL: // We don't want an interworking BLX to ARM return p + 5 + a; // Unresolved non branch pc-relative relocations // R_ARM_TARGET2 which can be resolved relatively is not present as it never // targets a weak-reference. case R_ARM_MOVW_PREL_NC: case R_ARM_MOVT_PREL: case R_ARM_REL32: case R_ARM_THM_ALU_PREL_11_0: case R_ARM_THM_MOVW_PREL_NC: case R_ARM_THM_MOVT_PREL: case R_ARM_THM_PC12: return p + a; // p + a is unrepresentable as negative immediates can't be encoded. case R_ARM_THM_PC8: return p; } llvm_unreachable("ARM pc-relative relocation expected\n"); } // The comment above getARMUndefinedRelativeWeakVA applies to this function. static uint64_t getAArch64UndefinedRelativeWeakVA(uint64_t type, uint64_t p) { switch (type) { // Unresolved branch relocations to weak references resolve to next // instruction, this is 4 bytes on from P. 
case R_AARCH64_CALL26: case R_AARCH64_CONDBR19: case R_AARCH64_JUMP26: case R_AARCH64_TSTBR14: return p + 4; // Unresolved non branch pc-relative relocations case R_AARCH64_PREL16: case R_AARCH64_PREL32: case R_AARCH64_PREL64: case R_AARCH64_ADR_PREL_LO21: case R_AARCH64_LD_PREL_LO19: case R_AARCH64_PLT32: return p; } llvm_unreachable("AArch64 pc-relative relocation expected\n"); } static uint64_t getRISCVUndefinedRelativeWeakVA(uint64_t type, uint64_t p) { switch (type) { case R_RISCV_BRANCH: case R_RISCV_JAL: case R_RISCV_CALL: case R_RISCV_CALL_PLT: case R_RISCV_RVC_BRANCH: case R_RISCV_RVC_JUMP: return p; default: return 0; } } // ARM SBREL relocations are of the form S + A - B where B is the static base // The ARM ABI defines base to be "addressing origin of the output segment // defining the symbol S". We defined the "addressing origin"/static base to be // the base of the PT_LOAD segment containing the Sym. // The procedure call standard only defines a Read Write Position Independent // RWPI variant so in practice we should expect the static base to be the base // of the RW segment. static uint64_t getARMStaticBase(const Symbol &sym) { OutputSection *os = sym.getOutputSection(); if (!os || !os->ptLoad || !os->ptLoad->firstSec) fatal("SBREL relocation to " + sym.getName() + " without static base"); return os->ptLoad->firstSec->addr; } // For R_RISCV_PC_INDIRECT (R_RISCV_PCREL_LO12_{I,S}), the symbol actually // points the corresponding R_RISCV_PCREL_HI20 relocation, and the target VA // is calculated using PCREL_HI20's symbol. // // This function returns the R_RISCV_PCREL_HI20 relocation from // R_RISCV_PCREL_LO12's symbol and addend. 
// NOTE(review): template arguments appear stripped by extraction throughout
// this block — e.g. cast(sym) was presumably cast<Defined>(sym) and
// cast(d->section) presumably cast<InputSection>(d->section); confirm against
// upstream before building. Code tokens are left exactly as found.
static Relocation *getRISCVPCRelHi20(const Symbol *sym, uint64_t addend) {
  const Defined *d = cast(sym);
  if (!d->section) {
    error("R_RISCV_PCREL_LO12 relocation points to an absolute symbol: " +
          sym->getName());
    return nullptr;
  }
  InputSection *isec = cast(d->section);

  // The addend of the LO12 half is meaningless; it is the HI20 relocation's
  // addend that participates in the VA computation.
  if (addend != 0)
    warn("Non-zero addend in R_RISCV_PCREL_LO12 relocation to " +
         isec->getObjMsg(d->value) + " is ignored");

  // Relocations are sorted by offset, so we can use std::equal_range to do
  // binary search.
  Relocation r;
  r.offset = d->value;
  auto range = std::equal_range(isec->relocations.begin(),
                                isec->relocations.end(), r,
                                [](const Relocation &lhs, const Relocation &rhs) {
                                  return lhs.offset < rhs.offset;
                                });

  // Several relocations may share the offset; accept any HI20-flavored one.
  for (auto it = range.first; it != range.second; ++it)
    if (it->type == R_RISCV_PCREL_HI20 || it->type == R_RISCV_GOT_HI20 ||
        it->type == R_RISCV_TLS_GD_HI20 || it->type == R_RISCV_TLS_GOT_HI20)
      return &*it;

  error("R_RISCV_PCREL_LO12 relocation points to " +
        isec->getObjMsg(d->value) +
        " without an associated R_RISCV_PCREL_HI20 relocation");
  return nullptr;
}

// A TLS symbol's virtual address is relative to the TLS segment. Add a
// target-specific adjustment to produce a thread-pointer-relative offset.
static int64_t getTlsTpOffset(const Symbol &s) {
  // On targets that support TLSDESC, _TLS_MODULE_BASE_@tpoff = 0.
  if (&s == ElfSym::tlsModuleBase)
    return 0;

  // There are 2 TLS layouts. Among targets we support, x86 uses TLS Variant 2
  // while most others use Variant 1. At run time TP will be aligned to p_align.
  // Variant 1. TP will be followed by an optional gap (which is the size of 2
  // pointers on ARM/AArch64, 0 on other targets), followed by alignment
  // padding, then the static TLS blocks. The alignment padding is added so that
  // (TP + gap + padding) is congruent to p_vaddr modulo p_align.
  //
  // Variant 2. Static TLS blocks, followed by alignment padding are placed
  // before TP. The alignment padding is added so that (TP - padding -
  // p_memsz) is congruent to p_vaddr modulo p_align.
  PhdrEntry *tls = Out::tlsPhdr;
  switch (config->emachine) {
    // Variant 1.
  case EM_ARM:
  case EM_AARCH64:
    return s.getVA(0) + config->wordsize * 2 +
           ((tls->p_vaddr - config->wordsize * 2) & (tls->p_align - 1));
  case EM_MIPS:
  case EM_PPC:
  case EM_PPC64:
    // Adjusted Variant 1. TP is placed with a displacement of 0x7000, which is
    // to allow a signed 16-bit offset to reach 0x1000 of TCB/thread-library
    // data and 0xf000 of the program's TLS segment.
    return s.getVA(0) + (tls->p_vaddr & (tls->p_align - 1)) - 0x7000;
  case EM_RISCV:
    return s.getVA(0) + (tls->p_vaddr & (tls->p_align - 1));

    // Variant 2.
  case EM_HEXAGON:
  case EM_SPARCV9:
  case EM_386:
  case EM_X86_64:
    return s.getVA(0) - tls->p_memsz -
           ((-tls->p_vaddr - tls->p_memsz) & (tls->p_align - 1));
  default:
    llvm_unreachable("unhandled Config->EMachine");
  }
}

// Central dispatch: computes the value a relocation resolves to, given the
// relocation expression kind (RelExpr), the addend `a`, the place `p`, and
// the target symbol. Each case implements one S/A/P/GOT/PLT formula.
uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
                                            int64_t a, uint64_t p,
                                            const Symbol &sym, RelExpr expr) {
  switch (expr) {
  case R_ABS:
  case R_DTPREL:
  case R_RELAX_TLS_LD_TO_LE_ABS:
  case R_RELAX_GOT_PC_NOPIC:
  case R_RISCV_ADD:
    return sym.getVA(a);
  case R_ADDEND:
    return a;
  case R_ARM_SBREL:
    return sym.getVA(a) - getARMStaticBase(sym);
  case R_GOT:
  case R_RELAX_TLS_GD_TO_IE_ABS:
    return sym.getGotVA() + a;
  case R_GOTONLY_PC:
    return in.got->getVA() + a - p;
  case R_GOTPLTONLY_PC:
    return in.gotPlt->getVA() + a - p;
  case R_GOTREL:
  case R_PPC64_RELAX_TOC:
    return sym.getVA(a) - in.got->getVA();
  case R_GOTPLTREL:
    return sym.getVA(a) - in.gotPlt->getVA();
  case R_GOTPLT:
  case R_RELAX_TLS_GD_TO_IE_GOTPLT:
    return sym.getGotVA() + a - in.gotPlt->getVA();
  case R_TLSLD_GOT_OFF:
  case R_GOT_OFF:
  case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
    return sym.getGotOffset() + a;
  case R_AARCH64_GOT_PAGE_PC:
  case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
    return getAArch64Page(sym.getGotVA() + a) - getAArch64Page(p);
  case R_AARCH64_GOT_PAGE:
    return sym.getGotVA() + a - getAArch64Page(in.got->getVA());
  case R_GOT_PC:
  case R_RELAX_TLS_GD_TO_IE:
    return sym.getGotVA() + a - p;
  case R_MIPS_GOTREL:
    return sym.getVA(a) - in.mipsGot->getGp(file);
  case R_MIPS_GOT_GP:
    return in.mipsGot->getGp(file) + a;
  case R_MIPS_GOT_GP_PC: {
    // R_MIPS_LO16 expression has R_MIPS_GOT_GP_PC type iif the target
    // is _gp_disp symbol. In that case we should use the following
    // formula for calculation "AHL + GP - P + 4". For details see p. 4-19 at
    // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
    // microMIPS variants of these relocations use slightly different
    // expressions: AHL + GP - P + 3 for %lo() and AHL + GP - P - 1 for %hi()
    // to correctly handle less-significant bit of the microMIPS symbol.
    uint64_t v = in.mipsGot->getGp(file) + a - p;
    if (type == R_MIPS_LO16 || type == R_MICROMIPS_LO16)
      v += 4;
    if (type == R_MICROMIPS_LO16 || type == R_MICROMIPS_HI16)
      v -= 1;
    return v;
  }
  case R_MIPS_GOT_LOCAL_PAGE:
    // If relocation against MIPS local symbol requires GOT entry, this entry
    // should be initialized by 'page address'. This address is high 16-bits
    // of sum the symbol's value and the addend.
    return in.mipsGot->getVA() +
           in.mipsGot->getPageEntryOffset(file, sym, a) -
           in.mipsGot->getGp(file);
  case R_MIPS_GOT_OFF:
  case R_MIPS_GOT_OFF32:
    // In case of MIPS if a GOT relocation has non-zero addend this addend
    // should be applied to the GOT entry content not to the GOT entry offset.
    // That is why we use separate expression type.
    return in.mipsGot->getVA() +
           in.mipsGot->getSymEntryOffset(file, sym, a) -
           in.mipsGot->getGp(file);
  case R_MIPS_TLSGD:
    return in.mipsGot->getVA() +
           in.mipsGot->getGlobalDynOffset(file, sym) -
           in.mipsGot->getGp(file);
  case R_MIPS_TLSLD:
    return in.mipsGot->getVA() + in.mipsGot->getTlsIndexOffset(file) -
           in.mipsGot->getGp(file);
  case R_AARCH64_PAGE_PC: {
    uint64_t val = sym.isUndefWeak() ? p + a : sym.getVA(a);
    return getAArch64Page(val) - getAArch64Page(p);
  }
  case R_RISCV_PC_INDIRECT: {
    // Recurse with the paired HI20 relocation's symbol/addend/expr; the LO12
    // half itself contributes nothing to the VA.
    if (const Relocation *hiRel = getRISCVPCRelHi20(&sym, a))
      return getRelocTargetVA(file, hiRel->type, hiRel->addend, sym.getVA(),
                              *hiRel->sym, hiRel->expr);
    return 0;
  }
  case R_PC:
  case R_ARM_PCA: {
    uint64_t dest;
    if (expr == R_ARM_PCA)
      // Some PC relative ARM (Thumb) relocations align down the place.
      p = p & 0xfffffffc;
    if (sym.isUndefWeak()) {
      // On ARM and AArch64 a branch to an undefined weak resolves to the next
      // instruction, otherwise the place. On RISCV, resolve an undefined weak
      // to the same instruction to cause an infinite loop (making the user
      // aware of the issue) while ensuring no overflow.
      if (config->emachine == EM_ARM)
        dest = getARMUndefinedRelativeWeakVA(type, a, p);
      else if (config->emachine == EM_AARCH64)
        dest = getAArch64UndefinedRelativeWeakVA(type, p) + a;
      else if (config->emachine == EM_PPC)
        dest = p;
      else if (config->emachine == EM_RISCV)
        dest = getRISCVUndefinedRelativeWeakVA(type, p) + a;
      else
        dest = sym.getVA(a);
    } else {
      dest = sym.getVA(a);
    }
    return dest - p;
  }
  case R_PLT:
    return sym.getPltVA() + a;
  case R_PLT_PC:
  case R_PPC64_CALL_PLT:
    return sym.getPltVA() + a - p;
  case R_PPC32_PLTREL:
    // R_PPC_PLTREL24 uses the addend (usually 0 or 0x8000) to indicate r30
    // stores _GLOBAL_OFFSET_TABLE_ or .got2+0x8000. The addend is ignored for
    // target VA computation.
    return sym.getPltVA() - p;
  case R_PPC64_CALL: {
    uint64_t symVA = sym.getVA(a);
    // If we have an undefined weak symbol, we might get here with a symbol
    // address of zero. That could overflow, but the code must be unreachable,
    // so don't bother doing anything at all.
    if (!symVA)
      return 0;

    // PPC64 V2 ABI describes two entry points to a function. The global entry
    // point is used for calls where the caller and callee (may) have different
    // TOC base pointers and r2 needs to be modified to hold the TOC base for
    // the callee.
    // For local calls the caller and callee share the same
    // TOC base and so the TOC pointer initialization code should be skipped by
    // branching to the local entry point.
    return symVA - p + getPPC64GlobalEntryToLocalEntryOffset(sym.stOther);
  }
  case R_PPC64_TOCBASE:
    return getPPC64TocBase() + a;
  case R_RELAX_GOT_PC:
  case R_PPC64_RELAX_GOT_PC:
    return sym.getVA(a) - p;
  case R_RELAX_TLS_GD_TO_LE:
  case R_RELAX_TLS_IE_TO_LE:
  case R_RELAX_TLS_LD_TO_LE:
  case R_TPREL:
    // It is not very clear what to return if the symbol is undefined. With
    // --noinhibit-exec, even a non-weak undefined reference may reach here.
    // Just return A, which matches R_ABS, and the behavior of some dynamic
    // loaders.
    if (sym.isUndefined() || sym.isLazy())
      return a;
    return getTlsTpOffset(sym) + a;
  case R_RELAX_TLS_GD_TO_LE_NEG:
  case R_TPREL_NEG:
    if (sym.isUndefined())
      return a;
    return -getTlsTpOffset(sym) + a;
  case R_SIZE:
    return sym.getSize() + a;
  case R_TLSDESC:
    return in.got->getGlobalDynAddr(sym) + a;
  case R_TLSDESC_PC:
    return in.got->getGlobalDynAddr(sym) + a - p;
  case R_AARCH64_TLSDESC_PAGE:
    return getAArch64Page(in.got->getGlobalDynAddr(sym) + a) -
           getAArch64Page(p);
  case R_TLSGD_GOT:
    return in.got->getGlobalDynOffset(sym) + a;
  case R_TLSGD_GOTPLT:
    return in.got->getGlobalDynAddr(sym) + a - in.gotPlt->getVA();
  case R_TLSGD_PC:
    return in.got->getGlobalDynAddr(sym) + a - p;
  case R_TLSLD_GOTPLT:
    return in.got->getVA() + in.got->getTlsIndexOff() + a -
           in.gotPlt->getVA();
  case R_TLSLD_GOT:
    return in.got->getTlsIndexOff() + a;
  case R_TLSLD_PC:
    return in.got->getTlsIndexVA() + a - p;
  default:
    llvm_unreachable("invalid expression");
  }
}

// This function applies relocations to sections without SHF_ALLOC bit.
// Such sections are never mapped to memory at runtime. Debug sections are
// an example. Relocations in non-alloc sections are much easier to
// handle than in allocated sections because it will never need complex
// treatment such as GOT or PLT (because at runtime no one refers them).
// So, we handle relocations for non-alloc sections directly in this
// function as a performance optimization.
// NOTE(review): template headers/arguments were stripped by extraction in
// this block (e.g. this was presumably
// `template <class ELFT, class RelTy> void InputSection::relocateNonAlloc(...)`,
// `Optional<uint64_t> tombstone`, `dyn_cast<Defined>(&sym)`); confirm against
// upstream lld before building. All code tokens are left exactly as found.
template void InputSection::relocateNonAlloc(uint8_t *buf, ArrayRef rels) {
  const unsigned bits = sizeof(typename ELFT::uint) * 8;
  const bool isDebug = isDebugSection(*this);
  const bool isDebugLocOrRanges =
      isDebug && (name == ".debug_loc" || name == ".debug_ranges");
  const bool isDebugLine = isDebug && name == ".debug_line";
  // Tombstone value for relocations against dead symbols, from
  // -z dead-reloc-in-nonalloc=; last matching pattern wins (hence reverse).
  Optional tombstone;
  for (const auto &patAndValue : llvm::reverse(config->deadRelocInNonAlloc))
    if (patAndValue.first.match(this->name)) {
      tombstone = patAndValue.second;
      break;
    }

  for (const RelTy &rel : rels) {
    RelType type = rel.getType(config->isMips64EL);

    // GCC 8.0 or earlier have a bug that they emit R_386_GOTPC relocations
    // against _GLOBAL_OFFSET_TABLE_ for .debug_info. The bug has been fixed
    // in 2017 (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82630), but we
    // need to keep this bug-compatible code for a while.
    if (config->emachine == EM_386 && type == R_386_GOTPC)
      continue;

    uint64_t offset = rel.r_offset;
    uint8_t *bufLoc = buf + offset;
    int64_t addend = getAddend(rel);
    if (!RelTy::IsRela)
      addend += target->getImplicitAddend(bufLoc, type);

    Symbol &sym = getFile()->getRelocTargetSym(rel);
    RelExpr expr = target->getRelExpr(type, sym, bufLoc);
    if (expr == R_NONE)
      continue;

    if (expr == R_SIZE) {
      target->relocateNoSym(bufLoc, type,
                            SignExtend64(sym.getSize() + addend));
      continue;
    }

    // R_ABS/R_DTPREL and some other relocations can be used from non-SHF_ALLOC
    // sections.
    if (expr != R_ABS && expr != R_DTPREL && expr != R_GOTPLTREL &&
        expr != R_RISCV_ADD) {
      std::string msg = getLocation(offset) + ": has non-ABS relocation " +
                        toString(type) + " against symbol '" + toString(sym) +
                        "'";
      if (expr != R_PC && expr != R_ARM_PCA) {
        error(msg);
        return;
      }

      // If the control reaches here, we found a PC-relative relocation in a
      // non-ALLOC section. Since non-ALLOC section is not loaded into memory
      // at runtime, the notion of PC-relative doesn't make sense here. So,
      // this is a usage error. However, GNU linkers historically accept such
      // relocations without any errors and relocate them as if they were at
      // address 0. For bug-compatibilty, we accept them with warnings. We
      // know Steel Bank Common Lisp as of 2018 have this bug.
      warn(msg);
      target->relocateNoSym(
          bufLoc, type,
          SignExtend64(sym.getVA(addend - offset - outSecOff)));
      continue;
    }

    if (tombstone ||
        (isDebug && (type == target->symbolicRel || expr == R_DTPREL))) {
      // Resolve relocations in .debug_* referencing (discarded symbols or ICF
      // folded section symbols) to a tombstone value. Resolving to addend is
      // unsatisfactory because the result address range may collide with a
      // valid range of low address, or leave multiple CUs claiming ownership of
      // the same range of code, which may confuse consumers.
      //
      // To address the problems, we use -1 as a tombstone value for most
      // .debug_* sections. We have to ignore the addend because we don't want
      // to resolve an address attribute (which may have a non-zero addend) to
      // -1+addend (wrap around to a low address).
      //
      // R_DTPREL type relocations represent an offset into the dynamic thread
      // vector. The computed value is st_value plus a non-negative offset.
      // Negative values are invalid, so -1 can be used as the tombstone value.
      //
      // If the referenced symbol is discarded (made Undefined), or the
      // section defining the referenced symbol is garbage collected,
      // sym.getOutputSection() is nullptr. `ds->section->repl != ds->section`
      // catches the ICF folded case. However, resolving a relocation in
      // .debug_line to -1 would stop debugger users from setting breakpoints on
      // the folded-in function, so exclude .debug_line.
      //
      // For pre-DWARF-v5 .debug_loc and .debug_ranges, -1 is a reserved value
      // (base address selection entry), use 1 (which is used by GNU ld for
      // .debug_ranges).
      //
      // TODO To reduce disruption, we use 0 instead of -1 as the tombstone
      // value. Enable -1 in a future release.
      auto *ds = dyn_cast(&sym);
      if (!sym.getOutputSection() ||
          (ds && ds->section->repl != ds->section && !isDebugLine)) {
        // If -z dead-reloc-in-nonalloc= is specified, respect it.
        const uint64_t value =
            tombstone ? SignExtend64(*tombstone)
                      : (isDebugLocOrRanges ? 1 : 0);
        target->relocateNoSym(bufLoc, type, value);
        continue;
      }
    }
    target->relocateNoSym(bufLoc, type, SignExtend64(sym.getVA(addend)));
  }
}

// This is used when '-r' is given.
// For REL targets, InputSection::copyRelocations() may store artificial
// relocations aimed to update addends. They are handled in relocateAlloc()
// for allocatable sections, and this function does the same for
// non-allocatable sections, such as sections with debug information.
static void relocateNonAllocForRelocatable(InputSection *sec, uint8_t *buf) {
  const unsigned bits = config->is64 ? 64 : 32;
  for (const Relocation &rel : sec->relocations) {
    // InputSection::copyRelocations() adds only R_ABS relocations.
    assert(rel.expr == R_ABS);
    uint8_t *bufLoc = buf + rel.offset;
    uint64_t targetVA = SignExtend64(rel.sym->getVA(rel.addend), bits);
    target->relocate(bufLoc, rel, targetVA);
  }
}

// Top-level relocation entry point: dispatches to the alloc or the
// appropriate non-alloc path, after optionally adjusting split-stack
// prologues for executable sections.
template void InputSectionBase::relocate(uint8_t *buf, uint8_t *bufEnd) {
  if (flags & SHF_EXECINSTR)
    adjustSplitStackFunctionPrologues(buf, bufEnd);

  if (flags & SHF_ALLOC) {
    relocateAlloc(buf, bufEnd);
    return;
  }

  auto *sec = cast(this);
  if (config->relocatable)
    relocateNonAllocForRelocatable(sec, buf);
  else if (sec->areRelocsRela)
    sec->relocateNonAlloc(buf, sec->template relas());
  else
    sec->relocateNonAlloc(buf, sec->template rels());
}

// Applies relocations for a SHF_ALLOC section: computes each target VA with
// getRelocTargetVA and then either performs a TLS/GOT/TOC relaxation or a
// plain relocate, with a few PPC64-specific patch-ups.
void InputSectionBase::relocateAlloc(uint8_t *buf, uint8_t *bufEnd) {
  assert(flags & SHF_ALLOC);
  const unsigned bits = config->wordsize * 8;
  uint64_t lastPPCRelaxedRelocOff = UINT64_C(-1);

  for (const Relocation &rel : relocations) {
    if (rel.expr == R_NONE)
      continue;
    uint64_t offset = rel.offset;
    uint8_t *bufLoc = buf + offset;
    RelType type = rel.type;

    uint64_t addrLoc = getOutputSection()->addr + offset;
    if (auto *sec = dyn_cast(this))
      addrLoc += sec->outSecOff;
    RelExpr expr = rel.expr;
    uint64_t targetVA = SignExtend64(
        getRelocTargetVA(file, type, rel.addend, addrLoc, *rel.sym, expr),
        bits);

    switch (expr) {
    case R_RELAX_GOT_PC:
    case R_RELAX_GOT_PC_NOPIC:
      target->relaxGot(bufLoc, rel, targetVA);
      break;
    case R_PPC64_RELAX_GOT_PC: {
      // The R_PPC64_PCREL_OPT relocation must appear immediately after
      // R_PPC64_GOT_PCREL34 in the relocations table at the same offset.
      // We can only relax R_PPC64_PCREL_OPT if we have also relaxed
      // the associated R_PPC64_GOT_PCREL34 since only the latter has an
      // associated symbol. So save the offset when relaxing R_PPC64_GOT_PCREL34
      // and only relax the other if the saved offset matches.
      if (type == R_PPC64_GOT_PCREL34)
        lastPPCRelaxedRelocOff = offset;
      if (type == R_PPC64_PCREL_OPT && offset != lastPPCRelaxedRelocOff)
        break;
      target->relaxGot(bufLoc, rel, targetVA);
      break;
    }
    case R_PPC64_RELAX_TOC:
      // rel.sym refers to the STT_SECTION symbol associated to the .toc input
      // section. If an R_PPC64_TOC16_LO (.toc + addend) references the TOC
      // entry, there may be R_PPC64_TOC16_HA not paired with
      // R_PPC64_TOC16_LO_DS. Don't relax. This loses some relaxation
      // opportunities but is safe.
      if (ppc64noTocRelax.count({rel.sym, rel.addend}) ||
          !tryRelaxPPC64TocIndirection(rel, bufLoc))
        target->relocate(bufLoc, rel, targetVA);
      break;
    case R_RELAX_TLS_IE_TO_LE:
      target->relaxTlsIeToLe(bufLoc, rel, targetVA);
      break;
    case R_RELAX_TLS_LD_TO_LE:
    case R_RELAX_TLS_LD_TO_LE_ABS:
      target->relaxTlsLdToLe(bufLoc, rel, targetVA);
      break;
    case R_RELAX_TLS_GD_TO_LE:
    case R_RELAX_TLS_GD_TO_LE_NEG:
      target->relaxTlsGdToLe(bufLoc, rel, targetVA);
      break;
    case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
    case R_RELAX_TLS_GD_TO_IE:
    case R_RELAX_TLS_GD_TO_IE_ABS:
    case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
    case R_RELAX_TLS_GD_TO_IE_GOTPLT:
      target->relaxTlsGdToIe(bufLoc, rel, targetVA);
      break;
    case R_PPC64_CALL:
      // If this is a call to __tls_get_addr, it may be part of a TLS
      // sequence that has been relaxed and turned into a nop. In this
      // case, we don't want to handle it as a call.
      if (read32(bufLoc) == 0x60000000) // nop
        break;

      // Patch a nop (0x60000000) to a ld.
      if (rel.sym->needsTocRestore) {
        // gcc/gfortran 5.4, 6.3 and earlier versions do not add nop for
        // recursive calls even if the function is preemptible. This is not
        // wrong in the common case where the function is not preempted at
        // runtime. Just ignore.
        if ((bufLoc + 8 > bufEnd || read32(bufLoc + 4) != 0x60000000) &&
            rel.sym->file != file) {
          // Use substr(6) to remove the "__plt_" prefix.
          errorOrWarn(getErrorLocation(bufLoc) + "call to " +
                      lld::toString(*rel.sym).substr(6) +
                      " lacks nop, can't restore toc");
          break;
        }
        write32(bufLoc + 4, 0xe8410018); // ld %r2, 24(%r1)
      }
      target->relocate(bufLoc, rel, targetVA);
      break;
    default:
      target->relocate(bufLoc, rel, targetVA);
      break;
    }
  }

  // Apply jumpInstrMods. jumpInstrMods are created when the opcode of
  // a jmp insn must be modified to shrink the jmp insn or to flip the jmp
  // insn. This is primarily used to relax and optimize jumps created with
  // basic block sections.
  if (isa(this)) {
    for (const JumpInstrMod &jumpMod : jumpInstrMods) {
      uint64_t offset = jumpMod.offset;
      uint8_t *bufLoc = buf + offset;
      target->applyJumpInstrMod(bufLoc, jumpMod.original, jumpMod.size);
    }
  }
}

// For each function-defining prologue, find any calls to __morestack,
// and replace them with calls to __morestack_non_split.
static void switchMorestackCallsToMorestackNonSplit(
    DenseSet &prologues,
    std::vector &morestackCalls) {
  // If the target adjusted a function's prologue, all calls to
  // __morestack inside that function should be switched to
  // __morestack_non_split.
  Symbol *moreStackNonSplit = symtab->find("__morestack_non_split");
  if (!moreStackNonSplit) {
    error("Mixing split-stack objects requires a definition of "
          "__morestack_non_split");
    return;
  }

  // Sort both collections to compare addresses efficiently.
  llvm::sort(morestackCalls, [](const Relocation *l, const Relocation *r) {
    return l->offset < r->offset;
  });
  std::vector functions(prologues.begin(), prologues.end());
  llvm::sort(functions, [](const Defined *l, const Defined *r) {
    return l->value < r->value;
  });

  auto it = morestackCalls.begin();
  for (Defined *f : functions) {
    // Find the first call to __morestack within the function.
    while (it != morestackCalls.end() && (*it)->offset < f->value)
      ++it;
    // Adjust all calls inside the function.
    while (it != morestackCalls.end() && (*it)->offset < f->value + f->size) {
      (*it)->sym = moreStackNonSplit;
      ++it;
    }
  }
}

// Returns true if `offset` falls inside a function whose prologue we have
// already attempted to adjust.
static bool enclosingPrologueAttempted(uint64_t offset,
                                       const DenseSet &prologues) {
  for (Defined *f : prologues)
    if (f->value <= offset && offset < f->value + f->size)
      return true;
  return false;
}

// If a function compiled for split stack calls a function not
// compiled for split stack, then the caller needs its prologue
// adjusted to ensure that the called function will have enough stack
// available. Find those functions, and adjust their prologues.
template void InputSectionBase::adjustSplitStackFunctionPrologues(uint8_t *buf,
                                                                  uint8_t *end) {
  if (!getFile()->splitStack)
    return;
  DenseSet prologues;
  std::vector morestackCalls;

  for (Relocation &rel : relocations) {
    // Local symbols can't possibly be cross-calls, and should have been
    // resolved long before this line.
    if (rel.sym->isLocal())
      continue;

    // Ignore calls into the split-stack api.
    if (rel.sym->getName().startswith("__morestack")) {
      if (rel.sym->getName().equals("__morestack"))
        morestackCalls.push_back(&rel);
      continue;
    }

    // A relocation to non-function isn't relevant. Sometimes
    // __morestack is not marked as a function, so this check comes
    // after the name check.
    if (rel.sym->type != STT_FUNC)
      continue;

    // If the callee's-file was compiled with split stack, nothing to do. In
    // this context, a "Defined" symbol is one "defined by the binary currently
    // being produced". So an "undefined" symbol might be provided by a shared
    // library. It is not possible to tell how such symbols were compiled, so be
    // conservative.
    if (Defined *d = dyn_cast(rel.sym))
      if (InputSection *isec = cast_or_null(d->section))
        if (!isec || !isec->getFile() || isec->getFile()->splitStack)
          continue;

    if (enclosingPrologueAttempted(rel.offset, prologues))
      continue;

    if (Defined *f = getEnclosingFunction(rel.offset)) {
      prologues.insert(f);
      if (target->adjustPrologueForCrossSplitStack(buf + f->value, end,
                                                   f->stOther))
        continue;
      if (!getFile()->someNoSplitStack)
        error(lld::toString(this) + ": " + f->getName() +
              " (with -fsplit-stack) calls " + rel.sym->getName() +
              " (without -fsplit-stack), but couldn't adjust its prologue");
    }
  }

  if (target->needsMoreStackNonSplit)
    switchMorestackCallsToMorestackNonSplit(prologues, morestackCalls);
}

// Writes this section's (possibly decompressed) contents into the output
// buffer at outSecOff and applies relocations; relocation sections and
// SHT_GROUP sections get copied with their own fix-up paths.
template void InputSection::writeTo(uint8_t *buf) {
  if (type == SHT_NOBITS)
    return;

  if (auto *s = dyn_cast(this)) {
    s->writeTo(buf + outSecOff);
    return;
  }

  // If -r or --emit-relocs is given, then an InputSection
  // may be a relocation section.
  if (type == SHT_RELA) {
    copyRelocations(buf + outSecOff, getDataAs());
    return;
  }
  if (type == SHT_REL) {
    copyRelocations(buf + outSecOff, getDataAs());
    return;
  }

  // If -r is given, we may have a SHT_GROUP section.
  if (type == SHT_GROUP) {
    copyShtGroup(buf + outSecOff);
    return;
  }

  // If this is a compressed section, uncompress section contents directly
  // to the buffer.
  if (uncompressedSize >= 0) {
    size_t size = uncompressedSize;
    if (Error e = zlib::uncompress(toStringRef(rawData),
                                   (char *)(buf + outSecOff), size))
      fatal(toString(this) +
            ": uncompress failed: " + llvm::toString(std::move(e)));
    uint8_t *bufEnd = buf + outSecOff + size;
    relocate(buf + outSecOff, bufEnd);
    return;
  }

  // Copy section contents from source object file to output file
  // and then apply relocations.
  memcpy(buf + outSecOff, data().data(), data().size());
  uint8_t *bufEnd = buf + outSecOff + data().size();
  relocate(buf + outSecOff, bufEnd);
}

// ICF replacement: `other` is folded into this section.
void InputSection::replace(InputSection *other) {
  alignment = std::max(alignment, other->alignment);

  // When a section is replaced with another section that was allocated to
  // another partition, the replacement section (and its associated sections)
  // need to be placed in the main partition so that both partitions will be
  // able to access it.
  if (partition != other->partition) {
    partition = 1;
    for (InputSection *isec : dependentSections)
      isec->partition = 1;
  }

  other->repl = repl;
  other->markDead();
}

template EhInputSection::EhInputSection(ObjFile &f,
                                        const typename ELFT::Shdr &header,
                                        StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::EHFrame) {}

SyntheticSection *EhInputSection::getParent() const {
  return cast_or_null(parent);
}

// Returns the index of the first relocation that points to a region between
// Begin and Begin+Size.
template static unsigned getReloc(IntTy begin, IntTy size,
                                  const ArrayRef &rels, unsigned &relocI) {
  // Start search from RelocI for fast access. That works because the
  // relocations are sorted in .eh_frame.
  for (unsigned n = rels.size(); relocI < n; ++relocI) {
    const RelTy &rel = rels[relocI];
    if (rel.r_offset < begin)
      continue;
    if (rel.r_offset < begin + size)
      return relocI;
    return -1;
  }
  return -1;
}

// .eh_frame is a sequence of CIE or FDE records.
// This function splits an input section into records and returns them.
template void EhInputSection::split() {
  if (areRelocsRela)
    split(relas());
  else
    split(rels());
}

template void EhInputSection::split(ArrayRef rels) {
  // getReloc expects the relocations to be sorted by r_offset. See the comment
  // in scanRelocs.
  SmallVector storage;
  rels = sortRels(rels, storage);

  unsigned relI = 0;
  for (size_t off = 0, end = data().size(); off != end;) {
    size_t size = readEhRecordSize(this, off);
    pieces.emplace_back(off, this, size, getReloc(off, size, rels, relI));
    // The empty record is the end marker.
    if (size == 4)
      break;
    off += size;
  }
}

// Finds the first null terminator (of width entSize) in `s`, or npos.
static size_t findNull(StringRef s, size_t entSize) {
  // Optimize the common case.
  if (entSize == 1)
    return s.find(0);

  for (unsigned i = 0, n = s.size(); i != n; i += entSize) {
    const char *b = s.begin() + i;
    if (std::all_of(b, b + entSize, [](char c) { return c == 0; }))
      return i;
  }
  return StringRef::npos;
}

SyntheticSection *MergeInputSection::getParent() const {
  return cast_or_null(parent);
}

// Split SHF_STRINGS section. Such section is a sequence of
// null-terminated strings.
void MergeInputSection::splitStrings(ArrayRef data, size_t entSize) {
  size_t off = 0;
  bool isAlloc = flags & SHF_ALLOC;
  StringRef s = toStringRef(data);

  while (!s.empty()) {
    size_t end = findNull(s, entSize);
    if (end == StringRef::npos)
      fatal(toString(this) + ": string is not null terminated");
    size_t size = end + entSize;
    pieces.emplace_back(off, xxHash64(s.substr(0, size)), !isAlloc);
    s = s.substr(size);
    off += size;
  }
}

// Split non-SHF_STRINGS section. Such section is a sequence of
// fixed size records.
// NOTE(review): template headers/arguments stripped by extraction here too
// (e.g. `ArrayRef data` was presumably `ArrayRef<uint8_t>`, and the explicit
// instantiations below presumably carried <ELF32LE> etc.); code tokens are
// left exactly as found.
void MergeInputSection::splitNonStrings(ArrayRef data, size_t entSize) {
  size_t size = data.size();
  assert((size % entSize) == 0);
  bool isAlloc = flags & SHF_ALLOC;

  for (size_t i = 0; i != size; i += entSize)
    pieces.emplace_back(i, xxHash64(data.slice(i, entSize)), !isAlloc);
}

template MergeInputSection::MergeInputSection(ObjFile &f,
                                              const typename ELFT::Shdr &header,
                                              StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::Merge) {}

MergeInputSection::MergeInputSection(uint64_t flags, uint32_t type,
                                     uint64_t entsize, ArrayRef data,
                                     StringRef name)
    : InputSectionBase(nullptr, flags, type, entsize, /*Link*/ 0, /*Info*/ 0,
                       /*Alignment*/ entsize, data, name, SectionBase::Merge) {}

// This function is called after we obtain a complete list of input sections
// that need to be linked. This is responsible to split section contents
// into small chunks for further processing.
//
// Note that this function is called from parallelForEach. This must be
// thread-safe (i.e. no memory allocation from the pools).
void MergeInputSection::splitIntoPieces() {
  assert(pieces.empty());

  if (flags & SHF_STRINGS)
    splitStrings(data(), entsize);
  else
    splitNonStrings(data(), entsize);
}

// Returns the piece containing `offset`; fatal error if out of bounds.
SectionPiece *MergeInputSection::getSectionPiece(uint64_t offset) {
  if (this->data().size() <= offset)
    fatal(toString(this) + ": offset is outside the section");

  // If Offset is not at beginning of a section piece, it is not in the map.
  // In that case we need to do a binary search of the original section piece
  // vector.
  auto it = partition_point(
      pieces, [=](SectionPiece p) { return p.inputOff <= offset; });
  return &it[-1];
}

// Returns the offset in an output section for a given input offset.
// Because contents of a mergeable section is not contiguous in output,
// it is not just an addition to a base output offset.
uint64_t MergeInputSection::getParentOffset(uint64_t offset) const {
  // If Offset is not at beginning of a section piece, it is not in the map.
  // In that case we need to search from the original section piece vector.
  const SectionPiece &piece = *getSectionPiece(offset);
  uint64_t addend = offset - piece.inputOff;
  return piece.outputOff + addend;
}

template InputSection::InputSection(ObjFile &, const ELF32LE::Shdr &, StringRef);
template InputSection::InputSection(ObjFile &, const ELF32BE::Shdr &, StringRef);
template InputSection::InputSection(ObjFile &, const ELF64LE::Shdr &, StringRef);
template InputSection::InputSection(ObjFile &, const ELF64BE::Shdr &, StringRef);

template std::string InputSectionBase::getLocation(uint64_t);
template std::string InputSectionBase::getLocation(uint64_t);
template std::string InputSectionBase::getLocation(uint64_t);
template std::string InputSectionBase::getLocation(uint64_t);

template void InputSection::writeTo(uint8_t *);
template void InputSection::writeTo(uint8_t *);
template void InputSection::writeTo(uint8_t *);
template void InputSection::writeTo(uint8_t *);

template MergeInputSection::MergeInputSection(ObjFile &, const ELF32LE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile &, const ELF32BE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile &, const ELF64LE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile &, const ELF64BE::Shdr &, StringRef);

template EhInputSection::EhInputSection(ObjFile &, const ELF32LE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile &, const ELF32BE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile &, const ELF64LE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile &, const ELF64BE::Shdr &, StringRef);

template void EhInputSection::split();
template void EhInputSection::split();
template void EhInputSection::split();
template void EhInputSection::split();

// NOTE(review): the remaining lines are unified-diff metadata that leaked
// into this chunk from the next file in the patch (lld/ELF/InputSection.h);
// they are not C++ and are preserved verbatim as extraction residue.
diff --git a/lld/ELF/InputSection.h b/lld/ELF/InputSection.h index 5b91c1c90bd2..c914d0b42155 100644 --- a/lld/ELF/InputSection.h +++
b/lld/ELF/InputSection.h @@ -1,417 +1,418 @@ //===- InputSection.h -------------------------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #ifndef LLD_ELF_INPUT_SECTION_H #define LLD_ELF_INPUT_SECTION_H #include "Config.h" #include "Relocations.h" #include "Thunks.h" #include "lld/Common/LLVM.h" #include "llvm/ADT/CachedHashString.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Object/ELF.h" namespace lld { namespace elf { class Symbol; struct SectionPiece; class Defined; struct Partition; class SyntheticSection; class MergeSyntheticSection; template class ObjFile; class OutputSection; extern std::vector partitions; // This is the base class of all sections that lld handles. Some are sections in // input files, some are sections in the produced output file and some exist // just as a convenience for implementing special ways of combining some // sections. class SectionBase { public: enum Kind { Regular, EHFrame, Merge, Synthetic, Output }; Kind kind() const { return (Kind)sectionKind; } StringRef name; // This pointer points to the "real" instance of this instance. // Usually Repl == this. However, if ICF merges two sections, // Repl pointer of one section points to another section. So, // if you need to get a pointer to this instance, do not use // this but instead this->Repl. SectionBase *repl; uint8_t sectionKind : 3; // The next two bit fields are only used by InputSectionBase, but we // put them here so the struct packs better. uint8_t bss : 1; // Set for sections that should not be folded by ICF. uint8_t keepUnique : 1; // The 1-indexed partition that this section is assigned to by the garbage // collector, or 0 if this section is dead. 
Normally there is only one // partition, so this will either be 0 or 1. uint8_t partition; elf::Partition &getPartition() const; // These corresponds to the fields in Elf_Shdr. uint32_t alignment; uint64_t flags; uint64_t entsize; uint32_t type; uint32_t link; uint32_t info; OutputSection *getOutputSection(); const OutputSection *getOutputSection() const { return const_cast(this)->getOutputSection(); } // Translate an offset in the input section to an offset in the output // section. uint64_t getOffset(uint64_t offset) const; uint64_t getVA(uint64_t offset = 0) const; bool isLive() const { return partition != 0; } void markLive() { partition = 1; } void markDead() { partition = 0; } protected: SectionBase(Kind sectionKind, StringRef name, uint64_t flags, uint64_t entsize, uint64_t alignment, uint32_t type, uint32_t info, uint32_t link) : name(name), repl(this), sectionKind(sectionKind), bss(false), keepUnique(false), partition(0), alignment(alignment), flags(flags), entsize(entsize), type(type), link(link), info(info) {} }; // This corresponds to a section of an input file. class InputSectionBase : public SectionBase { public: template InputSectionBase(ObjFile &file, const typename ELFT::Shdr &header, StringRef name, Kind sectionKind); InputSectionBase(InputFile *file, uint64_t flags, uint32_t type, uint64_t entsize, uint32_t link, uint32_t info, uint32_t alignment, ArrayRef data, StringRef name, Kind sectionKind); static bool classof(const SectionBase *s) { return s->kind() != Output; } // Relocations that refer to this section. unsigned numRelocations : 31; unsigned areRelocsRela : 1; const void *firstRelocation = nullptr; // The file which contains this section. Its dynamic type is always // ObjFile, but in order to avoid ELFT, we use InputFile as // its static type. 
InputFile *file; template ObjFile *getFile() const { return cast_or_null>(file); } // If basic block sections are enabled, many code sections could end up with // one or two jump instructions at the end that could be relaxed to a smaller // instruction. The members below help trimming the trailing jump instruction // and shrinking a section. unsigned bytesDropped = 0; // Whether the section needs to be padded with a NOP filler due to // deleteFallThruJmpInsn. bool nopFiller = false; void drop_back(uint64_t num) { bytesDropped += num; } void push_back(uint64_t num) { assert(bytesDropped >= num); bytesDropped -= num; } void trim() { if (bytesDropped) { rawData = rawData.drop_back(bytesDropped); bytesDropped = 0; } } ArrayRef data() const { if (uncompressedSize >= 0) uncompress(); return rawData; } uint64_t getOffsetInFile() const; // Input sections are part of an output section. Special sections // like .eh_frame and merge sections are first combined into a // synthetic section that is then added to an output section. In all // cases this points one level up. SectionBase *parent = nullptr; // The next member in the section group if this section is in a group. This is // used by --gc-sections. InputSectionBase *nextInSectionGroup = nullptr; template ArrayRef rels() const { assert(!areRelocsRela); return llvm::makeArrayRef( static_cast(firstRelocation), numRelocations); } template ArrayRef relas() const { assert(areRelocsRela); return llvm::makeArrayRef( static_cast(firstRelocation), numRelocations); } // InputSections that are dependent on us (reverse dependency for GC) llvm::TinyPtrVector dependentSections; // Returns the size of this section (even if this is a common or BSS.) size_t getSize() const; InputSection *getLinkOrderDep() const; // Get the function symbol that encloses this offset from within the // section. template Defined *getEnclosingFunction(uint64_t offset); // Returns a source location string. Used to construct an error message. 
template std::string getLocation(uint64_t offset); std::string getSrcMsg(const Symbol &sym, uint64_t offset); std::string getObjMsg(uint64_t offset); // Each section knows how to relocate itself. These functions apply // relocations, assuming that Buf points to this section's copy in // the mmap'ed output buffer. template void relocate(uint8_t *buf, uint8_t *bufEnd); void relocateAlloc(uint8_t *buf, uint8_t *bufEnd); static uint64_t getRelocTargetVA(const InputFile *File, RelType Type, int64_t A, uint64_t P, const Symbol &Sym, RelExpr Expr); // The native ELF reloc data type is not very convenient to handle. // So we convert ELF reloc records to our own records in Relocations.cpp. // This vector contains such "cooked" relocations. SmallVector relocations; // These are modifiers to jump instructions that are necessary when basic // block sections are enabled. Basic block sections creates opportunities to // relax jump instructions at basic block boundaries after reordering the // basic blocks. SmallVector jumpInstrMods; // A function compiled with -fsplit-stack calling a function // compiled without -fsplit-stack needs its prologue adjusted. Find // such functions and adjust their prologues. This is very similar // to relocation. See https://gcc.gnu.org/wiki/SplitStacks for more // information. template void adjustSplitStackFunctionPrologues(uint8_t *buf, uint8_t *end); template llvm::ArrayRef getDataAs() const { size_t s = data().size(); assert(s % sizeof(T) == 0); return llvm::makeArrayRef((const T *)data().data(), s / sizeof(T)); } protected: + template void parseCompressedHeader(); void uncompress() const; mutable ArrayRef rawData; // This field stores the uncompressed size of the compressed data in rawData, // or -1 if rawData is not compressed (either because the section wasn't // compressed in the first place, or because we ended up uncompressing it). // Since the feature is not used often, this is usually -1. 
mutable int64_t uncompressedSize = -1; }; // SectionPiece represents a piece of splittable section contents. // We allocate a lot of these and binary search on them. This means that they // have to be as compact as possible, which is why we don't store the size (can // be found by looking at the next one). struct SectionPiece { SectionPiece(size_t off, uint32_t hash, bool live) : inputOff(off), live(live || !config->gcSections), hash(hash >> 1) {} uint32_t inputOff; uint32_t live : 1; uint32_t hash : 31; uint64_t outputOff = 0; }; static_assert(sizeof(SectionPiece) == 16, "SectionPiece is too big"); // This corresponds to a SHF_MERGE section of an input file. class MergeInputSection : public InputSectionBase { public: template MergeInputSection(ObjFile &f, const typename ELFT::Shdr &header, StringRef name); MergeInputSection(uint64_t flags, uint32_t type, uint64_t entsize, ArrayRef data, StringRef name); static bool classof(const SectionBase *s) { return s->kind() == Merge; } void splitIntoPieces(); // Translate an offset in the input section to an offset in the parent // MergeSyntheticSection. uint64_t getParentOffset(uint64_t offset) const; // Splittable sections are handled as a sequence of data // rather than a single large blob of data. std::vector pieces; // Returns I'th piece's data. This function is very hot when // string merging is enabled, so we want to inline. LLVM_ATTRIBUTE_ALWAYS_INLINE llvm::CachedHashStringRef getData(size_t i) const { size_t begin = pieces[i].inputOff; size_t end = (pieces.size() - 1 == i) ? data().size() : pieces[i + 1].inputOff; return {toStringRef(data().slice(begin, end - begin)), pieces[i].hash}; } // Returns the SectionPiece at a given input section offset. 
SectionPiece *getSectionPiece(uint64_t offset); const SectionPiece *getSectionPiece(uint64_t offset) const { return const_cast(this)->getSectionPiece(offset); } SyntheticSection *getParent() const; private: void splitStrings(ArrayRef a, size_t size); void splitNonStrings(ArrayRef a, size_t size); }; struct EhSectionPiece { EhSectionPiece(size_t off, InputSectionBase *sec, uint32_t size, unsigned firstRelocation) : inputOff(off), sec(sec), size(size), firstRelocation(firstRelocation) {} ArrayRef data() const { return {sec->data().data() + this->inputOff, size}; } size_t inputOff; ssize_t outputOff = -1; InputSectionBase *sec; uint32_t size; unsigned firstRelocation; }; // This corresponds to a .eh_frame section of an input file. class EhInputSection : public InputSectionBase { public: template EhInputSection(ObjFile &f, const typename ELFT::Shdr &header, StringRef name); static bool classof(const SectionBase *s) { return s->kind() == EHFrame; } template void split(); template void split(ArrayRef rels); // Splittable sections are handled as a sequence of data // rather than a single large blob of data. std::vector pieces; SyntheticSection *getParent() const; }; // This is a section that is added directly to an output section // instead of needing special combination via a synthetic section. This // includes all input sections with the exceptions of SHF_MERGE and // .eh_frame. It also includes the synthetic sections themselves. class InputSection : public InputSectionBase { public: InputSection(InputFile *f, uint64_t flags, uint32_t type, uint32_t alignment, ArrayRef data, StringRef name, Kind k = Regular); template InputSection(ObjFile &f, const typename ELFT::Shdr &header, StringRef name); // Write this section to a mmap'ed file, assuming Buf is pointing to // beginning of the output section. template void writeTo(uint8_t *buf); uint64_t getOffset(uint64_t offset) const { return outSecOff + offset; } OutputSection *getParent() const; // This variable has two usages. 
Initially, it represents an index in the // OutputSection's InputSection list, and is used when ordering SHF_LINK_ORDER // sections. After assignAddresses is called, it represents the offset from // the beginning of the output section this section was assigned to. uint64_t outSecOff = 0; static bool classof(const SectionBase *s); InputSectionBase *getRelocatedSection() const; template void relocateNonAlloc(uint8_t *buf, llvm::ArrayRef rels); // Used by ICF. uint32_t eqClass[2] = {0, 0}; // Called by ICF to merge two input sections. void replace(InputSection *other); static InputSection discarded; private: template void copyRelocations(uint8_t *buf, llvm::ArrayRef rels); template void copyShtGroup(uint8_t *buf); }; #ifdef _WIN32 static_assert(sizeof(InputSection) <= 192, "InputSection is too big"); #else static_assert(sizeof(InputSection) <= 184, "InputSection is too big"); #endif inline bool isDebugSection(const InputSectionBase &sec) { return (sec.flags & llvm::ELF::SHF_ALLOC) == 0 && (sec.name.startswith(".debug") || sec.name.startswith(".zdebug")); } // The list of all input sections. extern std::vector inputSections; // The set of TOC entries (.toc + addend) for which we should not apply // toc-indirect to toc-relative relaxation. const Symbol * refers to the // STT_SECTION symbol associated to the .toc input section. extern llvm::DenseSet> ppc64noTocRelax; } // namespace elf std::string toString(const elf::InputSectionBase *); } // namespace lld #endif diff --git a/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.h b/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.h index ba873ba4436b..96cab49d5ac8 100644 --- a/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.h +++ b/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.h @@ -1,156 +1,157 @@ //===-- RegisterInfoPOSIX_arm64.h -------------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
// See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #ifndef LLDB_SOURCE_PLUGINS_PROCESS_UTILITY_REGISTERINFOPOSIX_ARM64_H #define LLDB_SOURCE_PLUGINS_PROCESS_UTILITY_REGISTERINFOPOSIX_ARM64_H #include "RegisterInfoAndSetInterface.h" #include "lldb/Target/RegisterContext.h" #include "lldb/Utility/Flags.h" #include "lldb/lldb-private.h" #include enum class SVEState { Unknown, Disabled, FPSIMD, Full }; class RegisterInfoPOSIX_arm64 : public lldb_private::RegisterInfoAndSetInterface { public: enum { GPRegSet = 0, FPRegSet }; // AArch64 register set mask value enum { eRegsetMaskDefault = 0, eRegsetMaskSVE = 1, eRegsetMaskPAuth = 2, eRegsetMaskMTE = 4, eRegsetMaskDynamic = ~1, }; // AArch64 Register set FP/SIMD feature configuration enum { eVectorQuadwordAArch64, eVectorQuadwordAArch64SVE, eVectorQuadwordAArch64SVEMax = 256 }; // based on RegisterContextDarwin_arm64.h LLVM_PACKED_START struct GPR { uint64_t x[29]; // x0-x28 uint64_t fp; // x29 uint64_t lr; // x30 uint64_t sp; // x31 uint64_t pc; // pc uint32_t cpsr; // cpsr }; LLVM_PACKED_END // based on RegisterContextDarwin_arm64.h struct VReg { uint8_t bytes[16]; }; // based on RegisterContextDarwin_arm64.h struct FPU { VReg v[32]; uint32_t fpsr; uint32_t fpcr; }; // based on RegisterContextDarwin_arm64.h struct EXC { uint64_t far; // Virtual Fault Address uint32_t esr; // Exception syndrome uint32_t exception; // number of arm exception token }; // based on RegisterContextDarwin_arm64.h struct DBG { uint64_t bvr[16]; uint64_t bcr[16]; uint64_t wvr[16]; uint64_t wcr[16]; uint64_t mdscr_el1; }; RegisterInfoPOSIX_arm64(const lldb_private::ArchSpec &target_arch, lldb_private::Flags opt_regsets); size_t GetGPRSize() const override; size_t GetFPRSize() const override; const lldb_private::RegisterInfo *GetRegisterInfo() const override; uint32_t GetRegisterCount() const 
override; const lldb_private::RegisterSet * GetRegisterSet(size_t reg_set) const override; size_t GetRegisterSetCount() const override; size_t GetRegisterSetFromRegisterIndex(uint32_t reg_index) const override; void AddRegSetPAuth(); void AddRegSetMTE(); uint32_t ConfigureVectorLength(uint32_t sve_vq); bool VectorSizeIsValid(uint32_t vq) { if (vq >= eVectorQuadwordAArch64 && vq <= eVectorQuadwordAArch64SVEMax) return true; return false; } bool IsSVEEnabled() const { return m_opt_regsets.AnySet(eRegsetMaskSVE); } bool IsPAuthEnabled() const { return m_opt_regsets.AnySet(eRegsetMaskPAuth); } + bool IsMTEEnabled() const { return m_opt_regsets.AnySet(eRegsetMaskMTE); } bool IsSVEReg(unsigned reg) const; bool IsSVEZReg(unsigned reg) const; bool IsSVEPReg(unsigned reg) const; bool IsSVERegVG(unsigned reg) const; bool IsPAuthReg(unsigned reg) const; bool IsMTEReg(unsigned reg) const; uint32_t GetRegNumSVEZ0() const; uint32_t GetRegNumSVEFFR() const; uint32_t GetRegNumFPCR() const; uint32_t GetRegNumFPSR() const; uint32_t GetRegNumSVEVG() const; uint32_t GetPAuthOffset() const; uint32_t GetMTEOffset() const; private: typedef std::map> per_vq_register_infos; per_vq_register_infos m_per_vq_reg_infos; uint32_t m_vector_reg_vq = eVectorQuadwordAArch64; const lldb_private::RegisterInfo *m_register_info_p; uint32_t m_register_info_count; const lldb_private::RegisterSet *m_register_set_p; uint32_t m_register_set_count; // Contains pair of [start, end] register numbers of a register set with start // and end included. 
std::map> m_per_regset_regnum_range; lldb_private::Flags m_opt_regsets; std::vector m_dynamic_reg_infos; std::vector m_dynamic_reg_sets; std::vector pauth_regnum_collection; std::vector m_mte_regnum_collection; }; #endif diff --git a/llvm/include/llvm/MC/MCContext.h b/llvm/include/llvm/MC/MCContext.h index 877b2dc4ac92..2ff9c967e848 100644 --- a/llvm/include/llvm/MC/MCContext.h +++ b/llvm/include/llvm/MC/MCContext.h @@ -1,910 +1,908 @@ //===- MCContext.h - Machine Code Context -----------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #ifndef LLVM_MC_MCCONTEXT_H #define LLVM_MC_MCCONTEXT_H #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" #include "llvm/BinaryFormat/Dwarf.h" #include "llvm/BinaryFormat/ELF.h" #include "llvm/BinaryFormat/XCOFF.h" #include "llvm/MC/MCAsmMacro.h" #include "llvm/MC/MCDwarf.h" #include "llvm/MC/MCPseudoProbe.h" #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/MC/MCTargetOptions.h" #include "llvm/MC/SectionKind.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Error.h" #include "llvm/Support/MD5.h" #include "llvm/Support/raw_ostream.h" #include #include #include #include #include #include #include #include #include #include namespace llvm { class CodeViewContext; class MCAsmInfo; class MCLabel; class MCObjectFileInfo; class MCRegisterInfo; class MCSection; class MCSectionCOFF; class MCSectionELF; class MCSectionGOFF; class MCSectionMachO; class MCSectionWasm; class MCSectionXCOFF; class MCStreamer; class MCSymbol; class 
MCSymbolELF; class MCSymbolWasm; class MCSymbolXCOFF; class MDNode; class SMDiagnostic; class SMLoc; class SourceMgr; /// Context object for machine code objects. This class owns all of the /// sections that it creates. /// class MCContext { public: using SymbolTable = StringMap; using DiagHandlerTy = std::function &)>; enum Environment { IsMachO, IsELF, IsGOFF, IsCOFF, IsWasm, IsXCOFF }; private: Environment Env; /// The triple for this object. Triple TT; /// The SourceMgr for this object, if any. const SourceMgr *SrcMgr; /// The SourceMgr for inline assembly, if any. std::unique_ptr InlineSrcMgr; std::vector LocInfos; DiagHandlerTy DiagHandler; /// The MCAsmInfo for this target. const MCAsmInfo *MAI; /// The MCRegisterInfo for this target. const MCRegisterInfo *MRI; /// The MCObjectFileInfo for this target. const MCObjectFileInfo *MOFI; /// The MCSubtargetInfo for this target. const MCSubtargetInfo *MSTI; std::unique_ptr CVContext; /// Allocator object used for creating machine code objects. /// /// We use a bump pointer allocator to avoid the need to track all allocated /// objects. BumpPtrAllocator Allocator; SpecificBumpPtrAllocator COFFAllocator; SpecificBumpPtrAllocator ELFAllocator; SpecificBumpPtrAllocator MachOAllocator; SpecificBumpPtrAllocator GOFFAllocator; SpecificBumpPtrAllocator WasmAllocator; SpecificBumpPtrAllocator XCOFFAllocator; SpecificBumpPtrAllocator MCInstAllocator; /// Bindings of names to symbols. SymbolTable Symbols; /// A mapping from a local label number and an instance count to a symbol. /// For example, in the assembly /// 1: /// 2: /// 1: /// We have three labels represented by the pairs (1, 0), (2, 0) and (1, 1) DenseMap, MCSymbol *> LocalSymbols; /// Keeps tracks of names that were used both for used declared and /// artificial symbols. The value is "true" if the name has been used for a /// non-section symbol (there can be at most one of those, plus an unlimited /// number of section symbols with the same name). 
StringMap UsedNames; /// Keeps track of labels that are used in inline assembly. SymbolTable InlineAsmUsedLabelNames; /// The next ID to dole out to an unnamed assembler temporary symbol with /// a given prefix. StringMap NextID; /// Instances of directional local labels. DenseMap Instances; /// NextInstance() creates the next instance of the directional local label /// for the LocalLabelVal and adds it to the map if needed. unsigned NextInstance(unsigned LocalLabelVal); /// GetInstance() gets the current instance of the directional local label /// for the LocalLabelVal and adds it to the map if needed. unsigned GetInstance(unsigned LocalLabelVal); /// The file name of the log file from the environment variable /// AS_SECURE_LOG_FILE. Which must be set before the .secure_log_unique /// directive is used or it is an error. char *SecureLogFile; /// The stream that gets written to for the .secure_log_unique directive. std::unique_ptr SecureLog; /// Boolean toggled when .secure_log_unique / .secure_log_reset is seen to /// catch errors if .secure_log_unique appears twice without /// .secure_log_reset appearing between them. bool SecureLogUsed = false; /// The compilation directory to use for DW_AT_comp_dir. SmallString<128> CompilationDir; /// Prefix replacement map for source file information. std::map DebugPrefixMap; /// The main file name if passed in explicitly. std::string MainFileName; /// The dwarf file and directory tables from the dwarf .file directive. /// We now emit a line table for each compile unit. To reduce the prologue /// size of each line table, the files and directories used by each compile /// unit are separated. std::map MCDwarfLineTablesCUMap; /// The current dwarf line information from the last dwarf .loc directive. MCDwarfLoc CurrentDwarfLoc; bool DwarfLocSeen = false; /// Generate dwarf debugging info for assembly source files. 
bool GenDwarfForAssembly = false; /// The current dwarf file number when generate dwarf debugging info for /// assembly source files. unsigned GenDwarfFileNumber = 0; /// Sections for generating the .debug_ranges and .debug_aranges sections. SetVector SectionsForRanges; /// The information gathered from labels that will have dwarf label /// entries when generating dwarf assembly source files. std::vector MCGenDwarfLabelEntries; /// The string to embed in the debug information for the compile unit, if /// non-empty. StringRef DwarfDebugFlags; /// The string to embed in as the dwarf AT_producer for the compile unit, if /// non-empty. StringRef DwarfDebugProducer; /// The maximum version of dwarf that we should emit. uint16_t DwarfVersion = 4; /// The format of dwarf that we emit. dwarf::DwarfFormat DwarfFormat = dwarf::DWARF32; /// Honor temporary labels, this is useful for debugging semantic /// differences between temporary and non-temporary labels (primarily on /// Darwin). bool AllowTemporaryLabels = true; bool UseNamesOnTempLabels = false; /// The Compile Unit ID that we are currently processing. unsigned DwarfCompileUnitID = 0; /// A collection of MCPseudoProbe in the current module MCPseudoProbeTable PseudoProbeTable; // Sections are differentiated by the quadruple (section_name, group_name, // unique_id, link_to_symbol_name). Sections sharing the same quadruple are // combined into one section. 
struct ELFSectionKey { std::string SectionName; StringRef GroupName; StringRef LinkedToName; unsigned UniqueID; ELFSectionKey(StringRef SectionName, StringRef GroupName, StringRef LinkedToName, unsigned UniqueID) : SectionName(SectionName), GroupName(GroupName), LinkedToName(LinkedToName), UniqueID(UniqueID) {} bool operator<(const ELFSectionKey &Other) const { if (SectionName != Other.SectionName) return SectionName < Other.SectionName; if (GroupName != Other.GroupName) return GroupName < Other.GroupName; if (int O = LinkedToName.compare(Other.LinkedToName)) return O < 0; return UniqueID < Other.UniqueID; } }; struct COFFSectionKey { std::string SectionName; StringRef GroupName; int SelectionKey; unsigned UniqueID; COFFSectionKey(StringRef SectionName, StringRef GroupName, int SelectionKey, unsigned UniqueID) : SectionName(SectionName), GroupName(GroupName), SelectionKey(SelectionKey), UniqueID(UniqueID) {} bool operator<(const COFFSectionKey &Other) const { if (SectionName != Other.SectionName) return SectionName < Other.SectionName; if (GroupName != Other.GroupName) return GroupName < Other.GroupName; if (SelectionKey != Other.SelectionKey) return SelectionKey < Other.SelectionKey; return UniqueID < Other.UniqueID; } }; struct WasmSectionKey { std::string SectionName; StringRef GroupName; unsigned UniqueID; WasmSectionKey(StringRef SectionName, StringRef GroupName, unsigned UniqueID) : SectionName(SectionName), GroupName(GroupName), UniqueID(UniqueID) { } bool operator<(const WasmSectionKey &Other) const { if (SectionName != Other.SectionName) return SectionName < Other.SectionName; if (GroupName != Other.GroupName) return GroupName < Other.GroupName; return UniqueID < Other.UniqueID; } }; struct XCOFFSectionKey { // Section name. std::string SectionName; // Section property. // For csect section, it is storage mapping class. // For debug section, it is section type flags. 
union { XCOFF::StorageMappingClass MappingClass; XCOFF::DwarfSectionSubtypeFlags DwarfSubtypeFlags; }; bool IsCsect; XCOFFSectionKey(StringRef SectionName, XCOFF::StorageMappingClass MappingClass) : SectionName(SectionName), MappingClass(MappingClass), IsCsect(true) {} XCOFFSectionKey(StringRef SectionName, XCOFF::DwarfSectionSubtypeFlags DwarfSubtypeFlags) : SectionName(SectionName), DwarfSubtypeFlags(DwarfSubtypeFlags), IsCsect(false) {} bool operator<(const XCOFFSectionKey &Other) const { if (IsCsect && Other.IsCsect) return std::tie(SectionName, MappingClass) < std::tie(Other.SectionName, Other.MappingClass); if (IsCsect != Other.IsCsect) return IsCsect; return std::tie(SectionName, DwarfSubtypeFlags) < std::tie(Other.SectionName, Other.DwarfSubtypeFlags); } }; StringMap MachOUniquingMap; std::map ELFUniquingMap; std::map COFFUniquingMap; std::map GOFFUniquingMap; std::map WasmUniquingMap; std::map XCOFFUniquingMap; StringMap RelSecNames; SpecificBumpPtrAllocator MCSubtargetAllocator; /// Do automatic reset in destructor bool AutoReset; MCTargetOptions const *TargetOptions; bool HadError = false; void reportCommon(SMLoc Loc, std::function); MCSymbol *createSymbolImpl(const StringMapEntry *Name, bool CanBeUnnamed); MCSymbol *createSymbol(StringRef Name, bool AlwaysAddSuffix, bool IsTemporary); MCSymbol *getOrCreateDirectionalLocalSymbol(unsigned LocalLabelVal, unsigned Instance); MCSectionELF *createELFSectionImpl(StringRef Section, unsigned Type, unsigned Flags, SectionKind K, unsigned EntrySize, const MCSymbolELF *Group, bool IsComdat, unsigned UniqueID, const MCSymbolELF *LinkedToSym); MCSymbolXCOFF *createXCOFFSymbolImpl(const StringMapEntry *Name, bool IsTemporary); /// Map of currently defined macros. 
StringMap MacroMap; struct ELFEntrySizeKey { std::string SectionName; unsigned Flags; unsigned EntrySize; ELFEntrySizeKey(StringRef SectionName, unsigned Flags, unsigned EntrySize) : SectionName(SectionName), Flags(Flags), EntrySize(EntrySize) {} bool operator<(const ELFEntrySizeKey &Other) const { if (SectionName != Other.SectionName) return SectionName < Other.SectionName; - if (Flags != Other.Flags) - return Flags < Other.Flags; + if ((Flags & ELF::SHF_STRINGS) != (Other.Flags & ELF::SHF_STRINGS)) + return Other.Flags & ELF::SHF_STRINGS; return EntrySize < Other.EntrySize; } }; - // Symbols must be assigned to a section with a compatible entry size and - // flags. This map is used to assign unique IDs to sections to distinguish - // between sections with identical names but incompatible entry sizes and/or - // flags. This can occur when a symbol is explicitly assigned to a section, - // e.g. via __attribute__((section("myname"))). + // Symbols must be assigned to a section with a compatible entry + // size. This map is used to assign unique IDs to sections to + // distinguish between sections with identical names but incompatible entry + // sizes. This can occur when a symbol is explicitly assigned to a + // section, e.g. via __attribute__((section("myname"))). std::map ELFEntrySizeMap; // This set is used to record the generic mergeable section names seen. // These are sections that are created as mergeable e.g. .debug_str. We need // to avoid assigning non-mergeable symbols to these sections. It is used // to prevent non-mergeable symbols being explicitly assigned to mergeable // sections (e.g. via _attribute_((section("myname")))). 
DenseSet ELFSeenGenericMergeableSections; public: explicit MCContext(const Triple &TheTriple, const MCAsmInfo *MAI, const MCRegisterInfo *MRI, const MCSubtargetInfo *MSTI, const SourceMgr *Mgr = nullptr, MCTargetOptions const *TargetOpts = nullptr, bool DoAutoReset = true); MCContext(const MCContext &) = delete; MCContext &operator=(const MCContext &) = delete; ~MCContext(); Environment getObjectFileType() const { return Env; } const Triple &getTargetTriple() const { return TT; } const SourceMgr *getSourceManager() const { return SrcMgr; } void initInlineSourceManager(); SourceMgr *getInlineSourceManager() { return InlineSrcMgr.get(); } std::vector &getLocInfos() { return LocInfos; } void setDiagnosticHandler(DiagHandlerTy DiagHandler) { this->DiagHandler = DiagHandler; } void setObjectFileInfo(const MCObjectFileInfo *Mofi) { MOFI = Mofi; } const MCAsmInfo *getAsmInfo() const { return MAI; } const MCRegisterInfo *getRegisterInfo() const { return MRI; } const MCObjectFileInfo *getObjectFileInfo() const { return MOFI; } const MCSubtargetInfo *getSubtargetInfo() const { return MSTI; } CodeViewContext &getCVContext(); void setAllowTemporaryLabels(bool Value) { AllowTemporaryLabels = Value; } void setUseNamesOnTempLabels(bool Value) { UseNamesOnTempLabels = Value; } /// \name Module Lifetime Management /// @{ /// reset - return object to right after construction state to prepare /// to process a new module void reset(); /// @} /// \name McInst Management /// Create and return a new MC instruction. MCInst *createMCInst(); /// \name Symbol Management /// @{ /// Create and return a new linker temporary symbol with a unique but /// unspecified name. MCSymbol *createLinkerPrivateTempSymbol(); /// Create a temporary symbol with a unique name. The name will be omitted /// in the symbol table if UseNamesOnTempLabels is false (default except /// MCAsmStreamer). The overload without Name uses an unspecified name. 
MCSymbol *createTempSymbol(); MCSymbol *createTempSymbol(const Twine &Name, bool AlwaysAddSuffix = true); /// Create a temporary symbol with a unique name whose name cannot be /// omitted in the symbol table. This is rarely used. MCSymbol *createNamedTempSymbol(); MCSymbol *createNamedTempSymbol(const Twine &Name); /// Create the definition of a directional local symbol for numbered label /// (used for "1:" definitions). MCSymbol *createDirectionalLocalSymbol(unsigned LocalLabelVal); /// Create and return a directional local symbol for numbered label (used /// for "1b" or 1f" references). MCSymbol *getDirectionalLocalSymbol(unsigned LocalLabelVal, bool Before); /// Lookup the symbol inside with the specified \p Name. If it exists, /// return it. If not, create a forward reference and return it. /// /// \param Name - The symbol name, which must be unique across all symbols. MCSymbol *getOrCreateSymbol(const Twine &Name); /// Gets a symbol that will be defined to the final stack offset of a local /// variable after codegen. /// /// \param Idx - The index of a local variable passed to \@llvm.localescape. MCSymbol *getOrCreateFrameAllocSymbol(StringRef FuncName, unsigned Idx); MCSymbol *getOrCreateParentFrameOffsetSymbol(StringRef FuncName); MCSymbol *getOrCreateLSDASymbol(StringRef FuncName); /// Get the symbol for \p Name, or null. MCSymbol *lookupSymbol(const Twine &Name) const; /// Set value for a symbol. void setSymbolValue(MCStreamer &Streamer, StringRef Sym, uint64_t Val); /// getSymbols - Get a reference for the symbol table for clients that /// want to, for example, iterate over all symbols. 'const' because we /// still want any modifications to the table itself to use the MCContext /// APIs. const SymbolTable &getSymbols() const { return Symbols; } /// isInlineAsmLabel - Return true if the name is a label referenced in /// inline assembly. 
MCSymbol *getInlineAsmLabel(StringRef Name) const { return InlineAsmUsedLabelNames.lookup(Name); } /// registerInlineAsmLabel - Records that the name is a label referenced in /// inline assembly. void registerInlineAsmLabel(MCSymbol *Sym); /// @} /// \name Section Management /// @{ enum : unsigned { /// Pass this value as the UniqueID during section creation to get the /// generic section with the given name and characteristics. The usual /// sections such as .text use this ID. GenericSectionID = ~0U }; /// Return the MCSection for the specified mach-o section. This requires /// the operands to be valid. MCSectionMachO *getMachOSection(StringRef Segment, StringRef Section, unsigned TypeAndAttributes, unsigned Reserved2, SectionKind K, const char *BeginSymName = nullptr); MCSectionMachO *getMachOSection(StringRef Segment, StringRef Section, unsigned TypeAndAttributes, SectionKind K, const char *BeginSymName = nullptr) { return getMachOSection(Segment, Section, TypeAndAttributes, 0, K, BeginSymName); } MCSectionELF *getELFSection(const Twine &Section, unsigned Type, unsigned Flags) { return getELFSection(Section, Type, Flags, 0, "", false); } MCSectionELF *getELFSection(const Twine &Section, unsigned Type, unsigned Flags, unsigned EntrySize) { return getELFSection(Section, Type, Flags, EntrySize, "", false, MCSection::NonUniqueID, nullptr); } MCSectionELF *getELFSection(const Twine &Section, unsigned Type, unsigned Flags, unsigned EntrySize, const Twine &Group, bool IsComdat) { return getELFSection(Section, Type, Flags, EntrySize, Group, IsComdat, MCSection::NonUniqueID, nullptr); } MCSectionELF *getELFSection(const Twine &Section, unsigned Type, unsigned Flags, unsigned EntrySize, const Twine &Group, bool IsComdat, unsigned UniqueID, const MCSymbolELF *LinkedToSym); MCSectionELF *getELFSection(const Twine &Section, unsigned Type, unsigned Flags, unsigned EntrySize, const MCSymbolELF *Group, bool IsComdat, unsigned UniqueID, const MCSymbolELF *LinkedToSym); /// Get a 
section with the provided group identifier. This section is /// named by concatenating \p Prefix with '.' then \p Suffix. The \p Type /// describes the type of the section and \p Flags are used to further /// configure this named section. MCSectionELF *getELFNamedSection(const Twine &Prefix, const Twine &Suffix, unsigned Type, unsigned Flags, unsigned EntrySize = 0); MCSectionELF *createELFRelSection(const Twine &Name, unsigned Type, unsigned Flags, unsigned EntrySize, const MCSymbolELF *Group, const MCSectionELF *RelInfoSection); void renameELFSection(MCSectionELF *Section, StringRef Name); MCSectionELF *createELFGroupSection(const MCSymbolELF *Group, bool IsComdat); void recordELFMergeableSectionInfo(StringRef SectionName, unsigned Flags, unsigned UniqueID, unsigned EntrySize); bool isELFImplicitMergeableSectionNamePrefix(StringRef Name); bool isELFGenericMergeableSection(StringRef Name); - /// Return the unique ID of the section with the given name, flags and entry - /// size, if it exists. Optional getELFUniqueIDForEntsize(StringRef SectionName, unsigned Flags, unsigned EntrySize); MCSectionGOFF *getGOFFSection(StringRef Section, SectionKind Kind); MCSectionCOFF *getCOFFSection(StringRef Section, unsigned Characteristics, SectionKind Kind, StringRef COMDATSymName, int Selection, unsigned UniqueID = GenericSectionID, const char *BeginSymName = nullptr); MCSectionCOFF *getCOFFSection(StringRef Section, unsigned Characteristics, SectionKind Kind, const char *BeginSymName = nullptr); /// Gets or creates a section equivalent to Sec that is associated with the /// section containing KeySym. For example, to create a debug info section /// associated with an inline function, pass the normal debug info section /// as Sec and the function symbol as KeySym. 
MCSectionCOFF * getAssociativeCOFFSection(MCSectionCOFF *Sec, const MCSymbol *KeySym, unsigned UniqueID = GenericSectionID); MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K, unsigned Flags = 0) { return getWasmSection(Section, K, Flags, nullptr); } MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K, unsigned Flags, const char *BeginSymName) { return getWasmSection(Section, K, Flags, "", ~0, BeginSymName); } MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K, unsigned Flags, const Twine &Group, unsigned UniqueID) { return getWasmSection(Section, K, Flags, Group, UniqueID, nullptr); } MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K, unsigned Flags, const Twine &Group, unsigned UniqueID, const char *BeginSymName); MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K, unsigned Flags, const MCSymbolWasm *Group, unsigned UniqueID, const char *BeginSymName); MCSectionXCOFF *getXCOFFSection( StringRef Section, SectionKind K, Optional CsectProp = None, bool MultiSymbolsAllowed = false, const char *BeginSymName = nullptr, Optional DwarfSubtypeFlags = None); // Create and save a copy of STI and return a reference to the copy. MCSubtargetInfo &getSubtargetCopy(const MCSubtargetInfo &STI); /// @} /// \name Dwarf Management /// @{ /// Get the compilation directory for DW_AT_comp_dir /// The compilation directory should be set with \c setCompilationDir before /// calling this function. If it is unset, an empty string will be returned. StringRef getCompilationDir() const { return CompilationDir; } /// Set the compilation directory for DW_AT_comp_dir void setCompilationDir(StringRef S) { CompilationDir = S.str(); } /// Add an entry to the debug prefix map. void addDebugPrefixMapEntry(const std::string &From, const std::string &To); // Remaps all debug directory paths in-place as per the debug prefix map. void RemapDebugPaths(); /// Get the main file name for use in error messages and debug /// info. 
This can be set to ensure we've got the correct file name /// after preprocessing or for -save-temps. const std::string &getMainFileName() const { return MainFileName; } /// Set the main file name and override the default. void setMainFileName(StringRef S) { MainFileName = std::string(S); } /// Creates an entry in the dwarf file and directory tables. Expected getDwarfFile(StringRef Directory, StringRef FileName, unsigned FileNumber, Optional Checksum, Optional Source, unsigned CUID); bool isValidDwarfFileNumber(unsigned FileNumber, unsigned CUID = 0); const std::map &getMCDwarfLineTables() const { return MCDwarfLineTablesCUMap; } MCDwarfLineTable &getMCDwarfLineTable(unsigned CUID) { return MCDwarfLineTablesCUMap[CUID]; } const MCDwarfLineTable &getMCDwarfLineTable(unsigned CUID) const { auto I = MCDwarfLineTablesCUMap.find(CUID); assert(I != MCDwarfLineTablesCUMap.end()); return I->second; } const SmallVectorImpl &getMCDwarfFiles(unsigned CUID = 0) { return getMCDwarfLineTable(CUID).getMCDwarfFiles(); } const SmallVectorImpl &getMCDwarfDirs(unsigned CUID = 0) { return getMCDwarfLineTable(CUID).getMCDwarfDirs(); } unsigned getDwarfCompileUnitID() { return DwarfCompileUnitID; } void setDwarfCompileUnitID(unsigned CUIndex) { DwarfCompileUnitID = CUIndex; } /// Specifies the "root" file and directory of the compilation unit. /// These are "file 0" and "directory 0" in DWARF v5. void setMCLineTableRootFile(unsigned CUID, StringRef CompilationDir, StringRef Filename, Optional Checksum, Optional Source) { getMCDwarfLineTable(CUID).setRootFile(CompilationDir, Filename, Checksum, Source); } /// Reports whether MD5 checksum usage is consistent (all-or-none). bool isDwarfMD5UsageConsistent(unsigned CUID) const { return getMCDwarfLineTable(CUID).isMD5UsageConsistent(); } /// Saves the information from the currently parsed dwarf .loc directive /// and sets DwarfLocSeen. 
When the next instruction is assembled an entry /// in the line number table with this information and the address of the /// instruction will be created. void setCurrentDwarfLoc(unsigned FileNum, unsigned Line, unsigned Column, unsigned Flags, unsigned Isa, unsigned Discriminator) { CurrentDwarfLoc.setFileNum(FileNum); CurrentDwarfLoc.setLine(Line); CurrentDwarfLoc.setColumn(Column); CurrentDwarfLoc.setFlags(Flags); CurrentDwarfLoc.setIsa(Isa); CurrentDwarfLoc.setDiscriminator(Discriminator); DwarfLocSeen = true; } void clearDwarfLocSeen() { DwarfLocSeen = false; } bool getDwarfLocSeen() { return DwarfLocSeen; } const MCDwarfLoc &getCurrentDwarfLoc() { return CurrentDwarfLoc; } bool getGenDwarfForAssembly() { return GenDwarfForAssembly; } void setGenDwarfForAssembly(bool Value) { GenDwarfForAssembly = Value; } unsigned getGenDwarfFileNumber() { return GenDwarfFileNumber; } void setGenDwarfFileNumber(unsigned FileNumber) { GenDwarfFileNumber = FileNumber; } /// Specifies information about the "root file" for assembler clients /// (e.g., llvm-mc). Assumes compilation dir etc. have been set up. 
void setGenDwarfRootFile(StringRef FileName, StringRef Buffer); const SetVector &getGenDwarfSectionSyms() { return SectionsForRanges; } bool addGenDwarfSection(MCSection *Sec) { return SectionsForRanges.insert(Sec); } void finalizeDwarfSections(MCStreamer &MCOS); const std::vector &getMCGenDwarfLabelEntries() const { return MCGenDwarfLabelEntries; } void addMCGenDwarfLabelEntry(const MCGenDwarfLabelEntry &E) { MCGenDwarfLabelEntries.push_back(E); } void setDwarfDebugFlags(StringRef S) { DwarfDebugFlags = S; } StringRef getDwarfDebugFlags() { return DwarfDebugFlags; } void setDwarfDebugProducer(StringRef S) { DwarfDebugProducer = S; } StringRef getDwarfDebugProducer() { return DwarfDebugProducer; } void setDwarfFormat(dwarf::DwarfFormat f) { DwarfFormat = f; } dwarf::DwarfFormat getDwarfFormat() const { return DwarfFormat; } void setDwarfVersion(uint16_t v) { DwarfVersion = v; } uint16_t getDwarfVersion() const { return DwarfVersion; } /// @} char *getSecureLogFile() { return SecureLogFile; } raw_fd_ostream *getSecureLog() { return SecureLog.get(); } void setSecureLog(std::unique_ptr Value) { SecureLog = std::move(Value); } bool getSecureLogUsed() { return SecureLogUsed; } void setSecureLogUsed(bool Value) { SecureLogUsed = Value; } void *allocate(unsigned Size, unsigned Align = 8) { return Allocator.Allocate(Size, Align); } void deallocate(void *Ptr) {} bool hadError() { return HadError; } void diagnose(const SMDiagnostic &SMD); void reportError(SMLoc L, const Twine &Msg); void reportWarning(SMLoc L, const Twine &Msg); // Unrecoverable error has occurred. Display the best diagnostic we can // and bail via exit(1). For now, most MC backend errors are unrecoverable. // FIXME: We should really do something about that. LLVM_ATTRIBUTE_NORETURN void reportFatalError(SMLoc L, const Twine &Msg); const MCAsmMacro *lookupMacro(StringRef Name) { StringMap::iterator I = MacroMap.find(Name); return (I == MacroMap.end()) ? 
nullptr : &I->getValue(); } void defineMacro(StringRef Name, MCAsmMacro Macro) { MacroMap.insert(std::make_pair(Name, std::move(Macro))); } void undefineMacro(StringRef Name) { MacroMap.erase(Name); } MCPseudoProbeTable &getMCPseudoProbeTable() { return PseudoProbeTable; } }; } // end namespace llvm // operator new and delete aren't allowed inside namespaces. // The throw specifications are mandated by the standard. /// Placement new for using the MCContext's allocator. /// /// This placement form of operator new uses the MCContext's allocator for /// obtaining memory. It is a non-throwing new, which means that it returns /// null on error. (If that is what the allocator does. The current does, so if /// this ever changes, this operator will have to be changed, too.) /// Usage looks like this (assuming there's an MCContext 'Context' in scope): /// \code /// // Default alignment (8) /// IntegerLiteral *Ex = new (Context) IntegerLiteral(arguments); /// // Specific alignment /// IntegerLiteral *Ex2 = new (Context, 4) IntegerLiteral(arguments); /// \endcode /// Please note that you cannot use delete on the pointer; it must be /// deallocated using an explicit destructor call followed by /// \c Context.Deallocate(Ptr). /// /// \param Bytes The number of bytes to allocate. Calculated by the compiler. /// \param C The MCContext that provides the allocator. /// \param Alignment The alignment of the allocated memory (if the underlying /// allocator supports it). /// \return The allocated memory. Could be NULL. inline void *operator new(size_t Bytes, llvm::MCContext &C, size_t Alignment = 8) noexcept { return C.allocate(Bytes, Alignment); } /// Placement delete companion to the new above. /// /// This operator is just a companion to the new above. There is no way of /// invoking it directly; see the new operator for more details. This operator /// is called implicitly by the compiler if a placement new expression using /// the MCContext throws in the object constructor. 
inline void operator delete(void *Ptr, llvm::MCContext &C, size_t) noexcept { C.deallocate(Ptr); } /// This placement form of operator new[] uses the MCContext's allocator for /// obtaining memory. It is a non-throwing new[], which means that it returns /// null on error. /// Usage looks like this (assuming there's an MCContext 'Context' in scope): /// \code /// // Default alignment (8) /// char *data = new (Context) char[10]; /// // Specific alignment /// char *data = new (Context, 4) char[10]; /// \endcode /// Please note that you cannot use delete on the pointer; it must be /// deallocated using an explicit destructor call followed by /// \c Context.Deallocate(Ptr). /// /// \param Bytes The number of bytes to allocate. Calculated by the compiler. /// \param C The MCContext that provides the allocator. /// \param Alignment The alignment of the allocated memory (if the underlying /// allocator supports it). /// \return The allocated memory. Could be NULL. inline void *operator new[](size_t Bytes, llvm::MCContext &C, size_t Alignment = 8) noexcept { return C.allocate(Bytes, Alignment); } /// Placement delete[] companion to the new[] above. /// /// This operator is just a companion to the new[] above. There is no way of /// invoking it directly; see the new[] operator for more details. This operator /// is called implicitly by the compiler if a placement new[] expression using /// the MCContext throws in the object constructor. 
inline void operator delete[](void *Ptr, llvm::MCContext &C) noexcept { C.deallocate(Ptr); } #endif // LLVM_MC_MCCONTEXT_H diff --git a/llvm/include/llvm/Transforms/Scalar/MemCpyOptimizer.h b/llvm/include/llvm/Transforms/Scalar/MemCpyOptimizer.h index 635b706d0bef..6203f37ebb01 100644 --- a/llvm/include/llvm/Transforms/Scalar/MemCpyOptimizer.h +++ b/llvm/include/llvm/Transforms/Scalar/MemCpyOptimizer.h @@ -1,84 +1,84 @@ //===- MemCpyOptimizer.h - memcpy optimization ------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This pass performs various transformations related to eliminating memcpy // calls, or transforming sets of stores into memset's. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H #define LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H #include "llvm/IR/BasicBlock.h" #include "llvm/IR/PassManager.h" #include #include namespace llvm { class AAResults; class AssumptionCache; class CallBase; class CallInst; class DominatorTree; class Function; class Instruction; class LoadInst; class MemCpyInst; class MemMoveInst; class MemoryDependenceResults; class MemorySSA; class MemorySSAUpdater; class MemSetInst; class StoreInst; class TargetLibraryInfo; class Value; class MemCpyOptPass : public PassInfoMixin { MemoryDependenceResults *MD = nullptr; TargetLibraryInfo *TLI = nullptr; AAResults *AA = nullptr; AssumptionCache *AC = nullptr; DominatorTree *DT = nullptr; MemorySSA *MSSA = nullptr; MemorySSAUpdater *MSSAU = nullptr; public: MemCpyOptPass() = default; PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); // Glue for the old PM. 
bool runImpl(Function &F, MemoryDependenceResults *MD, TargetLibraryInfo *TLI, AAResults *AA, AssumptionCache *AC, DominatorTree *DT, MemorySSA *MSSA); private: // Helper functions bool processStore(StoreInst *SI, BasicBlock::iterator &BBI); bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI); bool processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI); bool processMemMove(MemMoveInst *M); bool performCallSlotOptzn(Instruction *cpyLoad, Instruction *cpyStore, - Value *cpyDst, Value *cpySrc, uint64_t cpyLen, + Value *cpyDst, Value *cpySrc, TypeSize cpyLen, Align cpyAlign, CallInst *C); bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep); bool processMemSetMemCpyDependence(MemCpyInst *MemCpy, MemSetInst *MemSet); bool performMemCpyToMemSetOptzn(MemCpyInst *MemCpy, MemSetInst *MemSet); bool processByValArgument(CallBase &CB, unsigned ArgNo); Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr, Value *ByteVal); bool moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI); void eraseInstruction(Instruction *I); bool iterateOnFunction(Function &F); }; } // end namespace llvm #endif // LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp index 6c75085a6678..1e72cb4d3a66 100644 --- a/llvm/lib/IR/Constants.cpp +++ b/llvm/lib/IR/Constants.cpp @@ -1,3545 +1,3547 @@ //===-- Constants.cpp - Implement Constant nodes --------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements the Constant* classes. 
// //===----------------------------------------------------------------------===// #include "llvm/IR/Constants.h" #include "ConstantFold.h" #include "LLVMContextImpl.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/IR/PatternMatch.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ManagedStatic.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include using namespace llvm; using namespace PatternMatch; //===----------------------------------------------------------------------===// // Constant Class //===----------------------------------------------------------------------===// bool Constant::isNegativeZeroValue() const { // Floating point values have an explicit -0.0 value. if (const ConstantFP *CFP = dyn_cast(this)) return CFP->isZero() && CFP->isNegative(); // Equivalent for a vector of -0.0's. if (getType()->isVectorTy()) if (const auto *SplatCFP = dyn_cast_or_null(getSplatValue())) return SplatCFP->isNegativeZeroValue(); // We've already handled true FP case; any other FP vectors can't represent -0.0. if (getType()->isFPOrFPVectorTy()) return false; // Otherwise, just use +0.0. return isNullValue(); } // Return true iff this constant is positive zero (floating point), negative // zero (floating point), or a null value. bool Constant::isZeroValue() const { // Floating point values have an explicit -0.0 value. if (const ConstantFP *CFP = dyn_cast(this)) return CFP->isZero(); // Check for constant splat vectors of 1 values. if (getType()->isVectorTy()) if (const auto *SplatCFP = dyn_cast_or_null(getSplatValue())) return SplatCFP->isZero(); // Otherwise, just use +0.0. 
return isNullValue(); } bool Constant::isNullValue() const { // 0 is null. if (const ConstantInt *CI = dyn_cast(this)) return CI->isZero(); // +0.0 is null. if (const ConstantFP *CFP = dyn_cast(this)) // ppc_fp128 determine isZero using high order double only // Should check the bitwise value to make sure all bits are zero. return CFP->isExactlyValue(+0.0); // constant zero is zero for aggregates, cpnull is null for pointers, none for // tokens. return isa(this) || isa(this) || isa(this); } bool Constant::isAllOnesValue() const { // Check for -1 integers if (const ConstantInt *CI = dyn_cast(this)) return CI->isMinusOne(); // Check for FP which are bitcasted from -1 integers if (const ConstantFP *CFP = dyn_cast(this)) return CFP->getValueAPF().bitcastToAPInt().isAllOnesValue(); // Check for constant splat vectors of 1 values. if (getType()->isVectorTy()) if (const auto *SplatVal = getSplatValue()) return SplatVal->isAllOnesValue(); return false; } bool Constant::isOneValue() const { // Check for 1 integers if (const ConstantInt *CI = dyn_cast(this)) return CI->isOne(); // Check for FP which are bitcasted from 1 integers if (const ConstantFP *CFP = dyn_cast(this)) return CFP->getValueAPF().bitcastToAPInt().isOneValue(); // Check for constant splat vectors of 1 values. 
if (getType()->isVectorTy()) if (const auto *SplatVal = getSplatValue()) return SplatVal->isOneValue(); return false; } bool Constant::isNotOneValue() const { // Check for 1 integers if (const ConstantInt *CI = dyn_cast(this)) return !CI->isOneValue(); // Check for FP which are bitcasted from 1 integers if (const ConstantFP *CFP = dyn_cast(this)) return !CFP->getValueAPF().bitcastToAPInt().isOneValue(); // Check that vectors don't contain 1 if (auto *VTy = dyn_cast(getType())) { for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) { Constant *Elt = getAggregateElement(I); if (!Elt || !Elt->isNotOneValue()) return false; } return true; } // Check for splats that don't contain 1 if (getType()->isVectorTy()) if (const auto *SplatVal = getSplatValue()) return SplatVal->isNotOneValue(); // It *may* contain 1, we can't tell. return false; } bool Constant::isMinSignedValue() const { // Check for INT_MIN integers if (const ConstantInt *CI = dyn_cast(this)) return CI->isMinValue(/*isSigned=*/true); // Check for FP which are bitcasted from INT_MIN integers if (const ConstantFP *CFP = dyn_cast(this)) return CFP->getValueAPF().bitcastToAPInt().isMinSignedValue(); // Check for splats of INT_MIN values. 
if (getType()->isVectorTy()) if (const auto *SplatVal = getSplatValue()) return SplatVal->isMinSignedValue(); return false; } bool Constant::isNotMinSignedValue() const { // Check for INT_MIN integers if (const ConstantInt *CI = dyn_cast(this)) return !CI->isMinValue(/*isSigned=*/true); // Check for FP which are bitcasted from INT_MIN integers if (const ConstantFP *CFP = dyn_cast(this)) return !CFP->getValueAPF().bitcastToAPInt().isMinSignedValue(); // Check that vectors don't contain INT_MIN if (auto *VTy = dyn_cast(getType())) { for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) { Constant *Elt = getAggregateElement(I); if (!Elt || !Elt->isNotMinSignedValue()) return false; } return true; } // Check for splats that aren't INT_MIN if (getType()->isVectorTy()) if (const auto *SplatVal = getSplatValue()) return SplatVal->isNotMinSignedValue(); // It *may* contain INT_MIN, we can't tell. return false; } bool Constant::isFiniteNonZeroFP() const { if (auto *CFP = dyn_cast(this)) return CFP->getValueAPF().isFiniteNonZero(); if (auto *VTy = dyn_cast(getType())) { for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) { auto *CFP = dyn_cast_or_null(getAggregateElement(I)); if (!CFP || !CFP->getValueAPF().isFiniteNonZero()) return false; } return true; } if (getType()->isVectorTy()) if (const auto *SplatCFP = dyn_cast_or_null(getSplatValue())) return SplatCFP->isFiniteNonZeroFP(); // It *may* contain finite non-zero, we can't tell. return false; } bool Constant::isNormalFP() const { if (auto *CFP = dyn_cast(this)) return CFP->getValueAPF().isNormal(); if (auto *VTy = dyn_cast(getType())) { for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) { auto *CFP = dyn_cast_or_null(getAggregateElement(I)); if (!CFP || !CFP->getValueAPF().isNormal()) return false; } return true; } if (getType()->isVectorTy()) if (const auto *SplatCFP = dyn_cast_or_null(getSplatValue())) return SplatCFP->isNormalFP(); // It *may* contain a normal fp value, we can't tell. 
return false; } bool Constant::hasExactInverseFP() const { if (auto *CFP = dyn_cast(this)) return CFP->getValueAPF().getExactInverse(nullptr); if (auto *VTy = dyn_cast(getType())) { for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) { auto *CFP = dyn_cast_or_null(getAggregateElement(I)); if (!CFP || !CFP->getValueAPF().getExactInverse(nullptr)) return false; } return true; } if (getType()->isVectorTy()) if (const auto *SplatCFP = dyn_cast_or_null(getSplatValue())) return SplatCFP->hasExactInverseFP(); // It *may* have an exact inverse fp value, we can't tell. return false; } bool Constant::isNaN() const { if (auto *CFP = dyn_cast(this)) return CFP->isNaN(); if (auto *VTy = dyn_cast(getType())) { for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) { auto *CFP = dyn_cast_or_null(getAggregateElement(I)); if (!CFP || !CFP->isNaN()) return false; } return true; } if (getType()->isVectorTy()) if (const auto *SplatCFP = dyn_cast_or_null(getSplatValue())) return SplatCFP->isNaN(); // It *may* be NaN, we can't tell. return false; } bool Constant::isElementWiseEqual(Value *Y) const { // Are they fully identical? if (this == Y) return true; // The input value must be a vector constant with the same type. auto *VTy = dyn_cast(getType()); if (!isa(Y) || !VTy || VTy != Y->getType()) return false; // TODO: Compare pointer constants? if (!(VTy->getElementType()->isIntegerTy() || VTy->getElementType()->isFloatingPointTy())) return false; // They may still be identical element-wise (if they have `undef`s). // Bitcast to integer to allow exact bitwise comparison for all types. 
Type *IntTy = VectorType::getInteger(VTy); Constant *C0 = ConstantExpr::getBitCast(const_cast(this), IntTy); Constant *C1 = ConstantExpr::getBitCast(cast(Y), IntTy); Constant *CmpEq = ConstantExpr::getICmp(ICmpInst::ICMP_EQ, C0, C1); return isa(CmpEq) || match(CmpEq, m_One()); } static bool containsUndefinedElement(const Constant *C, function_ref HasFn) { if (auto *VTy = dyn_cast(C->getType())) { if (HasFn(C)) return true; if (isa(C)) return false; if (isa(C->getType())) return false; for (unsigned i = 0, e = cast(VTy)->getNumElements(); - i != e; ++i) - if (HasFn(C->getAggregateElement(i))) - return true; + i != e; ++i) { + if (Constant *Elem = C->getAggregateElement(i)) + if (HasFn(Elem)) + return true; + } } return false; } bool Constant::containsUndefOrPoisonElement() const { return containsUndefinedElement( this, [&](const auto *C) { return isa(C); }); } bool Constant::containsPoisonElement() const { return containsUndefinedElement( this, [&](const auto *C) { return isa(C); }); } bool Constant::containsConstantExpression() const { if (auto *VTy = dyn_cast(getType())) { for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) if (isa(getAggregateElement(i))) return true; } return false; } /// Constructor to create a '0' constant of arbitrary type. 
Constant *Constant::getNullValue(Type *Ty) { switch (Ty->getTypeID()) { case Type::IntegerTyID: return ConstantInt::get(Ty, 0); case Type::HalfTyID: return ConstantFP::get(Ty->getContext(), APFloat::getZero(APFloat::IEEEhalf())); case Type::BFloatTyID: return ConstantFP::get(Ty->getContext(), APFloat::getZero(APFloat::BFloat())); case Type::FloatTyID: return ConstantFP::get(Ty->getContext(), APFloat::getZero(APFloat::IEEEsingle())); case Type::DoubleTyID: return ConstantFP::get(Ty->getContext(), APFloat::getZero(APFloat::IEEEdouble())); case Type::X86_FP80TyID: return ConstantFP::get(Ty->getContext(), APFloat::getZero(APFloat::x87DoubleExtended())); case Type::FP128TyID: return ConstantFP::get(Ty->getContext(), APFloat::getZero(APFloat::IEEEquad())); case Type::PPC_FP128TyID: return ConstantFP::get(Ty->getContext(), APFloat(APFloat::PPCDoubleDouble(), APInt::getNullValue(128))); case Type::PointerTyID: return ConstantPointerNull::get(cast(Ty)); case Type::StructTyID: case Type::ArrayTyID: case Type::FixedVectorTyID: case Type::ScalableVectorTyID: return ConstantAggregateZero::get(Ty); case Type::TokenTyID: return ConstantTokenNone::get(Ty->getContext()); default: // Function, Label, or Opaque type? llvm_unreachable("Cannot create a null constant of that type!"); } } Constant *Constant::getIntegerValue(Type *Ty, const APInt &V) { Type *ScalarTy = Ty->getScalarType(); // Create the base integer constant. Constant *C = ConstantInt::get(Ty->getContext(), V); // Convert an integer to a pointer, if necessary. if (PointerType *PTy = dyn_cast(ScalarTy)) C = ConstantExpr::getIntToPtr(C, PTy); // Broadcast a scalar to a vector, if necessary. 
if (VectorType *VTy = dyn_cast(Ty)) C = ConstantVector::getSplat(VTy->getElementCount(), C); return C; } Constant *Constant::getAllOnesValue(Type *Ty) { if (IntegerType *ITy = dyn_cast(Ty)) return ConstantInt::get(Ty->getContext(), APInt::getAllOnesValue(ITy->getBitWidth())); if (Ty->isFloatingPointTy()) { APFloat FL = APFloat::getAllOnesValue(Ty->getFltSemantics(), Ty->getPrimitiveSizeInBits()); return ConstantFP::get(Ty->getContext(), FL); } VectorType *VTy = cast(Ty); return ConstantVector::getSplat(VTy->getElementCount(), getAllOnesValue(VTy->getElementType())); } Constant *Constant::getAggregateElement(unsigned Elt) const { assert((getType()->isAggregateType() || getType()->isVectorTy()) && "Must be an aggregate/vector constant"); if (const auto *CC = dyn_cast(this)) return Elt < CC->getNumOperands() ? CC->getOperand(Elt) : nullptr; if (const auto *CAZ = dyn_cast(this)) return Elt < CAZ->getElementCount().getKnownMinValue() ? CAZ->getElementValue(Elt) : nullptr; // FIXME: getNumElements() will fail for non-fixed vector types. if (isa(getType())) return nullptr; if (const auto *PV = dyn_cast(this)) return Elt < PV->getNumElements() ? PV->getElementValue(Elt) : nullptr; if (const auto *UV = dyn_cast(this)) return Elt < UV->getNumElements() ? UV->getElementValue(Elt) : nullptr; if (const auto *CDS = dyn_cast(this)) return Elt < CDS->getNumElements() ? CDS->getElementAsConstant(Elt) : nullptr; return nullptr; } Constant *Constant::getAggregateElement(Constant *Elt) const { assert(isa(Elt->getType()) && "Index must be an integer"); if (ConstantInt *CI = dyn_cast(Elt)) { // Check if the constant fits into an uint64_t. if (CI->getValue().getActiveBits() > 64) return nullptr; return getAggregateElement(CI->getZExtValue()); } return nullptr; } void Constant::destroyConstant() { /// First call destroyConstantImpl on the subclass. This gives the subclass /// a chance to remove the constant from any maps/pools it's contained in. 
switch (getValueID()) { default: llvm_unreachable("Not a constant!"); #define HANDLE_CONSTANT(Name) \ case Value::Name##Val: \ cast(this)->destroyConstantImpl(); \ break; #include "llvm/IR/Value.def" } // When a Constant is destroyed, there may be lingering // references to the constant by other constants in the constant pool. These // constants are implicitly dependent on the module that is being deleted, // but they don't know that. Because we only find out when the CPV is // deleted, we must now notify all of our users (that should only be // Constants) that they are, in fact, invalid now and should be deleted. // while (!use_empty()) { Value *V = user_back(); #ifndef NDEBUG // Only in -g mode... if (!isa(V)) { dbgs() << "While deleting: " << *this << "\n\nUse still stuck around after Def is destroyed: " << *V << "\n\n"; } #endif assert(isa(V) && "References remain to Constant being destroyed"); cast(V)->destroyConstant(); // The constant should remove itself from our use list... assert((use_empty() || user_back() != V) && "Constant not removed!"); } // Value has no outstanding references it is safe to delete it now... 
deleteConstant(this); } void llvm::deleteConstant(Constant *C) { switch (C->getValueID()) { case Constant::ConstantIntVal: delete static_cast(C); break; case Constant::ConstantFPVal: delete static_cast(C); break; case Constant::ConstantAggregateZeroVal: delete static_cast(C); break; case Constant::ConstantArrayVal: delete static_cast(C); break; case Constant::ConstantStructVal: delete static_cast(C); break; case Constant::ConstantVectorVal: delete static_cast(C); break; case Constant::ConstantPointerNullVal: delete static_cast(C); break; case Constant::ConstantDataArrayVal: delete static_cast(C); break; case Constant::ConstantDataVectorVal: delete static_cast(C); break; case Constant::ConstantTokenNoneVal: delete static_cast(C); break; case Constant::BlockAddressVal: delete static_cast(C); break; case Constant::DSOLocalEquivalentVal: delete static_cast(C); break; case Constant::UndefValueVal: delete static_cast(C); break; case Constant::PoisonValueVal: delete static_cast(C); break; case Constant::ConstantExprVal: if (isa(C)) delete static_cast(C); else if (isa(C)) delete static_cast(C); else if (isa(C)) delete static_cast(C); else if (isa(C)) delete static_cast(C); else if (isa(C)) delete static_cast(C); else if (isa(C)) delete static_cast(C); else if (isa(C)) delete static_cast(C); else if (isa(C)) delete static_cast(C); else if (isa(C)) delete static_cast(C); else if (isa(C)) delete static_cast(C); else llvm_unreachable("Unexpected constant expr"); break; default: llvm_unreachable("Unexpected constant"); } } static bool canTrapImpl(const Constant *C, SmallPtrSetImpl &NonTrappingOps) { assert(C->getType()->isFirstClassType() && "Cannot evaluate aggregate vals!"); // The only thing that could possibly trap are constant exprs. const ConstantExpr *CE = dyn_cast(C); if (!CE) return false; // ConstantExpr traps if any operands can trap. 
for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) { if (ConstantExpr *Op = dyn_cast(CE->getOperand(i))) { if (NonTrappingOps.insert(Op).second && canTrapImpl(Op, NonTrappingOps)) return true; } } // Otherwise, only specific operations can trap. switch (CE->getOpcode()) { default: return false; case Instruction::UDiv: case Instruction::SDiv: case Instruction::URem: case Instruction::SRem: // Div and rem can trap if the RHS is not known to be non-zero. if (!isa(CE->getOperand(1)) ||CE->getOperand(1)->isNullValue()) return true; return false; } } bool Constant::canTrap() const { SmallPtrSet NonTrappingOps; return canTrapImpl(this, NonTrappingOps); } /// Check if C contains a GlobalValue for which Predicate is true. static bool ConstHasGlobalValuePredicate(const Constant *C, bool (*Predicate)(const GlobalValue *)) { SmallPtrSet Visited; SmallVector WorkList; WorkList.push_back(C); Visited.insert(C); while (!WorkList.empty()) { const Constant *WorkItem = WorkList.pop_back_val(); if (const auto *GV = dyn_cast(WorkItem)) if (Predicate(GV)) return true; for (const Value *Op : WorkItem->operands()) { const Constant *ConstOp = dyn_cast(Op); if (!ConstOp) continue; if (Visited.insert(ConstOp).second) WorkList.push_back(ConstOp); } } return false; } bool Constant::isThreadDependent() const { auto DLLImportPredicate = [](const GlobalValue *GV) { return GV->isThreadLocal(); }; return ConstHasGlobalValuePredicate(this, DLLImportPredicate); } bool Constant::isDLLImportDependent() const { auto DLLImportPredicate = [](const GlobalValue *GV) { return GV->hasDLLImportStorageClass(); }; return ConstHasGlobalValuePredicate(this, DLLImportPredicate); } bool Constant::isConstantUsed() const { for (const User *U : users()) { const Constant *UC = dyn_cast(U); if (!UC || isa(UC)) return true; if (UC->isConstantUsed()) return true; } return false; } bool Constant::needsDynamicRelocation() const { return getRelocationInfo() == GlobalRelocation; } bool Constant::needsRelocation() const 
{ return getRelocationInfo() != NoRelocation; } Constant::PossibleRelocationsTy Constant::getRelocationInfo() const { if (isa(this)) return GlobalRelocation; // Global reference. if (const BlockAddress *BA = dyn_cast(this)) return BA->getFunction()->getRelocationInfo(); if (const ConstantExpr *CE = dyn_cast(this)) { if (CE->getOpcode() == Instruction::Sub) { ConstantExpr *LHS = dyn_cast(CE->getOperand(0)); ConstantExpr *RHS = dyn_cast(CE->getOperand(1)); if (LHS && RHS && LHS->getOpcode() == Instruction::PtrToInt && RHS->getOpcode() == Instruction::PtrToInt) { Constant *LHSOp0 = LHS->getOperand(0); Constant *RHSOp0 = RHS->getOperand(0); // While raw uses of blockaddress need to be relocated, differences // between two of them don't when they are for labels in the same // function. This is a common idiom when creating a table for the // indirect goto extension, so we handle it efficiently here. if (isa(LHSOp0) && isa(RHSOp0) && cast(LHSOp0)->getFunction() == cast(RHSOp0)->getFunction()) return NoRelocation; // Relative pointers do not need to be dynamically relocated. if (auto *RHSGV = dyn_cast(RHSOp0->stripInBoundsConstantOffsets())) { auto *LHS = LHSOp0->stripInBoundsConstantOffsets(); if (auto *LHSGV = dyn_cast(LHS)) { if (LHSGV->isDSOLocal() && RHSGV->isDSOLocal()) return LocalRelocation; } else if (isa(LHS)) { if (RHSGV->isDSOLocal()) return LocalRelocation; } } } } } PossibleRelocationsTy Result = NoRelocation; for (unsigned i = 0, e = getNumOperands(); i != e; ++i) Result = std::max(cast(getOperand(i))->getRelocationInfo(), Result); return Result; } /// If the specified constantexpr is dead, remove it. This involves recursively /// eliminating any dead users of the constantexpr. 
static bool removeDeadUsersOfConstant(const Constant *C) { if (isa(C)) return false; // Cannot remove this while (!C->use_empty()) { const Constant *User = dyn_cast(C->user_back()); if (!User) return false; // Non-constant usage; if (!removeDeadUsersOfConstant(User)) return false; // Constant wasn't dead } // If C is only used by metadata, it should not be preserved but should have // its uses replaced. if (C->isUsedByMetadata()) { const_cast(C)->replaceAllUsesWith( UndefValue::get(C->getType())); } const_cast(C)->destroyConstant(); return true; } void Constant::removeDeadConstantUsers() const { Value::const_user_iterator I = user_begin(), E = user_end(); Value::const_user_iterator LastNonDeadUser = E; while (I != E) { const Constant *User = dyn_cast(*I); if (!User) { LastNonDeadUser = I; ++I; continue; } if (!removeDeadUsersOfConstant(User)) { // If the constant wasn't dead, remember that this was the last live use // and move on to the next constant. LastNonDeadUser = I; ++I; continue; } // If the constant was dead, then the iterator is invalidated. if (LastNonDeadUser == E) I = user_begin(); else I = std::next(LastNonDeadUser); } } Constant *Constant::replaceUndefsWith(Constant *C, Constant *Replacement) { assert(C && Replacement && "Expected non-nullptr constant arguments"); Type *Ty = C->getType(); if (match(C, m_Undef())) { assert(Ty == Replacement->getType() && "Expected matching types"); return Replacement; } // Don't know how to deal with this constant. auto *VTy = dyn_cast(Ty); if (!VTy) return C; unsigned NumElts = VTy->getNumElements(); SmallVector NewC(NumElts); for (unsigned i = 0; i != NumElts; ++i) { Constant *EltC = C->getAggregateElement(i); assert((!EltC || EltC->getType() == Replacement->getType()) && "Expected matching types"); NewC[i] = EltC && match(EltC, m_Undef()) ? 
Replacement : EltC; } return ConstantVector::get(NewC); } Constant *Constant::mergeUndefsWith(Constant *C, Constant *Other) { assert(C && Other && "Expected non-nullptr constant arguments"); if (match(C, m_Undef())) return C; Type *Ty = C->getType(); if (match(Other, m_Undef())) return UndefValue::get(Ty); auto *VTy = dyn_cast(Ty); if (!VTy) return C; Type *EltTy = VTy->getElementType(); unsigned NumElts = VTy->getNumElements(); assert(isa(Other->getType()) && cast(Other->getType())->getNumElements() == NumElts && "Type mismatch"); bool FoundExtraUndef = false; SmallVector NewC(NumElts); for (unsigned I = 0; I != NumElts; ++I) { NewC[I] = C->getAggregateElement(I); Constant *OtherEltC = Other->getAggregateElement(I); assert(NewC[I] && OtherEltC && "Unknown vector element"); if (!match(NewC[I], m_Undef()) && match(OtherEltC, m_Undef())) { NewC[I] = UndefValue::get(EltTy); FoundExtraUndef = true; } } if (FoundExtraUndef) return ConstantVector::get(NewC); return C; } bool Constant::isManifestConstant() const { if (isa(this)) return true; if (isa(this) || isa(this)) { for (const Value *Op : operand_values()) if (!cast(Op)->isManifestConstant()) return false; return true; } return false; } //===----------------------------------------------------------------------===// // ConstantInt //===----------------------------------------------------------------------===// ConstantInt::ConstantInt(IntegerType *Ty, const APInt &V) : ConstantData(Ty, ConstantIntVal), Val(V) { assert(V.getBitWidth() == Ty->getBitWidth() && "Invalid constant for type"); } ConstantInt *ConstantInt::getTrue(LLVMContext &Context) { LLVMContextImpl *pImpl = Context.pImpl; if (!pImpl->TheTrueVal) pImpl->TheTrueVal = ConstantInt::get(Type::getInt1Ty(Context), 1); return pImpl->TheTrueVal; } ConstantInt *ConstantInt::getFalse(LLVMContext &Context) { LLVMContextImpl *pImpl = Context.pImpl; if (!pImpl->TheFalseVal) pImpl->TheFalseVal = ConstantInt::get(Type::getInt1Ty(Context), 0); return pImpl->TheFalseVal; 
} ConstantInt *ConstantInt::getBool(LLVMContext &Context, bool V) { return V ? getTrue(Context) : getFalse(Context); } Constant *ConstantInt::getTrue(Type *Ty) { assert(Ty->isIntOrIntVectorTy(1) && "Type not i1 or vector of i1."); ConstantInt *TrueC = ConstantInt::getTrue(Ty->getContext()); if (auto *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getElementCount(), TrueC); return TrueC; } Constant *ConstantInt::getFalse(Type *Ty) { assert(Ty->isIntOrIntVectorTy(1) && "Type not i1 or vector of i1."); ConstantInt *FalseC = ConstantInt::getFalse(Ty->getContext()); if (auto *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getElementCount(), FalseC); return FalseC; } Constant *ConstantInt::getBool(Type *Ty, bool V) { return V ? getTrue(Ty) : getFalse(Ty); } // Get a ConstantInt from an APInt. ConstantInt *ConstantInt::get(LLVMContext &Context, const APInt &V) { // get an existing value or the insertion position LLVMContextImpl *pImpl = Context.pImpl; std::unique_ptr &Slot = pImpl->IntConstants[V]; if (!Slot) { // Get the corresponding integer type for the bit width of the value. IntegerType *ITy = IntegerType::get(Context, V.getBitWidth()); Slot.reset(new ConstantInt(ITy, V)); } assert(Slot->getType() == IntegerType::get(Context, V.getBitWidth())); return Slot.get(); } Constant *ConstantInt::get(Type *Ty, uint64_t V, bool isSigned) { Constant *C = get(cast(Ty->getScalarType()), V, isSigned); // For vectors, broadcast the value. 
if (VectorType *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getElementCount(), C); return C; } ConstantInt *ConstantInt::get(IntegerType *Ty, uint64_t V, bool isSigned) { return get(Ty->getContext(), APInt(Ty->getBitWidth(), V, isSigned)); } ConstantInt *ConstantInt::getSigned(IntegerType *Ty, int64_t V) { return get(Ty, V, true); } Constant *ConstantInt::getSigned(Type *Ty, int64_t V) { return get(Ty, V, true); } Constant *ConstantInt::get(Type *Ty, const APInt& V) { ConstantInt *C = get(Ty->getContext(), V); assert(C->getType() == Ty->getScalarType() && "ConstantInt type doesn't match the type implied by its value!"); // For vectors, broadcast the value. if (VectorType *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getElementCount(), C); return C; } ConstantInt *ConstantInt::get(IntegerType* Ty, StringRef Str, uint8_t radix) { return get(Ty->getContext(), APInt(Ty->getBitWidth(), Str, radix)); } /// Remove the constant from the constant table. void ConstantInt::destroyConstantImpl() { llvm_unreachable("You can't ConstantInt->destroyConstantImpl()!"); } //===----------------------------------------------------------------------===// // ConstantFP //===----------------------------------------------------------------------===// Constant *ConstantFP::get(Type *Ty, double V) { LLVMContext &Context = Ty->getContext(); APFloat FV(V); bool ignored; FV.convert(Ty->getScalarType()->getFltSemantics(), APFloat::rmNearestTiesToEven, &ignored); Constant *C = get(Context, FV); // For vectors, broadcast the value. if (VectorType *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getElementCount(), C); return C; } Constant *ConstantFP::get(Type *Ty, const APFloat &V) { ConstantFP *C = get(Ty->getContext(), V); assert(C->getType() == Ty->getScalarType() && "ConstantFP type doesn't match the type implied by its value!"); // For vectors, broadcast the value. 
if (auto *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getElementCount(), C); return C; } Constant *ConstantFP::get(Type *Ty, StringRef Str) { LLVMContext &Context = Ty->getContext(); APFloat FV(Ty->getScalarType()->getFltSemantics(), Str); Constant *C = get(Context, FV); // For vectors, broadcast the value. if (VectorType *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getElementCount(), C); return C; } Constant *ConstantFP::getNaN(Type *Ty, bool Negative, uint64_t Payload) { const fltSemantics &Semantics = Ty->getScalarType()->getFltSemantics(); APFloat NaN = APFloat::getNaN(Semantics, Negative, Payload); Constant *C = get(Ty->getContext(), NaN); if (VectorType *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getElementCount(), C); return C; } Constant *ConstantFP::getQNaN(Type *Ty, bool Negative, APInt *Payload) { const fltSemantics &Semantics = Ty->getScalarType()->getFltSemantics(); APFloat NaN = APFloat::getQNaN(Semantics, Negative, Payload); Constant *C = get(Ty->getContext(), NaN); if (VectorType *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getElementCount(), C); return C; } Constant *ConstantFP::getSNaN(Type *Ty, bool Negative, APInt *Payload) { const fltSemantics &Semantics = Ty->getScalarType()->getFltSemantics(); APFloat NaN = APFloat::getSNaN(Semantics, Negative, Payload); Constant *C = get(Ty->getContext(), NaN); if (VectorType *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getElementCount(), C); return C; } Constant *ConstantFP::getNegativeZero(Type *Ty) { const fltSemantics &Semantics = Ty->getScalarType()->getFltSemantics(); APFloat NegZero = APFloat::getZero(Semantics, /*Negative=*/true); Constant *C = get(Ty->getContext(), NegZero); if (VectorType *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getElementCount(), C); return C; } Constant *ConstantFP::getZeroValueForNegation(Type *Ty) { if (Ty->isFPOrFPVectorTy()) return getNegativeZero(Ty); return Constant::getNullValue(Ty); } // 
ConstantFP accessors. ConstantFP* ConstantFP::get(LLVMContext &Context, const APFloat& V) { LLVMContextImpl* pImpl = Context.pImpl; std::unique_ptr &Slot = pImpl->FPConstants[V]; if (!Slot) { Type *Ty = Type::getFloatingPointTy(Context, V.getSemantics()); Slot.reset(new ConstantFP(Ty, V)); } return Slot.get(); } Constant *ConstantFP::getInfinity(Type *Ty, bool Negative) { const fltSemantics &Semantics = Ty->getScalarType()->getFltSemantics(); Constant *C = get(Ty->getContext(), APFloat::getInf(Semantics, Negative)); if (VectorType *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getElementCount(), C); return C; } ConstantFP::ConstantFP(Type *Ty, const APFloat &V) : ConstantData(Ty, ConstantFPVal), Val(V) { assert(&V.getSemantics() == &Ty->getFltSemantics() && "FP type Mismatch"); } bool ConstantFP::isExactlyValue(const APFloat &V) const { return Val.bitwiseIsEqual(V); } /// Remove the constant from the constant table. void ConstantFP::destroyConstantImpl() { llvm_unreachable("You can't ConstantFP->destroyConstantImpl()!"); } //===----------------------------------------------------------------------===// // ConstantAggregateZero Implementation //===----------------------------------------------------------------------===// Constant *ConstantAggregateZero::getSequentialElement() const { if (auto *AT = dyn_cast(getType())) return Constant::getNullValue(AT->getElementType()); return Constant::getNullValue(cast(getType())->getElementType()); } Constant *ConstantAggregateZero::getStructElement(unsigned Elt) const { return Constant::getNullValue(getType()->getStructElementType(Elt)); } Constant *ConstantAggregateZero::getElementValue(Constant *C) const { if (isa(getType()) || isa(getType())) return getSequentialElement(); return getStructElement(cast(C)->getZExtValue()); } Constant *ConstantAggregateZero::getElementValue(unsigned Idx) const { if (isa(getType()) || isa(getType())) return getSequentialElement(); return getStructElement(Idx); } ElementCount 
ConstantAggregateZero::getElementCount() const { Type *Ty = getType(); if (auto *AT = dyn_cast(Ty)) return ElementCount::getFixed(AT->getNumElements()); if (auto *VT = dyn_cast(Ty)) return VT->getElementCount(); return ElementCount::getFixed(Ty->getStructNumElements()); } //===----------------------------------------------------------------------===// // UndefValue Implementation //===----------------------------------------------------------------------===// UndefValue *UndefValue::getSequentialElement() const { if (ArrayType *ATy = dyn_cast(getType())) return UndefValue::get(ATy->getElementType()); return UndefValue::get(cast(getType())->getElementType()); } UndefValue *UndefValue::getStructElement(unsigned Elt) const { return UndefValue::get(getType()->getStructElementType(Elt)); } UndefValue *UndefValue::getElementValue(Constant *C) const { if (isa(getType()) || isa(getType())) return getSequentialElement(); return getStructElement(cast(C)->getZExtValue()); } UndefValue *UndefValue::getElementValue(unsigned Idx) const { if (isa(getType()) || isa(getType())) return getSequentialElement(); return getStructElement(Idx); } unsigned UndefValue::getNumElements() const { Type *Ty = getType(); if (auto *AT = dyn_cast(Ty)) return AT->getNumElements(); if (auto *VT = dyn_cast(Ty)) return cast(VT)->getNumElements(); return Ty->getStructNumElements(); } //===----------------------------------------------------------------------===// // PoisonValue Implementation //===----------------------------------------------------------------------===// PoisonValue *PoisonValue::getSequentialElement() const { if (ArrayType *ATy = dyn_cast(getType())) return PoisonValue::get(ATy->getElementType()); return PoisonValue::get(cast(getType())->getElementType()); } PoisonValue *PoisonValue::getStructElement(unsigned Elt) const { return PoisonValue::get(getType()->getStructElementType(Elt)); } PoisonValue *PoisonValue::getElementValue(Constant *C) const { if (isa(getType()) || isa(getType())) 
return getSequentialElement(); return getStructElement(cast(C)->getZExtValue()); } PoisonValue *PoisonValue::getElementValue(unsigned Idx) const { if (isa(getType()) || isa(getType())) return getSequentialElement(); return getStructElement(Idx); } //===----------------------------------------------------------------------===// // ConstantXXX Classes //===----------------------------------------------------------------------===// template static bool rangeOnlyContains(ItTy Start, ItTy End, EltTy Elt) { for (; Start != End; ++Start) if (*Start != Elt) return false; return true; } template static Constant *getIntSequenceIfElementsMatch(ArrayRef V) { assert(!V.empty() && "Cannot get empty int sequence."); SmallVector Elts; for (Constant *C : V) if (auto *CI = dyn_cast(C)) Elts.push_back(CI->getZExtValue()); else return nullptr; return SequentialTy::get(V[0]->getContext(), Elts); } template static Constant *getFPSequenceIfElementsMatch(ArrayRef V) { assert(!V.empty() && "Cannot get empty FP sequence."); SmallVector Elts; for (Constant *C : V) if (auto *CFP = dyn_cast(C)) Elts.push_back(CFP->getValueAPF().bitcastToAPInt().getLimitedValue()); else return nullptr; return SequentialTy::getFP(V[0]->getType(), Elts); } template static Constant *getSequenceIfElementsMatch(Constant *C, ArrayRef V) { // We speculatively build the elements here even if it turns out that there is // a constantexpr or something else weird, since it is so uncommon for that to // happen. 
if (ConstantInt *CI = dyn_cast(C)) { if (CI->getType()->isIntegerTy(8)) return getIntSequenceIfElementsMatch(V); else if (CI->getType()->isIntegerTy(16)) return getIntSequenceIfElementsMatch(V); else if (CI->getType()->isIntegerTy(32)) return getIntSequenceIfElementsMatch(V); else if (CI->getType()->isIntegerTy(64)) return getIntSequenceIfElementsMatch(V); } else if (ConstantFP *CFP = dyn_cast(C)) { if (CFP->getType()->isHalfTy() || CFP->getType()->isBFloatTy()) return getFPSequenceIfElementsMatch(V); else if (CFP->getType()->isFloatTy()) return getFPSequenceIfElementsMatch(V); else if (CFP->getType()->isDoubleTy()) return getFPSequenceIfElementsMatch(V); } return nullptr; } ConstantAggregate::ConstantAggregate(Type *T, ValueTy VT, ArrayRef V) : Constant(T, VT, OperandTraits::op_end(this) - V.size(), V.size()) { llvm::copy(V, op_begin()); // Check that types match, unless this is an opaque struct. if (auto *ST = dyn_cast(T)) { if (ST->isOpaque()) return; for (unsigned I = 0, E = V.size(); I != E; ++I) assert(V[I]->getType() == ST->getTypeAtIndex(I) && "Initializer for struct element doesn't match!"); } } ConstantArray::ConstantArray(ArrayType *T, ArrayRef V) : ConstantAggregate(T, ConstantArrayVal, V) { assert(V.size() == T->getNumElements() && "Invalid initializer for constant array"); } Constant *ConstantArray::get(ArrayType *Ty, ArrayRef V) { if (Constant *C = getImpl(Ty, V)) return C; return Ty->getContext().pImpl->ArrayConstants.getOrCreate(Ty, V); } Constant *ConstantArray::getImpl(ArrayType *Ty, ArrayRef V) { // Empty arrays are canonicalized to ConstantAggregateZero. if (V.empty()) return ConstantAggregateZero::get(Ty); for (unsigned i = 0, e = V.size(); i != e; ++i) { assert(V[i]->getType() == Ty->getElementType() && "Wrong type in array element initializer"); } // If this is an all-zero array, return a ConstantAggregateZero object. If // all undef, return an UndefValue, if "all simple", then return a // ConstantDataArray. 
Constant *C = V[0]; if (isa(C) && rangeOnlyContains(V.begin(), V.end(), C)) return PoisonValue::get(Ty); if (isa(C) && rangeOnlyContains(V.begin(), V.end(), C)) return UndefValue::get(Ty); if (C->isNullValue() && rangeOnlyContains(V.begin(), V.end(), C)) return ConstantAggregateZero::get(Ty); // Check to see if all of the elements are ConstantFP or ConstantInt and if // the element type is compatible with ConstantDataVector. If so, use it. if (ConstantDataSequential::isElementTypeCompatible(C->getType())) return getSequenceIfElementsMatch(C, V); // Otherwise, we really do want to create a ConstantArray. return nullptr; } StructType *ConstantStruct::getTypeForElements(LLVMContext &Context, ArrayRef V, bool Packed) { unsigned VecSize = V.size(); SmallVector EltTypes(VecSize); for (unsigned i = 0; i != VecSize; ++i) EltTypes[i] = V[i]->getType(); return StructType::get(Context, EltTypes, Packed); } StructType *ConstantStruct::getTypeForElements(ArrayRef V, bool Packed) { assert(!V.empty() && "ConstantStruct::getTypeForElements cannot be called on empty list"); return getTypeForElements(V[0]->getContext(), V, Packed); } ConstantStruct::ConstantStruct(StructType *T, ArrayRef V) : ConstantAggregate(T, ConstantStructVal, V) { assert((T->isOpaque() || V.size() == T->getNumElements()) && "Invalid initializer for constant struct"); } // ConstantStruct accessors. Constant *ConstantStruct::get(StructType *ST, ArrayRef V) { assert((ST->isOpaque() || ST->getNumElements() == V.size()) && "Incorrect # elements specified to ConstantStruct::get"); // Create a ConstantAggregateZero value if all elements are zeros. bool isZero = true; bool isUndef = false; bool isPoison = false; if (!V.empty()) { isUndef = isa(V[0]); isPoison = isa(V[0]); isZero = V[0]->isNullValue(); // PoisonValue inherits UndefValue, so its check is not necessary. 
if (isUndef || isZero) { for (unsigned i = 0, e = V.size(); i != e; ++i) { if (!V[i]->isNullValue()) isZero = false; if (!isa(V[i])) isPoison = false; if (isa(V[i]) || !isa(V[i])) isUndef = false; } } } if (isZero) return ConstantAggregateZero::get(ST); if (isPoison) return PoisonValue::get(ST); if (isUndef) return UndefValue::get(ST); return ST->getContext().pImpl->StructConstants.getOrCreate(ST, V); } ConstantVector::ConstantVector(VectorType *T, ArrayRef V) : ConstantAggregate(T, ConstantVectorVal, V) { assert(V.size() == cast(T)->getNumElements() && "Invalid initializer for constant vector"); } // ConstantVector accessors. Constant *ConstantVector::get(ArrayRef V) { if (Constant *C = getImpl(V)) return C; auto *Ty = FixedVectorType::get(V.front()->getType(), V.size()); return Ty->getContext().pImpl->VectorConstants.getOrCreate(Ty, V); } Constant *ConstantVector::getImpl(ArrayRef V) { assert(!V.empty() && "Vectors can't be empty"); auto *T = FixedVectorType::get(V.front()->getType(), V.size()); // If this is an all-undef or all-zero vector, return a // ConstantAggregateZero or UndefValue. Constant *C = V[0]; bool isZero = C->isNullValue(); bool isUndef = isa(C); bool isPoison = isa(C); if (isZero || isUndef) { for (unsigned i = 1, e = V.size(); i != e; ++i) if (V[i] != C) { isZero = isUndef = isPoison = false; break; } } if (isZero) return ConstantAggregateZero::get(T); if (isPoison) return PoisonValue::get(T); if (isUndef) return UndefValue::get(T); // Check to see if all of the elements are ConstantFP or ConstantInt and if // the element type is compatible with ConstantDataVector. If so, use it. if (ConstantDataSequential::isElementTypeCompatible(C->getType())) return getSequenceIfElementsMatch(C, V); // Otherwise, the element type isn't compatible with ConstantDataVector, or // the operand list contains a ConstantExpr or something else strange. 
return nullptr; } Constant *ConstantVector::getSplat(ElementCount EC, Constant *V) { if (!EC.isScalable()) { // If this splat is compatible with ConstantDataVector, use it instead of // ConstantVector. if ((isa(V) || isa(V)) && ConstantDataSequential::isElementTypeCompatible(V->getType())) return ConstantDataVector::getSplat(EC.getKnownMinValue(), V); SmallVector Elts(EC.getKnownMinValue(), V); return get(Elts); } Type *VTy = VectorType::get(V->getType(), EC); if (V->isNullValue()) return ConstantAggregateZero::get(VTy); else if (isa(V)) return UndefValue::get(VTy); Type *I32Ty = Type::getInt32Ty(VTy->getContext()); // Move scalar into vector. Constant *UndefV = UndefValue::get(VTy); V = ConstantExpr::getInsertElement(UndefV, V, ConstantInt::get(I32Ty, 0)); // Build shuffle mask to perform the splat. SmallVector Zeros(EC.getKnownMinValue(), 0); // Splat. return ConstantExpr::getShuffleVector(V, UndefV, Zeros); } ConstantTokenNone *ConstantTokenNone::get(LLVMContext &Context) { LLVMContextImpl *pImpl = Context.pImpl; if (!pImpl->TheNoneToken) pImpl->TheNoneToken.reset(new ConstantTokenNone(Context)); return pImpl->TheNoneToken.get(); } /// Remove the constant from the constant table. void ConstantTokenNone::destroyConstantImpl() { llvm_unreachable("You can't ConstantTokenNone->destroyConstantImpl()!"); } // Utility function for determining if a ConstantExpr is a CastOp or not. 
This // can't be inline because we don't want to #include Instruction.h into // Constant.h bool ConstantExpr::isCast() const { return Instruction::isCast(getOpcode()); } bool ConstantExpr::isCompare() const { return getOpcode() == Instruction::ICmp || getOpcode() == Instruction::FCmp; } bool ConstantExpr::isGEPWithNoNotionalOverIndexing() const { if (getOpcode() != Instruction::GetElementPtr) return false; gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this); User::const_op_iterator OI = std::next(this->op_begin()); // The remaining indices may be compile-time known integers within the bounds // of the corresponding notional static array types. for (; GEPI != E; ++GEPI, ++OI) { if (isa(*OI)) continue; auto *CI = dyn_cast(*OI); if (!CI || (GEPI.isBoundedSequential() && (CI->getValue().getActiveBits() > 64 || CI->getZExtValue() >= GEPI.getSequentialNumElements()))) return false; } // All the indices checked out. return true; } bool ConstantExpr::hasIndices() const { return getOpcode() == Instruction::ExtractValue || getOpcode() == Instruction::InsertValue; } ArrayRef ConstantExpr::getIndices() const { if (const ExtractValueConstantExpr *EVCE = dyn_cast(this)) return EVCE->Indices; return cast(this)->Indices; } unsigned ConstantExpr::getPredicate() const { return cast(this)->predicate; } ArrayRef ConstantExpr::getShuffleMask() const { return cast(this)->ShuffleMask; } Constant *ConstantExpr::getShuffleMaskForBitcode() const { return cast(this)->ShuffleMaskForBitcode; } Constant * ConstantExpr::getWithOperandReplaced(unsigned OpNo, Constant *Op) const { assert(Op->getType() == getOperand(OpNo)->getType() && "Replacing operand with value of different type!"); if (getOperand(OpNo) == Op) return const_cast(this); SmallVector NewOps; for (unsigned i = 0, e = getNumOperands(); i != e; ++i) NewOps.push_back(i == OpNo ? 
Op : getOperand(i)); return getWithOperands(NewOps); } Constant *ConstantExpr::getWithOperands(ArrayRef Ops, Type *Ty, bool OnlyIfReduced, Type *SrcTy) const { assert(Ops.size() == getNumOperands() && "Operand count mismatch!"); // If no operands changed return self. if (Ty == getType() && std::equal(Ops.begin(), Ops.end(), op_begin())) return const_cast(this); Type *OnlyIfReducedTy = OnlyIfReduced ? Ty : nullptr; switch (getOpcode()) { case Instruction::Trunc: case Instruction::ZExt: case Instruction::SExt: case Instruction::FPTrunc: case Instruction::FPExt: case Instruction::UIToFP: case Instruction::SIToFP: case Instruction::FPToUI: case Instruction::FPToSI: case Instruction::PtrToInt: case Instruction::IntToPtr: case Instruction::BitCast: case Instruction::AddrSpaceCast: return ConstantExpr::getCast(getOpcode(), Ops[0], Ty, OnlyIfReduced); case Instruction::Select: return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2], OnlyIfReducedTy); case Instruction::InsertElement: return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2], OnlyIfReducedTy); case Instruction::ExtractElement: return ConstantExpr::getExtractElement(Ops[0], Ops[1], OnlyIfReducedTy); case Instruction::InsertValue: return ConstantExpr::getInsertValue(Ops[0], Ops[1], getIndices(), OnlyIfReducedTy); case Instruction::ExtractValue: return ConstantExpr::getExtractValue(Ops[0], getIndices(), OnlyIfReducedTy); case Instruction::FNeg: return ConstantExpr::getFNeg(Ops[0]); case Instruction::ShuffleVector: return ConstantExpr::getShuffleVector(Ops[0], Ops[1], getShuffleMask(), OnlyIfReducedTy); case Instruction::GetElementPtr: { auto *GEPO = cast(this); assert(SrcTy || (Ops[0]->getType() == getOperand(0)->getType())); return ConstantExpr::getGetElementPtr( SrcTy ? 
SrcTy : GEPO->getSourceElementType(), Ops[0], Ops.slice(1), GEPO->isInBounds(), GEPO->getInRangeIndex(), OnlyIfReducedTy); } case Instruction::ICmp: case Instruction::FCmp: return ConstantExpr::getCompare(getPredicate(), Ops[0], Ops[1], OnlyIfReducedTy); default: assert(getNumOperands() == 2 && "Must be binary operator?"); return ConstantExpr::get(getOpcode(), Ops[0], Ops[1], SubclassOptionalData, OnlyIfReducedTy); } } //===----------------------------------------------------------------------===// // isValueValidForType implementations bool ConstantInt::isValueValidForType(Type *Ty, uint64_t Val) { unsigned NumBits = Ty->getIntegerBitWidth(); // assert okay if (Ty->isIntegerTy(1)) return Val == 0 || Val == 1; return isUIntN(NumBits, Val); } bool ConstantInt::isValueValidForType(Type *Ty, int64_t Val) { unsigned NumBits = Ty->getIntegerBitWidth(); if (Ty->isIntegerTy(1)) return Val == 0 || Val == 1 || Val == -1; return isIntN(NumBits, Val); } bool ConstantFP::isValueValidForType(Type *Ty, const APFloat& Val) { // convert modifies in place, so make a copy. APFloat Val2 = APFloat(Val); bool losesInfo; switch (Ty->getTypeID()) { default: return false; // These can't be represented as floating point! 
  // FIXME rounding mode needs to be more flexible
  case Type::HalfTyID: {
    // Exact match, or a round-trip conversion that loses no information.
    if (&Val2.getSemantics() == &APFloat::IEEEhalf())
      return true;
    Val2.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &losesInfo);
    return !losesInfo;
  }
  case Type::BFloatTyID: {
    if (&Val2.getSemantics() == &APFloat::BFloat())
      return true;
    Val2.convert(APFloat::BFloat(), APFloat::rmNearestTiesToEven, &losesInfo);
    return !losesInfo;
  }
  case Type::FloatTyID: {
    if (&Val2.getSemantics() == &APFloat::IEEEsingle())
      return true;
    Val2.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
                 &losesInfo);
    return !losesInfo;
  }
  case Type::DoubleTyID: {
    // Anything half/bfloat/single/double-sized is representable exactly.
    if (&Val2.getSemantics() == &APFloat::IEEEhalf() ||
        &Val2.getSemantics() == &APFloat::BFloat() ||
        &Val2.getSemantics() == &APFloat::IEEEsingle() ||
        &Val2.getSemantics() == &APFloat::IEEEdouble())
      return true;
    Val2.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
                 &losesInfo);
    return !losesInfo;
  }
  // The wider formats accept any of the strictly-smaller IEEE semantics.
  case Type::X86_FP80TyID:
    return &Val2.getSemantics() == &APFloat::IEEEhalf() ||
           &Val2.getSemantics() == &APFloat::BFloat() ||
           &Val2.getSemantics() == &APFloat::IEEEsingle() ||
           &Val2.getSemantics() == &APFloat::IEEEdouble() ||
           &Val2.getSemantics() == &APFloat::x87DoubleExtended();
  case Type::FP128TyID:
    return &Val2.getSemantics() == &APFloat::IEEEhalf() ||
           &Val2.getSemantics() == &APFloat::BFloat() ||
           &Val2.getSemantics() == &APFloat::IEEEsingle() ||
           &Val2.getSemantics() == &APFloat::IEEEdouble() ||
           &Val2.getSemantics() == &APFloat::IEEEquad();
  case Type::PPC_FP128TyID:
    return &Val2.getSemantics() == &APFloat::IEEEhalf() ||
           &Val2.getSemantics() == &APFloat::BFloat() ||
           &Val2.getSemantics() == &APFloat::IEEEsingle() ||
           &Val2.getSemantics() == &APFloat::IEEEdouble() ||
           &Val2.getSemantics() == &APFloat::PPCDoubleDouble();
  }
}

//===----------------------------------------------------------------------===//
//                      Factory Function Implementation

/// Return the unique zero-aggregate constant for \p Ty, creating and caching
/// it in the per-context table on first use.
ConstantAggregateZero *ConstantAggregateZero::get(Type *Ty) {
  assert((Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()) &&
         "Cannot create an aggregate zero of non-aggregate type!");
  std::unique_ptr &Entry = Ty->getContext().pImpl->CAZConstants[Ty];
  if (!Entry)
    Entry.reset(new ConstantAggregateZero(Ty));
  return Entry.get();
}

/// Remove the constant from the constant table.
void ConstantAggregateZero::destroyConstantImpl() {
  getContext().pImpl->CAZConstants.erase(getType());
}

/// Remove the constant from the constant table.
void ConstantArray::destroyConstantImpl() {
  getType()->getContext().pImpl->ArrayConstants.remove(this);
}

//---- ConstantStruct::get() implementation...
//

/// Remove the constant from the constant table.
void ConstantStruct::destroyConstantImpl() {
  getType()->getContext().pImpl->StructConstants.remove(this);
}

/// Remove the constant from the constant table.
void ConstantVector::destroyConstantImpl() {
  getType()->getContext().pImpl->VectorConstants.remove(this);
}

/// If this vector constant has every element equal, return that element;
/// otherwise return nullptr.  \p AllowUndefs lets undef elements be ignored.
Constant *Constant::getSplatValue(bool AllowUndefs) const {
  assert(this->getType()->isVectorTy() && "Only valid for vectors!");
  if (isa(this))
    return getNullValue(cast(getType())->getElementType());
  if (const ConstantDataVector *CV = dyn_cast(this))
    return CV->getSplatValue();
  if (const ConstantVector *CV = dyn_cast(this))
    return CV->getSplatValue(AllowUndefs);

  // Check if this is a constant expression splat of the form returned by
  // ConstantVector::getSplat()
  const auto *Shuf = dyn_cast(this);
  if (Shuf && Shuf->getOpcode() == Instruction::ShuffleVector &&
      isa(Shuf->getOperand(1))) {
    const auto *IElt = dyn_cast(Shuf->getOperand(0));
    if (IElt && IElt->getOpcode() == Instruction::InsertElement &&
        isa(IElt->getOperand(0))) {
      ArrayRef Mask = Shuf->getShuffleMask();
      Constant *SplatVal = IElt->getOperand(1);
      ConstantInt *Index = dyn_cast(IElt->getOperand(2));
      // Splat pattern: insert at lane 0, then shuffle every lane from index 0.
      if (Index && Index->getValue() == 0 &&
          llvm::all_of(Mask, [](int I) { return I == 0; }))
        return SplatVal;
    }
  }

  return nullptr;
}

Constant *ConstantVector::getSplatValue(bool AllowUndefs) const {
  // Check out first element.
Constant *Elt = getOperand(0); // Then make sure all remaining elements point to the same value. for (unsigned I = 1, E = getNumOperands(); I < E; ++I) { Constant *OpC = getOperand(I); if (OpC == Elt) continue; // Strict mode: any mismatch is not a splat. if (!AllowUndefs) return nullptr; // Allow undefs mode: ignore undefined elements. if (isa(OpC)) continue; // If we do not have a defined element yet, use the current operand. if (isa(Elt)) Elt = OpC; if (OpC != Elt) return nullptr; } return Elt; } const APInt &Constant::getUniqueInteger() const { if (const ConstantInt *CI = dyn_cast(this)) return CI->getValue(); assert(this->getSplatValue() && "Doesn't contain a unique integer!"); const Constant *C = this->getAggregateElement(0U); assert(C && isa(C) && "Not a vector of numbers!"); return cast(C)->getValue(); } //---- ConstantPointerNull::get() implementation. // ConstantPointerNull *ConstantPointerNull::get(PointerType *Ty) { std::unique_ptr &Entry = Ty->getContext().pImpl->CPNConstants[Ty]; if (!Entry) Entry.reset(new ConstantPointerNull(Ty)); return Entry.get(); } /// Remove the constant from the constant table. void ConstantPointerNull::destroyConstantImpl() { getContext().pImpl->CPNConstants.erase(getType()); } UndefValue *UndefValue::get(Type *Ty) { std::unique_ptr &Entry = Ty->getContext().pImpl->UVConstants[Ty]; if (!Entry) Entry.reset(new UndefValue(Ty)); return Entry.get(); } /// Remove the constant from the constant table. void UndefValue::destroyConstantImpl() { // Free the constant and any dangling references to it. 
if (getValueID() == UndefValueVal) { getContext().pImpl->UVConstants.erase(getType()); } else if (getValueID() == PoisonValueVal) { getContext().pImpl->PVConstants.erase(getType()); } llvm_unreachable("Not a undef or a poison!"); } PoisonValue *PoisonValue::get(Type *Ty) { std::unique_ptr &Entry = Ty->getContext().pImpl->PVConstants[Ty]; if (!Entry) Entry.reset(new PoisonValue(Ty)); return Entry.get(); } /// Remove the constant from the constant table. void PoisonValue::destroyConstantImpl() { // Free the constant and any dangling references to it. getContext().pImpl->PVConstants.erase(getType()); } BlockAddress *BlockAddress::get(BasicBlock *BB) { assert(BB->getParent() && "Block must have a parent"); return get(BB->getParent(), BB); } BlockAddress *BlockAddress::get(Function *F, BasicBlock *BB) { BlockAddress *&BA = F->getContext().pImpl->BlockAddresses[std::make_pair(F, BB)]; if (!BA) BA = new BlockAddress(F, BB); assert(BA->getFunction() == F && "Basic block moved between functions"); return BA; } BlockAddress::BlockAddress(Function *F, BasicBlock *BB) : Constant(Type::getInt8PtrTy(F->getContext(), F->getAddressSpace()), Value::BlockAddressVal, &Op<0>(), 2) { setOperand(0, F); setOperand(1, BB); BB->AdjustBlockAddressRefCount(1); } BlockAddress *BlockAddress::lookup(const BasicBlock *BB) { if (!BB->hasAddressTaken()) return nullptr; const Function *F = BB->getParent(); assert(F && "Block must have a parent"); BlockAddress *BA = F->getContext().pImpl->BlockAddresses.lookup(std::make_pair(F, BB)); assert(BA && "Refcount and block address map disagree!"); return BA; } /// Remove the constant from the constant table. void BlockAddress::destroyConstantImpl() { getFunction()->getType()->getContext().pImpl ->BlockAddresses.erase(std::make_pair(getFunction(), getBasicBlock())); getBasicBlock()->AdjustBlockAddressRefCount(-1); } Value *BlockAddress::handleOperandChangeImpl(Value *From, Value *To) { // This could be replacing either the Basic Block or the Function. 
// In either
  // case, we have to remove the map entry.
  Function *NewF = getFunction();
  BasicBlock *NewBB = getBasicBlock();

  if (From == NewF)
    NewF = cast(To->stripPointerCasts());
  else {
    assert(From == NewBB && "From does not match any operand");
    NewBB = cast(To);
  }

  // See if the 'new' entry already exists, if not, just update this in place
  // and return early.
  BlockAddress *&NewBA =
      getContext().pImpl->BlockAddresses[std::make_pair(NewF, NewBB)];
  if (NewBA)
    return NewBA;

  getBasicBlock()->AdjustBlockAddressRefCount(-1);

  // Remove the old entry, this can't cause the map to rehash (just a
  // tombstone will get added).
  getContext().pImpl->BlockAddresses.erase(std::make_pair(getFunction(),
                                                          getBasicBlock()));
  NewBA = this;
  setOperand(0, NewF);
  setOperand(1, NewBB);
  getBasicBlock()->AdjustBlockAddressRefCount(1);

  // If we just want to keep the existing value, then return null.
  // Callers know that this means we shouldn't delete this value.
  return nullptr;
}

/// Return the unique dso_local_equivalent constant for \p GV, creating and
/// caching it in the per-context table on first use.
DSOLocalEquivalent *DSOLocalEquivalent::get(GlobalValue *GV) {
  DSOLocalEquivalent *&Equiv = GV->getContext().pImpl->DSOLocalEquivalents[GV];
  if (!Equiv)
    Equiv = new DSOLocalEquivalent(GV);

  assert(Equiv->getGlobalValue() == GV &&
         "DSOLocalFunction does not match the expected global value");
  return Equiv;
}

DSOLocalEquivalent::DSOLocalEquivalent(GlobalValue *GV)
    : Constant(GV->getType(), Value::DSOLocalEquivalentVal, &Op<0>(), 1) {
  setOperand(0, GV);
}

/// Remove the constant from the constant table.
void DSOLocalEquivalent::destroyConstantImpl() {
  const GlobalValue *GV = getGlobalValue();
  GV->getContext().pImpl->DSOLocalEquivalents.erase(GV);
}

Value *DSOLocalEquivalent::handleOperandChangeImpl(Value *From, Value *To) {
  assert(From == getGlobalValue() && "Changing value does not match operand.");
  assert(isa(To) && "Can only replace the operands with a constant");

  // The replacement is with another global value.
  if (const auto *ToObj = dyn_cast(To)) {
    DSOLocalEquivalent *&NewEquiv =
        getContext().pImpl->DSOLocalEquivalents[ToObj];
    if (NewEquiv)
      return llvm::ConstantExpr::getBitCast(NewEquiv, getType());
  }

  // If the argument is replaced with a null value, just replace this constant
  // with a null value.
  if (cast(To)->isNullValue())
    return To;

  // The replacement could be a bitcast or an alias to another function. We can
  // replace it with a bitcast to the dso_local_equivalent of that function.
  auto *Func = cast(To->stripPointerCastsAndAliases());
  DSOLocalEquivalent *&NewEquiv = getContext().pImpl->DSOLocalEquivalents[Func];
  if (NewEquiv)
    return llvm::ConstantExpr::getBitCast(NewEquiv, getType());

  // Replace this with the new one.
  getContext().pImpl->DSOLocalEquivalents.erase(getGlobalValue());
  NewEquiv = this;
  setOperand(0, Func);

  if (Func->getType() != getType()) {
    // It is ok to mutate the type here because this constant should always
    // reflect the type of the function it's holding.
    mutateType(Func->getType());
  }
  return nullptr;
}

//---- ConstantExpr::get() implementations.
//

/// This is a utility function to handle folding of casts and lookup of the
/// cast in the ExprConstants map. It is used by the various get* methods below.
static Constant *getFoldedCast(Instruction::CastOps opc, Constant *C, Type *Ty,
                               bool OnlyIfReduced = false) {
  assert(Ty->isFirstClassType() && "Cannot cast to an aggregate type!");
  // Fold a few common cases
  if (Constant *FC = ConstantFoldCastInstruction(opc, C, Ty))
    return FC;

  if (OnlyIfReduced)
    return nullptr;

  LLVMContextImpl *pImpl = Ty->getContext().pImpl;

  // Look up the constant in the table first to ensure uniqueness.
ConstantExprKeyType Key(opc, C);
  return pImpl->ExprConstants.getOrCreate(Ty, Key);
}

/// Dispatch a cast opcode to the matching typed factory method.
/// Asserts that \p oc is a valid cast opcode and the cast is legal.
Constant *ConstantExpr::getCast(unsigned oc, Constant *C, Type *Ty,
                                bool OnlyIfReduced) {
  Instruction::CastOps opc = Instruction::CastOps(oc);
  assert(Instruction::isCast(opc) && "opcode out of range");
  assert(C && Ty && "Null arguments to getCast");
  assert(CastInst::castIsValid(opc, C, Ty) && "Invalid constantexpr cast!");

  switch (opc) {
  default:
    llvm_unreachable("Invalid cast opcode");
  case Instruction::Trunc:
    return getTrunc(C, Ty, OnlyIfReduced);
  case Instruction::ZExt:
    return getZExt(C, Ty, OnlyIfReduced);
  case Instruction::SExt:
    return getSExt(C, Ty, OnlyIfReduced);
  case Instruction::FPTrunc:
    return getFPTrunc(C, Ty, OnlyIfReduced);
  case Instruction::FPExt:
    return getFPExtend(C, Ty, OnlyIfReduced);
  case Instruction::UIToFP:
    return getUIToFP(C, Ty, OnlyIfReduced);
  case Instruction::SIToFP:
    return getSIToFP(C, Ty, OnlyIfReduced);
  case Instruction::FPToUI:
    return getFPToUI(C, Ty, OnlyIfReduced);
  case Instruction::FPToSI:
    return getFPToSI(C, Ty, OnlyIfReduced);
  case Instruction::PtrToInt:
    return getPtrToInt(C, Ty, OnlyIfReduced);
  case Instruction::IntToPtr:
    return getIntToPtr(C, Ty, OnlyIfReduced);
  case Instruction::BitCast:
    return getBitCast(C, Ty, OnlyIfReduced);
  case Instruction::AddrSpaceCast:
    return getAddrSpaceCast(C, Ty, OnlyIfReduced);
  }
}

/// Zero-extend or bitcast, depending on whether the scalar sizes differ.
Constant *ConstantExpr::getZExtOrBitCast(Constant *C, Type *Ty) {
  if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return getBitCast(C, Ty);
  return getZExt(C, Ty);
}

/// Sign-extend or bitcast, depending on whether the scalar sizes differ.
Constant *ConstantExpr::getSExtOrBitCast(Constant *C, Type *Ty) {
  if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return getBitCast(C, Ty);
  return getSExt(C, Ty);
}

/// Truncate or bitcast, depending on whether the scalar sizes differ.
Constant *ConstantExpr::getTruncOrBitCast(Constant *C, Type *Ty) {
  if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return getBitCast(C, Ty);
  return getTrunc(C, Ty);
}

/// Cast a pointer constant to \p Ty: ptrtoint for integer destinations,
/// addrspacecast when the address spaces differ, otherwise bitcast.
Constant *ConstantExpr::getPointerCast(Constant *S, Type *Ty) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return getPtrToInt(S, Ty);

  unsigned SrcAS = S->getType()->getPointerAddressSpace();
  if (Ty->isPtrOrPtrVectorTy() && SrcAS != Ty->getPointerAddressSpace())
    return getAddrSpaceCast(S, Ty);

  return getBitCast(S, Ty);
}

/// Pointer-to-pointer cast: addrspacecast when address spaces differ,
/// otherwise bitcast.
Constant *ConstantExpr::getPointerBitCastOrAddrSpaceCast(Constant *S,
                                                         Type *Ty) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return getAddrSpaceCast(S, Ty);

  return getBitCast(S, Ty);
}

/// Integer-to-integer cast choosing trunc/ext/bitcast by relative width;
/// \p isSigned selects sext vs zext on widening.
Constant *ConstantExpr::getIntegerCast(Constant *C, Type *Ty, bool isSigned) {
  assert(C->getType()->isIntOrIntVectorTy() &&
         Ty->isIntOrIntVectorTy() && "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::Trunc :
      (isSigned ? Instruction::SExt : Instruction::ZExt)));
  return getCast(opcode, C, Ty);
}

/// Floating-point-to-floating-point cast choosing fptrunc/fpext by width.
Constant *ConstantExpr::getFPCast(Constant *C, Type *Ty) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  if (SrcBits == DstBits)
    return C; // Avoid a useless cast
  Instruction::CastOps opcode =
    (SrcBits > DstBits ?
Instruction::FPTrunc : Instruction::FPExt);
  return getCast(opcode, C, Ty);
}

/// Build a trunc constant expression; asserts src wider than dst.
Constant *ConstantExpr::getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced) {
#ifndef NDEBUG
  bool fromVec = isa(C->getType());
  bool toVec = isa(Ty);
#endif
  assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
  assert(C->getType()->isIntOrIntVectorTy() && "Trunc operand must be integer");
  assert(Ty->isIntOrIntVectorTy() && "Trunc produces only integral");
  assert(C->getType()->getScalarSizeInBits() > Ty->getScalarSizeInBits()&&
         "SrcTy must be larger than DestTy for Trunc!");

  return getFoldedCast(Instruction::Trunc, C, Ty, OnlyIfReduced);
}

/// Build a sext constant expression; asserts src narrower than dst.
Constant *ConstantExpr::getSExt(Constant *C, Type *Ty, bool OnlyIfReduced) {
#ifndef NDEBUG
  bool fromVec = isa(C->getType());
  bool toVec = isa(Ty);
#endif
  assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
  assert(C->getType()->isIntOrIntVectorTy() && "SExt operand must be integral");
  assert(Ty->isIntOrIntVectorTy() && "SExt produces only integer");
  assert(C->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits()&&
         "SrcTy must be smaller than DestTy for SExt!");

  return getFoldedCast(Instruction::SExt, C, Ty, OnlyIfReduced);
}

/// Build a zext constant expression; asserts src narrower than dst.
Constant *ConstantExpr::getZExt(Constant *C, Type *Ty, bool OnlyIfReduced) {
#ifndef NDEBUG
  bool fromVec = isa(C->getType());
  bool toVec = isa(Ty);
#endif
  assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
  assert(C->getType()->isIntOrIntVectorTy() && "ZEXt operand must be integral");
  assert(Ty->isIntOrIntVectorTy() && "ZExt produces only integer");
  assert(C->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits()&&
         "SrcTy must be smaller than DestTy for ZExt!");

  return getFoldedCast(Instruction::ZExt, C, Ty, OnlyIfReduced);
}

/// Build an fptrunc constant expression; asserts src wider than dst.
Constant *ConstantExpr::getFPTrunc(Constant *C, Type *Ty, bool OnlyIfReduced) {
#ifndef NDEBUG
  bool fromVec = isa(C->getType());
  bool toVec = isa(Ty);
#endif
  assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         C->getType()->getScalarSizeInBits() > Ty->getScalarSizeInBits()&&
         "This is an illegal floating point truncation!");
  return getFoldedCast(Instruction::FPTrunc, C, Ty, OnlyIfReduced);
}

/// Build an fpext constant expression; asserts src narrower than dst.
Constant *ConstantExpr::getFPExtend(Constant *C, Type *Ty, bool OnlyIfReduced) {
#ifndef NDEBUG
  bool fromVec = isa(C->getType());
  bool toVec = isa(Ty);
#endif
  assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         C->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits()&&
         "This is an illegal floating point extension!");
  return getFoldedCast(Instruction::FPExt, C, Ty, OnlyIfReduced);
}

/// Build a uitofp constant expression.
Constant *ConstantExpr::getUIToFP(Constant *C, Type *Ty, bool OnlyIfReduced) {
#ifndef NDEBUG
  bool fromVec = isa(C->getType());
  bool toVec = isa(Ty);
#endif
  assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isFPOrFPVectorTy() &&
         "This is an illegal uint to floating point cast!");
  return getFoldedCast(Instruction::UIToFP, C, Ty, OnlyIfReduced);
}

/// Build a sitofp constant expression.
Constant *ConstantExpr::getSIToFP(Constant *C, Type *Ty, bool OnlyIfReduced) {
#ifndef NDEBUG
  bool fromVec = isa(C->getType());
  bool toVec = isa(Ty);
#endif
  assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isFPOrFPVectorTy() &&
         "This is an illegal sint to floating point cast!");
  return getFoldedCast(Instruction::SIToFP, C, Ty, OnlyIfReduced);
}

/// Build an fptoui constant expression.
Constant *ConstantExpr::getFPToUI(Constant *C, Type *Ty, bool OnlyIfReduced) {
#ifndef NDEBUG
  bool fromVec = isa(C->getType());
  bool toVec = isa(Ty);
#endif
  assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isIntOrIntVectorTy() &&
         "This is an illegal floating point to uint cast!");
  return getFoldedCast(Instruction::FPToUI, C, Ty, OnlyIfReduced);
}

/// Build an fptosi constant expression.
Constant *ConstantExpr::getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced) {
#ifndef NDEBUG
  bool fromVec = isa(C->getType());
  bool toVec = isa(Ty);
#endif
  assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isIntOrIntVectorTy() &&
         "This is an illegal floating point to sint cast!");
  return getFoldedCast(Instruction::FPToSI, C, Ty, OnlyIfReduced);
}

/// Build a ptrtoint constant expression; vector element counts must match.
Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy,
                                    bool OnlyIfReduced) {
  assert(C->getType()->isPtrOrPtrVectorTy() &&
         "PtrToInt source must be pointer or pointer vector");
  assert(DstTy->isIntOrIntVectorTy() &&
         "PtrToInt destination must be integer or integer vector");
  assert(isa(C->getType()) == isa(DstTy));
  if (isa(C->getType()))
    assert(cast(C->getType())->getNumElements() ==
           cast(DstTy)->getNumElements() &&
           "Invalid cast between a different number of vector elements");
  return getFoldedCast(Instruction::PtrToInt, C, DstTy, OnlyIfReduced);
}

/// Build an inttoptr constant expression; vector element counts must match.
Constant *ConstantExpr::getIntToPtr(Constant *C, Type *DstTy,
                                    bool OnlyIfReduced) {
  assert(C->getType()->isIntOrIntVectorTy() &&
         "IntToPtr source must be integer or integer vector");
  assert(DstTy->isPtrOrPtrVectorTy() &&
         "IntToPtr destination must be a pointer or pointer vector");
  assert(isa(C->getType()) == isa(DstTy));
  if (isa(C->getType()))
    assert(cast(C->getType())->getElementCount() ==
           cast(DstTy)->getElementCount() &&
           "Invalid cast between a different number of vector elements");
  return getFoldedCast(Instruction::IntToPtr, C, DstTy, OnlyIfReduced);
}

/// Build a bitcast constant expression.
Constant *ConstantExpr::getBitCast(Constant *C, Type *DstTy,
                                   bool OnlyIfReduced) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DstTy) &&
         "Invalid constantexpr bitcast!");

  // It is common to ask for a bitcast of a value to its own type, handle this
  // speedily.
if (C->getType() == DstTy)
    return C;

  return getFoldedCast(Instruction::BitCast, C, DstTy, OnlyIfReduced);
}

/// Build an addrspacecast constant expression, canonicalizing pointee-type
/// differences into a leading bitcast.
Constant *ConstantExpr::getAddrSpaceCast(Constant *C, Type *DstTy,
                                         bool OnlyIfReduced) {
  assert(CastInst::castIsValid(Instruction::AddrSpaceCast, C, DstTy) &&
         "Invalid constantexpr addrspacecast!");

  // Canonicalize addrspacecasts between different pointer types by first
  // bitcasting the pointer type and then converting the address space.
  PointerType *SrcScalarTy = cast(C->getType()->getScalarType());
  PointerType *DstScalarTy = cast(DstTy->getScalarType());
  if (!SrcScalarTy->hasSameElementTypeAs(DstScalarTy)) {
    Type *MidTy = PointerType::getWithSamePointeeType(
        DstScalarTy, SrcScalarTy->getAddressSpace());
    if (VectorType *VT = dyn_cast(DstTy)) {
      // Handle vectors of pointers.
      MidTy = FixedVectorType::get(MidTy,
                                   cast(VT)->getNumElements());
    }
    C = getBitCast(C, MidTy);
  }
  return getFoldedCast(Instruction::AddrSpaceCast, C, DstTy, OnlyIfReduced);
}

/// Build a unary-operator constant expression (currently fneg), folding when
/// possible and uniquing through the per-context ExprConstants table.
Constant *ConstantExpr::get(unsigned Opcode, Constant *C, unsigned Flags,
                            Type *OnlyIfReducedTy) {
  // Check the operands for consistency first.
  assert(Instruction::isUnaryOp(Opcode) &&
         "Invalid opcode in unary constant expression");

#ifndef NDEBUG
  switch (Opcode) {
  case Instruction::FNeg:
    assert(C->getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default:
    break;
  }
#endif

  if (Constant *FC = ConstantFoldUnaryInstruction(Opcode, C))
    return FC;

  if (OnlyIfReducedTy == C->getType())
    return nullptr;

  Constant *ArgVec[] = { C };
  ConstantExprKeyType Key(Opcode, ArgVec, 0, Flags);

  LLVMContextImpl *pImpl = C->getContext().pImpl;
  return pImpl->ExprConstants.getOrCreate(C->getType(), Key);
}

/// Build a binary-operator constant expression, folding when possible and
/// uniquing through the per-context ExprConstants table.
Constant *ConstantExpr::get(unsigned Opcode, Constant *C1, Constant *C2,
                            unsigned Flags, Type *OnlyIfReducedTy) {
  // Check the operands for consistency first.
  assert(Instruction::isBinaryOp(Opcode) &&
         "Invalid opcode in binary constant expression");
  assert(C1->getType() == C2->getType() &&
         "Operand types in binary constant expression should match");

#ifndef NDEBUG
  switch (Opcode) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    assert(C1->getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
    assert(C1->getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    assert(C1->getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    assert(C1->getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integer type!");
    break;
  default:
    break;
  }
#endif

  if (Constant *FC = ConstantFoldBinaryInstruction(Opcode, C1, C2))
    return FC;

  if (OnlyIfReducedTy == C1->getType())
    return nullptr;

  Constant *ArgVec[] = { C1, C2 };
  ConstantExprKeyType Key(Opcode, ArgVec, 0, Flags);

  LLVMContextImpl *pImpl = C1->getContext().pImpl;
  return pImpl->ExprConstants.getOrCreate(C1->getType(), Key);
}

Constant *ConstantExpr::getSizeOf(Type* Ty) {
  // sizeof is implemented as: (i64) gep (Ty*)null, 1
  // Note that a non-inbounds gep is used, as null isn't within any object.
Constant *GEPIdx = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
  Constant *GEP = getGetElementPtr(
      Ty, Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx);
  return getPtrToInt(GEP, Type::getInt64Ty(Ty->getContext()));
}

Constant *ConstantExpr::getAlignOf(Type* Ty) {
  // alignof is implemented as: (i64) gep ({i1,Ty}*)null, 0, 1
  // Note that a non-inbounds gep is used, as null isn't within any object.
  Type *AligningTy = StructType::get(Type::getInt1Ty(Ty->getContext()), Ty);
  Constant *NullPtr = Constant::getNullValue(AligningTy->getPointerTo(0));
  Constant *Zero = ConstantInt::get(Type::getInt64Ty(Ty->getContext()), 0);
  Constant *One = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
  Constant *Indices[2] = { Zero, One };
  Constant *GEP = getGetElementPtr(AligningTy, NullPtr, Indices);
  return getPtrToInt(GEP, Type::getInt64Ty(Ty->getContext()));
}

/// Convenience overload taking the field number as an unsigned.
Constant *ConstantExpr::getOffsetOf(StructType* STy, unsigned FieldNo) {
  return getOffsetOf(STy, ConstantInt::get(Type::getInt32Ty(STy->getContext()),
                                           FieldNo));
}

Constant *ConstantExpr::getOffsetOf(Type* Ty, Constant *FieldNo) {
  // offsetof is implemented as: (i64) gep (Ty*)null, 0, FieldNo
  // Note that a non-inbounds gep is used, as null isn't within any object.
  Constant *GEPIdx[] = {
    ConstantInt::get(Type::getInt64Ty(Ty->getContext()), 0),
    FieldNo
  };
  Constant *GEP = getGetElementPtr(
      Ty, Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx);
  return getPtrToInt(GEP, Type::getInt64Ty(Ty->getContext()));
}

/// Dispatch a compare predicate to getICmp or getFCmp.
Constant *ConstantExpr::getCompare(unsigned short Predicate, Constant *C1,
                                   Constant *C2, bool OnlyIfReduced) {
  assert(C1->getType() == C2->getType() && "Op types should be identical!");

  switch (Predicate) {
  default: llvm_unreachable("Invalid CmpInst predicate");
  case CmpInst::FCMP_FALSE: case CmpInst::FCMP_OEQ: case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE:   case CmpInst::FCMP_OLT: case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ONE:   case CmpInst::FCMP_ORD: case CmpInst::FCMP_UNO:
  case CmpInst::FCMP_UEQ:   case CmpInst::FCMP_UGT: case CmpInst::FCMP_UGE:
  case CmpInst::FCMP_ULT:   case CmpInst::FCMP_ULE: case CmpInst::FCMP_UNE:
  case CmpInst::FCMP_TRUE:
    return getFCmp(Predicate, C1, C2, OnlyIfReduced);

  case CmpInst::ICMP_EQ:  case CmpInst::ICMP_NE:  case CmpInst::ICMP_UGT:
  case CmpInst::ICMP_UGE: case CmpInst::ICMP_ULT: case CmpInst::ICMP_ULE:
  case CmpInst::ICMP_SGT: case CmpInst::ICMP_SGE: case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    return getICmp(Predicate, C1, C2, OnlyIfReduced);
  }
}

/// Build a select constant expression, folding when possible.
Constant *ConstantExpr::getSelect(Constant *C, Constant *V1, Constant *V2,
                                  Type *OnlyIfReducedTy) {
  assert(!SelectInst::areInvalidOperands(C, V1, V2)&&"Invalid select operands");

  if (Constant *SC = ConstantFoldSelectInstruction(C, V1, V2))
    return SC;        // Fold common cases

  if (OnlyIfReducedTy == V1->getType())
    return nullptr;

  Constant *ArgVec[] = { C, V1, V2 };
  ConstantExprKeyType Key(Instruction::Select, ArgVec);

  LLVMContextImpl *pImpl = C->getContext().pImpl;
  return pImpl->ExprConstants.getOrCreate(V1->getType(), Key);
}

/// Build a getelementptr constant expression over source element type \p Ty,
/// canonicalizing indices (splatting scalars for vector GEPs) and encoding
/// inbounds/inrange in the subclass-optional-data bits before uniquing.
Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
                                         ArrayRef Idxs, bool InBounds,
                                         Optional InRangeIndex,
                                         Type *OnlyIfReducedTy) {
  PointerType *OrigPtrTy = cast(C->getType()->getScalarType());
  assert(Ty && "Must specify element type");
  assert(OrigPtrTy->isOpaqueOrPointeeTypeMatches(Ty));

  if (Constant *FC =
          ConstantFoldGetElementPtr(Ty, C, InBounds, InRangeIndex, Idxs))
    return FC;          // Fold a few common cases.

  // Get the result type of the getelementptr!
  Type *DestTy = GetElementPtrInst::getIndexedType(Ty, Idxs);
  assert(DestTy && "GEP indices invalid!");
  unsigned AS = OrigPtrTy->getAddressSpace();
  Type *ReqTy = OrigPtrTy->isOpaque()
                    ? PointerType::get(OrigPtrTy->getContext(), AS)
                    : DestTy->getPointerTo(AS);

  // A vector base or any vector index makes the whole GEP a vector of
  // pointers with that element count.
  auto EltCount = ElementCount::getFixed(0);
  if (VectorType *VecTy = dyn_cast(C->getType()))
    EltCount = VecTy->getElementCount();
  else
    for (auto Idx : Idxs)
      if (VectorType *VecTy = dyn_cast(Idx->getType()))
        EltCount = VecTy->getElementCount();

  if (EltCount.isNonZero())
    ReqTy = VectorType::get(ReqTy, EltCount);

  if (OnlyIfReducedTy == ReqTy)
    return nullptr;

  // Look up the constant in the table first to ensure uniqueness
  std::vector ArgVec;
  ArgVec.reserve(1 + Idxs.size());
  ArgVec.push_back(C);
  auto GTI = gep_type_begin(Ty, Idxs), GTE = gep_type_end(Ty, Idxs);
  for (; GTI != GTE; ++GTI) {
    auto *Idx = cast(GTI.getOperand());
    assert(
        (!isa(Idx->getType()) ||
         cast(Idx->getType())->getElementCount() == EltCount) &&
        "getelementptr index type missmatch");

    if (GTI.isStruct() && Idx->getType()->isVectorTy()) {
      // Struct indices must be scalar; collapse a vector index to its splat.
      Idx = Idx->getSplatValue();
    } else if (GTI.isSequential() && EltCount.isNonZero() &&
               !Idx->getType()->isVectorTy()) {
      // For a vector GEP, broadcast scalar sequential indices to vectors.
      Idx = ConstantVector::getSplat(EltCount, Idx);
    }
    ArgVec.push_back(Idx);
  }

  // Bit 0 carries inbounds; bits 1.. carry (inrange index + 1).
  unsigned SubClassOptionalData = InBounds ? GEPOperator::IsInBounds : 0;
  if (InRangeIndex && *InRangeIndex < 63)
    SubClassOptionalData |= (*InRangeIndex + 1) << 1;
  const ConstantExprKeyType Key(Instruction::GetElementPtr, ArgVec, 0,
                                SubClassOptionalData, None, None, Ty);

  LLVMContextImpl *pImpl = C->getContext().pImpl;
  return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
}

/// Build an icmp constant expression; result type is i1 (or a vector of i1
/// matching the operand element count).
Constant *ConstantExpr::getICmp(unsigned short pred, Constant *LHS,
                                Constant *RHS, bool OnlyIfReduced) {
  assert(LHS->getType() == RHS->getType());
  assert(CmpInst::isIntPredicate((CmpInst::Predicate)pred) &&
         "Invalid ICmp Predicate");

  if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS))
    return FC;          // Fold a few common cases...

  if (OnlyIfReduced)
    return nullptr;

  // Look up the constant in the table first to ensure uniqueness
  Constant *ArgVec[] = { LHS, RHS };
  // Get the key type with both the opcode and predicate
  const ConstantExprKeyType Key(Instruction::ICmp, ArgVec, pred);

  Type *ResultTy = Type::getInt1Ty(LHS->getContext());
  if (VectorType *VT = dyn_cast(LHS->getType()))
    ResultTy = VectorType::get(ResultTy, VT->getElementCount());

  LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl;
  return pImpl->ExprConstants.getOrCreate(ResultTy, Key);
}

/// Build an fcmp constant expression; result type is i1 (or a vector of i1
/// matching the operand element count).
Constant *ConstantExpr::getFCmp(unsigned short pred, Constant *LHS,
                                Constant *RHS, bool OnlyIfReduced) {
  assert(LHS->getType() == RHS->getType());
  assert(CmpInst::isFPPredicate((CmpInst::Predicate)pred) &&
         "Invalid FCmp Predicate");

  if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS))
    return FC;          // Fold a few common cases...
if (OnlyIfReduced)
    return nullptr;

  // Look up the constant in the table first to ensure uniqueness
  Constant *ArgVec[] = { LHS, RHS };
  // Get the key type with both the opcode and predicate
  const ConstantExprKeyType Key(Instruction::FCmp, ArgVec, pred);

  Type *ResultTy = Type::getInt1Ty(LHS->getContext());
  if (VectorType *VT = dyn_cast(LHS->getType()))
    ResultTy = VectorType::get(ResultTy, VT->getElementCount());

  LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl;
  return pImpl->ExprConstants.getOrCreate(ResultTy, Key);
}

/// Build an extractelement constant expression; result type is the vector's
/// element type.
Constant *ConstantExpr::getExtractElement(Constant *Val, Constant *Idx,
                                          Type *OnlyIfReducedTy) {
  assert(Val->getType()->isVectorTy() &&
         "Tried to create extractelement operation on non-vector type!");
  assert(Idx->getType()->isIntegerTy() &&
         "Extractelement index must be an integer type!");

  if (Constant *FC = ConstantFoldExtractElementInstruction(Val, Idx))
    return FC;          // Fold a few common cases.

  Type *ReqTy = cast(Val->getType())->getElementType();
  if (OnlyIfReducedTy == ReqTy)
    return nullptr;

  // Look up the constant in the table first to ensure uniqueness
  Constant *ArgVec[] = { Val, Idx };
  const ConstantExprKeyType Key(Instruction::ExtractElement, ArgVec);

  LLVMContextImpl *pImpl = Val->getContext().pImpl;
  return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
}

/// Build an insertelement constant expression; result type matches \p Val.
Constant *ConstantExpr::getInsertElement(Constant *Val, Constant *Elt,
                                         Constant *Idx,
                                         Type *OnlyIfReducedTy) {
  assert(Val->getType()->isVectorTy() &&
         "Tried to create insertelement operation on non-vector type!");
  assert(Elt->getType() == cast(Val->getType())->getElementType() &&
         "Insertelement types must match!");
  assert(Idx->getType()->isIntegerTy() &&
         "Insertelement index must be i32 type!");

  if (Constant *FC = ConstantFoldInsertElementInstruction(Val, Elt, Idx))
    return FC;          // Fold a few common cases.

  if (OnlyIfReducedTy == Val->getType())
    return nullptr;

  // Look up the constant in the table first to ensure uniqueness
  Constant *ArgVec[] = { Val, Elt, Idx };
  const ConstantExprKeyType Key(Instruction::InsertElement, ArgVec);

  LLVMContextImpl *pImpl = Val->getContext().pImpl;
  return pImpl->ExprConstants.getOrCreate(Val->getType(), Key);
}

/// Build a shufflevector constant expression; the result element count comes
/// from the mask, scalability from the first operand's vector type.
Constant *ConstantExpr::getShuffleVector(Constant *V1, Constant *V2,
                                         ArrayRef Mask,
                                         Type *OnlyIfReducedTy) {
  assert(ShuffleVectorInst::isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector constant expr operands!");

  if (Constant *FC = ConstantFoldShuffleVectorInstruction(V1, V2, Mask))
    return FC;          // Fold a few common cases.

  unsigned NElts = Mask.size();
  auto V1VTy = cast(V1->getType());
  Type *EltTy = V1VTy->getElementType();
  bool TypeIsScalable = isa(V1VTy);
  Type *ShufTy = VectorType::get(EltTy, NElts, TypeIsScalable);

  if (OnlyIfReducedTy == ShufTy)
    return nullptr;

  // Look up the constant in the table first to ensure uniqueness
  Constant *ArgVec[] = {V1, V2};
  ConstantExprKeyType Key(Instruction::ShuffleVector, ArgVec, 0, 0, None,
                          Mask);

  LLVMContextImpl *pImpl = ShufTy->getContext().pImpl;
  return pImpl->ExprConstants.getOrCreate(ShufTy, Key);
}

/// Build an insertvalue constant expression; result type matches \p Agg's
/// type requirement (ReqTy is taken from \p Val per the assertion above it).
Constant *ConstantExpr::getInsertValue(Constant *Agg, Constant *Val,
                                       ArrayRef Idxs,
                                       Type *OnlyIfReducedTy) {
  assert(Agg->getType()->isFirstClassType() &&
         "Non-first-class type for constant insertvalue expression");

  assert(ExtractValueInst::getIndexedType(Agg->getType(),
                                          Idxs) == Val->getType() &&
         "insertvalue indices invalid!");
  Type *ReqTy = Val->getType();

  if (Constant *FC = ConstantFoldInsertValueInstruction(Agg, Val, Idxs))
    return FC;

  if (OnlyIfReducedTy == ReqTy)
    return nullptr;

  Constant *ArgVec[] = { Agg, Val };
  const ConstantExprKeyType Key(Instruction::InsertValue, ArgVec, 0, 0, Idxs);

  LLVMContextImpl *pImpl = Agg->getContext().pImpl;
  return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
}

Constant *ConstantExpr::getExtractValue(Constant *Agg, ArrayRef Idxs,
                                        Type *OnlyIfReducedTy) {
assert(Agg->getType()->isFirstClassType() && "Tried to create extractelement operation on non-first-class type!"); Type *ReqTy = ExtractValueInst::getIndexedType(Agg->getType(), Idxs); (void)ReqTy; assert(ReqTy && "extractvalue indices invalid!"); assert(Agg->getType()->isFirstClassType() && "Non-first-class type for constant extractvalue expression"); if (Constant *FC = ConstantFoldExtractValueInstruction(Agg, Idxs)) return FC; if (OnlyIfReducedTy == ReqTy) return nullptr; Constant *ArgVec[] = { Agg }; const ConstantExprKeyType Key(Instruction::ExtractValue, ArgVec, 0, 0, Idxs); LLVMContextImpl *pImpl = Agg->getContext().pImpl; return pImpl->ExprConstants.getOrCreate(ReqTy, Key); } Constant *ConstantExpr::getNeg(Constant *C, bool HasNUW, bool HasNSW) { assert(C->getType()->isIntOrIntVectorTy() && "Cannot NEG a nonintegral value!"); return getSub(ConstantFP::getZeroValueForNegation(C->getType()), C, HasNUW, HasNSW); } Constant *ConstantExpr::getFNeg(Constant *C) { assert(C->getType()->isFPOrFPVectorTy() && "Cannot FNEG a non-floating-point value!"); return get(Instruction::FNeg, C); } Constant *ConstantExpr::getNot(Constant *C) { assert(C->getType()->isIntOrIntVectorTy() && "Cannot NOT a nonintegral value!"); return get(Instruction::Xor, C, Constant::getAllOnesValue(C->getType())); } Constant *ConstantExpr::getAdd(Constant *C1, Constant *C2, bool HasNUW, bool HasNSW) { unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) | (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0); return get(Instruction::Add, C1, C2, Flags); } Constant *ConstantExpr::getFAdd(Constant *C1, Constant *C2) { return get(Instruction::FAdd, C1, C2); } Constant *ConstantExpr::getSub(Constant *C1, Constant *C2, bool HasNUW, bool HasNSW) { unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) | (HasNSW ? 
OverflowingBinaryOperator::NoSignedWrap : 0); return get(Instruction::Sub, C1, C2, Flags); } Constant *ConstantExpr::getFSub(Constant *C1, Constant *C2) { return get(Instruction::FSub, C1, C2); } Constant *ConstantExpr::getMul(Constant *C1, Constant *C2, bool HasNUW, bool HasNSW) { unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) | (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0); return get(Instruction::Mul, C1, C2, Flags); } Constant *ConstantExpr::getFMul(Constant *C1, Constant *C2) { return get(Instruction::FMul, C1, C2); } Constant *ConstantExpr::getUDiv(Constant *C1, Constant *C2, bool isExact) { return get(Instruction::UDiv, C1, C2, isExact ? PossiblyExactOperator::IsExact : 0); } Constant *ConstantExpr::getSDiv(Constant *C1, Constant *C2, bool isExact) { return get(Instruction::SDiv, C1, C2, isExact ? PossiblyExactOperator::IsExact : 0); } Constant *ConstantExpr::getFDiv(Constant *C1, Constant *C2) { return get(Instruction::FDiv, C1, C2); } Constant *ConstantExpr::getURem(Constant *C1, Constant *C2) { return get(Instruction::URem, C1, C2); } Constant *ConstantExpr::getSRem(Constant *C1, Constant *C2) { return get(Instruction::SRem, C1, C2); } Constant *ConstantExpr::getFRem(Constant *C1, Constant *C2) { return get(Instruction::FRem, C1, C2); } Constant *ConstantExpr::getAnd(Constant *C1, Constant *C2) { return get(Instruction::And, C1, C2); } Constant *ConstantExpr::getOr(Constant *C1, Constant *C2) { return get(Instruction::Or, C1, C2); } Constant *ConstantExpr::getXor(Constant *C1, Constant *C2) { return get(Instruction::Xor, C1, C2); } Constant *ConstantExpr::getUMin(Constant *C1, Constant *C2) { Constant *Cmp = ConstantExpr::getICmp(CmpInst::ICMP_ULT, C1, C2); return getSelect(Cmp, C1, C2); } Constant *ConstantExpr::getShl(Constant *C1, Constant *C2, bool HasNUW, bool HasNSW) { unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) | (HasNSW ? 
OverflowingBinaryOperator::NoSignedWrap : 0); return get(Instruction::Shl, C1, C2, Flags); } Constant *ConstantExpr::getLShr(Constant *C1, Constant *C2, bool isExact) { return get(Instruction::LShr, C1, C2, isExact ? PossiblyExactOperator::IsExact : 0); } Constant *ConstantExpr::getAShr(Constant *C1, Constant *C2, bool isExact) { return get(Instruction::AShr, C1, C2, isExact ? PossiblyExactOperator::IsExact : 0); } Constant *ConstantExpr::getExactLogBase2(Constant *C) { Type *Ty = C->getType(); const APInt *IVal; if (match(C, m_APInt(IVal)) && IVal->isPowerOf2()) return ConstantInt::get(Ty, IVal->logBase2()); // FIXME: We can extract pow of 2 of splat constant for scalable vectors. auto *VecTy = dyn_cast(Ty); if (!VecTy) return nullptr; SmallVector Elts; for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) { Constant *Elt = C->getAggregateElement(I); if (!Elt) return nullptr; // Note that log2(iN undef) is *NOT* iN undef, because log2(iN undef) u< N. if (isa(Elt)) { Elts.push_back(Constant::getNullValue(Ty->getScalarType())); continue; } if (!match(Elt, m_APInt(IVal)) || !IVal->isPowerOf2()) return nullptr; Elts.push_back(ConstantInt::get(Ty->getScalarType(), IVal->logBase2())); } return ConstantVector::get(Elts); } Constant *ConstantExpr::getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant) { assert(Instruction::isBinaryOp(Opcode) && "Only binops allowed"); // Commutative opcodes: it does not matter if AllowRHSConstant is set. if (Instruction::isCommutative(Opcode)) { switch (Opcode) { case Instruction::Add: // X + 0 = X case Instruction::Or: // X | 0 = X case Instruction::Xor: // X ^ 0 = X return Constant::getNullValue(Ty); case Instruction::Mul: // X * 1 = X return ConstantInt::get(Ty, 1); case Instruction::And: // X & -1 = X return Constant::getAllOnesValue(Ty); case Instruction::FAdd: // X + -0.0 = X // TODO: If the fadd has 'nsz', should we return +0.0? 
return ConstantFP::getNegativeZero(Ty); case Instruction::FMul: // X * 1.0 = X return ConstantFP::get(Ty, 1.0); default: llvm_unreachable("Every commutative binop has an identity constant"); } } // Non-commutative opcodes: AllowRHSConstant must be set. if (!AllowRHSConstant) return nullptr; switch (Opcode) { case Instruction::Sub: // X - 0 = X case Instruction::Shl: // X << 0 = X case Instruction::LShr: // X >>u 0 = X case Instruction::AShr: // X >> 0 = X case Instruction::FSub: // X - 0.0 = X return Constant::getNullValue(Ty); case Instruction::SDiv: // X / 1 = X case Instruction::UDiv: // X /u 1 = X return ConstantInt::get(Ty, 1); case Instruction::FDiv: // X / 1.0 = X return ConstantFP::get(Ty, 1.0); default: return nullptr; } } Constant *ConstantExpr::getBinOpAbsorber(unsigned Opcode, Type *Ty) { switch (Opcode) { default: // Doesn't have an absorber. return nullptr; case Instruction::Or: return Constant::getAllOnesValue(Ty); case Instruction::And: case Instruction::Mul: return Constant::getNullValue(Ty); } } /// Remove the constant from the constant table. 
void ConstantExpr::destroyConstantImpl() { getType()->getContext().pImpl->ExprConstants.remove(this); } const char *ConstantExpr::getOpcodeName() const { return Instruction::getOpcodeName(getOpcode()); } GetElementPtrConstantExpr::GetElementPtrConstantExpr( Type *SrcElementTy, Constant *C, ArrayRef IdxList, Type *DestTy) : ConstantExpr(DestTy, Instruction::GetElementPtr, OperandTraits::op_end(this) - (IdxList.size() + 1), IdxList.size() + 1), SrcElementTy(SrcElementTy), ResElementTy(GetElementPtrInst::getIndexedType(SrcElementTy, IdxList)) { Op<0>() = C; Use *OperandList = getOperandList(); for (unsigned i = 0, E = IdxList.size(); i != E; ++i) OperandList[i+1] = IdxList[i]; } Type *GetElementPtrConstantExpr::getSourceElementType() const { return SrcElementTy; } Type *GetElementPtrConstantExpr::getResultElementType() const { return ResElementTy; } //===----------------------------------------------------------------------===// // ConstantData* implementations Type *ConstantDataSequential::getElementType() const { if (ArrayType *ATy = dyn_cast(getType())) return ATy->getElementType(); return cast(getType())->getElementType(); } StringRef ConstantDataSequential::getRawDataValues() const { return StringRef(DataElements, getNumElements()*getElementByteSize()); } bool ConstantDataSequential::isElementTypeCompatible(Type *Ty) { if (Ty->isHalfTy() || Ty->isBFloatTy() || Ty->isFloatTy() || Ty->isDoubleTy()) return true; if (auto *IT = dyn_cast(Ty)) { switch (IT->getBitWidth()) { case 8: case 16: case 32: case 64: return true; default: break; } } return false; } unsigned ConstantDataSequential::getNumElements() const { if (ArrayType *AT = dyn_cast(getType())) return AT->getNumElements(); return cast(getType())->getNumElements(); } uint64_t ConstantDataSequential::getElementByteSize() const { return getElementType()->getPrimitiveSizeInBits()/8; } /// Return the start of the specified element. 
const char *ConstantDataSequential::getElementPointer(unsigned Elt) const { assert(Elt < getNumElements() && "Invalid Elt"); return DataElements+Elt*getElementByteSize(); } /// Return true if the array is empty or all zeros. static bool isAllZeros(StringRef Arr) { for (char I : Arr) if (I != 0) return false; return true; } /// This is the underlying implementation of all of the /// ConstantDataSequential::get methods. They all thunk down to here, providing /// the correct element type. We take the bytes in as a StringRef because /// we *want* an underlying "char*" to avoid TBAA type punning violations. Constant *ConstantDataSequential::getImpl(StringRef Elements, Type *Ty) { #ifndef NDEBUG if (ArrayType *ATy = dyn_cast(Ty)) assert(isElementTypeCompatible(ATy->getElementType())); else assert(isElementTypeCompatible(cast(Ty)->getElementType())); #endif // If the elements are all zero or there are no elements, return a CAZ, which // is more dense and canonical. if (isAllZeros(Elements)) return ConstantAggregateZero::get(Ty); // Do a lookup to see if we have already formed one of these. auto &Slot = *Ty->getContext() .pImpl->CDSConstants.insert(std::make_pair(Elements, nullptr)) .first; // The bucket can point to a linked list of different CDS's that have the same // body but different types. For example, 0,0,0,1 could be a 4 element array // of i8, or a 1-element array of i32. They'll both end up in the same /// StringMap bucket, linked up by their Next pointers. Walk the list. std::unique_ptr *Entry = &Slot.second; for (; *Entry; Entry = &(*Entry)->Next) if ((*Entry)->getType() == Ty) return Entry->get(); // Okay, we didn't get a hit. Create a node of the right class, link it in, // and return it. if (isa(Ty)) { // Use reset because std::make_unique can't access the constructor. Entry->reset(new ConstantDataArray(Ty, Slot.first().data())); return Entry->get(); } assert(isa(Ty)); // Use reset because std::make_unique can't access the constructor. 
Entry->reset(new ConstantDataVector(Ty, Slot.first().data())); return Entry->get(); } void ConstantDataSequential::destroyConstantImpl() { // Remove the constant from the StringMap. StringMap> &CDSConstants = getType()->getContext().pImpl->CDSConstants; auto Slot = CDSConstants.find(getRawDataValues()); assert(Slot != CDSConstants.end() && "CDS not found in uniquing table"); std::unique_ptr *Entry = &Slot->getValue(); // Remove the entry from the hash table. if (!(*Entry)->Next) { // If there is only one value in the bucket (common case) it must be this // entry, and removing the entry should remove the bucket completely. assert(Entry->get() == this && "Hash mismatch in ConstantDataSequential"); getContext().pImpl->CDSConstants.erase(Slot); return; } // Otherwise, there are multiple entries linked off the bucket, unlink the // node we care about but keep the bucket around. while (true) { std::unique_ptr &Node = *Entry; assert(Node && "Didn't find entry in its uniquing hash table!"); // If we found our entry, unlink it from the list and we're done. if (Node.get() == this) { Node = std::move(Node->Next); return; } Entry = &Node->Next; } } /// getFP() constructors - Return a constant of array type with a float /// element type taken from argument `ElementType', and count taken from /// argument `Elts'. The amount of bits of the contained type must match the /// number of bits of the type contained in the passed in ArrayRef. /// (i.e. half or bfloat for 16bits, float for 32bits, double for 64bits) Note /// that this can return a ConstantAggregateZero object. 
Constant *ConstantDataArray::getFP(Type *ElementType, ArrayRef Elts) { assert((ElementType->isHalfTy() || ElementType->isBFloatTy()) && "Element type is not a 16-bit float type"); Type *Ty = ArrayType::get(ElementType, Elts.size()); const char *Data = reinterpret_cast(Elts.data()); return getImpl(StringRef(Data, Elts.size() * 2), Ty); } Constant *ConstantDataArray::getFP(Type *ElementType, ArrayRef Elts) { assert(ElementType->isFloatTy() && "Element type is not a 32-bit float type"); Type *Ty = ArrayType::get(ElementType, Elts.size()); const char *Data = reinterpret_cast(Elts.data()); return getImpl(StringRef(Data, Elts.size() * 4), Ty); } Constant *ConstantDataArray::getFP(Type *ElementType, ArrayRef Elts) { assert(ElementType->isDoubleTy() && "Element type is not a 64-bit float type"); Type *Ty = ArrayType::get(ElementType, Elts.size()); const char *Data = reinterpret_cast(Elts.data()); return getImpl(StringRef(Data, Elts.size() * 8), Ty); } Constant *ConstantDataArray::getString(LLVMContext &Context, StringRef Str, bool AddNull) { if (!AddNull) { const uint8_t *Data = Str.bytes_begin(); return get(Context, makeArrayRef(Data, Str.size())); } SmallVector ElementVals; ElementVals.append(Str.begin(), Str.end()); ElementVals.push_back(0); return get(Context, ElementVals); } /// get() constructors - Return a constant with vector type with an element /// count and element type matching the ArrayRef passed in. Note that this /// can return a ConstantAggregateZero object. 
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef Elts){ auto *Ty = FixedVectorType::get(Type::getInt8Ty(Context), Elts.size()); const char *Data = reinterpret_cast(Elts.data()); return getImpl(StringRef(Data, Elts.size() * 1), Ty); } Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef Elts){ auto *Ty = FixedVectorType::get(Type::getInt16Ty(Context), Elts.size()); const char *Data = reinterpret_cast(Elts.data()); return getImpl(StringRef(Data, Elts.size() * 2), Ty); } Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef Elts){ auto *Ty = FixedVectorType::get(Type::getInt32Ty(Context), Elts.size()); const char *Data = reinterpret_cast(Elts.data()); return getImpl(StringRef(Data, Elts.size() * 4), Ty); } Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef Elts){ auto *Ty = FixedVectorType::get(Type::getInt64Ty(Context), Elts.size()); const char *Data = reinterpret_cast(Elts.data()); return getImpl(StringRef(Data, Elts.size() * 8), Ty); } Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef Elts) { auto *Ty = FixedVectorType::get(Type::getFloatTy(Context), Elts.size()); const char *Data = reinterpret_cast(Elts.data()); return getImpl(StringRef(Data, Elts.size() * 4), Ty); } Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef Elts) { auto *Ty = FixedVectorType::get(Type::getDoubleTy(Context), Elts.size()); const char *Data = reinterpret_cast(Elts.data()); return getImpl(StringRef(Data, Elts.size() * 8), Ty); } /// getFP() constructors - Return a constant of vector type with a float /// element type taken from argument `ElementType', and count taken from /// argument `Elts'. The amount of bits of the contained type must match the /// number of bits of the type contained in the passed in ArrayRef. /// (i.e. half or bfloat for 16bits, float for 32bits, double for 64bits) Note /// that this can return a ConstantAggregateZero object. 
Constant *ConstantDataVector::getFP(Type *ElementType, ArrayRef Elts) { assert((ElementType->isHalfTy() || ElementType->isBFloatTy()) && "Element type is not a 16-bit float type"); auto *Ty = FixedVectorType::get(ElementType, Elts.size()); const char *Data = reinterpret_cast(Elts.data()); return getImpl(StringRef(Data, Elts.size() * 2), Ty); } Constant *ConstantDataVector::getFP(Type *ElementType, ArrayRef Elts) { assert(ElementType->isFloatTy() && "Element type is not a 32-bit float type"); auto *Ty = FixedVectorType::get(ElementType, Elts.size()); const char *Data = reinterpret_cast(Elts.data()); return getImpl(StringRef(Data, Elts.size() * 4), Ty); } Constant *ConstantDataVector::getFP(Type *ElementType, ArrayRef Elts) { assert(ElementType->isDoubleTy() && "Element type is not a 64-bit float type"); auto *Ty = FixedVectorType::get(ElementType, Elts.size()); const char *Data = reinterpret_cast(Elts.data()); return getImpl(StringRef(Data, Elts.size() * 8), Ty); } Constant *ConstantDataVector::getSplat(unsigned NumElts, Constant *V) { assert(isElementTypeCompatible(V->getType()) && "Element type not compatible with ConstantData"); if (ConstantInt *CI = dyn_cast(V)) { if (CI->getType()->isIntegerTy(8)) { SmallVector Elts(NumElts, CI->getZExtValue()); return get(V->getContext(), Elts); } if (CI->getType()->isIntegerTy(16)) { SmallVector Elts(NumElts, CI->getZExtValue()); return get(V->getContext(), Elts); } if (CI->getType()->isIntegerTy(32)) { SmallVector Elts(NumElts, CI->getZExtValue()); return get(V->getContext(), Elts); } assert(CI->getType()->isIntegerTy(64) && "Unsupported ConstantData type"); SmallVector Elts(NumElts, CI->getZExtValue()); return get(V->getContext(), Elts); } if (ConstantFP *CFP = dyn_cast(V)) { if (CFP->getType()->isHalfTy()) { SmallVector Elts( NumElts, CFP->getValueAPF().bitcastToAPInt().getLimitedValue()); return getFP(V->getType(), Elts); } if (CFP->getType()->isBFloatTy()) { SmallVector Elts( NumElts, 
CFP->getValueAPF().bitcastToAPInt().getLimitedValue()); return getFP(V->getType(), Elts); } if (CFP->getType()->isFloatTy()) { SmallVector Elts( NumElts, CFP->getValueAPF().bitcastToAPInt().getLimitedValue()); return getFP(V->getType(), Elts); } if (CFP->getType()->isDoubleTy()) { SmallVector Elts( NumElts, CFP->getValueAPF().bitcastToAPInt().getLimitedValue()); return getFP(V->getType(), Elts); } } return ConstantVector::getSplat(ElementCount::getFixed(NumElts), V); } uint64_t ConstantDataSequential::getElementAsInteger(unsigned Elt) const { assert(isa(getElementType()) && "Accessor can only be used when element is an integer"); const char *EltPtr = getElementPointer(Elt); // The data is stored in host byte order, make sure to cast back to the right // type to load with the right endianness. switch (getElementType()->getIntegerBitWidth()) { default: llvm_unreachable("Invalid bitwidth for CDS"); case 8: return *reinterpret_cast(EltPtr); case 16: return *reinterpret_cast(EltPtr); case 32: return *reinterpret_cast(EltPtr); case 64: return *reinterpret_cast(EltPtr); } } APInt ConstantDataSequential::getElementAsAPInt(unsigned Elt) const { assert(isa(getElementType()) && "Accessor can only be used when element is an integer"); const char *EltPtr = getElementPointer(Elt); // The data is stored in host byte order, make sure to cast back to the right // type to load with the right endianness. 
switch (getElementType()->getIntegerBitWidth()) { default: llvm_unreachable("Invalid bitwidth for CDS"); case 8: { auto EltVal = *reinterpret_cast(EltPtr); return APInt(8, EltVal); } case 16: { auto EltVal = *reinterpret_cast(EltPtr); return APInt(16, EltVal); } case 32: { auto EltVal = *reinterpret_cast(EltPtr); return APInt(32, EltVal); } case 64: { auto EltVal = *reinterpret_cast(EltPtr); return APInt(64, EltVal); } } } APFloat ConstantDataSequential::getElementAsAPFloat(unsigned Elt) const { const char *EltPtr = getElementPointer(Elt); switch (getElementType()->getTypeID()) { default: llvm_unreachable("Accessor can only be used when element is float/double!"); case Type::HalfTyID: { auto EltVal = *reinterpret_cast(EltPtr); return APFloat(APFloat::IEEEhalf(), APInt(16, EltVal)); } case Type::BFloatTyID: { auto EltVal = *reinterpret_cast(EltPtr); return APFloat(APFloat::BFloat(), APInt(16, EltVal)); } case Type::FloatTyID: { auto EltVal = *reinterpret_cast(EltPtr); return APFloat(APFloat::IEEEsingle(), APInt(32, EltVal)); } case Type::DoubleTyID: { auto EltVal = *reinterpret_cast(EltPtr); return APFloat(APFloat::IEEEdouble(), APInt(64, EltVal)); } } } float ConstantDataSequential::getElementAsFloat(unsigned Elt) const { assert(getElementType()->isFloatTy() && "Accessor can only be used when element is a 'float'"); return *reinterpret_cast(getElementPointer(Elt)); } double ConstantDataSequential::getElementAsDouble(unsigned Elt) const { assert(getElementType()->isDoubleTy() && "Accessor can only be used when element is a 'float'"); return *reinterpret_cast(getElementPointer(Elt)); } Constant *ConstantDataSequential::getElementAsConstant(unsigned Elt) const { if (getElementType()->isHalfTy() || getElementType()->isBFloatTy() || getElementType()->isFloatTy() || getElementType()->isDoubleTy()) return ConstantFP::get(getContext(), getElementAsAPFloat(Elt)); return ConstantInt::get(getElementType(), getElementAsInteger(Elt)); } bool 
ConstantDataSequential::isString(unsigned CharSize) const { return isa(getType()) && getElementType()->isIntegerTy(CharSize); } bool ConstantDataSequential::isCString() const { if (!isString()) return false; StringRef Str = getAsString(); // The last value must be nul. if (Str.back() != 0) return false; // Other elements must be non-nul. return Str.drop_back().find(0) == StringRef::npos; } bool ConstantDataVector::isSplatData() const { const char *Base = getRawDataValues().data(); // Compare elements 1+ to the 0'th element. unsigned EltSize = getElementByteSize(); for (unsigned i = 1, e = getNumElements(); i != e; ++i) if (memcmp(Base, Base+i*EltSize, EltSize)) return false; return true; } bool ConstantDataVector::isSplat() const { if (!IsSplatSet) { IsSplatSet = true; IsSplat = isSplatData(); } return IsSplat; } Constant *ConstantDataVector::getSplatValue() const { // If they're all the same, return the 0th one as a representative. return isSplat() ? getElementAsConstant(0) : nullptr; } //===----------------------------------------------------------------------===// // handleOperandChange implementations /// Update this constant array to change uses of /// 'From' to be uses of 'To'. This must update the uniquing data structures /// etc. /// /// Note that we intentionally replace all uses of From with To here. Consider /// a large array that uses 'From' 1000 times. By handling this case all here, /// ConstantArray::handleOperandChange is only invoked once, and that /// single invocation handles all 1000 uses. Handling them one at a time would /// work, but would be really slow because it would have to unique each updated /// array instance. 
/// void Constant::handleOperandChange(Value *From, Value *To) { Value *Replacement = nullptr; switch (getValueID()) { default: llvm_unreachable("Not a constant!"); #define HANDLE_CONSTANT(Name) \ case Value::Name##Val: \ Replacement = cast(this)->handleOperandChangeImpl(From, To); \ break; #include "llvm/IR/Value.def" } // If handleOperandChangeImpl returned nullptr, then it handled // replacing itself and we don't want to delete or replace anything else here. if (!Replacement) return; // I do need to replace this with an existing value. assert(Replacement != this && "I didn't contain From!"); // Everyone using this now uses the replacement. replaceAllUsesWith(Replacement); // Delete the old constant! destroyConstant(); } Value *ConstantArray::handleOperandChangeImpl(Value *From, Value *To) { assert(isa(To) && "Cannot make Constant refer to non-constant!"); Constant *ToC = cast(To); SmallVector Values; Values.reserve(getNumOperands()); // Build replacement array. // Fill values with the modified operands of the constant array. Also, // compute whether this turns into an all-zeros array. unsigned NumUpdated = 0; // Keep track of whether all the values in the array are "ToC". bool AllSame = true; Use *OperandList = getOperandList(); unsigned OperandNo = 0; for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) { Constant *Val = cast(O->get()); if (Val == From) { OperandNo = (O - OperandList); Val = ToC; ++NumUpdated; } Values.push_back(Val); AllSame &= Val == ToC; } if (AllSame && ToC->isNullValue()) return ConstantAggregateZero::get(getType()); if (AllSame && isa(ToC)) return UndefValue::get(getType()); // Check for any other type of constant-folding. if (Constant *C = getImpl(getType(), Values)) return C; // Update to the new value. 
return getContext().pImpl->ArrayConstants.replaceOperandsInPlace( Values, this, From, ToC, NumUpdated, OperandNo); } Value *ConstantStruct::handleOperandChangeImpl(Value *From, Value *To) { assert(isa(To) && "Cannot make Constant refer to non-constant!"); Constant *ToC = cast(To); Use *OperandList = getOperandList(); SmallVector Values; Values.reserve(getNumOperands()); // Build replacement struct. // Fill values with the modified operands of the constant struct. Also, // compute whether this turns into an all-zeros struct. unsigned NumUpdated = 0; bool AllSame = true; unsigned OperandNo = 0; for (Use *O = OperandList, *E = OperandList + getNumOperands(); O != E; ++O) { Constant *Val = cast(O->get()); if (Val == From) { OperandNo = (O - OperandList); Val = ToC; ++NumUpdated; } Values.push_back(Val); AllSame &= Val == ToC; } if (AllSame && ToC->isNullValue()) return ConstantAggregateZero::get(getType()); if (AllSame && isa(ToC)) return UndefValue::get(getType()); // Update to the new value. return getContext().pImpl->StructConstants.replaceOperandsInPlace( Values, this, From, ToC, NumUpdated, OperandNo); } Value *ConstantVector::handleOperandChangeImpl(Value *From, Value *To) { assert(isa(To) && "Cannot make Constant refer to non-constant!"); Constant *ToC = cast(To); SmallVector Values; Values.reserve(getNumOperands()); // Build replacement array... unsigned NumUpdated = 0; unsigned OperandNo = 0; for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { Constant *Val = getOperand(i); if (Val == From) { OperandNo = i; ++NumUpdated; Val = ToC; } Values.push_back(Val); } if (Constant *C = getImpl(Values)) return C; // Update to the new value. 
return getContext().pImpl->VectorConstants.replaceOperandsInPlace( Values, this, From, ToC, NumUpdated, OperandNo); } Value *ConstantExpr::handleOperandChangeImpl(Value *From, Value *ToV) { assert(isa(ToV) && "Cannot make Constant refer to non-constant!"); Constant *To = cast(ToV); SmallVector NewOps; unsigned NumUpdated = 0; unsigned OperandNo = 0; for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { Constant *Op = getOperand(i); if (Op == From) { OperandNo = i; ++NumUpdated; Op = To; } NewOps.push_back(Op); } assert(NumUpdated && "I didn't contain From!"); if (Constant *C = getWithOperands(NewOps, getType(), true)) return C; // Update to the new value. return getContext().pImpl->ExprConstants.replaceOperandsInPlace( NewOps, this, From, To, NumUpdated, OperandNo); } Instruction *ConstantExpr::getAsInstruction() const { SmallVector ValueOperands(operands()); ArrayRef Ops(ValueOperands); switch (getOpcode()) { case Instruction::Trunc: case Instruction::ZExt: case Instruction::SExt: case Instruction::FPTrunc: case Instruction::FPExt: case Instruction::UIToFP: case Instruction::SIToFP: case Instruction::FPToUI: case Instruction::FPToSI: case Instruction::PtrToInt: case Instruction::IntToPtr: case Instruction::BitCast: case Instruction::AddrSpaceCast: return CastInst::Create((Instruction::CastOps)getOpcode(), Ops[0], getType()); case Instruction::Select: return SelectInst::Create(Ops[0], Ops[1], Ops[2]); case Instruction::InsertElement: return InsertElementInst::Create(Ops[0], Ops[1], Ops[2]); case Instruction::ExtractElement: return ExtractElementInst::Create(Ops[0], Ops[1]); case Instruction::InsertValue: return InsertValueInst::Create(Ops[0], Ops[1], getIndices()); case Instruction::ExtractValue: return ExtractValueInst::Create(Ops[0], getIndices()); case Instruction::ShuffleVector: return new ShuffleVectorInst(Ops[0], Ops[1], getShuffleMask()); case Instruction::GetElementPtr: { const auto *GO = cast(this); if (GO->isInBounds()) return 
GetElementPtrInst::CreateInBounds(GO->getSourceElementType(), Ops[0], Ops.slice(1)); return GetElementPtrInst::Create(GO->getSourceElementType(), Ops[0], Ops.slice(1)); } case Instruction::ICmp: case Instruction::FCmp: return CmpInst::Create((Instruction::OtherOps)getOpcode(), (CmpInst::Predicate)getPredicate(), Ops[0], Ops[1]); case Instruction::FNeg: return UnaryOperator::Create((Instruction::UnaryOps)getOpcode(), Ops[0]); default: assert(getNumOperands() == 2 && "Must be binary operator?"); BinaryOperator *BO = BinaryOperator::Create((Instruction::BinaryOps)getOpcode(), Ops[0], Ops[1]); if (isa(BO)) { BO->setHasNoUnsignedWrap(SubclassOptionalData & OverflowingBinaryOperator::NoUnsignedWrap); BO->setHasNoSignedWrap(SubclassOptionalData & OverflowingBinaryOperator::NoSignedWrap); } if (isa(BO)) BO->setIsExact(SubclassOptionalData & PossiblyExactOperator::IsExact); return BO; } } diff --git a/llvm/lib/MC/MCContext.cpp b/llvm/lib/MC/MCContext.cpp index aa4051aa2400..cc349af6393b 100644 --- a/llvm/lib/MC/MCContext.cpp +++ b/llvm/lib/MC/MCContext.cpp @@ -1,990 +1,990 @@ //===- lib/MC/MCContext.cpp - Machine Code Context ------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "llvm/MC/MCContext.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" #include "llvm/BinaryFormat/COFF.h" #include "llvm/BinaryFormat/ELF.h" #include "llvm/BinaryFormat/XCOFF.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCCodeView.h" #include "llvm/MC/MCDwarf.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCFragment.h" #include "llvm/MC/MCLabel.h" #include "llvm/MC/MCObjectFileInfo.h" #include "llvm/MC/MCSectionCOFF.h" #include "llvm/MC/MCSectionELF.h" #include "llvm/MC/MCSectionGOFF.h" #include "llvm/MC/MCSectionMachO.h" #include "llvm/MC/MCSectionWasm.h" #include "llvm/MC/MCSectionXCOFF.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbolCOFF.h" #include "llvm/MC/MCSymbolELF.h" #include "llvm/MC/MCSymbolGOFF.h" #include "llvm/MC/MCSymbolMachO.h" #include "llvm/MC/MCSymbolWasm.h" #include "llvm/MC/MCSymbolXCOFF.h" #include "llvm/MC/SectionKind.h" #include "llvm/Support/Casting.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Path.h" #include "llvm/Support/Signals.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/raw_ostream.h" #include #include #include #include using namespace llvm; static cl::opt AsSecureLogFileName("as-secure-log-file-name", cl::desc("As secure log file name (initialized from " "AS_SECURE_LOG_FILE env variable)"), cl::init(getenv("AS_SECURE_LOG_FILE")), cl::Hidden); static void defaultDiagHandler(const SMDiagnostic &SMD, bool, const SourceMgr &, std::vector &) { SMD.print(nullptr, errs()); } MCContext::MCContext(const Triple &TheTriple, const MCAsmInfo *mai, const MCRegisterInfo *mri, const 
MCSubtargetInfo *msti, const SourceMgr *mgr, MCTargetOptions const *TargetOpts, bool DoAutoReset) : TT(TheTriple), SrcMgr(mgr), InlineSrcMgr(nullptr), DiagHandler(defaultDiagHandler), MAI(mai), MRI(mri), MSTI(msti), Symbols(Allocator), UsedNames(Allocator), InlineAsmUsedLabelNames(Allocator), CurrentDwarfLoc(0, 0, 0, DWARF2_FLAG_IS_STMT, 0, 0), AutoReset(DoAutoReset), TargetOptions(TargetOpts) { SecureLogFile = AsSecureLogFileName; if (SrcMgr && SrcMgr->getNumBuffers()) MainFileName = std::string(SrcMgr->getMemoryBuffer(SrcMgr->getMainFileID()) ->getBufferIdentifier()); switch (TheTriple.getObjectFormat()) { case Triple::MachO: Env = IsMachO; break; case Triple::COFF: if (!TheTriple.isOSWindows()) report_fatal_error( "Cannot initialize MC for non-Windows COFF object files."); Env = IsCOFF; break; case Triple::ELF: Env = IsELF; break; case Triple::Wasm: Env = IsWasm; break; case Triple::XCOFF: Env = IsXCOFF; break; case Triple::GOFF: Env = IsGOFF; break; case Triple::UnknownObjectFormat: report_fatal_error("Cannot initialize MC for unknown object file format."); break; } } MCContext::~MCContext() { if (AutoReset) reset(); // NOTE: The symbols are all allocated out of a bump pointer allocator, // we don't need to free them here. 
}

// Lazily create the SourceMgr used for inline-assembly diagnostics; safe to
// call repeatedly, only the first call allocates.
void MCContext::initInlineSourceManager() {
  if (!InlineSrcMgr)
    InlineSrcMgr.reset(new SourceMgr());
}

//===----------------------------------------------------------------------===//
// Module Lifetime Management
//===----------------------------------------------------------------------===//

// Return this context to its freshly-constructed state so it can be reused
// for another module: destroy all section fragments, drop every symbol and
// uniquing map, and reset the Dwarf/CodeView bookkeeping.
void MCContext::reset() {
  SrcMgr = nullptr;
  InlineSrcMgr.reset();
  LocInfos.clear();
  DiagHandler = defaultDiagHandler;
  // Call the destructors so the fragments are freed
  COFFAllocator.DestroyAll();
  ELFAllocator.DestroyAll();
  GOFFAllocator.DestroyAll();
  MachOAllocator.DestroyAll();
  XCOFFAllocator.DestroyAll();
  MCInstAllocator.DestroyAll();
  MCSubtargetAllocator.DestroyAll();

  // Symbol storage lives in the bump allocator, so clearing the maps and
  // resetting Allocator releases the symbols together.
  InlineAsmUsedLabelNames.clear();
  UsedNames.clear();
  Symbols.clear();
  Allocator.Reset();
  Instances.clear();
  CompilationDir.clear();
  MainFileName.clear();
  MCDwarfLineTablesCUMap.clear();
  SectionsForRanges.clear();
  MCGenDwarfLabelEntries.clear();
  DwarfDebugFlags = StringRef();
  DwarfCompileUnitID = 0;
  CurrentDwarfLoc = MCDwarfLoc(0, 0, 0, DWARF2_FLAG_IS_STMT, 0, 0);
  CVContext.reset();

  // Per-object-format section uniquing maps.
  MachOUniquingMap.clear();
  ELFUniquingMap.clear();
  GOFFUniquingMap.clear();
  COFFUniquingMap.clear();
  WasmUniquingMap.clear();
  XCOFFUniquingMap.clear();

  ELFEntrySizeMap.clear();
  ELFSeenGenericMergeableSections.clear();

  NextID.clear();
  AllowTemporaryLabels = true;
  DwarfLocSeen = false;
  GenDwarfForAssembly = false;
  GenDwarfFileNumber = 0;

  HadError = false;
}

//===----------------------------------------------------------------------===//
// MCInst Management
//===----------------------------------------------------------------------===//

// Allocate an MCInst from the context-owned bump allocator; it is freed en
// masse by reset()/~MCContext, never individually.
MCInst *MCContext::createMCInst() {
  return new (MCInstAllocator.Allocate()) MCInst;
}

//===----------------------------------------------------------------------===//
// Symbol Manipulation
//===----------------------------------------------------------------------===//

// Return the unique symbol for Name, creating it on first use.
MCSymbol *MCContext::getOrCreateSymbol(const Twine &Name) {
  SmallString<128> NameSV;
  StringRef NameRef = Name.toStringRef(NameSV);
assert(!NameRef.empty() && "Normal symbols cannot be unnamed!"); MCSymbol *&Sym = Symbols[NameRef]; if (!Sym) Sym = createSymbol(NameRef, false, false); return Sym; } MCSymbol *MCContext::getOrCreateFrameAllocSymbol(StringRef FuncName, unsigned Idx) { return getOrCreateSymbol(Twine(MAI->getPrivateGlobalPrefix()) + FuncName + "$frame_escape_" + Twine(Idx)); } MCSymbol *MCContext::getOrCreateParentFrameOffsetSymbol(StringRef FuncName) { return getOrCreateSymbol(Twine(MAI->getPrivateGlobalPrefix()) + FuncName + "$parent_frame_offset"); } MCSymbol *MCContext::getOrCreateLSDASymbol(StringRef FuncName) { return getOrCreateSymbol(Twine(MAI->getPrivateGlobalPrefix()) + "__ehtable$" + FuncName); } MCSymbol *MCContext::createSymbolImpl(const StringMapEntry *Name, bool IsTemporary) { static_assert(std::is_trivially_destructible(), "MCSymbol classes must be trivially destructible"); static_assert(std::is_trivially_destructible(), "MCSymbol classes must be trivially destructible"); static_assert(std::is_trivially_destructible(), "MCSymbol classes must be trivially destructible"); static_assert(std::is_trivially_destructible(), "MCSymbol classes must be trivially destructible"); static_assert(std::is_trivially_destructible(), "MCSymbol classes must be trivially destructible"); switch (getObjectFileType()) { case MCContext::IsCOFF: return new (Name, *this) MCSymbolCOFF(Name, IsTemporary); case MCContext::IsELF: return new (Name, *this) MCSymbolELF(Name, IsTemporary); case MCContext::IsGOFF: return new (Name, *this) MCSymbolGOFF(Name, IsTemporary); case MCContext::IsMachO: return new (Name, *this) MCSymbolMachO(Name, IsTemporary); case MCContext::IsWasm: return new (Name, *this) MCSymbolWasm(Name, IsTemporary); case MCContext::IsXCOFF: return createXCOFFSymbolImpl(Name, IsTemporary); } return new (Name, *this) MCSymbol(MCSymbol::SymbolKindUnset, Name, IsTemporary); } MCSymbol *MCContext::createSymbol(StringRef Name, bool AlwaysAddSuffix, bool CanBeUnnamed) { if (CanBeUnnamed && 
!UseNamesOnTempLabels) return createSymbolImpl(nullptr, true); // Determine whether this is a user written assembler temporary or normal // label, if used. bool IsTemporary = CanBeUnnamed; if (AllowTemporaryLabels && !IsTemporary) IsTemporary = Name.startswith(MAI->getPrivateGlobalPrefix()); SmallString<128> NewName = Name; bool AddSuffix = AlwaysAddSuffix; unsigned &NextUniqueID = NextID[Name]; while (true) { if (AddSuffix) { NewName.resize(Name.size()); raw_svector_ostream(NewName) << NextUniqueID++; } auto NameEntry = UsedNames.insert(std::make_pair(NewName.str(), true)); if (NameEntry.second || !NameEntry.first->second) { // Ok, we found a name. // Mark it as used for a non-section symbol. NameEntry.first->second = true; // Have the MCSymbol object itself refer to the copy of the string that is // embedded in the UsedNames entry. return createSymbolImpl(&*NameEntry.first, IsTemporary); } assert(IsTemporary && "Cannot rename non-temporary symbols"); AddSuffix = true; } llvm_unreachable("Infinite loop"); } MCSymbol *MCContext::createTempSymbol(const Twine &Name, bool AlwaysAddSuffix) { SmallString<128> NameSV; raw_svector_ostream(NameSV) << MAI->getPrivateGlobalPrefix() << Name; return createSymbol(NameSV, AlwaysAddSuffix, true); } MCSymbol *MCContext::createNamedTempSymbol(const Twine &Name) { SmallString<128> NameSV; raw_svector_ostream(NameSV) << MAI->getPrivateGlobalPrefix() << Name; return createSymbol(NameSV, true, false); } MCSymbol *MCContext::createLinkerPrivateTempSymbol() { SmallString<128> NameSV; raw_svector_ostream(NameSV) << MAI->getLinkerPrivateGlobalPrefix() << "tmp"; return createSymbol(NameSV, true, false); } MCSymbol *MCContext::createTempSymbol() { return createTempSymbol("tmp"); } MCSymbol *MCContext::createNamedTempSymbol() { return createNamedTempSymbol("tmp"); } unsigned MCContext::NextInstance(unsigned LocalLabelVal) { MCLabel *&Label = Instances[LocalLabelVal]; if (!Label) Label = new (*this) MCLabel(0); return Label->incInstance(); } 
unsigned MCContext::GetInstance(unsigned LocalLabelVal) { MCLabel *&Label = Instances[LocalLabelVal]; if (!Label) Label = new (*this) MCLabel(0); return Label->getInstance(); } MCSymbol *MCContext::getOrCreateDirectionalLocalSymbol(unsigned LocalLabelVal, unsigned Instance) { MCSymbol *&Sym = LocalSymbols[std::make_pair(LocalLabelVal, Instance)]; if (!Sym) Sym = createNamedTempSymbol(); return Sym; } MCSymbol *MCContext::createDirectionalLocalSymbol(unsigned LocalLabelVal) { unsigned Instance = NextInstance(LocalLabelVal); return getOrCreateDirectionalLocalSymbol(LocalLabelVal, Instance); } MCSymbol *MCContext::getDirectionalLocalSymbol(unsigned LocalLabelVal, bool Before) { unsigned Instance = GetInstance(LocalLabelVal); if (!Before) ++Instance; return getOrCreateDirectionalLocalSymbol(LocalLabelVal, Instance); } MCSymbol *MCContext::lookupSymbol(const Twine &Name) const { SmallString<128> NameSV; StringRef NameRef = Name.toStringRef(NameSV); return Symbols.lookup(NameRef); } void MCContext::setSymbolValue(MCStreamer &Streamer, StringRef Sym, uint64_t Val) { auto Symbol = getOrCreateSymbol(Sym); Streamer.emitAssignment(Symbol, MCConstantExpr::create(Val, *this)); } void MCContext::registerInlineAsmLabel(MCSymbol *Sym) { InlineAsmUsedLabelNames[Sym->getName()] = Sym; } MCSymbolXCOFF * MCContext::createXCOFFSymbolImpl(const StringMapEntry *Name, bool IsTemporary) { if (!Name) return new (nullptr, *this) MCSymbolXCOFF(nullptr, IsTemporary); StringRef OriginalName = Name->first(); if (OriginalName.startswith("._Renamed..") || OriginalName.startswith("_Renamed..")) reportError(SMLoc(), "invalid symbol name from source"); if (MAI->isValidUnquotedName(OriginalName)) return new (Name, *this) MCSymbolXCOFF(Name, IsTemporary); // Now we have a name that contains invalid character(s) for XCOFF symbol. // Let's replace with something valid, but save the original name so that // we could still use the original name in the symbol table. 
SmallString<128> InvalidName(OriginalName); // If it's an entry point symbol, we will keep the '.' // in front for the convention purpose. Otherwise, add "_Renamed.." // as prefix to signal this is an renamed symbol. const bool IsEntryPoint = !InvalidName.empty() && InvalidName[0] == '.'; SmallString<128> ValidName = StringRef(IsEntryPoint ? "._Renamed.." : "_Renamed.."); // Append the hex values of '_' and invalid characters with "_Renamed.."; // at the same time replace invalid characters with '_'. for (size_t I = 0; I < InvalidName.size(); ++I) { if (!MAI->isAcceptableChar(InvalidName[I]) || InvalidName[I] == '_') { raw_svector_ostream(ValidName).write_hex(InvalidName[I]); InvalidName[I] = '_'; } } // Skip entry point symbol's '.' as we already have a '.' in front of // "_Renamed". if (IsEntryPoint) ValidName.append(InvalidName.substr(1, InvalidName.size() - 1)); else ValidName.append(InvalidName); auto NameEntry = UsedNames.insert(std::make_pair(ValidName.str(), true)); assert((NameEntry.second || !NameEntry.first->second) && "This name is used somewhere else."); // Mark the name as used for a non-section symbol. NameEntry.first->second = true; // Have the MCSymbol object itself refer to the copy of the string // that is embedded in the UsedNames entry. MCSymbolXCOFF *XSym = new (&*NameEntry.first, *this) MCSymbolXCOFF(&*NameEntry.first, IsTemporary); XSym->setSymbolTableName(MCSymbolXCOFF::getUnqualifiedName(OriginalName)); return XSym; } //===----------------------------------------------------------------------===// // Section Management //===----------------------------------------------------------------------===// MCSectionMachO *MCContext::getMachOSection(StringRef Segment, StringRef Section, unsigned TypeAndAttributes, unsigned Reserved2, SectionKind Kind, const char *BeginSymName) { // We unique sections by their segment/section pair. 
The returned section // may not have the same flags as the requested section, if so this should be // diagnosed by the client as an error. // Form the name to look up. assert(Section.size() <= 16 && "section name is too long"); assert(!memchr(Section.data(), '\0', Section.size()) && "section name cannot contain NUL"); // Do the lookup, if we have a hit, return it. auto R = MachOUniquingMap.try_emplace((Segment + Twine(',') + Section).str()); if (!R.second) return R.first->second; MCSymbol *Begin = nullptr; if (BeginSymName) Begin = createTempSymbol(BeginSymName, false); // Otherwise, return a new section. StringRef Name = R.first->first(); R.first->second = new (MachOAllocator.Allocate()) MCSectionMachO(Segment, Name.substr(Name.size() - Section.size()), TypeAndAttributes, Reserved2, Kind, Begin); return R.first->second; } void MCContext::renameELFSection(MCSectionELF *Section, StringRef Name) { StringRef GroupName; if (const MCSymbol *Group = Section->getGroup()) GroupName = Group->getName(); // This function is only used by .debug*, which should not have the // SHF_LINK_ORDER flag. unsigned UniqueID = Section->getUniqueID(); ELFUniquingMap.erase( ELFSectionKey{Section->getName(), GroupName, "", UniqueID}); auto I = ELFUniquingMap .insert(std::make_pair( ELFSectionKey{Name, GroupName, "", UniqueID}, Section)) .first; StringRef CachedName = I->first.SectionName; const_cast(Section)->setSectionName(CachedName); } MCSectionELF *MCContext::createELFSectionImpl(StringRef Section, unsigned Type, unsigned Flags, SectionKind K, unsigned EntrySize, const MCSymbolELF *Group, bool Comdat, unsigned UniqueID, const MCSymbolELF *LinkedToSym) { MCSymbolELF *R; MCSymbol *&Sym = Symbols[Section]; // A section symbol can not redefine regular symbols. There may be multiple // sections with the same name, in which case the first such section wins. 
if (Sym && Sym->isDefined() && (!Sym->isInSection() || Sym->getSection().getBeginSymbol() != Sym)) reportError(SMLoc(), "invalid symbol redefinition"); if (Sym && Sym->isUndefined()) { R = cast(Sym); } else { auto NameIter = UsedNames.insert(std::make_pair(Section, false)).first; R = new (&*NameIter, *this) MCSymbolELF(&*NameIter, /*isTemporary*/ false); if (!Sym) Sym = R; } R->setBinding(ELF::STB_LOCAL); R->setType(ELF::STT_SECTION); auto *Ret = new (ELFAllocator.Allocate()) MCSectionELF(Section, Type, Flags, K, EntrySize, Group, Comdat, UniqueID, R, LinkedToSym); auto *F = new MCDataFragment(); Ret->getFragmentList().insert(Ret->begin(), F); F->setParent(Ret); R->setFragment(F); return Ret; } MCSectionELF *MCContext::createELFRelSection(const Twine &Name, unsigned Type, unsigned Flags, unsigned EntrySize, const MCSymbolELF *Group, const MCSectionELF *RelInfoSection) { StringMap::iterator I; bool Inserted; std::tie(I, Inserted) = RelSecNames.insert(std::make_pair(Name.str(), true)); return createELFSectionImpl( I->getKey(), Type, Flags, SectionKind::getReadOnly(), EntrySize, Group, true, true, cast(RelInfoSection->getBeginSymbol())); } MCSectionELF *MCContext::getELFNamedSection(const Twine &Prefix, const Twine &Suffix, unsigned Type, unsigned Flags, unsigned EntrySize) { return getELFSection(Prefix + "." 
+ Suffix, Type, Flags, EntrySize, Suffix, /*IsComdat=*/true); } MCSectionELF *MCContext::getELFSection(const Twine &Section, unsigned Type, unsigned Flags, unsigned EntrySize, const Twine &Group, bool IsComdat, unsigned UniqueID, const MCSymbolELF *LinkedToSym) { MCSymbolELF *GroupSym = nullptr; if (!Group.isTriviallyEmpty() && !Group.str().empty()) GroupSym = cast(getOrCreateSymbol(Group)); return getELFSection(Section, Type, Flags, EntrySize, GroupSym, IsComdat, UniqueID, LinkedToSym); } MCSectionELF *MCContext::getELFSection(const Twine &Section, unsigned Type, unsigned Flags, unsigned EntrySize, const MCSymbolELF *GroupSym, bool IsComdat, unsigned UniqueID, const MCSymbolELF *LinkedToSym) { StringRef Group = ""; if (GroupSym) Group = GroupSym->getName(); assert(!(LinkedToSym && LinkedToSym->getName().empty())); // Do the lookup, if we have a hit, return it. auto IterBool = ELFUniquingMap.insert(std::make_pair( ELFSectionKey{Section.str(), Group, LinkedToSym ? LinkedToSym->getName() : "", UniqueID}, nullptr)); auto &Entry = *IterBool.first; if (!IterBool.second) return Entry.second; StringRef CachedName = Entry.first.SectionName; SectionKind Kind; if (Flags & ELF::SHF_ARM_PURECODE) Kind = SectionKind::getExecuteOnly(); else if (Flags & ELF::SHF_EXECINSTR) Kind = SectionKind::getText(); else Kind = SectionKind::getReadOnly(); MCSectionELF *Result = createELFSectionImpl(CachedName, Type, Flags, Kind, EntrySize, GroupSym, IsComdat, UniqueID, LinkedToSym); Entry.second = Result; recordELFMergeableSectionInfo(Result->getName(), Result->getFlags(), Result->getUniqueID(), Result->getEntrySize()); return Result; } MCSectionELF *MCContext::createELFGroupSection(const MCSymbolELF *Group, bool IsComdat) { return createELFSectionImpl(".group", ELF::SHT_GROUP, 0, SectionKind::getReadOnly(), 4, Group, IsComdat, MCSection::NonUniqueID, nullptr); } void MCContext::recordELFMergeableSectionInfo(StringRef SectionName, unsigned Flags, unsigned UniqueID, unsigned EntrySize) { bool 
IsMergeable = Flags & ELF::SHF_MERGE; - if (UniqueID == GenericSectionID) + if (IsMergeable && (UniqueID == GenericSectionID)) ELFSeenGenericMergeableSections.insert(SectionName); // For mergeable sections or non-mergeable sections with a generic mergeable // section name we enter their Unique ID into the ELFEntrySizeMap so that // compatible globals can be assigned to the same section. if (IsMergeable || isELFGenericMergeableSection(SectionName)) { ELFEntrySizeMap.insert(std::make_pair( ELFEntrySizeKey{SectionName, Flags, EntrySize}, UniqueID)); } } bool MCContext::isELFImplicitMergeableSectionNamePrefix(StringRef SectionName) { return SectionName.startswith(".rodata.str") || SectionName.startswith(".rodata.cst"); } bool MCContext::isELFGenericMergeableSection(StringRef SectionName) { return isELFImplicitMergeableSectionNamePrefix(SectionName) || ELFSeenGenericMergeableSections.count(SectionName); } Optional MCContext::getELFUniqueIDForEntsize(StringRef SectionName, unsigned Flags, unsigned EntrySize) { auto I = ELFEntrySizeMap.find( MCContext::ELFEntrySizeKey{SectionName, Flags, EntrySize}); return (I != ELFEntrySizeMap.end()) ? Optional(I->second) : None; } MCSectionGOFF *MCContext::getGOFFSection(StringRef Section, SectionKind Kind) { // Do the lookup. If we don't have a hit, return a new section. auto &GOFFSection = GOFFUniquingMap[Section.str()]; if (!GOFFSection) GOFFSection = new (GOFFAllocator.Allocate()) MCSectionGOFF(Section, Kind); return GOFFSection; } MCSectionCOFF *MCContext::getCOFFSection(StringRef Section, unsigned Characteristics, SectionKind Kind, StringRef COMDATSymName, int Selection, unsigned UniqueID, const char *BeginSymName) { MCSymbol *COMDATSymbol = nullptr; if (!COMDATSymName.empty()) { COMDATSymbol = getOrCreateSymbol(COMDATSymName); COMDATSymName = COMDATSymbol->getName(); } // Do the lookup, if we have a hit, return it. 
COFFSectionKey T{Section, COMDATSymName, Selection, UniqueID}; auto IterBool = COFFUniquingMap.insert(std::make_pair(T, nullptr)); auto Iter = IterBool.first; if (!IterBool.second) return Iter->second; MCSymbol *Begin = nullptr; if (BeginSymName) Begin = createTempSymbol(BeginSymName, false); StringRef CachedName = Iter->first.SectionName; MCSectionCOFF *Result = new (COFFAllocator.Allocate()) MCSectionCOFF( CachedName, Characteristics, COMDATSymbol, Selection, Kind, Begin); Iter->second = Result; return Result; } MCSectionCOFF *MCContext::getCOFFSection(StringRef Section, unsigned Characteristics, SectionKind Kind, const char *BeginSymName) { return getCOFFSection(Section, Characteristics, Kind, "", 0, GenericSectionID, BeginSymName); } MCSectionCOFF *MCContext::getAssociativeCOFFSection(MCSectionCOFF *Sec, const MCSymbol *KeySym, unsigned UniqueID) { // Return the normal section if we don't have to be associative or unique. if (!KeySym && UniqueID == GenericSectionID) return Sec; // If we have a key symbol, make an associative section with the same name and // kind as the normal section. 
unsigned Characteristics = Sec->getCharacteristics(); if (KeySym) { Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT; return getCOFFSection(Sec->getName(), Characteristics, Sec->getKind(), KeySym->getName(), COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE, UniqueID); } return getCOFFSection(Sec->getName(), Characteristics, Sec->getKind(), "", 0, UniqueID); } MCSectionWasm *MCContext::getWasmSection(const Twine &Section, SectionKind K, unsigned Flags, const Twine &Group, unsigned UniqueID, const char *BeginSymName) { MCSymbolWasm *GroupSym = nullptr; if (!Group.isTriviallyEmpty() && !Group.str().empty()) { GroupSym = cast(getOrCreateSymbol(Group)); GroupSym->setComdat(true); } return getWasmSection(Section, K, Flags, GroupSym, UniqueID, BeginSymName); } MCSectionWasm *MCContext::getWasmSection(const Twine &Section, SectionKind Kind, unsigned Flags, const MCSymbolWasm *GroupSym, unsigned UniqueID, const char *BeginSymName) { StringRef Group = ""; if (GroupSym) Group = GroupSym->getName(); // Do the lookup, if we have a hit, return it. 
auto IterBool = WasmUniquingMap.insert( std::make_pair(WasmSectionKey{Section.str(), Group, UniqueID}, nullptr)); auto &Entry = *IterBool.first; if (!IterBool.second) return Entry.second; StringRef CachedName = Entry.first.SectionName; MCSymbol *Begin = createSymbol(CachedName, true, false); Symbols[Begin->getName()] = Begin; cast(Begin)->setType(wasm::WASM_SYMBOL_TYPE_SECTION); MCSectionWasm *Result = new (WasmAllocator.Allocate()) MCSectionWasm(CachedName, Kind, Flags, GroupSym, UniqueID, Begin); Entry.second = Result; auto *F = new MCDataFragment(); Result->getFragmentList().insert(Result->begin(), F); F->setParent(Result); Begin->setFragment(F); return Result; } MCSectionXCOFF *MCContext::getXCOFFSection( StringRef Section, SectionKind Kind, Optional CsectProp, bool MultiSymbolsAllowed, const char *BeginSymName, Optional DwarfSectionSubtypeFlags) { bool IsDwarfSec = DwarfSectionSubtypeFlags.hasValue(); assert((IsDwarfSec != CsectProp.hasValue()) && "Invalid XCOFF section!"); // Do the lookup. If we have a hit, return it. auto IterBool = XCOFFUniquingMap.insert(std::make_pair( IsDwarfSec ? XCOFFSectionKey(Section.str(), DwarfSectionSubtypeFlags.getValue()) : XCOFFSectionKey(Section.str(), CsectProp->MappingClass), nullptr)); auto &Entry = *IterBool.first; if (!IterBool.second) { MCSectionXCOFF *ExistedEntry = Entry.second; if (ExistedEntry->isMultiSymbolsAllowed() != MultiSymbolsAllowed) report_fatal_error("section's multiply symbols policy does not match"); return ExistedEntry; } // Otherwise, return a new section. StringRef CachedName = Entry.first.SectionName; MCSymbolXCOFF *QualName = nullptr; // Debug section don't have storage class attribute. 
if (IsDwarfSec) QualName = cast(getOrCreateSymbol(CachedName)); else QualName = cast(getOrCreateSymbol( CachedName + "[" + XCOFF::getMappingClassString(CsectProp->MappingClass) + "]")); MCSymbol *Begin = nullptr; if (BeginSymName) Begin = createTempSymbol(BeginSymName, false); // QualName->getUnqualifiedName() and CachedName are the same except when // CachedName contains invalid character(s) such as '$' for an XCOFF symbol. MCSectionXCOFF *Result = nullptr; if (IsDwarfSec) Result = new (XCOFFAllocator.Allocate()) MCSectionXCOFF(QualName->getUnqualifiedName(), Kind, QualName, DwarfSectionSubtypeFlags.getValue(), Begin, CachedName, MultiSymbolsAllowed); else Result = new (XCOFFAllocator.Allocate()) MCSectionXCOFF(QualName->getUnqualifiedName(), CsectProp->MappingClass, CsectProp->Type, Kind, QualName, Begin, CachedName, MultiSymbolsAllowed); Entry.second = Result; auto *F = new MCDataFragment(); Result->getFragmentList().insert(Result->begin(), F); F->setParent(Result); if (Begin) Begin->setFragment(F); return Result; } MCSubtargetInfo &MCContext::getSubtargetCopy(const MCSubtargetInfo &STI) { return *new (MCSubtargetAllocator.Allocate()) MCSubtargetInfo(STI); } void MCContext::addDebugPrefixMapEntry(const std::string &From, const std::string &To) { DebugPrefixMap.insert(std::make_pair(From, To)); } void MCContext::RemapDebugPaths() { const auto &DebugPrefixMap = this->DebugPrefixMap; if (DebugPrefixMap.empty()) return; const auto RemapDebugPath = [&DebugPrefixMap](std::string &Path) { SmallString<256> P(Path); for (const auto &Entry : DebugPrefixMap) { if (llvm::sys::path::replace_path_prefix(P, Entry.first, Entry.second)) { Path = P.str().str(); break; } } }; // Remap compilation directory. std::string CompDir = std::string(CompilationDir.str()); RemapDebugPath(CompDir); CompilationDir = CompDir; // Remap MCDwarfDirs in all compilation units. 
for (auto &CUIDTablePair : MCDwarfLineTablesCUMap) for (auto &Dir : CUIDTablePair.second.getMCDwarfDirs()) RemapDebugPath(Dir); } //===----------------------------------------------------------------------===// // Dwarf Management //===----------------------------------------------------------------------===// void MCContext::setGenDwarfRootFile(StringRef InputFileName, StringRef Buffer) { // MCDwarf needs the root file as well as the compilation directory. // If we find a '.file 0' directive that will supersede these values. Optional Cksum; if (getDwarfVersion() >= 5) { MD5 Hash; MD5::MD5Result Sum; Hash.update(Buffer); Hash.final(Sum); Cksum = Sum; } // Canonicalize the root filename. It cannot be empty, and should not // repeat the compilation dir. // The MCContext ctor initializes MainFileName to the name associated with // the SrcMgr's main file ID, which might be the same as InputFileName (and // possibly include directory components). // Or, MainFileName might have been overridden by a -main-file-name option, // which is supposed to be just a base filename with no directory component. // So, if the InputFileName and MainFileName are not equal, assume // MainFileName is a substitute basename and replace the last component. SmallString<1024> FileNameBuf = InputFileName; if (FileNameBuf.empty() || FileNameBuf == "-") FileNameBuf = ""; if (!getMainFileName().empty() && FileNameBuf != getMainFileName()) { llvm::sys::path::remove_filename(FileNameBuf); llvm::sys::path::append(FileNameBuf, getMainFileName()); } StringRef FileName = FileNameBuf; if (FileName.consume_front(getCompilationDir())) if (llvm::sys::path::is_separator(FileName.front())) FileName = FileName.drop_front(); assert(!FileName.empty()); setMCLineTableRootFile( /*CUID=*/0, getCompilationDir(), FileName, Cksum, None); } /// getDwarfFile - takes a file name and number to place in the dwarf file and /// directory tables. 
If the file number has already been allocated it is an /// error and zero is returned and the client reports the error, else the /// allocated file number is returned. The file numbers may be in any order. Expected MCContext::getDwarfFile(StringRef Directory, StringRef FileName, unsigned FileNumber, Optional Checksum, Optional Source, unsigned CUID) { MCDwarfLineTable &Table = MCDwarfLineTablesCUMap[CUID]; return Table.tryGetFile(Directory, FileName, Checksum, Source, DwarfVersion, FileNumber); } /// isValidDwarfFileNumber - takes a dwarf file number and returns true if it /// currently is assigned and false otherwise. bool MCContext::isValidDwarfFileNumber(unsigned FileNumber, unsigned CUID) { const MCDwarfLineTable &LineTable = getMCDwarfLineTable(CUID); if (FileNumber == 0) return getDwarfVersion() >= 5; if (FileNumber >= LineTable.getMCDwarfFiles().size()) return false; return !LineTable.getMCDwarfFiles()[FileNumber].Name.empty(); } /// Remove empty sections from SectionsForRanges, to avoid generating /// useless debug info for them. 
void MCContext::finalizeDwarfSections(MCStreamer &MCOS) { SectionsForRanges.remove_if( [&](MCSection *Sec) { return !MCOS.mayHaveInstructions(*Sec); }); } CodeViewContext &MCContext::getCVContext() { if (!CVContext.get()) CVContext.reset(new CodeViewContext); return *CVContext.get(); } //===----------------------------------------------------------------------===// // Error Reporting //===----------------------------------------------------------------------===// void MCContext::diagnose(const SMDiagnostic &SMD) { assert(DiagHandler && "MCContext::DiagHandler is not set"); bool UseInlineSrcMgr = false; const SourceMgr *SMP = nullptr; if (SrcMgr) { SMP = SrcMgr; } else if (InlineSrcMgr) { SMP = InlineSrcMgr.get(); UseInlineSrcMgr = true; } else llvm_unreachable("Either SourceMgr should be available"); DiagHandler(SMD, UseInlineSrcMgr, *SMP, LocInfos); } void MCContext::reportCommon( SMLoc Loc, std::function GetMessage) { // * MCContext::SrcMgr is null when the MC layer emits machine code for input // other than assembly file, say, for .c/.cpp/.ll/.bc. // * MCContext::InlineSrcMgr is null when the inline asm is not used. // * A default SourceMgr is needed for diagnosing when both MCContext::SrcMgr // and MCContext::InlineSrcMgr are null. SourceMgr SM; const SourceMgr *SMP = &SM; bool UseInlineSrcMgr = false; // FIXME: Simplify these by combining InlineSrcMgr & SrcMgr. // For MC-only execution, only SrcMgr is used; // For non MC-only execution, InlineSrcMgr is only ctor'd if there is // inline asm in the IR. 
if (Loc.isValid()) { if (SrcMgr) { SMP = SrcMgr; } else if (InlineSrcMgr) { SMP = InlineSrcMgr.get(); UseInlineSrcMgr = true; } else llvm_unreachable("Either SourceMgr should be available"); } SMDiagnostic D; GetMessage(D, SMP); DiagHandler(D, UseInlineSrcMgr, *SMP, LocInfos); } void MCContext::reportError(SMLoc Loc, const Twine &Msg) { HadError = true; reportCommon(Loc, [&](SMDiagnostic &D, const SourceMgr *SMP) { D = SMP->GetMessage(Loc, SourceMgr::DK_Error, Msg); }); } void MCContext::reportWarning(SMLoc Loc, const Twine &Msg) { if (TargetOptions && TargetOptions->MCNoWarn) return; if (TargetOptions && TargetOptions->MCFatalWarnings) { reportError(Loc, Msg); } else { reportCommon(Loc, [&](SMDiagnostic &D, const SourceMgr *SMP) { D = SMP->GetMessage(Loc, SourceMgr::DK_Warning, Msg); }); } } void MCContext::reportFatalError(SMLoc Loc, const Twine &Msg) { reportError(Loc, Msg); // If we reached here, we are failing ungracefully. Run the interrupt handlers // to make sure any special cleanups get done, in particular that we remove // files registered with RemoveFileOnSignal. sys::RunInterruptHandlers(); exit(1); } diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 60c00f47859b..494554ae7b33 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1,18753 +1,18754 @@ //===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation ----===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements the AArch64TargetLowering class. 
// //===----------------------------------------------------------------------===// #include "AArch64ISelLowering.h" #include "AArch64CallingConvention.h" #include "AArch64ExpandImm.h" #include "AArch64MachineFunctionInfo.h" #include "AArch64PerfectShuffle.h" #include "AArch64RegisterInfo.h" #include "AArch64Subtarget.h" #include "MCTargetDesc/AArch64AddressingModes.h" #include "Utils/AArch64BaseInfo.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Triple.h" #include "llvm/ADT/Twine.h" #include "llvm/Analysis/ObjCARCUtil.h" #include "llvm/Analysis/VectorUtils.h" #include "llvm/CodeGen/Analysis.h" #include "llvm/CodeGen/CallingConvLower.h" #include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineMemOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/RuntimeLibcalls.h" #include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/TargetCallingConv.h" #include "llvm/CodeGen/TargetInstrInfo.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/IntrinsicsAArch64.h" #include "llvm/IR/Module.h" #include "llvm/IR/OperandTraits.h" #include "llvm/IR/PatternMatch.h" 
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
// NOTE(review): the angle-bracket header names below were stripped by
// extraction; restored from the upstream file at this revision — confirm
// against the original source.
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "aarch64-lower"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumShiftInserts, "Number of vector shift inserts");
STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");

// FIXME: The necessary dtprel relocations don't seem to be supported
// well in the GNU bfd and gold linkers at the moment. Therefore, by
// default, for now, fall back to GeneralDynamic code generation.
cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
    "aarch64-elf-ldtls-generation", cl::Hidden,
    cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
    cl::init(false));

static cl::opt<bool>
    EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
                             cl::desc("Enable AArch64 logical imm instruction "
                                      "optimization"),
                             cl::init(true));

// Temporary option added for the purpose of testing functionality added
// to DAGCombiner.cpp in D92230. It is expected that this can be removed
// in future when both implementations will be based off MGATHER rather
// than the GLD1 nodes added for the SVE gather load intrinsics.
static cl::opt<bool>
    EnableCombineMGatherIntrinsics("aarch64-enable-mgather-combine", cl::Hidden,
                                   cl::desc("Combine extends of AArch64 masked "
                                            "gather intrinsics"),
                                   cl::init(true));

/// Value type used for condition codes.
static const MVT MVT_CC = MVT::i32;

/// Map a scalar element type onto the SVE full-register ("packed") vector
/// type with that element.
static inline EVT getPackedSVEVectorVT(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i8:
    return MVT::nxv16i8;
  case MVT::i16:
    return MVT::nxv8i16;
  case MVT::i32:
    return MVT::nxv4i32;
  case MVT::i64:
    return MVT::nxv2i64;
  case MVT::f16:
    return MVT::nxv8f16;
  case MVT::f32:
    return MVT::nxv4f32;
  case MVT::f64:
    return MVT::nxv2f64;
  case MVT::bf16:
    return MVT::nxv8bf16;
  default:
    llvm_unreachable("unexpected element type for vector");
  }
}

// NOTE: Currently there's only a need to return integer vector types. If this
// changes then just add an extra "type" parameter.
static inline EVT getPackedSVEVectorVT(ElementCount EC) {
  switch (EC.getKnownMinValue()) {
  case 2:
    return MVT::nxv2i64;
  case 4:
    return MVT::nxv4i32;
  case 8:
    return MVT::nxv8i16;
  case 16:
    return MVT::nxv16i8;
  default:
    llvm_unreachable("unexpected element count for vector");
  }
}

/// Pick the integer vector type a scalable i1 predicate vector is promoted
/// to, based purely on its (minimum) element count.
static inline EVT getPromotedVTForPredicate(EVT VT) {
  assert(VT.isScalableVector() && (VT.getVectorElementType() == MVT::i1) &&
         "Expected scalable predicate vector type!");
  switch (VT.getVectorMinNumElements()) {
  case 2:
    return MVT::nxv2i64;
  case 4:
    return MVT::nxv4i32;
  case 8:
    return MVT::nxv8i16;
  case 16:
    return MVT::nxv16i8;
  default:
    llvm_unreachable("unexpected element count for vector");
  }
}

/// Returns true if VT's elements occupy the lowest bit positions of its
/// associated register class without any intervening space.
///
/// For example, nxv2f16, nxv4f16 and nxv8f16 are legal types that belong to the
/// same register class, but only nxv8f16 can be treated as a packed vector.
static inline bool isPackedVectorType(EVT VT, SelectionDAG &DAG) { assert(VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) && "Expected legal vector type!"); return VT.isFixedLengthVector() || VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock; } // Returns true for ####_MERGE_PASSTHRU opcodes, whose operands have a leading // predicate and end with a passthru value matching the result type. static bool isMergePassthruOpcode(unsigned Opc) { switch (Opc) { default: return false; case AArch64ISD::BITREVERSE_MERGE_PASSTHRU: case AArch64ISD::BSWAP_MERGE_PASSTHRU: case AArch64ISD::CTLZ_MERGE_PASSTHRU: case AArch64ISD::CTPOP_MERGE_PASSTHRU: case AArch64ISD::DUP_MERGE_PASSTHRU: case AArch64ISD::ABS_MERGE_PASSTHRU: case AArch64ISD::NEG_MERGE_PASSTHRU: case AArch64ISD::FNEG_MERGE_PASSTHRU: case AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU: case AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU: case AArch64ISD::FCEIL_MERGE_PASSTHRU: case AArch64ISD::FFLOOR_MERGE_PASSTHRU: case AArch64ISD::FNEARBYINT_MERGE_PASSTHRU: case AArch64ISD::FRINT_MERGE_PASSTHRU: case AArch64ISD::FROUND_MERGE_PASSTHRU: case AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU: case AArch64ISD::FTRUNC_MERGE_PASSTHRU: case AArch64ISD::FP_ROUND_MERGE_PASSTHRU: case AArch64ISD::FP_EXTEND_MERGE_PASSTHRU: case AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU: case AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU: case AArch64ISD::FCVTZU_MERGE_PASSTHRU: case AArch64ISD::FCVTZS_MERGE_PASSTHRU: case AArch64ISD::FSQRT_MERGE_PASSTHRU: case AArch64ISD::FRECPX_MERGE_PASSTHRU: case AArch64ISD::FABS_MERGE_PASSTHRU: return true; } } AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM, const AArch64Subtarget &STI) : TargetLowering(TM), Subtarget(&STI) { // AArch64 doesn't have comparisons which set GPRs or setcc instructions, so // we have to make something up. Arbitrarily, choose ZeroOrOne. 
setBooleanContents(ZeroOrOneBooleanContent); // When comparing vectors the result sets the different elements in the // vector to all-one or all-zero. setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // Set up the register classes. addRegisterClass(MVT::i32, &AArch64::GPR32allRegClass); addRegisterClass(MVT::i64, &AArch64::GPR64allRegClass); if (Subtarget->hasLS64()) { addRegisterClass(MVT::i64x8, &AArch64::GPR64x8ClassRegClass); setOperationAction(ISD::LOAD, MVT::i64x8, Custom); setOperationAction(ISD::STORE, MVT::i64x8, Custom); } if (Subtarget->hasFPARMv8()) { addRegisterClass(MVT::f16, &AArch64::FPR16RegClass); addRegisterClass(MVT::bf16, &AArch64::FPR16RegClass); addRegisterClass(MVT::f32, &AArch64::FPR32RegClass); addRegisterClass(MVT::f64, &AArch64::FPR64RegClass); addRegisterClass(MVT::f128, &AArch64::FPR128RegClass); } if (Subtarget->hasNEON()) { addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass); addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass); // Someone set us up the NEON. 
addDRTypeForNEON(MVT::v2f32); addDRTypeForNEON(MVT::v8i8); addDRTypeForNEON(MVT::v4i16); addDRTypeForNEON(MVT::v2i32); addDRTypeForNEON(MVT::v1i64); addDRTypeForNEON(MVT::v1f64); addDRTypeForNEON(MVT::v4f16); if (Subtarget->hasBF16()) addDRTypeForNEON(MVT::v4bf16); addQRTypeForNEON(MVT::v4f32); addQRTypeForNEON(MVT::v2f64); addQRTypeForNEON(MVT::v16i8); addQRTypeForNEON(MVT::v8i16); addQRTypeForNEON(MVT::v4i32); addQRTypeForNEON(MVT::v2i64); addQRTypeForNEON(MVT::v8f16); if (Subtarget->hasBF16()) addQRTypeForNEON(MVT::v8bf16); } if (Subtarget->hasSVE()) { // Add legal sve predicate types addRegisterClass(MVT::nxv2i1, &AArch64::PPRRegClass); addRegisterClass(MVT::nxv4i1, &AArch64::PPRRegClass); addRegisterClass(MVT::nxv8i1, &AArch64::PPRRegClass); addRegisterClass(MVT::nxv16i1, &AArch64::PPRRegClass); // Add legal sve data types addRegisterClass(MVT::nxv16i8, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv8i16, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv4i32, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv2i64, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv2f16, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv4f16, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv8f16, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv2f32, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv4f32, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv2f64, &AArch64::ZPRRegClass); if (Subtarget->hasBF16()) { addRegisterClass(MVT::nxv2bf16, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv4bf16, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv8bf16, &AArch64::ZPRRegClass); } if (Subtarget->useSVEForFixedLengthVectors()) { for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) if (useSVEForFixedLengthVectorVT(VT)) addRegisterClass(VT, &AArch64::ZPRRegClass); for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) if (useSVEForFixedLengthVectorVT(VT)) addRegisterClass(VT, &AArch64::ZPRRegClass); } for (auto VT : { MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64 }) { 
setOperationAction(ISD::SADDSAT, VT, Legal); setOperationAction(ISD::UADDSAT, VT, Legal); setOperationAction(ISD::SSUBSAT, VT, Legal); setOperationAction(ISD::USUBSAT, VT, Legal); setOperationAction(ISD::UREM, VT, Expand); setOperationAction(ISD::SREM, VT, Expand); setOperationAction(ISD::SDIVREM, VT, Expand); setOperationAction(ISD::UDIVREM, VT, Expand); } for (auto VT : { MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, MVT::nxv4i8, MVT::nxv4i16, MVT::nxv4i32, MVT::nxv8i8, MVT::nxv8i16 }) setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Legal); for (auto VT : { MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv2f64 }) { setCondCodeAction(ISD::SETO, VT, Expand); setCondCodeAction(ISD::SETOLT, VT, Expand); setCondCodeAction(ISD::SETLT, VT, Expand); setCondCodeAction(ISD::SETOLE, VT, Expand); setCondCodeAction(ISD::SETLE, VT, Expand); setCondCodeAction(ISD::SETULT, VT, Expand); setCondCodeAction(ISD::SETULE, VT, Expand); setCondCodeAction(ISD::SETUGE, VT, Expand); setCondCodeAction(ISD::SETUGT, VT, Expand); setCondCodeAction(ISD::SETUEQ, VT, Expand); setCondCodeAction(ISD::SETUNE, VT, Expand); setOperationAction(ISD::FREM, VT, Expand); setOperationAction(ISD::FPOW, VT, Expand); setOperationAction(ISD::FPOWI, VT, Expand); setOperationAction(ISD::FCOS, VT, Expand); setOperationAction(ISD::FSIN, VT, Expand); setOperationAction(ISD::FSINCOS, VT, Expand); setOperationAction(ISD::FEXP, VT, Expand); setOperationAction(ISD::FEXP2, VT, Expand); setOperationAction(ISD::FLOG, VT, Expand); setOperationAction(ISD::FLOG2, VT, Expand); setOperationAction(ISD::FLOG10, VT, Expand); } } // Compute derived properties from the register classes computeRegisterProperties(Subtarget->getRegisterInfo()); // Provide all sorts of operation actions setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); setOperationAction(ISD::SETCC, MVT::i32, Custom); setOperationAction(ISD::SETCC, MVT::i64, 
Custom); setOperationAction(ISD::SETCC, MVT::f16, Custom); setOperationAction(ISD::SETCC, MVT::f32, Custom); setOperationAction(ISD::SETCC, MVT::f64, Custom); setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom); setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom); setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom); setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom); setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom); setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom); setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); setOperationAction(ISD::BRCOND, MVT::Other, Expand); setOperationAction(ISD::BR_CC, MVT::i32, Custom); setOperationAction(ISD::BR_CC, MVT::i64, Custom); setOperationAction(ISD::BR_CC, MVT::f16, Custom); setOperationAction(ISD::BR_CC, MVT::f32, Custom); setOperationAction(ISD::BR_CC, MVT::f64, Custom); setOperationAction(ISD::SELECT, MVT::i32, Custom); setOperationAction(ISD::SELECT, MVT::i64, Custom); setOperationAction(ISD::SELECT, MVT::f16, Custom); setOperationAction(ISD::SELECT, MVT::f32, Custom); setOperationAction(ISD::SELECT, MVT::f64, Custom); setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); setOperationAction(ISD::SELECT_CC, MVT::f16, Custom); setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); setOperationAction(ISD::BR_JT, MVT::Other, Custom); setOperationAction(ISD::JumpTable, MVT::i64, Custom); setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); setOperationAction(ISD::FREM, MVT::f32, Expand); setOperationAction(ISD::FREM, MVT::f64, Expand); setOperationAction(ISD::FREM, MVT::f80, Expand); setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); // Custom lowering hooks are needed for XOR // to fold it into CSINC/CSINV. 
setOperationAction(ISD::XOR, MVT::i32, Custom); setOperationAction(ISD::XOR, MVT::i64, Custom); // Virtually no operation on f128 is legal, but LLVM can't expand them when // there's a valid register class, so we need custom operations in most cases. setOperationAction(ISD::FABS, MVT::f128, Expand); setOperationAction(ISD::FADD, MVT::f128, LibCall); setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); setOperationAction(ISD::FCOS, MVT::f128, Expand); setOperationAction(ISD::FDIV, MVT::f128, LibCall); setOperationAction(ISD::FMA, MVT::f128, Expand); setOperationAction(ISD::FMUL, MVT::f128, LibCall); setOperationAction(ISD::FNEG, MVT::f128, Expand); setOperationAction(ISD::FPOW, MVT::f128, Expand); setOperationAction(ISD::FREM, MVT::f128, Expand); setOperationAction(ISD::FRINT, MVT::f128, Expand); setOperationAction(ISD::FSIN, MVT::f128, Expand); setOperationAction(ISD::FSINCOS, MVT::f128, Expand); setOperationAction(ISD::FSQRT, MVT::f128, Expand); setOperationAction(ISD::FSUB, MVT::f128, LibCall); setOperationAction(ISD::FTRUNC, MVT::f128, Expand); setOperationAction(ISD::SETCC, MVT::f128, Custom); setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom); setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom); setOperationAction(ISD::BR_CC, MVT::f128, Custom); setOperationAction(ISD::SELECT, MVT::f128, Custom); setOperationAction(ISD::SELECT_CC, MVT::f128, Custom); setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom); // Lowering for many of the conversions is actually specified by the non-f128 // type. The LowerXXX function will be trivial when f128 isn't involved. 
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom); setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom); setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom); setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom); setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom); setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom); setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom); setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom); setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom); setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom); setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom); setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom); setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom); setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); setOperationAction(ISD::FP_ROUND, MVT::f64, Custom); setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom); setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom); setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom); setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom); setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom); setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom); setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom); // Variable 
arguments. setOperationAction(ISD::VASTART, MVT::Other, Custom); setOperationAction(ISD::VAARG, MVT::Other, Custom); setOperationAction(ISD::VACOPY, MVT::Other, Custom); setOperationAction(ISD::VAEND, MVT::Other, Expand); // Variable-sized objects. setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); if (Subtarget->isTargetWindows()) setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); else setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand); // Constant pool entries setOperationAction(ISD::ConstantPool, MVT::i64, Custom); // BlockAddress setOperationAction(ISD::BlockAddress, MVT::i64, Custom); // Add/Sub overflow ops with MVT::Glues are lowered to NZCV dependences. setOperationAction(ISD::ADDC, MVT::i32, Custom); setOperationAction(ISD::ADDE, MVT::i32, Custom); setOperationAction(ISD::SUBC, MVT::i32, Custom); setOperationAction(ISD::SUBE, MVT::i32, Custom); setOperationAction(ISD::ADDC, MVT::i64, Custom); setOperationAction(ISD::ADDE, MVT::i64, Custom); setOperationAction(ISD::SUBC, MVT::i64, Custom); setOperationAction(ISD::SUBE, MVT::i64, Custom); // AArch64 lacks both left-rotate and popcount instructions. setOperationAction(ISD::ROTL, MVT::i32, Expand); setOperationAction(ISD::ROTL, MVT::i64, Expand); for (MVT VT : MVT::fixedlen_vector_valuetypes()) { setOperationAction(ISD::ROTL, VT, Expand); setOperationAction(ISD::ROTR, VT, Expand); } // AArch64 doesn't have i32 MULH{S|U}. setOperationAction(ISD::MULHU, MVT::i32, Expand); setOperationAction(ISD::MULHS, MVT::i32, Expand); // AArch64 doesn't have {U|S}MUL_LOHI. 
setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); setOperationAction(ISD::CTPOP, MVT::i32, Custom); setOperationAction(ISD::CTPOP, MVT::i64, Custom); setOperationAction(ISD::CTPOP, MVT::i128, Custom); setOperationAction(ISD::ABS, MVT::i32, Custom); setOperationAction(ISD::ABS, MVT::i64, Custom); setOperationAction(ISD::SDIVREM, MVT::i32, Expand); setOperationAction(ISD::SDIVREM, MVT::i64, Expand); for (MVT VT : MVT::fixedlen_vector_valuetypes()) { setOperationAction(ISD::SDIVREM, VT, Expand); setOperationAction(ISD::UDIVREM, VT, Expand); } setOperationAction(ISD::SREM, MVT::i32, Expand); setOperationAction(ISD::SREM, MVT::i64, Expand); setOperationAction(ISD::UDIVREM, MVT::i32, Expand); setOperationAction(ISD::UDIVREM, MVT::i64, Expand); setOperationAction(ISD::UREM, MVT::i32, Expand); setOperationAction(ISD::UREM, MVT::i64, Expand); // Custom lower Add/Sub/Mul with overflow. setOperationAction(ISD::SADDO, MVT::i32, Custom); setOperationAction(ISD::SADDO, MVT::i64, Custom); setOperationAction(ISD::UADDO, MVT::i32, Custom); setOperationAction(ISD::UADDO, MVT::i64, Custom); setOperationAction(ISD::SSUBO, MVT::i32, Custom); setOperationAction(ISD::SSUBO, MVT::i64, Custom); setOperationAction(ISD::USUBO, MVT::i32, Custom); setOperationAction(ISD::USUBO, MVT::i64, Custom); setOperationAction(ISD::SMULO, MVT::i32, Custom); setOperationAction(ISD::SMULO, MVT::i64, Custom); setOperationAction(ISD::UMULO, MVT::i32, Custom); setOperationAction(ISD::UMULO, MVT::i64, Custom); setOperationAction(ISD::FSIN, MVT::f32, Expand); setOperationAction(ISD::FSIN, MVT::f64, Expand); setOperationAction(ISD::FCOS, MVT::f32, Expand); setOperationAction(ISD::FCOS, MVT::f64, Expand); setOperationAction(ISD::FPOW, MVT::f32, Expand); setOperationAction(ISD::FPOW, MVT::f64, Expand); setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); if (Subtarget->hasFullFP16()) 
setOperationAction(ISD::FCOPYSIGN, MVT::f16, Custom); else setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote); setOperationAction(ISD::FREM, MVT::f16, Promote); setOperationAction(ISD::FREM, MVT::v4f16, Expand); setOperationAction(ISD::FREM, MVT::v8f16, Expand); setOperationAction(ISD::FPOW, MVT::f16, Promote); setOperationAction(ISD::FPOW, MVT::v4f16, Expand); setOperationAction(ISD::FPOW, MVT::v8f16, Expand); setOperationAction(ISD::FPOWI, MVT::f16, Promote); setOperationAction(ISD::FPOWI, MVT::v4f16, Expand); setOperationAction(ISD::FPOWI, MVT::v8f16, Expand); setOperationAction(ISD::FCOS, MVT::f16, Promote); setOperationAction(ISD::FCOS, MVT::v4f16, Expand); setOperationAction(ISD::FCOS, MVT::v8f16, Expand); setOperationAction(ISD::FSIN, MVT::f16, Promote); setOperationAction(ISD::FSIN, MVT::v4f16, Expand); setOperationAction(ISD::FSIN, MVT::v8f16, Expand); setOperationAction(ISD::FSINCOS, MVT::f16, Promote); setOperationAction(ISD::FSINCOS, MVT::v4f16, Expand); setOperationAction(ISD::FSINCOS, MVT::v8f16, Expand); setOperationAction(ISD::FEXP, MVT::f16, Promote); setOperationAction(ISD::FEXP, MVT::v4f16, Expand); setOperationAction(ISD::FEXP, MVT::v8f16, Expand); setOperationAction(ISD::FEXP2, MVT::f16, Promote); setOperationAction(ISD::FEXP2, MVT::v4f16, Expand); setOperationAction(ISD::FEXP2, MVT::v8f16, Expand); setOperationAction(ISD::FLOG, MVT::f16, Promote); setOperationAction(ISD::FLOG, MVT::v4f16, Expand); setOperationAction(ISD::FLOG, MVT::v8f16, Expand); setOperationAction(ISD::FLOG2, MVT::f16, Promote); setOperationAction(ISD::FLOG2, MVT::v4f16, Expand); setOperationAction(ISD::FLOG2, MVT::v8f16, Expand); setOperationAction(ISD::FLOG10, MVT::f16, Promote); setOperationAction(ISD::FLOG10, MVT::v4f16, Expand); setOperationAction(ISD::FLOG10, MVT::v8f16, Expand); if (!Subtarget->hasFullFP16()) { setOperationAction(ISD::SELECT, MVT::f16, Promote); setOperationAction(ISD::SELECT_CC, MVT::f16, Promote); setOperationAction(ISD::SETCC, MVT::f16, 
Promote); setOperationAction(ISD::BR_CC, MVT::f16, Promote); setOperationAction(ISD::FADD, MVT::f16, Promote); setOperationAction(ISD::FSUB, MVT::f16, Promote); setOperationAction(ISD::FMUL, MVT::f16, Promote); setOperationAction(ISD::FDIV, MVT::f16, Promote); setOperationAction(ISD::FMA, MVT::f16, Promote); setOperationAction(ISD::FNEG, MVT::f16, Promote); setOperationAction(ISD::FABS, MVT::f16, Promote); setOperationAction(ISD::FCEIL, MVT::f16, Promote); setOperationAction(ISD::FSQRT, MVT::f16, Promote); setOperationAction(ISD::FFLOOR, MVT::f16, Promote); setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote); setOperationAction(ISD::FRINT, MVT::f16, Promote); setOperationAction(ISD::FROUND, MVT::f16, Promote); setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote); setOperationAction(ISD::FTRUNC, MVT::f16, Promote); setOperationAction(ISD::FMINNUM, MVT::f16, Promote); setOperationAction(ISD::FMAXNUM, MVT::f16, Promote); setOperationAction(ISD::FMINIMUM, MVT::f16, Promote); setOperationAction(ISD::FMAXIMUM, MVT::f16, Promote); // promote v4f16 to v4f32 when that is known to be safe. 
setOperationAction(ISD::FADD, MVT::v4f16, Promote); setOperationAction(ISD::FSUB, MVT::v4f16, Promote); setOperationAction(ISD::FMUL, MVT::v4f16, Promote); setOperationAction(ISD::FDIV, MVT::v4f16, Promote); AddPromotedToType(ISD::FADD, MVT::v4f16, MVT::v4f32); AddPromotedToType(ISD::FSUB, MVT::v4f16, MVT::v4f32); AddPromotedToType(ISD::FMUL, MVT::v4f16, MVT::v4f32); AddPromotedToType(ISD::FDIV, MVT::v4f16, MVT::v4f32); setOperationAction(ISD::FABS, MVT::v4f16, Expand); setOperationAction(ISD::FNEG, MVT::v4f16, Expand); setOperationAction(ISD::FROUND, MVT::v4f16, Expand); setOperationAction(ISD::FROUNDEVEN, MVT::v4f16, Expand); setOperationAction(ISD::FMA, MVT::v4f16, Expand); setOperationAction(ISD::SETCC, MVT::v4f16, Expand); setOperationAction(ISD::BR_CC, MVT::v4f16, Expand); setOperationAction(ISD::SELECT, MVT::v4f16, Expand); setOperationAction(ISD::SELECT_CC, MVT::v4f16, Expand); setOperationAction(ISD::FTRUNC, MVT::v4f16, Expand); setOperationAction(ISD::FCOPYSIGN, MVT::v4f16, Expand); setOperationAction(ISD::FFLOOR, MVT::v4f16, Expand); setOperationAction(ISD::FCEIL, MVT::v4f16, Expand); setOperationAction(ISD::FRINT, MVT::v4f16, Expand); setOperationAction(ISD::FNEARBYINT, MVT::v4f16, Expand); setOperationAction(ISD::FSQRT, MVT::v4f16, Expand); setOperationAction(ISD::FABS, MVT::v8f16, Expand); setOperationAction(ISD::FADD, MVT::v8f16, Expand); setOperationAction(ISD::FCEIL, MVT::v8f16, Expand); setOperationAction(ISD::FCOPYSIGN, MVT::v8f16, Expand); setOperationAction(ISD::FDIV, MVT::v8f16, Expand); setOperationAction(ISD::FFLOOR, MVT::v8f16, Expand); setOperationAction(ISD::FMA, MVT::v8f16, Expand); setOperationAction(ISD::FMUL, MVT::v8f16, Expand); setOperationAction(ISD::FNEARBYINT, MVT::v8f16, Expand); setOperationAction(ISD::FNEG, MVT::v8f16, Expand); setOperationAction(ISD::FROUND, MVT::v8f16, Expand); setOperationAction(ISD::FROUNDEVEN, MVT::v8f16, Expand); setOperationAction(ISD::FRINT, MVT::v8f16, Expand); setOperationAction(ISD::FSQRT, 
MVT::v8f16, Expand); setOperationAction(ISD::FSUB, MVT::v8f16, Expand); setOperationAction(ISD::FTRUNC, MVT::v8f16, Expand); setOperationAction(ISD::SETCC, MVT::v8f16, Expand); setOperationAction(ISD::BR_CC, MVT::v8f16, Expand); setOperationAction(ISD::SELECT, MVT::v8f16, Expand); setOperationAction(ISD::SELECT_CC, MVT::v8f16, Expand); setOperationAction(ISD::FP_EXTEND, MVT::v8f16, Expand); } // AArch64 has implementations of a lot of rounding-like FP operations. for (MVT Ty : {MVT::f32, MVT::f64}) { setOperationAction(ISD::FFLOOR, Ty, Legal); setOperationAction(ISD::FNEARBYINT, Ty, Legal); setOperationAction(ISD::FCEIL, Ty, Legal); setOperationAction(ISD::FRINT, Ty, Legal); setOperationAction(ISD::FTRUNC, Ty, Legal); setOperationAction(ISD::FROUND, Ty, Legal); setOperationAction(ISD::FROUNDEVEN, Ty, Legal); setOperationAction(ISD::FMINNUM, Ty, Legal); setOperationAction(ISD::FMAXNUM, Ty, Legal); setOperationAction(ISD::FMINIMUM, Ty, Legal); setOperationAction(ISD::FMAXIMUM, Ty, Legal); setOperationAction(ISD::LROUND, Ty, Legal); setOperationAction(ISD::LLROUND, Ty, Legal); setOperationAction(ISD::LRINT, Ty, Legal); setOperationAction(ISD::LLRINT, Ty, Legal); } if (Subtarget->hasFullFP16()) { setOperationAction(ISD::FNEARBYINT, MVT::f16, Legal); setOperationAction(ISD::FFLOOR, MVT::f16, Legal); setOperationAction(ISD::FCEIL, MVT::f16, Legal); setOperationAction(ISD::FRINT, MVT::f16, Legal); setOperationAction(ISD::FTRUNC, MVT::f16, Legal); setOperationAction(ISD::FROUND, MVT::f16, Legal); setOperationAction(ISD::FROUNDEVEN, MVT::f16, Legal); setOperationAction(ISD::FMINNUM, MVT::f16, Legal); setOperationAction(ISD::FMAXNUM, MVT::f16, Legal); setOperationAction(ISD::FMINIMUM, MVT::f16, Legal); setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal); } setOperationAction(ISD::PREFETCH, MVT::Other, Custom); setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom); setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, 
Custom); setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom); setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom); setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom); // Generate outline atomics library calls only if LSE was not specified for // subtarget if (Subtarget->outlineAtomics() && !Subtarget->hasLSE()) { setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, LibCall); setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, LibCall); setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, LibCall); setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, LibCall); setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, LibCall); setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, LibCall); setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, LibCall); setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, LibCall); setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, LibCall); setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, LibCall); setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, LibCall); setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, LibCall); setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, LibCall); setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, LibCall); setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, LibCall); setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, LibCall); setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, LibCall); setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i8, LibCall); setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i16, LibCall); setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i32, LibCall); setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i64, LibCall); setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, LibCall); setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, LibCall); setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, LibCall); setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, LibCall); #define LCALLNAMES(A, B, N) \ setLibcallName(A##N##_RELAX, #B #N "_relax"); \ 
setLibcallName(A##N##_ACQ, #B #N "_acq"); \ setLibcallName(A##N##_REL, #B #N "_rel"); \ setLibcallName(A##N##_ACQ_REL, #B #N "_acq_rel"); #define LCALLNAME4(A, B) \ LCALLNAMES(A, B, 1) \ LCALLNAMES(A, B, 2) LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8) #define LCALLNAME5(A, B) \ LCALLNAMES(A, B, 1) \ LCALLNAMES(A, B, 2) \ LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8) LCALLNAMES(A, B, 16) LCALLNAME5(RTLIB::OUTLINE_ATOMIC_CAS, __aarch64_cas) LCALLNAME4(RTLIB::OUTLINE_ATOMIC_SWP, __aarch64_swp) LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDADD, __aarch64_ldadd) LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDSET, __aarch64_ldset) LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDCLR, __aarch64_ldclr) LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDEOR, __aarch64_ldeor) #undef LCALLNAMES #undef LCALLNAME4 #undef LCALLNAME5 } // 128-bit loads and stores can be done without expanding setOperationAction(ISD::LOAD, MVT::i128, Custom); setOperationAction(ISD::STORE, MVT::i128, Custom); // 256 bit non-temporal stores can be lowered to STNP. Do this as part of the // custom lowering, as there are no un-paired non-temporal stores and // legalization will break up 256 bit inputs. setOperationAction(ISD::STORE, MVT::v32i8, Custom); setOperationAction(ISD::STORE, MVT::v16i16, Custom); setOperationAction(ISD::STORE, MVT::v16f16, Custom); setOperationAction(ISD::STORE, MVT::v8i32, Custom); setOperationAction(ISD::STORE, MVT::v8f32, Custom); setOperationAction(ISD::STORE, MVT::v4f64, Custom); setOperationAction(ISD::STORE, MVT::v4i64, Custom); // Lower READCYCLECOUNTER using an mrs from PMCCNTR_EL0. // This requires the Performance Monitors extension. if (Subtarget->hasPerfMon()) setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { // Issue __sincos_stret if available. 
setOperationAction(ISD::FSINCOS, MVT::f64, Custom); setOperationAction(ISD::FSINCOS, MVT::f32, Custom); } else { setOperationAction(ISD::FSINCOS, MVT::f64, Expand); setOperationAction(ISD::FSINCOS, MVT::f32, Expand); } if (Subtarget->getTargetTriple().isOSMSVCRT()) { // MSVCRT doesn't have powi; fall back to pow setLibcallName(RTLIB::POWI_F32, nullptr); setLibcallName(RTLIB::POWI_F64, nullptr); } // Make floating-point constants legal for the large code model, so they don't // become loads from the constant pool. if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) { setOperationAction(ISD::ConstantFP, MVT::f32, Legal); setOperationAction(ISD::ConstantFP, MVT::f64, Legal); } // AArch64 does not have floating-point extending loads, i1 sign-extending // load, floating-point truncating stores, or v2i32->v2i16 truncating store. for (MVT VT : MVT::fp_valuetypes()) { setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand); setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand); setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand); } for (MVT VT : MVT::integer_valuetypes()) setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand); setTruncStoreAction(MVT::f32, MVT::f16, Expand); setTruncStoreAction(MVT::f64, MVT::f32, Expand); setTruncStoreAction(MVT::f64, MVT::f16, Expand); setTruncStoreAction(MVT::f128, MVT::f80, Expand); setTruncStoreAction(MVT::f128, MVT::f64, Expand); setTruncStoreAction(MVT::f128, MVT::f32, Expand); setTruncStoreAction(MVT::f128, MVT::f16, Expand); setOperationAction(ISD::BITCAST, MVT::i16, Custom); setOperationAction(ISD::BITCAST, MVT::f16, Custom); setOperationAction(ISD::BITCAST, MVT::bf16, Custom); // Indexed loads and stores are supported. 
for (unsigned im = (unsigned)ISD::PRE_INC; im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { setIndexedLoadAction(im, MVT::i8, Legal); setIndexedLoadAction(im, MVT::i16, Legal); setIndexedLoadAction(im, MVT::i32, Legal); setIndexedLoadAction(im, MVT::i64, Legal); setIndexedLoadAction(im, MVT::f64, Legal); setIndexedLoadAction(im, MVT::f32, Legal); setIndexedLoadAction(im, MVT::f16, Legal); setIndexedLoadAction(im, MVT::bf16, Legal); setIndexedStoreAction(im, MVT::i8, Legal); setIndexedStoreAction(im, MVT::i16, Legal); setIndexedStoreAction(im, MVT::i32, Legal); setIndexedStoreAction(im, MVT::i64, Legal); setIndexedStoreAction(im, MVT::f64, Legal); setIndexedStoreAction(im, MVT::f32, Legal); setIndexedStoreAction(im, MVT::f16, Legal); setIndexedStoreAction(im, MVT::bf16, Legal); } // Trap. setOperationAction(ISD::TRAP, MVT::Other, Legal); setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal); // We combine OR nodes for bitfield operations. setTargetDAGCombine(ISD::OR); // Try to create BICs for vector ANDs. setTargetDAGCombine(ISD::AND); // Vector add and sub nodes may conceal a high-half opportunity. // Also, try to fold ADD into CSINC/CSINV.. setTargetDAGCombine(ISD::ADD); setTargetDAGCombine(ISD::ABS); setTargetDAGCombine(ISD::SUB); setTargetDAGCombine(ISD::SRL); setTargetDAGCombine(ISD::XOR); setTargetDAGCombine(ISD::SINT_TO_FP); setTargetDAGCombine(ISD::UINT_TO_FP); // TODO: Do the same for FP_TO_*INT_SAT. 
setTargetDAGCombine(ISD::FP_TO_SINT); setTargetDAGCombine(ISD::FP_TO_UINT); setTargetDAGCombine(ISD::FDIV); // Try and combine setcc with csel setTargetDAGCombine(ISD::SETCC); setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); setTargetDAGCombine(ISD::ANY_EXTEND); setTargetDAGCombine(ISD::ZERO_EXTEND); setTargetDAGCombine(ISD::SIGN_EXTEND); setTargetDAGCombine(ISD::VECTOR_SPLICE); setTargetDAGCombine(ISD::SIGN_EXTEND_INREG); setTargetDAGCombine(ISD::TRUNCATE); setTargetDAGCombine(ISD::CONCAT_VECTORS); setTargetDAGCombine(ISD::STORE); if (Subtarget->supportsAddressTopByteIgnored()) setTargetDAGCombine(ISD::LOAD); setTargetDAGCombine(ISD::MUL); setTargetDAGCombine(ISD::SELECT); setTargetDAGCombine(ISD::VSELECT); setTargetDAGCombine(ISD::INTRINSIC_VOID); setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); setTargetDAGCombine(ISD::VECREDUCE_ADD); setTargetDAGCombine(ISD::STEP_VECTOR); setTargetDAGCombine(ISD::GlobalAddress); // In case of strict alignment, avoid an excessive number of byte wide stores. MaxStoresPerMemsetOptSize = 8; MaxStoresPerMemset = Subtarget->requiresStrictAlign() ? MaxStoresPerMemsetOptSize : 32; MaxGluedStoresPerMemcpy = 4; MaxStoresPerMemcpyOptSize = 4; MaxStoresPerMemcpy = Subtarget->requiresStrictAlign() ? MaxStoresPerMemcpyOptSize : 16; MaxStoresPerMemmoveOptSize = MaxStoresPerMemmove = 4; MaxLoadsPerMemcmpOptSize = 4; MaxLoadsPerMemcmp = Subtarget->requiresStrictAlign() ? MaxLoadsPerMemcmpOptSize : 8; setStackPointerRegisterToSaveRestore(AArch64::SP); setSchedulingPreference(Sched::Hybrid); EnableExtLdPromotion = true; // Set required alignment. setMinFunctionAlignment(Align(4)); // Set preferred alignments. 
setPrefLoopAlignment(Align(1ULL << STI.getPrefLoopLogAlignment())); setPrefFunctionAlignment(Align(1ULL << STI.getPrefFunctionLogAlignment())); // Only change the limit for entries in a jump table if specified by // the sub target, but not at the command line. unsigned MaxJT = STI.getMaximumJumpTableSize(); if (MaxJT && getMaximumJumpTableSize() == UINT_MAX) setMaximumJumpTableSize(MaxJT); setHasExtractBitsInsn(true); setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); if (Subtarget->hasNEON()) { // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to // silliness like this: setOperationAction(ISD::FABS, MVT::v1f64, Expand); setOperationAction(ISD::FADD, MVT::v1f64, Expand); setOperationAction(ISD::FCEIL, MVT::v1f64, Expand); setOperationAction(ISD::FCOPYSIGN, MVT::v1f64, Expand); setOperationAction(ISD::FCOS, MVT::v1f64, Expand); setOperationAction(ISD::FDIV, MVT::v1f64, Expand); setOperationAction(ISD::FFLOOR, MVT::v1f64, Expand); setOperationAction(ISD::FMA, MVT::v1f64, Expand); setOperationAction(ISD::FMUL, MVT::v1f64, Expand); setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Expand); setOperationAction(ISD::FNEG, MVT::v1f64, Expand); setOperationAction(ISD::FPOW, MVT::v1f64, Expand); setOperationAction(ISD::FREM, MVT::v1f64, Expand); setOperationAction(ISD::FROUND, MVT::v1f64, Expand); setOperationAction(ISD::FROUNDEVEN, MVT::v1f64, Expand); setOperationAction(ISD::FRINT, MVT::v1f64, Expand); setOperationAction(ISD::FSIN, MVT::v1f64, Expand); setOperationAction(ISD::FSINCOS, MVT::v1f64, Expand); setOperationAction(ISD::FSQRT, MVT::v1f64, Expand); setOperationAction(ISD::FSUB, MVT::v1f64, Expand); setOperationAction(ISD::FTRUNC, MVT::v1f64, Expand); setOperationAction(ISD::SETCC, MVT::v1f64, Expand); setOperationAction(ISD::BR_CC, MVT::v1f64, Expand); setOperationAction(ISD::SELECT, MVT::v1f64, Expand); setOperationAction(ISD::SELECT_CC, MVT::v1f64, Expand); setOperationAction(ISD::FP_EXTEND, MVT::v1f64, Expand); 
setOperationAction(ISD::FP_TO_SINT, MVT::v1i64, Expand); setOperationAction(ISD::FP_TO_UINT, MVT::v1i64, Expand); setOperationAction(ISD::SINT_TO_FP, MVT::v1i64, Expand); setOperationAction(ISD::UINT_TO_FP, MVT::v1i64, Expand); setOperationAction(ISD::FP_ROUND, MVT::v1f64, Expand); setOperationAction(ISD::MUL, MVT::v1i64, Expand); // AArch64 doesn't have a direct vector ->f32 conversion instructions for // elements smaller than i32, so promote the input to i32 first. setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i8, MVT::v4i32); setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i8, MVT::v4i32); setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i8, MVT::v8i32); setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i8, MVT::v8i32); setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v16i8, MVT::v16i32); setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v16i8, MVT::v16i32); // Similarly, there is no direct i32 -> f64 vector conversion instruction. setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom); setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Custom); // Or, direct i32 -> f16 vector conversion. Set it so custom, so the // conversion happens in two steps: v4i32 -> v4f32 -> v4f16 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom); if (Subtarget->hasFullFP16()) { setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom); } else { // when AArch64 doesn't have fullfp16 support, promote the input // to i32 first. 
setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i16, MVT::v4i32); setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i16, MVT::v4i32); setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i16, MVT::v8i32); setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i16, MVT::v8i32); } setOperationAction(ISD::CTLZ, MVT::v1i64, Expand); setOperationAction(ISD::CTLZ, MVT::v2i64, Expand); setOperationAction(ISD::BITREVERSE, MVT::v8i8, Legal); setOperationAction(ISD::BITREVERSE, MVT::v16i8, Legal); setOperationAction(ISD::BITREVERSE, MVT::v2i32, Custom); setOperationAction(ISD::BITREVERSE, MVT::v4i32, Custom); setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom); setOperationAction(ISD::BITREVERSE, MVT::v2i64, Custom); // AArch64 doesn't have MUL.2d: setOperationAction(ISD::MUL, MVT::v2i64, Expand); // Custom handling for some quad-vector types to detect MULL. setOperationAction(ISD::MUL, MVT::v8i16, Custom); setOperationAction(ISD::MUL, MVT::v4i32, Custom); setOperationAction(ISD::MUL, MVT::v2i64, Custom); // Saturates for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { setOperationAction(ISD::SADDSAT, VT, Legal); setOperationAction(ISD::UADDSAT, VT, Legal); setOperationAction(ISD::SSUBSAT, VT, Legal); setOperationAction(ISD::USUBSAT, VT, Legal); } for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16, MVT::v4i32}) { setOperationAction(ISD::ABDS, VT, Legal); setOperationAction(ISD::ABDU, VT, Legal); } // Vector reductions for (MVT VT : { MVT::v4f16, MVT::v2f32, MVT::v8f16, MVT::v4f32, MVT::v2f64 }) { if (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()) { setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); setOperationAction(ISD::VECREDUCE_FADD, VT, Legal); } } for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16, MVT::v4i32 }) { setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); 
setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); } setOperationAction(ISD::VECREDUCE_ADD, MVT::v2i64, Custom); setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal); setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand); // Likewise, narrowing and extending vector loads/stores aren't handled // directly. for (MVT VT : MVT::fixedlen_vector_valuetypes()) { setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) { setOperationAction(ISD::MULHS, VT, Legal); setOperationAction(ISD::MULHU, VT, Legal); } else { setOperationAction(ISD::MULHS, VT, Expand); setOperationAction(ISD::MULHU, VT, Expand); } setOperationAction(ISD::SMUL_LOHI, VT, Expand); setOperationAction(ISD::UMUL_LOHI, VT, Expand); setOperationAction(ISD::BSWAP, VT, Expand); setOperationAction(ISD::CTTZ, VT, Expand); for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { setTruncStoreAction(VT, InnerVT, Expand); setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); } } // AArch64 has implementations of a lot of rounding-like FP operations. 
for (MVT Ty : {MVT::v2f32, MVT::v4f32, MVT::v2f64}) { setOperationAction(ISD::FFLOOR, Ty, Legal); setOperationAction(ISD::FNEARBYINT, Ty, Legal); setOperationAction(ISD::FCEIL, Ty, Legal); setOperationAction(ISD::FRINT, Ty, Legal); setOperationAction(ISD::FTRUNC, Ty, Legal); setOperationAction(ISD::FROUND, Ty, Legal); setOperationAction(ISD::FROUNDEVEN, Ty, Legal); } if (Subtarget->hasFullFP16()) { for (MVT Ty : {MVT::v4f16, MVT::v8f16}) { setOperationAction(ISD::FFLOOR, Ty, Legal); setOperationAction(ISD::FNEARBYINT, Ty, Legal); setOperationAction(ISD::FCEIL, Ty, Legal); setOperationAction(ISD::FRINT, Ty, Legal); setOperationAction(ISD::FTRUNC, Ty, Legal); setOperationAction(ISD::FROUND, Ty, Legal); setOperationAction(ISD::FROUNDEVEN, Ty, Legal); } } if (Subtarget->hasSVE()) setOperationAction(ISD::VSCALE, MVT::i32, Custom); setTruncStoreAction(MVT::v4i16, MVT::v4i8, Custom); setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Custom); setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Custom); setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Custom); setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i8, Custom); setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Custom); setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Custom); } if (Subtarget->hasSVE()) { for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64}) { setOperationAction(ISD::BITREVERSE, VT, Custom); setOperationAction(ISD::BSWAP, VT, Custom); setOperationAction(ISD::CTLZ, VT, Custom); setOperationAction(ISD::CTPOP, VT, Custom); setOperationAction(ISD::CTTZ, VT, Custom); setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); setOperationAction(ISD::UINT_TO_FP, VT, Custom); setOperationAction(ISD::SINT_TO_FP, VT, Custom); setOperationAction(ISD::FP_TO_UINT, VT, Custom); setOperationAction(ISD::FP_TO_SINT, VT, Custom); setOperationAction(ISD::MGATHER, VT, Custom); setOperationAction(ISD::MSCATTER, VT, Custom); setOperationAction(ISD::MLOAD, VT, Custom); 
setOperationAction(ISD::MUL, VT, Custom); setOperationAction(ISD::MULHS, VT, Custom); setOperationAction(ISD::MULHU, VT, Custom); setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); setOperationAction(ISD::VECTOR_SPLICE, VT, Custom); setOperationAction(ISD::SELECT, VT, Custom); setOperationAction(ISD::SETCC, VT, Custom); setOperationAction(ISD::SDIV, VT, Custom); setOperationAction(ISD::UDIV, VT, Custom); setOperationAction(ISD::SMIN, VT, Custom); setOperationAction(ISD::UMIN, VT, Custom); setOperationAction(ISD::SMAX, VT, Custom); setOperationAction(ISD::UMAX, VT, Custom); setOperationAction(ISD::SHL, VT, Custom); setOperationAction(ISD::SRL, VT, Custom); setOperationAction(ISD::SRA, VT, Custom); setOperationAction(ISD::ABS, VT, Custom); setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); setOperationAction(ISD::VECREDUCE_AND, VT, Custom); setOperationAction(ISD::VECREDUCE_OR, VT, Custom); setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); setOperationAction(ISD::UMUL_LOHI, VT, Expand); setOperationAction(ISD::SMUL_LOHI, VT, Expand); setOperationAction(ISD::SELECT_CC, VT, Expand); setOperationAction(ISD::ROTL, VT, Expand); setOperationAction(ISD::ROTR, VT, Expand); } // Illegal unpacked integer vector types. for (auto VT : {MVT::nxv8i8, MVT::nxv4i16, MVT::nxv2i32}) { setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); } // Legalize unpacked bitcasts to REINTERPRET_CAST. 
for (auto VT : {MVT::nxv2i16, MVT::nxv4i16, MVT::nxv2i32, MVT::nxv2bf16, MVT::nxv2f16, MVT::nxv4f16, MVT::nxv2f32}) setOperationAction(ISD::BITCAST, VT, Custom); for (auto VT : {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1}) { setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); setOperationAction(ISD::SELECT, VT, Custom); setOperationAction(ISD::SETCC, VT, Custom); setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); setOperationAction(ISD::TRUNCATE, VT, Custom); setOperationAction(ISD::VECREDUCE_AND, VT, Custom); setOperationAction(ISD::VECREDUCE_OR, VT, Custom); setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); setOperationAction(ISD::SELECT_CC, VT, Expand); setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); // There are no legal MVT::nxv16f## based types. if (VT != MVT::nxv16i1) { setOperationAction(ISD::SINT_TO_FP, VT, Custom); setOperationAction(ISD::UINT_TO_FP, VT, Custom); } } // NEON doesn't support masked loads/stores/gathers/scatters, but SVE does for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v1f64, MVT::v2f64, MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32, MVT::v4i32, MVT::v1i64, MVT::v2i64}) { setOperationAction(ISD::MLOAD, VT, Custom); setOperationAction(ISD::MSTORE, VT, Custom); setOperationAction(ISD::MGATHER, VT, Custom); setOperationAction(ISD::MSCATTER, VT, Custom); } for (MVT VT : MVT::fp_scalable_vector_valuetypes()) { for (MVT InnerVT : MVT::fp_scalable_vector_valuetypes()) { // Avoid marking truncating FP stores as legal to prevent the // DAGCombiner from creating unsupported truncating stores. setTruncStoreAction(VT, InnerVT, Expand); // SVE does not have floating-point extending loads. 
setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); } } // SVE supports truncating stores of 64 and 128-bit vectors setTruncStoreAction(MVT::v2i64, MVT::v2i8, Custom); setTruncStoreAction(MVT::v2i64, MVT::v2i16, Custom); setTruncStoreAction(MVT::v2i64, MVT::v2i32, Custom); setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom); setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom); for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv2f64}) { setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); setOperationAction(ISD::MGATHER, VT, Custom); setOperationAction(ISD::MSCATTER, VT, Custom); setOperationAction(ISD::MLOAD, VT, Custom); setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); setOperationAction(ISD::SELECT, VT, Custom); setOperationAction(ISD::FADD, VT, Custom); setOperationAction(ISD::FDIV, VT, Custom); setOperationAction(ISD::FMA, VT, Custom); setOperationAction(ISD::FMAXIMUM, VT, Custom); setOperationAction(ISD::FMAXNUM, VT, Custom); setOperationAction(ISD::FMINIMUM, VT, Custom); setOperationAction(ISD::FMINNUM, VT, Custom); setOperationAction(ISD::FMUL, VT, Custom); setOperationAction(ISD::FNEG, VT, Custom); setOperationAction(ISD::FSUB, VT, Custom); setOperationAction(ISD::FCEIL, VT, Custom); setOperationAction(ISD::FFLOOR, VT, Custom); setOperationAction(ISD::FNEARBYINT, VT, Custom); setOperationAction(ISD::FRINT, VT, Custom); setOperationAction(ISD::FROUND, VT, Custom); setOperationAction(ISD::FROUNDEVEN, VT, Custom); setOperationAction(ISD::FTRUNC, VT, Custom); setOperationAction(ISD::FSQRT, VT, Custom); setOperationAction(ISD::FABS, VT, Custom); setOperationAction(ISD::FP_EXTEND, VT, Custom); setOperationAction(ISD::FP_ROUND, VT, Custom); setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); 
setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); setOperationAction(ISD::VECTOR_SPLICE, VT, Custom); setOperationAction(ISD::SELECT_CC, VT, Expand); } for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) { setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); setOperationAction(ISD::MGATHER, VT, Custom); setOperationAction(ISD::MSCATTER, VT, Custom); setOperationAction(ISD::MLOAD, VT, Custom); } setOperationAction(ISD::SPLAT_VECTOR, MVT::nxv8bf16, Custom); setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom); setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); // NOTE: Currently this has to happen after computeRegisterProperties rather // than the preferred option of combining it with the addRegisterClass call. if (Subtarget->useSVEForFixedLengthVectors()) { for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) if (useSVEForFixedLengthVectorVT(VT)) addTypeForFixedLengthSVE(VT); for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) if (useSVEForFixedLengthVectorVT(VT)) addTypeForFixedLengthSVE(VT); // 64bit results can mean a bigger than NEON input. for (auto VT : {MVT::v8i8, MVT::v4i16}) setOperationAction(ISD::TRUNCATE, VT, Custom); setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom); // 128bit results imply a bigger than NEON input. for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) setOperationAction(ISD::TRUNCATE, VT, Custom); for (auto VT : {MVT::v8f16, MVT::v4f32}) setOperationAction(ISD::FP_ROUND, VT, Custom); // These operations are not supported on NEON but SVE can do them. 
setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom); setOperationAction(ISD::CTLZ, MVT::v1i64, Custom); setOperationAction(ISD::CTLZ, MVT::v2i64, Custom); setOperationAction(ISD::CTTZ, MVT::v1i64, Custom); setOperationAction(ISD::MUL, MVT::v1i64, Custom); setOperationAction(ISD::MUL, MVT::v2i64, Custom); setOperationAction(ISD::MULHS, MVT::v1i64, Custom); setOperationAction(ISD::MULHS, MVT::v2i64, Custom); setOperationAction(ISD::MULHU, MVT::v1i64, Custom); setOperationAction(ISD::MULHU, MVT::v2i64, Custom); setOperationAction(ISD::SDIV, MVT::v8i8, Custom); setOperationAction(ISD::SDIV, MVT::v16i8, Custom); setOperationAction(ISD::SDIV, MVT::v4i16, Custom); setOperationAction(ISD::SDIV, MVT::v8i16, Custom); setOperationAction(ISD::SDIV, MVT::v2i32, Custom); setOperationAction(ISD::SDIV, MVT::v4i32, Custom); setOperationAction(ISD::SDIV, MVT::v1i64, Custom); setOperationAction(ISD::SDIV, MVT::v2i64, Custom); setOperationAction(ISD::SMAX, MVT::v1i64, Custom); setOperationAction(ISD::SMAX, MVT::v2i64, Custom); setOperationAction(ISD::SMIN, MVT::v1i64, Custom); setOperationAction(ISD::SMIN, MVT::v2i64, Custom); setOperationAction(ISD::UDIV, MVT::v8i8, Custom); setOperationAction(ISD::UDIV, MVT::v16i8, Custom); setOperationAction(ISD::UDIV, MVT::v4i16, Custom); setOperationAction(ISD::UDIV, MVT::v8i16, Custom); setOperationAction(ISD::UDIV, MVT::v2i32, Custom); setOperationAction(ISD::UDIV, MVT::v4i32, Custom); setOperationAction(ISD::UDIV, MVT::v1i64, Custom); setOperationAction(ISD::UDIV, MVT::v2i64, Custom); setOperationAction(ISD::UMAX, MVT::v1i64, Custom); setOperationAction(ISD::UMAX, MVT::v2i64, Custom); setOperationAction(ISD::UMIN, MVT::v1i64, Custom); setOperationAction(ISD::UMIN, MVT::v2i64, Custom); setOperationAction(ISD::VECREDUCE_SMAX, MVT::v2i64, Custom); setOperationAction(ISD::VECREDUCE_SMIN, MVT::v2i64, Custom); setOperationAction(ISD::VECREDUCE_UMAX, MVT::v2i64, Custom); setOperationAction(ISD::VECREDUCE_UMIN, MVT::v2i64, Custom); // Int 
      // operations with no NEON support.
      for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
                      MVT::v2i32, MVT::v4i32, MVT::v2i64}) {
        setOperationAction(ISD::BITREVERSE, VT, Custom);
        setOperationAction(ISD::CTTZ, VT, Custom);
        setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
        setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
        setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
      }

      // FP operations with no NEON support.
      for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32,
                      MVT::v1f64, MVT::v2f64})
        setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);

      // Use SVE for vectors with more than 2 elements.
      for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v4f32})
        setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
    }

    // Predicate (nxvNi1) splices are handled by promoting to the matching
    // full-width integer vector type.
    setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv2i1, MVT::nxv2i64);
    setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv4i1, MVT::nxv4i32);
    setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv8i1, MVT::nxv8i16);
    setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv16i1, MVT::nxv16i8);
  }

  PredictableSelectIsExpensive = Subtarget->predictableSelectIsExpensive();
}

/// Set up the operation-legalization actions for the NEON vector type \p VT
/// (Legal/Custom/Expand/Promote per ISD opcode). Called from
/// addDRTypeForNEON/addQRTypeForNEON after the register class for \p VT has
/// been registered.
void AArch64TargetLowering::addTypeForNEON(MVT VT) {
  assert(VT.isVector() && "VT should be a vector type");

  // FP vector loads/stores are performed via the same-width integer vector
  // type.
  if (VT.isFloatingPoint()) {
    MVT PromoteTo = EVT(VT).changeVectorElementTypeToInteger().getSimpleVT();
    setOperationPromotedToType(ISD::LOAD, VT, PromoteTo);
    setOperationPromotedToType(ISD::STORE, VT, PromoteTo);
  }

  // Mark vector float intrinsics as expand.
  if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) {
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
  }

  // But we do support custom-lowering for FCOPYSIGN.
  if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
      ((VT == MVT::v4f16 || VT == MVT::v8f16) && Subtarget->hasFullFP16()))
    setOperationAction(ISD::FCOPYSIGN, VT, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
  setOperationAction(ISD::SRA, VT, Custom);
  setOperationAction(ISD::SRL, VT, Custom);
  setOperationAction(ISD::SHL, VT, Custom);
  setOperationAction(ISD::OR, VT, Custom);
  setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);

  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  // No extending loads into any NEON vector type.
  for (MVT InnerVT : MVT::all_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

  // CNT supports only B element sizes, then use UADDLP to widen.
  if (VT != MVT::v8i8 && VT != MVT::v16i8)
    setOperationAction(ISD::CTPOP, VT, Custom);

  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);

  setOperationAction(ISD::FP_TO_SINT, VT, Custom);
  setOperationAction(ISD::FP_TO_UINT, VT, Custom);

  if (!VT.isFloatingPoint())
    setOperationAction(ISD::ABS, VT, Legal);

  // [SU][MIN|MAX] are available for all NEON types apart from i64.
  if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
    for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);

  // F[MIN|MAX][NUM|NAN] are available for all FP NEON types.
  if (VT.isFloatingPoint() && VT.getVectorElementType() != MVT::bf16 &&
      (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()))
    for (unsigned Opcode :
         {ISD::FMINIMUM, ISD::FMAXIMUM, ISD::FMINNUM, ISD::FMAXNUM})
      setOperationAction(Opcode, VT, Legal);

  // Indexed (pre/post-inc) loads and stores — little-endian only.
  // NOTE(review): presumably disabled on big-endian because of the ld1/st1
  // lane-ordering differences there — confirm against the BE lowering.
  if (Subtarget->isLittleEndian()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
    }
  }
}

/// Set up the operation-legalization actions for the fixed-length vector type
/// \p VT, which will be lowered via SVE's scalable-vector support.
void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
  assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");

  // By default everything must be expanded.
  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
    setOperationAction(Op, VT, Expand);

  // We use EXTRACT_SUBVECTOR to "cast" a scalable vector to a fixed length one.
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

  // Expand the FP condition codes that SVE compares cannot produce directly;
  // legalization rewrites them in terms of the supported ones.
  if (VT.isFloatingPoint()) {
    setCondCodeAction(ISD::SETO, VT, Expand);
    setCondCodeAction(ISD::SETOLT, VT, Expand);
    setCondCodeAction(ISD::SETLT, VT, Expand);
    setCondCodeAction(ISD::SETOLE, VT, Expand);
    setCondCodeAction(ISD::SETLE, VT, Expand);
    setCondCodeAction(ISD::SETULT, VT, Expand);
    setCondCodeAction(ISD::SETULE, VT, Expand);
    setCondCodeAction(ISD::SETUGE, VT, Expand);
    setCondCodeAction(ISD::SETUGT, VT, Expand);
    setCondCodeAction(ISD::SETUEQ, VT, Expand);
    setCondCodeAction(ISD::SETUNE, VT, Expand);
  }

  // Mark integer truncating stores as having custom lowering.
  // Walks every narrower integer element type from i8 up to (but excluding)
  // VT's own element width, doubling the width each step.
  if (VT.isInteger()) {
    MVT InnerVT = VT.changeVectorElementType(MVT::i8);
    while (InnerVT != VT) {
      setTruncStoreAction(VT, InnerVT, Custom);
      InnerVT = InnerVT.changeVectorElementType(
          MVT::getIntegerVT(2 * InnerVT.getScalarSizeInBits()));
    }
  }

  // Lower fixed length vector operations to scalable equivalents.
  // Every operation listed here gets a Custom lowering that converts the
  // fixed-length operation into its SVE scalable-vector equivalent.
  setOperationAction(ISD::ABS, VT, Custom);
  setOperationAction(ISD::ADD, VT, Custom);
  setOperationAction(ISD::AND, VT, Custom);
  setOperationAction(ISD::ANY_EXTEND, VT, Custom);
  setOperationAction(ISD::BITCAST, VT, Custom);
  setOperationAction(ISD::BITREVERSE, VT, Custom);
  setOperationAction(ISD::BSWAP, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
  setOperationAction(ISD::CTLZ, VT, Custom);
  setOperationAction(ISD::CTPOP, VT, Custom);
  setOperationAction(ISD::CTTZ, VT, Custom);
  setOperationAction(ISD::FABS, VT, Custom);
  setOperationAction(ISD::FADD, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::FCEIL, VT, Custom);
  setOperationAction(ISD::FDIV, VT, Custom);
  setOperationAction(ISD::FFLOOR, VT, Custom);
  setOperationAction(ISD::FMA, VT, Custom);
  setOperationAction(ISD::FMAXIMUM, VT, Custom);
  setOperationAction(ISD::FMAXNUM, VT, Custom);
  setOperationAction(ISD::FMINIMUM, VT, Custom);
  setOperationAction(ISD::FMINNUM, VT, Custom);
  setOperationAction(ISD::FMUL, VT, Custom);
  setOperationAction(ISD::FNEARBYINT, VT, Custom);
  setOperationAction(ISD::FNEG, VT, Custom);
  setOperationAction(ISD::FP_EXTEND, VT, Custom);
  setOperationAction(ISD::FP_ROUND, VT, Custom);
  setOperationAction(ISD::FP_TO_SINT, VT, Custom);
  setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  setOperationAction(ISD::FRINT, VT, Custom);
  setOperationAction(ISD::FROUND, VT, Custom);
  setOperationAction(ISD::FROUNDEVEN, VT, Custom);
  setOperationAction(ISD::FSQRT, VT, Custom);
  setOperationAction(ISD::FSUB, VT, Custom);
  setOperationAction(ISD::FTRUNC, VT, Custom);
  setOperationAction(ISD::LOAD, VT, Custom);
  setOperationAction(ISD::MGATHER, VT, Custom);
  setOperationAction(ISD::MLOAD, VT, Custom);
  setOperationAction(ISD::MSCATTER, VT, Custom);
  setOperationAction(ISD::MSTORE, VT, Custom);
  setOperationAction(ISD::MUL, VT, Custom);
  setOperationAction(ISD::MULHS, VT, Custom);
  setOperationAction(ISD::MULHU, VT, Custom);
  setOperationAction(ISD::OR, VT, Custom);
  setOperationAction(ISD::SDIV, VT, Custom);
  setOperationAction(ISD::SELECT, VT, Custom);
  setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::SHL, VT, Custom);
  setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
  setOperationAction(ISD::SINT_TO_FP, VT, Custom);
  setOperationAction(ISD::SMAX, VT, Custom);
  setOperationAction(ISD::SMIN, VT, Custom);
  setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
  setOperationAction(ISD::SRA, VT, Custom);
  setOperationAction(ISD::SRL, VT, Custom);
  setOperationAction(ISD::STORE, VT, Custom);
  setOperationAction(ISD::SUB, VT, Custom);
  setOperationAction(ISD::TRUNCATE, VT, Custom);
  setOperationAction(ISD::UDIV, VT, Custom);
  setOperationAction(ISD::UINT_TO_FP, VT, Custom);
  setOperationAction(ISD::UMAX, VT, Custom);
  setOperationAction(ISD::UMIN, VT, Custom);
  setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
  setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
  setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
  setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
  setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
  setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
  setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
  setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
  setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::VSELECT, VT, Custom);
  setOperationAction(ISD::XOR, VT, Custom);
  setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
}

/// Register \p VT with the 64-bit FPR64 (D) register class and set up its
/// NEON operation actions.
void AArch64TargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &AArch64::FPR64RegClass);
  addTypeForNEON(VT);
}

/// Register \p VT with the 128-bit FPR128 (Q) register class and set up its
/// NEON operation actions.
void AArch64TargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT,
// (continued from addQRTypeForNEON on the previous line)
                   &AArch64::FPR128RegClass);
  addTypeForNEON(VT);
}

/// Result type of a setcc on VT: i32 for scalars, an i1 vector with matching
/// element count for scalable vectors, and the integer-element variant of VT
/// for fixed-length vectors.
EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &,
                                              LLVMContext &C, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  if (VT.isScalableVector())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}

/// Try to replace the immediate Imm of a Size-bit AND/OR/XOR with a value
/// that is encodable as an AArch64 logical (bitmask) immediate, changing only
/// bits outside Demanded.  On success the node is rewritten via TLO (using
/// machine opcode NewOpc) and true is returned.
static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
                               const APInt &Demanded,
                               TargetLowering::TargetLoweringOpt &TLO,
                               unsigned NewOpc) {
  uint64_t OldImm = Imm, NewImm, Enc;
  uint64_t Mask = ((uint64_t)(-1LL) >> (64 - Size)), OrigMask = Mask;

  // Return if the immediate is already all zeros, all ones, a bimm32 or a
  // bimm64.
  if (Imm == 0 || Imm == Mask ||
      AArch64_AM::isLogicalImmediate(Imm & Mask, Size))
    return false;

  unsigned EltSize = Size;
  uint64_t DemandedBits = Demanded.getZExtValue();

  // Clear bits that are not demanded.
  Imm &= DemandedBits;

  while (true) {
    // The goal here is to set the non-demanded bits in a way that minimizes
    // the number of switching between 0 and 1. In order to achieve this goal,
    // we set the non-demanded bits to the value of the preceding demanded bits.
    // For example, if we have an immediate 0bx10xx0x1 ('x' indicates a
    // non-demanded bit), we copy bit0 (1) to the least significant 'x',
    // bit2 (0) to 'xx', and bit6 (1) to the most significant 'x'.
    // The final result is 0b11000011.
    uint64_t NonDemandedBits = ~DemandedBits;
    uint64_t InvertedImm = ~Imm & DemandedBits;
    uint64_t RotatedImm =
        ((InvertedImm << 1) | (InvertedImm >> (EltSize - 1) & 1)) &
        NonDemandedBits;
    uint64_t Sum = RotatedImm + NonDemandedBits;
    bool Carry = NonDemandedBits & ~Sum & (1ULL << (EltSize - 1));
    uint64_t Ones = (Sum + Carry) & NonDemandedBits;
    NewImm = (Imm | Ones) & Mask;

    // If NewImm or its bitwise NOT is a shifted mask, it is a bitmask immediate
    // or all-ones or all-zeros, in which case we can stop searching. Otherwise,
    // we halve the element size and continue the search.
    if (isShiftedMask_64(NewImm) || isShiftedMask_64(~(NewImm | ~Mask)))
      break;

    // We cannot shrink the element size any further if it is 2-bits.
    if (EltSize == 2)
      return false;

    EltSize /= 2;
    Mask >>= EltSize;
    uint64_t Hi = Imm >> EltSize, DemandedBitsHi = DemandedBits >> EltSize;

    // Return if there is mismatch in any of the demanded bits of Imm and Hi.
    if (((Imm ^ Hi) & (DemandedBits & DemandedBitsHi) & Mask) != 0)
      return false;

    // Merge the upper and lower halves of Imm and DemandedBits.
    Imm |= Hi;
    DemandedBits |= DemandedBitsHi;
  }

  ++NumOptimizedImms;

  // Replicate the element across the register width.
  while (EltSize < Size) {
    NewImm |= NewImm << EltSize;
    EltSize *= 2;
  }

  (void)OldImm;
  assert(((OldImm ^ NewImm) & Demanded.getZExtValue()) == 0 &&
         "demanded bits should never be altered");
  assert(OldImm != NewImm && "the new imm shouldn't be equal to the old imm");

  // Create the new constant immediate node.
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue New;

  // If the new constant immediate is all-zeros or all-ones, let the target
  // independent DAG combine optimize this node.
  if (NewImm == 0 || NewImm == OrigMask) {
    New = TLO.DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0),
                          TLO.DAG.getConstant(NewImm, DL, VT));
    // Otherwise, create a machine node so that target independent DAG combine
    // doesn't undo this optimization.
  } else {
    Enc = AArch64_AM::encodeLogicalImmediate(NewImm, Size);
    SDValue EncConst = TLO.DAG.getTargetConstant(Enc, DL, VT);
    New = SDValue(
        TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0);
  }

  return TLO.CombineTo(Op, New);
}

bool AArch64TargetLowering::targetShrinkDemandedConstant(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    TargetLoweringOpt &TLO) const {
  // Delay this optimization to as late as possible.
if (!TLO.LegalOps) return false; if (!EnableOptimizeLogicalImm) return false; EVT VT = Op.getValueType(); if (VT.isVector()) return false; unsigned Size = VT.getSizeInBits(); assert((Size == 32 || Size == 64) && "i32 or i64 is expected after legalization."); // Exit early if we demand all bits. if (DemandedBits.countPopulation() == Size) return false; unsigned NewOpc; switch (Op.getOpcode()) { default: return false; case ISD::AND: NewOpc = Size == 32 ? AArch64::ANDWri : AArch64::ANDXri; break; case ISD::OR: NewOpc = Size == 32 ? AArch64::ORRWri : AArch64::ORRXri; break; case ISD::XOR: NewOpc = Size == 32 ? AArch64::EORWri : AArch64::EORXri; break; } ConstantSDNode *C = dyn_cast(Op.getOperand(1)); if (!C) return false; uint64_t Imm = C->getZExtValue(); return optimizeLogicalImm(Op, Size, Imm, DemandedBits, TLO, NewOpc); } /// computeKnownBitsForTargetNode - Determine which of the bits specified in /// Mask are known to be either zero or one and return them Known. void AArch64TargetLowering::computeKnownBitsForTargetNode( const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const { switch (Op.getOpcode()) { default: break; case AArch64ISD::CSEL: { KnownBits Known2; Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1); Known = KnownBits::commonBits(Known, Known2); break; } case AArch64ISD::LOADgot: case AArch64ISD::ADDlow: { if (!Subtarget->isTargetILP32()) break; // In ILP32 mode all valid pointers are in the low 4GB of the address-space. 
Known.Zero = APInt::getHighBitsSet(64, 32); break; } case ISD::INTRINSIC_W_CHAIN: { ConstantSDNode *CN = cast(Op->getOperand(1)); Intrinsic::ID IntID = static_cast(CN->getZExtValue()); switch (IntID) { default: return; case Intrinsic::aarch64_ldaxr: case Intrinsic::aarch64_ldxr: { unsigned BitWidth = Known.getBitWidth(); EVT VT = cast(Op)->getMemoryVT(); unsigned MemBits = VT.getScalarSizeInBits(); Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); return; } } break; } case ISD::INTRINSIC_WO_CHAIN: case ISD::INTRINSIC_VOID: { unsigned IntNo = cast(Op.getOperand(0))->getZExtValue(); switch (IntNo) { default: break; case Intrinsic::aarch64_neon_umaxv: case Intrinsic::aarch64_neon_uminv: { // Figure out the datatype of the vector operand. The UMINV instruction // will zero extend the result, so we can mark as known zero all the // bits larger than the element datatype. 32-bit or larget doesn't need // this as those are legal types and will be handled by isel directly. MVT VT = Op.getOperand(1).getValueType().getSimpleVT(); unsigned BitWidth = Known.getBitWidth(); if (VT == MVT::v8i8 || VT == MVT::v16i8) { assert(BitWidth >= 8 && "Unexpected width!"); APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8); Known.Zero |= Mask; } else if (VT == MVT::v4i16 || VT == MVT::v8i16) { assert(BitWidth >= 16 && "Unexpected width!"); APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16); Known.Zero |= Mask; } break; } break; } } } } MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL, EVT) const { return MVT::i64; } bool AArch64TargetLowering::allowsMisalignedMemoryAccesses( EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, bool *Fast) const { if (Subtarget->requiresStrictAlign()) return false; if (Fast) { // Some CPUs are fine with unaligned stores except for 128-bit ones. 
// (continued) Fast-path computation of the EVT overload begun on the
// previous line.
    *Fast = !Subtarget->isMisaligned128StoreSlow() || VT.getStoreSize() != 16 ||
            // See comments in performSTORECombine() for more details about
            // these conditions.

            // Code that uses clang vector extensions can mark that it
            // wants unaligned accesses to be treated as fast by
            // underspecifying alignment to be 1 or 2.
            Alignment <= 2 ||

            // Disregard v2i64. Memcpy lowering produces those and splitting
            // them regresses performance on micro-benchmarks and olden/bh.
            VT == MVT::v2i64;
  }
  return true;
}

// Same as above but handling LLTs instead.
bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
    LLT Ty, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  if (Subtarget->requiresStrictAlign())
    return false;

  if (Fast) {
    // Some CPUs are fine with unaligned stores except for 128-bit ones.
    *Fast = !Subtarget->isMisaligned128StoreSlow() ||
            Ty.getSizeInBytes() != 16 ||
            // See comments in performSTORECombine() for more details about
            // these conditions.

            // Code that uses clang vector extensions can mark that it
            // wants unaligned accesses to be treated as fast by
            // underspecifying alignment to be 1 or 2.
            Alignment <= 2 ||

            // Disregard v2i64. Memcpy lowering produces those and splitting
            // them regresses performance on micro-benchmarks and olden/bh.
// (continued) Final disjunct of the LLT overload begun on the previous line.
            Ty == LLT::fixed_vector(2, 64);
  }
  return true;
}

/// Create the target's FastISel instance for this function.
FastISel *
AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                      const TargetLibraryInfo *libInfo) const {
  return AArch64::createFastISel(funcInfo, libInfo);
}

/// Map an AArch64ISD opcode to its name for debug printing; returns nullptr
/// for opcodes not listed below.
const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
#define MAKE_CASE(V)                                                           \
  case V:                                                                      \
    return #V;
  switch ((AArch64ISD::NodeType)Opcode) {
  case AArch64ISD::FIRST_NUMBER:
    break;
    MAKE_CASE(AArch64ISD::CALL) MAKE_CASE(AArch64ISD::ADRP) MAKE_CASE(AArch64ISD::ADR)
    MAKE_CASE(AArch64ISD::ADDlow) MAKE_CASE(AArch64ISD::LOADgot) MAKE_CASE(AArch64ISD::RET_FLAG)
    MAKE_CASE(AArch64ISD::BRCOND) MAKE_CASE(AArch64ISD::CSEL) MAKE_CASE(AArch64ISD::CSINV)
    MAKE_CASE(AArch64ISD::CSNEG) MAKE_CASE(AArch64ISD::CSINC) MAKE_CASE(AArch64ISD::THREAD_POINTER)
    MAKE_CASE(AArch64ISD::TLSDESC_CALLSEQ) MAKE_CASE(AArch64ISD::ADD_PRED) MAKE_CASE(AArch64ISD::MUL_PRED)
    MAKE_CASE(AArch64ISD::MULHS_PRED) MAKE_CASE(AArch64ISD::MULHU_PRED) MAKE_CASE(AArch64ISD::SDIV_PRED)
    MAKE_CASE(AArch64ISD::SHL_PRED) MAKE_CASE(AArch64ISD::SMAX_PRED) MAKE_CASE(AArch64ISD::SMIN_PRED)
    MAKE_CASE(AArch64ISD::SRA_PRED) MAKE_CASE(AArch64ISD::SRL_PRED) MAKE_CASE(AArch64ISD::SUB_PRED)
    MAKE_CASE(AArch64ISD::UDIV_PRED) MAKE_CASE(AArch64ISD::UMAX_PRED) MAKE_CASE(AArch64ISD::UMIN_PRED)
    MAKE_CASE(AArch64ISD::FNEG_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU)
    MAKE_CASE(AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::FCEIL_MERGE_PASSTHRU)
    MAKE_CASE(AArch64ISD::FFLOOR_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU)
    MAKE_CASE(AArch64ISD::FRINT_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::FROUND_MERGE_PASSTHRU)
    MAKE_CASE(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::FTRUNC_MERGE_PASSTHRU)
    MAKE_CASE(AArch64ISD::FP_ROUND_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU)
    MAKE_CASE(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU)
    MAKE_CASE(AArch64ISD::FCVTZU_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::FCVTZS_MERGE_PASSTHRU)
    MAKE_CASE(AArch64ISD::FSQRT_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::FRECPX_MERGE_PASSTHRU)
    MAKE_CASE(AArch64ISD::FABS_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::ABS_MERGE_PASSTHRU)
    MAKE_CASE(AArch64ISD::NEG_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::SETCC_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::ADC) MAKE_CASE(AArch64ISD::SBC) MAKE_CASE(AArch64ISD::ADDS)
    MAKE_CASE(AArch64ISD::SUBS) MAKE_CASE(AArch64ISD::ADCS) MAKE_CASE(AArch64ISD::SBCS)
    MAKE_CASE(AArch64ISD::ANDS) MAKE_CASE(AArch64ISD::CCMP) MAKE_CASE(AArch64ISD::CCMN)
    MAKE_CASE(AArch64ISD::FCCMP) MAKE_CASE(AArch64ISD::FCMP) MAKE_CASE(AArch64ISD::STRICT_FCMP)
    MAKE_CASE(AArch64ISD::STRICT_FCMPE) MAKE_CASE(AArch64ISD::DUP) MAKE_CASE(AArch64ISD::DUPLANE8)
    MAKE_CASE(AArch64ISD::DUPLANE16) MAKE_CASE(AArch64ISD::DUPLANE32) MAKE_CASE(AArch64ISD::DUPLANE64)
    MAKE_CASE(AArch64ISD::MOVI) MAKE_CASE(AArch64ISD::MOVIshift) MAKE_CASE(AArch64ISD::MOVIedit)
    MAKE_CASE(AArch64ISD::MOVImsl) MAKE_CASE(AArch64ISD::FMOV) MAKE_CASE(AArch64ISD::MVNIshift)
    MAKE_CASE(AArch64ISD::MVNImsl) MAKE_CASE(AArch64ISD::BICi) MAKE_CASE(AArch64ISD::ORRi)
    MAKE_CASE(AArch64ISD::BSP) MAKE_CASE(AArch64ISD::EXTR) MAKE_CASE(AArch64ISD::ZIP1)
    MAKE_CASE(AArch64ISD::ZIP2) MAKE_CASE(AArch64ISD::UZP1) MAKE_CASE(AArch64ISD::UZP2)
    MAKE_CASE(AArch64ISD::TRN1) MAKE_CASE(AArch64ISD::TRN2) MAKE_CASE(AArch64ISD::REV16)
    MAKE_CASE(AArch64ISD::REV32) MAKE_CASE(AArch64ISD::REV64) MAKE_CASE(AArch64ISD::EXT)
    MAKE_CASE(AArch64ISD::SPLICE) MAKE_CASE(AArch64ISD::VSHL) MAKE_CASE(AArch64ISD::VLSHR)
    MAKE_CASE(AArch64ISD::VASHR) MAKE_CASE(AArch64ISD::VSLI) MAKE_CASE(AArch64ISD::VSRI)
    MAKE_CASE(AArch64ISD::CMEQ) MAKE_CASE(AArch64ISD::CMGE) MAKE_CASE(AArch64ISD::CMGT)
    MAKE_CASE(AArch64ISD::CMHI) MAKE_CASE(AArch64ISD::CMHS) MAKE_CASE(AArch64ISD::FCMEQ)
    MAKE_CASE(AArch64ISD::FCMGE) MAKE_CASE(AArch64ISD::FCMGT) MAKE_CASE(AArch64ISD::CMEQz)
    MAKE_CASE(AArch64ISD::CMGEz) MAKE_CASE(AArch64ISD::CMGTz) MAKE_CASE(AArch64ISD::CMLEz)
    MAKE_CASE(AArch64ISD::CMLTz) MAKE_CASE(AArch64ISD::FCMEQz) MAKE_CASE(AArch64ISD::FCMGEz)
    MAKE_CASE(AArch64ISD::FCMGTz) MAKE_CASE(AArch64ISD::FCMLEz) MAKE_CASE(AArch64ISD::FCMLTz)
    MAKE_CASE(AArch64ISD::SADDV) MAKE_CASE(AArch64ISD::UADDV) MAKE_CASE(AArch64ISD::SRHADD)
    MAKE_CASE(AArch64ISD::URHADD) MAKE_CASE(AArch64ISD::SHADD) MAKE_CASE(AArch64ISD::UHADD)
    MAKE_CASE(AArch64ISD::SDOT) MAKE_CASE(AArch64ISD::UDOT) MAKE_CASE(AArch64ISD::SMINV)
    MAKE_CASE(AArch64ISD::UMINV) MAKE_CASE(AArch64ISD::SMAXV) MAKE_CASE(AArch64ISD::UMAXV)
    MAKE_CASE(AArch64ISD::SADDV_PRED) MAKE_CASE(AArch64ISD::UADDV_PRED) MAKE_CASE(AArch64ISD::SMAXV_PRED)
    MAKE_CASE(AArch64ISD::UMAXV_PRED) MAKE_CASE(AArch64ISD::SMINV_PRED) MAKE_CASE(AArch64ISD::UMINV_PRED)
    MAKE_CASE(AArch64ISD::ORV_PRED) MAKE_CASE(AArch64ISD::EORV_PRED) MAKE_CASE(AArch64ISD::ANDV_PRED)
    MAKE_CASE(AArch64ISD::CLASTA_N) MAKE_CASE(AArch64ISD::CLASTB_N) MAKE_CASE(AArch64ISD::LASTA)
    MAKE_CASE(AArch64ISD::LASTB) MAKE_CASE(AArch64ISD::REINTERPRET_CAST) MAKE_CASE(AArch64ISD::LS64_BUILD)
    MAKE_CASE(AArch64ISD::LS64_EXTRACT) MAKE_CASE(AArch64ISD::TBL) MAKE_CASE(AArch64ISD::FADD_PRED)
    MAKE_CASE(AArch64ISD::FADDA_PRED) MAKE_CASE(AArch64ISD::FADDV_PRED) MAKE_CASE(AArch64ISD::FDIV_PRED)
    MAKE_CASE(AArch64ISD::FMA_PRED) MAKE_CASE(AArch64ISD::FMAX_PRED) MAKE_CASE(AArch64ISD::FMAXV_PRED)
    MAKE_CASE(AArch64ISD::FMAXNM_PRED) MAKE_CASE(AArch64ISD::FMAXNMV_PRED) MAKE_CASE(AArch64ISD::FMIN_PRED)
    MAKE_CASE(AArch64ISD::FMINV_PRED) MAKE_CASE(AArch64ISD::FMINNM_PRED) MAKE_CASE(AArch64ISD::FMINNMV_PRED)
    MAKE_CASE(AArch64ISD::FMUL_PRED) MAKE_CASE(AArch64ISD::FSUB_PRED) MAKE_CASE(AArch64ISD::BIC)
    MAKE_CASE(AArch64ISD::BIT) MAKE_CASE(AArch64ISD::CBZ) MAKE_CASE(AArch64ISD::CBNZ)
    MAKE_CASE(AArch64ISD::TBZ) MAKE_CASE(AArch64ISD::TBNZ) MAKE_CASE(AArch64ISD::TC_RETURN)
    MAKE_CASE(AArch64ISD::PREFETCH) MAKE_CASE(AArch64ISD::SITOF) MAKE_CASE(AArch64ISD::UITOF)
    MAKE_CASE(AArch64ISD::NVCAST) MAKE_CASE(AArch64ISD::MRS) MAKE_CASE(AArch64ISD::SQSHL_I)
    MAKE_CASE(AArch64ISD::UQSHL_I) MAKE_CASE(AArch64ISD::SRSHR_I) MAKE_CASE(AArch64ISD::URSHR_I)
    MAKE_CASE(AArch64ISD::SQSHLU_I) MAKE_CASE(AArch64ISD::WrapperLarge) MAKE_CASE(AArch64ISD::LD2post)
    MAKE_CASE(AArch64ISD::LD3post) MAKE_CASE(AArch64ISD::LD4post) MAKE_CASE(AArch64ISD::ST2post)
    MAKE_CASE(AArch64ISD::ST3post) MAKE_CASE(AArch64ISD::ST4post) MAKE_CASE(AArch64ISD::LD1x2post)
    MAKE_CASE(AArch64ISD::LD1x3post) MAKE_CASE(AArch64ISD::LD1x4post) MAKE_CASE(AArch64ISD::ST1x2post)
    MAKE_CASE(AArch64ISD::ST1x3post) MAKE_CASE(AArch64ISD::ST1x4post) MAKE_CASE(AArch64ISD::LD1DUPpost)
    MAKE_CASE(AArch64ISD::LD2DUPpost) MAKE_CASE(AArch64ISD::LD3DUPpost) MAKE_CASE(AArch64ISD::LD4DUPpost)
    MAKE_CASE(AArch64ISD::LD1LANEpost) MAKE_CASE(AArch64ISD::LD2LANEpost) MAKE_CASE(AArch64ISD::LD3LANEpost)
    MAKE_CASE(AArch64ISD::LD4LANEpost) MAKE_CASE(AArch64ISD::ST2LANEpost) MAKE_CASE(AArch64ISD::ST3LANEpost)
    MAKE_CASE(AArch64ISD::ST4LANEpost) MAKE_CASE(AArch64ISD::SMULL) MAKE_CASE(AArch64ISD::UMULL)
    MAKE_CASE(AArch64ISD::FRECPE) MAKE_CASE(AArch64ISD::FRECPS) MAKE_CASE(AArch64ISD::FRSQRTE)
    MAKE_CASE(AArch64ISD::FRSQRTS) MAKE_CASE(AArch64ISD::STG) MAKE_CASE(AArch64ISD::STZG)
    MAKE_CASE(AArch64ISD::ST2G) MAKE_CASE(AArch64ISD::STZ2G) MAKE_CASE(AArch64ISD::SUNPKHI)
    MAKE_CASE(AArch64ISD::SUNPKLO) MAKE_CASE(AArch64ISD::UUNPKHI) MAKE_CASE(AArch64ISD::UUNPKLO)
    MAKE_CASE(AArch64ISD::INSR) MAKE_CASE(AArch64ISD::PTEST) MAKE_CASE(AArch64ISD::PTRUE)
    MAKE_CASE(AArch64ISD::LD1_MERGE_ZERO) MAKE_CASE(AArch64ISD::LD1S_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::LDNF1_MERGE_ZERO) MAKE_CASE(AArch64ISD::LDNF1S_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::LDFF1_MERGE_ZERO) MAKE_CASE(AArch64ISD::LDFF1S_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::LD1RQ_MERGE_ZERO) MAKE_CASE(AArch64ISD::LD1RO_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::SVE_LD2_MERGE_ZERO) MAKE_CASE(AArch64ISD::SVE_LD3_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::SVE_LD4_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLD1_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLD1_SCALED_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLD1_SXTW_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLD1_UXTW_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLD1_IMM_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLD1S_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLD1S_SCALED_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLD1S_SXTW_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLD1S_UXTW_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLD1S_IMM_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLDFF1_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLDFF1_SCALED_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLDFF1_SXTW_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLDFF1_UXTW_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLDFF1_IMM_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLDFF1S_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLDFF1S_IMM_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLDNT1_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::GLDNT1_INDEX_MERGE_ZERO) MAKE_CASE(AArch64ISD::GLDNT1S_MERGE_ZERO)
    MAKE_CASE(AArch64ISD::ST1_PRED) MAKE_CASE(AArch64ISD::SST1_PRED)
    MAKE_CASE(AArch64ISD::SST1_SCALED_PRED) MAKE_CASE(AArch64ISD::SST1_SXTW_PRED)
    MAKE_CASE(AArch64ISD::SST1_UXTW_PRED) MAKE_CASE(AArch64ISD::SST1_SXTW_SCALED_PRED)
    MAKE_CASE(AArch64ISD::SST1_UXTW_SCALED_PRED) MAKE_CASE(AArch64ISD::SST1_IMM_PRED)
    MAKE_CASE(AArch64ISD::SSTNT1_PRED) MAKE_CASE(AArch64ISD::SSTNT1_INDEX_PRED)
    MAKE_CASE(AArch64ISD::LDP) MAKE_CASE(AArch64ISD::STP) MAKE_CASE(AArch64ISD::STNP)
    MAKE_CASE(AArch64ISD::BITREVERSE_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::BSWAP_MERGE_PASSTHRU)
    MAKE_CASE(AArch64ISD::CTLZ_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::CTPOP_MERGE_PASSTHRU)
    MAKE_CASE(AArch64ISD::DUP_MERGE_PASSTHRU) MAKE_CASE(AArch64ISD::INDEX_VECTOR)
    MAKE_CASE(AArch64ISD::UADDLP) MAKE_CASE(AArch64ISD::CALL_RVMARKER)
  }
#undef MAKE_CASE
  return nullptr;
}

MachineBasicBlock *
AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  // We materialise the F128CSEL pseudo-instruction as some control flow and a
  // phi node:

  // OrigBB:
  //     [... previous instrs leading to comparison ...]
  //     b.ne TrueBB
  //     b EndBB
  // TrueBB:
  //     ; Fallthrough
  // EndBB:
  //     Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB]

  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction::iterator It = ++MBB->getIterator();

  Register DestReg = MI.getOperand(0).getReg();
  Register IfTrueReg = MI.getOperand(1).getReg();
  Register IfFalseReg = MI.getOperand(2).getReg();
  unsigned CondCode = MI.getOperand(3).getImm();
  bool NZCVKilled = MI.getOperand(4).isKill();

  MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, TrueBB);
  MF->insert(It, EndBB);

  // Transfer rest of current basic-block to EndBB
  EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)),
                MBB->end());
  EndBB->transferSuccessorsAndUpdatePHIs(MBB);

  BuildMI(MBB, DL, TII->get(AArch64::Bcc)).addImm(CondCode).addMBB(TrueBB);
  BuildMI(MBB, DL, TII->get(AArch64::B)).addMBB(EndBB);
  MBB->addSuccessor(TrueBB);
  MBB->addSuccessor(EndBB);

  // TrueBB falls through to the end.
// (continued) Remainder of EmitF128CSEL, begun on the previous line.
  TrueBB->addSuccessor(EndBB);

  if (!NZCVKilled) {
    // NZCV is still live across the branch; keep it live into both successors.
    TrueBB->addLiveIn(AArch64::NZCV);
    EndBB->addLiveIn(AArch64::NZCV);
  }

  // Join the two paths with a PHI selecting IfTrue/IfFalse.
  BuildMI(*EndBB, EndBB->begin(), DL, TII->get(AArch64::PHI), DestReg)
      .addReg(IfTrueReg)
      .addMBB(TrueBB)
      .addReg(IfFalseReg)
      .addMBB(MBB);

  MI.eraseFromParent();
  return EndBB;
}

MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchRet(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  assert(!isAsynchronousEHPersonality(classifyEHPersonality(
             BB->getParent()->getFunction().getPersonalityFn())) &&
         "SEH does not use catchret!");
  return BB;
}

/// Dispatch pseudo-instructions that require custom MachineInstr expansion.
MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
#ifndef NDEBUG
    MI.dump();
#endif
    llvm_unreachable("Unexpected instruction for custom inserter!");

  case AArch64::F128CSEL:
    return EmitF128CSEL(MI, BB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
  case TargetOpcode::STATEPOINT:
    return emitPatchPoint(MI, BB);

  case AArch64::CATCHRET:
    return EmitLoweredCatchRet(MI, BB);
  }
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

// Forward declarations of SVE fixed length lowering helpers
static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT);
static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
static SDValue convertFixedMaskToScalableVector(SDValue Mask,
                                                SelectionDAG &DAG);

/// isZerosVector - Check whether SDNode N is a zero-filled vector.
static bool isZerosVector(const SDNode *N) {
  // Look through a bit convert.
while (N->getOpcode() == ISD::BITCAST) N = N->getOperand(0).getNode(); if (ISD::isConstantSplatVectorAllZeros(N)) return true; if (N->getOpcode() != AArch64ISD::DUP) return false; auto Opnd0 = N->getOperand(0); auto *CINT = dyn_cast(Opnd0); auto *CFP = dyn_cast(Opnd0); return (CINT && CINT->isNullValue()) || (CFP && CFP->isZero()); } /// changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64 /// CC static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) { switch (CC) { default: llvm_unreachable("Unknown condition code!"); case ISD::SETNE: return AArch64CC::NE; case ISD::SETEQ: return AArch64CC::EQ; case ISD::SETGT: return AArch64CC::GT; case ISD::SETGE: return AArch64CC::GE; case ISD::SETLT: return AArch64CC::LT; case ISD::SETLE: return AArch64CC::LE; case ISD::SETUGT: return AArch64CC::HI; case ISD::SETUGE: return AArch64CC::HS; case ISD::SETULT: return AArch64CC::LO; case ISD::SETULE: return AArch64CC::LS; } } /// changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC. 
static void changeFPCCToAArch64CC(ISD::CondCode CC,
                                  AArch64CC::CondCode &CondCode,
                                  AArch64CC::CondCode &CondCode2) {
  // CondCode2 is AL ("always") unless the FP condition needs two AArch64
  // conditions OR'ed together (SETONE, SETUEQ below).
  CondCode2 = AArch64CC::AL;
  switch (CC) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ:
    CondCode = AArch64CC::EQ;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    CondCode = AArch64CC::GT;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    CondCode = AArch64CC::GE;
    break;
  case ISD::SETOLT:
    CondCode = AArch64CC::MI;
    break;
  case ISD::SETOLE:
    CondCode = AArch64CC::LS;
    break;
  case ISD::SETONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case ISD::SETO:
    CondCode = AArch64CC::VC;
    break;
  case ISD::SETUO:
    CondCode = AArch64CC::VS;
    break;
  case ISD::SETUEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case ISD::SETUGT:
    CondCode = AArch64CC::HI;
    break;
  case ISD::SETUGE:
    CondCode = AArch64CC::PL;
    break;
  case ISD::SETLT:
  case ISD::SETULT:
    CondCode = AArch64CC::LT;
    break;
  case ISD::SETLE:
  case ISD::SETULE:
    CondCode = AArch64CC::LE;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

/// Convert a DAG fp condition code to an AArch64 CC.
/// This differs from changeFPCCToAArch64CC in that it returns cond codes that
/// should be AND'ed instead of OR'ed.
static void changeFPCCToANDAArch64CC(ISD::CondCode CC,
                                     AArch64CC::CondCode &CondCode,
                                     AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (CC) {
  default:
    changeFPCCToAArch64CC(CC, CondCode, CondCode2);
    assert(CondCode2 == AArch64CC::AL);
    break;
  case ISD::SETONE:
    // (a one b)
    // == ((a olt b) || (a ogt b))
    // == ((a ord b) && (a une b))
    CondCode = AArch64CC::VC;
    CondCode2 = AArch64CC::NE;
    break;
  case ISD::SETUEQ:
    // (a ueq b)
    // == ((a uno b) || (a oeq b))
    // == ((a ule b) && (a uge b))
    CondCode = AArch64CC::PL;
    CondCode2 = AArch64CC::LE;
    break;
  }
}

/// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64
/// CC usable with the vector instructions.
/// Fewer operations are available
/// without a real NZCV register, so we have to use less efficient combinations
/// to get the same effect.
static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
                                        AArch64CC::CondCode &CondCode,
                                        AArch64CC::CondCode &CondCode2,
                                        bool &Invert) {
  Invert = false;
  switch (CC) {
  default:
    // Mostly the scalar mappings work fine.
    changeFPCCToAArch64CC(CC, CondCode, CondCode2);
    break;
  case ISD::SETUO:
    Invert = true;
    LLVM_FALLTHROUGH;
  case ISD::SETO:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GE;
    break;
  case ISD::SETUEQ:
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE:
    // All of the compare-mask comparisons are ordered, but we can switch
    // between the two by a double inversion. E.g. ULE == !OGT.
    Invert = true;
    changeFPCCToAArch64CC(getSetCCInverse(CC, /* FP inverse */ MVT::f32),
                          CondCode, CondCode2);
    break;
  }
}

static bool isLegalArithImmed(uint64_t C) {
  // Matches AArch64DAGToDAGISel::SelectArithImmed().
  bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
  LLVM_DEBUG(dbgs() << "Is imm " << C
                    << " legal: " << (IsLegal ? "yes\n" : "no\n"));
  return IsLegal;
}

// Can a (CMP op1, (sub 0, op2) be turned into a CMN instruction on
// the grounds that "op1 - (-op2) == op1 + op2" ? Not always, the C and V flags
// can be set differently by this operation. It comes down to whether
// "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are then
// everything is fine. If not then the optimization is wrong. Thus general
// comparisons are only valid if op2 != 0.
//
// So, finally, the only LLVM-native comparisons that don't mention C and V
// are SETEQ and SETNE. They're the only ones we can safely use CMN for in
// the absence of information about op2.
// Returns true when Op is (sub 0, x) and the compare is EQ/NE, i.e. when a
// CMP can be safely replaced by a CMN (see the comment above).
static bool isCMN(SDValue Op, ISD::CondCode CC) {
  return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) &&
         (CC == ISD::SETEQ || CC == ISD::SETNE);
}

// Emit a strict (exception-observing) FP compare chained through Chain;
// FCMPE is used when the compare must signal on quiet NaNs.
static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
                                      SelectionDAG &DAG, SDValue Chain,
                                      bool IsSignaling) {
  EVT VT = LHS.getValueType();
  assert(VT != MVT::f128);
  assert(VT != MVT::f16 && "Lowering of strict fp16 not yet implemented");
  unsigned Opcode =
      IsSignaling ? AArch64ISD::STRICT_FCMPE : AArch64ISD::STRICT_FCMP;
  return DAG.getNode(Opcode, dl, {VT, MVT::Other}, {Chain, LHS, RHS});
}

static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                              const SDLoc &dl, SelectionDAG &DAG) {
  EVT VT = LHS.getValueType();
  // NOTE(review): the template argument was stripped in the mangled source
  // ("static_cast(DAG.getSubtarget())"); restored to the AArch64 subtarget
  // reference that provides hasFullFP16().
  const bool FullFP16 =
      static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();

  if (VT.isFloatingPoint()) {
    assert(VT != MVT::f128);
    if (VT == MVT::f16 && !FullFP16) {
      // Without full FP16 support, promote half compares to float.
      LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
      RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
      VT = MVT::f32;
    }
    return DAG.getNode(AArch64ISD::FCMP, dl, VT, LHS, RHS);
  }

  // The CMP instruction is just an alias for SUBS, and representing it as
  // SUBS means that it's possible to get CSE with subtract operations.
  // A later phase can perform the optimization of setting the destination
  // register to WZR/XZR if it ends up being unused.
  unsigned Opcode = AArch64ISD::SUBS;

  if (isCMN(RHS, CC)) {
    // Can we combine a (CMP op1, (sub 0, op2) into a CMN instruction ?
    Opcode = AArch64ISD::ADDS;
    RHS = RHS.getOperand(1);
  } else if (isCMN(LHS, CC)) {
    // As we are looking for EQ/NE compares, the operands can be commuted ; can
    // we combine a (CMP (sub 0, op1), op2) into a CMN instruction ?
    Opcode = AArch64ISD::ADDS;
    LHS = LHS.getOperand(1);
  } else if (isNullConstant(RHS) && !isUnsignedIntSetCC(CC)) {
    if (LHS.getOpcode() == ISD::AND) {
      // Similarly, (CMP (and X, Y), 0) can be implemented with a TST
      // (a.k.a. ANDS) except that the flags are only guaranteed to work for one
      // of the signed comparisons.
// (continued) Tail of emitComparison, begun on the previous line.
      const SDValue ANDSNode =
          DAG.getNode(AArch64ISD::ANDS, dl, DAG.getVTList(VT, MVT_CC),
                      LHS.getOperand(0), LHS.getOperand(1));
      // Replace all users of (and X, Y) with newly generated (ands X, Y)
      DAG.ReplaceAllUsesWith(LHS, ANDSNode);
      return ANDSNode.getValue(1);
    } else if (LHS.getOpcode() == AArch64ISD::ANDS) {
      // Use result of ANDS
      return LHS.getValue(1);
    }
  }

  return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT_CC), LHS, RHS)
      .getValue(1);
}

/// \defgroup AArch64CCMP CMP;CCMP matching
///
/// These functions deal with the formation of CMP;CCMP;... sequences.
/// The CCMP/CCMN/FCCMP/FCCMPE instructions allow the conditional execution of
/// a comparison. They set the NZCV flags to a predefined value if their
/// predicate is false. This allows to express arbitrary conjunctions, for
/// example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))"
/// expressed as:
///   cmp A
///   ccmp B, inv(CB), CA
///   check for CB flags
///
/// This naturally lets us implement chains of AND operations with SETCC
/// operands. And we can even implement some other situations by transforming
/// them:
///   - We can implement (NEG SETCC) i.e. negating a single comparison by
///     negating the flags used in a CCMP/FCCMP operations.
///   - We can negate the result of a whole chain of CMP/CCMP/FCCMP operations
///     by negating the flags we test for afterwards. i.e.
///     NEG (CMP CCMP CCCMP ...) can be implemented.
///   - Note that we can only ever negate all previously processed results.
///     What we can not implement by flipping the flags to test is a negation
///     of two sub-trees (because the negation affects all sub-trees emitted so
///     far, so the 2nd sub-tree we emit would also affect the first).
/// With those tools we can implement some OR operations: /// - (OR (SETCC A) (SETCC B)) can be implemented via: /// NEG (AND (NEG (SETCC A)) (NEG (SETCC B))) /// - After transforming OR to NEG/AND combinations we may be able to use NEG /// elimination rules from earlier to implement the whole thing as a /// CCMP/FCCMP chain. /// /// As complete example: /// or (or (setCA (cmp A)) (setCB (cmp B))) /// (and (setCC (cmp C)) (setCD (cmp D)))" /// can be reassociated to: /// or (and (setCC (cmp C)) setCD (cmp D)) // (or (setCA (cmp A)) (setCB (cmp B))) /// can be transformed to: /// not (and (not (and (setCC (cmp C)) (setCD (cmp D)))) /// (and (not (setCA (cmp A)) (not (setCB (cmp B))))))" /// which can be implemented as: /// cmp C /// ccmp D, inv(CD), CC /// ccmp A, CA, inv(CD) /// ccmp B, CB, inv(CA) /// check for CB flags /// /// A counterexample is "or (and A B) (and C D)" which translates to /// not (and (not (and (not A) (not B))) (not (and (not C) (not D)))), we /// can only implement 1 of the inner (not) operations, but not both! /// @{ /// Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate. static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue CCOp, AArch64CC::CondCode Predicate, AArch64CC::CondCode OutCC, const SDLoc &DL, SelectionDAG &DAG) { unsigned Opcode = 0; const bool FullFP16 = static_cast(DAG.getSubtarget()).hasFullFP16(); if (LHS.getValueType().isFloatingPoint()) { assert(LHS.getValueType() != MVT::f128); if (LHS.getValueType() == MVT::f16 && !FullFP16) { LHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, LHS); RHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, RHS); } Opcode = AArch64ISD::FCCMP; } else if (RHS.getOpcode() == ISD::SUB) { SDValue SubOp0 = RHS.getOperand(0); if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) { // See emitComparison() on why we can only do this for SETEQ and SETNE. 
Opcode = AArch64ISD::CCMN; RHS = RHS.getOperand(1); } } if (Opcode == 0) Opcode = AArch64ISD::CCMP; SDValue Condition = DAG.getConstant(Predicate, DL, MVT_CC); AArch64CC::CondCode InvOutCC = AArch64CC::getInvertedCondCode(OutCC); unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvOutCC); SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32); return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp); } /// Returns true if @p Val is a tree of AND/OR/SETCC operations that can be /// expressed as a conjunction. See \ref AArch64CCMP. /// \param CanNegate Set to true if we can negate the whole sub-tree just by /// changing the conditions on the SETCC tests. /// (this means we can call emitConjunctionRec() with /// Negate==true on this sub-tree) /// \param MustBeFirst Set to true if this subtree needs to be negated and we /// cannot do the negation naturally. We are required to /// emit the subtree first in this case. /// \param WillNegate Is true if are called when the result of this /// subexpression must be negated. This happens when the /// outer expression is an OR. We can use this fact to know /// that we have a double negation (or (or ...) ...) that /// can be implemented for free. static bool canEmitConjunction(const SDValue Val, bool &CanNegate, bool &MustBeFirst, bool WillNegate, unsigned Depth = 0) { if (!Val.hasOneUse()) return false; unsigned Opcode = Val->getOpcode(); if (Opcode == ISD::SETCC) { if (Val->getOperand(0).getValueType() == MVT::f128) return false; CanNegate = true; MustBeFirst = false; return true; } // Protect against exponential runtime and stack overflow. 
if (Depth > 6) return false; if (Opcode == ISD::AND || Opcode == ISD::OR) { bool IsOR = Opcode == ISD::OR; SDValue O0 = Val->getOperand(0); SDValue O1 = Val->getOperand(1); bool CanNegateL; bool MustBeFirstL; if (!canEmitConjunction(O0, CanNegateL, MustBeFirstL, IsOR, Depth+1)) return false; bool CanNegateR; bool MustBeFirstR; if (!canEmitConjunction(O1, CanNegateR, MustBeFirstR, IsOR, Depth+1)) return false; if (MustBeFirstL && MustBeFirstR) return false; if (IsOR) { // For an OR expression we need to be able to naturally negate at least // one side or we cannot do the transformation at all. if (!CanNegateL && !CanNegateR) return false; // If we the result of the OR will be negated and we can naturally negate // the leafs, then this sub-tree as a whole negates naturally. CanNegate = WillNegate && CanNegateL && CanNegateR; // If we cannot naturally negate the whole sub-tree, then this must be // emitted first. MustBeFirst = !CanNegate; } else { assert(Opcode == ISD::AND && "Must be OR or AND"); // We cannot naturally negate an AND operation. CanNegate = false; MustBeFirst = MustBeFirstL || MustBeFirstR; } return true; } return false; } /// Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain /// of CCMP/CFCMP ops. See @ref AArch64CCMP. /// Tries to transform the given i1 producing node @p Val to a series compare /// and conditional compare operations. @returns an NZCV flags producing node /// and sets @p OutCC to the flags that should be tested or returns SDValue() if /// transformation was not possible. /// \p Negate is true if we want this sub-tree being negated just by changing /// SETCC conditions. static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp, AArch64CC::CondCode Predicate) { // We're at a tree leaf, produce a conditional comparison operation. 
unsigned Opcode = Val->getOpcode(); if (Opcode == ISD::SETCC) { SDValue LHS = Val->getOperand(0); SDValue RHS = Val->getOperand(1); ISD::CondCode CC = cast(Val->getOperand(2))->get(); bool isInteger = LHS.getValueType().isInteger(); if (Negate) CC = getSetCCInverse(CC, LHS.getValueType()); SDLoc DL(Val); // Determine OutCC and handle FP special case. if (isInteger) { OutCC = changeIntCCToAArch64CC(CC); } else { assert(LHS.getValueType().isFloatingPoint()); AArch64CC::CondCode ExtraCC; changeFPCCToANDAArch64CC(CC, OutCC, ExtraCC); // Some floating point conditions can't be tested with a single condition // code. Construct an additional comparison in this case. if (ExtraCC != AArch64CC::AL) { SDValue ExtraCmp; if (!CCOp.getNode()) ExtraCmp = emitComparison(LHS, RHS, CC, DL, DAG); else ExtraCmp = emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, ExtraCC, DL, DAG); CCOp = ExtraCmp; Predicate = ExtraCC; } } // Produce a normal comparison if we are first in the chain if (!CCOp) return emitComparison(LHS, RHS, CC, DL, DAG); // Otherwise produce a ccmp. return emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, OutCC, DL, DAG); } assert(Val->hasOneUse() && "Valid conjunction/disjunction tree"); bool IsOR = Opcode == ISD::OR; SDValue LHS = Val->getOperand(0); bool CanNegateL; bool MustBeFirstL; bool ValidL = canEmitConjunction(LHS, CanNegateL, MustBeFirstL, IsOR); assert(ValidL && "Valid conjunction/disjunction tree"); (void)ValidL; SDValue RHS = Val->getOperand(1); bool CanNegateR; bool MustBeFirstR; bool ValidR = canEmitConjunction(RHS, CanNegateR, MustBeFirstR, IsOR); assert(ValidR && "Valid conjunction/disjunction tree"); (void)ValidR; // Swap sub-tree that must come first to the right side. 
if (MustBeFirstL) { assert(!MustBeFirstR && "Valid conjunction/disjunction tree"); std::swap(LHS, RHS); std::swap(CanNegateL, CanNegateR); std::swap(MustBeFirstL, MustBeFirstR); } bool NegateR; bool NegateAfterR; bool NegateL; bool NegateAfterAll; if (Opcode == ISD::OR) { // Swap the sub-tree that we can negate naturally to the left. if (!CanNegateL) { assert(CanNegateR && "at least one side must be negatable"); assert(!MustBeFirstR && "invalid conjunction/disjunction tree"); assert(!Negate); std::swap(LHS, RHS); NegateR = false; NegateAfterR = true; } else { // Negate the left sub-tree if possible, otherwise negate the result. NegateR = CanNegateR; NegateAfterR = !CanNegateR; } NegateL = true; NegateAfterAll = !Negate; } else { assert(Opcode == ISD::AND && "Valid conjunction/disjunction tree"); assert(!Negate && "Valid conjunction/disjunction tree"); NegateL = false; NegateR = false; NegateAfterR = false; NegateAfterAll = false; } // Emit sub-trees. AArch64CC::CondCode RHSCC; SDValue CmpR = emitConjunctionRec(DAG, RHS, RHSCC, NegateR, CCOp, Predicate); if (NegateAfterR) RHSCC = AArch64CC::getInvertedCondCode(RHSCC); SDValue CmpL = emitConjunctionRec(DAG, LHS, OutCC, NegateL, CmpR, RHSCC); if (NegateAfterAll) OutCC = AArch64CC::getInvertedCondCode(OutCC); return CmpL; } /// Emit expression as a conjunction (a series of CCMP/CFCMP ops). /// In some cases this is even possible with OR operations in the expression. /// See \ref AArch64CCMP. /// \see emitConjunctionRec(). static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC) { bool DummyCanNegate; bool DummyMustBeFirst; if (!canEmitConjunction(Val, DummyCanNegate, DummyMustBeFirst, false)) return SDValue(); return emitConjunctionRec(DAG, Val, OutCC, false, SDValue(), AArch64CC::AL); } /// @} /// Returns how profitable it is to fold a comparison's operand's shift and/or /// extension operations. 
static unsigned getCmpOperandFoldingProfit(SDValue Op) { auto isSupportedExtend = [&](SDValue V) { if (V.getOpcode() == ISD::SIGN_EXTEND_INREG) return true; if (V.getOpcode() == ISD::AND) if (ConstantSDNode *MaskCst = dyn_cast(V.getOperand(1))) { uint64_t Mask = MaskCst->getZExtValue(); return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF); } return false; }; if (!Op.hasOneUse()) return 0; if (isSupportedExtend(Op)) return 1; unsigned Opc = Op.getOpcode(); if (Opc == ISD::SHL || Opc == ISD::SRL || Opc == ISD::SRA) if (ConstantSDNode *ShiftCst = dyn_cast(Op.getOperand(1))) { uint64_t Shift = ShiftCst->getZExtValue(); if (isSupportedExtend(Op.getOperand(0))) return (Shift <= 4) ? 2 : 1; EVT VT = Op.getValueType(); if ((VT == MVT::i32 && Shift <= 31) || (VT == MVT::i64 && Shift <= 63)) return 1; } return 0; } static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &AArch64cc, SelectionDAG &DAG, const SDLoc &dl) { if (ConstantSDNode *RHSC = dyn_cast(RHS.getNode())) { EVT VT = RHS.getValueType(); uint64_t C = RHSC->getZExtValue(); if (!isLegalArithImmed(C)) { // Constant does not fit, try adjusting it by one? switch (CC) { default: break; case ISD::SETLT: case ISD::SETGE: if ((VT == MVT::i32 && C != 0x80000000 && isLegalArithImmed((uint32_t)(C - 1))) || (VT == MVT::i64 && C != 0x80000000ULL && isLegalArithImmed(C - 1ULL))) { CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1; RHS = DAG.getConstant(C, dl, VT); } break; case ISD::SETULT: case ISD::SETUGE: if ((VT == MVT::i32 && C != 0 && isLegalArithImmed((uint32_t)(C - 1))) || (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) { CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; C = (VT == MVT::i32) ? 
(uint32_t)(C - 1) : C - 1; RHS = DAG.getConstant(C, dl, VT); } break; case ISD::SETLE: case ISD::SETGT: if ((VT == MVT::i32 && C != INT32_MAX && isLegalArithImmed((uint32_t)(C + 1))) || (VT == MVT::i64 && C != INT64_MAX && isLegalArithImmed(C + 1ULL))) { CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1; RHS = DAG.getConstant(C, dl, VT); } break; case ISD::SETULE: case ISD::SETUGT: if ((VT == MVT::i32 && C != UINT32_MAX && isLegalArithImmed((uint32_t)(C + 1))) || (VT == MVT::i64 && C != UINT64_MAX && isLegalArithImmed(C + 1ULL))) { CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1; RHS = DAG.getConstant(C, dl, VT); } break; } } } // Comparisons are canonicalized so that the RHS operand is simpler than the // LHS one, the extreme case being when RHS is an immediate. However, AArch64 // can fold some shift+extend operations on the RHS operand, so swap the // operands if that can be done. // // For example: // lsl w13, w11, #1 // cmp w13, w12 // can be turned into: // cmp w12, w11, lsl #1 if (!isa(RHS) || !isLegalArithImmed(cast(RHS)->getZExtValue())) { SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS; if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) { std::swap(LHS, RHS); CC = ISD::getSetCCSwappedOperands(CC); } } SDValue Cmp; AArch64CC::CondCode AArch64CC; if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa(RHS)) { const ConstantSDNode *RHSC = cast(RHS); // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095. // For the i8 operand, the largest immediate is 255, so this can be easily // encoded in the compare instruction. For the i16 operand, however, the // largest immediate cannot be encoded in the compare. // Therefore, use a sign extending load and cmn to avoid materializing the // -1 constant. 
For example, // movz w1, #65535 // ldrh w0, [x0, #0] // cmp w0, w1 // > // ldrsh w0, [x0, #0] // cmn w0, #1 // Fundamental, we're relying on the property that (zext LHS) == (zext RHS) // if and only if (sext LHS) == (sext RHS). The checks are in place to // ensure both the LHS and RHS are truly zero extended and to make sure the // transformation is profitable. if ((RHSC->getZExtValue() >> 16 == 0) && isa(LHS) && cast(LHS)->getExtensionType() == ISD::ZEXTLOAD && cast(LHS)->getMemoryVT() == MVT::i16 && LHS.getNode()->hasNUsesOfValue(1, 0)) { int16_t ValueofRHS = cast(RHS)->getZExtValue(); if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) { SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS, DAG.getValueType(MVT::i16)); Cmp = emitComparison(SExt, DAG.getConstant(ValueofRHS, dl, RHS.getValueType()), CC, dl, DAG); AArch64CC = changeIntCCToAArch64CC(CC); } } if (!Cmp && (RHSC->isNullValue() || RHSC->isOne())) { if ((Cmp = emitConjunction(DAG, LHS, AArch64CC))) { if ((CC == ISD::SETNE) ^ RHSC->isNullValue()) AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC); } } } if (!Cmp) { Cmp = emitComparison(LHS, RHS, CC, dl, DAG); AArch64CC = changeIntCCToAArch64CC(CC); } AArch64cc = DAG.getConstant(AArch64CC, dl, MVT_CC); return Cmp; } static std::pair getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) { assert((Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) && "Unsupported value type"); SDValue Value, Overflow; SDLoc DL(Op); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); unsigned Opc = 0; switch (Op.getOpcode()) { default: llvm_unreachable("Unknown overflow instruction!"); case ISD::SADDO: Opc = AArch64ISD::ADDS; CC = AArch64CC::VS; break; case ISD::UADDO: Opc = AArch64ISD::ADDS; CC = AArch64CC::HS; break; case ISD::SSUBO: Opc = AArch64ISD::SUBS; CC = AArch64CC::VS; break; case ISD::USUBO: Opc = AArch64ISD::SUBS; CC = AArch64CC::LO; break; // Multiply needs a little bit extra work. 
case ISD::SMULO: case ISD::UMULO: { CC = AArch64CC::NE; bool IsSigned = Op.getOpcode() == ISD::SMULO; if (Op.getValueType() == MVT::i32) { // Extend to 64-bits, then perform a 64-bit multiply. unsigned ExtendOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; LHS = DAG.getNode(ExtendOpc, DL, MVT::i64, LHS); RHS = DAG.getNode(ExtendOpc, DL, MVT::i64, RHS); SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS); Value = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); // Check that the result fits into a 32-bit integer. SDVTList VTs = DAG.getVTList(MVT::i64, MVT_CC); if (IsSigned) { // cmp xreg, wreg, sxtw SDValue SExtMul = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Value); Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, Mul, SExtMul).getValue(1); } else { // tst xreg, #0xffffffff00000000 SDValue UpperBits = DAG.getConstant(0xFFFFFFFF00000000, DL, MVT::i64); Overflow = DAG.getNode(AArch64ISD::ANDS, DL, VTs, Mul, UpperBits).getValue(1); } break; } assert(Op.getValueType() == MVT::i64 && "Expected an i64 value type"); // For the 64 bit multiply Value = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS); if (IsSigned) { SDValue UpperBits = DAG.getNode(ISD::MULHS, DL, MVT::i64, LHS, RHS); SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i64, Value, DAG.getConstant(63, DL, MVT::i64)); // It is important that LowerBits is last, otherwise the arithmetic // shift will not be folded into the compare (SUBS). SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits) .getValue(1); } else { SDValue UpperBits = DAG.getNode(ISD::MULHU, DL, MVT::i64, LHS, RHS); SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, DAG.getConstant(0, DL, MVT::i64), UpperBits).getValue(1); } break; } } // switch (...) if (Opc) { SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32); // Emit the AArch64 operation with overflow check. 
Value = DAG.getNode(Opc, DL, VTs, LHS, RHS); Overflow = Value.getValue(1); } return std::make_pair(Value, Overflow); } SDValue AArch64TargetLowering::LowerXOR(SDValue Op, SelectionDAG &DAG) const { if (useSVEForFixedLengthVectorVT(Op.getValueType())) return LowerToScalableOp(Op, DAG); SDValue Sel = Op.getOperand(0); SDValue Other = Op.getOperand(1); SDLoc dl(Sel); // If the operand is an overflow checking operation, invert the condition // code and kill the Not operation. I.e., transform: // (xor (overflow_op_bool, 1)) // --> // (csel 1, 0, invert(cc), overflow_op_bool) // ... which later gets transformed to just a cset instruction with an // inverted condition code, rather than a cset + eor sequence. if (isOneConstant(Other) && ISD::isOverflowIntrOpRes(Sel)) { // Only lower legal XALUO ops. if (!DAG.getTargetLoweringInfo().isTypeLegal(Sel->getValueType(0))) return SDValue(); SDValue TVal = DAG.getConstant(1, dl, MVT::i32); SDValue FVal = DAG.getConstant(0, dl, MVT::i32); AArch64CC::CondCode CC; SDValue Value, Overflow; std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Sel.getValue(0), DAG); SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32); return DAG.getNode(AArch64ISD::CSEL, dl, Op.getValueType(), TVal, FVal, CCVal, Overflow); } // If neither operand is a SELECT_CC, give up. if (Sel.getOpcode() != ISD::SELECT_CC) std::swap(Sel, Other); if (Sel.getOpcode() != ISD::SELECT_CC) return Op; // The folding we want to perform is: // (xor x, (select_cc a, b, cc, 0, -1) ) // --> // (csel x, (xor x, -1), cc ...) // // The latter will get matched to a CSINV instruction. ISD::CondCode CC = cast(Sel.getOperand(4))->get(); SDValue LHS = Sel.getOperand(0); SDValue RHS = Sel.getOperand(1); SDValue TVal = Sel.getOperand(2); SDValue FVal = Sel.getOperand(3); // FIXME: This could be generalized to non-integer comparisons. 
if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64) return Op; ConstantSDNode *CFVal = dyn_cast(FVal); ConstantSDNode *CTVal = dyn_cast(TVal); // The values aren't constants, this isn't the pattern we're looking for. if (!CFVal || !CTVal) return Op; // We can commute the SELECT_CC by inverting the condition. This // might be needed to make this fit into a CSINV pattern. if (CTVal->isAllOnesValue() && CFVal->isNullValue()) { std::swap(TVal, FVal); std::swap(CTVal, CFVal); CC = ISD::getSetCCInverse(CC, LHS.getValueType()); } // If the constants line up, perform the transform! if (CTVal->isNullValue() && CFVal->isAllOnesValue()) { SDValue CCVal; SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl); FVal = Other; TVal = DAG.getNode(ISD::XOR, dl, Other.getValueType(), Other, DAG.getConstant(-1ULL, dl, Other.getValueType())); return DAG.getNode(AArch64ISD::CSEL, dl, Sel.getValueType(), FVal, TVal, CCVal, Cmp); } return Op; } static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { EVT VT = Op.getValueType(); // Let legalize expand this if it isn't a legal type yet. if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) return SDValue(); SDVTList VTs = DAG.getVTList(VT, MVT::i32); unsigned Opc; bool ExtraOp = false; switch (Op.getOpcode()) { default: llvm_unreachable("Invalid code"); case ISD::ADDC: Opc = AArch64ISD::ADDS; break; case ISD::SUBC: Opc = AArch64ISD::SUBS; break; case ISD::ADDE: Opc = AArch64ISD::ADCS; ExtraOp = true; break; case ISD::SUBE: Opc = AArch64ISD::SBCS; ExtraOp = true; break; } if (!ExtraOp) return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1)); return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1), Op.getOperand(2)); } static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { // Let legalize expand this if it isn't a legal type yet. 
if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) return SDValue(); SDLoc dl(Op); AArch64CC::CondCode CC; // The actual operation that sets the overflow or carry flag. SDValue Value, Overflow; std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Op, DAG); // We use 0 and 1 as false and true values. SDValue TVal = DAG.getConstant(1, dl, MVT::i32); SDValue FVal = DAG.getConstant(0, dl, MVT::i32); // We use an inverted condition, because the conditional select is inverted // too. This will allow it to be selected to a single instruction: // CSINC Wd, WZR, WZR, invert(cond). SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32); Overflow = DAG.getNode(AArch64ISD::CSEL, dl, MVT::i32, FVal, TVal, CCVal, Overflow); SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); } // Prefetch operands are: // 1: Address to prefetch // 2: bool isWrite // 3: int locality (0 = no locality ... 3 = extreme locality) // 4: bool isDataCache static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) { SDLoc DL(Op); unsigned IsWrite = cast(Op.getOperand(2))->getZExtValue(); unsigned Locality = cast(Op.getOperand(3))->getZExtValue(); unsigned IsData = cast(Op.getOperand(4))->getZExtValue(); bool IsStream = !Locality; // When the locality number is set if (Locality) { // The front-end should have filtered out the out-of-range values assert(Locality <= 3 && "Prefetch locality out-of-range"); // The locality degree is the opposite of the cache speed. // Put the number the other way around. // The encoding starts at 0 for level 1 Locality = 3 - Locality; } // built the mask value encoding the expected behavior. 
unsigned PrfOp = (IsWrite << 4) | // Load/Store bit (!IsData << 3) | // IsDataCache bit (Locality << 1) | // Cache level bits (unsigned)IsStream; // Stream bit return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0), DAG.getConstant(PrfOp, DL, MVT::i32), Op.getOperand(1)); } SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); if (VT.isScalableVector()) return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_EXTEND_MERGE_PASSTHRU); if (useSVEForFixedLengthVectorVT(VT)) return LowerFixedLengthFPExtendToSVE(Op, DAG); assert(Op.getValueType() == MVT::f128 && "Unexpected lowering"); return SDValue(); } SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { if (Op.getValueType().isScalableVector()) return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_ROUND_MERGE_PASSTHRU); bool IsStrict = Op->isStrictFPOpcode(); SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); EVT SrcVT = SrcVal.getValueType(); if (useSVEForFixedLengthVectorVT(SrcVT)) return LowerFixedLengthFPRoundToSVE(Op, DAG); if (SrcVT != MVT::f128) { // Expand cases where the input is a vector bigger than NEON. if (useSVEForFixedLengthVectorVT(SrcVT)) return SDValue(); // It's legal except when f128 is involved return Op; } return SDValue(); } SDValue AArch64TargetLowering::LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp. // Any additional optimization in this function should be recorded // in the cost tables. EVT InVT = Op.getOperand(0).getValueType(); EVT VT = Op.getValueType(); if (VT.isScalableVector()) { unsigned Opcode = Op.getOpcode() == ISD::FP_TO_UINT ? 
AArch64ISD::FCVTZU_MERGE_PASSTHRU : AArch64ISD::FCVTZS_MERGE_PASSTHRU; return LowerToPredicatedOp(Op, DAG, Opcode); } if (useSVEForFixedLengthVectorVT(VT) || useSVEForFixedLengthVectorVT(InVT)) return LowerFixedLengthFPToIntToSVE(Op, DAG); unsigned NumElts = InVT.getVectorNumElements(); // f16 conversions are promoted to f32 when full fp16 is not supported. if (InVT.getVectorElementType() == MVT::f16 && !Subtarget->hasFullFP16()) { MVT NewVT = MVT::getVectorVT(MVT::f32, NumElts); SDLoc dl(Op); return DAG.getNode( Op.getOpcode(), dl, Op.getValueType(), DAG.getNode(ISD::FP_EXTEND, dl, NewVT, Op.getOperand(0))); } uint64_t VTSize = VT.getFixedSizeInBits(); uint64_t InVTSize = InVT.getFixedSizeInBits(); if (VTSize < InVTSize) { SDLoc dl(Op); SDValue Cv = DAG.getNode(Op.getOpcode(), dl, InVT.changeVectorElementTypeToInteger(), Op.getOperand(0)); return DAG.getNode(ISD::TRUNCATE, dl, VT, Cv); } if (VTSize > InVTSize) { SDLoc dl(Op); MVT ExtVT = MVT::getVectorVT(MVT::getFloatingPointVT(VT.getScalarSizeInBits()), VT.getVectorNumElements()); SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, ExtVT, Op.getOperand(0)); return DAG.getNode(Op.getOpcode(), dl, VT, Ext); } // Type changing conversions are illegal. return Op; } SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { bool IsStrict = Op->isStrictFPOpcode(); SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); if (SrcVal.getValueType().isVector()) return LowerVectorFP_TO_INT(Op, DAG); // f16 conversions are promoted to f32 when full fp16 is not supported. 
if (SrcVal.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) { assert(!IsStrict && "Lowering of strict fp16 not yet implemented"); SDLoc dl(Op); return DAG.getNode( Op.getOpcode(), dl, Op.getValueType(), DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, SrcVal)); } if (SrcVal.getValueType() != MVT::f128) { // It's legal except when f128 is involved return Op; } return SDValue(); } SDValue AArch64TargetLowering::LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const { // AArch64 FP-to-int conversions saturate to the destination register size, so // we can lower common saturating conversions to simple instructions. SDValue SrcVal = Op.getOperand(0); EVT SrcVT = SrcVal.getValueType(); EVT DstVT = Op.getValueType(); EVT SatVT = cast(Op.getOperand(1))->getVT(); uint64_t SatWidth = SatVT.getScalarSizeInBits(); uint64_t DstWidth = DstVT.getScalarSizeInBits(); assert(SatWidth <= DstWidth && "Saturation width cannot exceed result width"); // TODO: Support lowering of NEON and SVE conversions. if (SrcVT.isVector()) return SDValue(); // TODO: Saturate to SatWidth explicitly. if (SatWidth != DstWidth) return SDValue(); // In the absence of FP16 support, promote f32 to f16, like LowerFP_TO_INT(). if (SrcVT == MVT::f16 && !Subtarget->hasFullFP16()) return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), MVT::f32, SrcVal), Op.getOperand(1)); // Cases that we can emit directly. if ((SrcVT == MVT::f64 || SrcVT == MVT::f32 || (SrcVT == MVT::f16 && Subtarget->hasFullFP16())) && (DstVT == MVT::i64 || DstVT == MVT::i32)) return Op; // For all other cases, fall back on the expanded form. return SDValue(); } SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp. // Any additional optimization in this function should be recorded // in the cost tables. 
EVT VT = Op.getValueType(); SDLoc dl(Op); SDValue In = Op.getOperand(0); EVT InVT = In.getValueType(); unsigned Opc = Op.getOpcode(); bool IsSigned = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP; if (VT.isScalableVector()) { if (InVT.getVectorElementType() == MVT::i1) { // We can't directly extend an SVE predicate; extend it first. unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; EVT CastVT = getPromotedVTForPredicate(InVT); In = DAG.getNode(CastOpc, dl, CastVT, In); return DAG.getNode(Opc, dl, VT, In); } unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU; return LowerToPredicatedOp(Op, DAG, Opcode); } if (useSVEForFixedLengthVectorVT(VT) || useSVEForFixedLengthVectorVT(InVT)) return LowerFixedLengthIntToFPToSVE(Op, DAG); uint64_t VTSize = VT.getFixedSizeInBits(); uint64_t InVTSize = InVT.getFixedSizeInBits(); if (VTSize < InVTSize) { MVT CastVT = MVT::getVectorVT(MVT::getFloatingPointVT(InVT.getScalarSizeInBits()), InVT.getVectorNumElements()); In = DAG.getNode(Opc, dl, CastVT, In); return DAG.getNode(ISD::FP_ROUND, dl, VT, In, DAG.getIntPtrConstant(0, dl)); } if (VTSize > InVTSize) { unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; EVT CastVT = VT.changeVectorElementTypeToInteger(); In = DAG.getNode(CastOpc, dl, CastVT, In); return DAG.getNode(Opc, dl, VT, In); } return Op; } SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { if (Op.getValueType().isVector()) return LowerVectorINT_TO_FP(Op, DAG); bool IsStrict = Op->isStrictFPOpcode(); SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); // f16 conversions are promoted to f32 when full fp16 is not supported. 
if (Op.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) { assert(!IsStrict && "Lowering of strict fp16 not yet implemented"); SDLoc dl(Op); return DAG.getNode( ISD::FP_ROUND, dl, MVT::f16, DAG.getNode(Op.getOpcode(), dl, MVT::f32, SrcVal), DAG.getIntPtrConstant(0, dl)); } // i128 conversions are libcalls. if (SrcVal.getValueType() == MVT::i128) return SDValue(); // Other conversions are legal, unless it's to the completely software-based // fp128. if (Op.getValueType() != MVT::f128) return Op; return SDValue(); } SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { // For iOS, we want to call an alternative entry point: __sincos_stret, // which returns the values in two S / D registers. SDLoc dl(Op); SDValue Arg = Op.getOperand(0); EVT ArgVT = Arg.getValueType(); Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); ArgListTy Args; ArgListEntry Entry; Entry.Node = Arg; Entry.Ty = ArgTy; Entry.IsSExt = false; Entry.IsZExt = false; Args.push_back(Entry); RTLIB::Libcall LC = ArgVT == MVT::f64 ? 
RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32; const char *LibcallName = getLibcallName(LC); SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DAG.getDataLayout())); StructType *RetTy = StructType::get(ArgTy, ArgTy); TargetLowering::CallLoweringInfo CLI(DAG); CLI.setDebugLoc(dl) .setChain(DAG.getEntryNode()) .setLibCallee(CallingConv::Fast, RetTy, Callee, std::move(Args)); std::pair CallResult = LowerCallTo(CLI); return CallResult.first; } static MVT getSVEContainerType(EVT ContentTy); SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { EVT OpVT = Op.getValueType(); EVT ArgVT = Op.getOperand(0).getValueType(); if (useSVEForFixedLengthVectorVT(OpVT)) return LowerFixedLengthBitcastToSVE(Op, DAG); if (OpVT.isScalableVector()) { if (isTypeLegal(OpVT) && !isTypeLegal(ArgVT)) { assert(OpVT.isFloatingPoint() && !ArgVT.isFloatingPoint() && "Expected int->fp bitcast!"); SDValue ExtResult = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Op), getSVEContainerType(ArgVT), Op.getOperand(0)); return getSVESafeBitCast(OpVT, ExtResult, DAG); } return getSVESafeBitCast(OpVT, Op.getOperand(0), DAG); } if (OpVT != MVT::f16 && OpVT != MVT::bf16) return SDValue(); assert(ArgVT == MVT::i16); SDLoc DL(Op); Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op.getOperand(0)); Op = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Op); return SDValue( DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, OpVT, Op, DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)), 0); } static EVT getExtensionTo64Bits(const EVT &OrigVT) { if (OrigVT.getSizeInBits() >= 64) return OrigVT; assert(OrigVT.isSimple() && "Expecting a simple value type"); MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; switch (OrigSimpleTy) { default: llvm_unreachable("Unexpected Vector Type"); case MVT::v2i8: case MVT::v2i16: return MVT::v2i32; case MVT::v4i8: return MVT::v4i16; } } static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const 
EVT &ExtTy, unsigned ExtOpcode) { // The vector originally had a size of OrigTy. It was then extended to ExtTy. // We expect the ExtTy to be 128-bits total. If the OrigTy is less than // 64-bits we need to insert a new extension so that it will be 64-bits. assert(ExtTy.is128BitVector() && "Unexpected extension size"); if (OrigTy.getSizeInBits() >= 64) return N; // Must extend size to at least 64 bits to be used as an operand for VMULL. EVT NewVT = getExtensionTo64Bits(OrigTy); return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N); } static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, bool isSigned) { EVT VT = N->getValueType(0); if (N->getOpcode() != ISD::BUILD_VECTOR) return false; for (const SDValue &Elt : N->op_values()) { if (ConstantSDNode *C = dyn_cast(Elt)) { unsigned EltSize = VT.getScalarSizeInBits(); unsigned HalfSize = EltSize / 2; if (isSigned) { if (!isIntN(HalfSize, C->getSExtValue())) return false; } else { if (!isUIntN(HalfSize, C->getZExtValue())) return false; } continue; } return false; } return true; } static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) { if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND) return addRequiredExtensionForVectorMULL(N->getOperand(0), DAG, N->getOperand(0)->getValueType(0), N->getValueType(0), N->getOpcode()); assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); EVT VT = N->getValueType(0); SDLoc dl(N); unsigned EltSize = VT.getScalarSizeInBits() / 2; unsigned NumElts = VT.getVectorNumElements(); MVT TruncVT = MVT::getIntegerVT(EltSize); SmallVector Ops; for (unsigned i = 0; i != NumElts; ++i) { ConstantSDNode *C = cast(N->getOperand(i)); const APInt &CInt = C->getAPIntValue(); // Element types smaller than 32 bits are not legal, so use i32 elements. // The values are implicitly truncated so sext vs. zext doesn't matter. 
Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
  }
  return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
}

// True if N is (or builds) a sign-extended value.
static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
  return N->getOpcode() == ISD::SIGN_EXTEND ||
         N->getOpcode() == ISD::ANY_EXTEND ||
         isExtendedBUILD_VECTOR(N, DAG, true);
}

// True if N is (or builds) a zero-extended value.
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
  return N->getOpcode() == ISD::ZERO_EXTEND ||
         N->getOpcode() == ISD::ANY_EXTEND ||
         isExtendedBUILD_VECTOR(N, DAG, false);
}

// True if N is an ADD/SUB of two single-use sign-extended operands.
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
  }
  return false;
}

// True if N is an ADD/SUB of two single-use zero-extended operands.
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
  }
  return false;
}

SDValue AArch64TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                                SelectionDAG &DAG) const {
  // The rounding mode is in bits 23:22 of the FPSCR.
  // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
  // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
  // so that the shift + and get folded into a bitfield extract.
SDLoc dl(Op);
  SDValue Chain = Op.getOperand(0);
  // Read the 64-bit FPCR via its intrinsic, then truncate to do the 32-bit
  // bitfield arithmetic described above.
  SDValue FPCR_64 = DAG.getNode(
      ISD::INTRINSIC_W_CHAIN, dl, {MVT::i64, MVT::Other},
      {Chain, DAG.getConstant(Intrinsic::aarch64_get_fpcr, dl, MVT::i64)});
  Chain = FPCR_64.getValue(1);
  SDValue FPCR_32 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, FPCR_64);
  SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPCR_32,
                                  DAG.getConstant(1U << 22, dl, MVT::i32));
  SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
                              DAG.getConstant(22, dl, MVT::i32));
  SDValue AND = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
                            DAG.getConstant(3, dl, MVT::i32));
  return DAG.getMergeValues({AND, Chain}, dl);
}

SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op->getOperand(0);
  SDValue RMValue = Op->getOperand(1);

  // The rounding mode is in bits 23:22 of the FPCR.
  // The llvm.set.rounding argument value to the rounding mode in FPCR mapping
  // is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is
  // ((arg - 1) & 3) << 22).
  //
  // The argument of llvm.set.rounding must be within the segment [0, 3], so
  // NearestTiesToAway (4) is not handled here. It is responsibility of the code
  // generated llvm.set.rounding to ensure this condition.

  // Calculate new value of FPCR[23:22].
  RMValue = DAG.getNode(ISD::SUB, DL, MVT::i32, RMValue,
                        DAG.getConstant(1, DL, MVT::i32));
  RMValue = DAG.getNode(ISD::AND, DL, MVT::i32, RMValue,
                        DAG.getConstant(0x3, DL, MVT::i32));
  RMValue =
      DAG.getNode(ISD::SHL, DL, MVT::i32, RMValue,
                  DAG.getConstant(AArch64::RoundingBitsPos, DL, MVT::i32));
  RMValue = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, RMValue);

  // Get current value of FPCR.
  SDValue Ops[] = {
      Chain, DAG.getTargetConstant(Intrinsic::aarch64_get_fpcr, DL, MVT::i64)};
  SDValue FPCR =
      DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, {MVT::i64, MVT::Other}, Ops);
  Chain = FPCR.getValue(1);
  FPCR = FPCR.getValue(0);

  // Put new rounding mode into FPSCR[23:22].
const int RMMask = ~(AArch64::Rounding::rmMask << AArch64::RoundingBitsPos);
  FPCR = DAG.getNode(ISD::AND, DL, MVT::i64, FPCR,
                     DAG.getConstant(RMMask, DL, MVT::i64));
  FPCR = DAG.getNode(ISD::OR, DL, MVT::i64, FPCR, RMValue);
  // Write the updated FPCR back.
  SDValue Ops2[] = {
      Chain, DAG.getTargetConstant(Intrinsic::aarch64_set_fpcr, DL, MVT::i64),
      FPCR};
  return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
}

SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  // If SVE is available then i64 vector multiplications can also be made legal.
  bool OverrideNEON = VT == MVT::v2i64 || VT == MVT::v1i64;

  if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT, OverrideNEON))
    return LowerToPredicatedOp(Op, DAG, AArch64ISD::MUL_PRED, OverrideNEON);

  // Multiplications are only custom-lowered for 128-bit vectors so that
  // VMULL can be detected. Otherwise v2i64 multiplications are not legal.
  assert(VT.is128BitVector() && VT.isInteger() &&
         "unexpected type for custom-lowering ISD::MUL");
  SDNode *N0 = Op.getOperand(0).getNode();
  SDNode *N1 = Op.getOperand(1).getNode();
  unsigned NewOpc = 0;
  bool isMLA = false;
  bool isN0SExt = isSignExtended(N0, DAG);
  bool isN1SExt = isSignExtended(N1, DAG);
  if (isN0SExt && isN1SExt)
    NewOpc = AArch64ISD::SMULL;
  else {
    bool isN0ZExt = isZeroExtended(N0, DAG);
    bool isN1ZExt = isZeroExtended(N1, DAG);
    if (isN0ZExt && isN1ZExt)
      NewOpc = AArch64ISD::UMULL;
    else if (isN1SExt || isN1ZExt) {
      // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
      // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
      if (isN1SExt && isAddSubSExt(N0, DAG)) {
        NewOpc = AArch64ISD::SMULL;
        isMLA = true;
      } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
        NewOpc = AArch64ISD::UMULL;
        isMLA = true;
      } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
        std::swap(N0, N1);
        NewOpc = AArch64ISD::UMULL;
        isMLA = true;
      }
    }

    if (!NewOpc) {
      if (VT == MVT::v2i64)
        // Fall through to expand this. It is not legal.
return SDValue();
      else
        // Other vector multiplications are legal.
        return Op;
    }
  }

  // Legalize to a S/UMULL instruction
  SDLoc DL(Op);
  SDValue Op0;
  SDValue Op1 = skipExtensionForVectorMULL(N1, DAG);
  if (!isMLA) {
    Op0 = skipExtensionForVectorMULL(N0, DAG);
    assert(Op0.getValueType().is64BitVector() &&
           Op1.getValueType().is64BitVector() &&
           "unexpected types for extended operands to VMULL");
    return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
  }
  // Optimizing (zext A + zext B) * C, to (S/UMULL A, C) + (S/UMULL B, C) during
  // isel lowering to take advantage of no-stall back to back s/umul + s/umla.
  // This is true for CPUs with accumulate forwarding such as Cortex-A53/A57
  SDValue N00 = skipExtensionForVectorMULL(N0->getOperand(0).getNode(), DAG);
  SDValue N01 = skipExtensionForVectorMULL(N0->getOperand(1).getNode(), DAG);
  EVT Op1VT = Op1.getValueType();
  return DAG.getNode(
      N0->getOpcode(), DL, VT,
      DAG.getNode(NewOpc, DL, VT,
                  DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
      DAG.getNode(NewOpc, DL, VT,
                  DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
}

// Build an SVE PTRUE predicate node with the given pattern.
static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
                               int Pattern) {
  return DAG.getNode(AArch64ISD::PTRUE, DL, VT,
                     DAG.getTargetConstant(Pattern, DL, MVT::i32));
}

static SDValue lowerConvertToSVBool(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  EVT OutVT = Op.getValueType();
  SDValue InOp = Op.getOperand(1);
  EVT InVT = InOp.getValueType();

  // Return the operand if the cast isn't changing type, i.e. a conversion to
  // the same predicate width (the original comment's type names were lost to
  // extraction; presumably <vscale x 16 x i1> -> <vscale x 16 x i1>).
  if (InVT == OutVT)
    return InOp;

  SDValue Reinterpret =
      DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, OutVT, InOp);

  // If the argument converted to an svbool is a ptrue or a comparison, the
  // lanes introduced by the widening are zero by construction.
  switch (InOp.getOpcode()) {
  case AArch64ISD::SETCC_MERGE_ZERO:
    return Reinterpret;
  case ISD::INTRINSIC_WO_CHAIN:
    if (InOp.getConstantOperandVal(0) == Intrinsic::aarch64_sve_ptrue)
      return Reinterpret;
  }

  // Otherwise, zero the newly introduced lanes.
SDValue Mask = getPTrue(DAG, DL, InVT, AArch64SVEPredPattern::all);
  SDValue MaskReinterpret =
      DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, OutVT, Mask);
  return DAG.getNode(ISD::AND, DL, OutVT, Reinterpret, MaskReinterpret);
}

SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                       SelectionDAG &DAG) const {
  // Restored extraction-stripped template argument: cast<ConstantSDNode>.
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::aarch64_neon_abs: {
    EVT Ty = Op.getValueType();
    if (Ty == MVT::i64) {
      // Scalar i64 abs goes through the v1i64 vector form.
      SDValue Result =
          DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Op.getOperand(1));
      Result = DAG.getNode(ISD::ABS, dl, MVT::v1i64, Result);
      return DAG.getNode(ISD::BITCAST, dl, MVT::i64, Result);
    } else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
      return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
    } else {
      report_fatal_error("Unexpected type for AArch64 NEON intrinic");
    }
  }
  case Intrinsic::aarch64_neon_smax:
    return DAG.getNode(ISD::SMAX, dl, Op.getValueType(), Op.getOperand(1),
                       Op.getOperand(2));
  case Intrinsic::aarch64_neon_umax:
    return DAG.getNode(ISD::UMAX, dl, Op.getValueType(), Op.getOperand(1),
                       Op.getOperand(2));
  case Intrinsic::aarch64_neon_smin:
    return DAG.getNode(ISD::SMIN, dl, Op.getValueType(), Op.getOperand(1),
                       Op.getOperand(2));
  case Intrinsic::aarch64_neon_umin:
    return DAG.getNode(ISD::UMIN, dl, Op.getValueType(), Op.getOperand(1),
                       Op.getOperand(2));
  case Intrinsic::aarch64_sve_sunpkhi:
    return DAG.getNode(AArch64ISD::SUNPKHI, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_sunpklo:
    return DAG.getNode(AArch64ISD::SUNPKLO, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_uunpkhi:
    return DAG.getNode(AArch64ISD::UUNPKHI, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_uunpklo:
    return
DAG.getNode(AArch64ISD::UUNPKLO, dl, Op.getValueType(),
                Op.getOperand(1));
  // SVE predicated last/clast and permute intrinsics: forward operands
  // straight onto the matching AArch64ISD nodes.
  case Intrinsic::aarch64_sve_clasta_n:
    return DAG.getNode(AArch64ISD::CLASTA_N, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::aarch64_sve_clastb_n:
    return DAG.getNode(AArch64ISD::CLASTB_N, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::aarch64_sve_lasta:
    return DAG.getNode(AArch64ISD::LASTA, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_lastb:
    return DAG.getNode(AArch64ISD::LASTB, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_rev:
    return DAG.getNode(ISD::VECTOR_REVERSE, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_tbl:
    return DAG.getNode(AArch64ISD::TBL, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_trn1:
    return DAG.getNode(AArch64ISD::TRN1, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_trn2:
    return DAG.getNode(AArch64ISD::TRN2, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_uzp1:
    return DAG.getNode(AArch64ISD::UZP1, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_uzp2:
    return DAG.getNode(AArch64ISD::UZP2, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_zip1:
    return DAG.getNode(AArch64ISD::ZIP1, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_zip2:
    return DAG.getNode(AArch64ISD::ZIP2, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_splice:
    return DAG.getNode(AArch64ISD::SPLICE, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::aarch64_sve_ptrue:
    return DAG.getNode(AArch64ISD::PTRUE, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_clz:
    return
DAG.getNode(AArch64ISD::CTLZ_MERGE_PASSTHRU, dl, Op.getValueType(),
                Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  // Note the operand order for the *_MERGE_PASSTHRU nodes: (pred, data,
  // passthru), whereas the intrinsic carries (passthru, pred, data).
  case Intrinsic::aarch64_sve_cnt: {
    SDValue Data = Op.getOperand(3);
    // CTPOP only supports integer operands.
    if (Data.getValueType().isFloatingPoint())
      Data = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Data);
    return DAG.getNode(AArch64ISD::CTPOP_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Data, Op.getOperand(1));
  }
  case Intrinsic::aarch64_sve_dupq_lane:
    return LowerDUPQLane(Op, DAG);
  case Intrinsic::aarch64_sve_convert_from_svbool:
    return DAG.getNode(AArch64ISD::REINTERPRET_CAST, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_convert_to_svbool:
    return lowerConvertToSVBool(Op, DAG);
  case Intrinsic::aarch64_sve_fneg:
    return DAG.getNode(AArch64ISD::FNEG_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frintp:
    return DAG.getNode(AArch64ISD::FCEIL_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frintm:
    return DAG.getNode(AArch64ISD::FFLOOR_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frinti:
    return DAG.getNode(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU, dl,
                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_frintx:
    return DAG.getNode(AArch64ISD::FRINT_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frinta:
    return DAG.getNode(AArch64ISD::FROUND_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frintn:
    return DAG.getNode(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU, dl,
                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_frintz:
    return
DAG.getNode(AArch64ISD::FTRUNC_MERGE_PASSTHRU, dl, Op.getValueType(),
                Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  // Predicated conversions and unary FP/int ops, all mapped to the matching
  // *_MERGE_PASSTHRU node with operands reordered to (pred, data, passthru).
  case Intrinsic::aarch64_sve_ucvtf:
    return DAG.getNode(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU, dl,
                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_scvtf:
    return DAG.getNode(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU, dl,
                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_fcvtzu:
    return DAG.getNode(AArch64ISD::FCVTZU_MERGE_PASSTHRU, dl,
                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_fcvtzs:
    return DAG.getNode(AArch64ISD::FCVTZS_MERGE_PASSTHRU, dl,
                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_fsqrt:
    return DAG.getNode(AArch64ISD::FSQRT_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frecpx:
    return DAG.getNode(AArch64ISD::FRECPX_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_fabs:
    return DAG.getNode(AArch64ISD::FABS_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_abs:
    return DAG.getNode(AArch64ISD::ABS_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_neg:
    return DAG.getNode(AArch64ISD::NEG_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_insr: {
    SDValue Scalar = Op.getOperand(2);
    EVT ScalarTy = Scalar.getValueType();
    // INSR takes at least a 32-bit scalar; widen sub-word scalars first.
    if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
      Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);

    return DAG.getNode(AArch64ISD::INSR, dl, Op.getValueType(),
                       Op.getOperand(1), Scalar);
  }
  case Intrinsic::aarch64_sve_rbit:
    return
DAG.getNode(AArch64ISD::BITREVERSE_MERGE_PASSTHRU, dl,
                Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
                Op.getOperand(1));
  case Intrinsic::aarch64_sve_revb:
    return DAG.getNode(AArch64ISD::BSWAP_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  // Predicated sign/zero extend-in-register: the extra VTSDNode operand
  // carries the source element width being extended from.
  case Intrinsic::aarch64_sve_sxtb:
    return DAG.getNode(
        AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
        Op.getOperand(2), Op.getOperand(3),
        DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
        Op.getOperand(1));
  case Intrinsic::aarch64_sve_sxth:
    return DAG.getNode(
        AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
        Op.getOperand(2), Op.getOperand(3),
        DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
        Op.getOperand(1));
  case Intrinsic::aarch64_sve_sxtw:
    return DAG.getNode(
        AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
        Op.getOperand(2), Op.getOperand(3),
        DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
        Op.getOperand(1));
  case Intrinsic::aarch64_sve_uxtb:
    return DAG.getNode(
        AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
        Op.getOperand(2), Op.getOperand(3),
        DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
        Op.getOperand(1));
  case Intrinsic::aarch64_sve_uxth:
    return DAG.getNode(
        AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
        Op.getOperand(2), Op.getOperand(3),
        DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
        Op.getOperand(1));
  case Intrinsic::aarch64_sve_uxtw:
    return DAG.getNode(
        AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
        Op.getOperand(2), Op.getOperand(3),
        DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
        Op.getOperand(1));
  case Intrinsic::localaddress: {
    const auto &MF = DAG.getMachineFunction();
    const auto *RegInfo = Subtarget->getRegisterInfo();
    unsigned Reg = RegInfo->getLocalAddressRegister(MF);
    return
DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg,
                       Op.getSimpleValueType());
  }

  case Intrinsic::eh_recoverfp: {
    // FIXME: This needs to be implemented to correctly handle highly aligned
    // stack objects. For now we simply return the incoming FP. Refer D53541
    // for more details.
    SDValue FnOp = Op.getOperand(1);
    SDValue IncomingFPOp = Op.getOperand(2);
    // Restored extraction-stripped template arguments:
    // dyn_cast<GlobalAddressSDNode> and dyn_cast_or_null<Function>.
    GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
    auto *Fn =
        dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
    if (!Fn)
      report_fatal_error(
          "llvm.eh.recoverfp must take a function as the first argument");
    return IncomingFPOp;
  }

  case Intrinsic::aarch64_neon_vsri:
  case Intrinsic::aarch64_neon_vsli: {
    EVT Ty = Op.getValueType();

    if (!Ty.isVector())
      report_fatal_error("Unexpected type for aarch64_neon_vsli");

    assert(Op.getConstantOperandVal(3) <= Ty.getScalarSizeInBits());

    bool IsShiftRight = IntNo == Intrinsic::aarch64_neon_vsri;
    unsigned Opcode = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
    return DAG.getNode(Opcode, dl, Ty, Op.getOperand(1), Op.getOperand(2),
                       Op.getOperand(3));
  }

  case Intrinsic::aarch64_neon_srhadd:
  case Intrinsic::aarch64_neon_urhadd:
  case Intrinsic::aarch64_neon_shadd:
  case Intrinsic::aarch64_neon_uhadd: {
    // Select signed/unsigned and rounding/truncating halving-add variants.
    bool IsSignedAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
                        IntNo == Intrinsic::aarch64_neon_shadd);
    bool IsRoundingAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
                          IntNo == Intrinsic::aarch64_neon_urhadd);
    unsigned Opcode = IsSignedAdd
                          ? (IsRoundingAdd ? AArch64ISD::SRHADD
                                           : AArch64ISD::SHADD)
                          : (IsRoundingAdd ? AArch64ISD::URHADD
                                           : AArch64ISD::UHADD);
    return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
                       Op.getOperand(2));
  }
  case Intrinsic::aarch64_neon_sabd:
  case Intrinsic::aarch64_neon_uabd: {
    unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uabd ?
ISD::ABDU : ISD::ABDS; return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1), Op.getOperand(2)); } case Intrinsic::aarch64_neon_uaddlp: { unsigned Opcode = AArch64ISD::UADDLP; return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1)); } case Intrinsic::aarch64_neon_sdot: case Intrinsic::aarch64_neon_udot: case Intrinsic::aarch64_sve_sdot: case Intrinsic::aarch64_sve_udot: { unsigned Opcode = (IntNo == Intrinsic::aarch64_neon_udot || IntNo == Intrinsic::aarch64_sve_udot) ? AArch64ISD::UDOT : AArch64ISD::SDOT; return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); } } } bool AArch64TargetLowering::shouldExtendGSIndex(EVT VT, EVT &EltTy) const { if (VT.getVectorElementType() == MVT::i8 || VT.getVectorElementType() == MVT::i16) { EltTy = MVT::i32; return true; } return false; } bool AArch64TargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const { if (VT.getVectorElementType() == MVT::i32 && - VT.getVectorElementCount().getKnownMinValue() >= 4) + VT.getVectorElementCount().getKnownMinValue() >= 4 && + !VT.isFixedLengthVector()) return true; return false; } bool AArch64TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { return ExtVal.getValueType().isScalableVector(); } unsigned getGatherVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) { std::map, unsigned> AddrModes = { {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ false), AArch64ISD::GLD1_MERGE_ZERO}, {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ true), AArch64ISD::GLD1_UXTW_MERGE_ZERO}, {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ false), AArch64ISD::GLD1_MERGE_ZERO}, {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ true), AArch64ISD::GLD1_SXTW_MERGE_ZERO}, {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ false), AArch64ISD::GLD1_SCALED_MERGE_ZERO}, {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ true), 
AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO}, {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ false), AArch64ISD::GLD1_SCALED_MERGE_ZERO}, {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ true), AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO}, }; auto Key = std::make_tuple(IsScaled, IsSigned, NeedsExtend); return AddrModes.find(Key)->second; } unsigned getScatterVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) { std::map, unsigned> AddrModes = { {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ false), AArch64ISD::SST1_PRED}, {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ true), AArch64ISD::SST1_UXTW_PRED}, {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ false), AArch64ISD::SST1_PRED}, {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ true), AArch64ISD::SST1_SXTW_PRED}, {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ false), AArch64ISD::SST1_SCALED_PRED}, {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ true), AArch64ISD::SST1_UXTW_SCALED_PRED}, {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ false), AArch64ISD::SST1_SCALED_PRED}, {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ true), AArch64ISD::SST1_SXTW_SCALED_PRED}, }; auto Key = std::make_tuple(IsScaled, IsSigned, NeedsExtend); return AddrModes.find(Key)->second; } unsigned getSignExtendedGatherOpcode(unsigned Opcode) { switch (Opcode) { default: llvm_unreachable("unimplemented opcode"); return Opcode; case AArch64ISD::GLD1_MERGE_ZERO: return AArch64ISD::GLD1S_MERGE_ZERO; case AArch64ISD::GLD1_IMM_MERGE_ZERO: return AArch64ISD::GLD1S_IMM_MERGE_ZERO; case AArch64ISD::GLD1_UXTW_MERGE_ZERO: return AArch64ISD::GLD1S_UXTW_MERGE_ZERO; case AArch64ISD::GLD1_SXTW_MERGE_ZERO: return AArch64ISD::GLD1S_SXTW_MERGE_ZERO; case AArch64ISD::GLD1_SCALED_MERGE_ZERO: return AArch64ISD::GLD1S_SCALED_MERGE_ZERO; case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO: return 
AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO; case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO: return AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO; } } bool getGatherScatterIndexIsExtended(SDValue Index) { unsigned Opcode = Index.getOpcode(); if (Opcode == ISD::SIGN_EXTEND_INREG) return true; if (Opcode == ISD::AND) { SDValue Splat = Index.getOperand(1); if (Splat.getOpcode() != ISD::SPLAT_VECTOR) return false; ConstantSDNode *Mask = dyn_cast(Splat.getOperand(0)); if (!Mask || Mask->getZExtValue() != 0xFFFFFFFF) return false; return true; } return false; } // If the base pointer of a masked gather or scatter is null, we // may be able to swap BasePtr & Index and use the vector + register // or vector + immediate addressing mode, e.g. // VECTOR + REGISTER: // getelementptr nullptr, (splat(%offset)) + %indices) // -> getelementptr %offset, %indices // VECTOR + IMMEDIATE: // getelementptr nullptr, (splat(#x)) + %indices) // -> getelementptr #x, %indices void selectGatherScatterAddrMode(SDValue &BasePtr, SDValue &Index, EVT MemVT, unsigned &Opcode, bool IsGather, SelectionDAG &DAG) { if (!isNullConstant(BasePtr)) return; // FIXME: This will not match for fixed vector type codegen as the nodes in // question will have fixed<->scalable conversions around them. This should be // moved to a DAG combine or complex pattern so that is executes after all of // the fixed vector insert and extracts have been removed. This deficiency // will result in a sub-optimal addressing mode being used, i.e. an ADD not // being folded into the scatter/gather. ConstantSDNode *Offset = nullptr; if (Index.getOpcode() == ISD::ADD) if (auto SplatVal = DAG.getSplatValue(Index.getOperand(1))) { if (isa(SplatVal)) Offset = cast(SplatVal); else { BasePtr = SplatVal; Index = Index->getOperand(0); return; } } unsigned NewOp = IsGather ? 
AArch64ISD::GLD1_IMM_MERGE_ZERO : AArch64ISD::SST1_IMM_PRED; if (!Offset) { std::swap(BasePtr, Index); Opcode = NewOp; return; } uint64_t OffsetVal = Offset->getZExtValue(); unsigned ScalarSizeInBytes = MemVT.getScalarSizeInBits() / 8; auto ConstOffset = DAG.getConstant(OffsetVal, SDLoc(Index), MVT::i64); if (OffsetVal % ScalarSizeInBytes || OffsetVal / ScalarSizeInBytes > 31) { // Index is out of range for the immediate addressing mode BasePtr = ConstOffset; Index = Index->getOperand(0); return; } // Immediate is in range Opcode = NewOp; BasePtr = Index->getOperand(0); Index = ConstOffset; } SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); MaskedGatherSDNode *MGT = cast(Op); assert(MGT && "Can only custom lower gather load nodes"); bool IsFixedLength = MGT->getMemoryVT().isFixedLengthVector(); SDValue Index = MGT->getIndex(); SDValue Chain = MGT->getChain(); SDValue PassThru = MGT->getPassThru(); SDValue Mask = MGT->getMask(); SDValue BasePtr = MGT->getBasePtr(); ISD::LoadExtType ExtTy = MGT->getExtensionType(); ISD::MemIndexType IndexType = MGT->getIndexType(); bool IsScaled = IndexType == ISD::SIGNED_SCALED || IndexType == ISD::UNSIGNED_SCALED; bool IsSigned = IndexType == ISD::SIGNED_SCALED || IndexType == ISD::SIGNED_UNSCALED; bool IdxNeedsExtend = getGatherScatterIndexIsExtended(Index) || Index.getSimpleValueType().getVectorElementType() == MVT::i32; bool ResNeedsSignExtend = ExtTy == ISD::EXTLOAD || ExtTy == ISD::SEXTLOAD; EVT VT = PassThru.getSimpleValueType(); EVT IndexVT = Index.getSimpleValueType(); EVT MemVT = MGT->getMemoryVT(); SDValue InputVT = DAG.getValueType(MemVT); if (VT.getVectorElementType() == MVT::bf16 && !static_cast(DAG.getSubtarget()).hasBF16()) return SDValue(); if (IsFixedLength) { assert(Subtarget->useSVEForFixedLengthVectors() && "Cannot lower when not using SVE for fixed vectors"); if (MemVT.getScalarSizeInBits() <= IndexVT.getScalarSizeInBits()) { IndexVT = 
getContainerForFixedLengthVector(DAG, IndexVT);
      MemVT = IndexVT.changeVectorElementType(MemVT.getVectorElementType());
    } else {
      MemVT = getContainerForFixedLengthVector(DAG, MemVT);
      IndexVT = MemVT.changeTypeToInteger();
    }
    InputVT = DAG.getValueType(MemVT.changeTypeToInteger());
    Mask = DAG.getNode(
        ISD::ZERO_EXTEND, DL,
        VT.changeVectorElementType(IndexVT.getVectorElementType()), Mask);
  }

  // An undef or all-zeros passthru needs no select after the gather.
  if (PassThru->isUndef() || isZerosVector(PassThru.getNode()))
    PassThru = SDValue();

  if (VT.isFloatingPoint() && !IsFixedLength) {
    // Handle FP data by using an integer gather and casting the result.
    if (PassThru) {
      EVT PassThruVT = getPackedSVEVectorVT(VT.getVectorElementCount());
      PassThru = getSVESafeBitCast(PassThruVT, PassThru, DAG);
    }
    InputVT = DAG.getValueType(MemVT.changeVectorElementTypeToInteger());
  }

  SDVTList VTs = DAG.getVTList(IndexVT, MVT::Other);

  if (getGatherScatterIndexIsExtended(Index))
    Index = Index.getOperand(0);

  unsigned Opcode = getGatherVecOpcode(IsScaled, IsSigned, IdxNeedsExtend);
  selectGatherScatterAddrMode(BasePtr, Index, MemVT, Opcode,
                              /*isGather=*/true, DAG);

  if (ResNeedsSignExtend)
    Opcode = getSignExtendedGatherOpcode(Opcode);

  if (IsFixedLength) {
    if (Index.getSimpleValueType().isFixedLengthVector())
      Index = convertToScalableVector(DAG, IndexVT, Index);
    if (BasePtr.getSimpleValueType().isFixedLengthVector())
      BasePtr = convertToScalableVector(DAG, IndexVT, BasePtr);
    Mask = convertFixedMaskToScalableVector(Mask, DAG);
  }

  SDValue Ops[] = {Chain, Mask, BasePtr, Index, InputVT};
  SDValue Result = DAG.getNode(Opcode, DL, VTs, Ops);
  Chain = Result.getValue(1);

  if (IsFixedLength) {
    // Convert the scalable result back to the requested fixed-length type.
    Result = convertFromScalableVector(
        DAG, VT.changeVectorElementType(IndexVT.getVectorElementType()),
        Result);
    Result = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Result);
    Result = DAG.getNode(ISD::BITCAST, DL, VT, Result);

    if (PassThru)
      Result = DAG.getSelect(DL, VT, MGT->getMask(), Result, PassThru);
  } else {
    if (PassThru)
      Result = DAG.getSelect(DL, IndexVT, Mask, Result,
PassThru); if (VT.isFloatingPoint()) Result = getSVESafeBitCast(VT, Result, DAG); } return DAG.getMergeValues({Result, Chain}, DL); } SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); MaskedScatterSDNode *MSC = cast(Op); assert(MSC && "Can only custom lower scatter store nodes"); bool IsFixedLength = MSC->getMemoryVT().isFixedLengthVector(); SDValue Index = MSC->getIndex(); SDValue Chain = MSC->getChain(); SDValue StoreVal = MSC->getValue(); SDValue Mask = MSC->getMask(); SDValue BasePtr = MSC->getBasePtr(); ISD::MemIndexType IndexType = MSC->getIndexType(); bool IsScaled = IndexType == ISD::SIGNED_SCALED || IndexType == ISD::UNSIGNED_SCALED; bool IsSigned = IndexType == ISD::SIGNED_SCALED || IndexType == ISD::SIGNED_UNSCALED; bool NeedsExtend = getGatherScatterIndexIsExtended(Index) || Index.getSimpleValueType().getVectorElementType() == MVT::i32; EVT VT = StoreVal.getSimpleValueType(); EVT IndexVT = Index.getSimpleValueType(); SDVTList VTs = DAG.getVTList(MVT::Other); EVT MemVT = MSC->getMemoryVT(); SDValue InputVT = DAG.getValueType(MemVT); if (VT.getVectorElementType() == MVT::bf16 && !static_cast(DAG.getSubtarget()).hasBF16()) return SDValue(); if (IsFixedLength) { assert(Subtarget->useSVEForFixedLengthVectors() && "Cannot lower when not using SVE for fixed vectors"); if (MemVT.getScalarSizeInBits() <= IndexVT.getScalarSizeInBits()) { IndexVT = getContainerForFixedLengthVector(DAG, IndexVT); MemVT = IndexVT.changeVectorElementType(MemVT.getVectorElementType()); } else { MemVT = getContainerForFixedLengthVector(DAG, MemVT); IndexVT = MemVT.changeTypeToInteger(); } InputVT = DAG.getValueType(MemVT.changeTypeToInteger()); StoreVal = DAG.getNode(ISD::BITCAST, DL, VT.changeTypeToInteger(), StoreVal); StoreVal = DAG.getNode( ISD::ANY_EXTEND, DL, VT.changeVectorElementType(IndexVT.getVectorElementType()), StoreVal); StoreVal = convertToScalableVector(DAG, IndexVT, StoreVal); Mask = DAG.getNode( ISD::ZERO_EXTEND, DL, 
VT.changeVectorElementType(IndexVT.getVectorElementType()), Mask); } else if (VT.isFloatingPoint()) { // Handle FP data by casting the data so an integer scatter can be used. EVT StoreValVT = getPackedSVEVectorVT(VT.getVectorElementCount()); StoreVal = getSVESafeBitCast(StoreValVT, StoreVal, DAG); InputVT = DAG.getValueType(MemVT.changeVectorElementTypeToInteger()); } if (getGatherScatterIndexIsExtended(Index)) Index = Index.getOperand(0); unsigned Opcode = getScatterVecOpcode(IsScaled, IsSigned, NeedsExtend); selectGatherScatterAddrMode(BasePtr, Index, MemVT, Opcode, /*isGather=*/false, DAG); if (IsFixedLength) { if (Index.getSimpleValueType().isFixedLengthVector()) Index = convertToScalableVector(DAG, IndexVT, Index); if (BasePtr.getSimpleValueType().isFixedLengthVector()) BasePtr = convertToScalableVector(DAG, IndexVT, BasePtr); Mask = convertFixedMaskToScalableVector(Mask, DAG); } SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, InputVT}; return DAG.getNode(Opcode, DL, VTs, Ops); } SDValue AArch64TargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); MaskedLoadSDNode *LoadNode = cast(Op); assert(LoadNode && "Expected custom lowering of a masked load node"); EVT VT = Op->getValueType(0); if (useSVEForFixedLengthVectorVT(VT, true)) return LowerFixedLengthVectorMLoadToSVE(Op, DAG); SDValue PassThru = LoadNode->getPassThru(); SDValue Mask = LoadNode->getMask(); if (PassThru->isUndef() || isZerosVector(PassThru.getNode())) return Op; SDValue Load = DAG.getMaskedLoad( VT, DL, LoadNode->getChain(), LoadNode->getBasePtr(), LoadNode->getOffset(), Mask, DAG.getUNDEF(VT), LoadNode->getMemoryVT(), LoadNode->getMemOperand(), LoadNode->getAddressingMode(), LoadNode->getExtensionType()); SDValue Result = DAG.getSelect(DL, VT, Mask, Load, PassThru); return DAG.getMergeValues({Result, Load.getValue(1)}, DL); } // Custom lower trunc store for v4i8 vectors, since it is promoted to v4i16. 
// Lowers a truncating store of a promoted v4i16 value down to v4i8 memory.
// See the inline comment for the instruction sequence this produces.
static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST, EVT VT,
                                        EVT MemVT,
                                        SelectionDAG &DAG) {
  assert(VT.isVector() && "VT should be a vector type");
  assert(MemVT == MVT::v4i8 && VT == MVT::v4i16);

  SDValue Value = ST->getValue();

  // It first extends the promoted v4i16 to v8i16, truncates to v8i8, and
  // extracts the word lane which represents the v4i8 subvector. It optimizes
  // the store to:
  //
  //   xtn  v0.8b, v0.8h
  //   str  s0, [x0]

  SDValue Undef = DAG.getUNDEF(MVT::i16);
  // Pad the v4i16 value out to v8i16 with undef lanes so a full-width
  // truncate (xtn) can be used.
  SDValue UndefVec = DAG.getBuildVector(MVT::v4i16, DL,
                                        {Undef, Undef, Undef, Undef});

  SDValue TruncExt = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16,
                                 Value, UndefVec);
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, TruncExt);

  // Reinterpret as v2i32 and pull out the low 32-bit lane, which holds the
  // four truncated bytes; store that as a single scalar.
  Trunc = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Trunc);
  SDValue ExtractTrunc = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                     Trunc, DAG.getConstant(0, DL, MVT::i64));

  return DAG.getStore(ST->getChain(), DL, ExtractTrunc,
                      ST->getBasePtr(), ST->getMemOperand());
}

// Custom lowering for any store, vector or scalar and/or default or with
// a truncate operations. Currently only custom lower truncate operation
// from vector v4i16 to v4i8 or volatile stores of i128.
/// Custom-lower ISD::STORE. Handles: fixed-length vector stores via SVE,
/// under-aligned vector stores (scalarized), v4i16->v4i8 truncating stores,
/// 256-bit non-temporal stores (STNP pairs), volatile i128 stores (STP), and
/// i64x8 stores for the LS64 extension. Returns an empty SDValue to fall back
/// to default expansion.
SDValue AArch64TargetLowering::LowerSTORE(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc Dl(Op);
  StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
  assert(StoreNode && "Can only custom lower store nodes");

  SDValue Value = StoreNode->getValue();

  EVT VT = Value.getValueType();
  EVT MemVT = StoreNode->getMemoryVT();

  if (VT.isVector()) {
    if (useSVEForFixedLengthVectorVT(VT, true))
      return LowerFixedLengthVectorStoreToSVE(Op, DAG);

    unsigned AS = StoreNode->getAddressSpace();
    Align Alignment = StoreNode->getAlign();
    // Scalarize stores the target cannot perform misaligned.
    if (Alignment < MemVT.getStoreSize() &&
        !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
                                        StoreNode->getMemOperand()->getFlags(),
                                        nullptr)) {
      return scalarizeVectorStore(StoreNode, DAG);
    }

    if (StoreNode->isTruncatingStore() && VT == MVT::v4i16 &&
        MemVT == MVT::v4i8) {
      return LowerTruncateVectorStore(Dl, StoreNode, VT, MemVT, DAG);
    }
    // 256 bit non-temporal stores can be lowered to STNP. Do this as part of
    // the custom lowering, as there are no un-paired non-temporal stores and
    // legalization will break up 256 bit inputs.
    ElementCount EC = MemVT.getVectorElementCount();
    if (StoreNode->isNonTemporal() && MemVT.getSizeInBits() == 256u &&
        EC.isKnownEven() &&
        ((MemVT.getScalarSizeInBits() == 8u ||
          MemVT.getScalarSizeInBits() == 16u ||
          MemVT.getScalarSizeInBits() == 32u ||
          MemVT.getScalarSizeInBits() == 64u))) {
      SDValue Lo =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl,
                      MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
                      StoreNode->getValue(), DAG.getConstant(0, Dl, MVT::i64));
      SDValue Hi =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl,
                      MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
                      StoreNode->getValue(),
                      DAG.getConstant(EC.getKnownMinValue() / 2, Dl, MVT::i64));
      SDValue Result = DAG.getMemIntrinsicNode(
          AArch64ISD::STNP, Dl, DAG.getVTList(MVT::Other),
          {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
          StoreNode->getMemoryVT(), StoreNode->getMemOperand());
      return Result;
    }
  } else if (MemVT == MVT::i128 && StoreNode->isVolatile()) {
    // Volatile i128 stores must not be split by legalization; emit a single
    // STP of the two 64-bit halves.
    assert(StoreNode->getValue()->getValueType(0) == MVT::i128);
    SDValue Lo =
        DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i64, StoreNode->getValue(),
                    DAG.getConstant(0, Dl, MVT::i64));
    SDValue Hi =
        DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i64, StoreNode->getValue(),
                    DAG.getConstant(1, Dl, MVT::i64));
    SDValue Result = DAG.getMemIntrinsicNode(
        AArch64ISD::STP, Dl, DAG.getVTList(MVT::Other),
        {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
        StoreNode->getMemoryVT(), StoreNode->getMemOperand());
    return Result;
  } else if (MemVT == MVT::i64x8) {
    // LS64: split the 512-bit value into eight i64 stores.
    SDValue Value = StoreNode->getValue();
    assert(Value->getValueType(0) == MVT::i64x8);
    SDValue Chain = StoreNode->getChain();
    SDValue Base = StoreNode->getBasePtr();
    EVT PtrVT = Base.getValueType();
    for (unsigned i = 0; i < 8; i++) {
      SDValue Part = DAG.getNode(AArch64ISD::LS64_EXTRACT, Dl, MVT::i64,
                                 Value, DAG.getConstant(i, Dl, MVT::i32));
      SDValue Ptr = DAG.getNode(ISD::ADD, Dl, PtrVT, Base,
                                DAG.getConstant(i * 8, Dl, PtrVT));
      Chain = DAG.getStore(Chain, Dl, Part, Ptr, StoreNode->getPointerInfo(),
                           StoreNode->getOriginalAlign());
    }
    return Chain;
  }

  return SDValue();
}

/// Custom-lower ISD::LOAD: i64x8 loads (LS64 extension) and extending v4i8
/// vector loads. Returns an empty SDValue to fall back to default handling.
SDValue AArch64TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
  assert(LoadNode && "Expected custom lowering of a load node");

  if (LoadNode->getMemoryVT() == MVT::i64x8) {
    // LS64: build the 512-bit result from eight chained i64 loads.
    SmallVector<SDValue, 8> Ops;
    SDValue Base = LoadNode->getBasePtr();
    SDValue Chain = LoadNode->getChain();
    EVT PtrVT = Base.getValueType();
    for (unsigned i = 0; i < 8; i++) {
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
                                DAG.getConstant(i * 8, DL, PtrVT));
      SDValue Part = DAG.getLoad(MVT::i64, DL, Chain, Ptr,
                                 LoadNode->getPointerInfo(),
                                 LoadNode->getOriginalAlign());
      Ops.push_back(Part);
      Chain = SDValue(Part.getNode(), 1);
    }
    SDValue Loaded = DAG.getNode(AArch64ISD::LS64_BUILD, DL, MVT::i64x8, Ops);
    return DAG.getMergeValues({Loaded, Chain}, DL);
  }

  // Custom lowering for extending v4i8 vector loads.
  EVT VT = Op->getValueType(0);
  assert((VT == MVT::v4i16 || VT == MVT::v4i32) && "Expected v4i16 or v4i32");

  if (LoadNode->getMemoryVT() != MVT::v4i8)
    return SDValue();

  unsigned ExtType;
  if (LoadNode->getExtensionType() == ISD::SEXTLOAD)
    ExtType = ISD::SIGN_EXTEND;
  else if (LoadNode->getExtensionType() == ISD::ZEXTLOAD ||
           LoadNode->getExtensionType() == ISD::EXTLOAD)
    ExtType = ISD::ZERO_EXTEND;
  else
    return SDValue();

  // Load the 4 bytes as a single f32 scalar, then widen/extend in vector
  // registers and trim back to the requested width.
  SDValue Load = DAG.getLoad(MVT::f32, DL, LoadNode->getChain(),
                             LoadNode->getBasePtr(), MachinePointerInfo());
  SDValue Chain = Load.getValue(1);
  SDValue Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f32, Load);
  SDValue BC = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Vec);
  SDValue Ext = DAG.getNode(ExtType, DL, MVT::v8i16, BC);
  Ext = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Ext,
                    DAG.getConstant(0, DL, MVT::i64));
  if (VT == MVT::v4i32)
    Ext = DAG.getNode(ExtType, DL, MVT::v4i32, Ext);

  return DAG.getMergeValues({Ext, Chain}, DL);
}

// Generate SUBS and CSEL for integer abs.
SDValue AArch64TargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const { MVT VT = Op.getSimpleValueType(); if (VT.isVector()) return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABS_MERGE_PASSTHRU); SDLoc DL(Op); SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op.getOperand(0)); // Generate SUBS & CSEL. SDValue Cmp = DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::i32), Op.getOperand(0), DAG.getConstant(0, DL, VT)); return DAG.getNode(AArch64ISD::CSEL, DL, VT, Op.getOperand(0), Neg, DAG.getConstant(AArch64CC::PL, DL, MVT::i32), Cmp.getValue(1)); } SDValue AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { LLVM_DEBUG(dbgs() << "Custom lowering: "); LLVM_DEBUG(Op.dump()); switch (Op.getOpcode()) { default: llvm_unreachable("unimplemented operand"); return SDValue(); case ISD::BITCAST: return LowerBITCAST(Op, DAG); case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); case ISD::SETCC: case ISD::STRICT_FSETCC: case ISD::STRICT_FSETCCS: return LowerSETCC(Op, DAG); case ISD::BR_CC: return LowerBR_CC(Op, DAG); case ISD::SELECT: return LowerSELECT(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); case ISD::JumpTable: return LowerJumpTable(Op, DAG); case ISD::BR_JT: return LowerBR_JT(Op, DAG); case ISD::ConstantPool: return LowerConstantPool(Op, DAG); case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); case ISD::VASTART: return LowerVASTART(Op, DAG); case ISD::VACOPY: return LowerVACOPY(Op, DAG); case ISD::VAARG: return LowerVAARG(Op, DAG); case ISD::ADDC: case ISD::ADDE: case ISD::SUBC: case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); case ISD::SADDO: case ISD::UADDO: case ISD::SSUBO: case ISD::USUBO: case ISD::SMULO: case ISD::UMULO: return LowerXALUO(Op, DAG); case ISD::FADD: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FADD_PRED); case ISD::FSUB: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSUB_PRED); 
case ISD::FMUL: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMUL_PRED); case ISD::FMA: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMA_PRED); case ISD::FDIV: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FDIV_PRED); case ISD::FNEG: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEG_MERGE_PASSTHRU); case ISD::FCEIL: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FCEIL_MERGE_PASSTHRU); case ISD::FFLOOR: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FFLOOR_MERGE_PASSTHRU); case ISD::FNEARBYINT: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEARBYINT_MERGE_PASSTHRU); case ISD::FRINT: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FRINT_MERGE_PASSTHRU); case ISD::FROUND: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUND_MERGE_PASSTHRU); case ISD::FROUNDEVEN: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU); case ISD::FTRUNC: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FTRUNC_MERGE_PASSTHRU); case ISD::FSQRT: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSQRT_MERGE_PASSTHRU); case ISD::FABS: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FABS_MERGE_PASSTHRU); case ISD::FP_ROUND: case ISD::STRICT_FP_ROUND: return LowerFP_ROUND(Op, DAG); case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); case ISD::SPONENTRY: return LowerSPONENTRY(Op, DAG); case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG); case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); case ISD::SPLAT_VECTOR: return LowerSPLAT_VECTOR(Op, DAG); case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG); case ISD::INSERT_SUBVECTOR: return 
LowerINSERT_SUBVECTOR(Op, DAG); case ISD::SDIV: case ISD::UDIV: return LowerDIV(Op, DAG); case ISD::SMIN: return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMIN_PRED, /*OverrideNEON=*/true); case ISD::UMIN: return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMIN_PRED, /*OverrideNEON=*/true); case ISD::SMAX: return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMAX_PRED, /*OverrideNEON=*/true); case ISD::UMAX: return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMAX_PRED, /*OverrideNEON=*/true); case ISD::SRA: case ISD::SRL: case ISD::SHL: return LowerVectorSRA_SRL_SHL(Op, DAG); case ISD::SHL_PARTS: case ISD::SRL_PARTS: case ISD::SRA_PARTS: return LowerShiftParts(Op, DAG); case ISD::CTPOP: return LowerCTPOP(Op, DAG); case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); case ISD::OR: return LowerVectorOR(Op, DAG); case ISD::XOR: return LowerXOR(Op, DAG); case ISD::PREFETCH: return LowerPREFETCH(Op, DAG); case ISD::SINT_TO_FP: case ISD::UINT_TO_FP: case ISD::STRICT_SINT_TO_FP: case ISD::STRICT_UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); case ISD::FP_TO_SINT: case ISD::FP_TO_UINT: case ISD::STRICT_FP_TO_SINT: case ISD::STRICT_FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); case ISD::FP_TO_SINT_SAT: case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG); case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); case ISD::SET_ROUNDING: return LowerSET_ROUNDING(Op, DAG); case ISD::MUL: return LowerMUL(Op, DAG); case ISD::MULHS: return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHS_PRED, /*OverrideNEON=*/true); case ISD::MULHU: return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHU_PRED, /*OverrideNEON=*/true); case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); case ISD::STORE: return LowerSTORE(Op, DAG); case ISD::MSTORE: return LowerFixedLengthVectorMStoreToSVE(Op, DAG); case ISD::MGATHER: return LowerMGATHER(Op, DAG); case ISD::MSCATTER: return LowerMSCATTER(Op, DAG); case ISD::VECREDUCE_SEQ_FADD: return 
LowerVECREDUCE_SEQ_FADD(Op, DAG); case ISD::VECREDUCE_ADD: case ISD::VECREDUCE_AND: case ISD::VECREDUCE_OR: case ISD::VECREDUCE_XOR: case ISD::VECREDUCE_SMAX: case ISD::VECREDUCE_SMIN: case ISD::VECREDUCE_UMAX: case ISD::VECREDUCE_UMIN: case ISD::VECREDUCE_FADD: case ISD::VECREDUCE_FMAX: case ISD::VECREDUCE_FMIN: return LowerVECREDUCE(Op, DAG); case ISD::ATOMIC_LOAD_SUB: return LowerATOMIC_LOAD_SUB(Op, DAG); case ISD::ATOMIC_LOAD_AND: return LowerATOMIC_LOAD_AND(Op, DAG); case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); case ISD::VSCALE: return LowerVSCALE(Op, DAG); case ISD::ANY_EXTEND: case ISD::SIGN_EXTEND: case ISD::ZERO_EXTEND: return LowerFixedLengthVectorIntExtendToSVE(Op, DAG); case ISD::SIGN_EXTEND_INREG: { // Only custom lower when ExtraVT has a legal byte based element type. EVT ExtraVT = cast(Op.getOperand(1))->getVT(); EVT ExtraEltVT = ExtraVT.getVectorElementType(); if ((ExtraEltVT != MVT::i8) && (ExtraEltVT != MVT::i16) && (ExtraEltVT != MVT::i32) && (ExtraEltVT != MVT::i64)) return SDValue(); return LowerToPredicatedOp(Op, DAG, AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU); } case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); case ISD::MLOAD: return LowerMLOAD(Op, DAG); case ISD::LOAD: if (useSVEForFixedLengthVectorVT(Op.getValueType())) return LowerFixedLengthVectorLoadToSVE(Op, DAG); return LowerLOAD(Op, DAG); case ISD::ADD: return LowerToPredicatedOp(Op, DAG, AArch64ISD::ADD_PRED); case ISD::AND: return LowerToScalableOp(Op, DAG); case ISD::SUB: return LowerToPredicatedOp(Op, DAG, AArch64ISD::SUB_PRED); case ISD::FMAXIMUM: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAX_PRED); case ISD::FMAXNUM: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAXNM_PRED); case ISD::FMINIMUM: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMIN_PRED); case ISD::FMINNUM: return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMINNM_PRED); case ISD::VSELECT: return LowerFixedLengthVectorSelectToSVE(Op, DAG); case ISD::ABS: return LowerABS(Op, 
DAG); case ISD::BITREVERSE: return LowerBitreverse(Op, DAG); case ISD::BSWAP: return LowerToPredicatedOp(Op, DAG, AArch64ISD::BSWAP_MERGE_PASSTHRU); case ISD::CTLZ: return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTLZ_MERGE_PASSTHRU, /*OverrideNEON=*/true); case ISD::CTTZ: return LowerCTTZ(Op, DAG); case ISD::VECTOR_SPLICE: return LowerVECTOR_SPLICE(Op, DAG); } } bool AArch64TargetLowering::mergeStoresAfterLegalization(EVT VT) const { return !Subtarget->useSVEForFixedLengthVectors(); } bool AArch64TargetLowering::useSVEForFixedLengthVectorVT( EVT VT, bool OverrideNEON) const { if (!Subtarget->useSVEForFixedLengthVectors()) return false; if (!VT.isFixedLengthVector()) return false; // Don't use SVE for vectors we cannot scalarize if required. switch (VT.getVectorElementType().getSimpleVT().SimpleTy) { // Fixed length predicates should be promoted to i8. // NOTE: This is consistent with how NEON (and thus 64/128bit vectors) work. case MVT::i1: default: return false; case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64: case MVT::f16: case MVT::f32: case MVT::f64: break; } // All SVE implementations support NEON sized vectors. if (OverrideNEON && (VT.is128BitVector() || VT.is64BitVector())) return true; // Ensure NEON MVTs only belong to a single register class. if (VT.getFixedSizeInBits() <= 128) return false; // Don't use SVE for types that don't fit. if (VT.getFixedSizeInBits() > Subtarget->getMinSVEVectorSizeInBits()) return false; // TODO: Perhaps an artificial restriction, but worth having whilst getting // the base fixed length SVE support in place. if (!VT.isPow2VectorType()) return false; return true; } //===----------------------------------------------------------------------===// // Calling Convention Implementation //===----------------------------------------------------------------------===// /// Selects the correct CCAssignFn for a given CallingConvention value. 
CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const { switch (CC) { default: report_fatal_error("Unsupported calling convention."); case CallingConv::WebKit_JS: return CC_AArch64_WebKit_JS; case CallingConv::GHC: return CC_AArch64_GHC; case CallingConv::C: case CallingConv::Fast: case CallingConv::PreserveMost: case CallingConv::CXX_FAST_TLS: case CallingConv::Swift: case CallingConv::SwiftTail: case CallingConv::Tail: if (Subtarget->isTargetWindows() && IsVarArg) return CC_AArch64_Win64_VarArg; if (!Subtarget->isTargetDarwin()) return CC_AArch64_AAPCS; if (!IsVarArg) return CC_AArch64_DarwinPCS; return Subtarget->isTargetILP32() ? CC_AArch64_DarwinPCS_ILP32_VarArg : CC_AArch64_DarwinPCS_VarArg; case CallingConv::Win64: return IsVarArg ? CC_AArch64_Win64_VarArg : CC_AArch64_AAPCS; case CallingConv::CFGuard_Check: return CC_AArch64_Win64_CFGuard_Check; case CallingConv::AArch64_VectorCall: case CallingConv::AArch64_SVE_VectorCall: return CC_AArch64_AAPCS; } } CCAssignFn * AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const { return CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS : RetCC_AArch64_AAPCS; } SDValue AArch64TargetLowering::LowerFormalArguments( SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo &MFI = MF.getFrameInfo(); bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()); // Assign locations to all of the incoming arguments. SmallVector ArgLocs; DenseMap CopiedRegs; CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext()); // At this point, Ins[].VT may already be promoted to i32. To correctly // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT. 
// Since AnalyzeFormalArguments uses Ins[].VT for both ValVT and LocVT, here // we use a special version of AnalyzeFormalArguments to pass in ValVT and // LocVT. unsigned NumArgs = Ins.size(); Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); unsigned CurArgIdx = 0; for (unsigned i = 0; i != NumArgs; ++i) { MVT ValVT = Ins[i].VT; if (Ins[i].isOrigArg()) { std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx); CurArgIdx = Ins[i].getOrigArgIndex(); // Get type of the original argument. EVT ActualVT = getValueType(DAG.getDataLayout(), CurOrigArg->getType(), /*AllowUnknown*/ true); MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other; // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16. if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8) ValVT = MVT::i8; else if (ActualMVT == MVT::i16) ValVT = MVT::i16; } bool UseVarArgCC = false; if (IsWin64) UseVarArgCC = isVarArg; CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, UseVarArgCC); bool Res = AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo); assert(!Res && "Call operand has unhandled type"); (void)Res; } SmallVector ArgValues; unsigned ExtraArgLocs = 0; for (unsigned i = 0, e = Ins.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i - ExtraArgLocs]; if (Ins[i].Flags.isByVal()) { // Byval is used for HFAs in the PCS, but the system should work in a // non-compliant manner for larger structs. EVT PtrVT = getPointerTy(DAG.getDataLayout()); int Size = Ins[i].Flags.getByValSize(); unsigned NumRegs = (Size + 7) / 8; // FIXME: This works on big-endian for composite byvals, which are the common // case. It should also work for fundamental types too. 
unsigned FrameIdx = MFI.CreateFixedObject(8 * NumRegs, VA.getLocMemOffset(), false); SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrVT); InVals.push_back(FrameIdxN); continue; } if (Ins[i].Flags.isSwiftAsync()) MF.getInfo()->setHasSwiftAsyncContext(true); SDValue ArgValue; if (VA.isRegLoc()) { // Arguments stored in registers. EVT RegVT = VA.getLocVT(); const TargetRegisterClass *RC; if (RegVT == MVT::i32) RC = &AArch64::GPR32RegClass; else if (RegVT == MVT::i64) RC = &AArch64::GPR64RegClass; else if (RegVT == MVT::f16 || RegVT == MVT::bf16) RC = &AArch64::FPR16RegClass; else if (RegVT == MVT::f32) RC = &AArch64::FPR32RegClass; else if (RegVT == MVT::f64 || RegVT.is64BitVector()) RC = &AArch64::FPR64RegClass; else if (RegVT == MVT::f128 || RegVT.is128BitVector()) RC = &AArch64::FPR128RegClass; else if (RegVT.isScalableVector() && RegVT.getVectorElementType() == MVT::i1) RC = &AArch64::PPRRegClass; else if (RegVT.isScalableVector()) RC = &AArch64::ZPRRegClass; else llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); // Transform the arguments in physical registers into virtual ones. unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT); // If this is an 8, 16 or 32-bit value, it is really passed promoted // to 64 bits. Insert an assert[sz]ext to capture this, then // truncate to the right size. 
switch (VA.getLocInfo()) { default: llvm_unreachable("Unknown loc info!"); case CCValAssign::Full: break; case CCValAssign::Indirect: assert(VA.getValVT().isScalableVector() && "Only scalable vectors can be passed indirectly"); break; case CCValAssign::BCvt: ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue); break; case CCValAssign::AExt: case CCValAssign::SExt: case CCValAssign::ZExt: break; case CCValAssign::AExtUpper: ArgValue = DAG.getNode(ISD::SRL, DL, RegVT, ArgValue, DAG.getConstant(32, DL, RegVT)); ArgValue = DAG.getZExtOrTrunc(ArgValue, DL, VA.getValVT()); break; } } else { // VA.isRegLoc() assert(VA.isMemLoc() && "CCValAssign is neither reg nor mem"); unsigned ArgOffset = VA.getLocMemOffset(); unsigned ArgSize = (VA.getLocInfo() == CCValAssign::Indirect ? VA.getLocVT().getSizeInBits() : VA.getValVT().getSizeInBits()) / 8; uint32_t BEAlign = 0; if (!Subtarget->isLittleEndian() && ArgSize < 8 && !Ins[i].Flags.isInConsecutiveRegs()) BEAlign = 8 - ArgSize; int FI = MFI.CreateFixedObject(ArgSize, ArgOffset + BEAlign, true); // Create load nodes to retrieve arguments from the stack. 
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); // For NON_EXTLOAD, generic code in getLoad assert(ValVT == MemVT) ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; MVT MemVT = VA.getValVT(); switch (VA.getLocInfo()) { default: break; case CCValAssign::Trunc: case CCValAssign::BCvt: MemVT = VA.getLocVT(); break; case CCValAssign::Indirect: assert(VA.getValVT().isScalableVector() && "Only scalable vectors can be passed indirectly"); MemVT = VA.getLocVT(); break; case CCValAssign::SExt: ExtType = ISD::SEXTLOAD; break; case CCValAssign::ZExt: ExtType = ISD::ZEXTLOAD; break; case CCValAssign::AExt: ExtType = ISD::EXTLOAD; break; } ArgValue = DAG.getExtLoad( ExtType, DL, VA.getLocVT(), Chain, FIN, MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), MemVT); } if (VA.getLocInfo() == CCValAssign::Indirect) { assert(VA.getValVT().isScalableVector() && "Only scalable vectors can be passed indirectly"); uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinSize(); unsigned NumParts = 1; if (Ins[i].Flags.isInConsecutiveRegs()) { assert(!Ins[i].Flags.isInConsecutiveRegsLast()); while (!Ins[i + NumParts - 1].Flags.isInConsecutiveRegsLast()) ++NumParts; } MVT PartLoad = VA.getValVT(); SDValue Ptr = ArgValue; // Ensure we generate all loads for each tuple part, whilst updating the // pointer after each load correctly using vscale. 
while (NumParts > 0) { ArgValue = DAG.getLoad(PartLoad, DL, Chain, Ptr, MachinePointerInfo()); InVals.push_back(ArgValue); NumParts--; if (NumParts > 0) { SDValue BytesIncrement = DAG.getVScale( DL, Ptr.getValueType(), APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize)); SDNodeFlags Flags; Flags.setNoUnsignedWrap(true); Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr, BytesIncrement, Flags); ExtraArgLocs++; i++; } } } else { if (Subtarget->isTargetILP32() && Ins[i].Flags.isPointer()) ArgValue = DAG.getNode(ISD::AssertZext, DL, ArgValue.getValueType(), ArgValue, DAG.getValueType(MVT::i32)); InVals.push_back(ArgValue); } } assert((ArgLocs.size() + ExtraArgLocs) == Ins.size()); // varargs AArch64FunctionInfo *FuncInfo = MF.getInfo(); if (isVarArg) { if (!Subtarget->isTargetDarwin() || IsWin64) { // The AAPCS variadic function ABI is identical to the non-variadic // one. As a result there may be more arguments in registers and we should // save them for future reference. // Win64 variadic functions also pass arguments in registers, but all float // arguments are passed in integer registers. saveVarArgRegisters(CCInfo, DAG, DL, Chain); } // This will point to the next argument passed via stack. unsigned StackOffset = CCInfo.getNextStackOffset(); // We currently pass all varargs at 8-byte alignment, or 4 for ILP32 StackOffset = alignTo(StackOffset, Subtarget->isTargetILP32() ? 4 : 8); FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true)); if (MFI.hasMustTailInVarArgFunc()) { SmallVector RegParmTypes; RegParmTypes.push_back(MVT::i64); RegParmTypes.push_back(MVT::f128); // Compute the set of forwarded registers. The rest are scratch. SmallVectorImpl &Forwards = FuncInfo->getForwardedMustTailRegParms(); CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_AArch64_AAPCS); // Conservatively forward X8, since it might be used for aggregate return. 
if (!CCInfo.isAllocated(AArch64::X8)) { unsigned X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass); Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64)); } } } // On Windows, InReg pointers must be returned, so record the pointer in a // virtual register at the start of the function so it can be returned in the // epilogue. if (IsWin64) { for (unsigned I = 0, E = Ins.size(); I != E; ++I) { if (Ins[I].Flags.isInReg()) { assert(!FuncInfo->getSRetReturnReg()); MVT PtrTy = getPointerTy(DAG.getDataLayout()); Register Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy)); FuncInfo->setSRetReturnReg(Reg); SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[I]); Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain); break; } } } unsigned StackArgSize = CCInfo.getNextStackOffset(); bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) { // This is a non-standard ABI so by fiat I say we're allowed to make full // use of the stack area to be popped, which must be aligned to 16 bytes in // any case: StackArgSize = alignTo(StackArgSize, 16); // If we're expected to restore the stack (e.g. fastcc) then we'll be adding // a multiple of 16. FuncInfo->setArgumentStackToRestore(StackArgSize); // This realignment carries over to the available bytes below. Our own // callers will guarantee the space is free by giving an aligned value to // CALLSEQ_START. } // Even if we're not expected to free up the space, it's useful to know how // much is there while considering tail calls (because we can reuse it). 
FuncInfo->setBytesInStackArgArea(StackArgSize); if (Subtarget->hasCustomCallingConv()) Subtarget->getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF); return Chain; } void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain) const { MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo &MFI = MF.getFrameInfo(); AArch64FunctionInfo *FuncInfo = MF.getInfo(); auto PtrVT = getPointerTy(DAG.getDataLayout()); bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()); SmallVector MemOps; static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3, AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7 }; static const unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs); unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs); unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR); int GPRIdx = 0; if (GPRSaveSize != 0) { if (IsWin64) { GPRIdx = MFI.CreateFixedObject(GPRSaveSize, -(int)GPRSaveSize, false); if (GPRSaveSize & 15) // The extra size here, if triggered, will always be 8. MFI.CreateFixedObject(16 - (GPRSaveSize & 15), -(int)alignTo(GPRSaveSize, 16), false); } else GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false); SDValue FIN = DAG.getFrameIndex(GPRIdx, PtrVT); for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) { unsigned VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass); SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64); SDValue Store = DAG.getStore( Val.getValue(1), DL, Val, FIN, IsWin64 ? 
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), GPRIdx, (i - FirstVariadicGPR) * 8) : MachinePointerInfo::getStack(DAG.getMachineFunction(), i * 8)); MemOps.push_back(Store); FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getConstant(8, DL, PtrVT)); } } FuncInfo->setVarArgsGPRIndex(GPRIdx); FuncInfo->setVarArgsGPRSize(GPRSaveSize); if (Subtarget->hasFPARMv8() && !IsWin64) { static const MCPhysReg FPRArgRegs[] = { AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7}; static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs); unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs); unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR); int FPRIdx = 0; if (FPRSaveSize != 0) { FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false); SDValue FIN = DAG.getFrameIndex(FPRIdx, PtrVT); for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) { unsigned VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass); SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128); SDValue Store = DAG.getStore( Val.getValue(1), DL, Val, FIN, MachinePointerInfo::getStack(DAG.getMachineFunction(), i * 16)); MemOps.push_back(Store); FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getConstant(16, DL, PtrVT)); } } FuncInfo->setVarArgsFPRIndex(FPRIdx); FuncInfo->setVarArgsFPRSize(FPRSaveSize); } if (!MemOps.empty()) { Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); } } /// LowerCallResult - Lower the result values of a call into the /// appropriate copies out of appropriate physical registers. SDValue AArch64TargetLowering::LowerCallResult( SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl &InVals, bool isThisReturn, SDValue ThisVal) const { CCAssignFn *RetCC = CCAssignFnForReturn(CallConv); // Assign locations to each value returned by this call. 
SmallVector RVLocs; DenseMap CopiedRegs; CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, *DAG.getContext()); CCInfo.AnalyzeCallResult(Ins, RetCC); // Copy all of the result registers out of their specified physreg. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign VA = RVLocs[i]; // Pass 'this' value directly from the argument to return value, to avoid // reg unit interference if (i == 0 && isThisReturn) { assert(!VA.needsCustom() && VA.getLocVT() == MVT::i64 && "unexpected return calling convention register assignment"); InVals.push_back(ThisVal); continue; } // Avoid copying a physreg twice since RegAllocFast is incompetent and only // allows one use of a physreg per block. SDValue Val = CopiedRegs.lookup(VA.getLocReg()); if (!Val) { Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); Chain = Val.getValue(1); InFlag = Val.getValue(2); CopiedRegs[VA.getLocReg()] = Val; } switch (VA.getLocInfo()) { default: llvm_unreachable("Unknown loc info!"); case CCValAssign::Full: break; case CCValAssign::BCvt: Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); break; case CCValAssign::AExtUpper: Val = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Val, DAG.getConstant(32, DL, VA.getLocVT())); LLVM_FALLTHROUGH; case CCValAssign::AExt: LLVM_FALLTHROUGH; case CCValAssign::ZExt: Val = DAG.getZExtOrTrunc(Val, DL, VA.getValVT()); break; } InVals.push_back(Val); } return Chain; } /// Return true if the calling convention is one that we can guarantee TCO for. static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) { return (CC == CallingConv::Fast && GuaranteeTailCalls) || CC == CallingConv::Tail || CC == CallingConv::SwiftTail; } /// Return true if we might ever do TCO for calls with this calling convention. 
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
  case CallingConv::AArch64_SVE_VectorCall:
  case CallingConv::PreserveMost:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
  case CallingConv::Fast:
    return true;
  default:
    return false;
  }
}

/// Decide whether a call can be lowered as a tail call (TC_RETURN) without
/// miscompiling either caller or callee. Checks, in order: calling-convention
/// compatibility, Windows/X18 constraints, byval/inreg caller arguments, weak
/// external callees, variadic memory operands, result compatibility,
/// callee-saved register masks, indirectly-passed (SVE) arguments, and
/// whether the callee's stack arguments fit in the caller's own argument area.
bool AArch64TargetLowering::isEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
    const SmallVectorImpl &Outs,
    const SmallVectorImpl &OutVals,
    const SmallVectorImpl &Ins, SelectionDAG &DAG) const {
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // Functions using the C or Fast calling convention that have an SVE signature
  // preserve more registers and should assume the SVE_VectorCall CC.
  // The check for matching callee-saved regs will determine whether it is
  // eligible for TCO.
  if ((CallerCC == CallingConv::C || CallerCC == CallingConv::Fast) &&
      AArch64RegisterInfo::hasSVEArgsOrReturn(&MF))
    CallerCC = CallingConv::AArch64_SVE_VectorCall;

  bool CCMatch = CallerCC == CalleeCC;

  // When using the Windows calling convention on a non-windows OS, we want
  // to back up and restore X18 in such functions; we can't do a tail call
  // from those functions.
  if (CallerCC == CallingConv::Win64 && !Subtarget->isTargetWindows() &&
      CalleeCC != CallingConv::Win64)
    return false;

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible (see
  // X86) but less efficient and uglier in LowerCall.
  for (Function::const_arg_iterator i = CallerF.arg_begin(),
                                    e = CallerF.arg_end();
       i != e; ++i) {
    if (i->hasByValAttr())
      return false;

    // On Windows, "inreg" attributes signify non-aggregate indirect returns.
    // In this case, it is necessary to save/restore X0 in the callee. Tail
    // call opt interferes with this. So we disable tail call opt when the
    // caller has an argument with "inreg" attribute.
    // FIXME: Check whether the callee also has an "inreg" argument.
    if (i->hasInRegAttr())
      return false;
  }

  // Under guaranteed-TCO conventions the only remaining requirement is that
  // caller and callee use the same convention.
  if (canGuaranteeTCO(CalleeCC,
                      getTargetMachine().Options.GuaranteedTailCallOpt))
    return CCMatch;

  // Externally-defined functions with weak linkage should not be
  // tail-called on AArch64 when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (GlobalAddressSDNode *G = dyn_cast(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    const Triple &TT = getTargetMachine().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
      return false;
  }

  // Now we search for cases where we can use a tail call without changing the
  // ABI. Sibcall is used in some places (particularly gcc) to refer to this
  // concept.

  // I want anyone implementing a new calling convention to think long and hard
  // about this assert.
  assert((!isVarArg || CalleeCC == CallingConv::C) &&
         "Unexpected variadic calling convention");

  LLVMContext &C = *DAG.getContext();
  if (isVarArg && !Outs.empty()) {
    // At least two cases here: if caller is fastcc then we can't have any
    // memory arguments (we'd be expected to clean up the stack afterwards). If
    // caller is C then we could potentially use its argument area.

    // FIXME: for now we take the most conservative of these in both cases:
    // disallow all variadic memory operands.
    SmallVector ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);

    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, true));
    for (const CCValAssign &ArgLoc : ArgLocs)
      if (!ArgLoc.isRegLoc())
        return false;
  }

  // Check that the call results are passed in the same way.
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
                                  CCAssignFnForCall(CalleeCC, isVarArg),
                                  CCAssignFnForCall(CallerCC, isVarArg)))
    return false;
  // The callee has to preserve all registers the caller needs to preserve.
  const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (!CCMatch) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (Subtarget->hasCustomCallingConv()) {
      TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
      TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
    }
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // Nothing more to check if the callee is taking no arguments
  if (Outs.empty())
    return true;

  SmallVector ArgLocs;
  CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);

  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));

  const AArch64FunctionInfo *FuncInfo = MF.getInfo();

  // If any of the arguments is passed indirectly, it must be SVE, so the
  // 'getBytesInStackArgArea' is not sufficient to determine whether we need to
  // allocate space on the stack. That is why we determine this explicitly here:
  // if any argument is passed indirectly, the call cannot be a tailcall.
  if (llvm::any_of(ArgLocs, [](CCValAssign &A) {
        assert((A.getLocInfo() != CCValAssign::Indirect ||
                A.getValVT().isScalableVector()) &&
               "Expected value to be scalable");
        return A.getLocInfo() == CCValAssign::Indirect;
      }))
    return false;

  // If the stack arguments for this call do not fit into our own save area then
  // the call cannot be made tail.
  if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
    return false;

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
    return false;

  return true;
}

/// Build a TokenFactor that orders the store to a (possibly reused) fixed
/// stack slot after any pending loads from frame indices whose byte range
/// overlaps \p ClobberedFI, so tail-call argument stores cannot clobber
/// incoming arguments that are still to be read.
SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
                                                   SelectionDAG &DAG,
                                                   MachineFrameInfo &MFI,
                                                   int ClobberedFI) const {
  SmallVector ArgChains;
  int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument load whose frame object
  // overlaps the byte range being clobbered.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
                            UE = DAG.getEntryNode().getNode()->use_end();
       U != UE; ++U)
    if (LoadSDNode *L = dyn_cast(*U))
      if (FrameIndexSDNode *FI = dyn_cast(L->getBasePtr()))
        if (FI->getIndex() < 0) {
          // Negative frame indices are fixed objects (incoming arguments).
          int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;

          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }

  // Build a tokenfactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}

/// Return true if the callee pops its own stack arguments on return (the
/// guaranteed-TCO conventions), so the caller must not.
bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
                                                   bool TailCallOpt) const {
  return (CallCC == CallingConv::Fast && TailCallOpt) ||
         CallCC == CallingConv::Tail || CallCC == CallingConv::SwiftTail;
}

/// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain,
/// and add input and output parameter nodes.
SDValue
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVector &Outs = CLI.Outs;
  SmallVector &OutVals = CLI.OutVals;
  SmallVector &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFunction::CallSiteInfo CSInfo;
  bool IsThisReturn = false;

  AArch64FunctionInfo *FuncInfo = MF.getInfo();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
  bool IsSibCall = false;
  bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CallConv);

  // Check callee args/returns for SVE registers and set calling convention
  // accordingly.
  if (CallConv == CallingConv::C || CallConv == CallingConv::Fast) {
    bool CalleeOutSVE = any_of(Outs, [](ISD::OutputArg &Out){
      return Out.VT.isScalableVector();
    });
    bool CalleeInSVE = any_of(Ins, [](ISD::InputArg &In){
      return In.VT.isScalableVector();
    });

    if (CalleeInSVE || CalleeOutSVE)
      CallConv = CallingConv::AArch64_SVE_VectorCall;
  }

  if (IsTailCall) {
    // Check if it's really possible to do a tail call.
    IsTailCall = isEligibleForTailCallOptimization(
        Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);

    // A sibling call is one where we're under the usual C ABI and not planning
    // to change that but can still do a tail call:
    if (!TailCallOpt && IsTailCall && CallConv != CallingConv::Tail &&
        CallConv != CallingConv::SwiftTail)
      IsSibCall = true;

    if (IsTailCall)
      ++NumTailCalls;
  }

  // musttail calls must be tail-called; failing that is a hard error.
  if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  if (IsVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Variable vector arguments always go into memory.
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      if (!Outs[i].IsFixed && ArgVT.isScalableVector())
        report_fatal_error("Passing SVE types to variadic functions is "
                           "currently not supported");

      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      bool UseVarArgCC = !Outs[i].IsFixed;
      // On Windows, the fixed arguments in a vararg call are passed in GPRs
      // too, so use the vararg CC to force them to integer registers.
      if (IsCalleeWin64)
        UseVarArgCC = true;
      CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, UseVarArgCC);
      bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
      assert(!Res && "Call operand has unhandled type");
      (void)Res;
    }
  } else {
    // At this point, Outs[].VT may already be promoted to i32. To correctly
    // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and
    // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT.
    // Since AnalyzeCallOperands uses Ins[].VT for both ValVT and LocVT, here
    // we use a special version of AnalyzeCallOperands to pass in ValVT and
    // LocVT.
    unsigned NumArgs = Outs.size();
    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ValVT = Outs[i].VT;
      // Get type of the original argument.
      EVT ActualVT = getValueType(DAG.getDataLayout(),
                                  CLI.getArgs()[Outs[i].OrigArgIndex].Ty,
                                  /*AllowUnknown*/ true);
      MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : ValVT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
      if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
        ValVT = MVT::i8;
      else if (ActualMVT == MVT::i16)
        ValVT = MVT::i16;

      CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
      bool Res = AssignFn(i, ValVT, ValVT, CCValAssign::Full, ArgFlags, CCInfo);
      assert(!Res && "Call operand has unhandled type");
      (void)Res;
    }
  }

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (IsSibCall) {
    // Since we're not changing the ABI to make this a tail call, the memory
    // operands are already available in the caller's incoming argument space.
    NumBytes = 0;
  }

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0. Completely unused for non-tail calls.
  int FPDiff = 0;

  if (IsTailCall && !IsSibCall) {
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();

    // Since callee will pop argument stack as a tail call, we must keep the
    // popped size 16-byte aligned.
    NumBytes = alignTo(NumBytes, 16);

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // can actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // Update the required reserved area if this is the tail call requiring the
    // most argument stack space.
    if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
      FuncInfo->setTailCallReservedStack(-FPDiff);

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started at
    // a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, IsTailCall ? 0 : NumBytes, 0, DL);

  SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP,
                                        getPointerTy(DAG.getDataLayout()));

  SmallVector, 8> RegsToPass;
  SmallSet RegsUsed;
  SmallVector MemOpChains;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // musttail vararg calls forward the caller's saved register parameters.
  if (IsVarArg && CLI.CB && CLI.CB->isMustTailCall()) {
    const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
    for (const auto &F : Forwards) {
      SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
      RegsToPass.emplace_back(F.PReg, Val);
    }
  }

  // Walk the register/memloc assignments, inserting copies/loads.
  // ExtraArgLocs accounts for arguments consumed by multi-part indirect
  // (SVE tuple) stores below, which advance i without a matching ArgLoc.
  unsigned ExtraArgLocs = 0;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      if (Outs[i].ArgVT == MVT::i1) {
        // AAPCS requires i1 to be zero-extended to 8-bits by the caller.
        Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
        Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i8, Arg);
      }
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExtUpper:
      assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits");
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg,
                        DAG.getConstant(32, DL, VA.getLocVT()));
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getBitcast(VA.getLocVT(), Arg);
      break;
    case CCValAssign::Trunc:
      Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
      break;
    case CCValAssign::FPExt:
      Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::Indirect:
      assert(VA.getValVT().isScalableVector() &&
             "Only scalable vectors can be passed indirectly");

      uint64_t StoreSize = VA.getValVT().getStoreSize().getKnownMinSize();
      uint64_t PartSize = StoreSize;
      unsigned NumParts = 1;
      if (Outs[i].Flags.isInConsecutiveRegs()) {
        assert(!Outs[i].Flags.isInConsecutiveRegsLast());
        while (!Outs[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
          ++NumParts;
        StoreSize *= NumParts;
      }

      // Spill the scalable value to a stack slot and pass its address instead.
      MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
      Type *Ty = EVT(VA.getValVT()).getTypeForEVT(*DAG.getContext());
      Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
      int FI = MFI.CreateStackObject(StoreSize, Alignment, false);
      MFI.setStackID(FI, TargetStackID::ScalableVector);

      MachinePointerInfo MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
      SDValue Ptr = DAG.getFrameIndex(
          FI, DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()));
      SDValue SpillSlot = Ptr;

      // Ensure we generate all stores for each tuple part, whilst updating the
      // pointer after each store correctly using vscale.
      while (NumParts) {
        Chain = DAG.getStore(Chain, DL, OutVals[i], Ptr, MPI);
        NumParts--;
        if (NumParts > 0) {
          SDValue BytesIncrement = DAG.getVScale(
              DL, Ptr.getValueType(),
              APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize));
          SDNodeFlags Flags;
          Flags.setNoUnsignedWrap(true);

          MPI = MachinePointerInfo(MPI.getAddrSpace());
          Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
                            BytesIncrement, Flags);
          ExtraArgLocs++;
          i++;
        }
      }

      Arg = SpillSlot;
      break;
    }

    if (VA.isRegLoc()) {
      if (i == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
          Outs[0].VT == MVT::i64) {
        assert(VA.getLocVT() == MVT::i64 &&
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i64 &&
               "unexpected use of 'returned'");
        IsThisReturn = true;
      }
      if (RegsUsed.count(VA.getLocReg())) {
        // If this register has already been used then we're trying to pack
        // parts of an [N x i32] into an X-register. The extension type will
        // take care of putting the two halves in the right place but we have to
        // combine them.
        SDValue &Bits =
            llvm::find_if(RegsToPass,
                          [=](const std::pair &Elt) {
                            return Elt.first == VA.getLocReg();
                          })
                ->second;
        Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg);
        // Call site info is used for function's parameter entry value
        // tracking. For now we track only simple cases when parameter
        // is transferred through whole register.
        llvm::erase_if(CSInfo, [&VA](MachineFunction::ArgRegPair ArgReg) {
          return ArgReg.Reg == VA.getLocReg();
        });
      } else {
        RegsToPass.emplace_back(VA.getLocReg(), Arg);
        RegsUsed.insert(VA.getLocReg());
        const TargetOptions &Options = DAG.getTarget().Options;
        if (Options.EmitCallSiteInfo)
          CSInfo.emplace_back(VA.getLocReg(), i);
      }
    } else {
      assert(VA.isMemLoc());

      SDValue DstAddr;
      MachinePointerInfo DstInfo;

      // FIXME: This works on big-endian for composite byvals, which are the
      // common case. It should also work for fundamental types too.
      uint32_t BEAlign = 0;
      unsigned OpSize;
      if (VA.getLocInfo() == CCValAssign::Indirect ||
          VA.getLocInfo() == CCValAssign::Trunc)
        OpSize = VA.getLocVT().getFixedSizeInBits();
      else
        OpSize = Flags.isByVal() ? Flags.getByValSize() * 8
                                 : VA.getValVT().getSizeInBits();
      OpSize = (OpSize + 7) / 8;
      if (!Subtarget->isLittleEndian() && !Flags.isByVal() &&
          !Flags.isInConsecutiveRegs()) {
        // Big-endian: small values occupy the high bytes of the slot.
        if (OpSize < 8)
          BEAlign = 8 - OpSize;
      }
      unsigned LocMemOffset = VA.getLocMemOffset();
      int32_t Offset = LocMemOffset + BEAlign;
      SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
      PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);

      if (IsTailCall) {
        Offset = Offset + FPDiff;
        int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);

        DstAddr = DAG.getFrameIndex(FI, PtrVT);
        DstInfo = MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);

        // Make sure any stack arguments overlapping with where we're storing
        // are loaded before this eventual operation. Otherwise they'll be
        // clobbered.
        Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
      } else {
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);

        DstAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
        DstInfo = DAG.getMachineFunction(), LocMemOffset == 0
                      ? MachinePointerInfo::getStack(DAG.getMachineFunction(),
                                                     LocMemOffset)
                      : MachinePointerInfo::getStack(DAG.getMachineFunction(),
                                                     LocMemOffset);
      }

      if (Outs[i].Flags.isByVal()) {
        SDValue SizeNode =
            DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i64);
        SDValue Cpy = DAG.getMemcpy(
            Chain, DL, DstAddr, Arg, SizeNode,
            Outs[i].Flags.getNonZeroByValAlign(),
            /*isVol = */ false, /*AlwaysInline = */ false,
            /*isTailCall = */ false, DstInfo, MachinePointerInfo());

        MemOpChains.push_back(Cpy);
      } else {
        // Since we pass i1/i8/i16 as i1/i8/i16 on stack and Arg is already
        // promoted to a legal register type i32, we should truncate Arg back to
        // i1/i8/i16.
        if (VA.getValVT() == MVT::i1 || VA.getValVT() == MVT::i8 ||
            VA.getValVT() == MVT::i16)
          Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

        SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
        MemOpChains.push_back(Store);
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (auto &RegToPass : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
                             RegToPass.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (auto *G = dyn_cast(Callee)) {
    auto GV = G->getGlobal();
    unsigned OpFlags =
        Subtarget->classifyGlobalFunctionReference(GV, getTargetMachine());
    if (OpFlags & AArch64II::MO_GOT) {
      Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
      Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
    } else {
      const GlobalValue *GV = G->getGlobal();
      Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
    }
  } else if (auto *S = dyn_cast(Callee)) {
    if (getTargetMachine().getCodeModel() == CodeModel::Large &&
        Subtarget->isTargetMachO()) {
      const char *Sym = S->getSymbol();
      Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT);
      Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
    } else {
      const char *Sym = S->getSymbol();
      Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0);
    }
  }

  // We don't usually want to end the call-sequence here because we would tidy
  // the frame up *after* the call, however in the ABI-changing tail-call case
  // we've carefully laid out the parameters so that when sp is reset they'll be
  // in the correct location.
  if (IsTailCall && !IsSibCall) {
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
                               DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
    InFlag = Chain.getValue(1);
  }

  std::vector Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (IsTailCall) {
    // Each tail call may have to adjust the stack by a different amount, so
    // this information must travel along with the operation for eventual
    // consumption by emitEpilogue.
    Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
  }

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (auto &RegToPass : RegsToPass)
    Ops.push_back(DAG.getRegister(RegToPass.first,
                                  RegToPass.second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const uint32_t *Mask;
  const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
  if (IsThisReturn) {
    // For 'this' returns, use the X0-preserving mask if applicable
    Mask = TRI->getThisReturnPreservedMask(MF, CallConv);
    if (!Mask) {
      IsThisReturn = false;
      Mask = TRI->getCallPreservedMask(MF, CallConv);
    }
  } else
    Mask = TRI->getCallPreservedMask(MF, CallConv);

  if (Subtarget->hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  // If we're doing a tail call, use a TC_RETURN here rather than an
  // actual call instruction.
  if (IsTailCall) {
    MF.getFrameInfo().setHasTailCall();
    SDValue Ret = DAG.getNode(AArch64ISD::TC_RETURN, DL, NodeTys, Ops);
    DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
    return Ret;
  }

  unsigned CallOpc = AArch64ISD::CALL;

  // Calls with operand bundle "clang.arc.attachedcall" are special. They should
  // be expanded to the call, directly followed by a special marker sequence.
  // Use the CALL_RVMARKER to do that.
  if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) {
    assert(!IsTailCall &&
           "tail calls cannot be marked with clang.arc.attachedcall");
    CallOpc = AArch64ISD::CALL_RVMARKER;
  }

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, DL, NodeTys, Ops);
  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
  InFlag = Chain.getValue(1);
  DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));

  uint64_t CalleePopBytes =
      DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
                             DAG.getIntPtrConstant(CalleePopBytes, DL, true),
                             InFlag, DL);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
                         InVals, IsThisReturn,
                         IsThisReturn ? OutVals[0] : SDValue());
}

/// Check whether the return values described by \p Outs can all be lowered in
/// registers by the return calling convention (no demotion to sret needed).
bool AArch64TargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl &Outs, LLVMContext &Context) const {
  CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
  SmallVector RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC);
}

/// Lower a function return: copy each result into its assigned physical
/// register (converting to the location type first) and emit RET_FLAG.
SDValue
AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool isVarArg,
                                   const SmallVectorImpl &Outs,
                                   const SmallVectorImpl &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {
  auto &MF = DAG.getMachineFunction();
  auto *FuncInfo = MF.getInfo();

  CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
  SmallVector RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC);

  // Copy the result values into the output registers.
SDValue Flag;
  SmallVector, 4> RetVals;
  SmallSet RegsUsed;
  for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue Arg = OutVals[realRVLocIdx];

    // Convert each value to its assigned location type.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      if (Outs[i].ArgVT == MVT::i1) {
        // AAPCS requires i1 to be zero-extended to i8 by the producer of the
        // value. This is strictly redundant on Darwin (which uses "zeroext
        // i1"), but will be optimised out before ISel.
        Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
        Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      }
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
    case CCValAssign::ZExt:
      Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
      break;
    case CCValAssign::AExtUpper:
      assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits");
      Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
      Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg,
                        DAG.getConstant(32, DL, VA.getLocVT()));
      break;
    }

    // Two values packed into one register ([N x i32] halves): OR them
    // together rather than emitting a second CopyToReg of the same physreg.
    if (RegsUsed.count(VA.getLocReg())) {
      SDValue &Bits =
          llvm::find_if(RetVals, [=](const std::pair &Elt) {
            return Elt.first == VA.getLocReg();
          })->second;
      Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg);
    } else {
      RetVals.emplace_back(VA.getLocReg(), Arg);
      RegsUsed.insert(VA.getLocReg());
    }
  }

  SmallVector RetOps(1, Chain);
  for (auto &RetVal : RetVals) {
    Chain = DAG.getCopyToReg(Chain, DL, RetVal.first, RetVal.second, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(
        DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
  }

  // Windows AArch64 ABIs require that for returning structs by value we copy
  // the sret argument into X0 for the return.
  // We saved the argument into a virtual register in the entry block,
  // so now we copy the value out and into X0.
  if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
    SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,
                                     getPointerTy(MF.getDataLayout()));

    unsigned RetValReg = AArch64::X0;
    Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Flag);
    Flag = Chain.getValue(1);

    RetOps.push_back(
        DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
  }

  // Mark any callee-saved registers returned via copy (e.g. for
  // swifterror-style conventions) as live-out of the return.
  const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *I = TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {
      if (AArch64::GPR64RegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i64));
      else if (AArch64::FPR64RegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(AArch64ISD::RET_FLAG, DL, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

// getTargetNode: wrap each kind of address SDNode in its "target" form, which
// legalization leaves alone. One overload per node kind.

SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {
  return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty,
                                    N->getOffset(), Flag);
}

SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
}

SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flag);
}

SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
}

// (loadGOT sym)
template
SDValue
AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
                              unsigned Flags) const {
  LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n");
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT | Flags);
  // FIXME: Once remat is capable of dealing with instructions with register
  // operands, expand this into two nodes instead of using a wrapper node.
  return DAG.getNode(AArch64ISD::LOADgot, DL, Ty, GotAddr);
}

// (wrapper %highest(sym), %higher(sym), %hi(sym), %lo(sym))
// Large code model: materialize the full 64-bit address with a MOVZ/MOVK
// sequence wrapped in one node.
template
SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
                                            unsigned Flags) const {
  LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n");
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  const unsigned char MO_NC = AArch64II::MO_NC;
  return DAG.getNode(
      AArch64ISD::WrapperLarge, DL, Ty,
      getTargetNode(N, Ty, DAG, AArch64II::MO_G3 | Flags),
      getTargetNode(N, Ty, DAG, AArch64II::MO_G2 | MO_NC | Flags),
      getTargetNode(N, Ty, DAG, AArch64II::MO_G1 | MO_NC | Flags),
      getTargetNode(N, Ty, DAG, AArch64II::MO_G0 | MO_NC | Flags));
}

// (addlow (adrp %hi(sym)) %lo(sym))
// Small code model: page address via ADRP plus a low-12-bits offset.
template
SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                       unsigned Flags) const {
  LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n");
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  SDValue Hi = getTargetNode(N, Ty, DAG, AArch64II::MO_PAGE | Flags);
  SDValue Lo = getTargetNode(N, Ty, DAG,
                             AArch64II::MO_PAGEOFF | AArch64II::MO_NC | Flags);
  SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, Ty, Hi);
  return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo);
}

// (adr sym)
// Tiny code model: a single PC-relative ADR reaches the symbol directly.
template
SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG,
                                           unsigned Flags) const {
  LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrTiny\n");
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  SDValue Sym = getTargetNode(N, Ty, DAG, Flags);
  return DAG.getNode(AArch64ISD::ADR, DL, Ty, Sym);
}

/// Lower a global address by picking the addressing sequence for the current
/// code model (GOT load / large / tiny / small), then add an extra load for
/// dllimport/COFF-stub indirection on Windows targets.
SDValue
AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
                                          SelectionDAG &DAG) const {
  GlobalAddressSDNode *GN = cast(Op);
  const GlobalValue *GV = GN->getGlobal();
  unsigned OpFlags = Subtarget->ClassifyGlobalReference(GV, getTargetMachine());

  if (OpFlags != AArch64II::MO_NO_FLAG)
    assert(cast(Op)->getOffset() == 0 &&
           "unexpected offset in global node");

  // This also catches the large code model case for Darwin, and tiny code
  // model with got relocations.
  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    return getGOT(GN, DAG, OpFlags);
  }

  SDValue Result;
  if (getTargetMachine().getCodeModel() == CodeModel::Large) {
    Result = getAddrLarge(GN, DAG, OpFlags);
  } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
    Result = getAddrTiny(GN, DAG, OpFlags);
  } else {
    Result = getAddr(GN, DAG, OpFlags);
  }
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc DL(GN);
  // dllimport / COFF-stub symbols hold the address of the real global, so
  // load through the computed pointer.
  if (OpFlags & (AArch64II::MO_DLLIMPORT | AArch64II::MO_COFFSTUB))
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  return Result;
}

/// Convert a TLS address reference into the correct sequence of loads
/// and calls to compute the variable's address (for Darwin, currently) and
/// return an SDValue containing the final node.

/// Darwin only has one TLS scheme which must be capable of dealing with the
/// fully general situation, in the worst case. This means:
///     + "extern __thread" declaration.
///     + Defined in a possibly unknown dynamic library.
///
/// The general system is that each __thread variable has a [3 x i64] descriptor
/// which contains information used by the runtime to calculate the address. The
/// only part of this the compiler needs to know about is the first xword, which
/// contains a function pointer that must be called with the address of the
/// entire descriptor in "x0".
///
/// Since this descriptor may be in a different unit, in general even the
/// descriptor must be accessed via an indirect load.
/// The "ideal" code sequence is:
///     adrp x0, _var@TLVPPAGE
///     ldr x0, [x0, _var@TLVPPAGEOFF]   ; x0 now contains address of descriptor
///     ldr x1, [x0]                     ; x1 contains 1st entry of descriptor,
///                                      ; the function pointer
///     blr x1                           ; Uses descriptor address in x0
///     ; Address of _var is now in x0.
///
/// If the address of _var's descriptor *is* known to the linker, then it can
/// change the first "ldr" instruction to an appropriate "add x0, x0, #imm" for
/// a slight efficiency gain.
SDValue
AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin() &&
         "This function expects a Darwin target");

  SDLoc DL(Op);
  MVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT PtrMemVT = getPointerMemTy(DAG.getDataLayout());
  const GlobalValue *GV = cast(Op)->getGlobal();

  SDValue TLVPAddr =
      DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
  SDValue DescAddr = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TLVPAddr);

  // The first entry in the descriptor is a function pointer that we must call
  // to obtain the address of the variable.
  SDValue Chain = DAG.getEntryNode();
  SDValue FuncTLVGet = DAG.getLoad(
      PtrMemVT, DL, Chain, DescAddr,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()),
      Align(PtrMemVT.getSizeInBits() / 8),
      MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
  Chain = FuncTLVGet.getValue(1);

  // Extend loaded pointer if necessary (i.e. if ILP32) to DAG pointer.
  FuncTLVGet = DAG.getZExtOrTrunc(FuncTLVGet, DL, PtrVT);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setAdjustsStack(true);

  // TLS calls preserve all registers except those that absolutely must be
  // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
  // silly).
  const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask = TRI->getTLSCallPreservedMask();
  if (Subtarget->hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask);

  // Finally, we can make the call. This is just a degenerate version of a
  // normal AArch64 call node: x0 takes the address of the descriptor, and
  // returns the address of the variable in this thread.
  Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, DescAddr, SDValue());
  Chain =
      DAG.getNode(AArch64ISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
                  Chain, FuncTLVGet, DAG.getRegister(AArch64::X0, MVT::i64),
                  DAG.getRegisterMask(Mask), Chain.getValue(1));
  return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Chain.getValue(1));
}

/// Convert a thread-local variable reference into a sequence of instructions to
/// compute the variable's address for the local exec TLS model of ELF targets.
/// The sequence depends on the maximum TLS area size.
SDValue AArch64TargetLowering::LowerELFTLSLocalExec(const GlobalValue *GV,
                                                    SDValue ThreadBase,
                                                    const SDLoc &DL,
                                                    SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue TPOff, Addr;

  switch (DAG.getTarget().Options.TLSSize) {
  default:
    llvm_unreachable("Unexpected TLS size");

  case 12: {
    // Offsets fit in 12 bits: a single ADD off the thread pointer.
    // mrs   x0, TPIDR_EL0
    // add   x0, x0, :tprel_lo12:a
    SDValue Var = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
    return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
                                      Var,
                                      DAG.getTargetConstant(0, DL, MVT::i32)),
                   0);
  }

  case 24: {
    // Offsets fit in 24 bits: high-12 then low-12 ADDs.
    // mrs   x0, TPIDR_EL0
    // add   x0, x0, :tprel_hi12:a
    // add   x0, x0, :tprel_lo12_nc:a
    SDValue HiVar = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
    SDValue LoVar = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, 0,
        AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    Addr = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
                                      HiVar,
                                      DAG.getTargetConstant(0, DL, MVT::i32)),
                   0);
    return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, Addr,
                                      LoVar,
                                      DAG.getTargetConstant(0, DL, MVT::i32)),
                   0);
  }

  case 32: {
    // Offsets fit in 32 bits: materialize with MOVZ/MOVK then add.
    // mrs   x1, TPIDR_EL0
    // movz  x0, #:tprel_g1:a
    // movk  x0, #:tprel_g0_nc:a
    // add   x0, x1, x0
    SDValue HiVar = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G1);
    SDValue LoVar = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, 0,
        AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
                                       DAG.getTargetConstant(16, DL, MVT::i32)),
                    0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
                                       DAG.getTargetConstant(0, DL, MVT::i32)),
                    0);
    return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
  }

  case 48: {
    // Offsets fit in 48 bits: three-instruction MOVZ/MOVK/MOVK materialize.
    // mrs   x1, TPIDR_EL0
    // movz  x0, #:tprel_g2:a
    // movk  x0, #:tprel_g1_nc:a
    // movk  x0, #:tprel_g0_nc:a
    // add   x0, x1, x0
    SDValue HiVar = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G2);
    SDValue MiVar = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, 0,
        AArch64II::MO_TLS | AArch64II::MO_G1 | AArch64II::MO_NC);
    SDValue LoVar = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, 0,
        AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
                                       DAG.getTargetConstant(32, DL, MVT::i32)),
                    0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, MiVar,
                                       DAG.getTargetConstant(16, DL, MVT::i32)),
                    0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
                                       DAG.getTargetConstant(0, DL, MVT::i32)),
                    0);
    return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
  }
  }
}

/// When accessing thread-local variables under either the general-dynamic or
/// local-dynamic system, we make a "TLS-descriptor" call. The variable will
/// have a descriptor, accessible via a PC-relative ADRP, and whose first entry
/// is a function pointer to carry out the resolution.
///
/// The sequence is:
///    adrp  x0, :tlsdesc:var
///    ldr   x1, [x0, #:tlsdesc_lo12:var]
///    add   x0, x0, #:tlsdesc_lo12:var
///    .tlsdesccall var
///    blr   x1
///    (TPIDR_EL0 offset now in x0)
///
/// The above sequence must be produced unscheduled, to enable the linker to
/// optimize/relax this sequence.
/// Therefore, a pseudo-instruction (TLSDESC_CALLSEQ) is used to represent the
/// above sequence, and expanded really late in the compilation flow, to ensure
/// the sequence is produced as per above.
SDValue
AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Chain = DAG.getEntryNode();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  Chain =
      DAG.getNode(AArch64ISD::TLSDESC_CALLSEQ, DL, NodeTys, {Chain, SymAddr});
  SDValue Glue = Chain.getValue(1);

  // The descriptor call leaves the TPIDR_EL0 offset in x0.
  return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
}

/// Lower a TLS global address for ELF targets, selecting the code sequence
/// according to the TLS model (local-exec, initial-exec, local-dynamic or
/// general-dynamic) of the referenced variable.
SDValue
AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Subtarget->isTargetELF() && "This function expects an ELF target");

  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());

  if (!EnableAArch64ELFLocalDynamicTLSGeneration) {
    if (Model == TLSModel::LocalDynamic)
      Model = TLSModel::GeneralDynamic;
  }

  if (getTargetMachine().getCodeModel() == CodeModel::Large &&
      Model != TLSModel::LocalExec)
    report_fatal_error("ELF TLS only supported in small memory model or "
                       "in local exec TLS model");
  // Different choices can be made for the maximum size of the TLS area for a
  // module. For the small address model, the default TLS size is 16MiB and the
  // maximum TLS size is 4GiB.
  // FIXME: add tiny and large code model support for TLS access models other
  // than local exec. We currently generate the same code as small for tiny,
  // which may be larger than needed.

  SDValue TPOff;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc DL(Op);
  const GlobalValue *GV = GA->getGlobal();

  SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);

  if (Model == TLSModel::LocalExec) {
    return LowerELFTLSLocalExec(GV, ThreadBase, DL, DAG);
  } else if (Model == TLSModel::InitialExec) {
    TPOff = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
    TPOff = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TPOff);
  } else if (Model == TLSModel::LocalDynamic) {
    // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
    // descriptor call against the special symbol _TLS_MODULE_BASE_ to
    // calculate the beginning of the module's TLS region, followed by a
    // DTPREL offset calculation.

    // These accesses will need deduplicating if there's more than one.
    AArch64FunctionInfo *MFI =
        DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
    MFI->incNumLocalDynamicTLSAccesses();

    // The call needs a relocation too for linker relaxation. It doesn't make
    // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy
    // of the address.
    SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
                                                  AArch64II::MO_TLS);

    // Now we can calculate the offset from TPIDR_EL0 to this module's
    // thread-local area.
    TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);

    // Now use :dtprel_whatever: operations to calculate this variable's
    // offset in its thread-storage area.
    SDValue HiVar = DAG.getTargetGlobalAddress(
        GV, DL, MVT::i64, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
    SDValue LoVar = DAG.getTargetGlobalAddress(
        GV, DL, MVT::i64, 0,
        AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

    TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, HiVar,
                                       DAG.getTargetConstant(0, DL, MVT::i32)),
                    0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, LoVar,
                                       DAG.getTargetConstant(0, DL, MVT::i32)),
                    0);
  } else if (Model == TLSModel::GeneralDynamic) {
    // The call needs a relocation too for linker relaxation. It doesn't make
    // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy
    // of the address.
    SDValue SymAddr =
        DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);

    // Finally we can make a call to calculate the offset from tpidr_el0.
    TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
  } else
    llvm_unreachable("Unsupported ELF TLS access model");

  return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
}

/// Lower a TLS global address for Windows: index into the TLS array pointed
/// to by the TEB, then add the variable's offset within the .tls section.
SDValue
AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op,
                                                    SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");

  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc DL(Op);

  SDValue TEB = DAG.getRegister(AArch64::X18, MVT::i64);

  // Load the ThreadLocalStoragePointer from the TEB
  // A pointer to the TLS array is located at offset 0x58 from the TEB.
  SDValue TLSArray =
      DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x58, DL));
  TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
  Chain = TLSArray.getValue(1);

  // Load the TLS index from the C runtime;
  // This does the same as getAddr(), but without having a GlobalAddressSDNode.
  // This also does the same as LOADgot, but using a generic i32 load,
  // while LOADgot only loads i64.
  SDValue TLSIndexHi =
      DAG.getTargetExternalSymbol("_tls_index", PtrVT, AArch64II::MO_PAGE);
  SDValue TLSIndexLo = DAG.getTargetExternalSymbol(
      "_tls_index", PtrVT, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, TLSIndexHi);
  SDValue TLSIndex =
      DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, TLSIndexLo);
  TLSIndex = DAG.getLoad(MVT::i32, DL, Chain, TLSIndex, MachinePointerInfo());
  Chain = TLSIndex.getValue(1);

  // The pointer to the thread's TLS data area is at the TLS Index scaled by 8
  // offset into the TLSArray.
  TLSIndex = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TLSIndex);
  SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
                             DAG.getConstant(3, DL, PtrVT));
  SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
                            DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
                            MachinePointerInfo());
  Chain = TLS.getValue(1);

  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GA->getGlobal();
  SDValue TGAHi = DAG.getTargetGlobalAddress(
      GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
  SDValue TGALo = DAG.getTargetGlobalAddress(
      GV, DL, PtrVT, 0,
      AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

  // Add the offset from the start of the .tls section (section base).
  SDValue Addr =
      SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TLS, TGAHi,
                                 DAG.getTargetConstant(0, DL, MVT::i32)),
              0);
  Addr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, Addr, TGALo);
  return Addr;
}

/// Dispatch TLS global-address lowering to the platform-specific handler
/// (emulated TLS, Darwin, ELF or Windows).
SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                     SelectionDAG &DAG) const {
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  if (Subtarget->isTargetDarwin())
    return LowerDarwinGlobalTLSAddress(Op, DAG);
  if (Subtarget->isTargetELF())
    return LowerELFGlobalTLSAddress(Op, DAG);
  if (Subtarget->isTargetWindows())
    return LowerWindowsGlobalTLSAddress(Op, DAG);

  llvm_unreachable("Unexpected platform trying to use TLS");
}

// Looks through \param Val to determine the bit that can be used to
// check the sign of the value. It returns the unextended value and
// the sign bit position.
std::pair lookThroughSignExtension(SDValue Val) { if (Val.getOpcode() == ISD::SIGN_EXTEND_INREG) return {Val.getOperand(0), cast(Val.getOperand(1))->getVT().getFixedSizeInBits() - 1}; if (Val.getOpcode() == ISD::SIGN_EXTEND) return {Val.getOperand(0), Val.getOperand(0)->getValueType(0).getFixedSizeInBits() - 1}; return {Val, Val.getValueSizeInBits() - 1}; } SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); ISD::CondCode CC = cast(Op.getOperand(1))->get(); SDValue LHS = Op.getOperand(2); SDValue RHS = Op.getOperand(3); SDValue Dest = Op.getOperand(4); SDLoc dl(Op); MachineFunction &MF = DAG.getMachineFunction(); // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions // will not be produced, as they are conditional branch instructions that do // not set flags. bool ProduceNonFlagSettingCondBr = !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening); // Handle f128 first, since lowering it will result in comparing the return // value of a libcall against zero, which is just what the rest of LowerBR_CC // is expecting to deal with. if (LHS.getValueType() == MVT::f128) { softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS); // If softenSetCCOperands returned a scalar, we need to compare the result // against zero to select between true and false values. if (!RHS.getNode()) { RHS = DAG.getConstant(0, dl, LHS.getValueType()); CC = ISD::SETNE; } } // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch // instruction. if (ISD::isOverflowIntrOpRes(LHS) && isOneConstant(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE)) { // Only lower legal XALUO ops. if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0))) return SDValue(); // The actual operation with overflow check. 
AArch64CC::CondCode OFCC; SDValue Value, Overflow; std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, LHS.getValue(0), DAG); if (CC == ISD::SETNE) OFCC = getInvertedCondCode(OFCC); SDValue CCVal = DAG.getConstant(OFCC, dl, MVT::i32); return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, Overflow); } if (LHS.getValueType().isInteger()) { assert((LHS.getValueType() == RHS.getValueType()) && (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64)); // If the RHS of the comparison is zero, we can potentially fold this // to a specialized branch. const ConstantSDNode *RHSC = dyn_cast(RHS); if (RHSC && RHSC->getZExtValue() == 0 && ProduceNonFlagSettingCondBr) { if (CC == ISD::SETEQ) { // See if we can use a TBZ to fold in an AND as well. // TBZ has a smaller branch displacement than CBZ. If the offset is // out of bounds, a late MI-layer pass rewrites branches. // 403.gcc is an example that hits this case. if (LHS.getOpcode() == ISD::AND && isa(LHS.getOperand(1)) && isPowerOf2_64(LHS.getConstantOperandVal(1))) { SDValue Test = LHS.getOperand(0); uint64_t Mask = LHS.getConstantOperandVal(1); return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, Test, DAG.getConstant(Log2_64(Mask), dl, MVT::i64), Dest); } return DAG.getNode(AArch64ISD::CBZ, dl, MVT::Other, Chain, LHS, Dest); } else if (CC == ISD::SETNE) { // See if we can use a TBZ to fold in an AND as well. // TBZ has a smaller branch displacement than CBZ. If the offset is // out of bounds, a late MI-layer pass rewrites branches. // 403.gcc is an example that hits this case. 
if (LHS.getOpcode() == ISD::AND && isa(LHS.getOperand(1)) && isPowerOf2_64(LHS.getConstantOperandVal(1))) { SDValue Test = LHS.getOperand(0); uint64_t Mask = LHS.getConstantOperandVal(1); return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, Test, DAG.getConstant(Log2_64(Mask), dl, MVT::i64), Dest); } return DAG.getNode(AArch64ISD::CBNZ, dl, MVT::Other, Chain, LHS, Dest); } else if (CC == ISD::SETLT && LHS.getOpcode() != ISD::AND) { // Don't combine AND since emitComparison converts the AND to an ANDS // (a.k.a. TST) and the test in the test bit and branch instruction // becomes redundant. This would also increase register pressure. uint64_t SignBitPos; std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS); return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, LHS, DAG.getConstant(SignBitPos, dl, MVT::i64), Dest); } } if (RHSC && RHSC->getSExtValue() == -1 && CC == ISD::SETGT && LHS.getOpcode() != ISD::AND && ProduceNonFlagSettingCondBr) { // Don't combine AND since emitComparison converts the AND to an ANDS // (a.k.a. TST) and the test in the test bit and branch instruction // becomes redundant. This would also increase register pressure. uint64_t SignBitPos; std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS); return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, LHS, DAG.getConstant(SignBitPos, dl, MVT::i64), Dest); } SDValue CCVal; SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl); return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, Cmp); } assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::bf16 || LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally // clean. Some of them require two branches to implement. 
SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG); AArch64CC::CondCode CC1, CC2; changeFPCCToAArch64CC(CC, CC1, CC2); SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32); SDValue BR1 = DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CC1Val, Cmp); if (CC2 != AArch64CC::AL) { SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32); return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, BR1, Dest, CC2Val, Cmp); } return BR1; } SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); SDLoc DL(Op); SDValue In1 = Op.getOperand(0); SDValue In2 = Op.getOperand(1); EVT SrcVT = In2.getValueType(); if (SrcVT.bitsLT(VT)) In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2); else if (SrcVT.bitsGT(VT)) In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2, DAG.getIntPtrConstant(0, DL)); EVT VecVT; uint64_t EltMask; SDValue VecVal1, VecVal2; auto setVecVal = [&] (int Idx) { if (!VT.isVector()) { VecVal1 = DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In1); VecVal2 = DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In2); } else { VecVal1 = DAG.getNode(ISD::BITCAST, DL, VecVT, In1); VecVal2 = DAG.getNode(ISD::BITCAST, DL, VecVT, In2); } }; if (VT == MVT::f32 || VT == MVT::v2f32 || VT == MVT::v4f32) { VecVT = (VT == MVT::v2f32 ? MVT::v2i32 : MVT::v4i32); EltMask = 0x80000000ULL; setVecVal(AArch64::ssub); } else if (VT == MVT::f64 || VT == MVT::v2f64) { VecVT = MVT::v2i64; // We want to materialize a mask with the high bit set, but the AdvSIMD // immediate moves cannot materialize that in a single instruction for // 64-bit elements. Instead, materialize zero and then negate it. EltMask = 0; setVecVal(AArch64::dsub); } else if (VT == MVT::f16 || VT == MVT::v4f16 || VT == MVT::v8f16) { VecVT = (VT == MVT::v4f16 ? 
MVT::v4i16 : MVT::v8i16); EltMask = 0x8000ULL; setVecVal(AArch64::hsub); } else { llvm_unreachable("Invalid type for copysign!"); } SDValue BuildVec = DAG.getConstant(EltMask, DL, VecVT); // If we couldn't materialize the mask above, then the mask vector will be // the zero vector, and we need to negate it here. if (VT == MVT::f64 || VT == MVT::v2f64) { BuildVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, BuildVec); BuildVec = DAG.getNode(ISD::FNEG, DL, MVT::v2f64, BuildVec); BuildVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, BuildVec); } SDValue Sel = DAG.getNode(AArch64ISD::BIT, DL, VecVT, VecVal1, VecVal2, BuildVec); if (VT == MVT::f16) return DAG.getTargetExtractSubreg(AArch64::hsub, DL, VT, Sel); if (VT == MVT::f32) return DAG.getTargetExtractSubreg(AArch64::ssub, DL, VT, Sel); else if (VT == MVT::f64) return DAG.getTargetExtractSubreg(AArch64::dsub, DL, VT, Sel); else return DAG.getNode(ISD::BITCAST, DL, VT, Sel); } SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const { if (DAG.getMachineFunction().getFunction().hasFnAttribute( Attribute::NoImplicitFloat)) return SDValue(); if (!Subtarget->hasNEON()) return SDValue(); // While there is no integer popcount instruction, it can // be more efficiently lowered to the following sequence that uses // AdvSIMD registers/instructions as long as the copies to/from // the AdvSIMD registers are cheap. 
// FMOV D0, X0 // copy 64-bit int to vector, high bits zero'd // CNT V0.8B, V0.8B // 8xbyte pop-counts // ADDV B0, V0.8B // sum 8xbyte pop-counts // UMOV X0, V0.B[0] // copy byte result back to integer reg SDValue Val = Op.getOperand(0); SDLoc DL(Op); EVT VT = Op.getValueType(); if (VT == MVT::i32 || VT == MVT::i64) { if (VT == MVT::i32) Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val); Val = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val); SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val); SDValue UaddLV = DAG.getNode( ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32, DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop); if (VT == MVT::i64) UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV); return UaddLV; } else if (VT == MVT::i128) { Val = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Val); SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v16i8, Val); SDValue UaddLV = DAG.getNode( ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32, DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop); return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i128, UaddLV); } if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTPOP_MERGE_PASSTHRU); assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 || VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) && "Unexpected type for custom ctpop lowering"); EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; Val = DAG.getBitcast(VT8Bit, Val); Val = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Val); // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds. unsigned EltSize = 8; unsigned NumElts = VT.is64BitVector() ? 
8 : 16; while (EltSize != VT.getScalarSizeInBits()) { EltSize *= 2; NumElts /= 2; MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts); Val = DAG.getNode( ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, DAG.getConstant(Intrinsic::aarch64_neon_uaddlp, DL, MVT::i32), Val); } return Val; } SDValue AArch64TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); assert(VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT, /*OverrideNEON=*/true)); SDLoc DL(Op); SDValue RBIT = DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(0)); return DAG.getNode(ISD::CTLZ, DL, VT, RBIT); } SDValue AArch64TargetLowering::LowerBitreverse(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT, /*OverrideNEON=*/true)) return LowerToPredicatedOp(Op, DAG, AArch64ISD::BITREVERSE_MERGE_PASSTHRU, true); SDLoc DL(Op); SDValue REVB; MVT VST; switch (VT.getSimpleVT().SimpleTy) { default: llvm_unreachable("Invalid type for bitreverse!"); case MVT::v2i32: { VST = MVT::v8i8; REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0)); break; } case MVT::v4i32: { VST = MVT::v16i8; REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0)); break; } case MVT::v1i64: { VST = MVT::v8i8; REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0)); break; } case MVT::v2i64: { VST = MVT::v16i8; REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0)); break; } } return DAG.getNode(AArch64ISD::NVCAST, DL, VT, DAG.getNode(ISD::BITREVERSE, DL, VST, REVB)); } SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { if (Op.getValueType().isVector()) return LowerVSETCC(Op, DAG); bool IsStrict = Op->isStrictFPOpcode(); bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS; unsigned OpNo = IsStrict ? 
1 : 0; SDValue Chain; if (IsStrict) Chain = Op.getOperand(0); SDValue LHS = Op.getOperand(OpNo + 0); SDValue RHS = Op.getOperand(OpNo + 1); ISD::CondCode CC = cast(Op.getOperand(OpNo + 2))->get(); SDLoc dl(Op); // We chose ZeroOrOneBooleanContents, so use zero and one. EVT VT = Op.getValueType(); SDValue TVal = DAG.getConstant(1, dl, VT); SDValue FVal = DAG.getConstant(0, dl, VT); // Handle f128 first, since one possible outcome is a normal integer // comparison which gets picked up by the next if statement. if (LHS.getValueType() == MVT::f128) { softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS, Chain, IsSignaling); // If softenSetCCOperands returned a scalar, use it. if (!RHS.getNode()) { assert(LHS.getValueType() == Op.getValueType() && "Unexpected setcc expansion!"); return IsStrict ? DAG.getMergeValues({LHS, Chain}, dl) : LHS; } } if (LHS.getValueType().isInteger()) { SDValue CCVal; SDValue Cmp = getAArch64Cmp( LHS, RHS, ISD::getSetCCInverse(CC, LHS.getValueType()), CCVal, DAG, dl); // Note that we inverted the condition above, so we reverse the order of // the true and false operands here. This will allow the setcc to be // matched to a single CSINC instruction. SDValue Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp); return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res; } // Now we know we're dealing with FP values. assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); // If that fails, we'll need to perform an FCMP + CSEL sequence. Go ahead // and do the comparison. 
SDValue Cmp; if (IsStrict) Cmp = emitStrictFPComparison(LHS, RHS, dl, DAG, Chain, IsSignaling); else Cmp = emitComparison(LHS, RHS, CC, dl, DAG); AArch64CC::CondCode CC1, CC2; changeFPCCToAArch64CC(CC, CC1, CC2); SDValue Res; if (CC2 == AArch64CC::AL) { changeFPCCToAArch64CC(ISD::getSetCCInverse(CC, LHS.getValueType()), CC1, CC2); SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32); // Note that we inverted the condition above, so we reverse the order of // the true and false operands here. This will allow the setcc to be // matched to a single CSINC instruction. Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CC1Val, Cmp); } else { // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't // totally clean. Some of them require two CSELs to implement. As is in // this case, we emit the first CSEL and then emit a second using the output // of the first as the RHS. We're effectively OR'ing the two CC's together. // FIXME: It would be nice if we could match the two CSELs to two CSINCs. SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32); SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp); SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32); Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp); } return IsStrict ? DAG.getMergeValues({Res, Cmp.getValue(1)}, dl) : Res; } SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS, SDValue TVal, SDValue FVal, const SDLoc &dl, SelectionDAG &DAG) const { // Handle f128 first, because it will result in a comparison of some RTLIB // call result against zero. if (LHS.getValueType() == MVT::f128) { softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS); // If softenSetCCOperands returned a scalar, we need to compare the result // against zero to select between true and false values. 
if (!RHS.getNode()) { RHS = DAG.getConstant(0, dl, LHS.getValueType()); CC = ISD::SETNE; } } // Also handle f16, for which we need to do a f32 comparison. if (LHS.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) { LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS); RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS); } // Next, handle integers. if (LHS.getValueType().isInteger()) { assert((LHS.getValueType() == RHS.getValueType()) && (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64)); ConstantSDNode *CFVal = dyn_cast(FVal); ConstantSDNode *CTVal = dyn_cast(TVal); ConstantSDNode *RHSC = dyn_cast(RHS); // Check for sign pattern (SELECT_CC setgt, iN lhs, -1, 1, -1) and transform // into (OR (ASR lhs, N-1), 1), which requires less instructions for the // supported types. if (CC == ISD::SETGT && RHSC && RHSC->isAllOnesValue() && CTVal && CFVal && CTVal->isOne() && CFVal->isAllOnesValue() && LHS.getValueType() == TVal.getValueType()) { EVT VT = LHS.getValueType(); SDValue Shift = DAG.getNode(ISD::SRA, dl, VT, LHS, DAG.getConstant(VT.getSizeInBits() - 1, dl, VT)); return DAG.getNode(ISD::OR, dl, VT, Shift, DAG.getConstant(1, dl, VT)); } unsigned Opcode = AArch64ISD::CSEL; // If both the TVal and the FVal are constants, see if we can swap them in // order to for a CSINV or CSINC out of them. if (CTVal && CFVal && CTVal->isAllOnesValue() && CFVal->isNullValue()) { std::swap(TVal, FVal); std::swap(CTVal, CFVal); CC = ISD::getSetCCInverse(CC, LHS.getValueType()); } else if (CTVal && CFVal && CTVal->isOne() && CFVal->isNullValue()) { std::swap(TVal, FVal); std::swap(CTVal, CFVal); CC = ISD::getSetCCInverse(CC, LHS.getValueType()); } else if (TVal.getOpcode() == ISD::XOR) { // If TVal is a NOT we want to swap TVal and FVal so that we can match // with a CSINV rather than a CSEL. 
// NOTE(review): This excerpt is whitespace-mangled and angle-bracket template
// arguments appear to have been stripped by extraction (e.g. `dyn_cast(RHS)`
// was presumably `dyn_cast<ConstantSDNode>(RHS)`, `cast(Op.getOperand(4))`
// presumably `cast<CondCodeSDNode>(...)`) — confirm against upstream before
// compiling. This region is the tail of the integer/FP conditional-select
// lowering (LowerSELECT_CC taking an ISD::CondCode) whose head lies above
// this excerpt.
if (isAllOnesConstant(TVal.getOperand(1))) {
  // TVal is a NOT: swap the arms and invert the condition so the select
  // matches a CSINV instead of a CSEL.
  std::swap(TVal, FVal);
  std::swap(CTVal, CFVal);
  CC = ISD::getSetCCInverse(CC, LHS.getValueType());
}
} else if (TVal.getOpcode() == ISD::SUB) {
  // If TVal is a negation (SUB from 0) we want to swap TVal and FVal so
  // that we can match with a CSNEG rather than a CSEL.
  if (isNullConstant(TVal.getOperand(0))) {
    std::swap(TVal, FVal);
    std::swap(CTVal, CFVal);
    CC = ISD::getSetCCInverse(CC, LHS.getValueType());
  }
} else if (CTVal && CFVal) {
  const int64_t TrueVal = CTVal->getSExtValue();
  const int64_t FalseVal = CFVal->getSExtValue();
  bool Swap = false;
  // If both TVal and FVal are constants, see if FVal is the
  // inverse/negation/increment of TVal and generate a CSINV/CSNEG/CSINC
  // instead of a CSEL in that case.
  if (TrueVal == ~FalseVal) {
    Opcode = AArch64ISD::CSINV;
  } else if (FalseVal > std::numeric_limits::min() &&
             // NOTE(review): template argument stripped — presumably
             // std::numeric_limits<int64_t>::min(); guards -FalseVal overflow.
             TrueVal == -FalseVal) {
    Opcode = AArch64ISD::CSNEG;
  } else if (TVal.getValueType() == MVT::i32) {
    // If our operands are only 32-bit wide, make sure we use 32-bit
    // arithmetic for the check whether we can use CSINC. This ensures that
    // the addition in the check will wrap around properly in case there is
    // an overflow (which would not be the case if we do the check with
    // 64-bit arithmetic).
    const uint32_t TrueVal32 = CTVal->getZExtValue();
    const uint32_t FalseVal32 = CFVal->getZExtValue();
    if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
      Opcode = AArch64ISD::CSINC;
      if (TrueVal32 > FalseVal32) {
        Swap = true;
      }
    }
  // 64-bit check whether we can use CSINC.
  } else if ((TrueVal == FalseVal + 1) || (TrueVal + 1 == FalseVal)) {
    Opcode = AArch64ISD::CSINC;
    if (TrueVal > FalseVal) {
      Swap = true;
    }
  }

  // Swap TVal and FVal if necessary.
  if (Swap) {
    std::swap(TVal, FVal);
    std::swap(CTVal, CFVal);
    CC = ISD::getSetCCInverse(CC, LHS.getValueType());
  }

  if (Opcode != AArch64ISD::CSEL) {
    // Drop FVal since we can get its value by simply inverting/negating
    // TVal.
    FVal = TVal;
  }
}

// Avoid materializing a constant when possible by reusing a known value in
// a register. However, don't perform this optimization if the known value
// is one, zero or negative one in the case of a CSEL. We can always
// materialize these values using CSINC, CSEL and CSINV with wzr/xzr as the
// FVal, respectively.
ConstantSDNode *RHSVal = dyn_cast(RHS);
if (Opcode == AArch64ISD::CSEL && RHSVal && !RHSVal->isOne() &&
    !RHSVal->isNullValue() && !RHSVal->isAllOnesValue()) {
  AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
  // Transform "a == C ? C : x" to "a == C ? a : x" and "a != C ? x : C" to
  // "a != C ? x : a" to avoid materializing C.
  if (CTVal && CTVal == RHSVal && AArch64CC == AArch64CC::EQ)
    TVal = LHS;
  else if (CFVal && CFVal == RHSVal && AArch64CC == AArch64CC::NE)
    FVal = LHS;
} else if (Opcode == AArch64ISD::CSNEG && RHSVal && RHSVal->isOne()) {
  assert (CTVal && CFVal && "Expected constant operands for CSNEG.");
  // Use a CSINV to transform "a == C ? 1 : -1" to "a == C ? a : -1" to
  // avoid materializing C.
  AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
  if (CTVal == RHSVal && AArch64CC == AArch64CC::EQ) {
    Opcode = AArch64ISD::CSINV;
    TVal = LHS;
    FVal = DAG.getConstant(0, dl, FVal.getValueType());
  }
}

SDValue CCVal;
SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
EVT VT = TVal.getValueType();
// Emit the conditional select; for CSINV/CSNEG/CSINC, FVal was folded into
// TVal above.
return DAG.getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp);
}

// Now we know we're dealing with FP values.
assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
       LHS.getValueType() == MVT::f64);
assert(LHS.getValueType() == RHS.getValueType());
EVT VT = TVal.getValueType();
SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);

// Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
// clean. Some of them require two CSELs to implement.
AArch64CC::CondCode CC1, CC2;
changeFPCCToAArch64CC(CC, CC1, CC2);

if (DAG.getTarget().Options.UnsafeFPMath) {
  // Transform "a == 0.0 ? 0.0 : x" to "a == 0.0 ? a : x" and
  // "a != 0.0 ? x : 0.0" to "a != 0.0 ? x : a" to avoid materializing 0.0.
  ConstantFPSDNode *RHSVal = dyn_cast(RHS);
  if (RHSVal && RHSVal->isZero()) {
    ConstantFPSDNode *CFVal = dyn_cast(FVal);
    ConstantFPSDNode *CTVal = dyn_cast(TVal);
    if ((CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETUEQ) &&
        CTVal && CTVal->isZero() && TVal.getValueType() == LHS.getValueType())
      TVal = LHS;
    else if ((CC == ISD::SETNE || CC == ISD::SETONE || CC == ISD::SETUNE) &&
             CFVal && CFVal->isZero() &&
             FVal.getValueType() == LHS.getValueType())
      FVal = LHS;
  }
}

// Emit first, and possibly only, CSEL.
SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);

// If we need a second CSEL, emit it, using the output of the first as the
// RHS. We're effectively OR'ing the two CC's together.
if (CC2 != AArch64CC::AL) {
  SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
  return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
}

// Otherwise, return the output of the first CSEL.
return CS1;
}

/// Lower ISD::VECTOR_SPLICE: the node is kept as-is (legal) when the splice
/// index is in [-1, MinNumElts); otherwise return an empty SDValue so the
/// default expansion runs.
SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  EVT Ty = Op.getValueType();
  auto Idx = Op.getConstantOperandAPInt(2);
  if (Idx.sge(-1) && Idx.slt(Ty.getVectorMinNumElements()))
    return Op;
  return SDValue();
}

/// Unpack a SELECT_CC node's operands and defer to the CondCode-based
/// LowerSELECT_CC overload above.
SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  // NOTE(review): template argument stripped — presumably
  // cast<CondCodeSDNode>(Op.getOperand(4)).
  ISD::CondCode CC = cast(Op.getOperand(4))->get();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TVal = Op.getOperand(2);
  SDValue FVal = Op.getOperand(3);
  SDLoc DL(Op);
  return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
}

/// Lower ISD::SELECT. Scalable vectors and SVE-backed fixed-length vectors
/// become VSELECT on a splatted predicate; selects fed by an overflow
/// intrinsic become a CSEL on the overflow flag; everything else is
/// re-expressed as a SELECT_CC.
SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue CCVal = Op->getOperand(0);
  SDValue TVal = Op->getOperand(1);
  SDValue FVal = Op->getOperand(2);
  SDLoc DL(Op);

  EVT Ty = Op.getValueType();
  if (Ty.isScalableVector()) {
    // Splat the (truncated) i1 condition across a predicate vector.
    SDValue TruncCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, CCVal);
    MVT PredVT = MVT::getVectorVT(MVT::i1, Ty.getVectorElementCount());
    SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, TruncCC);
    return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
  }

  if (useSVEForFixedLengthVectorVT(Ty)) {
    // FIXME: Ideally this would be the same as above using i1 types, however
    // for the moment we can't deal with fixed i1 vector types properly, so
    // instead extend the predicate to a result type sized integer vector.
    MVT SplatValVT = MVT::getIntegerVT(Ty.getScalarSizeInBits());
    MVT PredVT = MVT::getVectorVT(SplatValVT, Ty.getVectorElementCount());
    SDValue SplatVal = DAG.getSExtOrTrunc(CCVal, DL, SplatValVT);
    SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, SplatVal);
    return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
  }

  // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a select
  // instruction.
  if (ISD::isOverflowIntrOpRes(CCVal)) {
    // Only lower legal XALUO ops.
// Continuation of LowerSELECT: a select fed by an overflow intrinsic is
// lowered as a CSEL on the NZCV flags the intrinsic computes.
// NOTE(review): template arguments are stripped in this region (e.g.
// `cast(CCVal.getOperand(2))` was presumably `cast<CondCodeSDNode>(...)`,
// `getInfo()` presumably `getInfo<AArch64FunctionInfo>()`); confirm against
// upstream.
if (!DAG.getTargetLoweringInfo().isTypeLegal(CCVal->getValueType(0)))
  return SDValue();

AArch64CC::CondCode OFCC;
SDValue Value, Overflow;
std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CCVal.getValue(0), DAG);
// This CCVal intentionally shadows the outer CCVal: it holds the condition
// code constant for the CSEL below.
SDValue CCVal = DAG.getConstant(OFCC, DL, MVT::i32);

return DAG.getNode(AArch64ISD::CSEL, DL, Op.getValueType(), TVal, FVal,
                   CCVal, Overflow);
}

// Lower it the same way as we would lower a SELECT_CC node.
ISD::CondCode CC;
SDValue LHS, RHS;
if (CCVal.getOpcode() == ISD::SETCC) {
  LHS = CCVal.getOperand(0);
  RHS = CCVal.getOperand(1);
  CC = cast(CCVal.getOperand(2))->get();
} else {
  // Condition is a plain boolean: select on (CCVal != 0).
  LHS = CCVal;
  RHS = DAG.getConstant(0, DL, CCVal.getValueType());
  CC = ISD::SETNE;
}
return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
}

/// Lower the address of a jump table, choosing the addressing sequence by
/// code model (large non-MachO, tiny, or the default small model).
SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
                                              SelectionDAG &DAG) const {
  // Jump table entries as PC relative offsets. No additional tweaking
  // is necessary here. Just get the address of the jump table.
  JumpTableSDNode *JT = cast(Op);

  if (getTargetMachine().getCodeModel() == CodeModel::Large &&
      !Subtarget->isTargetMachO()) {
    return getAddrLarge(JT, DAG);
  } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
    return getAddrTiny(JT, DAG);
  }
  return getAddr(JT, DAG);
}

/// Lower ISD::BR_JT into a JumpTableDest32 pseudo followed by an indirect
/// branch.
SDValue AArch64TargetLowering::LowerBR_JT(SDValue Op,
                                          SelectionDAG &DAG) const {
  // Jump table entries as PC relative offsets. No additional tweaking
  // is necessary here. Just get the address of the jump table.
  SDLoc DL(Op);
  SDValue JT = Op.getOperand(1);
  SDValue Entry = Op.getOperand(2);
  int JTI = cast(JT.getNode())->getIndex();

  auto *AFI = DAG.getMachineFunction().getInfo();
  // Record 4-byte jump-table entries for this table.
  AFI->setJumpTableEntryInfo(JTI, 4, nullptr);

  SDNode *Dest =
      DAG.getMachineNode(AArch64::JumpTableDest32, DL, MVT::i64, MVT::i64, JT,
                         Entry, DAG.getTargetJumpTable(JTI, MVT::i32));
  return DAG.getNode(ISD::BRIND, DL, MVT::Other, Op.getOperand(0),
                     SDValue(Dest, 0));
}

/// Lower a constant-pool address, choosing the sequence by code model.
SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
                                                 SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast(Op);

  if (getTargetMachine().getCodeModel() == CodeModel::Large) {
    // Use the GOT for the large code model on iOS.
    if (Subtarget->isTargetMachO()) {
      return getGOT(CP, DAG);
    }
    return getAddrLarge(CP, DAG);
  } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
    return getAddrTiny(CP, DAG);
  } else {
    return getAddr(CP, DAG);
  }
}

/// Lower a blockaddress, choosing the sequence by code model.
SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  BlockAddressSDNode *BA = cast(Op);
  if (getTargetMachine().getCodeModel() == CodeModel::Large &&
      !Subtarget->isTargetMachO()) {
    return getAddrLarge(BA, DAG);
  } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
    return getAddrTiny(BA, DAG);
  }
  return getAddr(BA, DAG);
}

/// Darwin va_start: the va_list is a single pointer to the stack area of the
/// variadic arguments; store that frame address through the va_list pointer.
SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
                                                   SelectionDAG &DAG) const {
  AArch64FunctionInfo *FuncInfo = DAG.getMachineFunction().getInfo();
  SDLoc DL(Op);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(),
                                 getPointerTy(DAG.getDataLayout()));
  FR = DAG.getZExtOrTrunc(FR, DL, getPointerMemTy(DAG.getDataLayout()));
  const Value *SV = cast(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

/// Win64 va_start: like Darwin a single pointer, but it points at the saved
/// GPR area when any GPRs were saved, otherwise at the stack area.
SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op,
                                                  SelectionDAG &DAG) const {
  AArch64FunctionInfo *FuncInfo = DAG.getMachineFunction().getInfo();
  SDLoc DL(Op);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsGPRSize() > 0 ?
// Continuation of LowerAAPCS_VASTART: finish computing __vr_top (one past the
// end of the saved-FPR area) and fill in the remaining va_list fields.
          DAG.getNode(ISD::ADD, DL, PtrVT, VRTop,
                      DAG.getConstant(FPRSize, DL, PtrVT));
    VRTop = DAG.getZExtOrTrunc(VRTop, DL, PtrMemVT);

    MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
                                  MachinePointerInfo(SV, Offset),
                                  Align(PtrSize)));
  }

  // int __gr_offs at offset 24 (12 on ILP32)
  Offset += PtrSize;
  SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                   DAG.getConstant(Offset, DL, PtrVT));
  // Offsets are negative: they count back from __gr_top/__vr_top.
  MemOps.push_back(
      DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, DL, MVT::i32),
                   GROffsAddr, MachinePointerInfo(SV, Offset), Align(4)));

  // int __vr_offs at offset 28 (16 on ILP32)
  Offset += 4;
  SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                   DAG.getConstant(Offset, DL, PtrVT));
  MemOps.push_back(
      DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, DL, MVT::i32),
                   VROffsAddr, MachinePointerInfo(SV, Offset), Align(4)));

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}

/// Dispatch va_start lowering by ABI: Win64, Darwin, or AAPCS.
SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()))
    return LowerWin64_VASTART(Op, DAG);
  else if (Subtarget->isTargetDarwin())
    return LowerDarwin_VASTART(Op, DAG);
  else
    return LowerAAPCS_VASTART(Op, DAG);
}

/// va_copy: a memcpy of the va_list, whose size depends on the ABI.
SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {
  // AAPCS has three pointers and two ints (= 32 bytes), Darwin has single
  // pointer.
  SDLoc DL(Op);
  unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8;
  unsigned VaListSize =
      (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
          ? PtrSize
          : Subtarget->isTargetILP32() ? 20 : 32;
  // NOTE(review): template arguments stripped — `cast(Op.getOperand(3))`
  // was presumably `cast<SrcValueSDNode>(...)`; confirm against upstream.
  const Value *DestSV = cast(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast(Op.getOperand(4))->getValue();

  return DAG.getMemcpy(Op.getOperand(0), DL, Op.getOperand(1),
                       Op.getOperand(2),
                       DAG.getConstant(VaListSize, DL, MVT::i32),
                       Align(PtrSize), false, false, false,
                       MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
}

/// Darwin va_arg: load the current pointer from the va_list, align it if the
/// argument demands, bump the pointer past the slot, then load the value
/// (with an FP_ROUND for sub-f64 floats, which are passed widened to f64).
SDValue AArch64TargetLowering::LowerVAARG(SDValue Op,
                                          SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin() &&
         "automatic va_arg instruction only works on Darwin");

  const Value *V = cast(Op.getOperand(2))->getValue();
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  MaybeAlign Align(Op.getConstantOperandVal(3));
  unsigned MinSlotSize = Subtarget->isTargetILP32() ? 4 : 8;
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
  SDValue VAList =
      DAG.getLoad(PtrMemVT, DL, Chain, Addr, MachinePointerInfo(V));
  Chain = VAList.getValue(1);
  VAList = DAG.getZExtOrTrunc(VAList, DL, PtrVT);

  if (VT.isScalableVector())
    report_fatal_error("Passing SVE types to variadic functions is "
                       "currently not supported");

  if (Align && *Align > MinSlotSize) {
    // Round the list pointer up to the requested alignment.
    VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                         DAG.getConstant(Align->value() - 1, DL, PtrVT));
    VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList,
                         DAG.getConstant(-(int64_t)Align->value(), DL, PtrVT));
  }

  Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
  unsigned ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);

  // Scalar integer and FP values smaller than 64 bits are implicitly extended
  // up to 64 bits. At the very least, we have to increase the striding of the
  // vaargs list to match this, and for FP values we need to introduce
  // FP_ROUND nodes as well.
  if (VT.isInteger() && !VT.isVector())
    ArgSize = std::max(ArgSize, MinSlotSize);
  bool NeedFPTrunc = false;
  if (VT.isFloatingPoint() && !VT.isVector() && VT != MVT::f64) {
    ArgSize = 8;
    NeedFPTrunc = true;
  }

  // Increment the pointer, VAList, to the next vaarg
  SDValue VANext = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                               DAG.getConstant(ArgSize, DL, PtrVT));
  VANext = DAG.getZExtOrTrunc(VANext, DL, PtrMemVT);

  // Store the incremented VAList to the legalized pointer
  SDValue APStore =
      DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V));

  // Load the actual argument out of the pointer VAList
  if (NeedFPTrunc) {
    // Load the value as an f64.
    SDValue WideFP =
        DAG.getLoad(MVT::f64, DL, APStore, VAList, MachinePointerInfo());
    // Round the value down to an f32.
    SDValue NarrowFP = DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0),
                                   DAG.getIntPtrConstant(1, DL));
    SDValue Ops[] = { NarrowFP, WideFP.getValue(1) };
    // Merge the rounded value with the chain output of the load.
    return DAG.getMergeValues(Ops, DL);
  }

  return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo());
}

/// Lower llvm.frameaddress: copy FP, then chase saved frame pointers for
/// non-zero depths.
SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast(Op.getOperand(0))->getZExtValue();
  SDValue FrameAddr =
      DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, MVT::i64);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());

  if (Subtarget->isTargetILP32())
    FrameAddr = DAG.getNode(ISD::AssertZext, DL, MVT::i64, FrameAddr,
                            DAG.getValueType(VT));

  return FrameAddr;
}

/// Lower llvm.sponentry: materialize a fixed stack object at offset 0 so its
/// frame index yields the stack pointer value at function entry.
SDValue AArch64TargetLowering::LowerSPONENTRY(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  EVT VT = getPointerTy(DAG.getDataLayout());
  SDLoc DL(Op);
  int FI = MFI.CreateFixedObject(4, 0, false);
  return DAG.getFrameIndex(FI, VT);
}

// NOTE(review): the macro name GET_REGISTER_MATCHER has been split onto the
// following line by the extraction — `#define GET_REGISTER_MATCHER` must be a
// single line for the generated matcher include below it to work.
#define
GET_REGISTER_MATCHER
#include "AArch64GenAsmMatcher.inc"

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
/// Resolve a register name (as used by llvm.read_register etc.) to a physical
/// register. X1-X28 are only accepted when reserved via -ffixed-xN; any other
/// unmatched name is a fatal error.
Register AArch64TargetLowering::
getRegisterByName(const char* RegName, LLT VT, const MachineFunction &MF) const {
  Register Reg = MatchRegisterName(RegName);
  if (AArch64::X1 <= Reg && Reg <= AArch64::X28) {
    const MCRegisterInfo *MRI = Subtarget->getRegisterInfo();
    unsigned DwarfRegNum = MRI->getDwarfRegNum(Reg, false);
    if (!Subtarget->isXRegisterReserved(DwarfRegNum))
      Reg = 0;
  }
  if (Reg)
    return Reg;
  report_fatal_error(Twine("Invalid register name \""
                              + StringRef(RegName)  + "\"."));
}

/// Lower llvm.addressofreturnaddress: the slot where the return address is
/// saved, i.e. FP + 8.
SDValue AArch64TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr =
      DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, VT);
  SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));

  return DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset);
}

/// Lower llvm.returnaddress: LR for depth 0, otherwise load from the caller
/// frame; the result is stripped of any pointer-auth signature (XPACI /
/// XPACLRI).
SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
                                               SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast(Op.getOperand(0))->getZExtValue();
  SDValue ReturnAddress;
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
    ReturnAddress = DAG.getLoad(
        VT, DL, DAG.getEntryNode(),
        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
        MachinePointerInfo());
  } else {
    // Return LR, which contains the return address. Mark it an implicit
    // live-in.
    unsigned Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
    ReturnAddress = DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
  }

  // The XPACLRI instruction assembles to a hint-space instruction before
  // Armv8.3-A therefore this instruction can be safely used for any pre
  // Armv8.3-A architectures. On Armv8.3-A and onwards XPACI is available so use
  // that instead.
  SDNode *St;
  if (Subtarget->hasPAuth()) {
    St = DAG.getMachineNode(AArch64::XPACI, DL, VT, ReturnAddress);
  } else {
    // XPACLRI operates on LR therefore we must move the operand accordingly.
    SDValue Chain =
        DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::LR, ReturnAddress);
    St = DAG.getMachineNode(AArch64::XPACLRI, DL, VT, Chain);
  }
  return SDValue(St, 0);
}

/// LowerShiftParts - Lower SHL_PARTS/SRA_PARTS/SRL_PARTS, which returns two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
SDValue AArch64TargetLowering::LowerShiftParts(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue Lo, Hi;
  expandShiftParts(Op.getNode(), Lo, Hi, DAG);
  return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
}

bool AArch64TargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  // Offsets are folded in the DAG combine rather than here so that we can
  // intelligently choose an offset based on the uses.
  return false;
}

/// Return true when the FP immediate can be materialized cheaply: directly by
/// an FMOV encoding (or fmov of XZR/WZR for +0.0), or — for f32/f64 — by a
/// short integer-mov sequence followed by an fmov.
bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                         bool OptForSize) const {
  bool IsLegal = false;
  // We can materialize #0.0 as fmov $Rd, XZR for 64-bit, 32-bit cases, and
  // 16-bit case when target has full fp16 support.
  // FIXME: We should be able to handle f128 as well with a clever lowering.
  const APInt ImmInt = Imm.bitcastToAPInt();
  if (VT == MVT::f64)
    IsLegal = AArch64_AM::getFP64Imm(ImmInt) != -1 || Imm.isPosZero();
  else if (VT == MVT::f32)
    IsLegal = AArch64_AM::getFP32Imm(ImmInt) != -1 || Imm.isPosZero();
  else if (VT == MVT::f16 && Subtarget->hasFullFP16())
    IsLegal = AArch64_AM::getFP16Imm(ImmInt) != -1 || Imm.isPosZero();
  // TODO: fmov h0, w0 is also legal, however we don't have an isel pattern to
  // generate that fmov.

  // If we can not materialize in immediate field for fmov, check if the
  // value can be encoded as the immediate operand of a logical instruction.
  // The immediate value will be created with either MOVZ, MOVN, or ORR.
  if (!IsLegal && (VT == MVT::f64 || VT == MVT::f32)) {
    // The cost is actually exactly the same for mov+fmov vs. adrp+ldr;
    // however the mov+fmov sequence is always better because of the reduced
    // cache pressure. The timings are still the same if you consider
    // movw+movk+fmov vs. adrp+ldr (it's one instruction longer, but the
    // movw+movk is fused). So we limit up to 2 instructions at most.
    // NOTE(review): template arguments stripped — `SmallVector Insn` was
    // presumably `SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn`; confirm
    // against upstream.
    SmallVector Insn;
    AArch64_IMM::expandMOVImm(ImmInt.getZExtValue(), VT.getSizeInBits(),
                              Insn);
    unsigned Limit = (OptForSize ? 1 : (Subtarget->hasFuseLiterals() ? 5 : 2));
    IsLegal = Insn.size() <= Limit;
  }

  LLVM_DEBUG(dbgs() << (IsLegal ?
                     "Legal " : "Illegal ") << VT.getEVTString()
                    << " imm value: "; Imm.dump(););

  return IsLegal;
}

//===----------------------------------------------------------------------===//
//                          AArch64 Optimization Hooks
//===----------------------------------------------------------------------===//

/// Build a hardware reciprocal(-sqrt) estimate node (FRECPE/FRSQRTE) for the
/// FP types NEON supports, and pick the Newton iteration count when the
/// caller left it unspecified. Returns an empty SDValue when unsupported.
static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode,
                           SDValue Operand, SelectionDAG &DAG,
                           int &ExtraSteps) {
  EVT VT = Operand.getValueType();
  if (ST->hasNEON() &&
      (VT == MVT::f64 || VT == MVT::v1f64 || VT == MVT::v2f64 ||
       VT == MVT::f32 || VT == MVT::v1f32 ||
       VT == MVT::v2f32 || VT == MVT::v4f32)) {
    if (ExtraSteps == TargetLoweringBase::ReciprocalEstimate::Unspecified)
      // For the reciprocal estimates, convergence is quadratic, so the number
      // of digits is doubled after each iteration.  In ARMv8, the accuracy of
      // the initial estimate is 2^-8.  Thus the number of extra steps to refine
      // the result for float (23 mantissa bits) is 2 and for double (52
      // mantissa bits) is 3.
      ExtraSteps = VT.getScalarType() == MVT::f64 ? 3 : 2;

    return DAG.getNode(Opcode, SDLoc(Operand), VT, Operand);
  }

  return SDValue();
}

/// The test emitted before a denormal-safe sqrt expansion: compare the input
/// against +0.0 with SETEQ.
SDValue
AArch64TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
                                        const DenormalMode &Mode) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
  return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
}

/// For a denormal sqrt input the result is the input itself (sqrt(+-0)=+-0).
SDValue
AArch64TargetLowering::getSqrtResultForDenormInput(SDValue Op,
                                                   SelectionDAG &DAG) const {
  return Op;
}

/// Emit an FRSQRTE estimate refined by FRSQRTS Newton steps; optionally
/// multiply by the operand at the end to yield sqrt(x) instead of 1/sqrt(x).
SDValue
AArch64TargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                       int Enabled, int &ExtraSteps,
                                       bool &UseOneConst,
                                       bool Reciprocal) const {
  if (Enabled == ReciprocalEstimate::Enabled ||
      (Enabled == ReciprocalEstimate::Unspecified && Subtarget->useRSqrt()))
    if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRSQRTE, Operand,
                                       DAG, ExtraSteps)) {
      SDLoc DL(Operand);
      EVT VT = Operand.getValueType();

      SDNodeFlags Flags;
      Flags.setAllowReassociation(true);

      // Newton reciprocal square root iteration: E * 0.5 * (3 - X * E^2)
      // AArch64 reciprocal square root iteration instruction: 0.5 * (3 - M * N)
      for (int i = ExtraSteps; i > 0; --i) {
        SDValue Step = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Estimate,
                                   Flags);
        Step = DAG.getNode(AArch64ISD::FRSQRTS, DL, VT, Operand, Step, Flags);
        Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
      }
      if (!Reciprocal)
        Estimate = DAG.getNode(ISD::FMUL, DL, VT, Operand, Estimate, Flags);

      ExtraSteps = 0;
      return Estimate;
    }

  return SDValue();
}

/// Emit an FRECPE estimate refined by FRECPS Newton steps for 1/x.
SDValue
AArch64TargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                        int Enabled,
                                        int &ExtraSteps) const {
  if (Enabled == ReciprocalEstimate::Enabled)
    if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRECPE, Operand,
                                       DAG, ExtraSteps)) {
      SDLoc DL(Operand);
      EVT VT = Operand.getValueType();

      SDNodeFlags Flags;
      Flags.setAllowReassociation(true);

      // Newton reciprocal iteration: E * (2 - X * E)
      // AArch64 reciprocal iteration instruction: (2 - M * N)
      for (int i = ExtraSteps; i > 0; --i) {
        SDValue Step = DAG.getNode(AArch64ISD::FRECPS, DL, VT, Operand,
                                   Estimate, Flags);
        Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
      }

      ExtraSteps = 0;
      return Estimate;
    }

  return SDValue();
}

//===----------------------------------------------------------------------===//
//                          AArch64 Inline Assembly Support
//===----------------------------------------------------------------------===//

// Table of Constraints
// TODO: This is the current set of constraints supported by ARM for the
// compiler, not all of them may make sense.
//
// r - A general register
// w - An FP/SIMD register of some size in the range v0-v31
// x - An FP/SIMD register of some size in the range v0-v15
// I - Constant that can be used with an ADD instruction
// J - Constant that can be used with a SUB instruction
// K - Constant that can be used with a 32-bit logical instruction
// L - Constant that can be used with a 64-bit logical instruction
// M - Constant that can be used as a 32-bit MOV immediate
// N - Constant that can be used as a 64-bit MOV immediate
// Q - A memory reference with base register and no offset
// S - A symbolic address
// Y - Floating point constant zero
// Z - Integer constant zero
//
// Note that general register operands will be output using their 64-bit x
// register name, whatever the size of the variable, unless the asm operand
// is prefixed by the %w modifier. Floating-point and SIMD register operands
// will be output with the v prefix unless prefixed by the %b, %h, %s, %d or
// %q modifier.
const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
  // At this point, we have to lower this constraint to something else, so we
  // lower it to an "r" or "w". However, by doing this we will force the result
  // to be in register, while the X constraint is much more permissive.
  //
  // Although we are correct (we are free to emit anything, without
  // constraints), we might break use cases that would expect us to be more
  // efficient and emit something else.
  if (!Subtarget->hasFPARMv8())
    return "r";

  if (ConstraintVT.isFloatingPoint())
    return "w";

  if (ConstraintVT.isVector() &&
     (ConstraintVT.getSizeInBits() == 64 ||
      ConstraintVT.getSizeInBits() == 128))
    return "w";

  return "r";
}

// SVE predicate-register constraints: Upl = restricted (P0-P7), Upa = any.
enum PredicateConstraint {
  Upl,
  Upa,
  Invalid
};

/// Parse an SVE predicate constraint string ("Upa"/"Upl") or return Invalid.
static PredicateConstraint parsePredicateConstraint(StringRef Constraint) {
  PredicateConstraint P = PredicateConstraint::Invalid;
  if (Constraint == "Upa")
    P = PredicateConstraint::Upa;
  if (Constraint == "Upl")
    P = PredicateConstraint::Upl;
  return P;
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
AArch64TargetLowering::ConstraintType
AArch64TargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'x':
    case 'w':
    case 'y':
      return C_RegisterClass;
    // An address with a single base register. Due to the way we
    // currently handle addresses it is the same as 'r'.
    case 'Q':
      return C_Memory;
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'Y':
    case 'Z':
      return C_Immediate;
    case 'z':
    case 'S': // A symbolic address
      return C_Other;
    }
  } else if (parsePredicateConstraint(Constraint) !=
             PredicateConstraint::Invalid)
      return C_RegisterClass;
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
AArch64TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'x':
  case 'w':
  case 'y':
    if (type->isFloatingPointTy() || type->isVectorTy())
      weight = CW_Register;
    break;
  case 'z':
    weight = CW_Constant;
    break;
  case 'U':
    if (parsePredicateConstraint(constraint) != PredicateConstraint::Invalid)
      weight = CW_Register;
    break;
  }
  return weight;
}

/// Map an inline-asm register constraint (and operand type) to a register
/// class / specific register.
// NOTE(review): template arguments stripped — the return type was presumably
// std::pair<unsigned, const TargetRegisterClass *>; confirm against upstream.
std::pair
AArch64TargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT.isScalableVector())
        return std::make_pair(0U, nullptr);
      if (Subtarget->hasLS64() && VT.getSizeInBits() == 512)
        return std::make_pair(0U, &AArch64::GPR64x8ClassRegClass);
      if (VT.getFixedSizeInBits() == 64)
        return std::make_pair(0U, &AArch64::GPR64commonRegClass);
      return std::make_pair(0U, &AArch64::GPR32commonRegClass);
    case 'w': {
      if (!Subtarget->hasFPARMv8())
        break;
      if (VT.isScalableVector()) {
        if (VT.getVectorElementType() != MVT::i1)
          return std::make_pair(0U, &AArch64::ZPRRegClass);
        return std::make_pair(0U, nullptr);
      }
      uint64_t VTSize = VT.getFixedSizeInBits();
      if (VTSize == 16)
        return std::make_pair(0U, &AArch64::FPR16RegClass);
      if (VTSize == 32)
        return std::make_pair(0U, &AArch64::FPR32RegClass);
      if (VTSize == 64)
        return std::make_pair(0U, &AArch64::FPR64RegClass);
      if (VTSize == 128)
        return std::make_pair(0U, &AArch64::FPR128RegClass);
      break;
    }
    // The instructions that this constraint is designed for can
    // only take 128-bit registers so just use that regclass.
    case 'x':
      if (!Subtarget->hasFPARMv8())
        break;
      if (VT.isScalableVector())
        return std::make_pair(0U, &AArch64::ZPR_4bRegClass);
      if (VT.getSizeInBits() == 128)
        return std::make_pair(0U, &AArch64::FPR128_loRegClass);
      break;
    case 'y':
      if (!Subtarget->hasFPARMv8())
        break;
      if (VT.isScalableVector())
        return std::make_pair(0U, &AArch64::ZPR_3bRegClass);
      break;
    }
  } else {
    PredicateConstraint PC = parsePredicateConstraint(Constraint);
    if (PC != PredicateConstraint::Invalid) {
      if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1)
        return std::make_pair(0U, nullptr);
      bool restricted = (PC == PredicateConstraint::Upl);
      return restricted ? std::make_pair(0U, &AArch64::PPR_3bRegClass)
                        : std::make_pair(0U, &AArch64::PPRRegClass);
    }
  }
  if (StringRef("{cc}").equals_insensitive(Constraint))
    return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass);

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    unsigned Size = Constraint.size();
    if ((Size == 4 || Size == 5) && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') {
      int RegNo;
      bool Failed = Constraint.slice(2, Size - 1).getAsInteger(10, RegNo);
      if (!Failed && RegNo >= 0 && RegNo <= 31) {
        // v0 - v31 are aliases of q0 - q31 or d0 - d31 depending on size.
        // By default we'll emit v0-v31 for this unless there's a modifier where
        // we'll emit the correct register as well.
// Continuation of getRegForInlineAsmConstraint: resolve an explicit {vN}
// register to the D- or Q-register alias matching the operand size.
        if (VT != MVT::Other && VT.getSizeInBits() == 64) {
          Res.first = AArch64::FPR64RegClass.getRegister(RegNo);
          Res.second = &AArch64::FPR64RegClass;
        } else {
          Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
          Res.second = &AArch64::FPR128RegClass;
        }
      }
    }
  }

  // Without FP/SIMD only GPR classes are usable; reject anything else.
  if (Res.second && !Subtarget->hasFPARMv8() &&
      !AArch64::GPR32allRegClass.hasSubClassEq(Res.second) &&
      !AArch64::GPR64allRegClass.hasSubClassEq(Res.second))
    return std::make_pair(0U, nullptr);

  return Res;
}

/// i512 asm operands map to the LS64 i64x8 type when available.
EVT AArch64TargetLowering::getAsmOperandValueType(const DataLayout &DL,
                                                  llvm::Type *Ty,
                                                  bool AllowUnknown) const {
  if (Subtarget->hasLS64() && Ty->isIntegerTy(512))
    return EVT(MVT::i64x8);

  return TargetLowering::getAsmOperandValueType(DL, Ty, AllowUnknown);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
// NOTE(review): template arguments stripped — `std::vector &Ops` was
// presumably `std::vector<SDValue> &Ops`, and the dyn_cast/cast calls below
// lost their type arguments (GlobalAddressSDNode, BlockAddressSDNode,
// ConstantSDNode); confirm against upstream.
void AArch64TargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector &Ops,
    SelectionDAG &DAG) const {
  SDValue Result;

  // Currently only support length 1 constraints.
  if (Constraint.length() != 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    break;

  // This set of constraints deal with valid constants for various instructions.
  // Validate and return a target constant for them if we can.
  case 'z': {
    // 'z' maps to xzr or wzr so it needs an input of 0.
    if (!isNullConstant(Op))
      return;

    if (Op.getValueType() == MVT::i64)
      Result = DAG.getRegister(AArch64::XZR, MVT::i64);
    else
      Result = DAG.getRegister(AArch64::WZR, MVT::i32);
    break;
  }
  case 'S': {
    // An absolute symbolic address or label reference.
    if (const GlobalAddressSDNode *GA = dyn_cast(Op)) {
      Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
                                          GA->getValueType(0));
    } else if (const BlockAddressSDNode *BA = dyn_cast(Op)) {
      Result =
          DAG.getTargetBlockAddress(BA->getBlockAddress(), BA->getValueType(0));
    } else
      return;
    break;
  }

  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
    ConstantSDNode *C = dyn_cast(Op);
    if (!C)
      return;

    // Grab the value and do some validation.
    uint64_t CVal = C->getZExtValue();
    switch (ConstraintLetter) {
    // The I constraint applies only to simple ADD or SUB immediate operands:
    // i.e. 0 to 4095 with optional shift by 12
    // The J constraint applies only to ADD or SUB immediates that would be
    // valid when negated, i.e. if [an add pattern] were to be output as a SUB
    // instruction [or vice versa], in other words -1 to -4095 with optional
    // left shift by 12.
    case 'I':
      if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal))
        break;
      return;
    case 'J': {
      uint64_t NVal = -C->getSExtValue();
      if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) {
        CVal = C->getSExtValue();
        break;
      }
      return;
    }
    // The K and L constraints apply *only* to logical immediates, including
    // what used to be the MOVI alias for ORR (though the MOVI alias has now
    // been removed and MOV should be used). So these constraints have to
    // distinguish between bit patterns that are valid 32-bit or 64-bit
    // "bitmask immediates": for example 0xaaaaaaaa is a valid bimm32 (K), but
    // not a valid bimm64 (L) where 0xaaaaaaaaaaaaaaaa would be valid, and vice
    // versa.
    case 'K':
      if (AArch64_AM::isLogicalImmediate(CVal, 32))
        break;
      return;
    case 'L':
      if (AArch64_AM::isLogicalImmediate(CVal, 64))
        break;
      return;
    // The M and N constraints are a superset of K and L respectively, for use
    // with the MOV (immediate) alias. As well as the logical immediates they
    // also match 32 or 64-bit immediates that can be loaded either using a
    // *single* MOVZ or MOVN , such as 32-bit 0x12340000, 0x00001234, 0xffffedca
    // (M) or 64-bit 0x1234000000000000 (N) etc.
    // As a note some of this code is liberally stolen from the asm parser.
    case 'M': {
      if (!isUInt<32>(CVal))
        return;
      if (AArch64_AM::isLogicalImmediate(CVal, 32))
        break;
      if ((CVal & 0xFFFF) == CVal)
        break;
      if ((CVal & 0xFFFF0000ULL) == CVal)
        break;
      uint64_t NCVal = ~(uint32_t)CVal;
      if ((NCVal & 0xFFFFULL) == NCVal)
        break;
      if ((NCVal & 0xFFFF0000ULL) == NCVal)
        break;
      return;
    }
    case 'N': {
      if (AArch64_AM::isLogicalImmediate(CVal, 64))
        break;
      if ((CVal & 0xFFFFULL) == CVal)
        break;
      if ((CVal & 0xFFFF0000ULL) == CVal)
        break;
      if ((CVal & 0xFFFF00000000ULL) == CVal)
        break;
      if ((CVal & 0xFFFF000000000000ULL) == CVal)
        break;
      uint64_t NCVal = ~CVal;
      if ((NCVal & 0xFFFFULL) == NCVal)
        break;
      if ((NCVal & 0xFFFF0000ULL) == NCVal)
        break;
      if ((NCVal & 0xFFFF00000000ULL) == NCVal)
        break;
      if ((NCVal & 0xFFFF000000000000ULL) == NCVal)
        break;
      return;
    }
    default:
      return;
    }

    // All assembler immediates are 64-bit integers.
    Result = DAG.getTargetConstant(CVal, SDLoc(Op), MVT::i64);
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
//                     AArch64 Advanced SIMD Support
//===----------------------------------------------------------------------===//

/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) { EVT VT = V64Reg.getValueType(); unsigned NarrowSize = VT.getVectorNumElements(); MVT EltTy = VT.getVectorElementType().getSimpleVT(); MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize); SDLoc DL(V64Reg); return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideTy, DAG.getUNDEF(WideTy), V64Reg, DAG.getConstant(0, DL, MVT::i64)); } /// getExtFactor - Determine the adjustment factor for the position when /// generating an "extract from vector registers" instruction. static unsigned getExtFactor(SDValue &V) { EVT EltType = V.getValueType().getVectorElementType(); return EltType.getSizeInBits() / 8; } /// NarrowVector - Given a value in the V128 register class, produce the /// equivalent value in the V64 register class. static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) { EVT VT = V128Reg.getValueType(); unsigned WideSize = VT.getVectorNumElements(); MVT EltTy = VT.getVectorElementType().getSimpleVT(); MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2); SDLoc DL(V128Reg); return DAG.getTargetExtractSubreg(AArch64::dsub, DL, NarrowTy, V128Reg); } // Gather data to see if the operation can be modelled as a // shuffle in combination with VEXTs. SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const { assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n"); SDLoc dl(Op); EVT VT = Op.getValueType(); assert(!VT.isScalableVector() && "Scalable vectors cannot be used with ISD::BUILD_VECTOR"); unsigned NumElts = VT.getVectorNumElements(); struct ShuffleSourceInfo { SDValue Vec; unsigned MinElt; unsigned MaxElt; // We may insert some combination of BITCASTs and VEXT nodes to force Vec to // be compatible with the shuffle we intend to construct. As a result // ShuffleVec will be some sliding window into the original Vec. 
SDValue ShuffleVec; // Code should guarantee that element i in Vec starts at element "WindowBase // + i * WindowScale in ShuffleVec". int WindowBase; int WindowScale; ShuffleSourceInfo(SDValue Vec) : Vec(Vec), MinElt(std::numeric_limits::max()), MaxElt(0), ShuffleVec(Vec), WindowBase(0), WindowScale(1) {} bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } }; // First gather all vectors used as an immediate source for this BUILD_VECTOR // node. SmallVector Sources; for (unsigned i = 0; i < NumElts; ++i) { SDValue V = Op.getOperand(i); if (V.isUndef()) continue; else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT || !isa(V.getOperand(1))) { LLVM_DEBUG( dbgs() << "Reshuffle failed: " "a shuffle can only come from building a vector from " "various elements of other vectors, provided their " "indices are constant\n"); return SDValue(); } // Add this element source to the list if it's not already there. SDValue SourceVec = V.getOperand(0); auto Source = find(Sources, SourceVec); if (Source == Sources.end()) Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); // Update the minimum and maximum lane number seen. unsigned EltNo = cast(V.getOperand(1))->getZExtValue(); Source->MinElt = std::min(Source->MinElt, EltNo); Source->MaxElt = std::max(Source->MaxElt, EltNo); } if (Sources.size() > 2) { LLVM_DEBUG( dbgs() << "Reshuffle failed: currently only do something sane when at " "most two source vectors are involved\n"); return SDValue(); } // Find out the smallest element size among result and two sources, and use // it as element size to build the shuffle_vector. 
EVT SmallestEltTy = VT.getVectorElementType(); for (auto &Source : Sources) { EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); if (SrcEltTy.bitsLT(SmallestEltTy)) { SmallestEltTy = SrcEltTy; } } unsigned ResMultiplier = VT.getScalarSizeInBits() / SmallestEltTy.getFixedSizeInBits(); uint64_t VTSize = VT.getFixedSizeInBits(); NumElts = VTSize / SmallestEltTy.getFixedSizeInBits(); EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts); // If the source vector is too wide or too narrow, we may nevertheless be able // to construct a compatible shuffle either by concatenating it with UNDEF or // extracting a suitable range of elements. for (auto &Src : Sources) { EVT SrcVT = Src.ShuffleVec.getValueType(); uint64_t SrcVTSize = SrcVT.getFixedSizeInBits(); if (SrcVTSize == VTSize) continue; // This stage of the search produces a source with the same element type as // the original, but with a total width matching the BUILD_VECTOR output. EVT EltVT = SrcVT.getVectorElementType(); unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits(); EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts); if (SrcVTSize < VTSize) { assert(2 * SrcVTSize == VTSize); // We can pad out the smaller vector for free, so if it's part of a // shuffle... 
Src.ShuffleVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec, DAG.getUNDEF(Src.ShuffleVec.getValueType())); continue; } if (SrcVTSize != 2 * VTSize) { LLVM_DEBUG( dbgs() << "Reshuffle failed: result vector too small to extract\n"); return SDValue(); } if (Src.MaxElt - Src.MinElt >= NumSrcElts) { LLVM_DEBUG( dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n"); return SDValue(); } if (Src.MinElt >= NumSrcElts) { // The extraction can just take the second half Src.ShuffleVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, DAG.getConstant(NumSrcElts, dl, MVT::i64)); Src.WindowBase = -NumSrcElts; } else if (Src.MaxElt < NumSrcElts) { // The extraction can just take the first half Src.ShuffleVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, DAG.getConstant(0, dl, MVT::i64)); } else { // An actual VEXT is needed SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, DAG.getConstant(0, dl, MVT::i64)); SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, DAG.getConstant(NumSrcElts, dl, MVT::i64)); unsigned Imm = Src.MinElt * getExtFactor(VEXTSrc1); if (!SrcVT.is64BitVector()) { LLVM_DEBUG( dbgs() << "Reshuffle failed: don't know how to lower AArch64ISD::EXT " "for SVE vectors."); return SDValue(); } Src.ShuffleVec = DAG.getNode(AArch64ISD::EXT, dl, DestVT, VEXTSrc1, VEXTSrc2, DAG.getConstant(Imm, dl, MVT::i32)); Src.WindowBase = -Src.MinElt; } } // Another possible incompatibility occurs from the vector element types. We // can fix this by bitcasting the source vectors to the same type we intend // for the shuffle. 
for (auto &Src : Sources) { EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); if (SrcEltTy == SmallestEltTy) continue; assert(ShuffleVT.getVectorElementType() == SmallestEltTy); Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec); Src.WindowScale = SrcEltTy.getFixedSizeInBits() / SmallestEltTy.getFixedSizeInBits(); Src.WindowBase *= Src.WindowScale; } // Final sanity check before we try to actually produce a shuffle. LLVM_DEBUG(for (auto Src : Sources) assert(Src.ShuffleVec.getValueType() == ShuffleVT);); // The stars all align, our next step is to produce the mask for the shuffle. SmallVector Mask(ShuffleVT.getVectorNumElements(), -1); int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { SDValue Entry = Op.getOperand(i); if (Entry.isUndef()) continue; auto Src = find(Sources, Entry.getOperand(0)); int EltNo = cast(Entry.getOperand(1))->getSExtValue(); // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit // trunc. So only std::min(SrcBits, DestBits) actually get defined in this // segment. EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType(); int BitsDefined = std::min(OrigEltTy.getScalarSizeInBits(), VT.getScalarSizeInBits()); int LanesDefined = BitsDefined / BitsPerShuffleLane; // This source is expected to fill ResMultiplier lanes of the final shuffle, // starting at the appropriate offset. int *LaneMask = &Mask[i * ResMultiplier]; int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase; ExtractBase += NumElts * (Src - Sources.begin()); for (int j = 0; j < LanesDefined; ++j) LaneMask[j] = ExtractBase + j; } // Final check before we try to produce nonsense... 
if (!isShuffleMaskLegal(Mask, ShuffleVT)) { LLVM_DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n"); return SDValue(); } SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) }; for (unsigned i = 0; i < Sources.size(); ++i) ShuffleOps[i] = Sources[i].ShuffleVec; SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0], ShuffleOps[1], Mask); SDValue V = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); LLVM_DEBUG(dbgs() << "Reshuffle, creating node: "; Shuffle.dump(); dbgs() << "Reshuffle, creating node: "; V.dump();); return V; } // check if an EXT instruction can handle the shuffle mask when the // vector sources of the shuffle are the same. static bool isSingletonEXTMask(ArrayRef M, EVT VT, unsigned &Imm) { unsigned NumElts = VT.getVectorNumElements(); // Assume that the first shuffle index is not UNDEF. Fail if it is. if (M[0] < 0) return false; Imm = M[0]; // If this is a VEXT shuffle, the immediate value is the index of the first // element. The other shuffle indices must be the successive elements after // the first one. unsigned ExpectedElt = Imm; for (unsigned i = 1; i < NumElts; ++i) { // Increment the expected index. If it wraps around, just follow it // back to index zero and keep going. ++ExpectedElt; if (ExpectedElt == NumElts) ExpectedElt = 0; if (M[i] < 0) continue; // ignore UNDEF indices if (ExpectedElt != static_cast(M[i])) return false; } return true; } /// Check if a vector shuffle corresponds to a DUP instructions with a larger /// element width than the vector lane type. 
If that is the case the function /// returns true and writes the value of the DUP instruction lane operand into /// DupLaneOp static bool isWideDUPMask(ArrayRef M, EVT VT, unsigned BlockSize, unsigned &DupLaneOp) { assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) && "Only possible block sizes for wide DUP are: 16, 32, 64"); if (BlockSize <= VT.getScalarSizeInBits()) return false; if (BlockSize % VT.getScalarSizeInBits() != 0) return false; if (VT.getSizeInBits() % BlockSize != 0) return false; size_t SingleVecNumElements = VT.getVectorNumElements(); size_t NumEltsPerBlock = BlockSize / VT.getScalarSizeInBits(); size_t NumBlocks = VT.getSizeInBits() / BlockSize; // We are looking for masks like // [0, 1, 0, 1] or [2, 3, 2, 3] or [4, 5, 6, 7, 4, 5, 6, 7] where any element // might be replaced by 'undefined'. BlockIndices will eventually contain // lane indices of the duplicated block (i.e. [0, 1], [2, 3] and [4, 5, 6, 7] // for the above examples) SmallVector BlockElts(NumEltsPerBlock, -1); for (size_t BlockIndex = 0; BlockIndex < NumBlocks; BlockIndex++) for (size_t I = 0; I < NumEltsPerBlock; I++) { int Elt = M[BlockIndex * NumEltsPerBlock + I]; if (Elt < 0) continue; // For now we don't support shuffles that use the second operand if ((unsigned)Elt >= SingleVecNumElements) return false; if (BlockElts[I] < 0) BlockElts[I] = Elt; else if (BlockElts[I] != Elt) return false; } // We found a candidate block (possibly with some undefs). It must be a // sequence of consecutive integers starting with a value divisible by // NumEltsPerBlock with some values possibly replaced by undef-s. // Find first non-undef element auto FirstRealEltIter = find_if(BlockElts, [](int Elt) { return Elt >= 0; }); assert(FirstRealEltIter != BlockElts.end() && "Shuffle with all-undefs must have been caught by previous cases, " "e.g. 
isSplat()"); if (FirstRealEltIter == BlockElts.end()) { DupLaneOp = 0; return true; } // Index of FirstRealElt in BlockElts size_t FirstRealIndex = FirstRealEltIter - BlockElts.begin(); if ((unsigned)*FirstRealEltIter < FirstRealIndex) return false; // BlockElts[0] must have the following value if it isn't undef: size_t Elt0 = *FirstRealEltIter - FirstRealIndex; // Check the first element if (Elt0 % NumEltsPerBlock != 0) return false; // Check that the sequence indeed consists of consecutive integers (modulo // undefs) for (size_t I = 0; I < NumEltsPerBlock; I++) if (BlockElts[I] >= 0 && (unsigned)BlockElts[I] != Elt0 + I) return false; DupLaneOp = Elt0 / NumEltsPerBlock; return true; } // check if an EXT instruction can handle the shuffle mask when the // vector sources of the shuffle are different. static bool isEXTMask(ArrayRef M, EVT VT, bool &ReverseEXT, unsigned &Imm) { // Look for the first non-undef element. const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; }); // Benefit form APInt to handle overflow when calculating expected element. unsigned NumElts = VT.getVectorNumElements(); unsigned MaskBits = APInt(32, NumElts * 2).logBase2(); APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1); // The following shuffle indices must be the successive elements after the // first real element. const int *FirstWrongElt = std::find_if(FirstRealElt + 1, M.end(), [&](int Elt) {return Elt != ExpectedElt++ && Elt != -1;}); if (FirstWrongElt != M.end()) return false; // The index of an EXT is the first element if it is not UNDEF. // Watch out for the beginning UNDEFs. The EXT index should be the expected // value of the first element. E.g. // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>. // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>. // ExpectedElt is the last mask index plus 1. Imm = ExpectedElt.getZExtValue(); // There are two difference cases requiring to reverse input vectors. 
// For example, for vector <4 x i32> we have the following cases, // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>) // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>) // For both cases, we finally use mask <5, 6, 7, 0>, which requires // to reverse two input vectors. if (Imm < NumElts) ReverseEXT = true; else Imm -= NumElts; return true; } /// isREVMask - Check if a vector shuffle corresponds to a REV /// instruction with the specified blocksize. (The order of the elements /// within each block of the vector is reversed.) static bool isREVMask(ArrayRef M, EVT VT, unsigned BlockSize) { assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) && "Only possible block sizes for REV are: 16, 32, 64"); unsigned EltSz = VT.getScalarSizeInBits(); if (EltSz == 64) return false; unsigned NumElts = VT.getVectorNumElements(); unsigned BlockElts = M[0] + 1; // If the first shuffle index is UNDEF, be optimistic. if (M[0] < 0) BlockElts = BlockSize / EltSz; if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) return false; for (unsigned i = 0; i < NumElts; ++i) { if (M[i] < 0) continue; // ignore UNDEF indices if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts)) return false; } return true; } static bool isZIPMask(ArrayRef M, EVT VT, unsigned &WhichResult) { unsigned NumElts = VT.getVectorNumElements(); if (NumElts % 2 != 0) return false; WhichResult = (M[0] == 0 ? 0 : 1); unsigned Idx = WhichResult * NumElts / 2; for (unsigned i = 0; i != NumElts; i += 2) { if ((M[i] >= 0 && (unsigned)M[i] != Idx) || (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts)) return false; Idx += 1; } return true; } static bool isUZPMask(ArrayRef M, EVT VT, unsigned &WhichResult) { unsigned NumElts = VT.getVectorNumElements(); WhichResult = (M[0] == 0 ? 
0 : 1); for (unsigned i = 0; i != NumElts; ++i) { if (M[i] < 0) continue; // ignore UNDEF indices if ((unsigned)M[i] != 2 * i + WhichResult) return false; } return true; } static bool isTRNMask(ArrayRef M, EVT VT, unsigned &WhichResult) { unsigned NumElts = VT.getVectorNumElements(); if (NumElts % 2 != 0) return false; WhichResult = (M[0] == 0 ? 0 : 1); for (unsigned i = 0; i < NumElts; i += 2) { if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) || (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + NumElts + WhichResult)) return false; } return true; } /// isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. static bool isZIP_v_undef_Mask(ArrayRef M, EVT VT, unsigned &WhichResult) { unsigned NumElts = VT.getVectorNumElements(); if (NumElts % 2 != 0) return false; WhichResult = (M[0] == 0 ? 0 : 1); unsigned Idx = WhichResult * NumElts / 2; for (unsigned i = 0; i != NumElts; i += 2) { if ((M[i] >= 0 && (unsigned)M[i] != Idx) || (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx)) return false; Idx += 1; } return true; } /// isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, static bool isUZP_v_undef_Mask(ArrayRef M, EVT VT, unsigned &WhichResult) { unsigned Half = VT.getVectorNumElements() / 2; WhichResult = (M[0] == 0 ? 0 : 1); for (unsigned j = 0; j != 2; ++j) { unsigned Idx = WhichResult; for (unsigned i = 0; i != Half; ++i) { int MIdx = M[i + j * Half]; if (MIdx >= 0 && (unsigned)MIdx != Idx) return false; Idx += 2; } } return true; } /// isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 
static bool isTRN_v_undef_Mask(ArrayRef M, EVT VT, unsigned &WhichResult) { unsigned NumElts = VT.getVectorNumElements(); if (NumElts % 2 != 0) return false; WhichResult = (M[0] == 0 ? 0 : 1); for (unsigned i = 0; i < NumElts; i += 2) { if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) || (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult)) return false; } return true; } static bool isINSMask(ArrayRef M, int NumInputElements, bool &DstIsLeft, int &Anomaly) { if (M.size() != static_cast(NumInputElements)) return false; int NumLHSMatch = 0, NumRHSMatch = 0; int LastLHSMismatch = -1, LastRHSMismatch = -1; for (int i = 0; i < NumInputElements; ++i) { if (M[i] == -1) { ++NumLHSMatch; ++NumRHSMatch; continue; } if (M[i] == i) ++NumLHSMatch; else LastLHSMismatch = i; if (M[i] == i + NumInputElements) ++NumRHSMatch; else LastRHSMismatch = i; } if (NumLHSMatch == NumInputElements - 1) { DstIsLeft = true; Anomaly = LastLHSMismatch; return true; } else if (NumRHSMatch == NumInputElements - 1) { DstIsLeft = false; Anomaly = LastRHSMismatch; return true; } return false; } static bool isConcatMask(ArrayRef Mask, EVT VT, bool SplitLHS) { if (VT.getSizeInBits() != 128) return false; unsigned NumElts = VT.getVectorNumElements(); for (int I = 0, E = NumElts / 2; I != E; I++) { if (Mask[I] != I) return false; } int Offset = NumElts / 2; for (int I = NumElts / 2, E = NumElts; I != E; I++) { if (Mask[I] != I + SplitLHS * Offset) return false; } return true; } static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) { SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue V0 = Op.getOperand(0); SDValue V1 = Op.getOperand(1); ArrayRef Mask = cast(Op)->getMask(); if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() || VT.getVectorElementType() != V1.getValueType().getVectorElementType()) return SDValue(); bool SplitV0 = V0.getValueSizeInBits() == 128; if (!isConcatMask(Mask, VT, SplitV0)) return SDValue(); EVT CastVT = 
VT.getHalfNumVectorElementsVT(*DAG.getContext()); if (SplitV0) { V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0, DAG.getConstant(0, DL, MVT::i64)); } if (V1.getValueSizeInBits() == 128) { V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1, DAG.getConstant(0, DL, MVT::i64)); } return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1); } /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit /// the specified operations to build the shuffle. static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl) { unsigned OpNum = (PFEntry >> 26) & 0x0F; unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1); unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1); enum { OP_COPY = 0, // Copy, used for things like to say it is <0,1,2,3> OP_VREV, OP_VDUP0, OP_VDUP1, OP_VDUP2, OP_VDUP3, OP_VEXT1, OP_VEXT2, OP_VEXT3, OP_VUZPL, // VUZP, left result OP_VUZPR, // VUZP, right result OP_VZIPL, // VZIP, left result OP_VZIPR, // VZIP, right result OP_VTRNL, // VTRN, left result OP_VTRNR // VTRN, right result }; if (OpNum == OP_COPY) { if (LHSID == (1 * 9 + 2) * 9 + 3) return LHS; assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 && "Illegal OP_COPY!"); return RHS; } SDValue OpLHS, OpRHS; OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); EVT VT = OpLHS.getValueType(); switch (OpNum) { default: llvm_unreachable("Unknown shuffle opcode!"); case OP_VREV: // VREV divides the vector in half and swaps within the half. 
if (VT.getVectorElementType() == MVT::i32 || VT.getVectorElementType() == MVT::f32) return DAG.getNode(AArch64ISD::REV64, dl, VT, OpLHS); // vrev <4 x i16> -> REV32 if (VT.getVectorElementType() == MVT::i16 || VT.getVectorElementType() == MVT::f16 || VT.getVectorElementType() == MVT::bf16) return DAG.getNode(AArch64ISD::REV32, dl, VT, OpLHS); // vrev <4 x i8> -> REV16 assert(VT.getVectorElementType() == MVT::i8); return DAG.getNode(AArch64ISD::REV16, dl, VT, OpLHS); case OP_VDUP0: case OP_VDUP1: case OP_VDUP2: case OP_VDUP3: { EVT EltTy = VT.getVectorElementType(); unsigned Opcode; if (EltTy == MVT::i8) Opcode = AArch64ISD::DUPLANE8; else if (EltTy == MVT::i16 || EltTy == MVT::f16 || EltTy == MVT::bf16) Opcode = AArch64ISD::DUPLANE16; else if (EltTy == MVT::i32 || EltTy == MVT::f32) Opcode = AArch64ISD::DUPLANE32; else if (EltTy == MVT::i64 || EltTy == MVT::f64) Opcode = AArch64ISD::DUPLANE64; else llvm_unreachable("Invalid vector element type?"); if (VT.getSizeInBits() == 64) OpLHS = WidenVector(OpLHS, DAG); SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, dl, MVT::i64); return DAG.getNode(Opcode, dl, VT, OpLHS, Lane); } case OP_VEXT1: case OP_VEXT2: case OP_VEXT3: { unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS); return DAG.getNode(AArch64ISD::EXT, dl, VT, OpLHS, OpRHS, DAG.getConstant(Imm, dl, MVT::i32)); } case OP_VUZPL: return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), OpLHS, OpRHS); case OP_VUZPR: return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), OpLHS, OpRHS); case OP_VZIPL: return DAG.getNode(AArch64ISD::ZIP1, dl, DAG.getVTList(VT, VT), OpLHS, OpRHS); case OP_VZIPR: return DAG.getNode(AArch64ISD::ZIP2, dl, DAG.getVTList(VT, VT), OpLHS, OpRHS); case OP_VTRNL: return DAG.getNode(AArch64ISD::TRN1, dl, DAG.getVTList(VT, VT), OpLHS, OpRHS); case OP_VTRNR: return DAG.getNode(AArch64ISD::TRN2, dl, DAG.getVTList(VT, VT), OpLHS, OpRHS); } } static SDValue GenerateTBL(SDValue Op, ArrayRef ShuffleMask, SelectionDAG &DAG) { // 
Check to see if we can use the TBL instruction. SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); SDLoc DL(Op); EVT EltVT = Op.getValueType().getVectorElementType(); unsigned BytesPerElt = EltVT.getSizeInBits() / 8; SmallVector TBLMask; for (int Val : ShuffleMask) { for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) { unsigned Offset = Byte + Val * BytesPerElt; TBLMask.push_back(DAG.getConstant(Offset, DL, MVT::i32)); } } MVT IndexVT = MVT::v8i8; unsigned IndexLen = 8; if (Op.getValueSizeInBits() == 128) { IndexVT = MVT::v16i8; IndexLen = 16; } SDValue V1Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V1); SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2); SDValue Shuffle; if (V2.getNode()->isUndef()) { if (IndexLen == 8) V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst); Shuffle = DAG.getNode( ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst, DAG.getBuildVector(IndexVT, DL, makeArrayRef(TBLMask.data(), IndexLen))); } else { if (IndexLen == 8) { V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst); Shuffle = DAG.getNode( ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst, DAG.getBuildVector(IndexVT, DL, makeArrayRef(TBLMask.data(), IndexLen))); } else { // FIXME: We cannot, for the moment, emit a TBL2 instruction because we // cannot currently represent the register constraints on the input // table registers. 
// Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst, // DAG.getBuildVector(IndexVT, DL, &TBLMask[0], // IndexLen)); Shuffle = DAG.getNode( ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32), V1Cst, V2Cst, DAG.getBuildVector(IndexVT, DL, makeArrayRef(TBLMask.data(), IndexLen))); } } return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle); } static unsigned getDUPLANEOp(EVT EltType) { if (EltType == MVT::i8) return AArch64ISD::DUPLANE8; if (EltType == MVT::i16 || EltType == MVT::f16 || EltType == MVT::bf16) return AArch64ISD::DUPLANE16; if (EltType == MVT::i32 || EltType == MVT::f32) return AArch64ISD::DUPLANE32; if (EltType == MVT::i64 || EltType == MVT::f64) return AArch64ISD::DUPLANE64; llvm_unreachable("Invalid vector element type?"); } static SDValue constructDup(SDValue V, int Lane, SDLoc dl, EVT VT, unsigned Opcode, SelectionDAG &DAG) { // Try to eliminate a bitcasted extract subvector before a DUPLANE. auto getScaledOffsetDup = [](SDValue BitCast, int &LaneC, MVT &CastVT) { // Match: dup (bitcast (extract_subv X, C)), LaneC if (BitCast.getOpcode() != ISD::BITCAST || BitCast.getOperand(0).getOpcode() != ISD::EXTRACT_SUBVECTOR) return false; // The extract index must align in the destination type. That may not // happen if the bitcast is from narrow to wide type. SDValue Extract = BitCast.getOperand(0); unsigned ExtIdx = Extract.getConstantOperandVal(1); unsigned SrcEltBitWidth = Extract.getScalarValueSizeInBits(); unsigned ExtIdxInBits = ExtIdx * SrcEltBitWidth; unsigned CastedEltBitWidth = BitCast.getScalarValueSizeInBits(); if (ExtIdxInBits % CastedEltBitWidth != 0) return false; // Update the lane value by offsetting with the scaled extract index. LaneC += ExtIdxInBits / CastedEltBitWidth; // Determine the casted vector type of the wide vector input. 
// dup (bitcast (extract_subv X, C)), LaneC --> dup (bitcast X), LaneC' // Examples: // dup (bitcast (extract_subv v2f64 X, 1) to v2f32), 1 --> dup v4f32 X, 3 // dup (bitcast (extract_subv v16i8 X, 8) to v4i16), 1 --> dup v8i16 X, 5 unsigned SrcVecNumElts = Extract.getOperand(0).getValueSizeInBits() / CastedEltBitWidth; CastVT = MVT::getVectorVT(BitCast.getSimpleValueType().getScalarType(), SrcVecNumElts); return true; }; MVT CastVT; if (getScaledOffsetDup(V, Lane, CastVT)) { V = DAG.getBitcast(CastVT, V.getOperand(0).getOperand(0)); } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) { // The lane is incremented by the index of the extract. // Example: dup v2f32 (extract v4f32 X, 2), 1 --> dup v4f32 X, 3 Lane += V.getConstantOperandVal(1); V = V.getOperand(0); } else if (V.getOpcode() == ISD::CONCAT_VECTORS) { // The lane is decremented if we are splatting from the 2nd operand. // Example: dup v4i32 (concat v2i32 X, v2i32 Y), 3 --> dup v4i32 Y, 1 unsigned Idx = Lane >= (int)VT.getVectorNumElements() / 2; Lane -= Idx * VT.getVectorNumElements() / 2; V = WidenVector(V.getOperand(Idx), DAG); } else if (VT.getSizeInBits() == 64) { // Widen the operand to 128-bit register with undef. V = WidenVector(V, DAG); } return DAG.getNode(Opcode, dl, VT, V, DAG.getConstant(Lane, dl, MVT::i64)); } SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { SDLoc dl(Op); EVT VT = Op.getValueType(); ShuffleVectorSDNode *SVN = cast(Op.getNode()); if (useSVEForFixedLengthVectorVT(VT)) return LowerFixedLengthVECTOR_SHUFFLEToSVE(Op, DAG); // Convert shuffles that are directly supported on NEON to target-specific // DAG nodes, instead of keeping them as shuffles and matching them again // during code selection. This is more efficient and avoids the possibility // of inconsistencies between legalization and selection. 
ArrayRef ShuffleMask = SVN->getMask(); SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); assert(V1.getValueType() == VT && "Unexpected VECTOR_SHUFFLE type!"); assert(ShuffleMask.size() == VT.getVectorNumElements() && "Unexpected VECTOR_SHUFFLE mask size!"); if (SVN->isSplat()) { int Lane = SVN->getSplatIndex(); // If this is undef splat, generate it via "just" vdup, if possible. if (Lane == -1) Lane = 0; if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) return DAG.getNode(AArch64ISD::DUP, dl, V1.getValueType(), V1.getOperand(0)); // Test if V1 is a BUILD_VECTOR and the lane being referenced is a non- // constant. If so, we can just reference the lane's definition directly. if (V1.getOpcode() == ISD::BUILD_VECTOR && !isa(V1.getOperand(Lane))) return DAG.getNode(AArch64ISD::DUP, dl, VT, V1.getOperand(Lane)); // Otherwise, duplicate from the lane of the input vector. unsigned Opcode = getDUPLANEOp(V1.getValueType().getVectorElementType()); return constructDup(V1, Lane, dl, VT, Opcode, DAG); } // Check if the mask matches a DUP for a wider element for (unsigned LaneSize : {64U, 32U, 16U}) { unsigned Lane = 0; if (isWideDUPMask(ShuffleMask, VT, LaneSize, Lane)) { unsigned Opcode = LaneSize == 64 ? AArch64ISD::DUPLANE64 : LaneSize == 32 ? 
AArch64ISD::DUPLANE32 : AArch64ISD::DUPLANE16; // Cast V1 to an integer vector with required lane size MVT NewEltTy = MVT::getIntegerVT(LaneSize); unsigned NewEltCount = VT.getSizeInBits() / LaneSize; MVT NewVecTy = MVT::getVectorVT(NewEltTy, NewEltCount); V1 = DAG.getBitcast(NewVecTy, V1); // Constuct the DUP instruction V1 = constructDup(V1, Lane, dl, NewVecTy, Opcode, DAG); // Cast back to the original type return DAG.getBitcast(VT, V1); } } if (isREVMask(ShuffleMask, VT, 64)) return DAG.getNode(AArch64ISD::REV64, dl, V1.getValueType(), V1, V2); if (isREVMask(ShuffleMask, VT, 32)) return DAG.getNode(AArch64ISD::REV32, dl, V1.getValueType(), V1, V2); if (isREVMask(ShuffleMask, VT, 16)) return DAG.getNode(AArch64ISD::REV16, dl, V1.getValueType(), V1, V2); if (((VT.getVectorNumElements() == 8 && VT.getScalarSizeInBits() == 16) || (VT.getVectorNumElements() == 16 && VT.getScalarSizeInBits() == 8)) && ShuffleVectorInst::isReverseMask(ShuffleMask)) { SDValue Rev = DAG.getNode(AArch64ISD::REV64, dl, VT, V1); return DAG.getNode(AArch64ISD::EXT, dl, VT, Rev, Rev, DAG.getConstant(8, dl, MVT::i32)); } bool ReverseEXT = false; unsigned Imm; if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) { if (ReverseEXT) std::swap(V1, V2); Imm *= getExtFactor(V1); return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2, DAG.getConstant(Imm, dl, MVT::i32)); } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) { Imm *= getExtFactor(V1); return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1, DAG.getConstant(Imm, dl, MVT::i32)); } unsigned WhichResult; if (isZIPMask(ShuffleMask, VT, WhichResult)) { unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2; return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2); } if (isUZPMask(ShuffleMask, VT, WhichResult)) { unsigned Opc = (WhichResult == 0) ? 
AArch64ISD::UZP1 : AArch64ISD::UZP2; return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2); } if (isTRNMask(ShuffleMask, VT, WhichResult)) { unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2); } if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) { unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2; return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); } if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) { unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2; return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); } if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) { unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); } if (SDValue Concat = tryFormConcatFromShuffle(Op, DAG)) return Concat; bool DstIsLeft; int Anomaly; int NumInputElements = V1.getValueType().getVectorNumElements(); if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) { SDValue DstVec = DstIsLeft ? V1 : V2; SDValue DstLaneV = DAG.getConstant(Anomaly, dl, MVT::i64); SDValue SrcVec = V1; int SrcLane = ShuffleMask[Anomaly]; if (SrcLane >= NumInputElements) { SrcVec = V2; SrcLane -= VT.getVectorNumElements(); } SDValue SrcLaneV = DAG.getConstant(SrcLane, dl, MVT::i64); EVT ScalarVT = VT.getVectorElementType(); if (ScalarVT.getFixedSizeInBits() < 32 && ScalarVT.isInteger()) ScalarVT = MVT::i32; return DAG.getNode( ISD::INSERT_VECTOR_ELT, dl, VT, DstVec, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, SrcVec, SrcLaneV), DstLaneV); } // If the shuffle is not directly supported and it has 4 elements, use // the PerfectShuffle-generated table to synthesize it from other shuffles. 
unsigned NumElts = VT.getVectorNumElements();
  if (NumElts == 4) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      // 8 encodes an undef lane in the perfect shuffle table.
      if (ShuffleMask[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = ShuffleMask[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
                            PFIndexes[2] * 9 + PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    // The top two bits of the entry hold its cost; only expand cheap entries.
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Last resort: lower the shuffle to a TBL table lookup.
  return GenerateTBL(Op, ShuffleMask, DAG);
}

// Lower ISD::SPLAT_VECTOR, normally to an AArch64 DUP, with special handling
// for SVE predicate (i1) vectors and fixed-length SVE.
SDValue AArch64TargetLowering::LowerSPLAT_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  EVT ElemVT = VT.getScalarType();
  SDValue SplatVal = Op.getOperand(0);

  if (useSVEForFixedLengthVectorVT(VT))
    return LowerToScalableOp(Op, DAG);

  // Extend input splat value where needed to fit into a GPR (32b or 64b only)
  // FPRs don't have this restriction.
  switch (ElemVT.getSimpleVT().SimpleTy) {
  case MVT::i1: {
    // The only legal i1 vectors are SVE vectors, so we can use SVE-specific
    // lowering code.
    if (auto *ConstVal = dyn_cast(SplatVal)) {
      if (ConstVal->isOne())
        return getPTrue(DAG, dl, VT, AArch64SVEPredPattern::all);
      // TODO: Add special case for constant false
    }

    // The general case of i1. There isn't any natural way to do this,
    // so we use some trickery with whilelo.
    // Sign-extend the splat bit to all of an i64: 0 -> 0, 1 -> -1, so
    // whilelo(0, SplatVal) yields an all-false or all-true predicate.
    SplatVal = DAG.getAnyExtOrTrunc(SplatVal, dl, MVT::i64);
    SplatVal = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i64, SplatVal,
                           DAG.getValueType(MVT::i1));
    SDValue ID =
        DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, dl, MVT::i64);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, ID,
                       DAG.getConstant(0, dl, MVT::i64), SplatVal);
  }
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    SplatVal = DAG.getAnyExtOrTrunc(SplatVal, dl, MVT::i32);
    break;
  case MVT::i64:
    SplatVal = DAG.getAnyExtOrTrunc(SplatVal, dl, MVT::i64);
    break;
  case MVT::f16:
  case MVT::bf16:
  case MVT::f32:
  case MVT::f64:
    // Fine as is
    break;
  default:
    report_fatal_error("Unsupported SPLAT_VECTOR input operand type");
  }

  return DAG.getNode(AArch64ISD::DUP, dl, VT, SplatVal);
}

// Lower a dupq-lane operation: broadcast one 128-bit segment of an SVE
// vector to every 128-bit segment, via DUP_ZZI_Q when the index is an
// in-range constant, otherwise via an equivalent TBL sequence.
SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  if (!isTypeLegal(VT) || !VT.isScalableVector())
    return SDValue();

  // Current lowering only supports the SVE-ACLE types.
  if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
    return SDValue();

  // The DUPQ operation is independent of element type so normalise to i64s.
  SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::nxv2i64, Op.getOperand(1));
  SDValue Idx128 = Op.getOperand(2);

  // DUPQ can be used when idx is in range.
  auto *CIdx = dyn_cast(Idx128);
  if (CIdx && (CIdx->getZExtValue() <= 3)) {
    SDValue CI = DAG.getTargetConstant(CIdx->getZExtValue(), DL, MVT::i64);
    SDNode *DUPQ =
        DAG.getMachineNode(AArch64::DUP_ZZI_Q, DL, MVT::nxv2i64, V, CI);
    return DAG.getNode(ISD::BITCAST, DL, VT, SDValue(DUPQ, 0));
  }

  // The ACLE says this must produce the same result as:
  //   svtbl(data, svadd_x(svptrue_b64(),
  //                       svand_x(svptrue_b64(), svindex_u64(0, 1), 1),
  //                       index * 2))
  SDValue One = DAG.getConstant(1, DL, MVT::i64);
  SDValue SplatOne = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, One);

  // create the vector 0,1,0,1,...
SDValue SV = DAG.getStepVector(DL, MVT::nxv2i64);
  SV = DAG.getNode(ISD::AND, DL, MVT::nxv2i64, SV, SplatOne);

  // create the vector idx64,idx64+1,idx64,idx64+1,...
  SDValue Idx64 = DAG.getNode(ISD::ADD, DL, MVT::i64, Idx128, Idx128);
  SDValue SplatIdx64 = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Idx64);
  SDValue ShuffleMask = DAG.getNode(ISD::ADD, DL, MVT::nxv2i64, SV, SplatIdx64);

  // create the vector Val[idx64],Val[idx64+1],Val[idx64],Val[idx64+1],...
  SDValue TBL = DAG.getNode(AArch64ISD::TBL, DL, MVT::nxv2i64, V, ShuffleMask);
  return DAG.getNode(ISD::BITCAST, DL, VT, TBL);
}

// Decompose a constant-splat BUILD_VECTOR into the full-width bit patterns of
// its defined lanes (CnstBits) and its undef lanes (UndefBits).  Returns
// false if BVN is not a constant splat.
static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits,
                               APInt &UndefBits) {
  EVT VT = BVN->getValueType(0);
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    unsigned NumSplats = VT.getSizeInBits() / SplatBitSize;

    // Replicate the splat element's bits across the whole vector width.
    for (unsigned i = 0; i < NumSplats; ++i) {
      CnstBits <<= SplatBitSize;
      UndefBits <<= SplatBitSize;
      CnstBits |= SplatBits.zextOrTrunc(VT.getSizeInBits());
      UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.getSizeInBits());
    }

    return true;
  }

  return false;
}

// Try 64-bit splatted SIMD immediate.
static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                  const APInt &Bits) {
  // Only a value that is the same in both 64-bit halves can be a splat.
  if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
    uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
    EVT VT = Op.getValueType();
    MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v2i64 : MVT::f64;

    if (AArch64_AM::isAdvSIMDModImmType10(Value)) {
      Value = AArch64_AM::encodeAdvSIMDModImmType10(Value);

      SDLoc dl(Op);
      SDValue Mov =
          DAG.getNode(NewOp, dl, MovTy, DAG.getConstant(Value, dl, MVT::i32));
      // NVCAST reinterprets the MOV result in the requested vector type.
      return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
    }
  }

  return SDValue();
}

// Try 32-bit splatted SIMD immediate.
static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                  const APInt &Bits,
                                  const SDValue *LHS = nullptr) {
  if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
    uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
    EVT VT = Op.getValueType();
    MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
    bool isAdvSIMDModImm = false;
    uint64_t Shift;

    // Immediate types 1-4 place the 8-bit payload at byte shift 0/8/16/24.
    if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType1(Value))) {
      Value = AArch64_AM::encodeAdvSIMDModImmType1(Value);
      Shift = 0;
    }
    else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType2(Value))) {
      Value = AArch64_AM::encodeAdvSIMDModImmType2(Value);
      Shift = 8;
    }
    else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType3(Value))) {
      Value = AArch64_AM::encodeAdvSIMDModImmType3(Value);
      Shift = 16;
    }
    else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType4(Value))) {
      Value = AArch64_AM::encodeAdvSIMDModImmType4(Value);
      Shift = 24;
    }

    if (isAdvSIMDModImm) {
      SDLoc dl(Op);
      SDValue Mov;

      // With an LHS this forms a two-operand immediate op (e.g. ORR/BIC);
      // otherwise a plain move-immediate.
      if (LHS)
        Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
                          DAG.getConstant(Value, dl, MVT::i32),
                          DAG.getConstant(Shift, dl, MVT::i32));
      else
        Mov = DAG.getNode(NewOp, dl, MovTy,
                          DAG.getConstant(Value, dl, MVT::i32),
                          DAG.getConstant(Shift, dl, MVT::i32));

      return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
    }
  }

  return SDValue();
}

// Try 16-bit splatted SIMD immediate.
static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                  const APInt &Bits,
                                  const SDValue *LHS = nullptr) {
  if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
    uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
    EVT VT = Op.getValueType();
    MVT MovTy = (VT.getSizeInBits() == 128) ?
MVT::v8i16 : MVT::v4i16;
    bool isAdvSIMDModImm = false;
    uint64_t Shift;

    // Immediate types 5-6 place the 8-bit payload at byte shift 0/8.
    if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType5(Value))) {
      Value = AArch64_AM::encodeAdvSIMDModImmType5(Value);
      Shift = 0;
    }
    else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType6(Value))) {
      Value = AArch64_AM::encodeAdvSIMDModImmType6(Value);
      Shift = 8;
    }

    if (isAdvSIMDModImm) {
      SDLoc dl(Op);
      SDValue Mov;

      // With an LHS this forms a two-operand immediate op (e.g. ORR/BIC);
      // otherwise a plain move-immediate.
      if (LHS)
        Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
                          DAG.getConstant(Value, dl, MVT::i32),
                          DAG.getConstant(Shift, dl, MVT::i32));
      else
        Mov = DAG.getNode(NewOp, dl, MovTy,
                          DAG.getConstant(Value, dl, MVT::i32),
                          DAG.getConstant(Shift, dl, MVT::i32));

      return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
    }
  }

  return SDValue();
}

// Try 32-bit splatted SIMD immediate with shifted ones.
static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op,
                                    SelectionDAG &DAG, const APInt &Bits) {
  if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
    uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
    EVT VT = Op.getValueType();
    MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
    bool isAdvSIMDModImm = false;
    uint64_t Shift;

    // 264/272 presumably encode the MSL #8 / MSL #16 (shifted-ones) forms;
    // confirm against the AArch64_AM immediate encodings.
    if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType7(Value))) {
      Value = AArch64_AM::encodeAdvSIMDModImmType7(Value);
      Shift = 264;
    }
    else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType8(Value))) {
      Value = AArch64_AM::encodeAdvSIMDModImmType8(Value);
      Shift = 272;
    }

    if (isAdvSIMDModImm) {
      SDLoc dl(Op);
      SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
                                DAG.getConstant(Value, dl, MVT::i32),
                                DAG.getConstant(Shift, dl, MVT::i32));
      return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
    }
  }

  return SDValue();
}

// Try 8-bit splatted SIMD immediate.
static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                 const APInt &Bits) {
  if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
    uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
    EVT VT = Op.getValueType();
    MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8;

    if (AArch64_AM::isAdvSIMDModImmType9(Value)) {
      Value = AArch64_AM::encodeAdvSIMDModImmType9(Value);

      SDLoc dl(Op);
      SDValue Mov =
          DAG.getNode(NewOp, dl, MovTy, DAG.getConstant(Value, dl, MVT::i32));
      return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
    }
  }

  return SDValue();
}

// Try FP splatted SIMD immediate.
static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                  const APInt &Bits) {
  if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
    uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
    EVT VT = Op.getValueType();
    bool isWide = (VT.getSizeInBits() == 128);
    MVT MovTy;
    bool isAdvSIMDModImm = false;

    // Type 11 produces an f32 vector immediate; type 12 (f64) exists only in
    // the 128-bit (v2f64) form.
    if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType11(Value))) {
      Value = AArch64_AM::encodeAdvSIMDModImmType11(Value);
      MovTy = isWide ? MVT::v4f32 : MVT::v2f32;
    }
    else if (isWide &&
             (isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType12(Value))) {
      Value = AArch64_AM::encodeAdvSIMDModImmType12(Value);
      MovTy = MVT::v2f64;
    }

    if (isAdvSIMDModImm) {
      SDLoc dl(Op);
      SDValue Mov =
          DAG.getNode(NewOp, dl, MovTy, DAG.getConstant(Value, dl, MVT::i32));
      return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
    }
  }

  return SDValue();
}

// Specialized code to quickly find if PotentialBVec is a BuildVector that
// consists of only the same constant int value, returned in reference arg
// ConstVal
static bool isAllConstantBuildVector(const SDValue &PotentialBVec,
                                     uint64_t &ConstVal) {
  BuildVectorSDNode *Bvec = dyn_cast(PotentialBVec);
  if (!Bvec)
    return false;
  ConstantSDNode *FirstElt = dyn_cast(Bvec->getOperand(0));
  if (!FirstElt)
    return false;
  EVT VT = Bvec->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();
  // Every remaining operand must be the very same ConstantSDNode (the DAG
  // uniques constants, so pointer equality suffices).
  for (unsigned i = 1; i < NumElts; ++i)
    if (dyn_cast(Bvec->getOperand(i)) != FirstElt)
      return false;
  ConstVal = FirstElt->getZExtValue();
  return true;
}

// Return the intrinsic ID of an INTRINSIC_WO_CHAIN node, or
// Intrinsic::not_intrinsic for any other node.
static unsigned getIntrinsicID(const SDNode *N) {
  unsigned Opcode = N->getOpcode();
  switch (Opcode) {
  default:
    return Intrinsic::not_intrinsic;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned
IID = cast(N->getOperand(0))->getZExtValue();
    // IDs at or beyond num_intrinsics are not valid generic intrinsics.
    if (IID < Intrinsic::num_intrinsics)
      return IID;

    return Intrinsic::not_intrinsic;
  }
  }
}

// Attempt to form a vector S[LR]I from (or (and X, BvecC1), (lsl Y, C2)),
// to (SLI X, Y, C2), where X and Y have matching vector types, BvecC1 is a
// BUILD_VECTOR with constant element C1, C2 is a constant, and:
//   - for the SLI case: C1 == ~(Ones(ElemSizeInBits) << C2)
//   - for the SRI case: C1 == ~(Ones(ElemSizeInBits) >> C2)
// The (or (lsl Y, C2), (and X, BvecC1)) case is also handled.
static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);

  if (!VT.isVector())
    return SDValue();

  SDLoc DL(N);

  SDValue And;
  SDValue Shift;

  SDValue FirstOp = N->getOperand(0);
  unsigned FirstOpc = FirstOp.getOpcode();
  SDValue SecondOp = N->getOperand(1);
  unsigned SecondOpc = SecondOp.getOpcode();

  // Is one of the operands an AND or a BICi? The AND may have been optimised to
  // a BICi in order to use an immediate instead of a register.
  // Is the other operand an shl or lshr? This will have been turned into:
  // AArch64ISD::VSHL vector, #shift or AArch64ISD::VLSHR vector, #shift.
  if ((FirstOpc == ISD::AND || FirstOpc == AArch64ISD::BICi) &&
      (SecondOpc == AArch64ISD::VSHL || SecondOpc == AArch64ISD::VLSHR)) {
    And = FirstOp;
    Shift = SecondOp;
  } else if ((SecondOpc == ISD::AND || SecondOpc == AArch64ISD::BICi) &&
             (FirstOpc == AArch64ISD::VSHL || FirstOpc == AArch64ISD::VLSHR)) {
    And = SecondOp;
    Shift = FirstOp;
  } else
    return SDValue();

  bool IsAnd = And.getOpcode() == ISD::AND;
  bool IsShiftRight = Shift.getOpcode() == AArch64ISD::VLSHR;

  // Is the shift amount constant?
  ConstantSDNode *C2node = dyn_cast(Shift.getOperand(1));
  if (!C2node)
    return SDValue();

  uint64_t C1;
  if (IsAnd) {
    // Is the and mask vector all constant?
    if (!isAllConstantBuildVector(And.getOperand(1), C1))
      return SDValue();
  } else {
    // Reconstruct the corresponding AND immediate from the two BICi immediates.
    ConstantSDNode *C1nodeImm = dyn_cast(And.getOperand(1));
    ConstantSDNode *C1nodeShift = dyn_cast(And.getOperand(2));
    assert(C1nodeImm && C1nodeShift);
    C1 = ~(C1nodeImm->getZExtValue() << C1nodeShift->getZExtValue());
  }

  // Is C1 == ~(Ones(ElemSizeInBits) << C2) or
  // C1 == ~(Ones(ElemSizeInBits) >> C2), taking into account
  // how much one can shift elements of a particular size?
  uint64_t C2 = C2node->getZExtValue();
  unsigned ElemSizeInBits = VT.getScalarSizeInBits();
  if (C2 > ElemSizeInBits)
    return SDValue();

  APInt C1AsAPInt(ElemSizeInBits, C1);
  APInt RequiredC1 = IsShiftRight ? APInt::getHighBitsSet(ElemSizeInBits, C2)
                                  : APInt::getLowBitsSet(ElemSizeInBits, C2);
  if (C1AsAPInt != RequiredC1)
    return SDValue();

  SDValue X = And.getOperand(0);
  SDValue Y = Shift.getOperand(0);

  unsigned Inst = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
  SDValue ResultSLI = DAG.getNode(Inst, DL, VT, X, Y, Shift.getOperand(1));

  LLVM_DEBUG(dbgs() << "aarch64-lower: transformed: \n");
  LLVM_DEBUG(N->dump(&DAG));
  LLVM_DEBUG(dbgs() << "into: \n");
  LLVM_DEBUG(ResultSLI->dump(&DAG));

  ++NumShiftInserts;
  return ResultSLI;
}

// Lower a vector OR, preferring an ORR-with-immediate (possibly via an
// S[LR]I formed by tryLowerToSLI) when one operand is a constant
// BUILD_VECTOR; otherwise the plain OR is kept.
SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
                                             SelectionDAG &DAG) const {
  if (useSVEForFixedLengthVectorVT(Op.getValueType()))
    return LowerToScalableOp(Op, DAG);

  // Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2))
  if (SDValue Res = tryLowerToSLI(Op.getNode(), DAG))
    return Res;

  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  BuildVectorSDNode *BVN = dyn_cast(Op.getOperand(1).getNode());
  if (!BVN) {
    // OR commutes, so try swapping the operands.
LHS = Op.getOperand(1);
    BVN = dyn_cast(Op.getOperand(0).getNode());
  }

  if (!BVN)
    return Op;

  APInt DefBits(VT.getSizeInBits(), 0);
  APInt UndefBits(VT.getSizeInBits(), 0);
  if (resolveBuildVector(BVN, DefBits, UndefBits)) {
    SDValue NewOp;

    // Try an ORR-immediate with the defined bits, then again treating the
    // undef lanes' bits as defined (their value is irrelevant).
    if ((NewOp =
             tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG, DefBits, &LHS)) ||
        (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG, DefBits, &LHS)))
      return NewOp;

    if ((NewOp =
             tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG, UndefBits, &LHS)) ||
        (NewOp =
             tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG, UndefBits, &LHS)))
      return NewOp;
  }

  // We can always fall back to a non-immediate OR.
  return Op;
}

// Normalize the operands of BUILD_VECTOR. The value of constant operands will
// be truncated to fit element width.
static SDValue NormalizeBuildVector(SDValue Op,
                                    SelectionDAG &DAG) {
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  EVT EltTy= VT.getVectorElementType();

  // Only narrow integer elements (<= 16 bits) need normalizing.
  if (EltTy.isFloatingPoint() || EltTy.getSizeInBits() > 16)
    return Op;

  SmallVector Ops;
  for (SDValue Lane : Op->ops()) {
    // For integer vectors, type legalization would have promoted the
    // operands already. Otherwise, if Op is a floating-point splat
    // (with operands cast to integers), then the only possibilities
    // are constants and UNDEFs.
    if (auto *CstLane = dyn_cast(Lane)) {
      // Truncate the constant to the element width, then rebuild it as i32.
      APInt LowBits(EltTy.getSizeInBits(), CstLane->getZExtValue());
      Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32);
    } else if (Lane.getNode()->isUndef()) {
      Lane = DAG.getUNDEF(MVT::i32);
    } else {
      assert(Lane.getValueType() == MVT::i32 &&
             "Unexpected BUILD_VECTOR operand type");
    }
    Ops.push_back(Lane);
  }
  return DAG.getBuildVector(VT, dl, Ops);
}

// Try to materialise a constant BUILD_VECTOR with one MOVI/MVNI/FMOV
// immediate, testing every immediate form over the defined bits, their
// complement, and the undef-relaxed variants of both.
static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  APInt DefBits(VT.getSizeInBits(), 0);
  APInt UndefBits(VT.getSizeInBits(), 0);
  BuildVectorSDNode *BVN = cast(Op.getNode());
  if (resolveBuildVector(BVN, DefBits, UndefBits)) {
    SDValue NewOp;
    if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
      return NewOp;

    // MVNI moves the bitwise NOT of its immediate, so retry on ~DefBits.
    DefBits = ~DefBits;
    if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
      return NewOp;

    // Retry with the undef lanes' bits treated as defined.
    DefBits = UndefBits;
    if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
      return NewOp;

    DefBits = ~UndefBits;
    if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
        (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
      return NewOp;
  }

  return SDValue();
}

SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  // Try to build a simple constant vector.
  Op = NormalizeBuildVector(Op, DAG);
  if (VT.isInteger()) {
    // Certain vector constants, used to express things like logical NOT and
    // arithmetic NEG, are passed through unmodified. This allows special
    // patterns for these operations to match, which will lower these constants
    // to whatever is proven necessary.
    BuildVectorSDNode *BVN = cast(Op.getNode());
    if (BVN->isConstant())
      if (ConstantSDNode *Const = BVN->getConstantSplatNode()) {
        unsigned BitSize = VT.getVectorElementType().getSizeInBits();
        APInt Val(BitSize,
                  Const->getAPIntValue().zextOrTrunc(BitSize).getZExtValue());
        if (Val.isNullValue() || Val.isAllOnesValue())
          return Op;
      }
  }

  if (SDValue V = ConstantBuildVector(Op, DAG))
    return V;

  // Scan through the operands to find some interesting properties we can
  // exploit:
  //   1) If only one value is used, we can use a DUP, or
  //   2) if only the low element is not undef, we can just insert that, or
  //   3) if only one constant value is used (w/ some non-constant lanes),
  //      we can splat the constant value into the whole vector then fill
  //      in the non-constant lanes.
  //   4) FIXME: If different constant values are used, but we can intelligently
  //             select the values we'll be overwriting for the non-constant
  //             lanes such that we can directly materialize the vector
  //             some other way (MOVI, e.g.), we can be sneaky.
  //   5) if all operands are EXTRACT_VECTOR_ELT, check for VUZP.
SDLoc dl(Op);
  unsigned NumElts = VT.getVectorNumElements();
  bool isOnlyLowElement = true;
  bool usesOnlyOneValue = true;
  bool usesOnlyOneConstantValue = true;
  bool isConstant = true;
  bool AllLanesExtractElt = true;
  unsigned NumConstantLanes = 0;
  unsigned NumDifferentLanes = 0;
  unsigned NumUndefLanes = 0;
  SDValue Value;
  SDValue ConstantValue;

  // Single classification pass over the lanes, recording the properties the
  // lowering strategies below select on.
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      AllLanesExtractElt = false;
    if (V.isUndef()) {
      ++NumUndefLanes;
      continue;
    }
    if (i > 0)
      isOnlyLowElement = false;
    if (!isIntOrFPConstant(V))
      isConstant = false;

    if (isIntOrFPConstant(V)) {
      ++NumConstantLanes;
      if (!ConstantValue.getNode())
        ConstantValue = V;
      else if (ConstantValue != V)
        usesOnlyOneConstantValue = false;
    }

    if (!Value.getNode())
      Value = V;
    else if (V != Value) {
      usesOnlyOneValue = false;
      ++NumDifferentLanes;
    }
  }

  if (!Value.getNode()) {
    LLVM_DEBUG(
        dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n");
    return DAG.getUNDEF(VT);
  }

  // Convert BUILD_VECTOR where all elements but the lowest are undef into
  // SCALAR_TO_VECTOR, except for when we have a single-element constant vector
  // as SimplifyDemandedBits will just turn that back into BUILD_VECTOR.
  if (isOnlyLowElement && !(NumElts == 1 && isIntOrFPConstant(Value))) {
    LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 "
                         "SCALAR_TO_VECTOR node\n");
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
  }

  if (AllLanesExtractElt) {
    SDNode *Vector = nullptr;
    bool Even = false;
    bool Odd = false;
    // Check whether the extract elements match the Even pattern <0,2,4,...> or
    // the Odd pattern <1,3,5,...>.
    for (unsigned i = 0; i < NumElts; ++i) {
      SDValue V = Op.getOperand(i);
      const SDNode *N = V.getNode();
      if (!isa(N->getOperand(1)))
        break;
      SDValue N0 = N->getOperand(0);

      // All elements are extracted from the same vector.
      if (!Vector) {
        Vector = N0.getNode();
        // Check that the type of EXTRACT_VECTOR_ELT matches the type of
        // BUILD_VECTOR.
        if (VT.getVectorElementType() !=
            N0.getValueType().getVectorElementType())
          break;
      } else if (Vector != N0.getNode()) {
        Odd = false;
        Even = false;
        break;
      }

      // Extracted values are either at Even indices <0,2,4,...> or at Odd
      // indices <1,3,5,...>.
      uint64_t Val = N->getConstantOperandVal(1);
      if (Val == 2 * i) {
        Even = true;
        continue;
      }
      if (Val - 1 == 2 * i) {
        Odd = true;
        continue;
      }

      // Something does not match: abort.
      Odd = false;
      Even = false;
      break;
    }
    if (Even || Odd) {
      // Split the source into its two halves and de-interleave with UZP1/2.
      SDValue LHS =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0),
                      DAG.getConstant(0, dl, MVT::i64));
      SDValue RHS =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0),
                      DAG.getConstant(NumElts, dl, MVT::i64));

      if (Even && !Odd)
        return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), LHS,
                           RHS);
      if (Odd && !Even)
        return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), LHS,
                           RHS);
    }
  }

  // Use DUP for non-constant splats. For f32 constant splats, reduce to
  // i32 and try again.
  if (usesOnlyOneValue) {
    if (!isConstant) {
      if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
          Value.getValueType() != VT) {
        LLVM_DEBUG(
            dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n");
        return DAG.getNode(AArch64ISD::DUP, dl, VT, Value);
      }

      // This is actually a DUPLANExx operation, which keeps everything vectory.
      SDValue Lane = Value.getOperand(1);
      Value = Value.getOperand(0);
      if (Value.getValueSizeInBits() == 64) {
        LLVM_DEBUG(
            dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, "
                      "widening it\n");
        Value = WidenVector(Value, DAG);
      }

      unsigned Opcode = getDUPLANEOp(VT.getVectorElementType());
      return DAG.getNode(Opcode, dl, VT, Value, Lane);
    }

    if (VT.getVectorElementType().isFloatingPoint()) {
      SmallVector Ops;
      EVT EltTy = VT.getVectorElementType();
      assert ((EltTy == MVT::f16 || EltTy == MVT::bf16 || EltTy == MVT::f32 ||
               EltTy == MVT::f64) && "Unsupported floating-point vector type");
      LLVM_DEBUG(
          dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int "
                    "BITCASTS, and try again\n");
      MVT NewType = MVT::getIntegerVT(EltTy.getSizeInBits());
      for (unsigned i = 0; i < NumElts; ++i)
        Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i)));
      EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts);
      SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
      LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: ";
                 Val.dump(););
      Val = LowerBUILD_VECTOR(Val, DAG);
      if (Val.getNode())
        return DAG.getNode(ISD::BITCAST, dl, VT, Val);
    }
  }

  // If we need to insert a small number of different non-constant elements and
  // the vector width is sufficiently large, prefer using DUP with the common
  // value and INSERT_VECTOR_ELT for the different lanes. If DUP is preferred,
  // skip the constant lane handling below.
  bool PreferDUPAndInsert =
      !isConstant && NumDifferentLanes >= 1 &&
      NumDifferentLanes < ((NumElts - NumUndefLanes) / 2) &&
      NumDifferentLanes >= NumConstantLanes;

  // If there was only one constant value used and for more than one lane,
  // start by splatting that value, then replace the non-constant lanes. This
  // is better than the default, which will perform a separate initialization
  // for each lane.
  if (!PreferDUPAndInsert && NumConstantLanes > 0 && usesOnlyOneConstantValue) {
    // Firstly, try to materialize the splat constant.
    SDValue Vec = DAG.getSplatBuildVector(VT, dl, ConstantValue),
            Val = ConstantBuildVector(Vec, DAG);
    if (!Val) {
      // Otherwise, materialize the constant and splat it.
      Val = DAG.getNode(AArch64ISD::DUP, dl, VT, ConstantValue);
      DAG.ReplaceAllUsesWith(Vec.getNode(), &Val);
    }

    // Now insert the non-constant lanes.
    for (unsigned i = 0; i < NumElts; ++i) {
      SDValue V = Op.getOperand(i);
      SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
      if (!isIntOrFPConstant(V))
        // Note that type legalization likely mucked about with the VT of the
        // source operand, so we may have to convert it here before inserting.
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, V, LaneIdx);
    }
    return Val;
  }

  // This will generate a load from the constant pool.
  if (isConstant) {
    LLVM_DEBUG(
        dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default "
                  "expansion\n");
    return SDValue();
  }

  // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
  if (NumElts >= 4) {
    if (SDValue shuffle = ReconstructShuffle(Op, DAG))
      return shuffle;
  }

  if (PreferDUPAndInsert) {
    // First, build a constant vector with the common element.
    SmallVector Ops(NumElts, Value);
    SDValue NewVector = LowerBUILD_VECTOR(DAG.getBuildVector(VT, dl, Ops), DAG);
    // Next, insert the elements that do not match the common value.
    for (unsigned I = 0; I < NumElts; ++I)
      if (Op.getOperand(I) != Value)
        NewVector =
            DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, NewVector,
                        Op.getOperand(I), DAG.getConstant(I, dl, MVT::i64));

    return NewVector;
  }

  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
  // know the default expansion would otherwise fall back on something even
  // worse. For a vector with one or two non-undef values, that's
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target) and materialization element by element
  // on the stack followed by a load for everything else.
  if (!isConstant && !usesOnlyOneValue) {
    LLVM_DEBUG(
        dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence "
                  "of INSERT_VECTOR_ELT\n");

    SDValue Vec = DAG.getUNDEF(VT);
    SDValue Op0 = Op.getOperand(0);
    unsigned i = 0;

    // Use SCALAR_TO_VECTOR for lane zero to
    // a) Avoid a RMW dependency on the full vector register, and
    // b) Allow the register coalescer to fold away the copy if the
    //    value is already in an S or D register, and we're forced to emit an
    //    INSERT_SUBREG that we can't fold anywhere.
    //
    // We also allow types like i8 and i16 which are illegal scalar but legal
    // vector element types. After type-legalization the inserted value is
    // extended (i32) and it is safe to cast them to the vector type by ignoring
    // the upper bits of the lowest lane (e.g. v8i8, v4i16).
    if (!Op0.isUndef()) {
      LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n");
      Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0);
      ++i;
    }
    LLVM_DEBUG(if (i < NumElts) dbgs()
                   << "Creating nodes for the other vector elements:\n";);
    for (; i < NumElts; ++i) {
      SDValue V = Op.getOperand(i);
      if (V.isUndef())
        continue;
      SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
    }
    return Vec;
  }

  LLVM_DEBUG(
      dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find "
                "better alternative\n");
  return SDValue();
}

SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                   SelectionDAG &DAG) const {
  if (useSVEForFixedLengthVectorVT(Op.getValueType()))
    return LowerFixedLengthConcatVectorsToSVE(Op, DAG);

  assert(Op.getValueType().isScalableVector() &&
         isTypeLegal(Op.getValueType()) &&
         "Expected legal scalable vector type!");

  // A two-operand concat of already-legal vectors needs no further lowering.
  if (isTypeLegal(Op.getOperand(0).getValueType()) && Op.getNumOperands() == 2)
    return Op;

  return SDValue();
}

SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                      SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!");

  if
(useSVEForFixedLengthVectorVT(Op.getValueType())) return LowerFixedLengthInsertVectorElt(Op, DAG); // Check for non-constant or out of range lane. EVT VT = Op.getOperand(0).getValueType(); if (VT.getScalarType() == MVT::i1) { EVT VectorVT = getPromotedVTForPredicate(VT); SDLoc DL(Op); SDValue ExtendedVector = DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, VectorVT); SDValue ExtendedValue = DAG.getAnyExtOrTrunc(Op.getOperand(1), DL, VectorVT.getScalarType().getSizeInBits() < 32 ? MVT::i32 : VectorVT.getScalarType()); ExtendedVector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VectorVT, ExtendedVector, ExtendedValue, Op.getOperand(2)); return DAG.getAnyExtOrTrunc(ExtendedVector, DL, VT); } ConstantSDNode *CI = dyn_cast(Op.getOperand(2)); if (!CI || CI->getZExtValue() >= VT.getVectorNumElements()) return SDValue(); // Insertion/extraction are legal for V128 types. if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f16 || VT == MVT::v8bf16) return Op; if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 && VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 && VT != MVT::v4bf16) return SDValue(); // For V64 types, we perform insertion by expanding the value // to a V128 type and perform the insertion on that. SDLoc DL(Op); SDValue WideVec = WidenVector(Op.getOperand(0), DAG); EVT WideTy = WideVec.getValueType(); SDValue Node = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideTy, WideVec, Op.getOperand(1), Op.getOperand(2)); // Re-narrow the resultant vector. return NarrowVector(Node, DAG); } SDValue AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!"); EVT VT = Op.getOperand(0).getValueType(); if (VT.getScalarType() == MVT::i1) { // We can't directly extract from an SVE predicate; extend it first. // (This isn't the only possible lowering, but it's straightforward.) 
EVT VectorVT = getPromotedVTForPredicate(VT); SDLoc DL(Op); SDValue Extend = DAG.getNode(ISD::ANY_EXTEND, DL, VectorVT, Op.getOperand(0)); MVT ExtractTy = VectorVT == MVT::nxv2i64 ? MVT::i64 : MVT::i32; SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractTy, Extend, Op.getOperand(1)); return DAG.getAnyExtOrTrunc(Extract, DL, Op.getValueType()); } if (useSVEForFixedLengthVectorVT(VT)) return LowerFixedLengthExtractVectorElt(Op, DAG); // Check for non-constant or out of range lane. ConstantSDNode *CI = dyn_cast(Op.getOperand(1)); if (!CI || CI->getZExtValue() >= VT.getVectorNumElements()) return SDValue(); // Insertion/extraction are legal for V128 types. if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f16 || VT == MVT::v8bf16) return Op; if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 && VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 && VT != MVT::v4bf16) return SDValue(); // For V64 types, we perform extraction by expanding the value // to a V128 type and perform the extraction on that. SDLoc DL(Op); SDValue WideVec = WidenVector(Op.getOperand(0), DAG); EVT WideTy = WideVec.getValueType(); EVT ExtrTy = WideTy.getVectorElementType(); if (ExtrTy == MVT::i16 || ExtrTy == MVT::i8) ExtrTy = MVT::i32; // For extractions, we just return the result directly. return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtrTy, WideVec, Op.getOperand(1)); } SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { assert(Op.getValueType().isFixedLengthVector() && "Only cases that extract a fixed length vector are supported!"); EVT InVT = Op.getOperand(0).getValueType(); unsigned Idx = cast(Op.getOperand(1))->getZExtValue(); unsigned Size = Op.getValueSizeInBits(); if (InVT.isScalableVector()) { // This will be matched by custom code during ISelDAGToDAG. 
if (Idx == 0 && isPackedVectorType(InVT, DAG))
      return Op;

    return SDValue();
  }

  // This will get lowered to an appropriate EXTRACT_SUBREG in ISel.
  if (Idx == 0 && InVT.getSizeInBits() <= 128)
    return Op;

  // If this is extracting the upper 64-bits of a 128-bit vector, we match
  // that directly.
  if (Size == 64 && Idx * InVT.getScalarSizeInBits() == 64 &&
      InVT.getSizeInBits() == 128)
    return Op;

  return SDValue();
}

/// Lower INSERT_SUBVECTOR into a scalable vector. Scalable-into-scalable
/// inserts of a half-width integer subvector are implemented with
/// unpack + uzp1; inserting a packed fixed vector at index 0 into undef is
/// left for custom ISelDAGToDAG matching.
SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  assert(Op.getValueType().isScalableVector() &&
         "Only expect to lower inserts into scalable vectors!");

  EVT InVT = Op.getOperand(1).getValueType();
  // NOTE(review): upstream is cast<ConstantSDNode> — template argument
  // stripped by extraction (same for other bare casts in this block).
  unsigned Idx = cast(Op.getOperand(2))->getZExtValue();

  if (InVT.isScalableVector()) {
    SDLoc DL(Op);
    EVT VT = Op.getValueType();

    if (!isTypeLegal(VT) || !VT.isInteger())
      return SDValue();

    SDValue Vec0 = Op.getOperand(0);
    SDValue Vec1 = Op.getOperand(1);

    // Ensure the subvector is half the size of the main vector.
    if (VT.getVectorElementCount() != (InVT.getVectorElementCount() * 2))
      return SDValue();

    // Extend elements of smaller vector...
    EVT WideVT = InVT.widenIntegerVectorElementType(*(DAG.getContext()));
    SDValue ExtVec = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Vec1);

    if (Idx == 0) {
      // Inserting at the bottom: keep the high half of Vec0.
      SDValue HiVec0 = DAG.getNode(AArch64ISD::UUNPKHI, DL, WideVT, Vec0);
      return DAG.getNode(AArch64ISD::UZP1, DL, VT, ExtVec, HiVec0);
    } else if (Idx == InVT.getVectorMinNumElements()) {
      // Inserting at the top: keep the low half of Vec0.
      SDValue LoVec0 = DAG.getNode(AArch64ISD::UUNPKLO, DL, WideVT, Vec0);
      return DAG.getNode(AArch64ISD::UZP1, DL, VT, LoVec0, ExtVec);
    }

    return SDValue();
  }

  // This will be matched by custom code during ISelDAGToDAG.
  if (Idx == 0 && isPackedVectorType(InVT, DAG) && Op.getOperand(0).isUndef())
    return Op;

  return SDValue();
}

/// Lower scalable SDIV/UDIV. nxv4i32/nxv2i64 map directly to predicated DIV;
/// i8/i16 element types are widened (SVE has no narrow DIV), divided per
/// half, and re-narrowed with UZP1.
SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (useSVEForFixedLengthVectorVT(VT, /*OverrideNEON=*/true))
    return LowerFixedLengthVectorIntDivideToSVE(Op, DAG);

  assert(VT.isScalableVector() && "Expected a scalable vector.");

  bool Signed = Op.getOpcode() == ISD::SDIV;
  unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;

  if (VT == MVT::nxv4i32 || VT == MVT::nxv2i64)
    return LowerToPredicatedOp(Op, DAG, PredOpcode);

  // SVE doesn't have i8 and i16 DIV operations; widen them to 32-bit
  // operations, and truncate the result.
  EVT WidenedVT;
  if (VT == MVT::nxv16i8)
    WidenedVT = MVT::nxv8i16;
  else if (VT == MVT::nxv8i16)
    WidenedVT = MVT::nxv4i32;
  else
    llvm_unreachable("Unexpected Custom DIV operation");

  SDLoc dl(Op);
  // Sign of the division picks signed vs unsigned unpacks.
  unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
  unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI;
  SDValue Op0Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(0));
  SDValue Op1Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(1));
  SDValue Op0Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(0));
  SDValue Op1Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(1));
  SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Lo, Op1Lo);
  SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Hi, Op1Hi);
  return DAG.getNode(AArch64ISD::UZP1, dl, VT, ResultLo, ResultHi);
}

/// Return true if the given shuffle mask can be codegen'd directly, rather
/// than being scalarized.
/// NOTE(review): upstream signature is ArrayRef<int> M — template argument
/// stripped by extraction.
bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef M, EVT VT) const {
  // Currently no fixed length shuffles that require SVE are legal.
  if (useSVEForFixedLengthVectorVT(VT))
    return false;

  if (VT.getVectorNumElements() == 4 &&
      (VT.is128BitVector() || VT.is64BitVector())) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      // 8 encodes an undef lane in the perfect-shuffle table indexing.
      if (M[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = M[i];
    }

    // Compute the index in the perfect shuffle table.
unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
                            PFIndexes[2] * 9 + PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    // Top two bits of the table entry encode the cost of the sequence.
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return true;
  }

  // Sink values the predicates below require but we don't care about here.
  bool DummyBool;
  int DummyInt;
  unsigned DummyUnsigned;

  return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) ||
          isREVMask(M, VT, 32) || isREVMask(M, VT, 16) ||
          isEXTMask(M, VT, DummyBool, DummyUnsigned) ||
          // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM.
          isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) ||
          isZIPMask(M, VT, DummyUnsigned) ||
          isTRN_v_undef_Mask(M, VT, DummyUnsigned) ||
          isUZP_v_undef_Mask(M, VT, DummyUnsigned) ||
          isZIP_v_undef_Mask(M, VT, DummyUnsigned) ||
          isINSMask(M, VT.getVectorNumElements(), DummyBool, DummyInt) ||
          isConcatMask(M, VT, VT.getSizeInBits() == 128));
}

/// getVShiftImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift operation, where all the elements of the
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
  // Ignore bit_converts.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  // NOTE(review): upstream is dyn_cast<BuildVectorSDNode> — template
  // argument stripped by extraction.
  BuildVectorSDNode *BVN = dyn_cast(Op.getNode());
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                    HasAnyUndefs, ElementBits) ||
      SplatBitSize > ElementBits)
    return false;
  Cnt = SplatBits.getSExtValue();
  return true;
}

/// isVShiftLImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift left operation. That value must be in the range:
///   0 <= Value < ElementBits for a left shift; or
///   0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  // Long shifts allow Cnt == ElementBits, hence the Cnt - 1 comparison.
  return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
}

/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation. The value must be in the range:
///   1 <= Value <= ElementBits for a right shift; or
///   1 <= Value <= ElementBits / 2 for a narrowing right shift (isNarrow).
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
}

/// Lower TRUNCATE: scalar i1 results become `(x & 1) != 0`; fixed-length
/// vector truncates that need SVE are forwarded; everything else is left for
/// generic handling.
SDValue AArch64TargetLowering::LowerTRUNCATE(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (VT.getScalarType() == MVT::i1) {
    // Lower i1 truncate to `(x & 1) != 0`.
SDLoc dl(Op);
    EVT OpVT = Op.getOperand(0).getValueType();
    SDValue Zero = DAG.getConstant(0, dl, OpVT);
    SDValue One = DAG.getConstant(1, dl, OpVT);
    SDValue And = DAG.getNode(ISD::AND, dl, OpVT, Op.getOperand(0), One);
    return DAG.getSetCC(dl, VT, And, Zero, ISD::SETNE);
  }

  if (!VT.isVector() || VT.isScalableVector())
    return SDValue();

  if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
    return LowerFixedLengthVectorTruncateToSVE(Op, DAG);

  return SDValue();
}

/// Lower vector SHL/SRA/SRL. Scalable and SVE-backed fixed vectors use the
/// predicated shift nodes; NEON vectors use immediate-form shifts when the
/// amount is a constant splat, otherwise the (u|s)shl register-shift
/// intrinsics.
SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
                                                      SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  int64_t Cnt;

  if (!Op.getOperand(1).getValueType().isVector())
    return Op;
  unsigned EltSize = VT.getScalarSizeInBits();

  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unexpected shift opcode");

  case ISD::SHL:
    if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT))
      return LowerToPredicatedOp(Op, DAG, AArch64ISD::SHL_PRED);

    if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
      return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0),
                         DAG.getConstant(Cnt, DL, MVT::i32));
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                       DAG.getConstant(Intrinsic::aarch64_neon_ushl, DL,
                                       MVT::i32),
                       Op.getOperand(0), Op.getOperand(1));
  case ISD::SRA:
  case ISD::SRL:
    if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) {
      unsigned Opc = Op.getOpcode() == ISD::SRA ? AArch64ISD::SRA_PRED
                                                : AArch64ISD::SRL_PRED;
      return LowerToPredicatedOp(Op, DAG, Opc);
    }

    // Right shift immediate
    if (isVShiftRImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) {
      unsigned Opc =
          (Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR;
      return DAG.getNode(Opc, DL, VT, Op.getOperand(0),
                         DAG.getConstant(Cnt, DL, MVT::i32));
    }

    // Right shift register.  Note, there is not a shift right register
    // instruction, but the shift left register instruction takes a signed
    // value, where negative numbers specify a right shift.
    unsigned Opc = (Op.getOpcode() == ISD::SRA) ? Intrinsic::aarch64_neon_sshl
                                                : Intrinsic::aarch64_neon_ushl;
    // negate the shift amount
    SDValue NegShift = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                                   Op.getOperand(1));
    SDValue NegShiftLeft =
        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                    DAG.getConstant(Opc, DL, MVT::i32), Op.getOperand(0),
                    NegShift);
    return NegShiftLeft;
  }

  return SDValue();
}

/// Emit one NEON vector compare node for the given AArch64 condition code,
/// using the compare-against-zero forms when RHS is a constant-zero
/// build_vector. Returns SDValue() for conditions with no single-instruction
/// mapping (callers may combine two comparisons).
static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
                                    AArch64CC::CondCode CC, bool NoNans, EVT VT,
                                    const SDLoc &dl, SelectionDAG &DAG) {
  EVT SrcVT = LHS.getValueType();
  assert(VT.getSizeInBits() == SrcVT.getSizeInBits() &&
         "function only supposed to emit natural comparisons");

  // NOTE(review): upstream is dyn_cast<BuildVectorSDNode> — template
  // argument stripped by extraction.
  BuildVectorSDNode *BVN = dyn_cast(RHS.getNode());
  APInt CnstBits(VT.getSizeInBits(), 0);
  APInt UndefBits(VT.getSizeInBits(), 0);
  bool IsCnst = BVN && resolveBuildVector(BVN, CnstBits, UndefBits);
  bool IsZero = IsCnst && (CnstBits == 0);

  if (SrcVT.getVectorElementType().isFloatingPoint()) {
    switch (CC) {
    default:
      return SDValue();
    case AArch64CC::NE: {
      // NE is emitted as NOT(EQ) — there is no direct FCMNE.
      SDValue Fcmeq;
      if (IsZero)
        Fcmeq = DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
      else
        Fcmeq = DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
      return DAG.getNOT(dl, Fcmeq, VT);
    }
    case AArch64CC::EQ:
      if (IsZero)
        return DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
      return DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
    case AArch64CC::GE:
      if (IsZero)
        return DAG.getNode(AArch64ISD::FCMGEz, dl, VT, LHS);
      return DAG.getNode(AArch64ISD::FCMGE, dl, VT, LHS, RHS);
    case AArch64CC::GT:
      if (IsZero)
        return DAG.getNode(AArch64ISD::FCMGTz, dl, VT, LHS);
      return DAG.getNode(AArch64ISD::FCMGT, dl, VT, LHS, RHS);
    case AArch64CC::LS:
      if (IsZero)
        return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS);
      // LS is implemented with operands swapped: RHS >= LHS.
      return DAG.getNode(AArch64ISD::FCMGE, dl, VT, RHS, LHS);
    case AArch64CC::LT:
      if (!NoNans)
        return SDValue();
      // If we ignore NaNs then we can use to the MI implementation.
LLVM_FALLTHROUGH;
    case AArch64CC::MI:
      if (IsZero)
        return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);
      // MI is implemented with operands swapped: RHS > LHS.
      return DAG.getNode(AArch64ISD::FCMGT, dl, VT, RHS, LHS);
    }
  }

  // Integer comparisons.
  switch (CC) {
  default:
    return SDValue();
  case AArch64CC::NE: {
    // NE is emitted as NOT(EQ) — there is no direct CMNE.
    SDValue Cmeq;
    if (IsZero)
      Cmeq = DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
    else
      Cmeq = DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
    return DAG.getNOT(dl, Cmeq, VT);
  }
  case AArch64CC::EQ:
    if (IsZero)
      return DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
    return DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
  case AArch64CC::GE:
    if (IsZero)
      return DAG.getNode(AArch64ISD::CMGEz, dl, VT, LHS);
    return DAG.getNode(AArch64ISD::CMGE, dl, VT, LHS, RHS);
  case AArch64CC::GT:
    if (IsZero)
      return DAG.getNode(AArch64ISD::CMGTz, dl, VT, LHS);
    return DAG.getNode(AArch64ISD::CMGT, dl, VT, LHS, RHS);
  case AArch64CC::LE:
    if (IsZero)
      return DAG.getNode(AArch64ISD::CMLEz, dl, VT, LHS);
    // LE/LS/LO/LT are implemented with the operands swapped.
    return DAG.getNode(AArch64ISD::CMGE, dl, VT, RHS, LHS);
  case AArch64CC::LS:
    return DAG.getNode(AArch64ISD::CMHS, dl, VT, RHS, LHS);
  case AArch64CC::LO:
    return DAG.getNode(AArch64ISD::CMHI, dl, VT, RHS, LHS);
  case AArch64CC::LT:
    if (IsZero)
      return DAG.getNode(AArch64ISD::CMLTz, dl, VT, LHS);
    return DAG.getNode(AArch64ISD::CMGT, dl, VT, RHS, LHS);
  case AArch64CC::HI:
    return DAG.getNode(AArch64ISD::CMHI, dl, VT, LHS, RHS);
  case AArch64CC::HS:
    return DAG.getNode(AArch64ISD::CMHS, dl, VT, LHS, RHS);
  }
}

/// Lower vector SETCC. Scalable vectors use the merge-zero predicated SETCC;
/// SVE-backed fixed vectors are forwarded; NEON integer compares map to one
/// EmitVectorComparison call, while FP compares may need two combined with OR
/// and/or a final NOT depending on the condition.
SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
                                           SelectionDAG &DAG) const {
  if (Op.getValueType().isScalableVector())
    return LowerToPredicatedOp(Op, DAG, AArch64ISD::SETCC_MERGE_ZERO);

  if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
    return LowerFixedLengthVectorSetccToSVE(Op, DAG);

  // NOTE(review): upstream is cast<CondCodeSDNode> — template argument
  // stripped by extraction.
  ISD::CondCode CC = cast(Op.getOperand(2))->get();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  EVT CmpVT = LHS.getValueType().changeVectorElementTypeToInteger();
  SDLoc dl(Op);

  if (LHS.getValueType().getVectorElementType().isInteger()) {
    assert(LHS.getValueType() == RHS.getValueType());
    AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
    SDValue Cmp =
        EmitVectorComparison(LHS, RHS, AArch64CC, false, CmpVT, dl, DAG);
    return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
  }

  // NOTE(review): upstream is static_cast<const AArch64Subtarget &> —
  // template/type argument stripped by extraction.
  const bool FullFP16 =
    static_cast(DAG.getSubtarget()).hasFullFP16();

  // Make v4f16 (only) fcmp operations utilise vector instructions
  // v8f16 support will be a little more complicated
  if (!FullFP16 && LHS.getValueType().getVectorElementType() == MVT::f16) {
    if (LHS.getValueType().getVectorNumElements() == 4) {
      LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, LHS);
      RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, RHS);
      SDValue NewSetcc = DAG.getSetCC(dl, MVT::v4i16, LHS, RHS, CC);
      DAG.ReplaceAllUsesWith(Op, NewSetcc);
      CmpVT = MVT::v4i32;
    } else
      return SDValue();
  }

  assert((!FullFP16 && LHS.getValueType().getVectorElementType() != MVT::f16) ||
         LHS.getValueType().getVectorElementType() != MVT::f128);

  // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
  // clean.  Some of them require two branches to implement.
  AArch64CC::CondCode CC1, CC2;
  bool ShouldInvert;
  changeVectorFPCCToAArch64CC(CC, CC1, CC2, ShouldInvert);

  bool NoNaNs = getTargetMachine().Options.NoNaNsFPMath;
  SDValue Cmp =
      EmitVectorComparison(LHS, RHS, CC1, NoNaNs, CmpVT, dl, DAG);
  if (!Cmp.getNode())
    return SDValue();

  if (CC2 != AArch64CC::AL) {
    // Second comparison needed; OR the two results together.
    SDValue Cmp2 =
        EmitVectorComparison(LHS, RHS, CC2, NoNaNs, CmpVT, dl, DAG);
    if (!Cmp2.getNode())
      return SDValue();

    Cmp = DAG.getNode(ISD::OR, dl, CmpVT, Cmp, Cmp2);
  }

  Cmp = DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());

  if (ShouldInvert)
    Cmp = DAG.getNOT(dl, Cmp, Cmp.getValueType());

  return Cmp;
}

/// Build `extract_elt(ReductionNode(vec), 0)` — the common shape of all NEON
/// across-lanes reductions, whose result lives in lane 0.
static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp,
                                  SelectionDAG &DAG) {
  SDValue VecOp = ScalarOp.getOperand(0);
  auto Rdx = DAG.getNode(Op, DL, VecOp.getSimpleValueType(), VecOp);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarOp.getValueType(), Rdx,
                     DAG.getConstant(0, DL, MVT::i64));
}

/// Lower VECREDUCE_* — to SVE predicated reductions when the source is
/// scalable (or a fixed type that must use SVE), otherwise to the NEON
/// across-lanes reduction nodes/intrinsics.
SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);

  // Try to lower fixed length reductions to SVE.
EVT SrcVT = Src.getValueType();
  // Force SVE even for NEON-sized vectors for the ops NEON cannot reduce
  // directly (logical reductions, FADD, and 64-bit-element min/max).
  bool OverrideNEON = Op.getOpcode() == ISD::VECREDUCE_AND ||
                      Op.getOpcode() == ISD::VECREDUCE_OR ||
                      Op.getOpcode() == ISD::VECREDUCE_XOR ||
                      Op.getOpcode() == ISD::VECREDUCE_FADD ||
                      (Op.getOpcode() != ISD::VECREDUCE_ADD &&
                       SrcVT.getVectorElementType() == MVT::i64);
  if (SrcVT.isScalableVector() ||
      useSVEForFixedLengthVectorVT(SrcVT, OverrideNEON)) {

    if (SrcVT.getVectorElementType() == MVT::i1)
      return LowerPredReductionToSVE(Op, DAG);

    switch (Op.getOpcode()) {
    case ISD::VECREDUCE_ADD:
      return LowerReductionToSVE(AArch64ISD::UADDV_PRED, Op, DAG);
    case ISD::VECREDUCE_AND:
      return LowerReductionToSVE(AArch64ISD::ANDV_PRED, Op, DAG);
    case ISD::VECREDUCE_OR:
      return LowerReductionToSVE(AArch64ISD::ORV_PRED, Op, DAG);
    case ISD::VECREDUCE_SMAX:
      return LowerReductionToSVE(AArch64ISD::SMAXV_PRED, Op, DAG);
    case ISD::VECREDUCE_SMIN:
      return LowerReductionToSVE(AArch64ISD::SMINV_PRED, Op, DAG);
    case ISD::VECREDUCE_UMAX:
      return LowerReductionToSVE(AArch64ISD::UMAXV_PRED, Op, DAG);
    case ISD::VECREDUCE_UMIN:
      return LowerReductionToSVE(AArch64ISD::UMINV_PRED, Op, DAG);
    case ISD::VECREDUCE_XOR:
      return LowerReductionToSVE(AArch64ISD::EORV_PRED, Op, DAG);
    case ISD::VECREDUCE_FADD:
      return LowerReductionToSVE(AArch64ISD::FADDV_PRED, Op, DAG);
    case ISD::VECREDUCE_FMAX:
      return LowerReductionToSVE(AArch64ISD::FMAXNMV_PRED, Op, DAG);
    case ISD::VECREDUCE_FMIN:
      return LowerReductionToSVE(AArch64ISD::FMINNMV_PRED, Op, DAG);
    default:
      llvm_unreachable("Unhandled fixed length reduction");
    }
  }

  // Lower NEON reductions.
  SDLoc dl(Op);
  switch (Op.getOpcode()) {
  case ISD::VECREDUCE_ADD:
    return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG);
  case ISD::VECREDUCE_SMAX:
    return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG);
  case ISD::VECREDUCE_SMIN:
    return getReductionSDNode(AArch64ISD::SMINV, dl, Op, DAG);
  case ISD::VECREDUCE_UMAX:
    return getReductionSDNode(AArch64ISD::UMAXV, dl, Op, DAG);
  case ISD::VECREDUCE_UMIN:
    return getReductionSDNode(AArch64ISD::UMINV, dl, Op, DAG);
  case ISD::VECREDUCE_FMAX: {
    return DAG.getNode(
        ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
        DAG.getConstant(Intrinsic::aarch64_neon_fmaxnmv, dl, MVT::i32),
        Src);
  }
  case ISD::VECREDUCE_FMIN: {
    return DAG.getNode(
        ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
        DAG.getConstant(Intrinsic::aarch64_neon_fminnmv, dl, MVT::i32),
        Src);
  }
  default:
    llvm_unreachable("Unhandled reduction");
  }
}

/// Lower ATOMIC_LOAD_SUB when LSE (or outlined atomics) is available by
/// negating the operand and emitting ATOMIC_LOAD_ADD instead.
SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op,
                                                    SelectionDAG &DAG) const {
  // NOTE(review): upstream is static_cast<const AArch64Subtarget &> —
  // template/type argument stripped by extraction (also in the cast below).
  auto &Subtarget = static_cast(DAG.getSubtarget());
  if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
    return SDValue();

  // LSE has an atomic load-add instruction, but not a load-sub.
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue RHS = Op.getOperand(2);
  AtomicSDNode *AN = cast(Op.getNode());
  RHS = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), RHS);
  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, AN->getMemoryVT(),
                       Op.getOperand(0), Op.getOperand(1), RHS,
                       AN->getMemOperand());
}

/// Lower ATOMIC_LOAD_AND when LSE (or outlined atomics) is available by
/// inverting the operand and emitting ATOMIC_LOAD_CLR instead.
SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto &Subtarget = static_cast(DAG.getSubtarget());
  if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
    return SDValue();

  // LSE has an atomic load-clear instruction, but not a load-and.
SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue RHS = Op.getOperand(2);
  // NOTE(review): upstream is cast<AtomicSDNode> — template argument
  // stripped by extraction (same for the other bare casts in this block).
  AtomicSDNode *AN = cast(Op.getNode());
  // and x, m  ==  clr x, ~m
  RHS = DAG.getNode(ISD::XOR, dl, VT, DAG.getConstant(-1ULL, dl, VT), RHS);
  return DAG.getAtomic(ISD::ATOMIC_LOAD_CLR, dl, AN->getMemoryVT(),
                       Op.getOperand(0), Op.getOperand(1), RHS,
                       AN->getMemOperand());
}

/// Emit the Windows __chkstk stack-probe call. The byte count is passed in
/// X15 as a count of 16-byte units (hence the SRL/SHL by 4 around the call).
SDValue AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(
    SDValue Op, SDValue Chain, SDValue &Size, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Callee = DAG.getTargetExternalSymbol("__chkstk", PtrVT, 0);

  const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask = TRI->getWindowsStackProbePreservedMask();
  if (Subtarget->hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask);

  Size = DAG.getNode(ISD::SRL, dl, MVT::i64, Size,
                     DAG.getConstant(4, dl, MVT::i64));
  Chain = DAG.getCopyToReg(Chain, dl, AArch64::X15, Size, SDValue());
  Chain =
      DAG.getNode(AArch64ISD::CALL, dl, DAG.getVTList(MVT::Other, MVT::Glue),
                  Chain, Callee, DAG.getRegister(AArch64::X15, MVT::i64),
                  DAG.getRegisterMask(Mask), Chain.getValue(1));
  // To match the actual intent better, we should read the output from X15 here
  // again (instead of potentially spilling it to the stack), but rereading Size
  // from X15 here doesn't work at -O0, since it thinks that X15 is undefined
  // here.

  Size = DAG.getNode(ISD::SHL, dl, MVT::i64, Size,
                     DAG.getConstant(4, dl, MVT::i64));
  return Chain;
}

/// Lower DYNAMIC_STACKALLOC for Windows targets: probe the stack via
/// __chkstk (unless the function opts out with "no-stack-arg-probe"),
/// adjust SP, and honour the requested alignment.
SDValue
AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() &&
         "Only Windows alloca probing supported");
  SDLoc dl(Op);
  // Get the inputs.
  SDNode *Node = Op.getNode();
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  MaybeAlign Align =
      cast(Op.getOperand(2))->getMaybeAlignValue();
  EVT VT = Node->getValueType(0);

  if (DAG.getMachineFunction().getFunction().hasFnAttribute(
          "no-stack-arg-probe")) {
    // Fast path: no probing, just move SP down (and align it).
    SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64);
    Chain = SP.getValue(1);
    SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
    if (Align)
      SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
                       DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
    Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
    SDValue Ops[2] = {SP, Chain};
    return DAG.getMergeValues(Ops, dl);
  }

  Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);

  Chain = LowerWindowsDYNAMIC_STACKALLOC(Op, Chain, Size, DAG);

  SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64);
  Chain = SP.getValue(1);
  SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
  if (Align)
    SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
                     DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
  Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);

  SDValue Ops[2] = {SP, Chain};
  return DAG.getMergeValues(Ops, dl);
}

/// Lower VSCALE to a 64-bit vscale node scaled by the sign-extended
/// multiplier, then zext/trunc to the requested type.
SDValue AArch64TargetLowering::LowerVSCALE(SDValue Op,
                                           SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT != MVT::i64 && "Expected illegal VSCALE node");

  SDLoc DL(Op);
  APInt MulImm = cast(Op.getOperand(0))->getAPIntValue();
  return DAG.getZExtOrTrunc(DAG.getVScale(DL, MVT::i64, MulImm.sextOrSelf(64)),
                            DL, VT);
}

/// Set the IntrinsicInfo for the `aarch64_sve_st` intrinsics.
/// NOTE(review): upstream is template <unsigned NumVecs> — the template
/// parameter list appears stripped by extraction.
template
static bool
setInfoSVEStN(const AArch64TargetLowering &TLI, const DataLayout &DL,
              AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI) {
  Info.opc = ISD::INTRINSIC_VOID;
  // Retrieve EC from first vector argument.
const EVT VT = TLI.getMemValueType(DL, CI.getArgOperand(0)->getType());
  ElementCount EC = VT.getVectorElementCount();
#ifndef NDEBUG
  // Check the assumption that all input vectors are the same type.
  for (unsigned I = 0; I < NumVecs; ++I)
    assert(VT == TLI.getMemValueType(DL, CI.getArgOperand(I)->getType()) &&
           "Invalid type.");
#endif
  // memVT is `NumVecs * VT`.
  Info.memVT = EVT::getVectorVT(CI.getType()->getContext(), VT.getScalarType(),
                                EC * NumVecs);
  Info.ptrVal = CI.getArgOperand(CI.getNumArgOperands() - 1);
  Info.offset = 0;
  Info.align.reset();
  Info.flags = MachineMemOperand::MOStore;
  return true;
}

/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                               const CallInst &I,
                                               MachineFunction &MF,
                                               unsigned Intrinsic) const {
  auto &DL = I.getModule()->getDataLayout();
  switch (Intrinsic) {
  case Intrinsic::aarch64_sve_st2:
    return setInfoSVEStN<2>(*this, DL, Info, I);
  case Intrinsic::aarch64_sve_st3:
    return setInfoSVEStN<3>(*this, DL, Info, I);
  case Intrinsic::aarch64_sve_st4:
    return setInfoSVEStN<4>(*this, DL, Info, I);
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_ld1x2:
  case Intrinsic::aarch64_neon_ld1x3:
  case Intrinsic::aarch64_neon_ld1x4:
  case Intrinsic::aarch64_neon_ld2lane:
  case Intrinsic::aarch64_neon_ld3lane:
  case Intrinsic::aarch64_neon_ld4lane:
  case Intrinsic::aarch64_neon_ld2r:
  case Intrinsic::aarch64_neon_ld3r:
  case Intrinsic::aarch64_neon_ld4r: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    // The pointer is always the last argument of these intrinsics.
    Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.offset = 0;
    Info.align.reset();
    // volatile loads with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
  case Intrinsic::aarch64_neon_st1x2:
  case Intrinsic::aarch64_neon_st1x3:
  case Intrinsic::aarch64_neon_st1x4:
  case Intrinsic::aarch64_neon_st2lane:
  case Intrinsic::aarch64_neon_st3lane:
  case Intrinsic::aarch64_neon_st4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    unsigned NumElts = 0;
    // Sum the sizes of the leading vector arguments (the data operands).
    for (unsigned ArgI = 0, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.offset = 0;
    Info.align.reset();
    // volatile stores with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::aarch64_ldaxr:
  case Intrinsic::aarch64_ldxr: {
    // NOTE(review): upstream is cast<PointerType> — template argument
    // stripped by extraction (same for the later PointerType casts).
    PointerType *PtrTy = cast(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = DL.getABITypeAlign(PtrTy->getElementType());
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::aarch64_stlxr:
  case Intrinsic::aarch64_stxr: {
    PointerType *PtrTy = cast(I.getArgOperand(1)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = DL.getABITypeAlign(PtrTy->getElementType());
    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::aarch64_ldaxp:
  case Intrinsic::aarch64_ldxp:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i128;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(16);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
    return true;
  case Intrinsic::aarch64_stlxp:
  case Intrinsic::aarch64_stxp:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i128;
    Info.ptrVal = I.getArgOperand(2);
    Info.offset = 0;
    Info.align = Align(16);
    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
    return true;
  case Intrinsic::aarch64_sve_ldnt1: {
    PointerType *PtrTy = cast(I.getArgOperand(1)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(I.getType());
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = DL.getABITypeAlign(PtrTy->getElementType());
    Info.flags = MachineMemOperand::MOLoad;
    if (Intrinsic == Intrinsic::aarch64_sve_ldnt1)
      Info.flags |= MachineMemOperand::MONonTemporal;
    return true;
  }
  case Intrinsic::aarch64_sve_stnt1: {
    PointerType *PtrTy = cast(I.getArgOperand(2)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(I.getOperand(0)->getType());
    Info.ptrVal = I.getArgOperand(2);
    Info.offset = 0;
    Info.align = DL.getABITypeAlign(PtrTy->getElementType());
    Info.flags = MachineMemOperand::MOStore;
    if (Intrinsic == Intrinsic::aarch64_sve_stnt1)
      Info.flags |= MachineMemOperand::MONonTemporal;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// Decide whether a wide extending load may be narrowed. Narrowing is
/// rejected when it would stop an addressing-mode shift from folding.
bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load,
                                                  ISD::LoadExtType ExtTy,
                                                  EVT NewVT) const {
  // TODO: This may be worth removing. Check regression tests for diffs.
  if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
    return false;

  // If we're reducing the load width in order to avoid having to use an extra
  // instruction to do extension then it's probably a good idea.
if (ExtTy != ISD::NON_EXTLOAD)
    return true;
  // Don't reduce load width if it would prevent us from combining a shift into
  // the offset.
  // NOTE(review): upstream is dyn_cast<MemSDNode> — template argument
  // stripped by extraction.
  MemSDNode *Mem = dyn_cast(Load);
  assert(Mem);
  const SDValue &Base = Mem->getBasePtr();
  if (Base.getOpcode() == ISD::ADD &&
      Base.getOperand(1).getOpcode() == ISD::SHL &&
      Base.getOperand(1).hasOneUse() &&
      Base.getOperand(1).getOperand(1).getOpcode() == ISD::Constant) {
    // The shift can be combined if it matches the size of the value being
    // loaded (and so reducing the width would make it not match).
    uint64_t ShiftAmount = Base.getOperand(1).getConstantOperandVal(1);
    uint64_t LoadBytes = Mem->getMemoryVT().getSizeInBits()/8;
    if (ShiftAmount == Log2_32(LoadBytes))
      return false;
  }
  // We have no reason to disallow reducing the load width, so allow it.
  return true;
}

// Truncations from 64-bit GPR to 32-bit GPR is free.
bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  uint64_t NumBits1 = Ty1->getPrimitiveSizeInBits().getFixedSize();
  uint64_t NumBits2 = Ty2->getPrimitiveSizeInBits().getFixedSize();
  return NumBits1 > NumBits2;
}

// EVT overload of the above: any narrowing scalar integer truncate is free.
bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
    return false;
  uint64_t NumBits1 = VT1.getFixedSizeInBits();
  uint64_t NumBits2 = VT2.getFixedSizeInBits();
  return NumBits1 > NumBits2;
}

/// Check if it is profitable to hoist instruction in then/else to if.
/// Not profitable if I and it's user can form a FMA instruction
/// because we prefer FMSUB/FMADD.
bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
  if (I->getOpcode() != Instruction::FMul)
    return true;

  if (!I->hasOneUse())
    return true;

  Instruction *User = I->user_back();

  // Only the FMul feeding an FAdd/FSub is interesting (potential FMA).
  if (User &&
      !(User->getOpcode() == Instruction::FSub ||
        User->getOpcode() == Instruction::FAdd))
    return true;

  const TargetOptions &Options = getTargetMachine().Options;
  const Function *F = I->getFunction();
  const DataLayout &DL = F->getParent()->getDataLayout();
  Type *Ty = User->getOperand(0)->getType();

  // Not profitable to hoist when the pair would fuse into an FMA.
  return !(isFMAFasterThanFMulAndFAdd(*F, Ty) &&
           isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
           (Options.AllowFPOpFusion == FPOpFusion::Fast ||
            Options.UnsafeFPMath));
}

// All 32-bit GPR operations implicitly zero the high-half of the corresponding
// 64-bit GPR.
bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (isZExtFree(VT1, VT2)) {
    return true;
  }

  if (Val.getOpcode() != ISD::LOAD)
    return false;

  // 8-, 16-, and 32-bit integer loads all implicitly zero-extend.
  return (VT1.isSimple() && !VT1.isVector() && VT1.isInteger() &&
          VT2.isSimple() && !VT2.isVector() && VT2.isInteger() &&
          VT1.getSizeInBits() <= 32);
}

/// Return true when the extension can be folded into its users for free —
/// i.e. absorbed by a shifted-register operand or addressing mode (BFM
/// family) — for every use.
bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
  // NOTE(review): upstream is isa<FPExtInst> — template argument stripped by
  // extraction.
  if (isa(Ext))
    return false;

  // Vector types are not free.
  if (Ext->getType()->isVectorTy())
    return false;

  for (const Use &U : Ext->uses()) {
    // The extension is free if we can fold it with a left shift in an
    // addressing mode or an arithmetic operation: add, sub, and cmp.

    // Is there a shift?
    const Instruction *Instr = cast(U.getUser());

    // Is this a constant shift?
    switch (Instr->getOpcode()) {
    case Instruction::Shl:
      if (!isa(Instr->getOperand(1)))
        return false;
      break;
    case Instruction::GetElementPtr: {
      gep_type_iterator GTI = gep_type_begin(Instr);
      auto &DL = Ext->getModule()->getDataLayout();
      std::advance(GTI, U.getOperandNo()-1);
      Type *IdxTy = GTI.getIndexedType();
      // This extension will end up with a shift because of the scaling factor.
      // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
      // Get the shift amount based on the scaling factor:
      // log2(sizeof(IdxTy)) - log2(8).
      uint64_t ShiftAmt =
        countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy).getFixedSize()) - 3;
      // Is the constant foldable in the shift of the addressing mode?
      // I.e., shift amount is between 1 and 4 inclusive.
      if (ShiftAmt == 0 || ShiftAmt > 4)
        return false;
      break;
    }
    case Instruction::Trunc:
      // Check if this is a noop.
      // trunc(sext ty1 to ty2) to ty1.
      if (Instr->getType() == Ext->getOperand(0)->getType())
        continue;
      LLVM_FALLTHROUGH;
    default:
      return false;
    }

    // At this point we can use the bfm family, so this extension is free
    // for that use.
  }
  return true;
}

/// Check if both Op1 and Op2 are shufflevector extracts of either the lower
/// or upper half of the vector elements.
static bool areExtractShuffleVectors(Value *Op1, Value *Op2) { auto areTypesHalfed = [](Value *FullV, Value *HalfV) { auto *FullTy = FullV->getType(); auto *HalfTy = HalfV->getType(); return FullTy->getPrimitiveSizeInBits().getFixedSize() == 2 * HalfTy->getPrimitiveSizeInBits().getFixedSize(); }; auto extractHalf = [](Value *FullV, Value *HalfV) { auto *FullVT = cast(FullV->getType()); auto *HalfVT = cast(HalfV->getType()); return FullVT->getNumElements() == 2 * HalfVT->getNumElements(); }; ArrayRef M1, M2; Value *S1Op1, *S2Op1; if (!match(Op1, m_Shuffle(m_Value(S1Op1), m_Undef(), m_Mask(M1))) || !match(Op2, m_Shuffle(m_Value(S2Op1), m_Undef(), m_Mask(M2)))) return false; // Check that the operands are half as wide as the result and we extract // half of the elements of the input vectors. if (!areTypesHalfed(S1Op1, Op1) || !areTypesHalfed(S2Op1, Op2) || !extractHalf(S1Op1, Op1) || !extractHalf(S2Op1, Op2)) return false; // Check the mask extracts either the lower or upper half of vector // elements. int M1Start = -1; int M2Start = -1; int NumElements = cast(Op1->getType())->getNumElements() * 2; if (!ShuffleVectorInst::isExtractSubvectorMask(M1, NumElements, M1Start) || !ShuffleVectorInst::isExtractSubvectorMask(M2, NumElements, M2Start) || M1Start != M2Start || (M1Start != 0 && M2Start != (NumElements / 2))) return false; return true; } /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth /// of the vector elements. static bool areExtractExts(Value *Ext1, Value *Ext2) { auto areExtDoubled = [](Instruction *Ext) { return Ext->getType()->getScalarSizeInBits() == 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits(); }; if (!match(Ext1, m_ZExtOrSExt(m_Value())) || !match(Ext2, m_ZExtOrSExt(m_Value())) || !areExtDoubled(cast(Ext1)) || !areExtDoubled(cast(Ext2))) return false; return true; } /// Check if Op could be used with vmull_high_p64 intrinsic. 
static bool isOperandOfVmullHighP64(Value *Op) { Value *VectorOperand = nullptr; ConstantInt *ElementIndex = nullptr; return match(Op, m_ExtractElt(m_Value(VectorOperand), m_ConstantInt(ElementIndex))) && ElementIndex->getValue() == 1 && isa(VectorOperand->getType()) && cast(VectorOperand->getType())->getNumElements() == 2; } /// Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic. static bool areOperandsOfVmullHighP64(Value *Op1, Value *Op2) { return isOperandOfVmullHighP64(Op1) && isOperandOfVmullHighP64(Op2); } /// Check if sinking \p I's operands to I's basic block is profitable, because /// the operands can be folded into a target instruction, e.g. /// shufflevectors extracts and/or sext/zext can be folded into (u,s)subl(2). bool AArch64TargetLowering::shouldSinkOperands( Instruction *I, SmallVectorImpl &Ops) const { if (!I->getType()->isVectorTy()) return false; if (IntrinsicInst *II = dyn_cast(I)) { switch (II->getIntrinsicID()) { case Intrinsic::aarch64_neon_umull: if (!areExtractShuffleVectors(II->getOperand(0), II->getOperand(1))) return false; Ops.push_back(&II->getOperandUse(0)); Ops.push_back(&II->getOperandUse(1)); return true; case Intrinsic::aarch64_neon_pmull64: if (!areOperandsOfVmullHighP64(II->getArgOperand(0), II->getArgOperand(1))) return false; Ops.push_back(&II->getArgOperandUse(0)); Ops.push_back(&II->getArgOperandUse(1)); return true; default: return false; } } switch (I->getOpcode()) { case Instruction::Sub: case Instruction::Add: { if (!areExtractExts(I->getOperand(0), I->getOperand(1))) return false; // If the exts' operands extract either the lower or upper elements, we // can sink them too. 
auto Ext1 = cast(I->getOperand(0)); auto Ext2 = cast(I->getOperand(1)); if (areExtractShuffleVectors(Ext1, Ext2)) { Ops.push_back(&Ext1->getOperandUse(0)); Ops.push_back(&Ext2->getOperandUse(0)); } Ops.push_back(&I->getOperandUse(0)); Ops.push_back(&I->getOperandUse(1)); return true; } case Instruction::Mul: { bool IsProfitable = false; for (auto &Op : I->operands()) { // Make sure we are not already sinking this operand if (any_of(Ops, [&](Use *U) { return U->get() == Op; })) continue; ShuffleVectorInst *Shuffle = dyn_cast(Op); if (!Shuffle || !Shuffle->isZeroEltSplat()) continue; Value *ShuffleOperand = Shuffle->getOperand(0); InsertElementInst *Insert = dyn_cast(ShuffleOperand); if (!Insert) continue; Instruction *OperandInstr = dyn_cast(Insert->getOperand(1)); if (!OperandInstr) continue; ConstantInt *ElementConstant = dyn_cast(Insert->getOperand(2)); // Check that the insertelement is inserting into element 0 if (!ElementConstant || ElementConstant->getZExtValue() != 0) continue; unsigned Opcode = OperandInstr->getOpcode(); if (Opcode != Instruction::SExt && Opcode != Instruction::ZExt) continue; Ops.push_back(&Shuffle->getOperandUse(0)); Ops.push_back(&Op); IsProfitable = true; } return IsProfitable; } default: return false; } return false; } bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const { if (!LoadedType.isSimple() || (!LoadedType.isInteger() && !LoadedType.isFloatingPoint())) return false; // Cyclone supports unaligned accesses. RequiredAligment = Align(1); unsigned NumBits = LoadedType.getSizeInBits(); return NumBits == 32 || NumBits == 64; } /// A helper function for determining the number of interleaved accesses we /// will generate when lowering accesses of the given type. 
unsigned AArch64TargetLowering::getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL) const { return (DL.getTypeSizeInBits(VecTy) + 127) / 128; } MachineMemOperand::Flags AArch64TargetLowering::getTargetMMOFlags(const Instruction &I) const { if (Subtarget->getProcFamily() == AArch64Subtarget::Falkor && I.getMetadata(FALKOR_STRIDED_ACCESS_MD) != nullptr) return MOStridedAccess; return MachineMemOperand::MONone; } bool AArch64TargetLowering::isLegalInterleavedAccessType( VectorType *VecTy, const DataLayout &DL) const { unsigned VecSize = DL.getTypeSizeInBits(VecTy); unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType()); // Ensure the number of vector elements is greater than 1. if (cast(VecTy)->getNumElements() < 2) return false; // Ensure the element type is legal. if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64) return false; // Ensure the total vector size is 64 or a multiple of 128. Types larger than // 128 will be split into multiple interleaved accesses. return VecSize == 64 || VecSize % 128 == 0; } /// Lower an interleaved load into a ldN intrinsic. /// /// E.g. 
Lower an interleaved load (Factor = 2): /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements /// /// Into: /// %ld2 = { <4 x i32>, <4 x i32> } call llvm.aarch64.neon.ld2(%ptr) /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0 /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1 bool AArch64TargetLowering::lowerInterleavedLoad( LoadInst *LI, ArrayRef Shuffles, ArrayRef Indices, unsigned Factor) const { assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && "Invalid interleave factor"); assert(!Shuffles.empty() && "Empty shufflevector input"); assert(Shuffles.size() == Indices.size() && "Unmatched number of shufflevectors and indices"); const DataLayout &DL = LI->getModule()->getDataLayout(); VectorType *VTy = Shuffles[0]->getType(); // Skip if we do not have NEON and skip illegal vector types. We can // "legalize" wide vector types into multiple interleaved accesses as long as // the vector types are divisible by 128. if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(VTy, DL)) return false; unsigned NumLoads = getNumInterleavedAccesses(VTy, DL); auto *FVTy = cast(VTy); // A pointer vector can not be the return type of the ldN intrinsics. Need to // load integer vectors first and then convert to pointer vectors. Type *EltTy = FVTy->getElementType(); if (EltTy->isPointerTy()) FVTy = FixedVectorType::get(DL.getIntPtrType(EltTy), FVTy->getNumElements()); IRBuilder<> Builder(LI); // The base address of the load. Value *BaseAddr = LI->getPointerOperand(); if (NumLoads > 1) { // If we're going to generate more than one load, reset the sub-vector type // to something legal. FVTy = FixedVectorType::get(FVTy->getElementType(), FVTy->getNumElements() / NumLoads); // We will compute the pointer operand of each load from the original base // address using GEPs. 
Cast the base address to a pointer to the scalar // element type. BaseAddr = Builder.CreateBitCast( BaseAddr, FVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace())); } Type *PtrTy = FVTy->getPointerTo(LI->getPointerAddressSpace()); Type *Tys[2] = {FVTy, PtrTy}; static const Intrinsic::ID LoadInts[3] = {Intrinsic::aarch64_neon_ld2, Intrinsic::aarch64_neon_ld3, Intrinsic::aarch64_neon_ld4}; Function *LdNFunc = Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys); // Holds sub-vectors extracted from the load intrinsic return values. The // sub-vectors are associated with the shufflevector instructions they will // replace. DenseMap> SubVecs; for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { // If we're generating more than one load, compute the base address of // subsequent loads as an offset from the previous. if (LoadCount > 0) BaseAddr = Builder.CreateConstGEP1_32(FVTy->getElementType(), BaseAddr, FVTy->getNumElements() * Factor); CallInst *LdN = Builder.CreateCall( LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy), "ldN"); // Extract and store the sub-vectors returned by the load intrinsic. for (unsigned i = 0; i < Shuffles.size(); i++) { ShuffleVectorInst *SVI = Shuffles[i]; unsigned Index = Indices[i]; Value *SubVec = Builder.CreateExtractValue(LdN, Index); // Convert the integer vector to pointer vector if the element is pointer. if (EltTy->isPointerTy()) SubVec = Builder.CreateIntToPtr( SubVec, FixedVectorType::get(SVI->getType()->getElementType(), FVTy->getNumElements())); SubVecs[SVI].push_back(SubVec); } } // Replace uses of the shufflevector instructions with the sub-vectors // returned by the load intrinsic. If a shufflevector instruction is // associated with more than one sub-vector, those sub-vectors will be // concatenated into a single wide vector. for (ShuffleVectorInst *SVI : Shuffles) { auto &SubVec = SubVecs[SVI]; auto *WideVec = SubVec.size() > 1 ? 
concatenateVectors(Builder, SubVec) : SubVec[0]; SVI->replaceAllUsesWith(WideVec); } return true; } /// Lower an interleaved store into a stN intrinsic. /// /// E.g. Lower an interleaved store (Factor = 3): /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> /// store <12 x i32> %i.vec, <12 x i32>* %ptr /// /// Into: /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> /// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr) /// /// Note that the new shufflevectors will be removed and we'll only generate one /// st3 instruction in CodeGen. /// /// Example for a more general valid mask (Factor 3). Lower: /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> /// store <12 x i32> %i.vec, <12 x i32>* %ptr /// /// Into: /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> /// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr) bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const { assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && "Invalid interleave factor"); auto *VecTy = cast(SVI->getType()); assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store"); unsigned LaneLen = VecTy->getNumElements() / Factor; Type *EltTy = VecTy->getElementType(); auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen); const DataLayout &DL = SI->getModule()->getDataLayout(); // Skip if we do not have NEON and skip illegal vector types. We can // "legalize" wide vector types into multiple interleaved accesses as long as // the vector types are divisible by 128. 
if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(SubVecTy, DL)) return false; unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL); Value *Op0 = SVI->getOperand(0); Value *Op1 = SVI->getOperand(1); IRBuilder<> Builder(SI); // StN intrinsics don't support pointer vectors as arguments. Convert pointer // vectors to integer vectors. if (EltTy->isPointerTy()) { Type *IntTy = DL.getIntPtrType(EltTy); unsigned NumOpElts = cast(Op0->getType())->getNumElements(); // Convert to the corresponding integer vector. auto *IntVecTy = FixedVectorType::get(IntTy, NumOpElts); Op0 = Builder.CreatePtrToInt(Op0, IntVecTy); Op1 = Builder.CreatePtrToInt(Op1, IntVecTy); SubVecTy = FixedVectorType::get(IntTy, LaneLen); } // The base address of the store. Value *BaseAddr = SI->getPointerOperand(); if (NumStores > 1) { // If we're going to generate more than one store, reset the lane length // and sub-vector type to something legal. LaneLen /= NumStores; SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen); // We will compute the pointer operand of each store from the original base // address using GEPs. Cast the base address to a pointer to the scalar // element type. BaseAddr = Builder.CreateBitCast( BaseAddr, SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace())); } auto Mask = SVI->getShuffleMask(); Type *PtrTy = SubVecTy->getPointerTo(SI->getPointerAddressSpace()); Type *Tys[2] = {SubVecTy, PtrTy}; static const Intrinsic::ID StoreInts[3] = {Intrinsic::aarch64_neon_st2, Intrinsic::aarch64_neon_st3, Intrinsic::aarch64_neon_st4}; Function *StNFunc = Intrinsic::getDeclaration(SI->getModule(), StoreInts[Factor - 2], Tys); for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { SmallVector Ops; // Split the shufflevector operands into sub vectors for the new stN call. 
for (unsigned i = 0; i < Factor; i++) { unsigned IdxI = StoreCount * LaneLen * Factor + i; if (Mask[IdxI] >= 0) { Ops.push_back(Builder.CreateShuffleVector( Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0))); } else { unsigned StartMask = 0; for (unsigned j = 1; j < LaneLen; j++) { unsigned IdxJ = StoreCount * LaneLen * Factor + j; if (Mask[IdxJ * Factor + IdxI] >= 0) { StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ; break; } } // Note: Filling undef gaps with random elements is ok, since // those elements were being written anyway (with undefs). // In the case of all undefs we're defaulting to using elems from 0 // Note: StartMask cannot be negative, it's checked in // isReInterleaveMask Ops.push_back(Builder.CreateShuffleVector( Op0, Op1, createSequentialMask(StartMask, LaneLen, 0))); } } // If we generating more than one store, we compute the base address of // subsequent stores as an offset from the previous. if (StoreCount > 0) BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(), BaseAddr, LaneLen * Factor); Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy)); Builder.CreateCall(StNFunc, Ops); } return true; } // Lower an SVE structured load intrinsic returning a tuple type to target // specific intrinsic taking the same input but returning a multi-result value // of the split tuple type. // // E.g. Lowering an LD3: // // call @llvm.aarch64.sve.ld3.nxv12i32( // %pred, // * %addr) // // Output DAG: // // t0: ch = EntryToken // t2: nxv4i1,ch = CopyFromReg t0, Register:nxv4i1 %0 // t4: i64,ch = CopyFromReg t0, Register:i64 %1 // t5: nxv4i32,nxv4i32,nxv4i32,ch = AArch64ISD::SVE_LD3 t0, t2, t4 // t6: nxv12i32 = concat_vectors t5, t5:1, t5:2 // // This is called pre-legalization to avoid widening/splitting issues with // non-power-of-2 tuple types used for LD3, such as nxv12i32. 
SDValue AArch64TargetLowering::LowerSVEStructLoad(unsigned Intrinsic, ArrayRef LoadOps, EVT VT, SelectionDAG &DAG, const SDLoc &DL) const { assert(VT.isScalableVector() && "Can only lower scalable vectors"); unsigned N, Opcode; static std::map> IntrinsicMap = { {Intrinsic::aarch64_sve_ld2, {2, AArch64ISD::SVE_LD2_MERGE_ZERO}}, {Intrinsic::aarch64_sve_ld3, {3, AArch64ISD::SVE_LD3_MERGE_ZERO}}, {Intrinsic::aarch64_sve_ld4, {4, AArch64ISD::SVE_LD4_MERGE_ZERO}}}; std::tie(N, Opcode) = IntrinsicMap[Intrinsic]; assert(VT.getVectorElementCount().getKnownMinValue() % N == 0 && "invalid tuple vector type!"); EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), VT.getVectorElementCount().divideCoefficientBy(N)); assert(isTypeLegal(SplitVT)); SmallVector VTs(N, SplitVT); VTs.push_back(MVT::Other); // Chain SDVTList NodeTys = DAG.getVTList(VTs); SDValue PseudoLoad = DAG.getNode(Opcode, DL, NodeTys, LoadOps); SmallVector PseudoLoadOps; for (unsigned I = 0; I < N; ++I) PseudoLoadOps.push_back(SDValue(PseudoLoad.getNode(), I)); return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, PseudoLoadOps); } EVT AArch64TargetLowering::getOptimalMemOpType( const MemOp &Op, const AttributeList &FuncAttributes) const { bool CanImplicitFloat = !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat); bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat; bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat; // Only use AdvSIMD to implement memset of 32-byte and above. It would have // taken one instruction to materialize the v2i64 zero and one store (with // restrictive addressing mode). Just do i64 stores. 
bool IsSmallMemset = Op.isMemset() && Op.size() < 32; auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) { if (Op.isAligned(AlignCheck)) return true; bool Fast; return allowsMisalignedMemoryAccesses(VT, 0, Align(1), MachineMemOperand::MONone, &Fast) && Fast; }; if (CanUseNEON && Op.isMemset() && !IsSmallMemset && AlignmentIsAcceptable(MVT::v2i64, Align(16))) return MVT::v2i64; if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16))) return MVT::f128; if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8))) return MVT::i64; if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4))) return MVT::i32; return MVT::Other; } LLT AArch64TargetLowering::getOptimalMemOpLLT( const MemOp &Op, const AttributeList &FuncAttributes) const { bool CanImplicitFloat = !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat); bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat; bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat; // Only use AdvSIMD to implement memset of 32-byte and above. It would have // taken one instruction to materialize the v2i64 zero and one store (with // restrictive addressing mode). Just do i64 stores. bool IsSmallMemset = Op.isMemset() && Op.size() < 32; auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) { if (Op.isAligned(AlignCheck)) return true; bool Fast; return allowsMisalignedMemoryAccesses(VT, 0, Align(1), MachineMemOperand::MONone, &Fast) && Fast; }; if (CanUseNEON && Op.isMemset() && !IsSmallMemset && AlignmentIsAcceptable(MVT::v2i64, Align(16))) return LLT::fixed_vector(2, 64); if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16))) return LLT::scalar(128); if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8))) return LLT::scalar(64); if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4))) return LLT::scalar(32); return LLT(); } // 12-bit optionally shifted immediates are legal for adds. 
bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const { if (Immed == std::numeric_limits::min()) { LLVM_DEBUG(dbgs() << "Illegal add imm " << Immed << ": avoid UB for INT64_MIN\n"); return false; } // Same encoding for add/sub, just flip the sign. Immed = std::abs(Immed); bool IsLegal = ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0)); LLVM_DEBUG(dbgs() << "Is " << Immed << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n"); return IsLegal; } // Integer comparisons are implemented with ADDS/SUBS, so the range of valid // immediates is the same as for an add or a sub. bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const { return isLegalAddImmediate(Immed); } /// isLegalAddressingMode - Return true if the addressing mode represented /// by AM is legal for this target, for a load/store of the specified type. bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const { // AArch64 has five basic addressing modes: // reg // reg + 9-bit signed offset // reg + SIZE_IN_BYTES * 12-bit unsigned offset // reg1 + reg2 // reg + SIZE_IN_BYTES * reg // No global is ever allowed as a base. if (AM.BaseGV) return false; // No reg+reg+imm addressing. if (AM.HasBaseReg && AM.BaseOffs && AM.Scale) return false; // FIXME: Update this method to support scalable addressing modes. 
if (isa(Ty)) { uint64_t VecElemNumBytes = DL.getTypeSizeInBits(cast(Ty)->getElementType()) / 8; return AM.HasBaseReg && !AM.BaseOffs && (AM.Scale == 0 || (uint64_t)AM.Scale == VecElemNumBytes); } // check reg + imm case: // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12 uint64_t NumBytes = 0; if (Ty->isSized()) { uint64_t NumBits = DL.getTypeSizeInBits(Ty); NumBytes = NumBits / 8; if (!isPowerOf2_64(NumBits)) NumBytes = 0; } if (!AM.Scale) { int64_t Offset = AM.BaseOffs; // 9-bit signed offset if (isInt<9>(Offset)) return true; // 12-bit unsigned offset unsigned shift = Log2_64(NumBytes); if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 && // Must be a multiple of NumBytes (NumBytes is a power of 2) (Offset >> shift) << shift == Offset) return true; return false; } // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2 return AM.Scale == 1 || (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes); } bool AArch64TargetLowering::shouldConsiderGEPOffsetSplit() const { // Consider splitting large offset of struct or array. return true; } InstructionCost AArch64TargetLowering::getScalingFactorCost( const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const { // Scaling factors are not free at all. // Operands | Rt Latency // ------------------------------------------- // Rt, [Xn, Xm] | 4 // ------------------------------------------- // Rt, [Xn, Xm, lsl #imm] | Rn: 4 Rm: 5 // Rt, [Xn, Wm, #imm] | if (isLegalAddressingMode(DL, AM, Ty, AS)) // Scale represents reg2 * scale, thus account for 1 if // it is not equal to 0 or 1. 
return AM.Scale != 0 && AM.Scale != 1; return -1; } bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd( const MachineFunction &MF, EVT VT) const { VT = VT.getScalarType(); if (!VT.isSimple()) return false; switch (VT.getSimpleVT().SimpleTy) { case MVT::f16: return Subtarget->hasFullFP16(); case MVT::f32: case MVT::f64: return true; default: break; } return false; } bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const { switch (Ty->getScalarType()->getTypeID()) { case Type::FloatTyID: case Type::DoubleTyID: return true; default: return false; } } bool AArch64TargetLowering::generateFMAsInMachineCombiner( EVT VT, CodeGenOpt::Level OptLevel) const { return (OptLevel >= CodeGenOpt::Aggressive) && !VT.isScalableVector(); } const MCPhysReg * AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const { // LR is a callee-save register, but we must treat it as clobbered by any call // site. Hence we include LR in the scratch registers, which are in turn added // as implicit-defs for stackmaps and patchpoints. static const MCPhysReg ScratchRegs[] = { AArch64::X16, AArch64::X17, AArch64::LR, 0 }; return ScratchRegs; } bool AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const { N = N->getOperand(0).getNode(); EVT VT = N->getValueType(0); // If N is unsigned bit extraction: ((x >> C) & mask), then do not combine // it with shift to let it be lowered to UBFX. 
if (N->getOpcode() == ISD::AND && (VT == MVT::i32 || VT == MVT::i64) && isa(N->getOperand(1))) { uint64_t TruncMask = N->getConstantOperandVal(1); if (isMask_64(TruncMask) && N->getOperand(0).getOpcode() == ISD::SRL && isa(N->getOperand(0)->getOperand(1))) return false; } return true; } bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const { assert(Ty->isIntegerTy()); unsigned BitSize = Ty->getPrimitiveSizeInBits(); if (BitSize == 0) return false; int64_t Val = Imm.getSExtValue(); if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, BitSize)) return true; if ((int64_t)Val < 0) Val = ~Val; if (BitSize == 32) Val &= (1LL << 32) - 1; unsigned LZ = countLeadingZeros((uint64_t)Val); unsigned Shift = (63 - LZ) / 16; // MOVZ is free so return true for one or fewer MOVK. return Shift < 3; } bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const { if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) return false; return (Index == 0 || Index == ResVT.getVectorNumElements()); } /// Turn vector tests of the signbit in the form of: /// xor (sra X, elt_size(X)-1), -1 /// into: /// cmge X, X, #0 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget) { EVT VT = N->getValueType(0); if (!Subtarget->hasNEON() || !VT.isVector()) return SDValue(); // There must be a shift right algebraic before the xor, and the xor must be a // 'not' operation. SDValue Shift = N->getOperand(0); SDValue Ones = N->getOperand(1); if (Shift.getOpcode() != AArch64ISD::VASHR || !Shift.hasOneUse() || !ISD::isBuildVectorAllOnes(Ones.getNode())) return SDValue(); // The shift should be smearing the sign bit across each vector element. 
auto *ShiftAmt = dyn_cast(Shift.getOperand(1)); EVT ShiftEltTy = Shift.getValueType().getVectorElementType(); if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1) return SDValue(); return DAG.getNode(AArch64ISD::CMGEz, SDLoc(N), VT, Shift.getOperand(0)); } // Given a vecreduce_add node, detect the below pattern and convert it to the // node sequence with UABDL, [S|U]ADB and UADDLP. // // i32 vecreduce_add( // v16i32 abs( // v16i32 sub( // v16i32 [sign|zero]_extend(v16i8 a), v16i32 [sign|zero]_extend(v16i8 b)))) // =================> // i32 vecreduce_add( // v4i32 UADDLP( // v8i16 add( // v8i16 zext( // v8i8 [S|U]ABD low8:v16i8 a, low8:v16i8 b // v8i16 zext( // v8i8 [S|U]ABD high8:v16i8 a, high8:v16i8 b static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N, SelectionDAG &DAG) { // Assumed i32 vecreduce_add if (N->getValueType(0) != MVT::i32) return SDValue(); SDValue VecReduceOp0 = N->getOperand(0); unsigned Opcode = VecReduceOp0.getOpcode(); // Assumed v16i32 abs if (Opcode != ISD::ABS || VecReduceOp0->getValueType(0) != MVT::v16i32) return SDValue(); SDValue ABS = VecReduceOp0; // Assumed v16i32 sub if (ABS->getOperand(0)->getOpcode() != ISD::SUB || ABS->getOperand(0)->getValueType(0) != MVT::v16i32) return SDValue(); SDValue SUB = ABS->getOperand(0); unsigned Opcode0 = SUB->getOperand(0).getOpcode(); unsigned Opcode1 = SUB->getOperand(1).getOpcode(); // Assumed v16i32 type if (SUB->getOperand(0)->getValueType(0) != MVT::v16i32 || SUB->getOperand(1)->getValueType(0) != MVT::v16i32) return SDValue(); // Assumed zext or sext bool IsZExt = false; if (Opcode0 == ISD::ZERO_EXTEND && Opcode1 == ISD::ZERO_EXTEND) { IsZExt = true; } else if (Opcode0 == ISD::SIGN_EXTEND && Opcode1 == ISD::SIGN_EXTEND) { IsZExt = false; } else return SDValue(); SDValue EXT0 = SUB->getOperand(0); SDValue EXT1 = SUB->getOperand(1); // Assumed zext's operand has v16i8 type if (EXT0->getOperand(0)->getValueType(0) != MVT::v16i8 || 
EXT1->getOperand(0)->getValueType(0) != MVT::v16i8) return SDValue(); // Pattern is dectected. Let's convert it to sequence of nodes. SDLoc DL(N); // First, create the node pattern of UABD/SABD. SDValue UABDHigh8Op0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0), DAG.getConstant(8, DL, MVT::i64)); SDValue UABDHigh8Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0), DAG.getConstant(8, DL, MVT::i64)); SDValue UABDHigh8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8, UABDHigh8Op0, UABDHigh8Op1); SDValue UABDL = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDHigh8); // Second, create the node pattern of UABAL. SDValue UABDLo8Op0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0), DAG.getConstant(0, DL, MVT::i64)); SDValue UABDLo8Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0), DAG.getConstant(0, DL, MVT::i64)); SDValue UABDLo8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8, UABDLo8Op0, UABDLo8Op1); SDValue ZExtUABD = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDLo8); SDValue UABAL = DAG.getNode(ISD::ADD, DL, MVT::v8i16, UABDL, ZExtUABD); // Third, create the node of UADDLP. SDValue UADDLP = DAG.getNode(AArch64ISD::UADDLP, DL, MVT::v4i32, UABAL); // Fourth, create the node of VECREDUCE_ADD. 
return DAG.getNode(ISD::VECREDUCE_ADD, DL, MVT::i32, UADDLP); } // Turn a v8i8/v16i8 extended vecreduce into a udot/sdot and vecreduce // vecreduce.add(ext(A)) to vecreduce.add(DOT(zero, A, one)) // vecreduce.add(mul(ext(A), ext(B))) to vecreduce.add(DOT(zero, A, B)) static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *ST) { if (!ST->hasDotProd()) return performVecReduceAddCombineWithUADDLP(N, DAG); SDValue Op0 = N->getOperand(0); if (N->getValueType(0) != MVT::i32 || Op0.getValueType().getVectorElementType() != MVT::i32) return SDValue(); unsigned ExtOpcode = Op0.getOpcode(); SDValue A = Op0; SDValue B; if (ExtOpcode == ISD::MUL) { A = Op0.getOperand(0); B = Op0.getOperand(1); if (A.getOpcode() != B.getOpcode() || A.getOperand(0).getValueType() != B.getOperand(0).getValueType()) return SDValue(); ExtOpcode = A.getOpcode(); } if (ExtOpcode != ISD::ZERO_EXTEND && ExtOpcode != ISD::SIGN_EXTEND) return SDValue(); EVT Op0VT = A.getOperand(0).getValueType(); if (Op0VT != MVT::v8i8 && Op0VT != MVT::v16i8) return SDValue(); SDLoc DL(Op0); // For non-mla reductions B can be set to 1. For MLA we take the operand of // the extend B. if (!B) B = DAG.getConstant(1, DL, Op0VT); else B = B.getOperand(0); SDValue Zeros = DAG.getConstant(0, DL, Op0VT == MVT::v8i8 ? MVT::v2i32 : MVT::v4i32); auto DotOpcode = (ExtOpcode == ISD::ZERO_EXTEND) ? 
AArch64ISD::UDOT : AArch64ISD::SDOT; SDValue Dot = DAG.getNode(DotOpcode, DL, Zeros.getValueType(), Zeros, A.getOperand(0), B); return DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot); } static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget) { if (DCI.isBeforeLegalizeOps()) return SDValue(); return foldVectorXorShiftIntoCmp(N, DAG, Subtarget); } SDValue AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl &Created) const { AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); if (isIntDivCheap(N->getValueType(0), Attr)) return SDValue(N,0); // Lower SDIV as SDIV // fold (sdiv X, pow2) EVT VT = N->getValueType(0); if ((VT != MVT::i32 && VT != MVT::i64) || !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) return SDValue(); SDLoc DL(N); SDValue N0 = N->getOperand(0); unsigned Lg2 = Divisor.countTrailingZeros(); SDValue Zero = DAG.getConstant(0, DL, VT); SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT); // Add (N0 < 0) ? Pow2 - 1 : 0; SDValue CCVal; SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETLT, CCVal, DAG, DL); SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne); SDValue CSel = DAG.getNode(AArch64ISD::CSEL, DL, VT, Add, N0, CCVal, Cmp); Created.push_back(Cmp.getNode()); Created.push_back(Add.getNode()); Created.push_back(CSel.getNode()); // Divide by pow2. SDValue SRA = DAG.getNode(ISD::SRA, DL, VT, CSel, DAG.getConstant(Lg2, DL, MVT::i64)); // If we're dividing by a positive value, we're done. Otherwise, we must // negate the result. 
  // (continuation of BuildSDIVPow2)
  if (Divisor.isNonNegative())
    return SRA;

  Created.push_back(SRA.getNode());
  // Negative divisor: negate the shifted result (0 - SRA).
  return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
}

/// Returns true if \p S is one of the SVE element-count intrinsics
/// (cntb/cnth/cntw/cntd).
static bool IsSVECntIntrinsic(SDValue S) {
  switch(getIntrinsicID(S.getNode())) {
  default:
    break;
  case Intrinsic::aarch64_sve_cntb:
  case Intrinsic::aarch64_sve_cnth:
  case Intrinsic::aarch64_sve_cntw:
  case Intrinsic::aarch64_sve_cntd:
    return true;
  }
  return false;
}

/// Calculates what the pre-extend type is, based on the extension
/// operation node provided by \p Extend.
///
/// In the case that \p Extend is a SIGN_EXTEND or a ZERO_EXTEND, the
/// pre-extend type is pulled directly from the operand, while other extend
/// operations need a bit more inspection to get this information.
///
/// \param Extend The SDNode from the DAG that represents the extend operation
/// \param DAG The SelectionDAG hosting the \p Extend node
///
/// \returns The type representing the \p Extend source type, or \p MVT::Other
/// if no valid type can be determined
// NOTE(review): the casts below appear with template arguments stripped by
// extraction; upstream these are `dyn_cast<VTSDNode>` and
// `dyn_cast<ConstantSDNode>` — confirm against the original file.
static EVT calculatePreExtendType(SDValue Extend, SelectionDAG &DAG) {
  switch (Extend.getOpcode()) {
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    return Extend.getOperand(0).getValueType();
  case ISD::AssertSext:
  case ISD::AssertZext:
  case ISD::SIGN_EXTEND_INREG: {
    // These carry the pre-extend type as a VT operand.
    VTSDNode *TypeNode = dyn_cast(Extend.getOperand(1));
    if (!TypeNode)
      return MVT::Other;
    return TypeNode->getVT();
  }
  case ISD::AND: {
    // An AND with a full low-byte/halfword/word mask acts as a zero-extend.
    ConstantSDNode *Constant =
        dyn_cast(Extend.getOperand(1).getNode());
    if (!Constant)
      return MVT::Other;
    uint32_t Mask = Constant->getZExtValue();
    if (Mask == UCHAR_MAX)
      return MVT::i8;
    else if (Mask == USHRT_MAX)
      return MVT::i16;
    else if (Mask == UINT_MAX)
      return MVT::i32;
    return MVT::Other;
  }
  default:
    return MVT::Other;
  }
  llvm_unreachable("Code path unhandled in calculatePreExtendType!");
}

/// Combines a dup(sext/zext) node pattern into sext/zext(dup)
/// making use of the vector SExt/ZExt rather than the scalar SExt/ZExt
static SDValue performCommonVectorExtendCombine(SDValue VectorShuffle,
                                                // (continuation of
                                                // performCommonVectorExtendCombine)
                                                SelectionDAG &DAG) {
  // NOTE(review): casts in this block appear with template arguments stripped
  // by extraction (upstream: dyn_cast<ShuffleVectorSDNode>,
  // dyn_cast<ConstantSDNode>, std::vector<int>, isa/cast<ConstantSDNode>) —
  // confirm against the original file.
  ShuffleVectorSDNode *ShuffleNode =
      dyn_cast(VectorShuffle.getNode());
  if (!ShuffleNode)
    return SDValue();
  // Ensuring the mask is zero before continuing
  if (!ShuffleNode->isSplat() || ShuffleNode->getSplatIndex() != 0)
    return SDValue();
  SDValue InsertVectorElt = VectorShuffle.getOperand(0);
  if (InsertVectorElt.getOpcode() != ISD::INSERT_VECTOR_ELT)
    return SDValue();
  SDValue InsertLane = InsertVectorElt.getOperand(2);
  ConstantSDNode *Constant = dyn_cast(InsertLane.getNode());
  // Ensures the insert is inserting into lane 0
  if (!Constant || Constant->getZExtValue() != 0)
    return SDValue();
  SDValue Extend = InsertVectorElt.getOperand(1);
  unsigned ExtendOpcode = Extend.getOpcode();

  // Classify the extend as signed or unsigned; AND counts as a zero-extend.
  bool IsSExt = ExtendOpcode == ISD::SIGN_EXTEND ||
                ExtendOpcode == ISD::SIGN_EXTEND_INREG ||
                ExtendOpcode == ISD::AssertSext;
  if (!IsSExt && ExtendOpcode != ISD::ZERO_EXTEND &&
      ExtendOpcode != ISD::AssertZext && ExtendOpcode != ISD::AND)
    return SDValue();
  EVT TargetType = VectorShuffle.getValueType();
  EVT PreExtendType = calculatePreExtendType(Extend, DAG);
  if ((TargetType != MVT::v8i16 && TargetType != MVT::v4i32 &&
       TargetType != MVT::v2i64) ||
      (PreExtendType == MVT::Other))
    return SDValue();

  // Restrict valid pre-extend data type
  if (PreExtendType != MVT::i8 && PreExtendType != MVT::i16 &&
      PreExtendType != MVT::i32)
    return SDValue();

  EVT PreExtendVT = TargetType.changeVectorElementType(PreExtendType);
  if (PreExtendVT.getVectorElementCount() != TargetType.getVectorElementCount())
    return SDValue();
  // Only handle exactly-doubling extends (i8->i16, i16->i32, i32->i64).
  if (TargetType.getScalarSizeInBits() != PreExtendVT.getScalarSizeInBits() * 2)
    return SDValue();

  SDLoc DL(VectorShuffle);
  // Rebuild: insert the narrow scalar, splat it, then extend the whole vector.
  SDValue InsertVectorNode = DAG.getNode(
      InsertVectorElt.getOpcode(), DL, PreExtendVT, DAG.getUNDEF(PreExtendVT),
      DAG.getAnyExtOrTrunc(Extend.getOperand(0), DL, PreExtendType),
      DAG.getConstant(0, DL, MVT::i64));
  // Default-constructed mask: all zeros, i.e. a lane-0 splat.
  std::vector ShuffleMask(TargetType.getVectorElementCount().getValue());
  SDValue VectorShuffleNode =
      DAG.getVectorShuffle(PreExtendVT, DL, InsertVectorNode,
                           DAG.getUNDEF(PreExtendVT), ShuffleMask);
  SDValue ExtendNode = DAG.getNode(IsSExt ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                                   DL, TargetType, VectorShuffleNode);
  return ExtendNode;
}

/// Combines a mul(dup(sext/zext)) node pattern into mul(sext/zext(dup))
/// making use of the vector SExt/ZExt rather than the scalar SExt/ZExt
static SDValue performMulVectorExtendCombine(SDNode *Mul, SelectionDAG &DAG) {
  // If the value type isn't a vector, none of the operands are going to be dups
  if (!Mul->getValueType(0).isVector())
    return SDValue();

  SDValue Op0 = performCommonVectorExtendCombine(Mul->getOperand(0), DAG);
  SDValue Op1 = performCommonVectorExtendCombine(Mul->getOperand(1), DAG);

  // Neither operands have been changed, don't make any further changes
  if (!Op0 && !Op1)
    return SDValue();

  SDLoc DL(Mul);
  // Rebuild the mul, substituting whichever operand(s) were rewritten.
  return DAG.getNode(Mul->getOpcode(), DL, Mul->getValueType(0),
                     Op0 ? Op0 : Mul->getOperand(0),
                     Op1 ? Op1 : Mul->getOperand(1));
}

/// Combine for ISD::MUL: try the dup(extend) rewrite above, then lower
/// multiplies by (2^N +/- 1) * 2^M constants into shift/add/sub sequences.
static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const AArch64Subtarget *Subtarget) {
  if (SDValue Ext = performMulVectorExtendCombine(N, DAG))
    return Ext;

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  // The below optimizations require a constant RHS.
  if (!isa(N->getOperand(1)))
    return SDValue();

  SDValue N0 = N->getOperand(0);
  ConstantSDNode *C = cast(N->getOperand(1));
  const APInt &ConstValue = C->getAPIntValue();

  // Allow the scaling to be folded into the `cnt` instruction by preventing
  // the scaling to be obscured here. This makes it easier to pattern match.
  if (IsSVECntIntrinsic(N0) ||
      (N0->getOpcode() == ISD::TRUNCATE &&
       (IsSVECntIntrinsic(N0->getOperand(0)))))
    if (ConstValue.sge(1) && ConstValue.sle(16))
      return SDValue();

  // Multiplication of a power of two plus/minus one can be done more
  // cheaply as a shift+add/sub. For now, this is true unilaterally. If
  // future CPUs have a cheaper MADD instruction, this may need to be
  // gated on a subtarget feature.
  // (continuation of performMulCombine)
  // For Cyclone, 32-bit MADD is 4 cycles and
  // 64-bit is 5 cycles, so this is always a win.
  // More aggressively, some multiplications N0 * C can be lowered to
  // shift+add+shift if the constant C = A * B where A = 2^N + 1 and B = 2^M,
  // e.g. 6=3*2=(2+1)*2.
  // TODO: consider lowering more cases, e.g. C = 14, -6, -14 or even 45
  // which equals to (1+2)*16-(1+2).
  // TrailingZeroes is used to test if the mul can be lowered to
  // shift+add+shift.
  unsigned TrailingZeroes = ConstValue.countTrailingZeros();
  if (TrailingZeroes) {
    // Conservatively do not lower to shift+add+shift if the mul might be
    // folded into smul or umul.
    if (N0->hasOneUse() && (isSignExtended(N0.getNode(), DAG) ||
                            isZeroExtended(N0.getNode(), DAG)))
      return SDValue();
    // Conservatively do not lower to shift+add+shift if the mul might be
    // folded into madd or msub.
    if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ADD ||
                           N->use_begin()->getOpcode() == ISD::SUB))
      return SDValue();
  }
  // Use ShiftedConstValue instead of ConstValue to support both shift+add/sub
  // and shift+add+shift.
  APInt ShiftedConstValue = ConstValue.ashr(TrailingZeroes);

  unsigned ShiftAmt, AddSubOpc;
  // Is the shifted value the LHS operand of the add/sub?
  bool ShiftValUseIsN0 = true;
  // Do we need to negate the result?
  bool NegateResult = false;

  if (ConstValue.isNonNegative()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, (2^N + 1) * 2^M) => (shl (add (shl x, N), x), M)
    APInt SCVMinus1 = ShiftedConstValue - 1;
    APInt CVPlus1 = ConstValue + 1;
    if (SCVMinus1.isPowerOf2()) {
      ShiftAmt = SCVMinus1.logBase2();
      AddSubOpc = ISD::ADD;
    } else if (CVPlus1.isPowerOf2()) {
      ShiftAmt = CVPlus1.logBase2();
      AddSubOpc = ISD::SUB;
    } else
      return SDValue();
  } else {
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
    // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
    APInt CVNegPlus1 = -ConstValue + 1;
    APInt CVNegMinus1 = -ConstValue - 1;
    if (CVNegPlus1.isPowerOf2()) {
      ShiftAmt = CVNegPlus1.logBase2();
      AddSubOpc = ISD::SUB;
      ShiftValUseIsN0 = false;
    } else if (CVNegMinus1.isPowerOf2()) {
      ShiftAmt = CVNegMinus1.logBase2();
      AddSubOpc = ISD::ADD;
      NegateResult = true;
    } else
      return SDValue();
  }

  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  SDValue ShiftedVal = DAG.getNode(ISD::SHL, DL, VT, N0,
                                   DAG.getConstant(ShiftAmt, DL, MVT::i64));

  SDValue AddSubN0 = ShiftValUseIsN0 ? ShiftedVal : N0;
  SDValue AddSubN1 = ShiftValUseIsN0 ? N0 : ShiftedVal;
  SDValue Res = DAG.getNode(AddSubOpc, DL, VT, AddSubN0, AddSubN1);
  assert(!(NegateResult && TrailingZeroes) &&
         "NegateResult and TrailingZeroes cannot both be true for now.");
  // Negate the result.
  if (NegateResult)
    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
  // Shift the result.
  if (TrailingZeroes)
    return DAG.getNode(ISD::SHL, DL, VT, Res,
                       DAG.getConstant(TrailingZeroes, DL, MVT::i64));
  return Res;
}

/// Push a unary op through an AND of a vector-compare result with a constant:
/// the compare lanes are all-zeros/all-ones, so the op only needs to be
/// applied to the constant side.
static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
                                                         SelectionDAG &DAG) {
  // Take advantage of vector comparisons producing 0 or -1 in each lane to
  // optimize away operation when it's from a constant.
  // (continuation of performVectorCompareAndMaskUnaryOpCombine)
  //
  // The general transformation is:
  //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
  //       AND(VECTOR_CMP(x,y), constant2)
  //    constant2 = UNARYOP(constant)

  // Early exit if this isn't a vector operation, the operand of the
  // unary operation isn't a bitwise AND, or if the sizes of the operations
  // aren't the same.
  EVT VT = N->getValueType(0);
  if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
      N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
      VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
    return SDValue();

  // Now check that the other operand of the AND is a constant. We could
  // make the transformation for non-constant splats as well, but it's unclear
  // that would be a benefit as it would not eliminate any operations, just
  // perform one more step in scalar code before moving to the vector unit.
  // NOTE(review): `dyn_cast(...)` / `cast(...)` below appear with template
  // arguments stripped by extraction (upstream: BuildVectorSDNode,
  // LoadSDNode) — confirm against the original file.
  if (BuildVectorSDNode *BV =
          dyn_cast(N->getOperand(0)->getOperand(1))) {
    // Bail out if the vector isn't a constant.
    if (!BV->isConstant())
      return SDValue();

    // Everything checks out. Build up the new and improved node.
    SDLoc DL(N);
    EVT IntVT = BV->getValueType(0);
    // Create a new constant of the appropriate type for the transformed
    // DAG.
    SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
    // The AND node needs bitcasts to/from an integer vector type around it.
    SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
    SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
                                 N->getOperand(0)->getOperand(0), MaskConst);
    SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
    return Res;
  }

  return SDValue();
}

/// Combine for [SU]INT_TO_FP: fold away conversions of masked compare
/// results, and convert int-load + convert into an FP load + scalar
/// {S|U}CVTF to avoid a GPR->FPR transfer.
static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
                                     const AArch64Subtarget *Subtarget) {
  // First try to optimize away the conversion when it's conditionally from
  // a constant. Vectors only.
  if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG))
    return Res;

  EVT VT = N->getValueType(0);
  if (VT != MVT::f32 && VT != MVT::f64)
    return SDValue();

  // Only optimize when the source and destination types have the same width.
  if (VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
    return SDValue();

  // If the result of an integer load is only used by an integer-to-float
  // conversion, use an fp load and an AdvSIMD scalar {S|U}CVTF instead.
  // This eliminates an "integer-to-vector-move" UOP and improves throughput.
  SDValue N0 = N->getOperand(0);
  if (Subtarget->hasNEON() && ISD::isNormalLoad(N0.getNode()) &&
      N0.hasOneUse() &&
      // Do not change the width of a volatile load.
      !cast(N0)->isVolatile()) {
    LoadSDNode *LN0 = cast(N0);
    SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(),
                               LN0->getBasePtr(), LN0->getPointerInfo(),
                               LN0->getAlignment(),
                               LN0->getMemOperand()->getFlags());

    // Make sure successors of the original load stay after it by updating them
    // to use the new Chain.
    DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), Load.getValue(1));

    unsigned Opcode = (N->getOpcode() == ISD::SINT_TO_FP) ? AArch64ISD::SITOF
                                                          : AArch64ISD::UITOF;
    return DAG.getNode(Opcode, SDLoc(N), VT, Load);
  }

  return SDValue();
}

/// Fold a floating-point multiply by power of two into floating-point to
/// fixed-point conversion.
// NOTE(review): `isa(...)` / `cast(...)` below appear with template arguments
// stripped by extraction (upstream: BuildVectorSDNode) — confirm against the
// original file.
static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const AArch64Subtarget *Subtarget) {
  // NEON-only combine.
  if (!Subtarget->hasNEON())
    return SDValue();

  if (!N->getValueType(0).isSimple())
    return SDValue();

  // Match fptoint(fmul(x, splat-constant)).
  SDValue Op = N->getOperand(0);
  if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
      Op.getOpcode() != ISD::FMUL)
    return SDValue();

  SDValue ConstVec = Op->getOperand(1);
  if (!isa(ConstVec))
    return SDValue();

  MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
  uint32_t FloatBits = FloatTy.getSizeInBits();
  if (FloatBits != 32 && FloatBits != 64)
    return SDValue();

  MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
  uint32_t IntBits = IntTy.getSizeInBits();
  if (IntBits != 16 && IntBits != 32 && IntBits != 64)
    return SDValue();

  // Avoid conversions where iN is larger than the float (e.g., float -> i64).
  if (IntBits > FloatBits)
    return SDValue();

  // The multiplier must be a power of two whose log2 fits the conversion's
  // fractional-bits immediate; the helper returns that log2 or -1.
  BitVector UndefElements;
  BuildVectorSDNode *BV = cast(ConstVec);
  int32_t Bits = IntBits == 64 ? 64 : 32;
  int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, Bits + 1);
  if (C == -1 || C == 0 || C > Bits)
    return SDValue();

  MVT ResTy;
  unsigned NumLanes = Op.getValueType().getVectorNumElements();
  switch (NumLanes) {
  default:
    return SDValue();
  case 2:
    ResTy = FloatBits == 32 ? MVT::v2i32 : MVT::v2i64;
    break;
  case 4:
    ResTy = FloatBits == 32 ? MVT::v4i32 : MVT::v4i64;
    break;
  }

  // v4i64 only appears transiently; don't form it once types are legal.
  if (ResTy == MVT::v4i64 && DCI.isBeforeLegalizeOps())
    return SDValue();

  assert((ResTy != MVT::v4i64 || DCI.isBeforeLegalizeOps()) &&
         "Illegal vector type after legalization");

  SDLoc DL(N);
  bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
  unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfp2fxs
                                      : Intrinsic::aarch64_neon_vcvtfp2fxu;
  SDValue FixConv =
      DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ResTy,
                  DAG.getConstant(IntrinsicOpcode, DL, MVT::i32),
                  Op->getOperand(0), DAG.getConstant(C, DL, MVT::i32));
  // We can handle smaller integers by generating an extra trunc.
  if (IntBits < FloatBits)
    FixConv = DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), FixConv);

  return FixConv;
}

/// Fold a floating-point divide by power of two into fixed-point to
/// floating-point conversion.
static SDValue performFDivCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const AArch64Subtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();

  // Match fdiv(int_to_fp(x), splat-constant).
  SDValue Op = N->getOperand(0);
  unsigned Opc = Op->getOpcode();
  if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
      !Op.getOperand(0).getValueType().isSimple() ||
      (Opc != ISD::SINT_TO_FP && Opc != ISD::UINT_TO_FP))
    return SDValue();

  SDValue ConstVec = N->getOperand(1);
  if (!isa(ConstVec))
    return SDValue();

  MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
  int32_t IntBits = IntTy.getSizeInBits();
  if (IntBits != 16 && IntBits != 32 && IntBits != 64)
    return SDValue();

  MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
  int32_t FloatBits = FloatTy.getSizeInBits();
  if (FloatBits != 32 && FloatBits != 64)
    return SDValue();

  // Avoid conversions where iN is larger than the float (e.g., i64 -> float).
  if (IntBits > FloatBits)
    return SDValue();

  // The divisor must be a power-of-two splat whose log2 fits the immediate.
  BitVector UndefElements;
  BuildVectorSDNode *BV = cast(ConstVec);
  int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, FloatBits + 1);
  if (C == -1 || C == 0 || C > FloatBits)
    return SDValue();

  MVT ResTy;
  unsigned NumLanes = Op.getValueType().getVectorNumElements();
  switch (NumLanes) {
  default:
    return SDValue();
  case 2:
    ResTy = FloatBits == 32 ? MVT::v2i32 : MVT::v2i64;
    break;
  case 4:
    ResTy = FloatBits == 32 ? MVT::v4i32 : MVT::v4i64;
    break;
  }

  if (ResTy == MVT::v4i64 && DCI.isBeforeLegalizeOps())
    return SDValue();

  SDLoc DL(N);
  SDValue ConvInput = Op.getOperand(0);
  bool IsSigned = Opc == ISD::SINT_TO_FP;
  // Widen the integer input when it is narrower than the float so lane
  // widths match for the fixed-point conversion.
  if (IntBits < FloatBits)
    ConvInput = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
                            ResTy, ConvInput);

  unsigned IntrinsicOpcode = IsSigned ?
// (continuation of performFDivCombine: pick the signed/unsigned
// fixed-point-to-FP conversion intrinsic and emit it.)
                                        Intrinsic::aarch64_neon_vcvtfxs2fp
                                      : Intrinsic::aarch64_neon_vcvtfxu2fp;
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                     DAG.getConstant(IntrinsicOpcode, DL, MVT::i32), ConvInput,
                     DAG.getConstant(C, DL, MVT::i32));
}

/// An EXTR instruction is made up of two shifts, ORed together. This helper
/// searches for and classifies those shifts.
// NOTE(review): `isa(...)` below appears with its template argument stripped
// by extraction (upstream: ConstantSDNode) — confirm against the original.
static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
                         bool &FromHi) {
  // SHL provides the low half of the pair; SRL provides the high half.
  if (N.getOpcode() == ISD::SHL)
    FromHi = false;
  else if (N.getOpcode() == ISD::SRL)
    FromHi = true;
  else
    return false;

  // The shift amount must be an immediate.
  if (!isa(N.getOperand(1)))
    return false;

  ShiftAmount = N->getConstantOperandVal(1);
  Src = N->getOperand(0);
  return true;
}

/// EXTR instruction extracts a contiguous chunk of bits from two existing
/// registers viewed as a high/low pair. This function looks for the pattern:
/// (or (shl VAL1, \#N), (srl VAL2, \#RegWidth-N)) and replaces it
/// with an EXTR. Can't quite be done in TableGen because the two immediates
/// aren't independent.
static SDValue tryCombineToEXTR(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  assert(N->getOpcode() == ISD::OR && "Unexpected root");

  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  SDValue LHS;
  uint32_t ShiftLHS = 0;
  bool LHSFromHi = false;
  if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
    return SDValue();

  SDValue RHS;
  uint32_t ShiftRHS = 0;
  bool RHSFromHi = false;
  if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
    return SDValue();

  // If they're both trying to come from the high part of the register, they're
  // not really an EXTR.
  if (LHSFromHi == RHSFromHi)
    return SDValue();

  // The two shift amounts must cover the register width exactly.
  if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
    return SDValue();

  // Canonicalise so the SHL (low) half is on the left.
  if (LHSFromHi) {
    std::swap(LHS, RHS);
    std::swap(ShiftLHS, ShiftRHS);
  }

  return DAG.getNode(AArch64ISD::EXTR, DL, VT, LHS, RHS,
                     DAG.getConstant(ShiftRHS, DL, MVT::i64));
}

/// Try to rewrite (or (and ...) (and ...)) as an AArch64 BSP (bitwise
/// select) when the two AND masks are complementary, including the
/// InstCombine'd not(neg a) form.
static SDValue tryCombineToBSL(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  if (!VT.isVector())
    return SDValue();

  // The combining code currently only works for NEON vectors. In particular,
  // it does not work for SVE when dealing with vectors wider than 128 bits.
  if (!VT.is64BitVector() && !VT.is128BitVector())
    return SDValue();

  SDValue N0 = N->getOperand(0);
  if (N0.getOpcode() != ISD::AND)
    return SDValue();

  SDValue N1 = N->getOperand(1);
  if (N1.getOpcode() != ISD::AND)
    return SDValue();

  // InstCombine does (not (neg a)) => (add a -1).
  // Try: (or (and (neg a) b) (and (add a -1) c)) => (bsl (neg a) b c)
  // Loop over all combinations of AND operands.
  for (int i = 1; i >= 0; --i) {
    for (int j = 1; j >= 0; --j) {
      SDValue O0 = N0->getOperand(i);
      SDValue O1 = N1->getOperand(j);
      SDValue Sub, Add, SubSibling, AddSibling;

      // Find a SUB and an ADD operand, one from each AND.
      if (O0.getOpcode() == ISD::SUB && O1.getOpcode() == ISD::ADD) {
        Sub = O0;
        Add = O1;
        SubSibling = N0->getOperand(1 - i);
        AddSibling = N1->getOperand(1 - j);
      } else if (O0.getOpcode() == ISD::ADD && O1.getOpcode() == ISD::SUB) {
        Add = O0;
        Sub = O1;
        AddSibling = N0->getOperand(1 - i);
        SubSibling = N1->getOperand(1 - j);
      } else
        continue;

      // The SUB must be a negation: (sub 0, a).
      if (!ISD::isBuildVectorAllZeros(Sub.getOperand(0).getNode()))
        continue;

      // Constant ones is always righthand operand of the Add.
      // (continuation of tryCombineToBSL: Add must be (add a, -1), i.e. the
      // InstCombine'd form of not(neg a), and both must refer to the same a.)
      if (!ISD::isBuildVectorAllOnes(Add.getOperand(1).getNode()))
        continue;

      if (Sub.getOperand(1) != Add.getOperand(0))
        continue;

      return DAG.getNode(AArch64ISD::BSP, DL, VT, Sub, SubSibling, AddSibling);
    }
  }

  // (or (and a b) (and (not a) c)) => (bsl a b c)
  // We only have to look for constant vectors here since the general, variable
  // case can be handled in TableGen.
  // NOTE(review): the dyn_cast calls below appear with template arguments
  // stripped by extraction (upstream: BuildVectorSDNode, ConstantSDNode) —
  // confirm against the original file.
  unsigned Bits = VT.getScalarSizeInBits();
  uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
  for (int i = 1; i >= 0; --i)
    for (int j = 1; j >= 0; --j) {
      BuildVectorSDNode *BVN0 = dyn_cast(N0->getOperand(i));
      BuildVectorSDNode *BVN1 = dyn_cast(N1->getOperand(j));
      if (!BVN0 || !BVN1)
        continue;

      // Element-wise check that the two constant masks are exact complements.
      bool FoundMatch = true;
      for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) {
        ConstantSDNode *CN0 = dyn_cast(BVN0->getOperand(k));
        ConstantSDNode *CN1 = dyn_cast(BVN1->getOperand(k));
        if (!CN0 || !CN1 ||
            CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) {
          FoundMatch = false;
          break;
        }
      }

      if (FoundMatch)
        return DAG.getNode(AArch64ISD::BSP, DL, VT, SDValue(BVN0, 0),
                           N0->getOperand(1 - i), N1->getOperand(1 - j));
    }

  return SDValue();
}

/// Combine for ISD::OR: try EXTR formation first, then BSL formation.
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                                const AArch64Subtarget *Subtarget) {
  // Attempt to form an EXTR from (or (shl VAL1, #N), (srl VAL2, #RegWidth-N))
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  if (SDValue Res = tryCombineToEXTR(N, DCI))
    return Res;

  if (SDValue Res = tryCombineToBSL(N, DCI))
    return Res;

  return SDValue();
}

/// Returns true when \p N is a DUP/SPLAT_VECTOR of the constant that exactly
/// masks \p MemVT's element width (0xff / 0xffff / 0xffffffff).
static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT) {
  if (!MemVT.getVectorElementType().isSimple())
    return false;

  uint64_t MaskForTy = 0ull;
  switch (MemVT.getVectorElementType().getSimpleVT().SimpleTy) {
  case MVT::i8:
    MaskForTy = 0xffull;
    break;
  case MVT::i16:
    MaskForTy = 0xffffull;
    break;
  case MVT::i32:
    MaskForTy = 0xffffffffull;
    break;
  default:
    return false;
    break;
  }

  if (N->getOpcode() == AArch64ISD::DUP ||
      N->getOpcode() == ISD::SPLAT_VECTOR)
    if (auto *Op0 = dyn_cast(N->getOperand(0)))
      return Op0->getAPIntValue().getLimitedValue() == MaskForTy;

  return false;
}

/// AND combine for scalable vectors: fold redundant masks on unsigned
/// unpacks, and drop masks made redundant by SVE loads' implicit
/// zero-extension.
static SDValue performSVEAndCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue Src = N->getOperand(0);
  unsigned Opc = Src->getOpcode();

  // Zero/any extend of an unsigned unpack
  if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) {
    SDValue UnpkOp = Src->getOperand(0);
    SDValue Dup = N->getOperand(1);

    if (Dup.getOpcode() != AArch64ISD::DUP)
      return SDValue();

    SDLoc DL(N);
    // NOTE(review): upstream this is dyn_cast<ConstantSDNode>; the result is
    // dereferenced without a null check below — relies on the DUP operand
    // being constant here. Confirm against the original file.
    ConstantSDNode *C = dyn_cast(Dup->getOperand(0));
    uint64_t ExtVal = C->getZExtValue();

    // If the mask is fully covered by the unpack, we don't need to push
    // a new AND onto the operand
    EVT EltTy = UnpkOp->getValueType(0).getVectorElementType();
    if ((ExtVal == 0xFF && EltTy == MVT::i8) ||
        (ExtVal == 0xFFFF && EltTy == MVT::i16) ||
        (ExtVal == 0xFFFFFFFF && EltTy == MVT::i32))
      return Src;

    // Truncate to prevent a DUP with an over wide constant
    APInt Mask = C->getAPIntValue().trunc(EltTy.getSizeInBits());

    // Otherwise, make sure we propagate the AND to the operand
    // of the unpack
    Dup = DAG.getNode(AArch64ISD::DUP, DL, UnpkOp->getValueType(0),
                      DAG.getConstant(Mask.zextOrTrunc(32), DL, MVT::i32));

    SDValue And = DAG.getNode(ISD::AND, DL,
                              UnpkOp->getValueType(0), UnpkOp, Dup);

    return DAG.getNode(Opc, DL, N->getValueType(0), And);
  }

  if (!EnableCombineMGatherIntrinsics)
    return SDValue();

  SDValue Mask = N->getOperand(1);

  if (!Src.hasOneUse())
    return SDValue();

  EVT MemVT;

  // SVE load instructions perform an implicit zero-extend, which makes them
  // perfect candidates for combining.
  // (continuation of performSVEAndCombine: read the memory VT operand from
  // the recognised SVE load/gather node — contiguous loads keep it at
  // operand 3, gathers at operand 4.)
  // NOTE(review): `cast(...)` below appears with its template argument
  // stripped by extraction (upstream: VTSDNode) — confirm against the
  // original file.
  switch (Opc) {
  case AArch64ISD::LD1_MERGE_ZERO:
  case AArch64ISD::LDNF1_MERGE_ZERO:
  case AArch64ISD::LDFF1_MERGE_ZERO:
    MemVT = cast(Src->getOperand(3))->getVT();
    break;
  case AArch64ISD::GLD1_MERGE_ZERO:
  case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
  case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
  case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
  case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
  case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
  case AArch64ISD::GLD1_IMM_MERGE_ZERO:
  case AArch64ISD::GLDFF1_MERGE_ZERO:
  case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO:
  case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO:
  case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO:
  case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO:
  case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO:
  case AArch64ISD::GLDFF1_IMM_MERGE_ZERO:
  case AArch64ISD::GLDNT1_MERGE_ZERO:
    MemVT = cast(Src->getOperand(4))->getVT();
    break;
  default:
    return SDValue();
  }

  // The AND is redundant when it masks exactly the bits the zero-extending
  // load already guarantees to be clear.
  if (isConstantSplatVectorMaskForType(Mask.getNode(), MemVT))
    return Src;

  return SDValue();
}

/// Combine for ISD::AND on vectors: dispatch SVE types to
/// performSVEAndCombine, otherwise try to use a BIC immediate for constant
/// masks on NEON vectors.
static SDValue performANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  SDValue LHS = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (!VT.isVector() || !DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  if (VT.isScalableVector())
    return performSVEAndCombine(N, DCI);

  // The combining code below works only for NEON vectors. In particular, it
  // does not work for SVE when dealing with vectors wider than 128 bits.
  if (!(VT.is64BitVector() || VT.is128BitVector()))
    return SDValue();

  // NOTE(review): upstream this is dyn_cast<BuildVectorSDNode>; the template
  // argument appears stripped by extraction — confirm against the original.
  BuildVectorSDNode *BVN = dyn_cast(N->getOperand(1).getNode());
  if (!BVN)
    return SDValue();

  // AND does not accept an immediate, so check if we can use a BIC immediate
  // instruction instead. We do this here instead of using a (and x, (mvni imm))
  // pattern in isel, because some immediates may be lowered to the preferred
  // (and x, (movi imm)) form, even though an mvni representation also exists.
  APInt DefBits(VT.getSizeInBits(), 0);
  APInt UndefBits(VT.getSizeInBits(), 0);
  if (resolveBuildVector(BVN, DefBits, UndefBits)) {
    SDValue NewOp;

    // BIC clears the bits set in its immediate, so invert the mask first.
    DefBits = ~DefBits;
    if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
                                    DefBits, &LHS)) ||
        (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
                                    DefBits, &LHS)))
      return NewOp;

    UndefBits = ~UndefBits;
    if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
                                    UndefBits, &LHS)) ||
        (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
                                    UndefBits, &LHS)))
      return NewOp;
  }

  return SDValue();
}

/// Combine for ISD::SRL: canonicalise srl(bswap x, half-width) to
/// rotr(bswap x, half-width) when the shifted-out half of x is known zero,
/// which exposes REV16/REV32-style patterns.
static SDValue performSRLCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  // Canonicalize (srl (bswap i32 x), 16) to (rotr (bswap i32 x), 16), if the
  // high 16-bits of x are zero. Similarly, canonicalize (srl (bswap i64 x), 32)
  // to (rotr (bswap i64 x), 32), if the high 32-bits of x are zero.
  SDValue N0 = N->getOperand(0);
  if (N0.getOpcode() == ISD::BSWAP) {
    SDLoc DL(N);
    SDValue N1 = N->getOperand(1);
    SDValue N00 = N0.getOperand(0);
    if (ConstantSDNode *C = dyn_cast(N1)) {
      uint64_t ShiftAmt = C->getZExtValue();
      if (VT == MVT::i32 && ShiftAmt == 16 &&
          DAG.MaskedValueIsZero(N00, APInt::getHighBitsSet(32, 16)))
        return DAG.getNode(ISD::ROTR, DL, VT, N0, N1);
      if (VT == MVT::i64 && ShiftAmt == 32 &&
          DAG.MaskedValueIsZero(N00, APInt::getHighBitsSet(64, 32)))
        return DAG.getNode(ISD::ROTR, DL, VT, N0, N1);
    }
  }
  return SDValue();
}

// Attempt to form urhadd(OpA, OpB) from
// truncate(vlshr(sub(zext(OpB), xor(zext(OpA), Ones(ElemSizeInBits))), 1))
// or uhadd(OpA, OpB) from truncate(vlshr(add(zext(OpA), zext(OpB)), 1)).
// The original form of the first expression is
// truncate(srl(add(zext(OpB), add(zext(OpA), 1)), 1)) and the
// (OpA + OpB + 1) subexpression will have been changed to (OpB - (~OpA)).
// Before this function is called the srl will have been lowered to
// AArch64ISD::VLSHR.
// This pass can also recognize signed variants of the patterns that use sign
// extension instead of zero extension and form a srhadd(OpA, OpB) or a
// shadd(OpA, OpB) from them.
static SDValue
performVectorTruncateCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                             SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);

  // Since we are looking for a right shift by a constant value of 1 and we are
  // operating on types at least 16 bits in length (sign/zero extended OpA and
  // OpB, which are at least 8 bits), it follows that the truncate will always
  // discard the shifted-in bit and therefore the right shift will be logical
  // regardless of the signedness of OpA and OpB.
  SDValue Shift = N->getOperand(0);
  if (Shift.getOpcode() != AArch64ISD::VLSHR)
    return SDValue();

  // Is the right shift using an immediate value of 1?
  uint64_t ShiftAmount = Shift.getConstantOperandVal(1);
  if (ShiftAmount != 1)
    return SDValue();

  SDValue ExtendOpA, ExtendOpB;
  SDValue ShiftOp0 = Shift.getOperand(0);
  unsigned ShiftOp0Opc = ShiftOp0.getOpcode();
  if (ShiftOp0Opc == ISD::SUB) {
    // Rounding-halving-add form: (OpB - (~OpA)), i.e. OpA + OpB + 1.
    SDValue Xor = ShiftOp0.getOperand(1);
    if (Xor.getOpcode() != ISD::XOR)
      return SDValue();

    // Is the XOR using a constant amount of all ones in the right hand side?
    uint64_t C;
    if (!isAllConstantBuildVector(Xor.getOperand(1), C))
      return SDValue();

    unsigned ElemSizeInBits = VT.getScalarSizeInBits();
    APInt CAsAPInt(ElemSizeInBits, C);
    if (CAsAPInt != APInt::getAllOnesValue(ElemSizeInBits))
      return SDValue();

    ExtendOpA = Xor.getOperand(0);
    ExtendOpB = ShiftOp0.getOperand(0);
  } else if (ShiftOp0Opc == ISD::ADD) {
    // Plain halving-add form: (OpA + OpB).
    ExtendOpA = ShiftOp0.getOperand(0);
    ExtendOpB = ShiftOp0.getOperand(1);
  } else
    return SDValue();

  // Both operands must be the same kind of extend.
  unsigned ExtendOpAOpc = ExtendOpA.getOpcode();
  unsigned ExtendOpBOpc = ExtendOpB.getOpcode();
  if (!(ExtendOpAOpc == ExtendOpBOpc &&
        (ExtendOpAOpc == ISD::ZERO_EXTEND || ExtendOpAOpc == ISD::SIGN_EXTEND)))
    return SDValue();

  // Is the result of the right shift being truncated to the same value type as
  // the original operands, OpA and OpB?
  SDValue OpA = ExtendOpA.getOperand(0);
  SDValue OpB = ExtendOpB.getOperand(0);
  EVT OpAVT = OpA.getValueType();
  assert(ExtendOpA.getValueType() == ExtendOpB.getValueType());
  if (!(VT == OpAVT && OpAVT == OpB.getValueType()))
    return SDValue();

  SDLoc DL(N);
  bool IsSignExtend = ExtendOpAOpc == ISD::SIGN_EXTEND;
  bool IsRHADD = ShiftOp0Opc == ISD::SUB;
  // Pick [su]rhadd for the rounding (SUB) form, [su]hadd otherwise.
  unsigned HADDOpc = IsSignExtend
                         ? (IsRHADD ? AArch64ISD::SRHADD : AArch64ISD::SHADD)
                         : (IsRHADD ? AArch64ISD::URHADD : AArch64ISD::UHADD);
  SDValue ResultHADD = DAG.getNode(HADDOpc, DL, VT, OpA, OpB);

  return ResultHADD;
}

/// Returns true when \p Opcode at type \p VT maps to a scalar pairwise-add
/// instruction (FADDP for f16/f32/f64, ADDP for i64).
static bool hasPairwiseAdd(unsigned Opcode, EVT VT, bool FullFP16) {
  switch (Opcode) {
  case ISD::FADD:
    return (FullFP16 && VT == MVT::f16) || VT == MVT::f32 || VT == MVT::f64;
  case ISD::ADD:
    return VT == MVT::i64;
  default:
    return false;
  }
}

/// Combine for EXTRACT_VECTOR_ELT: rewrite the lane-0 extract of
/// (add/fadd v, shuffle(v, <1,...>)) into a pairwise add of lanes 0 and 1.
// NOTE(review): `dyn_cast(...)` and `static_cast(...)` below appear with
// template arguments stripped by extraction (upstream: ConstantSDNode,
// ShuffleVectorSDNode, const AArch64Subtarget &) — confirm against the
// original file.
static SDValue performExtractVectorEltCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
  ConstantSDNode *ConstantN1 = dyn_cast(N1);

  EVT VT = N->getValueType(0);
  const bool FullFP16 =
      static_cast(DAG.getSubtarget()).hasFullFP16();

  // Rewrite for pairwise fadd pattern
  //   (f32 (extract_vector_elt
  //           (fadd (vXf32 Other)
  //                 (vector_shuffle (vXf32 Other) undef <1,X,...> )) 0))
  // ->
  //   (f32 (fadd (extract_vector_elt (vXf32 Other) 0)
  //              (extract_vector_elt (vXf32 Other) 1))
  if (ConstantN1 && ConstantN1->getZExtValue() == 0 &&
      hasPairwiseAdd(N0->getOpcode(), VT, FullFP16)) {
    SDLoc DL(N0);
    SDValue N00 = N0->getOperand(0);
    SDValue N01 = N0->getOperand(1);

    ShuffleVectorSDNode *Shuffle = dyn_cast(N01);
    SDValue Other = N00;

    // And handle the commutative case.
    // (continuation of performExtractVectorEltCombine: try the shuffle on the
    // other add operand.)
    if (!Shuffle) {
      Shuffle = dyn_cast(N00);
      Other = N01;
    }

    if (Shuffle && Shuffle->getMaskElt(0) == 1 &&
        Other == Shuffle->getOperand(0)) {
      return DAG.getNode(N0->getOpcode(), DL, VT,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other,
                                     DAG.getConstant(0, DL, MVT::i64)),
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other,
                                     DAG.getConstant(1, DL, MVT::i64)));
    }
  }

  return SDValue();
}

/// Combine for CONCAT_VECTORS: fold concat-of-truncates through a shuffle,
/// merge concat of two half-width [us][r]hadd results into one full-width op,
/// canonicalise self-concat of a v1x64 to DUPLANE64, and push bitcasts on the
/// RHS below the concat.
// NOTE(review): `SmallVector Mask(...)` below appears with its template
// arguments stripped by extraction (upstream: SmallVector<int, 8>) — confirm
// against the original file.
static SDValue performConcatVectorsCombine(SDNode *N,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           SelectionDAG &DAG) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
  unsigned N0Opc = N0->getOpcode(), N1Opc = N1->getOpcode();

  // Optimize concat_vectors of truncated vectors, where the intermediate
  // type is illegal, to avoid said illegality,  e.g.,
  //   (v4i16 (concat_vectors (v2i16 (truncate (v2i64))),
  //                          (v2i16 (truncate (v2i64)))))
  // ->
  //   (v4i16 (truncate (vector_shuffle (v4i32 (bitcast (v2i64))),
  //                                    (v4i32 (bitcast (v2i64))),
  //                                    <0, 2, 4, 6>)))
  // This isn't really target-specific, but ISD::TRUNCATE legality isn't keyed
  // on both input and result type, so we might generate worse code.
  // On AArch64 we know it's fine for v2i64->v4i16 and v4i32->v8i8.
  if (N->getNumOperands() == 2 && N0Opc == ISD::TRUNCATE &&
      N1Opc == ISD::TRUNCATE) {
    SDValue N00 = N0->getOperand(0);
    SDValue N10 = N1->getOperand(0);
    EVT N00VT = N00.getValueType();

    if (N00VT == N10.getValueType() &&
        (N00VT == MVT::v2i64 || N00VT == MVT::v4i32) &&
        N00VT.getScalarSizeInBits() == 4 * VT.getScalarSizeInBits()) {
      MVT MidVT = (N00VT == MVT::v2i64 ? MVT::v4i32 : MVT::v8i16);
      // Mask selecting the even lanes of the combined wide vector.
      SmallVector Mask(MidVT.getVectorNumElements());
      for (size_t i = 0; i < Mask.size(); ++i)
        Mask[i] = i * 2;
      return DAG.getNode(ISD::TRUNCATE, dl, VT,
                         DAG.getVectorShuffle(
                             MidVT, dl,
                             DAG.getNode(ISD::BITCAST, dl, MidVT, N00),
                             DAG.getNode(ISD::BITCAST, dl, MidVT, N10), Mask));
    }
  }

  // Wait 'til after everything is legalized to try this. That way we have
  // legal vector types and such.
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  // Optimise concat_vectors of two [us]rhadds or [us]hadds that use extracted
  // subvectors from the same original vectors. Combine these into a single
  // [us]rhadd or [us]hadd that operates on the two original vectors. Example:
  //  (v16i8 (concat_vectors (v8i8 (urhadd (extract_subvector (v16i8 OpA, <0>),
  //                                        extract_subvector (v16i8 OpB,
  //                                        <0>))),
  //                         (v8i8 (urhadd (extract_subvector (v16i8 OpA, <8>),
  //                                        extract_subvector (v16i8 OpB,
  //                                        <8>)))))
  // ->
  //  (v16i8(urhadd(v16i8 OpA, v16i8 OpB)))
  if (N->getNumOperands() == 2 && N0Opc == N1Opc &&
      (N0Opc == AArch64ISD::URHADD || N0Opc == AArch64ISD::SRHADD ||
       N0Opc == AArch64ISD::UHADD || N0Opc == AArch64ISD::SHADD)) {
    SDValue N00 = N0->getOperand(0);
    SDValue N01 = N0->getOperand(1);
    SDValue N10 = N1->getOperand(0);
    SDValue N11 = N1->getOperand(1);

    EVT N00VT = N00.getValueType();
    EVT N10VT = N10.getValueType();

    if (N00->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        N01->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        N10->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        N11->getOpcode() == ISD::EXTRACT_SUBVECTOR && N00VT == N10VT) {
      SDValue N00Source = N00->getOperand(0);
      SDValue N01Source = N01->getOperand(0);
      SDValue N10Source = N10->getOperand(0);
      SDValue N11Source = N11->getOperand(0);

      if (N00Source == N10Source && N01Source == N11Source &&
          N00Source.getValueType() == VT && N01Source.getValueType() == VT) {
        assert(N0.getValueType() == N1.getValueType());

        // The two halves must extract the low then high halves, in order.
        uint64_t N00Index = N00.getConstantOperandVal(1);
        uint64_t N01Index = N01.getConstantOperandVal(1);
        uint64_t N10Index = N10.getConstantOperandVal(1);
        uint64_t N11Index = N11.getConstantOperandVal(1);

        if (N00Index == N01Index && N10Index == N11Index && N00Index == 0 &&
            N10Index == N00VT.getVectorNumElements())
          return DAG.getNode(N0Opc, dl, VT, N00Source, N01Source);
      }
    }
  }

  // If we see a (concat_vectors (v1x64 A), (v1x64 A)) it's really a vector
  // splat. The indexed instructions are going to be expecting a DUPLANE64, so
  // canonicalise to that.
  if (N0 == N1 && VT.getVectorNumElements() == 2) {
    assert(VT.getScalarSizeInBits() == 64);
    return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG),
                       DAG.getConstant(0, dl, MVT::i64));
  }

  // Canonicalise concat_vectors so that the right-hand vector has as few
  // bit-casts as possible before its real operation. The primary matching
  // destination for these operations will be the narrowing "2" instructions,
  // which depend on the operation being performed on this right-hand vector.
  // For example,
  //    (concat_vectors LHS,  (v1i64 (bitconvert (v4i16 RHS))))
  // becomes
  //    (bitconvert (concat_vectors (v4i16 (bitconvert LHS)), RHS))

  if (N1Opc != ISD::BITCAST)
    return SDValue();
  SDValue RHS = N1->getOperand(0);
  MVT RHSTy = RHS.getValueType().getSimpleVT();
  // If the RHS is not a vector, this is not the pattern we're looking for.
  if (!RHSTy.isVector())
    return SDValue();

  LLVM_DEBUG(
      dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n");

  MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(),
                                  RHSTy.getVectorNumElements() * 2);
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatTy,
                                 DAG.getNode(ISD::BITCAST, dl, RHSTy, N0),
                                 RHS));
}

// (head of tryCombineFixedPointConvert; its body continues past this chunk.)
static SDValue
tryCombineFixedPointConvert(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                            SelectionDAG &DAG) {
  // Wait until after everything is legalized to try this. That way we have
  // legal vector types and such.
  if (DCI.isBeforeLegalizeOps())
    return SDValue();
  // Transform a scalar conversion of a value from a lane extract into a
  // lane extract of a vector conversion.
E.g., from foo1 to foo2: // double foo1(int64x2_t a) { return vcvtd_n_f64_s64(a[1], 9); } // double foo2(int64x2_t a) { return vcvtq_n_f64_s64(a, 9)[1]; } // // The second form interacts better with instruction selection and the // register allocator to avoid cross-class register copies that aren't // coalescable due to a lane reference. // Check the operand and see if it originates from a lane extract. SDValue Op1 = N->getOperand(1); if (Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { // Yep, no additional predication needed. Perform the transform. SDValue IID = N->getOperand(0); SDValue Shift = N->getOperand(2); SDValue Vec = Op1.getOperand(0); SDValue Lane = Op1.getOperand(1); EVT ResTy = N->getValueType(0); EVT VecResTy; SDLoc DL(N); // The vector width should be 128 bits by the time we get here, even // if it started as 64 bits (the extract_vector handling will have // done so). assert(Vec.getValueSizeInBits() == 128 && "unexpected vector size on extract_vector_elt!"); if (Vec.getValueType() == MVT::v4i32) VecResTy = MVT::v4f32; else if (Vec.getValueType() == MVT::v2i64) VecResTy = MVT::v2f64; else llvm_unreachable("unexpected vector type!"); SDValue Convert = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane); } return SDValue(); } // AArch64 high-vector "long" operations are formed by performing the non-high // version on an extract_subvector of each operand which gets the high half: // // (longop2 LHS, RHS) == (longop (extract_high LHS), (extract_high RHS)) // // However, there are cases which don't have an extract_high explicitly, but // have another operation that can be made compatible with one for free. For // example: // // (dupv64 scalar) --> (extract_high (dup128 scalar)) // // This routine does the actual conversion of such DUPs, once outer routines // have determined that everything else is in order. 
// It also supports immediate DUP-like nodes (MOVI/MVNi), which we can fold // similarly here. static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) { switch (N.getOpcode()) { case AArch64ISD::DUP: case AArch64ISD::DUPLANE8: case AArch64ISD::DUPLANE16: case AArch64ISD::DUPLANE32: case AArch64ISD::DUPLANE64: case AArch64ISD::MOVI: case AArch64ISD::MOVIshift: case AArch64ISD::MOVIedit: case AArch64ISD::MOVImsl: case AArch64ISD::MVNIshift: case AArch64ISD::MVNImsl: break; default: // FMOV could be supported, but isn't very useful, as it would only occur // if you passed a bitcast' floating point immediate to an eligible long // integer op (addl, smull, ...). return SDValue(); } MVT NarrowTy = N.getSimpleValueType(); if (!NarrowTy.is64BitVector()) return SDValue(); MVT ElementTy = NarrowTy.getVectorElementType(); unsigned NumElems = NarrowTy.getVectorNumElements(); MVT NewVT = MVT::getVectorVT(ElementTy, NumElems * 2); SDLoc dl(N); return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NarrowTy, DAG.getNode(N->getOpcode(), dl, NewVT, N->ops()), DAG.getConstant(NumElems, dl, MVT::i64)); } static bool isEssentiallyExtractHighSubvector(SDValue N) { if (N.getOpcode() == ISD::BITCAST) N = N.getOperand(0); if (N.getOpcode() != ISD::EXTRACT_SUBVECTOR) return false; if (N.getOperand(0).getValueType().isScalableVector()) return false; return cast(N.getOperand(1))->getAPIntValue() == N.getOperand(0).getValueType().getVectorNumElements() / 2; } /// Helper structure to keep track of ISD::SET_CC operands. struct GenericSetCCInfo { const SDValue *Opnd0; const SDValue *Opnd1; ISD::CondCode CC; }; /// Helper structure to keep track of a SET_CC lowered into AArch64 code. struct AArch64SetCCInfo { const SDValue *Cmp; AArch64CC::CondCode CC; }; /// Helper structure to keep track of SetCC information. union SetCCInfo { GenericSetCCInfo Generic; AArch64SetCCInfo AArch64; }; /// Helper structure to be able to read SetCC information. 
If set to /// true, IsAArch64 field, Info is a AArch64SetCCInfo, otherwise Info is a /// GenericSetCCInfo. struct SetCCInfoAndKind { SetCCInfo Info; bool IsAArch64; }; /// Check whether or not \p Op is a SET_CC operation, either a generic or /// an /// AArch64 lowered one. /// \p SetCCInfo is filled accordingly. /// \post SetCCInfo is meanginfull only when this function returns true. /// \return True when Op is a kind of SET_CC operation. static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) { // If this is a setcc, this is straight forward. if (Op.getOpcode() == ISD::SETCC) { SetCCInfo.Info.Generic.Opnd0 = &Op.getOperand(0); SetCCInfo.Info.Generic.Opnd1 = &Op.getOperand(1); SetCCInfo.Info.Generic.CC = cast(Op.getOperand(2))->get(); SetCCInfo.IsAArch64 = false; return true; } // Otherwise, check if this is a matching csel instruction. // In other words: // - csel 1, 0, cc // - csel 0, 1, !cc if (Op.getOpcode() != AArch64ISD::CSEL) return false; // Set the information about the operands. // TODO: we want the operands of the Cmp not the csel SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3); SetCCInfo.IsAArch64 = true; SetCCInfo.Info.AArch64.CC = static_cast( cast(Op.getOperand(2))->getZExtValue()); // Check that the operands matches the constraints: // (1) Both operands must be constants. // (2) One must be 1 and the other must be 0. ConstantSDNode *TValue = dyn_cast(Op.getOperand(0)); ConstantSDNode *FValue = dyn_cast(Op.getOperand(1)); // Check (1). if (!TValue || !FValue) return false; // Check (2). if (!TValue->isOne()) { // Update the comparison when we are interested in !cc. std::swap(TValue, FValue); SetCCInfo.Info.AArch64.CC = AArch64CC::getInvertedCondCode(SetCCInfo.Info.AArch64.CC); } return TValue->isOne() && FValue->isNullValue(); } // Returns true if Op is setcc or zext of setcc. 
static bool isSetCCOrZExtSetCC(const SDValue& Op, SetCCInfoAndKind &Info) { if (isSetCC(Op, Info)) return true; return ((Op.getOpcode() == ISD::ZERO_EXTEND) && isSetCC(Op->getOperand(0), Info)); } // The folding we want to perform is: // (add x, [zext] (setcc cc ...) ) // --> // (csel x, (add x, 1), !cc ...) // // The latter will get matched to a CSINC instruction. static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) { assert(Op && Op->getOpcode() == ISD::ADD && "Unexpected operation!"); SDValue LHS = Op->getOperand(0); SDValue RHS = Op->getOperand(1); SetCCInfoAndKind InfoAndKind; // If both operands are a SET_CC, then we don't want to perform this // folding and create another csel as this results in more instructions // (and higher register usage). if (isSetCCOrZExtSetCC(LHS, InfoAndKind) && isSetCCOrZExtSetCC(RHS, InfoAndKind)) return SDValue(); // If neither operand is a SET_CC, give up. if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) { std::swap(LHS, RHS); if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) return SDValue(); } // FIXME: This could be generatized to work for FP comparisons. EVT CmpVT = InfoAndKind.IsAArch64 ? 
InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType() : InfoAndKind.Info.Generic.Opnd0->getValueType(); if (CmpVT != MVT::i32 && CmpVT != MVT::i64) return SDValue(); SDValue CCVal; SDValue Cmp; SDLoc dl(Op); if (InfoAndKind.IsAArch64) { CCVal = DAG.getConstant( AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), dl, MVT::i32); Cmp = *InfoAndKind.Info.AArch64.Cmp; } else Cmp = getAArch64Cmp( *InfoAndKind.Info.Generic.Opnd0, *InfoAndKind.Info.Generic.Opnd1, ISD::getSetCCInverse(InfoAndKind.Info.Generic.CC, CmpVT), CCVal, DAG, dl); EVT VT = Op->getValueType(0); LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, dl, VT)); return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp); } // ADD(UADDV a, UADDV b) --> UADDV(ADD a, b) static SDValue performUADDVCombine(SDNode *N, SelectionDAG &DAG) { EVT VT = N->getValueType(0); // Only scalar integer and vector types. if (N->getOpcode() != ISD::ADD || !VT.isScalarInteger()) return SDValue(); SDValue LHS = N->getOperand(0); SDValue RHS = N->getOperand(1); if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT || RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT || LHS.getValueType() != VT) return SDValue(); auto *LHSN1 = dyn_cast(LHS->getOperand(1)); auto *RHSN1 = dyn_cast(RHS->getOperand(1)); if (!LHSN1 || LHSN1 != RHSN1 || !RHSN1->isNullValue()) return SDValue(); SDValue Op1 = LHS->getOperand(0); SDValue Op2 = RHS->getOperand(0); EVT OpVT1 = Op1.getValueType(); EVT OpVT2 = Op2.getValueType(); if (Op1.getOpcode() != AArch64ISD::UADDV || OpVT1 != OpVT2 || Op2.getOpcode() != AArch64ISD::UADDV || OpVT1.getVectorElementType() != VT) return SDValue(); SDValue Val1 = Op1.getOperand(0); SDValue Val2 = Op2.getOperand(0); EVT ValVT = Val1->getValueType(0); SDLoc DL(N); SDValue AddVal = DAG.getNode(ISD::ADD, DL, ValVT, Val1, Val2); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, DAG.getNode(AArch64ISD::UADDV, DL, ValVT, AddVal), DAG.getConstant(0, DL, MVT::i64)); } // ADD(UDOT(zero, x, y), A) --> UDOT(A, x, y) 
static SDValue performAddDotCombine(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  if (N->getOpcode() != ISD::ADD)
    return SDValue();

  SDValue Dot = N->getOperand(0);
  SDValue A = N->getOperand(1);
  // Handle commutivity
  auto isZeroDot = [](SDValue Dot) {
    return (Dot.getOpcode() == AArch64ISD::UDOT ||
            Dot.getOpcode() == AArch64ISD::SDOT) &&
           isZerosVector(Dot.getOperand(0).getNode());
  };
  if (!isZeroDot(Dot))
    std::swap(Dot, A);
  if (!isZeroDot(Dot))
    return SDValue();

  return DAG.getNode(Dot.getOpcode(), SDLoc(N), VT, A, Dot.getOperand(1),
                     Dot.getOperand(2));
}

// The basic add/sub long vector instructions have variants with "2" on the end
// which act on the high-half of their inputs. They are normally matched by
// patterns like:
//
// (add (zeroext (extract_high LHS)),
//      (zeroext (extract_high RHS)))
// -> uaddl2 vD, vN, vM
//
// However, if one of the extracts is something like a duplicate, this
// instruction can still be used profitably. This function puts the DAG into a
// more appropriate form for those patterns to trigger.
static SDValue performAddSubLongCombine(SDNode *N,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        SelectionDAG &DAG) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  if (!VT.is128BitVector()) {
    if (N->getOpcode() == ISD::ADD)
      return performSetccAddFolding(N, DAG);
    return SDValue();
  }

  // Make sure both branches are extended in the same way.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  if ((LHS.getOpcode() != ISD::ZERO_EXTEND &&
       LHS.getOpcode() != ISD::SIGN_EXTEND) ||
      LHS.getOpcode() != RHS.getOpcode())
    return SDValue();

  unsigned ExtType = LHS.getOpcode();

  // It's not worth doing if at least one of the inputs isn't already an
  // extract, but we don't know which it'll be so we have to try both.
  if (isEssentiallyExtractHighSubvector(LHS.getOperand(0))) {
    RHS = tryExtendDUPToExtractHigh(RHS.getOperand(0), DAG);
    if (!RHS.getNode())
      return SDValue();

    RHS = DAG.getNode(ExtType, SDLoc(N), VT, RHS);
  } else if (isEssentiallyExtractHighSubvector(RHS.getOperand(0))) {
    LHS = tryExtendDUPToExtractHigh(LHS.getOperand(0), DAG);
    if (!LHS.getNode())
      return SDValue();

    LHS = DAG.getNode(ExtType, SDLoc(N), VT, LHS);
  }

  return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS);
}

static SDValue performAddSubCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    SelectionDAG &DAG) {
  // Try to change sum of two reductions.
  if (SDValue Val = performUADDVCombine(N, DAG))
    return Val;
  if (SDValue Val = performAddDotCombine(N, DAG))
    return Val;

  return performAddSubLongCombine(N, DCI, DAG);
}

// Massage DAGs which we can use the high-half "long" operations on into
// something isel will recognize better. E.g.
//
// (aarch64_neon_umull (extract_high vec) (dupv64 scalar)) -->
//   (aarch64_neon_umull (extract_high (v2i64 vec)))
//                     (extract_high (v2i64 (dup128 scalar)))))
//
static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       SelectionDAG &DAG) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue LHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 0 : 1);
  SDValue RHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 1 : 2);
  assert(LHS.getValueType().is64BitVector() &&
         RHS.getValueType().is64BitVector() &&
         "unexpected shape for long operation");

  // Either node could be a DUP, but it's not worth doing both of them (you'd
  // just as well use the non-high version) so look for a corresponding extract
  // operation on the other "wing".
  if (isEssentiallyExtractHighSubvector(LHS)) {
    RHS = tryExtendDUPToExtractHigh(RHS, DAG);
    if (!RHS.getNode())
      return SDValue();
  } else if (isEssentiallyExtractHighSubvector(RHS)) {
    LHS = tryExtendDUPToExtractHigh(LHS, DAG);
    if (!LHS.getNode())
      return SDValue();
  }

  if (IID == Intrinsic::not_intrinsic)
    return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), LHS, RHS);

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0),
                     N->getOperand(0), LHS, RHS);
}

static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
  MVT ElemTy = N->getSimpleValueType(0).getScalarType();
  unsigned ElemBits = ElemTy.getSizeInBits();

  int64_t ShiftAmount;
  if (BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(2))) {
    APInt SplatValue, SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                              HasAnyUndefs, ElemBits) ||
        SplatBitSize != ElemBits)
      return SDValue();

    ShiftAmount = SplatValue.getSExtValue();
  } else if (ConstantSDNode *CVN = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
    ShiftAmount = CVN->getSExtValue();
  } else
    return SDValue();

  unsigned Opcode;
  bool IsRightShift;
  switch (IID) {
  default:
    llvm_unreachable("Unknown shift intrinsic");
  case Intrinsic::aarch64_neon_sqshl:
    Opcode = AArch64ISD::SQSHL_I;
    IsRightShift = false;
    break;
  case Intrinsic::aarch64_neon_uqshl:
    Opcode = AArch64ISD::UQSHL_I;
    IsRightShift = false;
    break;
  case Intrinsic::aarch64_neon_srshl:
    Opcode = AArch64ISD::SRSHR_I;
    IsRightShift = true;
    break;
  case Intrinsic::aarch64_neon_urshl:
    Opcode = AArch64ISD::URSHR_I;
    IsRightShift = true;
    break;
  case Intrinsic::aarch64_neon_sqshlu:
    Opcode = AArch64ISD::SQSHLU_I;
    IsRightShift = false;
    break;
  case Intrinsic::aarch64_neon_sshl:
  case Intrinsic::aarch64_neon_ushl:
    // For positive shift amounts we can use SHL, as ushl/sshl perform a regular
    // left shift for positive shift amounts. Below, we only replace the current
    // node with VSHL, if this condition is met.
    Opcode = AArch64ISD::VSHL;
    IsRightShift = false;
    break;
  }

  if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) {
    SDLoc dl(N);
    return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
                       DAG.getConstant(-ShiftAmount, dl, MVT::i32));
  } else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) {
    SDLoc dl(N);
    return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
                       DAG.getConstant(ShiftAmount, dl, MVT::i32));
  }

  return SDValue();
}

// The CRC32[BH] instructions ignore the high bits of their data operand. Since
// the intrinsics must be legal and take an i32, this means there's almost
// certainly going to be a zext in the DAG which we can eliminate.
static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) {
  SDValue AndN = N->getOperand(2);
  if (AndN.getOpcode() != ISD::AND)
    return SDValue();

  ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(AndN.getOperand(1));
  if (!CMask || CMask->getZExtValue() != Mask)
    return SDValue();

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), MVT::i32,
                     N->getOperand(0), N->getOperand(1), AndN.getOperand(0));
}

static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N,
                                           SelectionDAG &DAG) {
  SDLoc dl(N);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0),
                     DAG.getNode(Opc, dl,
                                 N->getOperand(1).getSimpleValueType(),
                                 N->getOperand(1)),
                     DAG.getConstant(0, dl, MVT::i64));
}

static SDValue LowerSVEIntrinsicIndex(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue Op1 = N->getOperand(1);
  SDValue Op2 = N->getOperand(2);
  EVT ScalarTy = Op2.getValueType();
  if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
    ScalarTy = MVT::i32;

  // Lower index_vector(base, step) to mul(step step_vector(1)) + splat(base).
  SDValue StepVector = DAG.getStepVector(DL, N->getValueType(0));
  SDValue Step = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op2);
  SDValue Mul = DAG.getNode(ISD::MUL, DL, N->getValueType(0), StepVector, Step);
  SDValue Base = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op1);
  return DAG.getNode(ISD::ADD, DL, N->getValueType(0), Mul, Base);
}

static SDValue LowerSVEIntrinsicDUP(SDNode *N, SelectionDAG &DAG) {
  SDLoc dl(N);
  SDValue Scalar = N->getOperand(3);
  EVT ScalarTy = Scalar.getValueType();

  if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
    Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);

  SDValue Passthru = N->getOperand(1);
  SDValue Pred = N->getOperand(2);
  return DAG.getNode(AArch64ISD::DUP_MERGE_PASSTHRU, dl, N->getValueType(0),
                     Pred, Scalar, Passthru);
}

static SDValue LowerSVEIntrinsicEXT(SDNode *N, SelectionDAG &DAG) {
  SDLoc dl(N);
  LLVMContext &Ctx = *DAG.getContext();
  EVT VT = N->getValueType(0);

  assert(VT.isScalableVector() && "Expected a scalable vector.");

  // Current lowering only supports the SVE-ACLE types.
  if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
    return SDValue();

  unsigned ElemSize = VT.getVectorElementType().getSizeInBits() / 8;
  unsigned ByteSize = VT.getSizeInBits().getKnownMinSize() / 8;
  EVT ByteVT =
      EVT::getVectorVT(Ctx, MVT::i8, ElementCount::getScalable(ByteSize));

  // Convert everything to the domain of EXT (i.e bytes).
  SDValue Op0 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(1));
  SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(2));
  SDValue Op2 = DAG.getNode(ISD::MUL, dl, MVT::i32, N->getOperand(3),
                            DAG.getConstant(ElemSize, dl, MVT::i32));

  SDValue EXT = DAG.getNode(AArch64ISD::EXT, dl, ByteVT, Op0, Op1, Op2);
  return DAG.getNode(ISD::BITCAST, dl, VT, EXT);
}

static SDValue tryConvertSVEWideCompare(SDNode *N, ISD::CondCode CC,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        SelectionDAG &DAG) {
  if (DCI.isBeforeLegalize())
    return SDValue();

  SDValue Comparator = N->getOperand(3);
  if (Comparator.getOpcode() == AArch64ISD::DUP ||
      Comparator.getOpcode() == ISD::SPLAT_VECTOR) {
    unsigned IID = getIntrinsicID(N);
    EVT VT = N->getValueType(0);
    EVT CmpVT = N->getOperand(2).getValueType();
    SDValue Pred = N->getOperand(1);
    SDValue Imm;
    SDLoc DL(N);

    switch (IID) {
    default:
      llvm_unreachable("Called with wrong intrinsic!");
      break;

    // Signed comparisons
    case Intrinsic::aarch64_sve_cmpeq_wide:
    case Intrinsic::aarch64_sve_cmpne_wide:
    case Intrinsic::aarch64_sve_cmpge_wide:
    case Intrinsic::aarch64_sve_cmpgt_wide:
    case Intrinsic::aarch64_sve_cmplt_wide:
    case Intrinsic::aarch64_sve_cmple_wide: {
      if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
        int64_t ImmVal = CN->getSExtValue();
        if (ImmVal >= -16 && ImmVal <= 15)
          Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
        else
          return SDValue();
      }
      break;
    }
    // Unsigned comparisons
    case Intrinsic::aarch64_sve_cmphs_wide:
    case Intrinsic::aarch64_sve_cmphi_wide:
    case Intrinsic::aarch64_sve_cmplo_wide:
    case Intrinsic::aarch64_sve_cmpls_wide: {
      if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
        uint64_t ImmVal = CN->getZExtValue();
        if (ImmVal <= 127)
          Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
        else
          return SDValue();
      }
      break;
    }
    }

    if (!Imm)
      return SDValue();

    SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, DL, CmpVT, Imm);
    return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, VT, Pred,
                       N->getOperand(2), Splat, DAG.getCondCode(CC));
  }

  return SDValue();
}
static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op, AArch64CC::CondCode Cond) { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); SDLoc DL(Op); assert(Op.getValueType().isScalableVector() && TLI.isTypeLegal(Op.getValueType()) && "Expected legal scalable vector type!"); // Ensure target specific opcodes are using legal type. EVT OutVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); SDValue TVal = DAG.getConstant(1, DL, OutVT); SDValue FVal = DAG.getConstant(0, DL, OutVT); // Set condition code (CC) flags. SDValue Test = DAG.getNode(AArch64ISD::PTEST, DL, MVT::Other, Pg, Op); // Convert CC to integer based on requested condition. // NOTE: Cond is inverted to promote CSEL's removal when it feeds a compare. SDValue CC = DAG.getConstant(getInvertedCondCode(Cond), DL, MVT::i32); SDValue Res = DAG.getNode(AArch64ISD::CSEL, DL, OutVT, FVal, TVal, CC, Test); return DAG.getZExtOrTrunc(Res, DL, VT); } static SDValue combineSVEReductionInt(SDNode *N, unsigned Opc, SelectionDAG &DAG) { SDLoc DL(N); SDValue Pred = N->getOperand(1); SDValue VecToReduce = N->getOperand(2); // NOTE: The integer reduction's result type is not always linked to the // operand's element type so we construct it from the intrinsic's result type. EVT ReduceVT = getPackedSVEVectorVT(N->getValueType(0)); SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce); // SVE reductions set the whole vector register with the first element // containing the reduction result, which we'll now extract. 
SDValue Zero = DAG.getConstant(0, DL, MVT::i64); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce, Zero); } static SDValue combineSVEReductionFP(SDNode *N, unsigned Opc, SelectionDAG &DAG) { SDLoc DL(N); SDValue Pred = N->getOperand(1); SDValue VecToReduce = N->getOperand(2); EVT ReduceVT = VecToReduce.getValueType(); SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce); // SVE reductions set the whole vector register with the first element // containing the reduction result, which we'll now extract. SDValue Zero = DAG.getConstant(0, DL, MVT::i64); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce, Zero); } static SDValue combineSVEReductionOrderedFP(SDNode *N, unsigned Opc, SelectionDAG &DAG) { SDLoc DL(N); SDValue Pred = N->getOperand(1); SDValue InitVal = N->getOperand(2); SDValue VecToReduce = N->getOperand(3); EVT ReduceVT = VecToReduce.getValueType(); // Ordered reductions use the first lane of the result vector as the // reduction's initial value. SDValue Zero = DAG.getConstant(0, DL, MVT::i64); InitVal = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ReduceVT, DAG.getUNDEF(ReduceVT), InitVal, Zero); SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, InitVal, VecToReduce); // SVE reductions set the whole vector register with the first element // containing the reduction result, which we'll now extract. return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce, Zero); } static bool isAllActivePredicate(SDValue N) { unsigned NumElts = N.getValueType().getVectorMinNumElements(); // Look through cast. while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST) { N = N.getOperand(0); // When reinterpreting from a type with fewer elements the "new" elements // are not active, so bail if they're likely to be used. 
if (N.getValueType().getVectorMinNumElements() < NumElts) return false; } // "ptrue p., all" can be considered all active when is the same size // or smaller than the implicit element type represented by N. // NOTE: A larger element count implies a smaller element type. if (N.getOpcode() == AArch64ISD::PTRUE && N.getConstantOperandVal(0) == AArch64SVEPredPattern::all) return N.getValueType().getVectorMinNumElements() >= NumElts; return false; } // If a merged operation has no inactive lanes we can relax it to a predicated // or unpredicated operation, which potentially allows better isel (perhaps // using immediate forms) or relaxing register reuse requirements. static SDValue convertMergedOpToPredOp(SDNode *N, unsigned Opc, SelectionDAG &DAG, bool UnpredOp = false) { assert(N->getOpcode() == ISD::INTRINSIC_WO_CHAIN && "Expected intrinsic!"); assert(N->getNumOperands() == 4 && "Expected 3 operand intrinsic!"); SDValue Pg = N->getOperand(1); // ISD way to specify an all active predicate. 
if (isAllActivePredicate(Pg)) { if (UnpredOp) return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), N->getOperand(2), N->getOperand(3)); else return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Pg, N->getOperand(2), N->getOperand(3)); } // FUTURE: SplatVector(true) return SDValue(); } static SDValue performIntrinsicCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget) { SelectionDAG &DAG = DCI.DAG; unsigned IID = getIntrinsicID(N); switch (IID) { default: break; case Intrinsic::aarch64_neon_vcvtfxs2fp: case Intrinsic::aarch64_neon_vcvtfxu2fp: return tryCombineFixedPointConvert(N, DCI, DAG); case Intrinsic::aarch64_neon_saddv: return combineAcrossLanesIntrinsic(AArch64ISD::SADDV, N, DAG); case Intrinsic::aarch64_neon_uaddv: return combineAcrossLanesIntrinsic(AArch64ISD::UADDV, N, DAG); case Intrinsic::aarch64_neon_sminv: return combineAcrossLanesIntrinsic(AArch64ISD::SMINV, N, DAG); case Intrinsic::aarch64_neon_uminv: return combineAcrossLanesIntrinsic(AArch64ISD::UMINV, N, DAG); case Intrinsic::aarch64_neon_smaxv: return combineAcrossLanesIntrinsic(AArch64ISD::SMAXV, N, DAG); case Intrinsic::aarch64_neon_umaxv: return combineAcrossLanesIntrinsic(AArch64ISD::UMAXV, N, DAG); case Intrinsic::aarch64_neon_fmax: return DAG.getNode(ISD::FMAXIMUM, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2)); case Intrinsic::aarch64_neon_fmin: return DAG.getNode(ISD::FMINIMUM, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2)); case Intrinsic::aarch64_neon_fmaxnm: return DAG.getNode(ISD::FMAXNUM, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2)); case Intrinsic::aarch64_neon_fminnm: return DAG.getNode(ISD::FMINNUM, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2)); case Intrinsic::aarch64_neon_smull: case Intrinsic::aarch64_neon_umull: case Intrinsic::aarch64_neon_pmull: case Intrinsic::aarch64_neon_sqdmull: return tryCombineLongOpWithDup(IID, N, DCI, DAG); case 
Intrinsic::aarch64_neon_sqshl: case Intrinsic::aarch64_neon_uqshl: case Intrinsic::aarch64_neon_sqshlu: case Intrinsic::aarch64_neon_srshl: case Intrinsic::aarch64_neon_urshl: case Intrinsic::aarch64_neon_sshl: case Intrinsic::aarch64_neon_ushl: return tryCombineShiftImm(IID, N, DAG); case Intrinsic::aarch64_crc32b: case Intrinsic::aarch64_crc32cb: return tryCombineCRC32(0xff, N, DAG); case Intrinsic::aarch64_crc32h: case Intrinsic::aarch64_crc32ch: return tryCombineCRC32(0xffff, N, DAG); case Intrinsic::aarch64_sve_saddv: // There is no i64 version of SADDV because the sign is irrelevant. if (N->getOperand(2)->getValueType(0).getVectorElementType() == MVT::i64) return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG); else return combineSVEReductionInt(N, AArch64ISD::SADDV_PRED, DAG); case Intrinsic::aarch64_sve_uaddv: return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG); case Intrinsic::aarch64_sve_smaxv: return combineSVEReductionInt(N, AArch64ISD::SMAXV_PRED, DAG); case Intrinsic::aarch64_sve_umaxv: return combineSVEReductionInt(N, AArch64ISD::UMAXV_PRED, DAG); case Intrinsic::aarch64_sve_sminv: return combineSVEReductionInt(N, AArch64ISD::SMINV_PRED, DAG); case Intrinsic::aarch64_sve_uminv: return combineSVEReductionInt(N, AArch64ISD::UMINV_PRED, DAG); case Intrinsic::aarch64_sve_orv: return combineSVEReductionInt(N, AArch64ISD::ORV_PRED, DAG); case Intrinsic::aarch64_sve_eorv: return combineSVEReductionInt(N, AArch64ISD::EORV_PRED, DAG); case Intrinsic::aarch64_sve_andv: return combineSVEReductionInt(N, AArch64ISD::ANDV_PRED, DAG); case Intrinsic::aarch64_sve_index: return LowerSVEIntrinsicIndex(N, DAG); case Intrinsic::aarch64_sve_dup: return LowerSVEIntrinsicDUP(N, DAG); case Intrinsic::aarch64_sve_dup_x: return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), N->getValueType(0), N->getOperand(1)); case Intrinsic::aarch64_sve_ext: return LowerSVEIntrinsicEXT(N, DAG); case Intrinsic::aarch64_sve_mul: return convertMergedOpToPredOp(N, 
AArch64ISD::MUL_PRED, DAG); case Intrinsic::aarch64_sve_smulh: return convertMergedOpToPredOp(N, AArch64ISD::MULHS_PRED, DAG); case Intrinsic::aarch64_sve_umulh: return convertMergedOpToPredOp(N, AArch64ISD::MULHU_PRED, DAG); case Intrinsic::aarch64_sve_smin: return convertMergedOpToPredOp(N, AArch64ISD::SMIN_PRED, DAG); case Intrinsic::aarch64_sve_umin: return convertMergedOpToPredOp(N, AArch64ISD::UMIN_PRED, DAG); case Intrinsic::aarch64_sve_smax: return convertMergedOpToPredOp(N, AArch64ISD::SMAX_PRED, DAG); case Intrinsic::aarch64_sve_umax: return convertMergedOpToPredOp(N, AArch64ISD::UMAX_PRED, DAG); case Intrinsic::aarch64_sve_lsl: return convertMergedOpToPredOp(N, AArch64ISD::SHL_PRED, DAG); case Intrinsic::aarch64_sve_lsr: return convertMergedOpToPredOp(N, AArch64ISD::SRL_PRED, DAG); case Intrinsic::aarch64_sve_asr: return convertMergedOpToPredOp(N, AArch64ISD::SRA_PRED, DAG); case Intrinsic::aarch64_sve_fadd: return convertMergedOpToPredOp(N, AArch64ISD::FADD_PRED, DAG); case Intrinsic::aarch64_sve_fsub: return convertMergedOpToPredOp(N, AArch64ISD::FSUB_PRED, DAG); case Intrinsic::aarch64_sve_fmul: return convertMergedOpToPredOp(N, AArch64ISD::FMUL_PRED, DAG); case Intrinsic::aarch64_sve_add: return convertMergedOpToPredOp(N, ISD::ADD, DAG, true); case Intrinsic::aarch64_sve_sub: return convertMergedOpToPredOp(N, ISD::SUB, DAG, true); case Intrinsic::aarch64_sve_and: return convertMergedOpToPredOp(N, ISD::AND, DAG, true); case Intrinsic::aarch64_sve_bic: return convertMergedOpToPredOp(N, AArch64ISD::BIC, DAG, true); case Intrinsic::aarch64_sve_eor: return convertMergedOpToPredOp(N, ISD::XOR, DAG, true); case Intrinsic::aarch64_sve_orr: return convertMergedOpToPredOp(N, ISD::OR, DAG, true); case Intrinsic::aarch64_sve_sqadd: return convertMergedOpToPredOp(N, ISD::SADDSAT, DAG, true); case Intrinsic::aarch64_sve_sqsub: return convertMergedOpToPredOp(N, ISD::SSUBSAT, DAG, true); case Intrinsic::aarch64_sve_uqadd: return convertMergedOpToPredOp(N, 
ISD::UADDSAT, DAG, true); case Intrinsic::aarch64_sve_uqsub: return convertMergedOpToPredOp(N, ISD::USUBSAT, DAG, true); case Intrinsic::aarch64_sve_sqadd_x: return DAG.getNode(ISD::SADDSAT, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2)); case Intrinsic::aarch64_sve_sqsub_x: return DAG.getNode(ISD::SSUBSAT, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2)); case Intrinsic::aarch64_sve_uqadd_x: return DAG.getNode(ISD::UADDSAT, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2)); case Intrinsic::aarch64_sve_uqsub_x: return DAG.getNode(ISD::USUBSAT, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2)); case Intrinsic::aarch64_sve_cmphs: if (!N->getOperand(2).getValueType().isFloatingPoint()) return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2), N->getOperand(3), DAG.getCondCode(ISD::SETUGE)); break; case Intrinsic::aarch64_sve_cmphi: if (!N->getOperand(2).getValueType().isFloatingPoint()) return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2), N->getOperand(3), DAG.getCondCode(ISD::SETUGT)); break; case Intrinsic::aarch64_sve_fcmpge: case Intrinsic::aarch64_sve_cmpge: return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2), N->getOperand(3), DAG.getCondCode(ISD::SETGE)); break; case Intrinsic::aarch64_sve_fcmpgt: case Intrinsic::aarch64_sve_cmpgt: return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2), N->getOperand(3), DAG.getCondCode(ISD::SETGT)); break; case Intrinsic::aarch64_sve_fcmpeq: case Intrinsic::aarch64_sve_cmpeq: return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2), N->getOperand(3), DAG.getCondCode(ISD::SETEQ)); break; case Intrinsic::aarch64_sve_fcmpne: case Intrinsic::aarch64_sve_cmpne: return 
DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2), N->getOperand(3), DAG.getCondCode(ISD::SETNE)); break; case Intrinsic::aarch64_sve_fcmpuo: return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2), N->getOperand(3), DAG.getCondCode(ISD::SETUO)); break; case Intrinsic::aarch64_sve_fadda: return combineSVEReductionOrderedFP(N, AArch64ISD::FADDA_PRED, DAG); case Intrinsic::aarch64_sve_faddv: return combineSVEReductionFP(N, AArch64ISD::FADDV_PRED, DAG); case Intrinsic::aarch64_sve_fmaxnmv: return combineSVEReductionFP(N, AArch64ISD::FMAXNMV_PRED, DAG); case Intrinsic::aarch64_sve_fmaxv: return combineSVEReductionFP(N, AArch64ISD::FMAXV_PRED, DAG); case Intrinsic::aarch64_sve_fminnmv: return combineSVEReductionFP(N, AArch64ISD::FMINNMV_PRED, DAG); case Intrinsic::aarch64_sve_fminv: return combineSVEReductionFP(N, AArch64ISD::FMINV_PRED, DAG); case Intrinsic::aarch64_sve_sel: return DAG.getNode(ISD::VSELECT, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2), N->getOperand(3)); case Intrinsic::aarch64_sve_cmpeq_wide: return tryConvertSVEWideCompare(N, ISD::SETEQ, DCI, DAG); case Intrinsic::aarch64_sve_cmpne_wide: return tryConvertSVEWideCompare(N, ISD::SETNE, DCI, DAG); case Intrinsic::aarch64_sve_cmpge_wide: return tryConvertSVEWideCompare(N, ISD::SETGE, DCI, DAG); case Intrinsic::aarch64_sve_cmpgt_wide: return tryConvertSVEWideCompare(N, ISD::SETGT, DCI, DAG); case Intrinsic::aarch64_sve_cmplt_wide: return tryConvertSVEWideCompare(N, ISD::SETLT, DCI, DAG); case Intrinsic::aarch64_sve_cmple_wide: return tryConvertSVEWideCompare(N, ISD::SETLE, DCI, DAG); case Intrinsic::aarch64_sve_cmphs_wide: return tryConvertSVEWideCompare(N, ISD::SETUGE, DCI, DAG); case Intrinsic::aarch64_sve_cmphi_wide: return tryConvertSVEWideCompare(N, ISD::SETUGT, DCI, DAG); case Intrinsic::aarch64_sve_cmplo_wide: return tryConvertSVEWideCompare(N, ISD::SETULT, DCI, 
DAG); case Intrinsic::aarch64_sve_cmpls_wide: return tryConvertSVEWideCompare(N, ISD::SETULE, DCI, DAG); case Intrinsic::aarch64_sve_ptest_any: return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2), AArch64CC::ANY_ACTIVE); case Intrinsic::aarch64_sve_ptest_first: return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2), AArch64CC::FIRST_ACTIVE); case Intrinsic::aarch64_sve_ptest_last: return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2), AArch64CC::LAST_ACTIVE); } return SDValue(); } static SDValue performExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) { // If we see something like (zext (sabd (extract_high ...), (DUP ...))) then // we can convert that DUP into another extract_high (of a bigger DUP), which // helps the backend to decide that an sabdl2 would be useful, saving a real // extract_high operation. if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND && (N->getOperand(0).getOpcode() == ISD::ABDU || N->getOperand(0).getOpcode() == ISD::ABDS)) { SDNode *ABDNode = N->getOperand(0).getNode(); SDValue NewABD = tryCombineLongOpWithDup(Intrinsic::not_intrinsic, ABDNode, DCI, DAG); if (!NewABD.getNode()) return SDValue(); return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), NewABD); } return SDValue(); } static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St, SDValue SplatVal, unsigned NumVecElts) { assert(!St.isTruncatingStore() && "cannot split truncating vector store"); unsigned OrigAlignment = St.getAlignment(); unsigned EltOffset = SplatVal.getValueType().getSizeInBits() / 8; // Create scalar stores. This is at least as good as the code sequence for a // split unaligned store which is a dup.s, ext.b, and two stores. // Most of the time the three stores should be replaced by store pair // instructions (stp). 
SDLoc DL(&St); SDValue BasePtr = St.getBasePtr(); uint64_t BaseOffset = 0; const MachinePointerInfo &PtrInfo = St.getPointerInfo(); SDValue NewST1 = DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, PtrInfo, OrigAlignment, St.getMemOperand()->getFlags()); // As this in ISel, we will not merge this add which may degrade results. if (BasePtr->getOpcode() == ISD::ADD && isa(BasePtr->getOperand(1))) { BaseOffset = cast(BasePtr->getOperand(1))->getSExtValue(); BasePtr = BasePtr->getOperand(0); } unsigned Offset = EltOffset; while (--NumVecElts) { unsigned Alignment = MinAlign(OrigAlignment, Offset); SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, DAG.getConstant(BaseOffset + Offset, DL, MVT::i64)); NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr, PtrInfo.getWithOffset(Offset), Alignment, St.getMemOperand()->getFlags()); Offset += EltOffset; } return NewST1; } // Returns an SVE type that ContentTy can be trivially sign or zero extended // into. static MVT getSVEContainerType(EVT ContentTy) { assert(ContentTy.isSimple() && "No SVE containers for extended types"); switch (ContentTy.getSimpleVT().SimpleTy) { default: llvm_unreachable("No known SVE container for this MVT type"); case MVT::nxv2i8: case MVT::nxv2i16: case MVT::nxv2i32: case MVT::nxv2i64: case MVT::nxv2f32: case MVT::nxv2f64: return MVT::nxv2i64; case MVT::nxv4i8: case MVT::nxv4i16: case MVT::nxv4i32: case MVT::nxv4f32: return MVT::nxv4i32; case MVT::nxv8i8: case MVT::nxv8i16: case MVT::nxv8f16: case MVT::nxv8bf16: return MVT::nxv8i16; case MVT::nxv16i8: return MVT::nxv16i8; } } static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc) { SDLoc DL(N); EVT VT = N->getValueType(0); if (VT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock) return SDValue(); EVT ContainerVT = VT; if (ContainerVT.isInteger()) ContainerVT = getSVEContainerType(ContainerVT); SDVTList VTs = DAG.getVTList(ContainerVT, MVT::Other); SDValue Ops[] = { N->getOperand(0), // 
Chain N->getOperand(2), // Pg N->getOperand(3), // Base DAG.getValueType(VT) }; SDValue Load = DAG.getNode(Opc, DL, VTs, Ops); SDValue LoadChain = SDValue(Load.getNode(), 1); if (ContainerVT.isInteger() && (VT != ContainerVT)) Load = DAG.getNode(ISD::TRUNCATE, DL, VT, Load.getValue(0)); return DAG.getMergeValues({ Load, LoadChain }, DL); } static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) { SDLoc DL(N); EVT VT = N->getValueType(0); EVT PtrTy = N->getOperand(3).getValueType(); if (VT == MVT::nxv8bf16 && !static_cast(DAG.getSubtarget()).hasBF16()) return SDValue(); EVT LoadVT = VT; if (VT.isFloatingPoint()) LoadVT = VT.changeTypeToInteger(); auto *MINode = cast(N); SDValue PassThru = DAG.getConstant(0, DL, LoadVT); SDValue L = DAG.getMaskedLoad(LoadVT, DL, MINode->getChain(), MINode->getOperand(3), DAG.getUNDEF(PtrTy), MINode->getOperand(2), PassThru, MINode->getMemoryVT(), MINode->getMemOperand(), ISD::UNINDEXED, ISD::NON_EXTLOAD, false); if (VT.isFloatingPoint()) { SDValue Ops[] = { DAG.getNode(ISD::BITCAST, DL, VT, L), L.getValue(1) }; return DAG.getMergeValues(Ops, DL); } return L; } template static SDValue performLD1ReplicateCombine(SDNode *N, SelectionDAG &DAG) { static_assert(Opcode == AArch64ISD::LD1RQ_MERGE_ZERO || Opcode == AArch64ISD::LD1RO_MERGE_ZERO, "Unsupported opcode."); SDLoc DL(N); EVT VT = N->getValueType(0); if (VT == MVT::nxv8bf16 && !static_cast(DAG.getSubtarget()).hasBF16()) return SDValue(); EVT LoadVT = VT; if (VT.isFloatingPoint()) LoadVT = VT.changeTypeToInteger(); SDValue Ops[] = {N->getOperand(0), N->getOperand(2), N->getOperand(3)}; SDValue Load = DAG.getNode(Opcode, DL, {LoadVT, MVT::Other}, Ops); SDValue LoadChain = SDValue(Load.getNode(), 1); if (VT.isFloatingPoint()) Load = DAG.getNode(ISD::BITCAST, DL, VT, Load.getValue(0)); return DAG.getMergeValues({Load, LoadChain}, DL); } static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) { SDLoc DL(N); SDValue Data = N->getOperand(2); EVT DataVT = Data.getValueType(); 
EVT HwSrcVt = getSVEContainerType(DataVT); SDValue InputVT = DAG.getValueType(DataVT); if (DataVT == MVT::nxv8bf16 && !static_cast(DAG.getSubtarget()).hasBF16()) return SDValue(); if (DataVT.isFloatingPoint()) InputVT = DAG.getValueType(HwSrcVt); SDValue SrcNew; if (Data.getValueType().isFloatingPoint()) SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Data); else SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Data); SDValue Ops[] = { N->getOperand(0), // Chain SrcNew, N->getOperand(4), // Base N->getOperand(3), // Pg InputVT }; return DAG.getNode(AArch64ISD::ST1_PRED, DL, N->getValueType(0), Ops); } static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) { SDLoc DL(N); SDValue Data = N->getOperand(2); EVT DataVT = Data.getValueType(); EVT PtrTy = N->getOperand(4).getValueType(); if (DataVT == MVT::nxv8bf16 && !static_cast(DAG.getSubtarget()).hasBF16()) return SDValue(); if (DataVT.isFloatingPoint()) Data = DAG.getNode(ISD::BITCAST, DL, DataVT.changeTypeToInteger(), Data); auto *MINode = cast(N); return DAG.getMaskedStore(MINode->getChain(), DL, Data, MINode->getOperand(4), DAG.getUNDEF(PtrTy), MINode->getOperand(3), MINode->getMemoryVT(), MINode->getMemOperand(), ISD::UNINDEXED, false, false); } /// Replace a splat of zeros to a vector store by scalar stores of WZR/XZR. The /// load store optimizer pass will merge them to store pair stores. This should /// be better than a movi to create the vector zero followed by a vector store /// if the zero constant is not re-used, since one instructions and one register /// live range will be removed. /// /// For example, the final generated code should be: /// /// stp xzr, xzr, [x0] /// /// instead of: /// /// movi v0.2d, #0 /// str q0, [x0] /// static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St) { SDValue StVal = St.getValue(); EVT VT = StVal.getValueType(); // Avoid scalarizing zero splat stores for scalable vectors. 
if (VT.isScalableVector()) return SDValue(); // It is beneficial to scalarize a zero splat store for 2 or 3 i64 elements or // 2, 3 or 4 i32 elements. int NumVecElts = VT.getVectorNumElements(); if (!(((NumVecElts == 2 || NumVecElts == 3) && VT.getVectorElementType().getSizeInBits() == 64) || ((NumVecElts == 2 || NumVecElts == 3 || NumVecElts == 4) && VT.getVectorElementType().getSizeInBits() == 32))) return SDValue(); if (StVal.getOpcode() != ISD::BUILD_VECTOR) return SDValue(); // If the zero constant has more than one use then the vector store could be // better since the constant mov will be amortized and stp q instructions // should be able to be formed. if (!StVal.hasOneUse()) return SDValue(); // If the store is truncating then it's going down to i16 or smaller, which // means it can be implemented in a single store anyway. if (St.isTruncatingStore()) return SDValue(); // If the immediate offset of the address operand is too large for the stp // instruction, then bail out. if (DAG.isBaseWithConstantOffset(St.getBasePtr())) { int64_t Offset = St.getBasePtr()->getConstantOperandVal(1); if (Offset < -512 || Offset > 504) return SDValue(); } for (int I = 0; I < NumVecElts; ++I) { SDValue EltVal = StVal.getOperand(I); if (!isNullConstant(EltVal) && !isNullFPConstant(EltVal)) return SDValue(); } // Use a CopyFromReg WZR/XZR here to prevent // DAGCombiner::MergeConsecutiveStores from undoing this transformation. SDLoc DL(&St); unsigned ZeroReg; EVT ZeroVT; if (VT.getVectorElementType().getSizeInBits() == 32) { ZeroReg = AArch64::WZR; ZeroVT = MVT::i32; } else { ZeroReg = AArch64::XZR; ZeroVT = MVT::i64; } SDValue SplatVal = DAG.getCopyFromReg(DAG.getEntryNode(), DL, ZeroReg, ZeroVT); return splitStoreSplat(DAG, St, SplatVal, NumVecElts); } /// Replace a splat of a scalar to a vector store by scalar stores of the scalar /// value. The load store optimizer pass will merge them to store pair stores. 
/// This has better performance than a splat of the scalar followed by a split /// vector store. Even if the stores are not merged it is four stores vs a dup, /// followed by an ext.b and two stores. static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St) { SDValue StVal = St.getValue(); EVT VT = StVal.getValueType(); // Don't replace floating point stores, they possibly won't be transformed to // stp because of the store pair suppress pass. if (VT.isFloatingPoint()) return SDValue(); // We can express a splat as store pair(s) for 2 or 4 elements. unsigned NumVecElts = VT.getVectorNumElements(); if (NumVecElts != 4 && NumVecElts != 2) return SDValue(); // If the store is truncating then it's going down to i16 or smaller, which // means it can be implemented in a single store anyway. if (St.isTruncatingStore()) return SDValue(); // Check that this is a splat. // Make sure that each of the relevant vector element locations are inserted // to, i.e. 0 and 1 for v2i64 and 0, 1, 2, 3 for v4i32. std::bitset<4> IndexNotInserted((1 << NumVecElts) - 1); SDValue SplatVal; for (unsigned I = 0; I < NumVecElts; ++I) { // Check for insert vector elements. if (StVal.getOpcode() != ISD::INSERT_VECTOR_ELT) return SDValue(); // Check that same value is inserted at each vector element. if (I == 0) SplatVal = StVal.getOperand(1); else if (StVal.getOperand(1) != SplatVal) return SDValue(); // Check insert element index. ConstantSDNode *CIndex = dyn_cast(StVal.getOperand(2)); if (!CIndex) return SDValue(); uint64_t IndexVal = CIndex->getZExtValue(); if (IndexVal >= NumVecElts) return SDValue(); IndexNotInserted.reset(IndexVal); StVal = StVal.getOperand(0); } // Check that all vector element locations were inserted to. 
if (IndexNotInserted.any()) return SDValue(); return splitStoreSplat(DAG, St, SplatVal, NumVecElts); } static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget) { StoreSDNode *S = cast(N); if (S->isVolatile() || S->isIndexed()) return SDValue(); SDValue StVal = S->getValue(); EVT VT = StVal.getValueType(); if (!VT.isFixedLengthVector()) return SDValue(); // If we get a splat of zeros, convert this vector store to a store of // scalars. They will be merged into store pairs of xzr thereby removing one // instruction and one register. if (SDValue ReplacedZeroSplat = replaceZeroVectorStore(DAG, *S)) return ReplacedZeroSplat; // FIXME: The logic for deciding if an unaligned store should be split should // be included in TLI.allowsMisalignedMemoryAccesses(), and there should be // a call to that function here. if (!Subtarget->isMisaligned128StoreSlow()) return SDValue(); // Don't split at -Oz. if (DAG.getMachineFunction().getFunction().hasMinSize()) return SDValue(); // Don't split v2i64 vectors. Memcpy lowering produces those and splitting // those up regresses performance on micro-benchmarks and olden/bh. if (VT.getVectorNumElements() < 2 || VT == MVT::v2i64) return SDValue(); // Split unaligned 16B stores. They are terrible for performance. // Don't split stores with alignment of 1 or 2. Code that uses clang vector // extensions can use this to mark that it does not want splitting to happen // (by underspecifying alignment to be 1 or 2). Furthermore, the chance of // eliminating alignment hazards is only 1 in 8 for alignment of 2. if (VT.getSizeInBits() != 128 || S->getAlignment() >= 16 || S->getAlignment() <= 2) return SDValue(); // If we get a splat of a scalar convert this vector store to a store of // scalars. They will be merged into store pairs thereby removing two // instructions. 
if (SDValue ReplacedSplat = replaceSplatVectorStore(DAG, *S)) return ReplacedSplat; SDLoc DL(S); // Split VT into two. EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext()); unsigned NumElts = HalfVT.getVectorNumElements(); SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal, DAG.getConstant(0, DL, MVT::i64)); SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal, DAG.getConstant(NumElts, DL, MVT::i64)); SDValue BasePtr = S->getBasePtr(); SDValue NewST1 = DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(), S->getAlignment(), S->getMemOperand()->getFlags()); SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, DAG.getConstant(8, DL, MVT::i64)); return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr, S->getPointerInfo(), S->getAlignment(), S->getMemOperand()->getFlags()); } static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) { assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexepected Opcode!"); // splice(pg, op1, undef) -> op1 if (N->getOperand(2).isUndef()) return N->getOperand(1); return SDValue(); } static SDValue performUzpCombine(SDNode *N, SelectionDAG &DAG) { SDLoc DL(N); SDValue Op0 = N->getOperand(0); SDValue Op1 = N->getOperand(1); EVT ResVT = N->getValueType(0); // uzp1(unpklo(uzp1(x, y)), z) => uzp1(x, z) if (Op0.getOpcode() == AArch64ISD::UUNPKLO) { if (Op0.getOperand(0).getOpcode() == AArch64ISD::UZP1) { SDValue X = Op0.getOperand(0).getOperand(0); return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, X, Op1); } } // uzp1(x, unpkhi(uzp1(y, z))) => uzp1(x, z) if (Op1.getOpcode() == AArch64ISD::UUNPKHI) { if (Op1.getOperand(0).getOpcode() == AArch64ISD::UZP1) { SDValue Z = Op1.getOperand(0).getOperand(1); return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Op0, Z); } } return SDValue(); } static SDValue performGLD1Combine(SDNode *N, SelectionDAG &DAG) { unsigned Opc = N->getOpcode(); assert(((Opc >= AArch64ISD::GLD1_MERGE_ZERO && // unsigned gather loads 
Opc <= AArch64ISD::GLD1_IMM_MERGE_ZERO) || (Opc >= AArch64ISD::GLD1S_MERGE_ZERO && // signed gather loads Opc <= AArch64ISD::GLD1S_IMM_MERGE_ZERO)) && "Invalid opcode."); const bool Scaled = Opc == AArch64ISD::GLD1_SCALED_MERGE_ZERO || Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO; const bool Signed = Opc == AArch64ISD::GLD1S_MERGE_ZERO || Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO; const bool Extended = Opc == AArch64ISD::GLD1_SXTW_MERGE_ZERO || Opc == AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO || Opc == AArch64ISD::GLD1_UXTW_MERGE_ZERO || Opc == AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO; SDLoc DL(N); SDValue Chain = N->getOperand(0); SDValue Pg = N->getOperand(1); SDValue Base = N->getOperand(2); SDValue Offset = N->getOperand(3); SDValue Ty = N->getOperand(4); EVT ResVT = N->getValueType(0); const auto OffsetOpc = Offset.getOpcode(); const bool OffsetIsZExt = OffsetOpc == AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU; const bool OffsetIsSExt = OffsetOpc == AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU; // Fold sign/zero extensions of vector offsets into GLD1 nodes where possible. if (!Extended && (OffsetIsSExt || OffsetIsZExt)) { SDValue ExtPg = Offset.getOperand(0); VTSDNode *ExtFrom = cast(Offset.getOperand(2).getNode()); EVT ExtFromEVT = ExtFrom->getVT().getVectorElementType(); // If the predicate for the sign- or zero-extended offset is the // same as the predicate used for this load and the sign-/zero-extension // was from a 32-bits... if (ExtPg == Pg && ExtFromEVT == MVT::i32) { SDValue UnextendedOffset = Offset.getOperand(1); unsigned NewOpc = getGatherVecOpcode(Scaled, OffsetIsSExt, true); if (Signed) NewOpc = getSignExtendedGatherOpcode(NewOpc); return DAG.getNode(NewOpc, DL, {ResVT, MVT::Other}, {Chain, Pg, Base, UnextendedOffset, Ty}); } } return SDValue(); } /// Optimize a vector shift instruction and its operand if shifted out /// bits are not used. 
static SDValue performVectorShiftCombine(SDNode *N,
                                         const AArch64TargetLowering &TLI,
                                         TargetLowering::DAGCombinerInfo &DCI) {
  assert(N->getOpcode() == AArch64ISD::VASHR ||
         N->getOpcode() == AArch64ISD::VLSHR);

  SDValue Op = N->getOperand(0);
  unsigned OpScalarSize = Op.getScalarValueSizeInBits();

  unsigned ShiftImm = N->getConstantOperandVal(1);
  assert(OpScalarSize > ShiftImm && "Invalid shift imm");

  // The bits shifted out by a right shift are never observed, so the operand
  // only needs its high (OpScalarSize - ShiftImm) bits.
  APInt ShiftedOutBits = APInt::getLowBitsSet(OpScalarSize, ShiftImm);
  APInt DemandedMask = ~ShiftedOutBits;

  if (TLI.SimplifyDemandedBits(Op, DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}

/// Target-specific DAG combine function for post-increment LD1 (lane) and
/// post-increment LD1R.
static SDValue performPostLD1Combine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     bool IsLaneOp) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  if (VT.isScalableVector())
    return SDValue();

  unsigned LoadIdx = IsLaneOp ? 1 : 0;
  SDNode *LD = N->getOperand(LoadIdx).getNode();
  // If it is not LOAD, can not do such combine.
  if (LD->getOpcode() != ISD::LOAD)
    return SDValue();

  // The vector lane must be a constant in the LD1LANE opcode.
  SDValue Lane;
  if (IsLaneOp) {
    Lane = N->getOperand(2);
    auto *LaneC = dyn_cast<ConstantSDNode>(Lane);
    if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements())
      return SDValue();
  }

  LoadSDNode *LoadSDN = cast<LoadSDNode>(LD);
  EVT MemVT = LoadSDN->getMemoryVT();
  // Check if memory operand is the same type as the vector element.
  if (MemVT != VT.getVectorElementType())
    return SDValue();

  // Check if there are other uses. If so, do not combine as it will introduce
  // an extra load.
  for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); UI != UE;
       ++UI) {
    if (UI.getUse().getResNo() == 1) // Ignore uses of the chain result.
      continue;
    if (*UI != N)
      return SDValue();
  }

  SDValue Addr = LD->getOperand(1);
  SDValue Vector = N->getOperand(0);
  // Search for a use of the address operand that is an increment.
  for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), UE =
       Addr.getNode()->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User->getOpcode() != ISD::ADD
        || UI.getUse().getResNo() != Addr.getResNo())
      continue;

    // If the increment is a constant, it must match the memory ref size.
    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
    if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
      uint32_t IncVal = CInc->getZExtValue();
      unsigned NumBytes = VT.getScalarSizeInBits() / 8;
      if (IncVal != NumBytes)
        continue;
      Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
    }

    // To avoid cycle construction make sure that neither the load nor the add
    // are predecessors to each other or the Vector.
    SmallPtrSet<const SDNode *, 32> Visited;
    SmallVector<const SDNode *, 16> Worklist;
    Visited.insert(Addr.getNode());
    Worklist.push_back(User);
    Worklist.push_back(LD);
    Worklist.push_back(Vector.getNode());
    if (SDNode::hasPredecessorHelper(LD, Visited, Worklist) ||
        SDNode::hasPredecessorHelper(User, Visited, Worklist))
      continue;

    SmallVector<SDValue, 8> Ops;
    Ops.push_back(LD->getOperand(0));  // Chain
    if (IsLaneOp) {
      Ops.push_back(Vector);           // The vector to be inserted
      Ops.push_back(Lane);             // The lane to be inserted in the vector
    }
    Ops.push_back(Addr);
    Ops.push_back(Inc);

    EVT Tys[3] = { VT, MVT::i64, MVT::Other };
    SDVTList SDTys = DAG.getVTList(Tys);
    unsigned NewOp = IsLaneOp ? AArch64ISD::LD1LANEpost : AArch64ISD::LD1DUPpost;
    SDValue UpdN = DAG.getMemIntrinsicNode(NewOp, SDLoc(N), SDTys, Ops,
                                           MemVT,
                                           LoadSDN->getMemOperand());

    // Update the uses.
    SDValue NewResults[] = {
        SDValue(LD, 0),            // The result of load
        SDValue(UpdN.getNode(), 2) // Chain
    };
    DCI.CombineTo(LD, NewResults);
    DCI.CombineTo(N, SDValue(UpdN.getNode(), 0));     // Dup/Inserted Result
    DCI.CombineTo(User, SDValue(UpdN.getNode(), 1));  // Write back register

    break;
  }
  return SDValue();
}

/// Simplify ``Addr`` given that the top byte of it is ignored by HW during
/// address translation.
static bool performTBISimplification(SDValue Addr, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) { APInt DemandedMask = APInt::getLowBitsSet(64, 56); KnownBits Known; TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), !DCI.isBeforeLegalizeOps()); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) { DCI.CommitTargetLoweringOpt(TLO); return true; } return false; } static SDValue foldTruncStoreOfExt(SelectionDAG &DAG, SDNode *N) { assert((N->getOpcode() == ISD::STORE || N->getOpcode() == ISD::MSTORE) && "Expected STORE dag node in input!"); if (auto Store = dyn_cast(N)) { if (!Store->isTruncatingStore() || Store->isIndexed()) return SDValue(); SDValue Ext = Store->getValue(); auto ExtOpCode = Ext.getOpcode(); if (ExtOpCode != ISD::ZERO_EXTEND && ExtOpCode != ISD::SIGN_EXTEND && ExtOpCode != ISD::ANY_EXTEND) return SDValue(); SDValue Orig = Ext->getOperand(0); if (Store->getMemoryVT() != Orig->getValueType(0)) return SDValue(); return DAG.getStore(Store->getChain(), SDLoc(Store), Orig, Store->getBasePtr(), Store->getPointerInfo(), Store->getAlign()); } return SDValue(); } static SDValue performSTORECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget) { if (SDValue Split = splitStores(N, DCI, DAG, Subtarget)) return Split; if (Subtarget->supportsAddressTopByteIgnored() && performTBISimplification(N->getOperand(2), DCI, DAG)) return SDValue(N, 0); if (SDValue Store = foldTruncStoreOfExt(DAG, N)) return Store; return SDValue(); } /// Target-specific DAG combine function for NEON load/store intrinsics /// to merge base address updates. 
static SDValue performNEONPostLDSTCombine(SDNode *N,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          SelectionDAG &DAG) {
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  unsigned AddrOpIdx = N->getNumOperands() - 1;
  SDValue Addr = N->getOperand(AddrOpIdx);

  // Search for a use of the address operand that is an increment.
  for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
       UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User->getOpcode() != ISD::ADD ||
        UI.getUse().getResNo() != Addr.getResNo())
      continue;

    // Check that the add is independent of the load/store.  Otherwise, folding
    // it would create a cycle.
    SmallPtrSet<const SDNode *, 32> Visited;
    SmallVector<const SDNode *, 16> Worklist;
    Visited.insert(Addr.getNode());
    Worklist.push_back(N);
    Worklist.push_back(User);
    if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
        SDNode::hasPredecessorHelper(User, Visited, Worklist))
      continue;

    // Find the new opcode for the updating load/store.
    bool IsStore = false;
    bool IsLaneOp = false;
    bool IsDupOp = false;
    unsigned NewOpc = 0;
    unsigned NumVecs = 0;
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default: llvm_unreachable("unexpected intrinsic for Neon base update");
    case Intrinsic::aarch64_neon_ld2:       NewOpc = AArch64ISD::LD2post;
      NumVecs = 2; break;
    case Intrinsic::aarch64_neon_ld3:       NewOpc = AArch64ISD::LD3post;
      NumVecs = 3; break;
    case Intrinsic::aarch64_neon_ld4:       NewOpc = AArch64ISD::LD4post;
      NumVecs = 4; break;
    case Intrinsic::aarch64_neon_st2:       NewOpc = AArch64ISD::ST2post;
      NumVecs = 2; IsStore = true; break;
    case Intrinsic::aarch64_neon_st3:       NewOpc = AArch64ISD::ST3post;
      NumVecs = 3; IsStore = true; break;
    case Intrinsic::aarch64_neon_st4:       NewOpc = AArch64ISD::ST4post;
      NumVecs = 4; IsStore = true; break;
    case Intrinsic::aarch64_neon_ld1x2:     NewOpc = AArch64ISD::LD1x2post;
      NumVecs = 2; break;
    case Intrinsic::aarch64_neon_ld1x3:     NewOpc = AArch64ISD::LD1x3post;
      NumVecs = 3; break;
    case Intrinsic::aarch64_neon_ld1x4:     NewOpc = AArch64ISD::LD1x4post;
      NumVecs = 4; break;
    case Intrinsic::aarch64_neon_st1x2:     NewOpc = AArch64ISD::ST1x2post;
      NumVecs = 2; IsStore = true; break;
    case Intrinsic::aarch64_neon_st1x3:     NewOpc = AArch64ISD::ST1x3post;
      NumVecs = 3; IsStore = true; break;
    case Intrinsic::aarch64_neon_st1x4:     NewOpc = AArch64ISD::ST1x4post;
      NumVecs = 4; IsStore = true; break;
    case Intrinsic::aarch64_neon_ld2r:      NewOpc = AArch64ISD::LD2DUPpost;
      NumVecs = 2; IsDupOp = true; break;
    case Intrinsic::aarch64_neon_ld3r:      NewOpc = AArch64ISD::LD3DUPpost;
      NumVecs = 3; IsDupOp = true; break;
    case Intrinsic::aarch64_neon_ld4r:      NewOpc = AArch64ISD::LD4DUPpost;
      NumVecs = 4; IsDupOp = true; break;
    case Intrinsic::aarch64_neon_ld2lane:   NewOpc = AArch64ISD::LD2LANEpost;
      NumVecs = 2; IsLaneOp = true; break;
    case Intrinsic::aarch64_neon_ld3lane:   NewOpc = AArch64ISD::LD3LANEpost;
      NumVecs = 3; IsLaneOp = true; break;
    case Intrinsic::aarch64_neon_ld4lane:   NewOpc = AArch64ISD::LD4LANEpost;
      NumVecs = 4; IsLaneOp = true; break;
    case Intrinsic::aarch64_neon_st2lane:   NewOpc = AArch64ISD::ST2LANEpost;
      NumVecs = 2; IsStore = true; IsLaneOp = true; break;
    case Intrinsic::aarch64_neon_st3lane:   NewOpc = AArch64ISD::ST3LANEpost;
      NumVecs = 3; IsStore = true; IsLaneOp = true; break;
    case Intrinsic::aarch64_neon_st4lane:   NewOpc = AArch64ISD::ST4LANEpost;
      NumVecs = 4; IsStore = true; IsLaneOp = true; break;
    }

    EVT VecTy;
    if (IsStore)
      VecTy = N->getOperand(2).getValueType();
    else
      VecTy = N->getValueType(0);

    // If the increment is a constant, it must match the memory ref size.
    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
    if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
      uint32_t IncVal = CInc->getZExtValue();
      unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
      if (IsLaneOp || IsDupOp)
        NumBytes /= VecTy.getVectorNumElements();
      if (IncVal != NumBytes)
        continue;
      Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
    }
    SmallVector<SDValue, 8> Ops;
    Ops.push_back(N->getOperand(0)); // Incoming chain
    // Load lane and store have vector list as input.
    if (IsLaneOp || IsStore)
      for (unsigned i = 2; i < AddrOpIdx; ++i)
        Ops.push_back(N->getOperand(i));
    Ops.push_back(Addr); // Base register
    Ops.push_back(Inc);

    // Return Types.
    EVT Tys[6];
    unsigned NumResultVecs = (IsStore ? 0 : NumVecs);
    unsigned n;
    for (n = 0; n < NumResultVecs; ++n)
      Tys[n] = VecTy;
    Tys[n++] = MVT::i64;  // Type of write back register
    Tys[n] = MVT::Other;  // Type of the chain
    SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2));

    MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
    SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops,
                                           MemInt->getMemoryVT(),
                                           MemInt->getMemOperand());

    // Update the uses.
    std::vector<SDValue> NewResults;
    for (unsigned i = 0; i < NumResultVecs; ++i) {
      NewResults.push_back(SDValue(UpdN.getNode(), i));
    }
    NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1));
    DCI.CombineTo(N, NewResults);
    DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));

    break;
  }
  return SDValue();
}

// Checks to see if the value is the prescribed width and returns information
// about its extension mode.
static bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) { ExtType = ISD::NON_EXTLOAD; switch(V.getNode()->getOpcode()) { default: return false; case ISD::LOAD: { LoadSDNode *LoadNode = cast(V.getNode()); if ((LoadNode->getMemoryVT() == MVT::i8 && width == 8) || (LoadNode->getMemoryVT() == MVT::i16 && width == 16)) { ExtType = LoadNode->getExtensionType(); return true; } return false; } case ISD::AssertSext: { VTSDNode *TypeNode = cast(V.getNode()->getOperand(1)); if ((TypeNode->getVT() == MVT::i8 && width == 8) || (TypeNode->getVT() == MVT::i16 && width == 16)) { ExtType = ISD::SEXTLOAD; return true; } return false; } case ISD::AssertZext: { VTSDNode *TypeNode = cast(V.getNode()->getOperand(1)); if ((TypeNode->getVT() == MVT::i8 && width == 8) || (TypeNode->getVT() == MVT::i16 && width == 16)) { ExtType = ISD::ZEXTLOAD; return true; } return false; } case ISD::Constant: case ISD::TargetConstant: { return std::abs(cast(V.getNode())->getSExtValue()) < 1LL << (width - 1); } } return true; } // This function does a whole lot of voodoo to determine if the tests are // equivalent without and with a mask. Essentially what happens is that given a // DAG resembling: // // +-------------+ +-------------+ +-------------+ +-------------+ // | Input | | AddConstant | | CompConstant| | CC | // +-------------+ +-------------+ +-------------+ +-------------+ // | | | | // V V | +----------+ // +-------------+ +----+ | | // | ADD | |0xff| | | // +-------------+ +----+ | | // | | | | // V V | | // +-------------+ | | // | AND | | | // +-------------+ | | // | | | // +-----+ | | // | | | // V V V // +-------------+ // | CMP | // +-------------+ // // The AND node may be safely removed for some combinations of inputs. 
In // particular we need to take into account the extension type of the Input, // the exact values of AddConstant, CompConstant, and CC, along with the nominal // width of the input (this can work for any width inputs, the above graph is // specific to 8 bits. // // The specific equations were worked out by generating output tables for each // AArch64CC value in terms of and AddConstant (w1), CompConstant(w2). The // problem was simplified by working with 4 bit inputs, which means we only // needed to reason about 24 distinct bit patterns: 8 patterns unique to zero // extension (8,15), 8 patterns unique to sign extensions (-8,-1), and 8 // patterns present in both extensions (0,7). For every distinct set of // AddConstant and CompConstants bit patterns we can consider the masked and // unmasked versions to be equivalent if the result of this function is true for // all 16 distinct bit patterns of for the current extension type of Input (w0). // // sub w8, w0, w1 // and w10, w8, #0x0f // cmp w8, w2 // cset w9, AArch64CC // cmp w10, w2 // cset w11, AArch64CC // cmp w9, w11 // cset w0, eq // ret // // Since the above function shows when the outputs are equivalent it defines // when it is safe to remove the AND. Unfortunately it only runs on AArch64 and // would be expensive to run during compiles. The equations below were written // in a test harness that confirmed they gave equivalent outputs to the above // for all inputs function, so they can be used determine if the removal is // legal instead. // // isEquivalentMaskless() is the code for testing if the AND can be removed // factored out of the DAG recognition as the DAG can take several forms. static bool isEquivalentMaskless(unsigned CC, unsigned width, ISD::LoadExtType ExtType, int AddConstant, int CompConstant) { // By being careful about our equations and only writing the in term // symbolic values and well known constants (0, 1, -1, MaxUInt) we can // make them generally applicable to all bit widths. 
int MaxUInt = (1 << width); // For the purposes of these comparisons sign extending the type is // equivalent to zero extending the add and displacing it by half the integer // width. Provided we are careful and make sure our equations are valid over // the whole range we can just adjust the input and avoid writing equations // for sign extended inputs. if (ExtType == ISD::SEXTLOAD) AddConstant -= (1 << (width-1)); switch(CC) { case AArch64CC::LE: case AArch64CC::GT: if ((AddConstant == 0) || (CompConstant == MaxUInt - 1 && AddConstant < 0) || (AddConstant >= 0 && CompConstant < 0) || (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant)) return true; break; case AArch64CC::LT: case AArch64CC::GE: if ((AddConstant == 0) || (AddConstant >= 0 && CompConstant <= 0) || (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant)) return true; break; case AArch64CC::HI: case AArch64CC::LS: if ((AddConstant >= 0 && CompConstant < 0) || (AddConstant <= 0 && CompConstant >= -1 && CompConstant < AddConstant + MaxUInt)) return true; break; case AArch64CC::PL: case AArch64CC::MI: if ((AddConstant == 0) || (AddConstant > 0 && CompConstant <= 0) || (AddConstant < 0 && CompConstant <= AddConstant)) return true; break; case AArch64CC::LO: case AArch64CC::HS: if ((AddConstant >= 0 && CompConstant <= 0) || (AddConstant <= 0 && CompConstant >= 0 && CompConstant <= AddConstant + MaxUInt)) return true; break; case AArch64CC::EQ: case AArch64CC::NE: if ((AddConstant > 0 && CompConstant < 0) || (AddConstant < 0 && CompConstant >= 0 && CompConstant < AddConstant + MaxUInt) || (AddConstant >= 0 && CompConstant >= 0 && CompConstant >= AddConstant) || (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant)) return true; break; case AArch64CC::VS: case AArch64CC::VC: case AArch64CC::AL: case AArch64CC::NV: return true; case AArch64CC::Invalid: break; } return false; } static SDValue performCONDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, 
SelectionDAG &DAG, unsigned CCIndex, unsigned CmpIndex) { unsigned CC = cast(N->getOperand(CCIndex))->getSExtValue(); SDNode *SubsNode = N->getOperand(CmpIndex).getNode(); unsigned CondOpcode = SubsNode->getOpcode(); if (CondOpcode != AArch64ISD::SUBS) return SDValue(); // There is a SUBS feeding this condition. Is it fed by a mask we can // use? SDNode *AndNode = SubsNode->getOperand(0).getNode(); unsigned MaskBits = 0; if (AndNode->getOpcode() != ISD::AND) return SDValue(); if (ConstantSDNode *CN = dyn_cast(AndNode->getOperand(1))) { uint32_t CNV = CN->getZExtValue(); if (CNV == 255) MaskBits = 8; else if (CNV == 65535) MaskBits = 16; } if (!MaskBits) return SDValue(); SDValue AddValue = AndNode->getOperand(0); if (AddValue.getOpcode() != ISD::ADD) return SDValue(); // The basic dag structure is correct, grab the inputs and validate them. SDValue AddInputValue1 = AddValue.getNode()->getOperand(0); SDValue AddInputValue2 = AddValue.getNode()->getOperand(1); SDValue SubsInputValue = SubsNode->getOperand(1); // The mask is present and the provenance of all the values is a smaller type, // lets see if the mask is superfluous. if (!isa(AddInputValue2.getNode()) || !isa(SubsInputValue.getNode())) return SDValue(); ISD::LoadExtType ExtType; if (!checkValueWidth(SubsInputValue, MaskBits, ExtType) || !checkValueWidth(AddInputValue2, MaskBits, ExtType) || !checkValueWidth(AddInputValue1, MaskBits, ExtType) ) return SDValue(); if(!isEquivalentMaskless(CC, MaskBits, ExtType, cast(AddInputValue2.getNode())->getSExtValue(), cast(SubsInputValue.getNode())->getSExtValue())) return SDValue(); // The AND is not necessary, remove it. SDVTList VTs = DAG.getVTList(SubsNode->getValueType(0), SubsNode->getValueType(1)); SDValue Ops[] = { AddValue, SubsNode->getOperand(1) }; SDValue NewValue = DAG.getNode(CondOpcode, SDLoc(SubsNode), VTs, Ops); DAG.ReplaceAllUsesWith(SubsNode, NewValue.getNode()); return SDValue(N, 0); } // Optimize compare with zero and branch. 
static SDValue performBRCONDCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
  // will not be produced, as they are conditional branch instructions that do
  // not set flags.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    return SDValue();

  if (SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3))
    N = NV.getNode();
  SDValue Chain = N->getOperand(0);
  SDValue Dest = N->getOperand(1);
  SDValue CCVal = N->getOperand(2);
  SDValue Cmp = N->getOperand(3);

  assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
  unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
  if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
    return SDValue();

  unsigned CmpOpc = Cmp.getOpcode();
  if (CmpOpc != AArch64ISD::ADDS && CmpOpc != AArch64ISD::SUBS)
    return SDValue();

  // Only attempt folding if there is only one use of the flag and no use of the
  // value.
  if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1))
    return SDValue();

  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);

  assert(LHS.getValueType() == RHS.getValueType() &&
         "Expected the value type to be the same for both operands!");
  if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
    return SDValue();

  // Canonicalize the zero to the RHS so only one form needs handling below.
  if (isNullConstant(LHS))
    std::swap(LHS, RHS);

  if (!isNullConstant(RHS))
    return SDValue();

  if (LHS.getOpcode() == ISD::SHL || LHS.getOpcode() == ISD::SRA ||
      LHS.getOpcode() == ISD::SRL)
    return SDValue();

  // Fold the compare into the branch instruction.
  SDValue BR;
  if (CC == AArch64CC::EQ)
    BR = DAG.getNode(AArch64ISD::CBZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
  else
    BR = DAG.getNode(AArch64ISD::CBNZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);

  // Do not add new nodes to DAG combiner worklist.
  DCI.CombineTo(N, BR, false);

  return SDValue();
}

// Optimize CSEL instructions
static SDValue performCSELCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  SelectionDAG &DAG) {
  // CSEL x, x, cc -> x
  if (N->getOperand(0) == N->getOperand(1))
    return N->getOperand(0);

  return performCONDCombine(N, DCI, DAG, 2, 3);
}

static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::SETCC && "Unexpected opcode!");
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();

  // setcc (csel 0, 1, cond, X), 1, ne ==> csel 0, 1, !cond, X
  if (Cond == ISD::SETNE && isOneConstant(RHS) &&
      LHS->getOpcode() == AArch64ISD::CSEL &&
      isNullConstant(LHS->getOperand(0)) && isOneConstant(LHS->getOperand(1)) &&
      LHS->hasOneUse()) {
    SDLoc DL(N);

    // Invert CSEL's condition.
    auto *OpCC = cast<ConstantSDNode>(LHS.getOperand(2));
    auto OldCond = static_cast<AArch64CC::CondCode>(OpCC->getZExtValue());
    auto NewCond = getInvertedCondCode(OldCond);

    // csel 0, 1, !cond, X
    SDValue CSEL =
        DAG.getNode(AArch64ISD::CSEL, DL, LHS.getValueType(),
                    LHS.getOperand(0), LHS.getOperand(1),
                    DAG.getConstant(NewCond, DL, MVT::i32), LHS.getOperand(3));
    return DAG.getZExtOrTrunc(CSEL, DL, N->getValueType(0));
  }

  return SDValue();
}

static SDValue performSetccMergeZeroCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
         "Unexpected opcode!");
  SDValue Pred = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();

  //    setcc_merge_zero pred
  //      (sign_extend (setcc_merge_zero ... pred ...)), 0, ne
  //    => inner setcc_merge_zero
  if (Cond == ISD::SETNE && isZerosVector(RHS.getNode()) &&
      LHS->getOpcode() == ISD::SIGN_EXTEND &&
      LHS->getOperand(0)->getValueType(0) == N->getValueType(0) &&
      LHS->getOperand(0)->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
      LHS->getOperand(0)->getOperand(0) == Pred)
    return LHS->getOperand(0);

  return SDValue();
}

// Optimize some simple tbz/tbnz cases.  Returns the new operand and bit to test
// as well as whether the test should be inverted.  This code is required to
// catch these cases (as opposed to standard dag combines) because
// AArch64ISD::TBZ is matched during legalization.
static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert,
                                 SelectionDAG &DAG) {

  if (!Op->hasOneUse())
    return Op;

  // We don't handle undef/constant-fold cases below, as they should have
  // already been taken care of (e.g. and of 0, test of undefined shifted bits,
  // etc.)

  // (tbz (trunc x), b) -> (tbz x, b)
  // This case is just here to enable more of the below cases to be caught.
  if (Op->getOpcode() == ISD::TRUNCATE &&
      Bit < Op->getValueType(0).getSizeInBits()) {
    return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
  }

  // (tbz (any_ext x), b) -> (tbz x, b) if we don't use the extended bits.
  if (Op->getOpcode() == ISD::ANY_EXTEND &&
      Bit < Op->getOperand(0).getValueSizeInBits()) {
    return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
  }

  if (Op->getNumOperands() != 2)
    return Op;

  auto *C = dyn_cast<ConstantSDNode>(Op->getOperand(1));
  if (!C)
    return Op;

  switch (Op->getOpcode()) {
  default:
    return Op;

  // (tbz (and x, m), b) -> (tbz x, b)
  case ISD::AND:
    if ((C->getZExtValue() >> Bit) & 1)
      return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
    return Op;

  // (tbz (shl x, c), b) -> (tbz x, b-c)
  case ISD::SHL:
    if (C->getZExtValue() <= Bit &&
        (Bit - C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
      Bit = Bit - C->getZExtValue();
      return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
    }
    return Op;

  // (tbz (sra x, c), b) -> (tbz x, b+c) or (tbz x, msb) if b+c is > # bits in x
  case ISD::SRA:
    Bit = Bit + C->getZExtValue();
    if (Bit >= Op->getValueType(0).getSizeInBits())
      Bit = Op->getValueType(0).getSizeInBits() - 1;
    return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);

  // (tbz (srl x, c), b) -> (tbz x, b+c)
  case ISD::SRL:
    if ((Bit + C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
      Bit = Bit + C->getZExtValue();
      return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
    }
    return Op;

  // (tbz (xor x, -1), b) -> (tbnz x, b)
  case ISD::XOR:
    if ((C->getZExtValue() >> Bit) & 1)
      Invert = !Invert;
    return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
  }
}

// Optimize test single bit zero/non-zero and branch.
static SDValue performTBZCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) { unsigned Bit = cast(N->getOperand(2))->getZExtValue(); bool Invert = false; SDValue TestSrc = N->getOperand(1); SDValue NewTestSrc = getTestBitOperand(TestSrc, Bit, Invert, DAG); if (TestSrc == NewTestSrc) return SDValue(); unsigned NewOpc = N->getOpcode(); if (Invert) { if (NewOpc == AArch64ISD::TBZ) NewOpc = AArch64ISD::TBNZ; else { assert(NewOpc == AArch64ISD::TBNZ); NewOpc = AArch64ISD::TBZ; } } SDLoc DL(N); return DAG.getNode(NewOpc, DL, MVT::Other, N->getOperand(0), NewTestSrc, DAG.getConstant(Bit, DL, MVT::i64), N->getOperand(3)); } // vselect (v1i1 setcc) -> // vselect (v1iXX setcc) (XX is the size of the compared operand type) // FIXME: Currently the type legalizer can't handle VSELECT having v1i1 as // condition. If it can legalize "VSELECT v1i1" correctly, no need to combine // such VSELECT. static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) { SDValue N0 = N->getOperand(0); EVT CCVT = N0.getValueType(); // Check for sign pattern (VSELECT setgt, iN lhs, -1, 1, -1) and transform // into (OR (ASR lhs, N-1), 1), which requires less instructions for the // supported types. 
SDValue SetCC = N->getOperand(0); if (SetCC.getOpcode() == ISD::SETCC && SetCC.getOperand(2) == DAG.getCondCode(ISD::SETGT)) { SDValue CmpLHS = SetCC.getOperand(0); EVT VT = CmpLHS.getValueType(); SDNode *CmpRHS = SetCC.getOperand(1).getNode(); SDNode *SplatLHS = N->getOperand(1).getNode(); SDNode *SplatRHS = N->getOperand(2).getNode(); APInt SplatLHSVal; if (CmpLHS.getValueType() == N->getOperand(1).getValueType() && VT.isSimple() && is_contained( makeArrayRef({MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32, MVT::v4i32, MVT::v2i64}), VT.getSimpleVT().SimpleTy) && ISD::isConstantSplatVector(SplatLHS, SplatLHSVal) && SplatLHSVal.isOneValue() && ISD::isConstantSplatVectorAllOnes(CmpRHS) && ISD::isConstantSplatVectorAllOnes(SplatRHS)) { unsigned NumElts = VT.getVectorNumElements(); SmallVector Ops( NumElts, DAG.getConstant(VT.getScalarSizeInBits() - 1, SDLoc(N), VT.getScalarType())); SDValue Val = DAG.getBuildVector(VT, SDLoc(N), Ops); auto Shift = DAG.getNode(ISD::SRA, SDLoc(N), VT, CmpLHS, Val); auto Or = DAG.getNode(ISD::OR, SDLoc(N), VT, Shift, N->getOperand(1)); return Or; } } if (N0.getOpcode() != ISD::SETCC || CCVT.getVectorElementCount() != ElementCount::getFixed(1) || CCVT.getVectorElementType() != MVT::i1) return SDValue(); EVT ResVT = N->getValueType(0); EVT CmpVT = N0.getOperand(0).getValueType(); // Only combine when the result type is of the same size as the compared // operands. if (ResVT.getSizeInBits() != CmpVT.getSizeInBits()) return SDValue(); SDValue IfTrue = N->getOperand(1); SDValue IfFalse = N->getOperand(2); SetCC = DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(), N0.getOperand(0), N0.getOperand(1), cast(N0.getOperand(2))->get()); return DAG.getNode(ISD::VSELECT, SDLoc(N), ResVT, SetCC, IfTrue, IfFalse); } /// A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with /// the compare-mask instructions rather than going via NZCV, even if LHS and /// RHS are really scalar. 
This replaces any scalar setcc in the above pattern /// with a vector one followed by a DUP shuffle on the result. static SDValue performSelectCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { SelectionDAG &DAG = DCI.DAG; SDValue N0 = N->getOperand(0); EVT ResVT = N->getValueType(0); if (N0.getOpcode() != ISD::SETCC) return SDValue(); if (ResVT.isScalableVector()) return SDValue(); // Make sure the SETCC result is either i1 (initial DAG), or i32, the lowered // scalar SetCCResultType. We also don't expect vectors, because we assume // that selects fed by vector SETCCs are canonicalized to VSELECT. assert((N0.getValueType() == MVT::i1 || N0.getValueType() == MVT::i32) && "Scalar-SETCC feeding SELECT has unexpected result type!"); // If NumMaskElts == 0, the comparison is larger than select result. The // largest real NEON comparison is 64-bits per lane, which means the result is // at most 32-bits and an illegal vector. Just bail out for now. EVT SrcVT = N0.getOperand(0).getValueType(); // Don't try to do this optimization when the setcc itself has i1 operands. // There are no legal vectors of i1, so this would be pointless. if (SrcVT == MVT::i1) return SDValue(); int NumMaskElts = ResVT.getSizeInBits() / SrcVT.getSizeInBits(); if (!ResVT.isVector() || NumMaskElts == 0) return SDValue(); SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumMaskElts); EVT CCVT = SrcVT.changeVectorElementTypeToInteger(); // Also bail out if the vector CCVT isn't the same size as ResVT. // This can happen if the SETCC operand size doesn't divide the ResVT size // (e.g., f64 vs v3f32). if (CCVT.getSizeInBits() != ResVT.getSizeInBits()) return SDValue(); // Make sure we didn't create illegal types, if we're not supposed to. assert(DCI.isBeforeLegalize() || DAG.getTargetLoweringInfo().isTypeLegal(SrcVT)); // First perform a vector comparison, where lane 0 is the one we're interested // in. 
SDLoc DL(N0); SDValue LHS = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0)); SDValue RHS = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(1)); SDValue SetCC = DAG.getNode(ISD::SETCC, DL, CCVT, LHS, RHS, N0.getOperand(2)); // Now duplicate the comparison mask we want across all other lanes. SmallVector DUPMask(CCVT.getVectorNumElements(), 0); SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask); Mask = DAG.getNode(ISD::BITCAST, DL, ResVT.changeVectorElementTypeToInteger(), Mask); return DAG.getSelect(DL, ResVT, Mask, N->getOperand(1), N->getOperand(2)); } /// Get rid of unnecessary NVCASTs (that don't change the type). static SDValue performNVCASTCombine(SDNode *N) { if (N->getValueType(0) == N->getOperand(0).getValueType()) return N->getOperand(0); return SDValue(); } // If all users of the globaladdr are of the form (globaladdr + constant), find // the smallest constant, fold it into the globaladdr's offset and rewrite the // globaladdr as (globaladdr + constant) - constant. static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget, const TargetMachine &TM) { auto *GN = cast(N); if (Subtarget->ClassifyGlobalReference(GN->getGlobal(), TM) != AArch64II::MO_NO_FLAG) return SDValue(); uint64_t MinOffset = -1ull; for (SDNode *N : GN->uses()) { if (N->getOpcode() != ISD::ADD) return SDValue(); auto *C = dyn_cast(N->getOperand(0)); if (!C) C = dyn_cast(N->getOperand(1)); if (!C) return SDValue(); MinOffset = std::min(MinOffset, C->getZExtValue()); } uint64_t Offset = MinOffset + GN->getOffset(); // Require that the new offset is larger than the existing one. Otherwise, we // can end up oscillating between two possible DAGs, for example, // (add (add globaladdr + 10, -1), 1) and (add globaladdr + 9, 1). if (Offset <= uint64_t(GN->getOffset())) return SDValue(); // Check whether folding this offset is legal. 
It must not go out of bounds of // the referenced object to avoid violating the code model, and must be // smaller than 2^21 because this is the largest offset expressible in all // object formats. // // This check also prevents us from folding negative offsets, which will end // up being treated in the same way as large positive ones. They could also // cause code model violations, and aren't really common enough to matter. if (Offset >= (1 << 21)) return SDValue(); const GlobalValue *GV = GN->getGlobal(); Type *T = GV->getValueType(); if (!T->isSized() || Offset > GV->getParent()->getDataLayout().getTypeAllocSize(T)) return SDValue(); SDLoc DL(GN); SDValue Result = DAG.getGlobalAddress(GV, DL, MVT::i64, Offset); return DAG.getNode(ISD::SUB, DL, MVT::i64, Result, DAG.getConstant(MinOffset, DL, MVT::i64)); } // Turns the vector of indices into a vector of byte offstes by scaling Offset // by (BitWidth / 8). static SDValue getScaledOffsetForBitWidth(SelectionDAG &DAG, SDValue Offset, SDLoc DL, unsigned BitWidth) { assert(Offset.getValueType().isScalableVector() && "This method is only for scalable vectors of offsets"); SDValue Shift = DAG.getConstant(Log2_32(BitWidth / 8), DL, MVT::i64); SDValue SplatShift = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Shift); return DAG.getNode(ISD::SHL, DL, MVT::nxv2i64, Offset, SplatShift); } /// Check if the value of \p OffsetInBytes can be used as an immediate for /// the gather load/prefetch and scatter store instructions with vector base and /// immediate offset addressing mode: /// /// [.[S|D]{, #}] /// /// where = sizeof() * k, for k = 0, 1, ..., 31. inline static bool isValidImmForSVEVecImmAddrMode(unsigned OffsetInBytes, unsigned ScalarSizeInBytes) { // The immediate is not a multiple of the scalar size. if (OffsetInBytes % ScalarSizeInBytes) return false; // The immediate is out of range. 
if (OffsetInBytes / ScalarSizeInBytes > 31) return false; return true; } /// Check if the value of \p Offset represents a valid immediate for the SVE /// gather load/prefetch and scatter store instructiona with vector base and /// immediate offset addressing mode: /// /// [.[S|D]{, #}] /// /// where = sizeof() * k, for k = 0, 1, ..., 31. static bool isValidImmForSVEVecImmAddrMode(SDValue Offset, unsigned ScalarSizeInBytes) { ConstantSDNode *OffsetConst = dyn_cast(Offset.getNode()); return OffsetConst && isValidImmForSVEVecImmAddrMode( OffsetConst->getZExtValue(), ScalarSizeInBytes); } static SDValue performScatterStoreCombine(SDNode *N, SelectionDAG &DAG, unsigned Opcode, bool OnlyPackedOffsets = true) { const SDValue Src = N->getOperand(2); const EVT SrcVT = Src->getValueType(0); assert(SrcVT.isScalableVector() && "Scatter stores are only possible for SVE vectors"); SDLoc DL(N); MVT SrcElVT = SrcVT.getVectorElementType().getSimpleVT(); // Make sure that source data will fit into an SVE register if (SrcVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock) return SDValue(); // For FPs, ACLE only supports _packed_ single and double precision types. if (SrcElVT.isFloatingPoint()) if ((SrcVT != MVT::nxv4f32) && (SrcVT != MVT::nxv2f64)) return SDValue(); // Depending on the addressing mode, this is either a pointer or a vector of // pointers (that fits into one register) SDValue Base = N->getOperand(4); // Depending on the addressing mode, this is either a single offset or a // vector of offsets (that fits into one register) SDValue Offset = N->getOperand(5); // For "scalar + vector of indices", just scale the indices. This only // applies to non-temporal scatters because there's no instruction that takes // indicies. 
if (Opcode == AArch64ISD::SSTNT1_INDEX_PRED) { Offset = getScaledOffsetForBitWidth(DAG, Offset, DL, SrcElVT.getSizeInBits()); Opcode = AArch64ISD::SSTNT1_PRED; } // In the case of non-temporal gather loads there's only one SVE instruction // per data-size: "scalar + vector", i.e. // * stnt1{b|h|w|d} { z0.s }, p0/z, [z0.s, x0] // Since we do have intrinsics that allow the arguments to be in a different // order, we may need to swap them to match the spec. if (Opcode == AArch64ISD::SSTNT1_PRED && Offset.getValueType().isVector()) std::swap(Base, Offset); // SST1_IMM requires that the offset is an immediate that is: // * a multiple of #SizeInBytes, // * in the range [0, 31 x #SizeInBytes], // where #SizeInBytes is the size in bytes of the stored items. For // immediates outside that range and non-immediate scalar offsets use SST1 or // SST1_UXTW instead. if (Opcode == AArch64ISD::SST1_IMM_PRED) { if (!isValidImmForSVEVecImmAddrMode(Offset, SrcVT.getScalarSizeInBits() / 8)) { if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy) Opcode = AArch64ISD::SST1_UXTW_PRED; else Opcode = AArch64ISD::SST1_PRED; std::swap(Base, Offset); } } auto &TLI = DAG.getTargetLoweringInfo(); if (!TLI.isTypeLegal(Base.getValueType())) return SDValue(); // Some scatter store variants allow unpacked offsets, but only as nxv2i32 // vectors. These are implicitly sign (sxtw) or zero (zxtw) extend to // nxv2i64. Legalize accordingly. if (!OnlyPackedOffsets && Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32) Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0); if (!TLI.isTypeLegal(Offset.getValueType())) return SDValue(); // Source value type that is representable in hardware EVT HwSrcVt = getSVEContainerType(SrcVT); // Keep the original type of the input data to store - this is needed to be // able to select the correct instruction, e.g. ST1B, ST1H, ST1W and ST1D. For // FP values we want the integer equivalent, so just use HwSrcVt. 
SDValue InputVT = DAG.getValueType(SrcVT); if (SrcVT.isFloatingPoint()) InputVT = DAG.getValueType(HwSrcVt); SDVTList VTs = DAG.getVTList(MVT::Other); SDValue SrcNew; if (Src.getValueType().isFloatingPoint()) SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Src); else SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Src); SDValue Ops[] = {N->getOperand(0), // Chain SrcNew, N->getOperand(3), // Pg Base, Offset, InputVT}; return DAG.getNode(Opcode, DL, VTs, Ops); } static SDValue performGatherLoadCombine(SDNode *N, SelectionDAG &DAG, unsigned Opcode, bool OnlyPackedOffsets = true) { const EVT RetVT = N->getValueType(0); assert(RetVT.isScalableVector() && "Gather loads are only possible for SVE vectors"); SDLoc DL(N); // Make sure that the loaded data will fit into an SVE register if (RetVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock) return SDValue(); // Depending on the addressing mode, this is either a pointer or a vector of // pointers (that fits into one register) SDValue Base = N->getOperand(3); // Depending on the addressing mode, this is either a single offset or a // vector of offsets (that fits into one register) SDValue Offset = N->getOperand(4); // For "scalar + vector of indices", just scale the indices. This only // applies to non-temporal gathers because there's no instruction that takes // indicies. if (Opcode == AArch64ISD::GLDNT1_INDEX_MERGE_ZERO) { Offset = getScaledOffsetForBitWidth(DAG, Offset, DL, RetVT.getScalarSizeInBits()); Opcode = AArch64ISD::GLDNT1_MERGE_ZERO; } // In the case of non-temporal gather loads there's only one SVE instruction // per data-size: "scalar + vector", i.e. // * ldnt1{b|h|w|d} { z0.s }, p0/z, [z0.s, x0] // Since we do have intrinsics that allow the arguments to be in a different // order, we may need to swap them to match the spec. 
if (Opcode == AArch64ISD::GLDNT1_MERGE_ZERO && Offset.getValueType().isVector()) std::swap(Base, Offset); // GLD{FF}1_IMM requires that the offset is an immediate that is: // * a multiple of #SizeInBytes, // * in the range [0, 31 x #SizeInBytes], // where #SizeInBytes is the size in bytes of the loaded items. For // immediates outside that range and non-immediate scalar offsets use // GLD1_MERGE_ZERO or GLD1_UXTW_MERGE_ZERO instead. if (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO || Opcode == AArch64ISD::GLDFF1_IMM_MERGE_ZERO) { if (!isValidImmForSVEVecImmAddrMode(Offset, RetVT.getScalarSizeInBits() / 8)) { if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy) Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO) ? AArch64ISD::GLD1_UXTW_MERGE_ZERO : AArch64ISD::GLDFF1_UXTW_MERGE_ZERO; else Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO) ? AArch64ISD::GLD1_MERGE_ZERO : AArch64ISD::GLDFF1_MERGE_ZERO; std::swap(Base, Offset); } } auto &TLI = DAG.getTargetLoweringInfo(); if (!TLI.isTypeLegal(Base.getValueType())) return SDValue(); // Some gather load variants allow unpacked offsets, but only as nxv2i32 // vectors. These are implicitly sign (sxtw) or zero (zxtw) extend to // nxv2i64. Legalize accordingly. if (!OnlyPackedOffsets && Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32) Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0); // Return value type that is representable in hardware EVT HwRetVt = getSVEContainerType(RetVT); // Keep the original output value type around - this is needed to be able to // select the correct instruction, e.g. LD1B, LD1H, LD1W and LD1D. For FP // values we want the integer equivalent, so just use HwRetVT. 
SDValue OutVT = DAG.getValueType(RetVT); if (RetVT.isFloatingPoint()) OutVT = DAG.getValueType(HwRetVt); SDVTList VTs = DAG.getVTList(HwRetVt, MVT::Other); SDValue Ops[] = {N->getOperand(0), // Chain N->getOperand(2), // Pg Base, Offset, OutVT}; SDValue Load = DAG.getNode(Opcode, DL, VTs, Ops); SDValue LoadChain = SDValue(Load.getNode(), 1); if (RetVT.isInteger() && (RetVT != HwRetVt)) Load = DAG.getNode(ISD::TRUNCATE, DL, RetVT, Load.getValue(0)); // If the original return value was FP, bitcast accordingly. Doing it here // means that we can avoid adding TableGen patterns for FPs. if (RetVT.isFloatingPoint()) Load = DAG.getNode(ISD::BITCAST, DL, RetVT, Load.getValue(0)); return DAG.getMergeValues({Load, LoadChain}, DL); } static SDValue performSignExtendInRegCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) { SDLoc DL(N); SDValue Src = N->getOperand(0); unsigned Opc = Src->getOpcode(); // Sign extend of an unsigned unpack -> signed unpack if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) { unsigned SOpc = Opc == AArch64ISD::UUNPKHI ? 
AArch64ISD::SUNPKHI : AArch64ISD::SUNPKLO; // Push the sign extend to the operand of the unpack // This is necessary where, for example, the operand of the unpack // is another unpack: // 4i32 sign_extend_inreg (4i32 uunpklo(8i16 uunpklo (16i8 opnd)), from 4i8) // -> // 4i32 sunpklo (8i16 sign_extend_inreg(8i16 uunpklo (16i8 opnd), from 8i8) // -> // 4i32 sunpklo(8i16 sunpklo(16i8 opnd)) SDValue ExtOp = Src->getOperand(0); auto VT = cast(N->getOperand(1))->getVT(); EVT EltTy = VT.getVectorElementType(); (void)EltTy; assert((EltTy == MVT::i8 || EltTy == MVT::i16 || EltTy == MVT::i32) && "Sign extending from an invalid type"); EVT ExtVT = VT.getDoubleNumVectorElementsVT(*DAG.getContext()); SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ExtOp.getValueType(), ExtOp, DAG.getValueType(ExtVT)); return DAG.getNode(SOpc, DL, N->getValueType(0), Ext); } if (DCI.isBeforeLegalizeOps()) return SDValue(); if (!EnableCombineMGatherIntrinsics) return SDValue(); // SVE load nodes (e.g. AArch64ISD::GLD1) are straightforward candidates // for DAG Combine with SIGN_EXTEND_INREG. Bail out for all other nodes. 
unsigned NewOpc; unsigned MemVTOpNum = 4; switch (Opc) { case AArch64ISD::LD1_MERGE_ZERO: NewOpc = AArch64ISD::LD1S_MERGE_ZERO; MemVTOpNum = 3; break; case AArch64ISD::LDNF1_MERGE_ZERO: NewOpc = AArch64ISD::LDNF1S_MERGE_ZERO; MemVTOpNum = 3; break; case AArch64ISD::LDFF1_MERGE_ZERO: NewOpc = AArch64ISD::LDFF1S_MERGE_ZERO; MemVTOpNum = 3; break; case AArch64ISD::GLD1_MERGE_ZERO: NewOpc = AArch64ISD::GLD1S_MERGE_ZERO; break; case AArch64ISD::GLD1_SCALED_MERGE_ZERO: NewOpc = AArch64ISD::GLD1S_SCALED_MERGE_ZERO; break; case AArch64ISD::GLD1_SXTW_MERGE_ZERO: NewOpc = AArch64ISD::GLD1S_SXTW_MERGE_ZERO; break; case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO: NewOpc = AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO; break; case AArch64ISD::GLD1_UXTW_MERGE_ZERO: NewOpc = AArch64ISD::GLD1S_UXTW_MERGE_ZERO; break; case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO: NewOpc = AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO; break; case AArch64ISD::GLD1_IMM_MERGE_ZERO: NewOpc = AArch64ISD::GLD1S_IMM_MERGE_ZERO; break; case AArch64ISD::GLDFF1_MERGE_ZERO: NewOpc = AArch64ISD::GLDFF1S_MERGE_ZERO; break; case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO: NewOpc = AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO; break; case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO: NewOpc = AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO; break; case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO: NewOpc = AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO; break; case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO: NewOpc = AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO; break; case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO: NewOpc = AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO; break; case AArch64ISD::GLDFF1_IMM_MERGE_ZERO: NewOpc = AArch64ISD::GLDFF1S_IMM_MERGE_ZERO; break; case AArch64ISD::GLDNT1_MERGE_ZERO: NewOpc = AArch64ISD::GLDNT1S_MERGE_ZERO; break; default: return SDValue(); } EVT SignExtSrcVT = cast(N->getOperand(1))->getVT(); EVT SrcMemVT = cast(Src->getOperand(MemVTOpNum))->getVT(); if ((SignExtSrcVT != SrcMemVT) || !Src.hasOneUse()) return SDValue(); EVT DstVT = 
N->getValueType(0); SDVTList VTs = DAG.getVTList(DstVT, MVT::Other); SmallVector Ops; for (unsigned I = 0; I < Src->getNumOperands(); ++I) Ops.push_back(Src->getOperand(I)); SDValue ExtLoad = DAG.getNode(NewOpc, SDLoc(N), VTs, Ops); DCI.CombineTo(N, ExtLoad); DCI.CombineTo(Src.getNode(), ExtLoad, ExtLoad.getValue(1)); // Return N so it doesn't get rechecked return SDValue(N, 0); } /// Legalize the gather prefetch (scalar + vector addressing mode) when the /// offset vector is an unpacked 32-bit scalable vector. The other cases (Offset /// != nxv2i32) do not need legalization. static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG) { const unsigned OffsetPos = 4; SDValue Offset = N->getOperand(OffsetPos); // Not an unpacked vector, bail out. if (Offset.getValueType().getSimpleVT().SimpleTy != MVT::nxv2i32) return SDValue(); // Extend the unpacked offset vector to 64-bit lanes. SDLoc DL(N); Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset); SmallVector Ops(N->op_begin(), N->op_end()); // Replace the offset operand with the 64-bit one. Ops[OffsetPos] = Offset; return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops); } /// Combines a node carrying the intrinsic /// `aarch64_sve_prf_gather_scalar_offset` into a node that uses /// `aarch64_sve_prfb_gather_uxtw_index` when the scalar offset passed to /// `aarch64_sve_prf_gather_scalar_offset` is not a valid immediate for the /// sve gather prefetch instruction with vector plus immediate addressing mode. static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG, unsigned ScalarSizeInBytes) { const unsigned ImmPos = 4, OffsetPos = 3; // No need to combine the node if the immediate is valid... if (isValidImmForSVEVecImmAddrMode(N->getOperand(ImmPos), ScalarSizeInBytes)) return SDValue(); // ...otherwise swap the offset base with the offset... 
SmallVector Ops(N->op_begin(), N->op_end()); std::swap(Ops[ImmPos], Ops[OffsetPos]); // ...and remap the intrinsic `aarch64_sve_prf_gather_scalar_offset` to // `aarch64_sve_prfb_gather_uxtw_index`. SDLoc DL(N); Ops[1] = DAG.getConstant(Intrinsic::aarch64_sve_prfb_gather_uxtw_index, DL, MVT::i64); return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops); } // Return true if the vector operation can guarantee only the first lane of its // result contains data, with all bits in other lanes set to zero. static bool isLanes1toNKnownZero(SDValue Op) { switch (Op.getOpcode()) { default: return false; case AArch64ISD::ANDV_PRED: case AArch64ISD::EORV_PRED: case AArch64ISD::FADDA_PRED: case AArch64ISD::FADDV_PRED: case AArch64ISD::FMAXNMV_PRED: case AArch64ISD::FMAXV_PRED: case AArch64ISD::FMINNMV_PRED: case AArch64ISD::FMINV_PRED: case AArch64ISD::ORV_PRED: case AArch64ISD::SADDV_PRED: case AArch64ISD::SMAXV_PRED: case AArch64ISD::SMINV_PRED: case AArch64ISD::UADDV_PRED: case AArch64ISD::UMAXV_PRED: case AArch64ISD::UMINV_PRED: return true; } } static SDValue removeRedundantInsertVectorElt(SDNode *N) { assert(N->getOpcode() == ISD::INSERT_VECTOR_ELT && "Unexpected node!"); SDValue InsertVec = N->getOperand(0); SDValue InsertElt = N->getOperand(1); SDValue InsertIdx = N->getOperand(2); // We only care about inserts into the first element... if (!isNullConstant(InsertIdx)) return SDValue(); // ...of a zero'd vector... if (!ISD::isConstantSplatVectorAllZeros(InsertVec.getNode())) return SDValue(); // ...where the inserted data was previously extracted... if (InsertElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT) return SDValue(); SDValue ExtractVec = InsertElt.getOperand(0); SDValue ExtractIdx = InsertElt.getOperand(1); // ...from the first element of a vector. if (!isNullConstant(ExtractIdx)) return SDValue(); // If we get here we are effectively trying to zero lanes 1-N of a vector. // Ensure there's no type conversion going on. 
if (N->getValueType(0) != ExtractVec.getValueType()) return SDValue(); if (!isLanes1toNKnownZero(ExtractVec)) return SDValue(); // The explicit zeroing is redundant. return ExtractVec; } static SDValue performInsertVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { if (SDValue Res = removeRedundantInsertVectorElt(N)) return Res; return performPostLD1Combine(N, DCI, true); } SDValue performSVESpliceCombine(SDNode *N, SelectionDAG &DAG) { EVT Ty = N->getValueType(0); if (Ty.isInteger()) return SDValue(); EVT IntTy = Ty.changeVectorElementTypeToInteger(); EVT ExtIntTy = getPackedSVEVectorVT(IntTy.getVectorElementCount()); if (ExtIntTy.getVectorElementType().getScalarSizeInBits() < IntTy.getVectorElementType().getScalarSizeInBits()) return SDValue(); SDLoc DL(N); SDValue LHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(0)), DL, ExtIntTy); SDValue RHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(1)), DL, ExtIntTy); SDValue Idx = N->getOperand(2); SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ExtIntTy, LHS, RHS, Idx); SDValue Trunc = DAG.getAnyExtOrTrunc(Splice, DL, IntTy); return DAG.getBitcast(Ty, Trunc); } SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; switch (N->getOpcode()) { default: LLVM_DEBUG(dbgs() << "Custom combining: skipping\n"); break; case ISD::ADD: case ISD::SUB: return performAddSubCombine(N, DCI, DAG); case ISD::XOR: return performXorCombine(N, DAG, DCI, Subtarget); case ISD::MUL: return performMulCombine(N, DAG, DCI, Subtarget); case ISD::SINT_TO_FP: case ISD::UINT_TO_FP: return performIntToFpCombine(N, DAG, Subtarget); case ISD::FP_TO_SINT: case ISD::FP_TO_UINT: return performFpToIntCombine(N, DAG, DCI, Subtarget); case ISD::FDIV: return performFDivCombine(N, DAG, DCI, Subtarget); case ISD::OR: return performORCombine(N, DCI, Subtarget); case ISD::AND: return performANDCombine(N, DCI); case ISD::SRL: return performSRLCombine(N, 
DCI); case ISD::INTRINSIC_WO_CHAIN: return performIntrinsicCombine(N, DCI, Subtarget); case ISD::ANY_EXTEND: case ISD::ZERO_EXTEND: case ISD::SIGN_EXTEND: return performExtendCombine(N, DCI, DAG); case ISD::SIGN_EXTEND_INREG: return performSignExtendInRegCombine(N, DCI, DAG); case ISD::TRUNCATE: return performVectorTruncateCombine(N, DCI, DAG); case ISD::CONCAT_VECTORS: return performConcatVectorsCombine(N, DCI, DAG); case ISD::SELECT: return performSelectCombine(N, DCI); case ISD::VSELECT: return performVSelectCombine(N, DCI.DAG); case ISD::SETCC: return performSETCCCombine(N, DAG); case ISD::LOAD: if (performTBISimplification(N->getOperand(1), DCI, DAG)) return SDValue(N, 0); break; case ISD::STORE: return performSTORECombine(N, DCI, DAG, Subtarget); case ISD::VECTOR_SPLICE: return performSVESpliceCombine(N, DAG); case AArch64ISD::BRCOND: return performBRCONDCombine(N, DCI, DAG); case AArch64ISD::TBNZ: case AArch64ISD::TBZ: return performTBZCombine(N, DCI, DAG); case AArch64ISD::CSEL: return performCSELCombine(N, DCI, DAG); case AArch64ISD::DUP: return performPostLD1Combine(N, DCI, false); case AArch64ISD::NVCAST: return performNVCASTCombine(N); case AArch64ISD::SPLICE: return performSpliceCombine(N, DAG); case AArch64ISD::UZP1: return performUzpCombine(N, DAG); case AArch64ISD::SETCC_MERGE_ZERO: return performSetccMergeZeroCombine(N, DAG); case AArch64ISD::GLD1_MERGE_ZERO: case AArch64ISD::GLD1_SCALED_MERGE_ZERO: case AArch64ISD::GLD1_UXTW_MERGE_ZERO: case AArch64ISD::GLD1_SXTW_MERGE_ZERO: case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO: case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO: case AArch64ISD::GLD1_IMM_MERGE_ZERO: case AArch64ISD::GLD1S_MERGE_ZERO: case AArch64ISD::GLD1S_SCALED_MERGE_ZERO: case AArch64ISD::GLD1S_UXTW_MERGE_ZERO: case AArch64ISD::GLD1S_SXTW_MERGE_ZERO: case AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO: case AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO: case AArch64ISD::GLD1S_IMM_MERGE_ZERO: return performGLD1Combine(N, DAG); case AArch64ISD::VASHR: 
case AArch64ISD::VLSHR: return performVectorShiftCombine(N, *this, DCI); case ISD::INSERT_VECTOR_ELT: return performInsertVectorEltCombine(N, DCI); case ISD::EXTRACT_VECTOR_ELT: return performExtractVectorEltCombine(N, DAG); case ISD::VECREDUCE_ADD: return performVecReduceAddCombine(N, DCI.DAG, Subtarget); case ISD::INTRINSIC_VOID: case ISD::INTRINSIC_W_CHAIN: switch (cast(N->getOperand(1))->getZExtValue()) { case Intrinsic::aarch64_sve_prfb_gather_scalar_offset: return combineSVEPrefetchVecBaseImmOff(N, DAG, 1 /*=ScalarSizeInBytes*/); case Intrinsic::aarch64_sve_prfh_gather_scalar_offset: return combineSVEPrefetchVecBaseImmOff(N, DAG, 2 /*=ScalarSizeInBytes*/); case Intrinsic::aarch64_sve_prfw_gather_scalar_offset: return combineSVEPrefetchVecBaseImmOff(N, DAG, 4 /*=ScalarSizeInBytes*/); case Intrinsic::aarch64_sve_prfd_gather_scalar_offset: return combineSVEPrefetchVecBaseImmOff(N, DAG, 8 /*=ScalarSizeInBytes*/); case Intrinsic::aarch64_sve_prfb_gather_uxtw_index: case Intrinsic::aarch64_sve_prfb_gather_sxtw_index: case Intrinsic::aarch64_sve_prfh_gather_uxtw_index: case Intrinsic::aarch64_sve_prfh_gather_sxtw_index: case Intrinsic::aarch64_sve_prfw_gather_uxtw_index: case Intrinsic::aarch64_sve_prfw_gather_sxtw_index: case Intrinsic::aarch64_sve_prfd_gather_uxtw_index: case Intrinsic::aarch64_sve_prfd_gather_sxtw_index: return legalizeSVEGatherPrefetchOffsVec(N, DAG); case Intrinsic::aarch64_neon_ld2: case Intrinsic::aarch64_neon_ld3: case Intrinsic::aarch64_neon_ld4: case Intrinsic::aarch64_neon_ld1x2: case Intrinsic::aarch64_neon_ld1x3: case Intrinsic::aarch64_neon_ld1x4: case Intrinsic::aarch64_neon_ld2lane: case Intrinsic::aarch64_neon_ld3lane: case Intrinsic::aarch64_neon_ld4lane: case Intrinsic::aarch64_neon_ld2r: case Intrinsic::aarch64_neon_ld3r: case Intrinsic::aarch64_neon_ld4r: case Intrinsic::aarch64_neon_st2: case Intrinsic::aarch64_neon_st3: case Intrinsic::aarch64_neon_st4: case Intrinsic::aarch64_neon_st1x2: case Intrinsic::aarch64_neon_st1x3: 
case Intrinsic::aarch64_neon_st1x4: case Intrinsic::aarch64_neon_st2lane: case Intrinsic::aarch64_neon_st3lane: case Intrinsic::aarch64_neon_st4lane: return performNEONPostLDSTCombine(N, DCI, DAG); case Intrinsic::aarch64_sve_ldnt1: return performLDNT1Combine(N, DAG); case Intrinsic::aarch64_sve_ld1rq: return performLD1ReplicateCombine(N, DAG); case Intrinsic::aarch64_sve_ld1ro: return performLD1ReplicateCombine(N, DAG); case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO); case Intrinsic::aarch64_sve_ldnt1_gather: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO); case Intrinsic::aarch64_sve_ldnt1_gather_index: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_INDEX_MERGE_ZERO); case Intrinsic::aarch64_sve_ldnt1_gather_uxtw: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO); case Intrinsic::aarch64_sve_ld1: return performLD1Combine(N, DAG, AArch64ISD::LD1_MERGE_ZERO); case Intrinsic::aarch64_sve_ldnf1: return performLD1Combine(N, DAG, AArch64ISD::LDNF1_MERGE_ZERO); case Intrinsic::aarch64_sve_ldff1: return performLD1Combine(N, DAG, AArch64ISD::LDFF1_MERGE_ZERO); case Intrinsic::aarch64_sve_st1: return performST1Combine(N, DAG); case Intrinsic::aarch64_sve_stnt1: return performSTNT1Combine(N, DAG); case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset: return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED); case Intrinsic::aarch64_sve_stnt1_scatter_uxtw: return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED); case Intrinsic::aarch64_sve_stnt1_scatter: return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED); case Intrinsic::aarch64_sve_stnt1_scatter_index: return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_INDEX_PRED); case Intrinsic::aarch64_sve_ld1_gather: return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_MERGE_ZERO); case Intrinsic::aarch64_sve_ld1_gather_index: return 
performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_SCALED_MERGE_ZERO); case Intrinsic::aarch64_sve_ld1_gather_sxtw: return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_SXTW_MERGE_ZERO, /*OnlyPackedOffsets=*/false); case Intrinsic::aarch64_sve_ld1_gather_uxtw: return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_UXTW_MERGE_ZERO, /*OnlyPackedOffsets=*/false); case Intrinsic::aarch64_sve_ld1_gather_sxtw_index: return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO, /*OnlyPackedOffsets=*/false); case Intrinsic::aarch64_sve_ld1_gather_uxtw_index: return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO, /*OnlyPackedOffsets=*/false); case Intrinsic::aarch64_sve_ld1_gather_scalar_offset: return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_IMM_MERGE_ZERO); case Intrinsic::aarch64_sve_ldff1_gather: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_MERGE_ZERO); case Intrinsic::aarch64_sve_ldff1_gather_index: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_SCALED_MERGE_ZERO); case Intrinsic::aarch64_sve_ldff1_gather_sxtw: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_SXTW_MERGE_ZERO, /*OnlyPackedOffsets=*/false); case Intrinsic::aarch64_sve_ldff1_gather_uxtw: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_UXTW_MERGE_ZERO, /*OnlyPackedOffsets=*/false); case Intrinsic::aarch64_sve_ldff1_gather_sxtw_index: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO, /*OnlyPackedOffsets=*/false); case Intrinsic::aarch64_sve_ldff1_gather_uxtw_index: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO, /*OnlyPackedOffsets=*/false); case Intrinsic::aarch64_sve_ldff1_gather_scalar_offset: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_IMM_MERGE_ZERO); case Intrinsic::aarch64_sve_st1_scatter: return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_PRED); case Intrinsic::aarch64_sve_st1_scatter_index: return 
performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SCALED_PRED); case Intrinsic::aarch64_sve_st1_scatter_sxtw: return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SXTW_PRED, /*OnlyPackedOffsets=*/false); case Intrinsic::aarch64_sve_st1_scatter_uxtw: return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_UXTW_PRED, /*OnlyPackedOffsets=*/false); case Intrinsic::aarch64_sve_st1_scatter_sxtw_index: return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SXTW_SCALED_PRED, /*OnlyPackedOffsets=*/false); case Intrinsic::aarch64_sve_st1_scatter_uxtw_index: return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_UXTW_SCALED_PRED, /*OnlyPackedOffsets=*/false); case Intrinsic::aarch64_sve_st1_scatter_scalar_offset: return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_IMM_PRED); case Intrinsic::aarch64_sve_tuple_get: { SDLoc DL(N); SDValue Chain = N->getOperand(0); SDValue Src1 = N->getOperand(2); SDValue Idx = N->getOperand(3); uint64_t IdxConst = cast(Idx)->getZExtValue(); EVT ResVT = N->getValueType(0); uint64_t NumLanes = ResVT.getVectorElementCount().getKnownMinValue(); SDValue ExtIdx = DAG.getVectorIdxConstant(IdxConst * NumLanes, DL); SDValue Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT, Src1, ExtIdx); return DAG.getMergeValues({Val, Chain}, DL); } case Intrinsic::aarch64_sve_tuple_set: { SDLoc DL(N); SDValue Chain = N->getOperand(0); SDValue Tuple = N->getOperand(2); SDValue Idx = N->getOperand(3); SDValue Vec = N->getOperand(4); EVT TupleVT = Tuple.getValueType(); uint64_t TupleLanes = TupleVT.getVectorElementCount().getKnownMinValue(); uint64_t IdxConst = cast(Idx)->getZExtValue(); uint64_t NumLanes = Vec.getValueType().getVectorElementCount().getKnownMinValue(); if ((TupleLanes % NumLanes) != 0) report_fatal_error("invalid tuple vector!"); uint64_t NumVecs = TupleLanes / NumLanes; SmallVector Opnds; for (unsigned I = 0; I < NumVecs; ++I) { if (I == IdxConst) Opnds.push_back(Vec); else { SDValue ExtIdx = DAG.getVectorIdxConstant(I * 
NumLanes, DL); Opnds.push_back(DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, Vec.getValueType(), Tuple, ExtIdx)); } } SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, Tuple.getValueType(), Opnds); return DAG.getMergeValues({Concat, Chain}, DL); } case Intrinsic::aarch64_sve_tuple_create2: case Intrinsic::aarch64_sve_tuple_create3: case Intrinsic::aarch64_sve_tuple_create4: { SDLoc DL(N); SDValue Chain = N->getOperand(0); SmallVector Opnds; for (unsigned I = 2; I < N->getNumOperands(); ++I) Opnds.push_back(N->getOperand(I)); EVT VT = Opnds[0].getValueType(); EVT EltVT = VT.getVectorElementType(); EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, VT.getVectorElementCount() * (N->getNumOperands() - 2)); SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, DestVT, Opnds); return DAG.getMergeValues({Concat, Chain}, DL); } case Intrinsic::aarch64_sve_ld2: case Intrinsic::aarch64_sve_ld3: case Intrinsic::aarch64_sve_ld4: { SDLoc DL(N); SDValue Chain = N->getOperand(0); SDValue Mask = N->getOperand(2); SDValue BasePtr = N->getOperand(3); SDValue LoadOps[] = {Chain, Mask, BasePtr}; unsigned IntrinsicID = cast(N->getOperand(1))->getZExtValue(); SDValue Result = LowerSVEStructLoad(IntrinsicID, LoadOps, N->getValueType(0), DAG, DL); return DAG.getMergeValues({Result, Chain}, DL); } case Intrinsic::aarch64_rndr: case Intrinsic::aarch64_rndrrs: { unsigned IntrinsicID = cast(N->getOperand(1))->getZExtValue(); auto Register = (IntrinsicID == Intrinsic::aarch64_rndr ? 
AArch64SysReg::RNDR : AArch64SysReg::RNDRRS); SDLoc DL(N); SDValue A = DAG.getNode( AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, MVT::Glue, MVT::Other), N->getOperand(0), DAG.getConstant(Register, DL, MVT::i64)); SDValue B = DAG.getNode( AArch64ISD::CSINC, DL, MVT::i32, DAG.getConstant(0, DL, MVT::i32), DAG.getConstant(0, DL, MVT::i32), DAG.getConstant(AArch64CC::NE, DL, MVT::i32), A.getValue(1)); return DAG.getMergeValues( {A, DAG.getZExtOrTrunc(B, DL, MVT::i1), A.getValue(2)}, DL); } default: break; } break; case ISD::GlobalAddress: return performGlobalAddressCombine(N, DAG, Subtarget, getTargetMachine()); } return SDValue(); } // Check if the return value is used as only a return value, as otherwise // we can't perform a tail-call. In particular, we need to check for // target ISD nodes that are returns and any other "odd" constructs // that the generic analysis code won't necessarily catch. bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { if (N->getNumValues() != 1) return false; if (!N->hasNUsesOfValue(1, 0)) return false; SDValue TCChain = Chain; SDNode *Copy = *N->use_begin(); if (Copy->getOpcode() == ISD::CopyToReg) { // If the copy has a glue operand, we conservatively assume it isn't safe to // perform a tail call. if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == MVT::Glue) return false; TCChain = Copy->getOperand(0); } else if (Copy->getOpcode() != ISD::FP_EXTEND) return false; bool HasRet = false; for (SDNode *Node : Copy->uses()) { if (Node->getOpcode() != AArch64ISD::RET_FLAG) return false; HasRet = true; } if (!HasRet) return false; Chain = TCChain; return true; } // Return whether the an instruction can potentially be optimized to a tail // call. This will cause the optimizers to attempt to move, or duplicate, // return instructions to help enable tail call optimizations for this // instruction. 
bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { return CI->isTailCall(); } bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, bool &IsInc, SelectionDAG &DAG) const { if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) return false; Base = Op->getOperand(0); // All of the indexed addressing mode instructions take a signed // 9 bit immediate offset. if (ConstantSDNode *RHS = dyn_cast(Op->getOperand(1))) { int64_t RHSC = RHS->getSExtValue(); if (Op->getOpcode() == ISD::SUB) RHSC = -(uint64_t)RHSC; if (!isInt<9>(RHSC)) return false; IsInc = (Op->getOpcode() == ISD::ADD); Offset = Op->getOperand(1); return true; } return false; } bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const { EVT VT; SDValue Ptr; if (LoadSDNode *LD = dyn_cast(N)) { VT = LD->getMemoryVT(); Ptr = LD->getBasePtr(); } else if (StoreSDNode *ST = dyn_cast(N)) { VT = ST->getMemoryVT(); Ptr = ST->getBasePtr(); } else return false; bool IsInc; if (!getIndexedAddressParts(Ptr.getNode(), Base, Offset, AM, IsInc, DAG)) return false; AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC; return true; } bool AArch64TargetLowering::getPostIndexedAddressParts( SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const { EVT VT; SDValue Ptr; if (LoadSDNode *LD = dyn_cast(N)) { VT = LD->getMemoryVT(); Ptr = LD->getBasePtr(); } else if (StoreSDNode *ST = dyn_cast(N)) { VT = ST->getMemoryVT(); Ptr = ST->getBasePtr(); } else return false; bool IsInc; if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG)) return false; // Post-indexing updates the base, so it's not a valid transform // if that's not the same as the load's pointer. if (Ptr != Base) return false; AM = IsInc ? 
ISD::POST_INC : ISD::POST_DEC; return true; } void AArch64TargetLowering::ReplaceBITCASTResults( SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG) const { SDLoc DL(N); SDValue Op = N->getOperand(0); EVT VT = N->getValueType(0); EVT SrcVT = Op.getValueType(); if (VT.isScalableVector() && !isTypeLegal(VT) && isTypeLegal(SrcVT)) { assert(!VT.isFloatingPoint() && SrcVT.isFloatingPoint() && "Expected fp->int bitcast!"); SDValue CastResult = getSVESafeBitCast(getSVEContainerType(VT), Op, DAG); Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, CastResult)); return; } if (VT != MVT::i16 || (SrcVT != MVT::f16 && SrcVT != MVT::bf16)) return; Op = SDValue( DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32, DAG.getUNDEF(MVT::i32), Op, DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)), 0); Op = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op); Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Op)); } static void ReplaceReductionResults(SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG, unsigned InterOp, unsigned AcrossOp) { EVT LoVT, HiVT; SDValue Lo, Hi; SDLoc dl(N); std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); SDValue InterVal = DAG.getNode(InterOp, dl, LoVT, Lo, Hi); SDValue SplitVal = DAG.getNode(AcrossOp, dl, LoVT, InterVal); Results.push_back(SplitVal); } static std::pair splitInt128(SDValue N, SelectionDAG &DAG) { SDLoc DL(N); SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, N); SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, DAG.getNode(ISD::SRL, DL, MVT::i128, N, DAG.getConstant(64, DL, MVT::i64))); return std::make_pair(Lo, Hi); } void AArch64TargetLowering::ReplaceExtractSubVectorResults( SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG) const { SDValue In = N->getOperand(0); EVT InVT = In.getValueType(); // Common code will handle these just fine. 
if (!InVT.isScalableVector() || !InVT.isInteger()) return; SDLoc DL(N); EVT VT = N->getValueType(0); // The following checks bail if this is not a halving operation. ElementCount ResEC = VT.getVectorElementCount(); if (InVT.getVectorElementCount() != (ResEC * 2)) return; auto *CIndex = dyn_cast(N->getOperand(1)); if (!CIndex) return; unsigned Index = CIndex->getZExtValue(); if ((Index != 0) && (Index != ResEC.getKnownMinValue())) return; unsigned Opcode = (Index == 0) ? AArch64ISD::UUNPKLO : AArch64ISD::UUNPKHI; EVT ExtendedHalfVT = VT.widenIntegerVectorElementType(*DAG.getContext()); SDValue Half = DAG.getNode(Opcode, DL, ExtendedHalfVT, N->getOperand(0)); Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Half)); } // Create an even/odd pair of X registers holding integer value V. static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { SDLoc dl(V.getNode()); SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i64); SDValue VHi = DAG.getAnyExtOrTrunc( DAG.getNode(ISD::SRL, dl, MVT::i128, V, DAG.getConstant(64, dl, MVT::i64)), dl, MVT::i64); if (DAG.getDataLayout().isBigEndian()) std::swap (VLo, VHi); SDValue RegClass = DAG.getTargetConstant(AArch64::XSeqPairsClassRegClassID, dl, MVT::i32); SDValue SubReg0 = DAG.getTargetConstant(AArch64::sube64, dl, MVT::i32); SDValue SubReg1 = DAG.getTargetConstant(AArch64::subo64, dl, MVT::i32); const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 }; return SDValue( DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0); } static void ReplaceCMP_SWAP_128Results(SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG, const AArch64Subtarget *Subtarget) { assert(N->getValueType(0) == MVT::i128 && "AtomicCmpSwap on types less than 128 should be legal"); MachineMemOperand *MemOp = cast(N)->getMemOperand(); if (Subtarget->hasLSE() || Subtarget->outlineAtomics()) { // LSE has a 128-bit compare and swap (CASP), but i128 is not a legal type, // so lower it here, wrapped in REG_SEQUENCE and EXTRACT_SUBREG. 
SDValue Ops[] = { createGPRPairNode(DAG, N->getOperand(2)), // Compare value createGPRPairNode(DAG, N->getOperand(3)), // Store value N->getOperand(1), // Ptr N->getOperand(0), // Chain in }; unsigned Opcode; switch (MemOp->getMergedOrdering()) { case AtomicOrdering::Monotonic: Opcode = AArch64::CASPX; break; case AtomicOrdering::Acquire: Opcode = AArch64::CASPAX; break; case AtomicOrdering::Release: Opcode = AArch64::CASPLX; break; case AtomicOrdering::AcquireRelease: case AtomicOrdering::SequentiallyConsistent: Opcode = AArch64::CASPALX; break; default: llvm_unreachable("Unexpected ordering!"); } MachineSDNode *CmpSwap = DAG.getMachineNode( Opcode, SDLoc(N), DAG.getVTList(MVT::Untyped, MVT::Other), Ops); DAG.setNodeMemRefs(CmpSwap, {MemOp}); unsigned SubReg1 = AArch64::sube64, SubReg2 = AArch64::subo64; if (DAG.getDataLayout().isBigEndian()) std::swap(SubReg1, SubReg2); SDValue Lo = DAG.getTargetExtractSubreg(SubReg1, SDLoc(N), MVT::i64, SDValue(CmpSwap, 0)); SDValue Hi = DAG.getTargetExtractSubreg(SubReg2, SDLoc(N), MVT::i64, SDValue(CmpSwap, 0)); Results.push_back( DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128, Lo, Hi)); Results.push_back(SDValue(CmpSwap, 1)); // Chain out return; } unsigned Opcode; switch (MemOp->getMergedOrdering()) { case AtomicOrdering::Monotonic: Opcode = AArch64::CMP_SWAP_128_MONOTONIC; break; case AtomicOrdering::Acquire: Opcode = AArch64::CMP_SWAP_128_ACQUIRE; break; case AtomicOrdering::Release: Opcode = AArch64::CMP_SWAP_128_RELEASE; break; case AtomicOrdering::AcquireRelease: case AtomicOrdering::SequentiallyConsistent: Opcode = AArch64::CMP_SWAP_128; break; default: llvm_unreachable("Unexpected ordering!"); } auto Desired = splitInt128(N->getOperand(2), DAG); auto New = splitInt128(N->getOperand(3), DAG); SDValue Ops[] = {N->getOperand(1), Desired.first, Desired.second, New.first, New.second, N->getOperand(0)}; SDNode *CmpSwap = DAG.getMachineNode( Opcode, SDLoc(N), DAG.getVTList(MVT::i64, MVT::i64, MVT::i32, MVT::Other), Ops); 
DAG.setNodeMemRefs(cast(CmpSwap), {MemOp}); Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128, SDValue(CmpSwap, 0), SDValue(CmpSwap, 1))); Results.push_back(SDValue(CmpSwap, 3)); } void AArch64TargetLowering::ReplaceNodeResults( SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG) const { switch (N->getOpcode()) { default: llvm_unreachable("Don't know how to custom expand this"); case ISD::BITCAST: ReplaceBITCASTResults(N, Results, DAG); return; case ISD::VECREDUCE_ADD: case ISD::VECREDUCE_SMAX: case ISD::VECREDUCE_SMIN: case ISD::VECREDUCE_UMAX: case ISD::VECREDUCE_UMIN: Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG)); return; case ISD::CTPOP: if (SDValue Result = LowerCTPOP(SDValue(N, 0), DAG)) Results.push_back(Result); return; case AArch64ISD::SADDV: ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::SADDV); return; case AArch64ISD::UADDV: ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::UADDV); return; case AArch64ISD::SMINV: ReplaceReductionResults(N, Results, DAG, ISD::SMIN, AArch64ISD::SMINV); return; case AArch64ISD::UMINV: ReplaceReductionResults(N, Results, DAG, ISD::UMIN, AArch64ISD::UMINV); return; case AArch64ISD::SMAXV: ReplaceReductionResults(N, Results, DAG, ISD::SMAX, AArch64ISD::SMAXV); return; case AArch64ISD::UMAXV: ReplaceReductionResults(N, Results, DAG, ISD::UMAX, AArch64ISD::UMAXV); return; case ISD::FP_TO_UINT: case ISD::FP_TO_SINT: assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion"); // Let normal code take care of it by not adding anything to Results. return; case ISD::ATOMIC_CMP_SWAP: ReplaceCMP_SWAP_128Results(N, Results, DAG, Subtarget); return; case ISD::LOAD: { assert(SDValue(N, 0).getValueType() == MVT::i128 && "unexpected load's value type"); LoadSDNode *LoadNode = cast(N); if (!LoadNode->isVolatile() || LoadNode->getMemoryVT() != MVT::i128) { // Non-volatile loads are optimized later in AArch64's load/store // optimizer. 
return; } SDValue Result = DAG.getMemIntrinsicNode( AArch64ISD::LDP, SDLoc(N), DAG.getVTList({MVT::i64, MVT::i64, MVT::Other}), {LoadNode->getChain(), LoadNode->getBasePtr()}, LoadNode->getMemoryVT(), LoadNode->getMemOperand()); SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128, Result.getValue(0), Result.getValue(1)); Results.append({Pair, Result.getValue(2) /* Chain */}); return; } case ISD::EXTRACT_SUBVECTOR: ReplaceExtractSubVectorResults(N, Results, DAG); return; case ISD::INSERT_SUBVECTOR: // Custom lowering has been requested for INSERT_SUBVECTOR -- but delegate // to common code for result type legalisation return; case ISD::INTRINSIC_WO_CHAIN: { EVT VT = N->getValueType(0); assert((VT == MVT::i8 || VT == MVT::i16) && "custom lowering for unexpected type"); ConstantSDNode *CN = cast(N->getOperand(0)); Intrinsic::ID IntID = static_cast(CN->getZExtValue()); switch (IntID) { default: return; case Intrinsic::aarch64_sve_clasta_n: { SDLoc DL(N); auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2)); auto V = DAG.getNode(AArch64ISD::CLASTA_N, DL, MVT::i32, N->getOperand(1), Op2, N->getOperand(3)); Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V)); return; } case Intrinsic::aarch64_sve_clastb_n: { SDLoc DL(N); auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2)); auto V = DAG.getNode(AArch64ISD::CLASTB_N, DL, MVT::i32, N->getOperand(1), Op2, N->getOperand(3)); Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V)); return; } case Intrinsic::aarch64_sve_lasta: { SDLoc DL(N); auto V = DAG.getNode(AArch64ISD::LASTA, DL, MVT::i32, N->getOperand(1), N->getOperand(2)); Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V)); return; } case Intrinsic::aarch64_sve_lastb: { SDLoc DL(N); auto V = DAG.getNode(AArch64ISD::LASTB, DL, MVT::i32, N->getOperand(1), N->getOperand(2)); Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V)); return; } } } } } bool AArch64TargetLowering::useLoadStackGuardNode() const { if 
(Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia()) return TargetLowering::useLoadStackGuardNode(); return true; } unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const { // Combine multiple FDIVs with the same divisor into multiple FMULs by the // reciprocal if there are three or more FDIVs. return 3; } TargetLoweringBase::LegalizeTypeAction AArch64TargetLowering::getPreferredVectorAction(MVT VT) const { // During type legalization, we prefer to widen v1i8, v1i16, v1i32 to v8i8, // v4i16, v2i32 instead of to promote. if (VT == MVT::v1i8 || VT == MVT::v1i16 || VT == MVT::v1i32 || VT == MVT::v1f32) return TypeWidenVector; return TargetLoweringBase::getPreferredVectorAction(VT); } // Loads and stores less than 128-bits are already atomic; ones above that // are doomed anyway, so defer to the default libcall and blame the OS when // things go wrong. bool AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); return Size == 128; } // Loads and stores less than 128-bits are already atomic; ones above that // are doomed anyway, so defer to the default libcall and blame the OS when // things go wrong. TargetLowering::AtomicExpansionKind AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { unsigned Size = LI->getType()->getPrimitiveSizeInBits(); return Size == 128 ? AtomicExpansionKind::LLSC : AtomicExpansionKind::None; } // For the real atomic operations, we have ldxr/stxr up to 128 bits, TargetLowering::AtomicExpansionKind AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { if (AI->isFloatingPointOperation()) return AtomicExpansionKind::CmpXChg; unsigned Size = AI->getType()->getPrimitiveSizeInBits(); if (Size > 128) return AtomicExpansionKind::None; // Nand is not supported in LSE. // Leave 128 bits to LLSC or CmpXChg. 
if (AI->getOperation() != AtomicRMWInst::Nand && Size < 128) { if (Subtarget->hasLSE()) return AtomicExpansionKind::None; if (Subtarget->outlineAtomics()) { // [U]Min/[U]Max RWM atomics are used in __sync_fetch_ libcalls so far. // Don't outline them unless // (1) high level support approved: // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p0493r1.pdf // (2) low level libgcc and compiler-rt support implemented by: // min/max outline atomics helpers if (AI->getOperation() != AtomicRMWInst::Min && AI->getOperation() != AtomicRMWInst::Max && AI->getOperation() != AtomicRMWInst::UMin && AI->getOperation() != AtomicRMWInst::UMax) { return AtomicExpansionKind::None; } } } // At -O0, fast-regalloc cannot cope with the live vregs necessary to // implement atomicrmw without spilling. If the target address is also on the // stack and close enough to the spill slot, this can lead to a situation // where the monitor always gets cleared and the atomic operation can never // succeed. So at -O0 lower this operation to a CAS loop. if (getTargetMachine().getOptLevel() == CodeGenOpt::None) return AtomicExpansionKind::CmpXChg; return AtomicExpansionKind::LLSC; } TargetLowering::AtomicExpansionKind AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR( AtomicCmpXchgInst *AI) const { // If subtarget has LSE, leave cmpxchg intact for codegen. if (Subtarget->hasLSE() || Subtarget->outlineAtomics()) return AtomicExpansionKind::None; // At -O0, fast-regalloc cannot cope with the live vregs necessary to // implement cmpxchg without spilling. If the address being exchanged is also // on the stack and close enough to the spill slot, this can lead to a // situation where the monitor always gets cleared and the atomic operation // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. if (getTargetMachine().getOptLevel() == CodeGenOpt::None) return AtomicExpansionKind::None; // 128-bit atomic cmpxchg is weird; AtomicExpand doesn't know how to expand // it. 
unsigned Size = AI->getCompareOperand()->getType()->getPrimitiveSizeInBits(); if (Size > 64) return AtomicExpansionKind::None; return AtomicExpansionKind::LLSC; } Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const { Module *M = Builder.GetInsertBlock()->getParent()->getParent(); bool IsAcquire = isAcquireOrStronger(Ord); // Since i128 isn't legal and intrinsics don't get type-lowered, the ldrexd // intrinsic must return {i64, i64} and we have to recombine them into a // single i128 here. if (ValueTy->getPrimitiveSizeInBits() == 128) { Intrinsic::ID Int = IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp; Function *Ldxr = Intrinsic::getDeclaration(M, Int); Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi"); Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo"); Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi"); Lo = Builder.CreateZExt(Lo, ValueTy, "lo64"); Hi = Builder.CreateZExt(Hi, ValueTy, "hi64"); return Builder.CreateOr( Lo, Builder.CreateShl(Hi, ConstantInt::get(ValueTy, 64)), "val64"); } Type *Tys[] = { Addr->getType() }; Intrinsic::ID Int = IsAcquire ? 
Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr; Function *Ldxr = Intrinsic::getDeclaration(M, Int, Tys); const DataLayout &DL = M->getDataLayout(); IntegerType *IntEltTy = Builder.getIntNTy(DL.getTypeSizeInBits(ValueTy)); Value *Trunc = Builder.CreateTrunc(Builder.CreateCall(Ldxr, Addr), IntEltTy); return Builder.CreateBitCast(Trunc, ValueTy); } void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance( IRBuilderBase &Builder) const { Module *M = Builder.GetInsertBlock()->getParent()->getParent(); Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::aarch64_clrex)); } Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const { Module *M = Builder.GetInsertBlock()->getParent()->getParent(); bool IsRelease = isReleaseOrStronger(Ord); // Since the intrinsics must have legal type, the i128 intrinsics take two // parameters: "i64, i64". We must marshal Val into the appropriate form // before the call. if (Val->getType()->getPrimitiveSizeInBits() == 128) { Intrinsic::ID Int = IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp; Function *Stxr = Intrinsic::getDeclaration(M, Int); Type *Int64Ty = Type::getInt64Ty(M->getContext()); Value *Lo = Builder.CreateTrunc(Val, Int64Ty, "lo"); Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty, "hi"); Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); return Builder.CreateCall(Stxr, {Lo, Hi, Addr}); } Intrinsic::ID Int = IsRelease ? 
Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr; Type *Tys[] = { Addr->getType() }; Function *Stxr = Intrinsic::getDeclaration(M, Int, Tys); const DataLayout &DL = M->getDataLayout(); IntegerType *IntValTy = Builder.getIntNTy(DL.getTypeSizeInBits(Val->getType())); Val = Builder.CreateBitCast(Val, IntValTy); return Builder.CreateCall(Stxr, {Builder.CreateZExtOrBitCast( Val, Stxr->getFunctionType()->getParamType(0)), Addr}); } bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters( Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const { if (!Ty->isArrayTy()) { const TypeSize &TySize = Ty->getPrimitiveSizeInBits(); return TySize.isScalable() && TySize.getKnownMinSize() > 128; } // All non aggregate members of the type must have the same type SmallVector ValueVTs; ComputeValueVTs(*this, DL, Ty, ValueVTs); return is_splat(ValueVTs); } bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &, EVT) const { return false; } static Value *UseTlsOffset(IRBuilderBase &IRB, unsigned Offset) { Module *M = IRB.GetInsertBlock()->getParent()->getParent(); Function *ThreadPointerFunc = Intrinsic::getDeclaration(M, Intrinsic::thread_pointer); return IRB.CreatePointerCast( IRB.CreateConstGEP1_32(IRB.getInt8Ty(), IRB.CreateCall(ThreadPointerFunc), Offset), IRB.getInt8PtrTy()->getPointerTo(0)); } Value *AArch64TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const { // Android provides a fixed TLS slot for the stack cookie. See the definition // of TLS_SLOT_STACK_GUARD in // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h if (Subtarget->isTargetAndroid()) return UseTlsOffset(IRB, 0x28); // Fuchsia is similar. // defines ZX_TLS_STACK_GUARD_OFFSET with this value. 
if (Subtarget->isTargetFuchsia()) return UseTlsOffset(IRB, -0x10); return TargetLowering::getIRStackGuard(IRB); } void AArch64TargetLowering::insertSSPDeclarations(Module &M) const { // MSVC CRT provides functionalities for stack protection. if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) { // MSVC CRT has a global variable holding security cookie. M.getOrInsertGlobal("__security_cookie", Type::getInt8PtrTy(M.getContext())); // MSVC CRT has a function to validate security cookie. FunctionCallee SecurityCheckCookie = M.getOrInsertFunction( "__security_check_cookie", Type::getVoidTy(M.getContext()), Type::getInt8PtrTy(M.getContext())); if (Function *F = dyn_cast(SecurityCheckCookie.getCallee())) { F->setCallingConv(CallingConv::Win64); F->addAttribute(1, Attribute::AttrKind::InReg); } return; } TargetLowering::insertSSPDeclarations(M); } Value *AArch64TargetLowering::getSDagStackGuard(const Module &M) const { // MSVC CRT has a global variable holding security cookie. if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) return M.getGlobalVariable("__security_cookie"); return TargetLowering::getSDagStackGuard(M); } Function *AArch64TargetLowering::getSSPStackGuardCheck(const Module &M) const { // MSVC CRT has a function to validate security cookie. if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) return M.getFunction("__security_check_cookie"); return TargetLowering::getSSPStackGuardCheck(M); } Value * AArch64TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const { // Android provides a fixed TLS slot for the SafeStack pointer. See the // definition of TLS_SLOT_SAFESTACK in // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h if (Subtarget->isTargetAndroid()) return UseTlsOffset(IRB, 0x48); // Fuchsia is similar. // defines ZX_TLS_UNSAFE_SP_OFFSET with this value. 
if (Subtarget->isTargetFuchsia()) return UseTlsOffset(IRB, -0x8); return TargetLowering::getSafeStackPointerLocation(IRB); } bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial( const Instruction &AndI) const { // Only sink 'and' mask to cmp use block if it is masking a single bit, since // this is likely to be fold the and/cmp/br into a single tbz instruction. It // may be beneficial to sink in other cases, but we would have to check that // the cmp would not get folded into the br to form a cbz for these to be // beneficial. ConstantInt* Mask = dyn_cast(AndI.getOperand(1)); if (!Mask) return false; return Mask->getValue().isPowerOf2(); } bool AArch64TargetLowering:: shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd( SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const { // Does baseline recommend not to perform the fold by default? if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd( X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG)) return false; // Else, if this is a vector shift, prefer 'shl'. return X.getValueType().isScalarInteger() || NewShiftOpcode == ISD::SHL; } bool AArch64TargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const { if (DAG.getMachineFunction().getFunction().hasMinSize() && !Subtarget->isTargetWindows() && !Subtarget->isTargetDarwin()) return false; return true; } void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { // Update IsSplitCSR in AArch64unctionInfo. 
AArch64FunctionInfo *AFI = Entry->getParent()->getInfo(); AFI->setIsSplitCSR(true); } void AArch64TargetLowering::insertCopiesSplitCSR( MachineBasicBlock *Entry, const SmallVectorImpl &Exits) const { const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo(); const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); if (!IStart) return; const TargetInstrInfo *TII = Subtarget->getInstrInfo(); MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); MachineBasicBlock::iterator MBBI = Entry->begin(); for (const MCPhysReg *I = IStart; *I; ++I) { const TargetRegisterClass *RC = nullptr; if (AArch64::GPR64RegClass.contains(*I)) RC = &AArch64::GPR64RegClass; else if (AArch64::FPR64RegClass.contains(*I)) RC = &AArch64::FPR64RegClass; else llvm_unreachable("Unexpected register class in CSRsViaCopy!"); Register NewVR = MRI->createVirtualRegister(RC); // Create copy from CSR to a virtual register. // FIXME: this currently does not emit CFI pseudo-instructions, it works // fine for CXX_FAST_TLS since the C++-style TLS access functions should be // nounwind. If we want to generalize this later, we may need to emit // CFI pseudo-instructions. assert(Entry->getParent()->getFunction().hasFnAttribute( Attribute::NoUnwind) && "Function should be nounwind in insertCopiesSplitCSR!"); Entry->addLiveIn(*I); BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) .addReg(*I); // Insert the copy-back instructions right before the terminator. for (auto *Exit : Exits) BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), TII->get(TargetOpcode::COPY), *I) .addReg(NewVR); } } bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const { // Integer division on AArch64 is expensive. However, when aggressively // optimizing for code size, we prefer to use a div instruction, as it is // usually smaller than the alternative sequence. // The exception to this is vector division. 
Since AArch64 doesn't have vector // integer division, leaving the division as-is is a loss even in terms of // size, because it will have to be scalarized, while the alternative code // sequence can be performed in vector form. bool OptSize = Attr.hasFnAttribute(Attribute::MinSize); return OptSize && !VT.isVector(); } bool AArch64TargetLowering::preferIncOfAddToSubOfNot(EVT VT) const { // We want inc-of-add for scalars and sub-of-not for vectors. return VT.isScalarInteger(); } bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const { return Subtarget->hasAggressiveFMA() && VT.isFloatingPoint(); } unsigned AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const { if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows()) return getPointerTy(DL).getSizeInBits(); return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32; } void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const { MF.getFrameInfo().computeMaxCallFrameSize(MF); TargetLoweringBase::finalizeLowering(MF); } // Unlike X86, we let frame lowering assign offsets to all catch objects. bool AArch64TargetLowering::needsFixedCatchObjects() const { return false; } bool AArch64TargetLowering::shouldLocalize( const MachineInstr &MI, const TargetTransformInfo *TTI) const { switch (MI.getOpcode()) { case TargetOpcode::G_GLOBAL_VALUE: { // On Darwin, TLS global vars get selected into function calls, which // we don't want localized, as they can get moved into the middle of a // another call sequence. const GlobalValue &GV = *MI.getOperand(1).getGlobal(); if (GV.isThreadLocal() && Subtarget->isTargetMachO()) return false; break; } // If we legalized G_GLOBAL_VALUE into ADRP + G_ADD_LOW, mark both as being // localizable. 
case AArch64::ADRP: case AArch64::G_ADD_LOW: return true; default: break; } return TargetLoweringBase::shouldLocalize(MI, TTI); } bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const { if (isa(Inst.getType())) return true; for (unsigned i = 0; i < Inst.getNumOperands(); ++i) if (isa(Inst.getOperand(i)->getType())) return true; if (const AllocaInst *AI = dyn_cast(&Inst)) { if (isa(AI->getAllocatedType())) return true; } return false; } // Return the largest legal scalable vector type that matches VT's element type. static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT) { assert(VT.isFixedLengthVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) && "Expected legal fixed length vector!"); switch (VT.getVectorElementType().getSimpleVT().SimpleTy) { default: llvm_unreachable("unexpected element type for SVE container"); case MVT::i8: return EVT(MVT::nxv16i8); case MVT::i16: return EVT(MVT::nxv8i16); case MVT::i32: return EVT(MVT::nxv4i32); case MVT::i64: return EVT(MVT::nxv2i64); case MVT::f16: return EVT(MVT::nxv8f16); case MVT::f32: return EVT(MVT::nxv4f32); case MVT::f64: return EVT(MVT::nxv2f64); } } // Return a PTRUE with active lanes corresponding to the extent of VT. 
// Build a PTRUE whose active lanes match exactly the element count of the
// fixed length vector type VT.
static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
                                                EVT VT) {
  assert(VT.isFixedLengthVector() &&
         DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Expected legal fixed length vector!");

  // Pick the ptrue pattern that enables precisely VT's lane count.
  int PredPattern;
  switch (VT.getVectorNumElements()) {
  default:
    llvm_unreachable("unexpected element count for SVE predicate");
  case 1:
    PredPattern = AArch64SVEPredPattern::vl1;
    break;
  case 2:
    PredPattern = AArch64SVEPredPattern::vl2;
    break;
  case 4:
    PredPattern = AArch64SVEPredPattern::vl4;
    break;
  case 8:
    PredPattern = AArch64SVEPredPattern::vl8;
    break;
  case 16:
    PredPattern = AArch64SVEPredPattern::vl16;
    break;
  case 32:
    PredPattern = AArch64SVEPredPattern::vl32;
    break;
  case 64:
    PredPattern = AArch64SVEPredPattern::vl64;
    break;
  case 128:
    PredPattern = AArch64SVEPredPattern::vl128;
    break;
  case 256:
    PredPattern = AArch64SVEPredPattern::vl256;
    break;
  }

  // TODO: For vectors that are exactly getMaxSVEVectorSizeInBits big, we can
  // use AArch64SVEPredPattern::all, which can enable the use of unpredicated
  // variants of instructions when available.

  // Choose the predicate type whose lane count matches the element width.
  MVT PredVT;
  switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for SVE predicate");
  case MVT::i8:
    PredVT = MVT::nxv16i1;
    break;
  case MVT::i16:
  case MVT::f16:
    PredVT = MVT::nxv8i1;
    break;
  case MVT::i32:
  case MVT::f32:
    PredVT = MVT::nxv4i1;
    break;
  case MVT::i64:
  case MVT::f64:
    PredVT = MVT::nxv2i1;
    break;
  }

  return DAG.getNode(AArch64ISD::PTRUE, DL, PredVT,
                     DAG.getTargetConstant(PredPattern, DL, MVT::i64));
}

// Build an all-lanes-active predicate for a legal scalable vector type.
static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
                                             EVT VT) {
  assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Expected legal scalable vector!");
  auto PredVT = VT.changeVectorElementType(MVT::i1);
  return getPTrue(DAG, DL, PredVT, AArch64SVEPredPattern::all);
}

// Dispatch to the fixed-length or scalable predicate builder based on VT.
static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT) {
  if (!VT.isFixedLengthVector())
    return getPredicateForScalableVector(DAG, DL, VT);
  return getPredicateForFixedLengthVector(DAG, DL, VT);
}

// Grow V to consume an entire SVE register.
static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
  assert(VT.isScalableVector() &&
         "Expected to convert into a scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected a fixed length vector operand!");
  SDLoc DL(V);
  // Insert the fixed-length value at index 0 of an undef scalable register.
  SDValue ZeroIdx = DAG.getConstant(0, DL, MVT::i64);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
                     ZeroIdx);
}

// Shrink V so it's just big enough to maintain a VT's worth of data.
static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT,
                                         SDValue V) {
  assert(VT.isFixedLengthVector() &&
         "Expected to convert into a fixed length vector!");
  assert(V.getValueType().isScalableVector() &&
         "Expected a scalable vector operand!");
  SDLoc DL(V);
  // Extract the low fixed-length portion of the scalable register.
  SDValue ZeroIdx = DAG.getConstant(0, DL, MVT::i64);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, ZeroIdx);
}

// Convert all fixed length vector loads larger than NEON to masked_loads.
SDValue AArch64TargetLowering::LowerFixedLengthVectorLoadToSVE( SDValue Op, SelectionDAG &DAG) const { auto Load = cast(Op); SDLoc DL(Op); EVT VT = Op.getValueType(); EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT); auto NewLoad = DAG.getMaskedLoad( ContainerVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(), getPredicateForFixedLengthVector(DAG, DL, VT), DAG.getUNDEF(ContainerVT), Load->getMemoryVT(), Load->getMemOperand(), Load->getAddressingMode(), Load->getExtensionType()); auto Result = convertFromScalableVector(DAG, VT, NewLoad); SDValue MergedValues[2] = {Result, Load->getChain()}; return DAG.getMergeValues(MergedValues, DL); } static SDValue convertFixedMaskToScalableVector(SDValue Mask, SelectionDAG &DAG) { SDLoc DL(Mask); EVT InVT = Mask.getValueType(); EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT); auto Op1 = convertToScalableVector(DAG, ContainerVT, Mask); auto Op2 = DAG.getConstant(0, DL, ContainerVT); auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT); EVT CmpVT = Pg.getValueType(); return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, CmpVT, {Pg, Op1, Op2, DAG.getCondCode(ISD::SETNE)}); } // Convert all fixed length vector loads larger than NEON to masked_loads. 
SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE( SDValue Op, SelectionDAG &DAG) const { auto Load = cast(Op); if (Load->getExtensionType() != ISD::LoadExtType::NON_EXTLOAD) return SDValue(); SDLoc DL(Op); EVT VT = Op.getValueType(); EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT); SDValue Mask = convertFixedMaskToScalableVector(Load->getMask(), DAG); SDValue PassThru; bool IsPassThruZeroOrUndef = false; if (Load->getPassThru()->isUndef()) { PassThru = DAG.getUNDEF(ContainerVT); IsPassThruZeroOrUndef = true; } else { if (ContainerVT.isInteger()) PassThru = DAG.getConstant(0, DL, ContainerVT); else PassThru = DAG.getConstantFP(0, DL, ContainerVT); if (isZerosVector(Load->getPassThru().getNode())) IsPassThruZeroOrUndef = true; } auto NewLoad = DAG.getMaskedLoad( ContainerVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(), Mask, PassThru, Load->getMemoryVT(), Load->getMemOperand(), Load->getAddressingMode(), Load->getExtensionType()); if (!IsPassThruZeroOrUndef) { SDValue OldPassThru = convertToScalableVector(DAG, ContainerVT, Load->getPassThru()); NewLoad = DAG.getSelect(DL, ContainerVT, Mask, NewLoad, OldPassThru); } auto Result = convertFromScalableVector(DAG, VT, NewLoad); SDValue MergedValues[2] = {Result, Load->getChain()}; return DAG.getMergeValues(MergedValues, DL); } // Convert all fixed length vector stores larger than NEON to masked_stores. 
SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  // Restored stripped template argument: Op is a plain StoreSDNode here.
  auto Store = cast<StoreSDNode>(Op);

  SDLoc DL(Op);
  EVT VT = Store->getValue().getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);

  auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
  return DAG.getMaskedStore(
      Store->getChain(), DL, NewValue, Store->getBasePtr(), Store->getOffset(),
      getPredicateForFixedLengthVector(DAG, DL, VT), Store->getMemoryVT(),
      Store->getMemOperand(), Store->getAddressingMode(),
      Store->isTruncatingStore());
}

SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  // Restored stripped template argument: Op is a masked-store node here.
  auto Store = cast<MaskedStoreSDNode>(Op);

  // Truncating masked stores are not handled by this path.
  if (Store->isTruncatingStore())
    return SDValue();

  SDLoc DL(Op);
  EVT VT = Store->getValue().getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);

  auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
  SDValue Mask = convertFixedMaskToScalableVector(Store->getMask(), DAG);

  return DAG.getMaskedStore(
      Store->getChain(), DL, NewValue, Store->getBasePtr(), Store->getOffset(),
      Mask, Store->getMemoryVT(), Store->getMemOperand(),
      Store->getAddressingMode(), Store->isTruncatingStore());
}

SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();
  bool Signed = Op.getOpcode() == ISD::SDIV;
  unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;

  // Scalable vector i32/i64 DIV is supported.
  if (EltVT == MVT::i32 || EltVT == MVT::i64)
    return LowerToPredicatedOp(Op, DAG, PredOpcode, /*OverrideNEON=*/true);

  // Scalable vector i8/i16 DIV is not supported. Promote it to i32.
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
  EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
  EVT FixedWidenedVT = HalfVT.widenIntegerVectorElementType(*DAG.getContext());
  EVT ScalableWidenedVT = getContainerForFixedLengthVector(DAG, FixedWidenedVT);

  // If this is not a full vector, extend, div, and truncate it.
  EVT WidenedVT = VT.widenIntegerVectorElementType(*DAG.getContext());
  if (DAG.getTargetLoweringInfo().isTypeLegal(WidenedVT)) {
    unsigned ExtendOpcode = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    SDValue Op0 = DAG.getNode(ExtendOpcode, dl, WidenedVT, Op.getOperand(0));
    SDValue Op1 = DAG.getNode(ExtendOpcode, dl, WidenedVT, Op.getOperand(1));
    SDValue Div = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0, Op1);
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Div);
  }

  // Convert the operands to scalable vectors.
  SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
  SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));

  // Extend the scalable operands.
  unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
  unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI;
  SDValue Op0Lo = DAG.getNode(UnpkLo, dl, ScalableWidenedVT, Op0);
  SDValue Op1Lo = DAG.getNode(UnpkLo, dl, ScalableWidenedVT, Op1);
  SDValue Op0Hi = DAG.getNode(UnpkHi, dl, ScalableWidenedVT, Op0);
  SDValue Op1Hi = DAG.getNode(UnpkHi, dl, ScalableWidenedVT, Op1);

  // Convert back to fixed vectors so the DIV can be further lowered.
  Op0Lo = convertFromScalableVector(DAG, FixedWidenedVT, Op0Lo);
  Op1Lo = convertFromScalableVector(DAG, FixedWidenedVT, Op1Lo);
  Op0Hi = convertFromScalableVector(DAG, FixedWidenedVT, Op0Hi);
  Op1Hi = convertFromScalableVector(DAG, FixedWidenedVT, Op1Hi);
  SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, FixedWidenedVT,
                                 Op0Lo, Op1Lo);
  SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, FixedWidenedVT,
                                 Op0Hi, Op1Hi);

  // Convert again to scalable vectors to truncate.
  ResultLo = convertToScalableVector(DAG, ScalableWidenedVT, ResultLo);
  ResultHi = convertToScalableVector(DAG, ScalableWidenedVT, ResultHi);
  SDValue ScalableResult = DAG.getNode(AArch64ISD::UZP1, dl, ContainerVT,
                                       ResultLo, ResultHi);

  return convertFromScalableVector(DAG, VT, ScalableResult);
}

SDValue AArch64TargetLowering::LowerFixedLengthVectorIntExtendToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");

  SDLoc DL(Op);
  SDValue Val = Op.getOperand(0);
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
  Val = convertToScalableVector(DAG, ContainerVT, Val);

  bool Signed = Op.getOpcode() == ISD::SIGN_EXTEND;
  unsigned ExtendOpc = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;

  // Repeatedly unpack Val until the result is of the desired element type.
  switch (ContainerVT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unimplemented container type");
  case MVT::nxv16i8:
    Val = DAG.getNode(ExtendOpc, DL, MVT::nxv8i16, Val);
    if (VT.getVectorElementType() == MVT::i16)
      break;
    LLVM_FALLTHROUGH;
  case MVT::nxv8i16:
    Val = DAG.getNode(ExtendOpc, DL, MVT::nxv4i32, Val);
    if (VT.getVectorElementType() == MVT::i32)
      break;
    LLVM_FALLTHROUGH;
  case MVT::nxv4i32:
    Val = DAG.getNode(ExtendOpc, DL, MVT::nxv2i64, Val);
    assert(VT.getVectorElementType() == MVT::i64 && "Unexpected element type!");
    break;
  }

  return convertFromScalableVector(DAG, VT, Val);
}

SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");

  SDLoc DL(Op);
  SDValue Val = Op.getOperand(0);
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
  Val = convertToScalableVector(DAG, ContainerVT, Val);

  // Repeatedly truncate Val until the result is of the desired element type.
  switch (ContainerVT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unimplemented container type");
  case MVT::nxv2i64:
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv4i32, Val);
    Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv4i32, Val, Val);
    if (VT.getVectorElementType() == MVT::i32)
      break;
    LLVM_FALLTHROUGH;
  case MVT::nxv4i32:
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv8i16, Val);
    Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv8i16, Val, Val);
    if (VT.getVectorElementType() == MVT::i16)
      break;
    LLVM_FALLTHROUGH;
  case MVT::nxv8i16:
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv16i8, Val);
    Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv16i8, Val, Val);
    assert(VT.getVectorElementType() == MVT::i8 && "Unexpected element type!");
    break;
  }

  return convertFromScalableVector(DAG, VT, Val);
}

SDValue AArch64TargetLowering::LowerFixedLengthExtractVectorElt(
    SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  EVT InVT = Op.getOperand(0).getValueType();
  assert(InVT.isFixedLengthVector() && "Expected fixed length vector type!");

  SDLoc DL(Op);
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
  SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0));

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op0, Op.getOperand(1));
}

SDValue AArch64TargetLowering::LowerFixedLengthInsertVectorElt(
    SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");

  SDLoc DL(Op);
  EVT InVT = Op.getOperand(0).getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
  SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0));

  auto ScalableRes = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT, Op0,
                                 Op.getOperand(1), Op.getOperand(2));

  return convertFromScalableVector(DAG, VT, ScalableRes);
}

// Convert vector operation 'Op' to an equivalent predicated operation whereby
// the original operation's type is used to construct a suitable predicate.
// NOTE: The results for inactive lanes are undefined.
SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   unsigned NewOp,
                                                   bool OverrideNEON) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  auto Pg = getPredicateForVector(DAG, DL, VT);

  if (useSVEForFixedLengthVectorVT(VT, OverrideNEON)) {
    EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);

    // Create list of operands by converting existing ones to scalable types.
    SmallVector<SDValue, 4> Operands = {Pg};
    for (const SDValue &V : Op->op_values()) {
      // Condition codes are passed through untouched.
      if (isa<CondCodeSDNode>(V)) {
        Operands.push_back(V);
        continue;
      }

      // VT operands are rewritten to the matching scalable element type.
      if (const VTSDNode *VTNode = dyn_cast<VTSDNode>(V)) {
        EVT VTArg = VTNode->getVT().getVectorElementType();
        EVT NewVTArg = ContainerVT.changeVectorElementType(VTArg);
        Operands.push_back(DAG.getValueType(NewVTArg));
        continue;
      }

      assert(useSVEForFixedLengthVectorVT(V.getValueType(), OverrideNEON) &&
             "Only fixed length vectors are supported!");
      Operands.push_back(convertToScalableVector(DAG, ContainerVT, V));
    }

    if (isMergePassthruOpcode(NewOp))
      Operands.push_back(DAG.getUNDEF(ContainerVT));

    auto ScalableRes = DAG.getNode(NewOp, DL, ContainerVT, Operands);
    return convertFromScalableVector(DAG, VT, ScalableRes);
  }

  assert(VT.isScalableVector() && "Only expect to lower scalable vector op!");

  SmallVector<SDValue, 4> Operands = {Pg};
  for (const SDValue &V : Op->op_values()) {
    assert((!V.getValueType().isVector() ||
            V.getValueType().isScalableVector()) &&
           "Only scalable vectors are supported!");
    Operands.push_back(V);
  }

  if (isMergePassthruOpcode(NewOp))
    Operands.push_back(DAG.getUNDEF(VT));

  return DAG.getNode(NewOp, DL, VT, Operands);
}

// If a fixed length vector operation has no side effects when applied to
// undefined elements, we can safely use scalable vectors to perform the same
// operation without needing to worry about predication.
SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(useSVEForFixedLengthVectorVT(VT) &&
         "Only expected to lower fixed length vector operation!");
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);

  // Create list of operands by converting existing ones to scalable types.
  SmallVector<SDValue, 4> Ops;
  for (const SDValue &V : Op->op_values()) {
    // Restored stripped template argument on the isa<> check below.
    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");

    // Pass through non-vector operands.
    if (!V.getValueType().isVector()) {
      Ops.push_back(V);
      continue;
    }

    // "cast" fixed length vector to a scalable vector.
    assert(useSVEForFixedLengthVectorVT(V.getValueType()) &&
           "Only fixed length vectors are supported!");
    Ops.push_back(convertToScalableVector(DAG, ContainerVT, V));
  }

  auto ScalableRes = DAG.getNode(Op.getOpcode(), SDLoc(Op), ContainerVT, Ops);
  return convertFromScalableVector(DAG, VT, ScalableRes);
}

SDValue AArch64TargetLowering::LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp,
    SelectionDAG &DAG) const {
  SDLoc DL(ScalarOp);
  SDValue AccOp = ScalarOp.getOperand(0);
  SDValue VecOp = ScalarOp.getOperand(1);
  EVT SrcVT = VecOp.getValueType();
  EVT ResVT = SrcVT.getVectorElementType();

  EVT ContainerVT = SrcVT;
  if (SrcVT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
    VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
  }

  SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);

  // Convert operands to Scalable.
  AccOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT,
                      DAG.getUNDEF(ContainerVT), AccOp, Zero);

  // Perform reduction.
  SDValue Rdx = DAG.getNode(AArch64ISD::FADDA_PRED, DL, ContainerVT,
                            Pg, AccOp, VecOp);

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Rdx, Zero);
}

SDValue AArch64TargetLowering::LowerPredReductionToSVE(SDValue ReduceOp,
                                                       SelectionDAG &DAG) const {
  SDLoc DL(ReduceOp);
  SDValue Op = ReduceOp.getOperand(0);
  EVT OpVT = Op.getValueType();
  EVT VT = ReduceOp.getValueType();

  // Only i1 (predicate) reductions are handled here.
  if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1)
    return SDValue();

  SDValue Pg = getPredicateForVector(DAG, DL, OpVT);

  switch (ReduceOp.getOpcode()) {
  default:
    return SDValue();
  case ISD::VECREDUCE_OR:
    return getPTest(DAG, VT, Pg, Op, AArch64CC::ANY_ACTIVE);
  case ISD::VECREDUCE_AND: {
    Op = DAG.getNode(ISD::XOR, DL, OpVT, Op, Pg);
    return getPTest(DAG, VT, Pg, Op, AArch64CC::NONE_ACTIVE);
  }
  case ISD::VECREDUCE_XOR: {
    SDValue ID =
        DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64);
    SDValue Cntp =
        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64, ID, Pg, Op);
    return DAG.getAnyExtOrTrunc(Cntp, DL, VT);
  }
  }

  return SDValue();
}

SDValue AArch64TargetLowering::LowerReductionToSVE(unsigned Opcode,
                                                   SDValue ScalarOp,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(ScalarOp);
  SDValue VecOp = ScalarOp.getOperand(0);
  EVT SrcVT = VecOp.getValueType();

  if (useSVEForFixedLengthVectorVT(SrcVT, /*OverrideNEON=*/true)) {
    EVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
    VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
  }

  // UADDV always returns an i64 result.
  EVT ResVT = (Opcode == AArch64ISD::UADDV_PRED) ? MVT::i64 :
                                                   SrcVT.getVectorElementType();
  EVT RdxVT = SrcVT;
  if (SrcVT.isFixedLengthVector() || Opcode == AArch64ISD::UADDV_PRED)
    RdxVT = getPackedSVEVectorVT(ResVT);

  SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
  SDValue Rdx = DAG.getNode(Opcode, DL, RdxVT, Pg, VecOp);
  SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT,
                            Rdx, DAG.getConstant(0, DL, MVT::i64));

  // The VEC_REDUCE nodes expect an element size result.
  if (ResVT != ScalarOp.getValueType())
    Res = DAG.getAnyExtOrTrunc(Res, DL, ScalarOp.getValueType());

  return Res;
}

SDValue
AArch64TargetLowering::LowerFixedLengthVectorSelectToSVE(SDValue Op,
    SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  EVT InVT = Op.getOperand(1).getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
  SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(1));
  SDValue Op2 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(2));

  // Convert the mask to a predicate (NOTE: We don't need to worry about
  // inactive lanes since VSELECT is safe when given undefined elements).
  EVT MaskVT = Op.getOperand(0).getValueType();
  EVT MaskContainerVT = getContainerForFixedLengthVector(DAG, MaskVT);
  auto Mask = convertToScalableVector(DAG, MaskContainerVT, Op.getOperand(0));
  Mask = DAG.getNode(ISD::TRUNCATE, DL,
                     MaskContainerVT.changeVectorElementType(MVT::i1), Mask);

  auto ScalableRes = DAG.getNode(ISD::VSELECT, DL, ContainerVT,
                                 Mask, Op1, Op2);

  return convertFromScalableVector(DAG, VT, ScalableRes);
}

SDValue AArch64TargetLowering::LowerFixedLengthVectorSetccToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT InVT = Op.getOperand(0).getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);

  assert(useSVEForFixedLengthVectorVT(InVT) &&
         "Only expected to lower fixed length vector operation!");
  assert(Op.getValueType() == InVT.changeTypeToInteger() &&
         "Expected integer result of the same bit length as the inputs!");

  auto Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
  auto Op2 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
  auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);

  EVT CmpVT = Pg.getValueType();
  auto Cmp = DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, CmpVT,
                         {Pg, Op1, Op2, Op.getOperand(2)});

  EVT PromoteVT = ContainerVT.changeTypeToInteger();
  auto Promote = DAG.getBoolExtOrTrunc(Cmp, DL, PromoteVT, InVT);
  // NOTE(review): this function continues past the visible chunk; the body is
  // intentionally truncated at the original cut point.
  return
convertFromScalableVector(DAG, Op.getValueType(), Promote); } SDValue AArch64TargetLowering::LowerFixedLengthBitcastToSVE(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); auto SrcOp = Op.getOperand(0); EVT VT = Op.getValueType(); EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT); EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcOp.getValueType()); SrcOp = convertToScalableVector(DAG, ContainerSrcVT, SrcOp); Op = DAG.getNode(ISD::BITCAST, DL, ContainerDstVT, SrcOp); return convertFromScalableVector(DAG, VT, Op); } SDValue AArch64TargetLowering::LowerFixedLengthConcatVectorsToSVE( SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); unsigned NumOperands = Op->getNumOperands(); assert(NumOperands > 1 && isPowerOf2_32(NumOperands) && "Unexpected number of operands in CONCAT_VECTORS"); auto SrcOp1 = Op.getOperand(0); auto SrcOp2 = Op.getOperand(1); EVT VT = Op.getValueType(); EVT SrcVT = SrcOp1.getValueType(); if (NumOperands > 2) { SmallVector Ops; EVT PairVT = SrcVT.getDoubleNumVectorElementsVT(*DAG.getContext()); for (unsigned I = 0; I < NumOperands; I += 2) Ops.push_back(DAG.getNode(ISD::CONCAT_VECTORS, DL, PairVT, Op->getOperand(I), Op->getOperand(I + 1))); return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops); } EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT); SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, SrcVT); SrcOp1 = convertToScalableVector(DAG, ContainerVT, SrcOp1); SrcOp2 = convertToScalableVector(DAG, ContainerVT, SrcOp2); Op = DAG.getNode(AArch64ISD::SPLICE, DL, ContainerVT, Pg, SrcOp1, SrcOp2); return convertFromScalableVector(DAG, VT, Op); } SDValue AArch64TargetLowering::LowerFixedLengthFPExtendToSVE(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); assert(VT.isFixedLengthVector() && "Expected fixed length vector type!"); SDLoc DL(Op); SDValue Val = Op.getOperand(0); SDValue Pg = getPredicateForVector(DAG, DL, VT); EVT SrcVT = Val.getValueType(); EVT ContainerVT = 
getContainerForFixedLengthVector(DAG, VT); EVT ExtendVT = ContainerVT.changeVectorElementType( SrcVT.getVectorElementType()); Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val); Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT.changeTypeToInteger(), Val); Val = convertToScalableVector(DAG, ContainerVT.changeTypeToInteger(), Val); Val = getSVESafeBitCast(ExtendVT, Val, DAG); Val = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT, Pg, Val, DAG.getUNDEF(ContainerVT)); return convertFromScalableVector(DAG, VT, Val); } SDValue AArch64TargetLowering::LowerFixedLengthFPRoundToSVE(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); assert(VT.isFixedLengthVector() && "Expected fixed length vector type!"); SDLoc DL(Op); SDValue Val = Op.getOperand(0); EVT SrcVT = Val.getValueType(); EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT); EVT RoundVT = ContainerSrcVT.changeVectorElementType( VT.getVectorElementType()); SDValue Pg = getPredicateForVector(DAG, DL, RoundVT); Val = convertToScalableVector(DAG, ContainerSrcVT, Val); Val = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, RoundVT, Pg, Val, Op.getOperand(1), DAG.getUNDEF(RoundVT)); Val = getSVESafeBitCast(ContainerSrcVT.changeTypeToInteger(), Val, DAG); Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val); Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val); return DAG.getNode(ISD::BITCAST, DL, VT, Val); } SDValue AArch64TargetLowering::LowerFixedLengthIntToFPToSVE(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); assert(VT.isFixedLengthVector() && "Expected fixed length vector type!"); bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP; unsigned Opcode = IsSigned ? 
AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU; SDLoc DL(Op); SDValue Val = Op.getOperand(0); EVT SrcVT = Val.getValueType(); EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT); EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT); if (ContainerSrcVT.getVectorElementType().getSizeInBits() <= ContainerDstVT.getVectorElementType().getSizeInBits()) { SDValue Pg = getPredicateForVector(DAG, DL, VT); Val = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, VT.changeTypeToInteger(), Val); Val = convertToScalableVector(DAG, ContainerSrcVT, Val); Val = getSVESafeBitCast(ContainerDstVT.changeTypeToInteger(), Val, DAG); // Safe to use a larger than specified operand since we just unpacked the // data, hence the upper bits are zero. Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val, DAG.getUNDEF(ContainerDstVT)); return convertFromScalableVector(DAG, VT, Val); } else { EVT CvtVT = ContainerSrcVT.changeVectorElementType( ContainerDstVT.getVectorElementType()); SDValue Pg = getPredicateForVector(DAG, DL, CvtVT); Val = convertToScalableVector(DAG, ContainerSrcVT, Val); Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT)); Val = getSVESafeBitCast(ContainerSrcVT, Val, DAG); Val = convertFromScalableVector(DAG, SrcVT, Val); Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val); return DAG.getNode(ISD::BITCAST, DL, VT, Val); } } SDValue AArch64TargetLowering::LowerFixedLengthFPToIntToSVE(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); assert(VT.isFixedLengthVector() && "Expected fixed length vector type!"); bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT; unsigned Opcode = IsSigned ? 
AArch64ISD::FCVTZS_MERGE_PASSTHRU : AArch64ISD::FCVTZU_MERGE_PASSTHRU; SDLoc DL(Op); SDValue Val = Op.getOperand(0); EVT SrcVT = Val.getValueType(); EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT); EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT); if (ContainerSrcVT.getVectorElementType().getSizeInBits() <= ContainerDstVT.getVectorElementType().getSizeInBits()) { EVT CvtVT = ContainerDstVT.changeVectorElementType( ContainerSrcVT.getVectorElementType()); SDValue Pg = getPredicateForVector(DAG, DL, VT); Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val); Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Val); Val = convertToScalableVector(DAG, ContainerSrcVT, Val); Val = getSVESafeBitCast(CvtVT, Val, DAG); Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val, DAG.getUNDEF(ContainerDstVT)); return convertFromScalableVector(DAG, VT, Val); } else { EVT CvtVT = ContainerSrcVT.changeTypeToInteger(); SDValue Pg = getPredicateForVector(DAG, DL, CvtVT); // Safe to use a larger than specified result since an fp_to_int where the // result doesn't fit into the destination is undefined. 
Val = convertToScalableVector(DAG, ContainerSrcVT, Val); Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT)); Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val); return DAG.getNode(ISD::TRUNCATE, DL, VT, Val); } } SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE( SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); assert(VT.isFixedLengthVector() && "Expected fixed length vector type!"); auto *SVN = cast(Op.getNode()); auto ShuffleMask = SVN->getMask(); SDLoc DL(Op); SDValue Op1 = Op.getOperand(0); SDValue Op2 = Op.getOperand(1); EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT); Op1 = convertToScalableVector(DAG, ContainerVT, Op1); Op2 = convertToScalableVector(DAG, ContainerVT, Op2); bool ReverseEXT = false; unsigned Imm; if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm) && Imm == VT.getVectorNumElements() - 1) { if (ReverseEXT) std::swap(Op1, Op2); EVT ScalarTy = VT.getVectorElementType(); if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16)) ScalarTy = MVT::i32; SDValue Scalar = DAG.getNode( ISD::EXTRACT_VECTOR_ELT, DL, ScalarTy, Op1, DAG.getConstant(VT.getVectorNumElements() - 1, DL, MVT::i64)); Op = DAG.getNode(AArch64ISD::INSR, DL, ContainerVT, Op2, Scalar); return convertFromScalableVector(DAG, VT, Op); } return SDValue(); } SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); EVT InVT = Op.getValueType(); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); (void)TLI; assert(VT.isScalableVector() && TLI.isTypeLegal(VT) && InVT.isScalableVector() && TLI.isTypeLegal(InVT) && "Only expect to cast between legal scalable vector types!"); assert((VT.getVectorElementType() == MVT::i1) == (InVT.getVectorElementType() == MVT::i1) && "Cannot cast between data and predicate scalable vector types!"); if (InVT == VT) return Op; if (VT.getVectorElementType() == MVT::i1) return DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op); EVT 
PackedVT = getPackedSVEVectorVT(VT.getVectorElementType()); EVT PackedInVT = getPackedSVEVectorVT(InVT.getVectorElementType()); // Pack input if required. if (InVT != PackedInVT) Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, PackedInVT, Op); Op = DAG.getNode(ISD::BITCAST, DL, PackedVT, Op); // Unpack result if required. if (VT != PackedVT) Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op); return Op; } bool AArch64TargetLowering::isAllActivePredicate(SDValue N) const { return ::isAllActivePredicate(N); } EVT AArch64TargetLowering::getPromotedVTForPredicate(EVT VT) const { return ::getPromotedVTForPredicate(VT); } bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode( SDValue Op, const APInt &OriginalDemandedBits, const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const { unsigned Opc = Op.getOpcode(); switch (Opc) { case AArch64ISD::VSHL: { // Match (VSHL (VLSHR Val X) X) SDValue ShiftL = Op; SDValue ShiftR = Op->getOperand(0); if (ShiftR->getOpcode() != AArch64ISD::VLSHR) return false; if (!ShiftL.hasOneUse() || !ShiftR.hasOneUse()) return false; unsigned ShiftLBits = ShiftL->getConstantOperandVal(1); unsigned ShiftRBits = ShiftR->getConstantOperandVal(1); // Other cases can be handled as well, but this is not // implemented. if (ShiftRBits != ShiftLBits) return false; unsigned ScalarSize = Op.getScalarValueSizeInBits(); assert(ScalarSize > ShiftLBits && "Invalid shift imm"); APInt ZeroBits = APInt::getLowBitsSet(ScalarSize, ShiftLBits); APInt UnusedBits = ~OriginalDemandedBits; if ((ZeroBits & UnusedBits) != ZeroBits) return false; // All bits that are zeroed by (VSHL (VLSHR Val X) X) are not // used - simplify to just Val. 
return TLO.CombineTo(Op, ShiftR->getOperand(0)); } } return TargetLowering::SimplifyDemandedBitsForTargetNode( Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth); } bool AArch64TargetLowering::isConstantUnsignedBitfieldExtactLegal( unsigned Opc, LLT Ty1, LLT Ty2) const { return Ty1 == Ty2 && (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64)); } diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp index 08e4a119127c..edf4d06d4d59 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp @@ -1,1315 +1,1272 @@ //===- AArch64LegalizerInfo.cpp ----------------------------------*- C++ -*-==// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file implements the targeting of the Machinelegalizer class for /// AArch64. /// \todo This should be generated by TableGen. 
//===----------------------------------------------------------------------===// #include "AArch64LegalizerInfo.h" #include "AArch64RegisterBankInfo.h" #include "AArch64Subtarget.h" #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" #include "llvm/CodeGen/GlobalISel/Utils.h" #include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetOpcodes.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/IntrinsicsAArch64.h" #include "llvm/IR/Type.h" #include "llvm/Support/MathExtras.h" #include #define DEBUG_TYPE "aarch64-legalinfo" using namespace llvm; using namespace LegalizeActions; using namespace LegalizeMutations; using namespace LegalityPredicates; AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) : ST(&ST) { using namespace TargetOpcode; const LLT p0 = LLT::pointer(0, 64); const LLT s1 = LLT::scalar(1); const LLT s8 = LLT::scalar(8); const LLT s16 = LLT::scalar(16); const LLT s32 = LLT::scalar(32); const LLT s64 = LLT::scalar(64); const LLT s128 = LLT::scalar(128); const LLT s256 = LLT::scalar(256); const LLT v16s8 = LLT::fixed_vector(16, 8); const LLT v8s8 = LLT::fixed_vector(8, 8); const LLT v4s8 = LLT::fixed_vector(4, 8); const LLT v8s16 = LLT::fixed_vector(8, 16); const LLT v4s16 = LLT::fixed_vector(4, 16); const LLT v2s16 = LLT::fixed_vector(2, 16); const LLT v2s32 = LLT::fixed_vector(2, 32); const LLT v4s32 = LLT::fixed_vector(4, 32); const LLT v2s64 = LLT::fixed_vector(2, 64); const LLT v2p0 = LLT::fixed_vector(2, p0); std::initializer_list PackedVectorAllTypeList = {/* Begin 128bit types */ v16s8, v8s16, v4s32, v2s64, v2p0, /* End 128bit types */ /* Begin 64bit types */ v8s8, v4s16, v2s32}; const TargetMachine &TM = ST.getTargetLowering()->getTargetMachine(); // FIXME: support subtargets which have neon/fp-armv8 disabled. 
if (!ST.hasNEON() || !ST.hasFPARMv8()) { getLegacyLegalizerInfo().computeTables(); return; } // Some instructions only support s16 if the subtarget has full 16-bit FP // support. const bool HasFP16 = ST.hasFullFP16(); const LLT &MinFPScalar = HasFP16 ? s16 : s32; getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_FREEZE}) .legalFor({p0, s1, s8, s16, s32, s64}) .legalFor(PackedVectorAllTypeList) .clampScalar(0, s8, s64) .widenScalarToNextPow2(0, 8) .fewerElementsIf( [=](const LegalityQuery &Query) { return Query.Types[0].isVector() && (Query.Types[0].getElementType() != s64 || Query.Types[0].getNumElements() != 2); }, [=](const LegalityQuery &Query) { LLT EltTy = Query.Types[0].getElementType(); if (EltTy == s64) return std::make_pair(0, LLT::fixed_vector(2, 64)); return std::make_pair(0, EltTy); }); getActionDefinitionsBuilder(G_PHI).legalFor({p0, s16, s32, s64}) .legalFor(PackedVectorAllTypeList) .clampScalar(0, s16, s64) .widenScalarToNextPow2(0); getActionDefinitionsBuilder(G_BSWAP) .legalFor({s32, s64, v4s32, v2s32, v2s64}) .clampScalar(0, s32, s64) - .widenScalarToNextPow2(0) - .customIf(typeIs(0, v2s16)); // custom lower as G_REV32 + G_LSHR + .widenScalarToNextPow2(0); getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR}) .legalFor({s32, s64, v2s32, v4s32, v4s16, v8s16, v16s8, v8s8}) .scalarizeIf( [=](const LegalityQuery &Query) { return Query.Opcode == G_MUL && Query.Types[0] == v2s64; }, 0) .legalFor({v2s64}) .clampScalar(0, s32, s64) .widenScalarToNextPow2(0) .clampNumElements(0, v2s32, v4s32) .clampNumElements(0, v2s64, v2s64) .moreElementsToNextPow2(0); getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR}) .customIf([=](const LegalityQuery &Query) { const auto &SrcTy = Query.Types[0]; const auto &AmtTy = Query.Types[1]; return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 && AmtTy.getSizeInBits() == 32; }) .legalFor({ {s32, s32}, {s32, s64}, {s64, s64}, {v8s8, v8s8}, {v16s8, v16s8}, {v4s16, v4s16}, {v8s16, v8s16}, {v2s32, v2s32}, {v4s32, 
v4s32}, {v2s64, v2s64}, }) .widenScalarToNextPow2(0) .clampScalar(1, s32, s64) .clampScalar(0, s32, s64) .clampNumElements(0, v2s32, v4s32) .clampNumElements(0, v2s64, v2s64) .moreElementsToNextPow2(0) .minScalarSameAs(1, 0); getActionDefinitionsBuilder(G_PTR_ADD) .legalFor({{p0, s64}, {v2p0, v2s64}}) .clampScalar(1, s64, s64); getActionDefinitionsBuilder(G_PTRMASK).legalFor({{p0, s64}}); getActionDefinitionsBuilder({G_SDIV, G_UDIV}) .legalFor({s32, s64}) .libcallFor({s128}) .clampScalar(0, s32, s64) .widenScalarToNextPow2(0) .scalarize(0); getActionDefinitionsBuilder({G_SREM, G_UREM, G_SDIVREM, G_UDIVREM}) .lowerFor({s1, s8, s16, s32, s64}); getActionDefinitionsBuilder({G_SMULO, G_UMULO}).lowerFor({{s64, s1}}); getActionDefinitionsBuilder({G_SMULH, G_UMULH}).legalFor({s32, s64}); getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX}) .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32}) .clampNumElements(0, v8s8, v16s8) .clampNumElements(0, v4s16, v8s16) .clampNumElements(0, v2s32, v4s32) // FIXME: This sholdn't be needed as v2s64 types are going to // be expanded anyway, but G_ICMP doesn't support splitting vectors yet .clampNumElements(0, v2s64, v2s64) .lower(); getActionDefinitionsBuilder( {G_SADDE, G_SSUBE, G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO}) .legalFor({{s32, s1}, {s64, s1}}) .clampScalar(0, s32, s64) .widenScalarToNextPow2(0); getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FNEG}) .legalFor({s32, s64, v2s64, v4s32, v2s32}) .clampNumElements(0, v2s32, v4s32) .clampNumElements(0, v2s64, v2s64); getActionDefinitionsBuilder(G_FREM).libcallFor({s32, s64}); getActionDefinitionsBuilder({G_FCEIL, G_FABS, G_FSQRT, G_FFLOOR, G_FRINT, G_FMA, G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND, G_FNEARBYINT, G_INTRINSIC_LRINT}) // If we don't have full FP16 support, then scalarize the elements of // vectors containing fp16 types. 
.fewerElementsIf( [=, &ST](const LegalityQuery &Query) { const auto &Ty = Query.Types[0]; return Ty.isVector() && Ty.getElementType() == s16 && !ST.hasFullFP16(); }, [=](const LegalityQuery &Query) { return std::make_pair(0, s16); }) // If we don't have full FP16 support, then widen s16 to s32 if we // encounter it. .widenScalarIf( [=, &ST](const LegalityQuery &Query) { return Query.Types[0] == s16 && !ST.hasFullFP16(); }, [=](const LegalityQuery &Query) { return std::make_pair(0, s32); }) .legalFor({s16, s32, s64, v2s32, v4s32, v2s64, v2s16, v4s16, v8s16}); getActionDefinitionsBuilder( {G_FCOS, G_FSIN, G_FLOG10, G_FLOG, G_FLOG2, G_FEXP, G_FEXP2, G_FPOW}) // We need a call for these, so we always need to scalarize. .scalarize(0) // Regardless of FP16 support, widen 16-bit elements to 32-bits. .minScalar(0, s32) .libcallFor({s32, s64, v2s32, v4s32, v2s64}); getActionDefinitionsBuilder(G_INSERT) .unsupportedIf([=](const LegalityQuery &Query) { return Query.Types[0].getSizeInBits() <= Query.Types[1].getSizeInBits(); }) .legalIf([=](const LegalityQuery &Query) { const LLT &Ty0 = Query.Types[0]; const LLT &Ty1 = Query.Types[1]; if (Ty0 != s32 && Ty0 != s64 && Ty0 != p0) return false; return isPowerOf2_32(Ty1.getSizeInBits()) && (Ty1.getSizeInBits() == 1 || Ty1.getSizeInBits() >= 8); }) .clampScalar(0, s32, s64) .widenScalarToNextPow2(0) .maxScalarIf(typeInSet(0, {s32}), 1, s16) .maxScalarIf(typeInSet(0, {s64}), 1, s32) .widenScalarToNextPow2(1); getActionDefinitionsBuilder(G_EXTRACT) .unsupportedIf([=](const LegalityQuery &Query) { return Query.Types[0].getSizeInBits() >= Query.Types[1].getSizeInBits(); }) .legalIf([=](const LegalityQuery &Query) { const LLT &Ty0 = Query.Types[0]; const LLT &Ty1 = Query.Types[1]; if (Ty1 != s32 && Ty1 != s64 && Ty1 != s128) return false; if (Ty1 == p0) return true; return isPowerOf2_32(Ty0.getSizeInBits()) && (Ty0.getSizeInBits() == 1 || Ty0.getSizeInBits() >= 8); }) .clampScalar(1, s32, s128) .widenScalarToNextPow2(1) 
.maxScalarIf(typeInSet(1, {s32}), 0, s16) .maxScalarIf(typeInSet(1, {s64}), 0, s32) .widenScalarToNextPow2(0); getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD}) .lowerIf(atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Unordered)) .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 8}, {s32, p0, s32, 8}, {s64, p0, s8, 2}, {s64, p0, s16, 2}, {s64, p0, s32, 4}, {s64, p0, s64, 8}, {p0, p0, s64, 8}, {v2s32, p0, s64, 8}}) .clampScalar(0, s32, s64) .widenScalarToNextPow2(0) // TODO: We could support sum-of-pow2's but the lowering code doesn't know // how to do that yet. .unsupportedIfMemSizeNotPow2() // Lower anything left over into G_*EXT and G_LOAD .lower(); auto IsPtrVecPred = [=](const LegalityQuery &Query) { const LLT &ValTy = Query.Types[0]; if (!ValTy.isVector()) return false; const LLT EltTy = ValTy.getElementType(); return EltTy.isPointer() && EltTy.getAddressSpace() == 0; }; getActionDefinitionsBuilder(G_LOAD) .legalForTypesWithMemDesc({{s8, p0, s8, 8}, {s16, p0, s16, 8}, {s32, p0, s32, 8}, {s64, p0, s64, 8}, {p0, p0, s64, 8}, {s128, p0, s128, 8}, {v8s8, p0, s64, 8}, {v16s8, p0, s128, 8}, {v4s16, p0, s64, 8}, {v8s16, p0, s128, 8}, {v2s32, p0, s64, 8}, {v4s32, p0, s128, 8}, {v2s64, p0, s128, 8}}) // These extends are also legal .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 8}}) .clampScalar(0, s8, s64) .lowerIfMemSizeNotPow2() .widenScalarToNextPow2(0) .narrowScalarIf([=](const LegalityQuery &Query) { // Clamp extending load results to 32-bits. 
return Query.Types[0].isScalar() && Query.Types[0] != Query.MMODescrs[0].MemoryTy && Query.Types[0].getSizeInBits() > 32; }, changeTo(0, s32)) // Lower any any-extending loads left into G_ANYEXT and G_LOAD .lowerIf([=](const LegalityQuery &Query) { return Query.Types[0] != Query.MMODescrs[0].MemoryTy; }) .clampMaxNumElements(0, s8, 16) .clampMaxNumElements(0, s16, 8) .clampMaxNumElements(0, s32, 4) .clampMaxNumElements(0, s64, 2) .customIf(IsPtrVecPred) .scalarizeIf(typeIs(0, v2s16), 0); getActionDefinitionsBuilder(G_STORE) .legalForTypesWithMemDesc({{s8, p0, s8, 8}, {s16, p0, s8, 8}, // truncstorei8 from s16 {s32, p0, s8, 8}, // truncstorei8 from s32 {s64, p0, s8, 8}, // truncstorei8 from s64 {s16, p0, s16, 8}, {s32, p0, s16, 8}, // truncstorei16 from s32 {s64, p0, s16, 8}, // truncstorei16 from s64 {s32, p0, s8, 8}, {s32, p0, s16, 8}, {s32, p0, s32, 8}, {s64, p0, s64, 8}, {s64, p0, s32, 8}, // truncstorei32 from s64 {p0, p0, s64, 8}, {s128, p0, s128, 8}, {v16s8, p0, s128, 8}, {v8s8, p0, s64, 8}, {v4s16, p0, s64, 8}, {v8s16, p0, s128, 8}, {v2s32, p0, s64, 8}, {v4s32, p0, s128, 8}, {v2s64, p0, s128, 8}}) .clampScalar(0, s8, s64) .lowerIf([=](const LegalityQuery &Query) { return Query.Types[0].isScalar() && Query.Types[0] != Query.MMODescrs[0].MemoryTy; }) // Maximum: sN * k = 128 .clampMaxNumElements(0, s8, 16) .clampMaxNumElements(0, s16, 8) .clampMaxNumElements(0, s32, 4) .clampMaxNumElements(0, s64, 2) .lowerIfMemSizeNotPow2() .customIf(IsPtrVecPred) .scalarizeIf(typeIs(0, v2s16), 0); // Constants getActionDefinitionsBuilder(G_CONSTANT) .legalFor({p0, s8, s16, s32, s64}) .clampScalar(0, s8, s64) .widenScalarToNextPow2(0); getActionDefinitionsBuilder(G_FCONSTANT) .legalIf([=](const LegalityQuery &Query) { const auto &Ty = Query.Types[0]; if (HasFP16 && Ty == s16) return true; return Ty == s32 || Ty == s64 || Ty == s128; }) .clampScalar(0, MinFPScalar, s128); getActionDefinitionsBuilder({G_ICMP, G_FCMP}) .legalFor({{s32, s32}, {s32, s64}, {s32, p0}, {v4s32, 
v4s32}, {v2s32, v2s32}, {v2s64, v2s64}, {v2s64, v2p0}, {v4s16, v4s16}, {v8s16, v8s16}, {v8s8, v8s8}, {v16s8, v16s8}}) .clampScalar(1, s32, s64) .clampScalar(0, s32, s32) .minScalarEltSameAsIf( [=](const LegalityQuery &Query) { const LLT &Ty = Query.Types[0]; const LLT &SrcTy = Query.Types[1]; return Ty.isVector() && !SrcTy.getElementType().isPointer() && Ty.getElementType() != SrcTy.getElementType(); }, 0, 1) .minScalarOrEltIf( [=](const LegalityQuery &Query) { return Query.Types[1] == v2s16; }, 1, s32) .minScalarOrEltIf( [=](const LegalityQuery &Query) { return Query.Types[1] == v2p0; }, 0, s64) .widenScalarOrEltToNextPow2(1) .clampNumElements(0, v2s32, v4s32); // Extensions auto ExtLegalFunc = [=](const LegalityQuery &Query) { unsigned DstSize = Query.Types[0].getSizeInBits(); if (DstSize == 128 && !Query.Types[0].isVector()) return false; // Extending to a scalar s128 needs narrowing. // Make sure that we have something that will fit in a register, and // make sure it's a power of 2. if (DstSize < 8 || DstSize > 128 || !isPowerOf2_32(DstSize)) return false; const LLT &SrcTy = Query.Types[1]; // Special case for s1. if (SrcTy == s1) return true; // Make sure we fit in a register otherwise. Don't bother checking that // the source type is below 128 bits. We shouldn't be allowing anything // through which is wider than the destination in the first place. unsigned SrcSize = SrcTy.getSizeInBits(); if (SrcSize < 8 || !isPowerOf2_32(SrcSize)) return false; return true; }; getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT}) .legalIf(ExtLegalFunc) .clampScalar(0, s64, s64); // Just for s128, others are handled above. 
getActionDefinitionsBuilder(G_TRUNC) .minScalarOrEltIf( [=](const LegalityQuery &Query) { return Query.Types[0].isVector(); }, 0, s8) .customIf([=](const LegalityQuery &Query) { LLT DstTy = Query.Types[0]; LLT SrcTy = Query.Types[1]; return DstTy == v8s8 && SrcTy.getSizeInBits() > 128; }) .alwaysLegal(); getActionDefinitionsBuilder(G_SEXT_INREG).legalFor({s32, s64}).lower(); // FP conversions getActionDefinitionsBuilder(G_FPTRUNC) .legalFor( {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}}) .clampMaxNumElements(0, s32, 2); getActionDefinitionsBuilder(G_FPEXT) .legalFor( {{s32, s16}, {s64, s16}, {s64, s32}, {v4s32, v4s16}, {v2s64, v2s32}}) .clampMaxNumElements(0, s64, 2); // Conversions getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI}) .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32}) .clampScalar(0, s32, s64) .widenScalarToNextPow2(0) .clampScalar(1, s32, s64) .widenScalarToNextPow2(1); getActionDefinitionsBuilder({G_SITOFP, G_UITOFP}) .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32}) .clampScalar(1, s32, s64) .minScalarSameAs(1, 0) .clampScalar(0, s32, s64) .widenScalarToNextPow2(0); // Control-flow getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s8, s16, s32}); getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0}); getActionDefinitionsBuilder(G_SELECT) .legalFor({{s32, s1}, {s64, s1}, {p0, s1}}) .clampScalar(0, s32, s64) .widenScalarToNextPow2(0) .minScalarEltSameAsIf(all(isVector(0), isVector(1)), 1, 0) .lowerIf(isVector(0)); // Pointer-handling getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0}); if (TM.getCodeModel() == CodeModel::Small) getActionDefinitionsBuilder(G_GLOBAL_VALUE).custom(); else getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0}); getActionDefinitionsBuilder(G_PTRTOINT) .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0}) .maxScalar(0, s64) .widenScalarToNextPow2(0, /*Min*/ 8); getActionDefinitionsBuilder(G_INTTOPTR) .unsupportedIf([&](const LegalityQuery &Query) { return 
Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits(); }) .legalFor({{p0, s64}, {v2p0, v2s64}}); // Casts for 32 and 64-bit width type are just copies. // Same for 128-bit width type, except they are on the FPR bank. getActionDefinitionsBuilder(G_BITCAST) // FIXME: This is wrong since G_BITCAST is not allowed to change the // number of bits but it's what the previous code described and fixing // it breaks tests. .legalForCartesianProduct({s1, s8, s16, s32, s64, s128, v16s8, v8s8, v4s8, v8s16, v4s16, v2s16, v4s32, v2s32, v2s64, v2p0}); getActionDefinitionsBuilder(G_VASTART).legalFor({p0}); // va_list must be a pointer, but most sized types are pretty easy to handle // as the destination. getActionDefinitionsBuilder(G_VAARG) .customForCartesianProduct({s8, s16, s32, s64, p0}, {p0}) .clampScalar(0, s8, s64) .widenScalarToNextPow2(0, /*Min*/ 8); getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS) .lowerIf( all(typeInSet(0, {s8, s16, s32, s64, s128}), typeIs(1, s1), typeIs(2, p0))); getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG) .customIf([](const LegalityQuery &Query) { return Query.Types[0].getSizeInBits() == 128; }) .clampScalar(0, s32, s64) .legalIf(all(typeInSet(0, {s32, s64}), typeIs(1, p0))); getActionDefinitionsBuilder( {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB, G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR, G_ATOMICRMW_MIN, G_ATOMICRMW_MAX, G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX}) .clampScalar(0, s32, s64) .legalIf(all(typeInSet(0, {s32, s64}), typeIs(1, p0))); getActionDefinitionsBuilder(G_BLOCK_ADDR).legalFor({p0}); // Merge/Unmerge for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) { unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1; unsigned LitTyIdx = Op == G_MERGE_VALUES ? 
1 : 0; auto notValidElt = [](const LegalityQuery &Query, unsigned TypeIdx) { const LLT &Ty = Query.Types[TypeIdx]; if (Ty.isVector()) { const LLT &EltTy = Ty.getElementType(); if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64) return true; if (!isPowerOf2_32(EltTy.getSizeInBits())) return true; } return false; }; // FIXME: This rule is horrible, but specifies the same as what we had // before with the particularly strange definitions removed (e.g. // s8 = G_MERGE_VALUES s32, s32). // Part of the complexity comes from these ops being extremely flexible. For // example, you can build/decompose vectors with it, concatenate vectors, // etc. and in addition to this you can also bitcast with it at the same // time. We've been considering breaking it up into multiple ops to make it // more manageable throughout the backend. getActionDefinitionsBuilder(Op) // Break up vectors with weird elements into scalars .fewerElementsIf( [=](const LegalityQuery &Query) { return notValidElt(Query, 0); }, scalarize(0)) .fewerElementsIf( [=](const LegalityQuery &Query) { return notValidElt(Query, 1); }, scalarize(1)) // Clamp the big scalar to s8-s128 and make it a power of 2. .clampScalar(BigTyIdx, s8, s128) .widenScalarIf( [=](const LegalityQuery &Query) { const LLT &Ty = Query.Types[BigTyIdx]; return !isPowerOf2_32(Ty.getSizeInBits()) && Ty.getSizeInBits() % 64 != 0; }, [=](const LegalityQuery &Query) { // Pick the next power of 2, or a multiple of 64 over 128. // Whichever is smaller. const LLT &Ty = Query.Types[BigTyIdx]; unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1); if (NewSizeInBits >= 256) { unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1); if (RoundedTo < NewSizeInBits) NewSizeInBits = RoundedTo; } return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits)); }) // Clamp the little scalar to s8-s256 and make it a power of 2. It's not // worth considering the multiples of 64 since 2*192 and 2*384 are not // valid. 
.clampScalar(LitTyIdx, s8, s256) .widenScalarToNextPow2(LitTyIdx, /*Min*/ 8) // So at this point, we have s8, s16, s32, s64, s128, s192, s256, s384, // s512, , , , or . // At this point it's simple enough to accept the legal types. .legalIf([=](const LegalityQuery &Query) { const LLT &BigTy = Query.Types[BigTyIdx]; const LLT &LitTy = Query.Types[LitTyIdx]; if (BigTy.isVector() && BigTy.getSizeInBits() < 32) return false; if (LitTy.isVector() && LitTy.getSizeInBits() < 32) return false; return BigTy.getSizeInBits() % LitTy.getSizeInBits() == 0; }) // Any vectors left are the wrong size. Scalarize them. .scalarize(0) .scalarize(1); } getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT) .unsupportedIf([=](const LegalityQuery &Query) { const LLT &EltTy = Query.Types[1].getElementType(); return Query.Types[0] != EltTy; }) .minScalar(2, s64) .legalIf([=](const LegalityQuery &Query) { const LLT &VecTy = Query.Types[1]; return VecTy == v2s16 || VecTy == v4s16 || VecTy == v8s16 || VecTy == v4s32 || VecTy == v2s64 || VecTy == v2s32 || VecTy == v16s8 || VecTy == v2s32 || VecTy == v2p0; }) .minScalarOrEltIf( [=](const LegalityQuery &Query) { // We want to promote to to if that wouldn't // cause the total vec size to be > 128b. return Query.Types[1].getNumElements() <= 2; }, 0, s64) .minScalarOrEltIf( [=](const LegalityQuery &Query) { return Query.Types[1].getNumElements() <= 4; }, 0, s32) .minScalarOrEltIf( [=](const LegalityQuery &Query) { return Query.Types[1].getNumElements() <= 8; }, 0, s16) .minScalarOrEltIf( [=](const LegalityQuery &Query) { return Query.Types[1].getNumElements() <= 16; }, 0, s8) .minScalarOrElt(0, s8) // Worst case, we need at least s8. 
.clampMaxNumElements(1, s64, 2) .clampMaxNumElements(1, s32, 4) .clampMaxNumElements(1, s16, 8) .clampMaxNumElements(1, p0, 2); getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT) .legalIf(typeInSet(0, {v8s16, v2s32, v4s32, v2s64})); getActionDefinitionsBuilder(G_BUILD_VECTOR) .legalFor({{v8s8, s8}, {v16s8, s8}, {v2s16, s16}, {v4s16, s16}, {v8s16, s16}, {v2s32, s32}, {v4s32, s32}, {v2p0, p0}, {v2s64, s64}}) .clampNumElements(0, v4s32, v4s32) .clampNumElements(0, v2s64, v2s64) .minScalarSameAs(1, 0); getActionDefinitionsBuilder(G_BUILD_VECTOR_TRUNC).lower(); getActionDefinitionsBuilder(G_CTLZ) .legalForCartesianProduct( {s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32}) .scalarize(1); getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF).lower(); // TODO: Custom lowering for v2s32, v4s32, v2s64. getActionDefinitionsBuilder(G_BITREVERSE).legalFor({s32, s64, v8s8, v16s8}); getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF).lower(); // TODO: Handle vector types. getActionDefinitionsBuilder(G_CTTZ) .clampScalar(0, s32, s64) .scalarSameSizeAs(1, 0) .customFor({s32, s64}); getActionDefinitionsBuilder(G_SHUFFLE_VECTOR) .legalIf([=](const LegalityQuery &Query) { const LLT &DstTy = Query.Types[0]; const LLT &SrcTy = Query.Types[1]; // For now just support the TBL2 variant which needs the source vectors // to be the same size as the dest. 
if (DstTy != SrcTy) return false; for (auto &Ty : {v2s32, v4s32, v2s64, v2p0, v16s8, v8s16}) { if (DstTy == Ty) return true; } return false; }) // G_SHUFFLE_VECTOR can have scalar sources (from 1 x s vectors), we // just want those lowered into G_BUILD_VECTOR .lowerIf([=](const LegalityQuery &Query) { return !Query.Types[1].isVector(); }) .moreElementsToNextPow2(0) .clampNumElements(0, v4s32, v4s32) .clampNumElements(0, v2s64, v2s64); getActionDefinitionsBuilder(G_CONCAT_VECTORS) .legalFor({{v4s32, v2s32}, {v8s16, v4s16}}); getActionDefinitionsBuilder(G_JUMP_TABLE).legalFor({{p0}, {s64}}); getActionDefinitionsBuilder(G_BRJT).legalIf([=](const LegalityQuery &Query) { return Query.Types[0] == p0 && Query.Types[1] == s64; }); getActionDefinitionsBuilder(G_DYN_STACKALLOC).lower(); getActionDefinitionsBuilder({G_BZERO, G_MEMCPY, G_MEMMOVE, G_MEMSET}) .libcall(); // FIXME: Legal types are only legal with NEON. getActionDefinitionsBuilder(G_ABS) .lowerIf(isScalar(0)) .legalFor(PackedVectorAllTypeList); getActionDefinitionsBuilder(G_VECREDUCE_FADD) // We only have FADDP to do reduction-like operations. Lower the rest. 
.legalFor({{s32, v2s32}, {s64, v2s64}}) .clampMaxNumElements(1, s64, 2) .clampMaxNumElements(1, s32, 2) .lower(); getActionDefinitionsBuilder(G_VECREDUCE_ADD) .legalFor( {{s8, v16s8}, {s16, v8s16}, {s32, v4s32}, {s32, v2s32}, {s64, v2s64}}) .clampMaxNumElements(1, s64, 2) .clampMaxNumElements(1, s32, 4) .lower(); getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT}) .lowerIf([=](const LegalityQuery &Q) { return Q.Types[0].isScalar(); }); getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower(); getActionDefinitionsBuilder(G_ROTR) .legalFor({{s32, s64}, {s64, s64}}) .customIf([=](const LegalityQuery &Q) { return Q.Types[0].isScalar() && Q.Types[1].getScalarSizeInBits() < 64; }) .lower(); getActionDefinitionsBuilder(G_ROTL).lower(); getActionDefinitionsBuilder({G_SBFX, G_UBFX}) .customFor({{s32, s32}, {s64, s64}}); // TODO: Custom legalization for s128 // TODO: Use generic lowering when custom lowering is not possible. auto always = [=](const LegalityQuery &Q) { return true; }; getActionDefinitionsBuilder(G_CTPOP) .legalFor({{v8s8, v8s8}, {v16s8, v16s8}}) .clampScalar(0, s32, s128) .widenScalarToNextPow2(0) .minScalarEltSameAsIf(always, 1, 0) .maxScalarEltSameAsIf(always, 1, 0) .customFor({{s32, s32}, {s64, s64}, {v2s64, v2s64}, {v2s32, v2s32}, {v4s32, v4s32}, {v4s16, v4s16}, {v8s16, v8s16}}); getLegacyLegalizerInfo().computeTables(); verify(*ST.getInstrInfo()); } bool AArch64LegalizerInfo::legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI) const { MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); GISelChangeObserver &Observer = Helper.Observer; switch (MI.getOpcode()) { default: // No idea what to do. 
return false; case TargetOpcode::G_VAARG: return legalizeVaArg(MI, MRI, MIRBuilder); case TargetOpcode::G_LOAD: case TargetOpcode::G_STORE: return legalizeLoadStore(MI, MRI, MIRBuilder, Observer); - case TargetOpcode::G_BSWAP: - return legalizeBSwap(MI, MRI, MIRBuilder); case TargetOpcode::G_SHL: case TargetOpcode::G_ASHR: case TargetOpcode::G_LSHR: return legalizeShlAshrLshr(MI, MRI, MIRBuilder, Observer); case TargetOpcode::G_GLOBAL_VALUE: return legalizeSmallCMGlobalValue(MI, MRI, MIRBuilder, Observer); case TargetOpcode::G_TRUNC: return legalizeVectorTrunc(MI, Helper); case TargetOpcode::G_SBFX: case TargetOpcode::G_UBFX: return legalizeBitfieldExtract(MI, MRI, Helper); case TargetOpcode::G_ROTR: return legalizeRotate(MI, MRI, Helper); case TargetOpcode::G_CTPOP: return legalizeCTPOP(MI, MRI, Helper); case TargetOpcode::G_ATOMIC_CMPXCHG: return legalizeAtomicCmpxchg128(MI, MRI, Helper); case TargetOpcode::G_CTTZ: return legalizeCTTZ(MI, Helper); } llvm_unreachable("expected switch to return"); } bool AArch64LegalizerInfo::legalizeRotate(MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const { // To allow for imported patterns to match, we ensure that the rotate amount // is 64b with an extension. 
Register AmtReg = MI.getOperand(2).getReg(); LLT AmtTy = MRI.getType(AmtReg); (void)AmtTy; assert(AmtTy.isScalar() && "Expected a scalar rotate"); assert(AmtTy.getSizeInBits() < 64 && "Expected this rotate to be legal"); auto NewAmt = Helper.MIRBuilder.buildSExt(LLT::scalar(64), AmtReg); Helper.Observer.changingInstr(MI); MI.getOperand(2).setReg(NewAmt.getReg(0)); Helper.Observer.changedInstr(MI); return true; } static void extractParts(Register Reg, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder, LLT Ty, int NumParts, SmallVectorImpl &VRegs) { for (int I = 0; I < NumParts; ++I) VRegs.push_back(MRI.createGenericVirtualRegister(Ty)); MIRBuilder.buildUnmerge(VRegs, Reg); } bool AArch64LegalizerInfo::legalizeVectorTrunc( MachineInstr &MI, LegalizerHelper &Helper) const { MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); // Similar to how operand splitting is done in SelectiondDAG, we can handle // %res(v8s8) = G_TRUNC %in(v8s32) by generating: // %inlo(<4x s32>), %inhi(<4 x s32>) = G_UNMERGE %in(<8 x s32>) // %lo16(<4 x s16>) = G_TRUNC %inlo // %hi16(<4 x s16>) = G_TRUNC %inhi // %in16(<8 x s16>) = G_CONCAT_VECTORS %lo16, %hi16 // %res(<8 x s8>) = G_TRUNC %in16 Register DstReg = MI.getOperand(0).getReg(); Register SrcReg = MI.getOperand(1).getReg(); LLT DstTy = MRI.getType(DstReg); LLT SrcTy = MRI.getType(SrcReg); assert(isPowerOf2_32(DstTy.getSizeInBits()) && isPowerOf2_32(SrcTy.getSizeInBits())); // Split input type. LLT SplitSrcTy = SrcTy.changeElementCount(SrcTy.getElementCount().divideCoefficientBy(2)); // First, split the source into two smaller vectors. SmallVector SplitSrcs; extractParts(SrcReg, MRI, MIRBuilder, SplitSrcTy, 2, SplitSrcs); // Truncate the splits into intermediate narrower elements. 
// (tail of AArch64LegalizerInfo::legalizeVectorTrunc, continued from above)
// Truncate each split half to elements twice the destination width, then
// concatenate and retarget the original G_TRUNC at the intermediate vector.
  LLT InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits() * 2);
  for (unsigned I = 0; I < SplitSrcs.size(); ++I)
    SplitSrcs[I] = MIRBuilder.buildTrunc(InterTy, SplitSrcs[I]).getReg(0);
  auto Concat = MIRBuilder.buildConcatVectors(
      DstTy.changeElementSize(DstTy.getScalarSizeInBits() * 2), SplitSrcs);

  Helper.Observer.changingInstr(MI);
  MI.getOperand(1).setReg(Concat.getReg(0));
  Helper.Observer.changedInstr(MI);
  return true;
}

bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  // We do this custom legalization to convert G_GLOBAL_VALUE into target ADRP +
  // G_ADD_LOW instructions.
  // By splitting this here, we can optimize accesses in the small code model by
  // folding in the G_ADD_LOW into the load/store offset.
  auto &GlobalOp = MI.getOperand(1);
  const auto* GV = GlobalOp.getGlobal();
  if (GV->isThreadLocal())
    return true; // Don't want to modify TLS vars.

  auto &TM = ST->getTargetLowering()->getTargetMachine();
  unsigned OpFlags = ST->ClassifyGlobalReference(GV, TM);

  // GOT-indirected globals are left alone for the generic patterns to handle.
  if (OpFlags & AArch64II::MO_GOT)
    return true;

  auto Offset = GlobalOp.getOffset();
  Register DstReg = MI.getOperand(0).getReg();
  auto ADRP = MIRBuilder.buildInstr(AArch64::ADRP, {LLT::pointer(0, 64)}, {})
                  .addGlobalAddress(GV, Offset, OpFlags | AArch64II::MO_PAGE);
  // Set the regclass on the dest reg too.
  MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);

  // MO_TAGGED on the page indicates a tagged address. Set the tag now. We do so
  // by creating a MOVK that sets bits 48-63 of the register to (global address
  // + 0x100000000 - PC) >> 48. The additional 0x100000000 offset here is to
  // prevent an incorrect tag being generated during relocation when the
  // global appears before the code section. Without the offset, a global at
  // `0x0f00'0000'0000'1000` (i.e. at `0x1000` with tag `0xf`) that's referenced
  // by code at `0x2000` would result in `0x0f00'0000'0000'1000 - 0x2000 =
  // 0x0eff'ffff'ffff'f000`, meaning the tag would be incorrectly set to `0xe`
  // instead of `0xf`.
  // This assumes that we're in the small code model so we can assume a binary
  // size of <= 4GB, which makes the untagged PC relative offset positive. The
  // binary must also be loaded into address range [0, 2^48). Both of these
  // properties need to be ensured at runtime when using tagged addresses.
  if (OpFlags & AArch64II::MO_TAGGED) {
    assert(!Offset &&
           "Should not have folded in an offset for a tagged global!");
    ADRP = MIRBuilder.buildInstr(AArch64::MOVKXi, {LLT::pointer(0, 64)}, {ADRP})
               .addGlobalAddress(GV, 0x100000000,
                                 AArch64II::MO_PREL | AArch64II::MO_G3)
               .addImm(48);
    MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);
  }

  MIRBuilder.buildInstr(AArch64::G_ADD_LOW, {DstReg}, {ADRP})
      .addGlobalAddress(GV, Offset,
                        OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  MI.eraseFromParent();
  return true;
}

// Target hook for intrinsic legalization: nothing requires custom handling in
// this target, so every intrinsic is reported as already legalized.
bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                             MachineInstr &MI) const {
  return true;
}

bool AArch64LegalizerInfo::legalizeShlAshrLshr(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR ||
         MI.getOpcode() == TargetOpcode::G_SHL);
  // If the shift amount is a G_CONSTANT, promote it to a 64 bit type so the
  // imported patterns can select it later. Either way, it will be legal.
  Register AmtReg = MI.getOperand(2).getReg();
  auto VRegAndVal = getConstantVRegValWithLookThrough(AmtReg, MRI);
  if (!VRegAndVal)
    return true;
  // Check the shift amount is in range for an immediate form.
  int64_t Amount = VRegAndVal->Value.getSExtValue();
  // NOTE(review): 31 looks conservative for 64-bit shifts (amounts 32-63
  // should also be immediate-encodable) -- confirm against imported patterns.
  if (Amount > 31)
    return true; // This will have to remain a register variant.
auto ExtCst = MIRBuilder.buildConstant(LLT::scalar(64), Amount); Observer.changingInstr(MI); MI.getOperand(2).setReg(ExtCst.getReg(0)); Observer.changedInstr(MI); return true; } // FIXME: This should be removed and replaced with the generic bitcast legalize // action. bool AArch64LegalizerInfo::legalizeLoadStore( MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder, GISelChangeObserver &Observer) const { assert(MI.getOpcode() == TargetOpcode::G_STORE || MI.getOpcode() == TargetOpcode::G_LOAD); // Here we just try to handle vector loads/stores where our value type might // have pointer elements, which the SelectionDAG importer can't handle. To // allow the existing patterns for s64 to fire for p0, we just try to bitcast // the value to use s64 types. // Custom legalization requires the instruction, if not deleted, must be fully // legalized. In order to allow further legalization of the inst, we create // a new instruction and erase the existing one. Register ValReg = MI.getOperand(0).getReg(); const LLT ValTy = MRI.getType(ValReg); if (!ValTy.isVector() || !ValTy.getElementType().isPointer() || ValTy.getElementType().getAddressSpace() != 0) { LLVM_DEBUG(dbgs() << "Tried to do custom legalization on wrong load/store"); return false; } unsigned PtrSize = ValTy.getElementType().getSizeInBits(); const LLT NewTy = LLT::vector(ValTy.getElementCount(), PtrSize); auto &MMO = **MI.memoperands_begin(); MMO.setType(NewTy); if (MI.getOpcode() == TargetOpcode::G_STORE) { auto Bitcast = MIRBuilder.buildBitcast(NewTy, ValReg); MIRBuilder.buildStore(Bitcast.getReg(0), MI.getOperand(1), MMO); } else { auto NewLoad = MIRBuilder.buildLoad(NewTy, MI.getOperand(1), MMO); MIRBuilder.buildBitcast(ValReg, NewLoad); } MI.eraseFromParent(); return true; } -bool AArch64LegalizerInfo::legalizeBSwap(MachineInstr &MI, - MachineRegisterInfo &MRI, - MachineIRBuilder &MIRBuilder) const { - assert(MI.getOpcode() == TargetOpcode::G_BSWAP); - - // The <2 x half> case needs special 
lowering because there isn't an - // instruction that does that directly. Instead, we widen to <8 x i8> - // and emit a G_REV32 followed by a G_LSHR knowing that instruction selection - // will later match them as: - // - // rev32.8b v0, v0 - // ushr.2s v0, v0, #16 - // - // We could emit those here directly, but it seems better to keep things as - // generic as possible through legalization, and avoid committing layering - // violations by legalizing & selecting here at the same time. - - Register ValReg = MI.getOperand(1).getReg(); - assert(LLT::fixed_vector(2, 16) == MRI.getType(ValReg)); - const LLT v2s32 = LLT::fixed_vector(2, 32); - const LLT v8s8 = LLT::fixed_vector(8, 8); - const LLT s32 = LLT::scalar(32); - - auto Undef = MIRBuilder.buildUndef(v8s8); - auto Insert = - MIRBuilder - .buildInstr(TargetOpcode::INSERT_SUBREG, {v8s8}, {Undef, ValReg}) - .addImm(AArch64::ssub); - auto Rev32 = MIRBuilder.buildInstr(AArch64::G_REV32, {v8s8}, {Insert}); - auto Bitcast = MIRBuilder.buildBitcast(v2s32, Rev32); - auto Amt = MIRBuilder.buildConstant(v2s32, 16); - auto UShr = - MIRBuilder.buildInstr(TargetOpcode::G_LSHR, {v2s32}, {Bitcast, Amt}); - auto Zero = MIRBuilder.buildConstant(s32, 0); - auto Extract = MIRBuilder.buildExtractVectorElement(s32, UShr, Zero); - MIRBuilder.buildBitcast({MI.getOperand(0).getReg()}, Extract); - MI.eraseFromParent(); - return true; -} - bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder) const { MachineFunction &MF = MIRBuilder.getMF(); Align Alignment(MI.getOperand(2).getImm()); Register Dst = MI.getOperand(0).getReg(); Register ListPtr = MI.getOperand(1).getReg(); LLT PtrTy = MRI.getType(ListPtr); LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits()); const unsigned PtrSize = PtrTy.getSizeInBits() / 8; const Align PtrAlign = Align(PtrSize); auto List = MIRBuilder.buildLoad( PtrTy, ListPtr, *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad, PtrTy, 
PtrAlign));
  // (tail of AArch64LegalizerInfo::legalizeVaArg, continued from above)
  MachineInstrBuilder DstPtr;
  if (Alignment > PtrAlign) {
    // Realign the list to the actual required alignment.
    auto AlignMinus1 =
        MIRBuilder.buildConstant(IntPtrTy, Alignment.value() - 1);
    auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0));
    DstPtr = MIRBuilder.buildMaskLowPtrBits(PtrTy, ListTmp, Log2(Alignment));
  } else
    DstPtr = List;

  // Load the argument value itself from the (possibly realigned) slot.
  LLT ValTy = MRI.getType(Dst);
  uint64_t ValSize = ValTy.getSizeInBits() / 8;
  MIRBuilder.buildLoad(
      Dst, DstPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               ValTy, std::max(Alignment, PtrAlign)));

  // Advance the va_list pointer past the slot (rounded up to pointer
  // alignment) and write the updated list pointer back.
  auto Size = MIRBuilder.buildConstant(IntPtrTy, alignTo(ValSize, PtrAlign));

  auto NewList = MIRBuilder.buildPtrAdd(PtrTy, DstPtr, Size.getReg(0));

  MIRBuilder.buildStore(NewList, ListPtr,
                        *MF.getMachineMemOperand(MachinePointerInfo(),
                                                 MachineMemOperand::MOStore,
                                                 PtrTy, PtrAlign));

  MI.eraseFromParent();
  return true;
}

// G_SBFX/G_UBFX: treated as legal only when both lsb and width operands are
// constants, per the comment below (only immediate forms can be selected).
bool AArch64LegalizerInfo::legalizeBitfieldExtract(
    MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
  // Only legal if we can select immediate forms.
  // TODO: Lower this otherwise.
  return getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI) &&
         getConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
}

bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         LegalizerHelper &Helper) const {
  // While there is no integer popcount instruction, it can
  // be more efficiently lowered to the following sequence that uses
  // AdvSIMD registers/instructions as long as the copies to/from
  // the AdvSIMD registers are cheap.
// FMOV D0, X0 // copy 64-bit int to vector, high bits zero'd // CNT V0.8B, V0.8B // 8xbyte pop-counts // ADDV B0, V0.8B // sum 8xbyte pop-counts // UMOV X0, V0.B[0] // copy byte result back to integer reg // // For 128 bit vector popcounts, we lower to the following sequence: // cnt.16b v0, v0 // v8s16, v4s32, v2s64 // uaddlp.8h v0, v0 // v8s16, v4s32, v2s64 // uaddlp.4s v0, v0 // v4s32, v2s64 // uaddlp.2d v0, v0 // v2s64 // // For 64 bit vector popcounts, we lower to the following sequence: // cnt.8b v0, v0 // v4s16, v2s32 // uaddlp.4h v0, v0 // v4s16, v2s32 // uaddlp.2s v0, v0 // v2s32 if (!ST->hasNEON() || MI.getMF()->getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) return false; MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; Register Dst = MI.getOperand(0).getReg(); Register Val = MI.getOperand(1).getReg(); LLT Ty = MRI.getType(Val); assert(Ty == MRI.getType(Dst) && "Expected src and dst to have the same type!"); unsigned Size = Ty.getSizeInBits(); // Pre-conditioning: widen Val up to the nearest vector type. // s32,s64,v4s16,v2s32 -> v8i8 // v8s16,v4s32,v2s64 -> v16i8 LLT VTy = Size == 128 ? LLT::fixed_vector(16, 8) : LLT::fixed_vector(8, 8); if (Ty.isScalar()) { // TODO: Handle s128. assert((Size == 32 || Size == 64) && "Expected only 32 or 64 bit scalars!"); if (Size == 32) { Val = MIRBuilder.buildZExt(LLT::scalar(64), Val).getReg(0); } } Val = MIRBuilder.buildBitcast(VTy, Val).getReg(0); // Count bits in each byte-sized lane. auto CTPOP = MIRBuilder.buildCTPOP(VTy, Val); // Sum across lanes. 
Register HSum = CTPOP.getReg(0); unsigned Opc; SmallVector HAddTys; if (Ty.isScalar()) { Opc = Intrinsic::aarch64_neon_uaddlv; HAddTys.push_back(LLT::scalar(32)); } else if (Ty == LLT::fixed_vector(8, 16)) { Opc = Intrinsic::aarch64_neon_uaddlp; HAddTys.push_back(LLT::fixed_vector(8, 16)); } else if (Ty == LLT::fixed_vector(4, 32)) { Opc = Intrinsic::aarch64_neon_uaddlp; HAddTys.push_back(LLT::fixed_vector(8, 16)); HAddTys.push_back(LLT::fixed_vector(4, 32)); } else if (Ty == LLT::fixed_vector(2, 64)) { Opc = Intrinsic::aarch64_neon_uaddlp; HAddTys.push_back(LLT::fixed_vector(8, 16)); HAddTys.push_back(LLT::fixed_vector(4, 32)); HAddTys.push_back(LLT::fixed_vector(2, 64)); } else if (Ty == LLT::fixed_vector(4, 16)) { Opc = Intrinsic::aarch64_neon_uaddlp; HAddTys.push_back(LLT::fixed_vector(4, 16)); } else if (Ty == LLT::fixed_vector(2, 32)) { Opc = Intrinsic::aarch64_neon_uaddlp; HAddTys.push_back(LLT::fixed_vector(4, 16)); HAddTys.push_back(LLT::fixed_vector(2, 32)); } else llvm_unreachable("unexpected vector shape"); MachineInstrBuilder UADD; for (LLT HTy : HAddTys) { UADD = MIRBuilder.buildIntrinsic(Opc, {HTy}, /*HasSideEffects =*/false) .addUse(HSum); HSum = UADD.getReg(0); } // Post-conditioning. if (Ty.isScalar() && Size == 64) MIRBuilder.buildZExt(Dst, UADD); else UADD->getOperand(0).setReg(Dst); MI.eraseFromParent(); return true; } bool AArch64LegalizerInfo::legalizeAtomicCmpxchg128( MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const { MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; LLT s64 = LLT::scalar(64); auto Addr = MI.getOperand(1).getReg(); auto DesiredI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(2)); auto NewI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(3)); auto DstLo = MRI.createGenericVirtualRegister(s64); auto DstHi = MRI.createGenericVirtualRegister(s64); MachineInstrBuilder CAS; if (ST->hasLSE()) { // We have 128-bit CASP instructions taking XSeqPair registers, which are // s128. 
We need the merge/unmerge to bracket the expansion and pair up with // the rest of the MIR so we must reassemble the extracted registers into a // 128-bit known-regclass one with code like this: // // %in1 = REG_SEQUENCE Lo, Hi ; One for each input // %out = CASP %in1, ... // %OldLo = G_EXTRACT %out, 0 // %OldHi = G_EXTRACT %out, 64 auto Ordering = (*MI.memoperands_begin())->getMergedOrdering(); unsigned Opcode; switch (Ordering) { case AtomicOrdering::Acquire: Opcode = AArch64::CASPAX; break; case AtomicOrdering::Release: Opcode = AArch64::CASPLX; break; case AtomicOrdering::AcquireRelease: case AtomicOrdering::SequentiallyConsistent: Opcode = AArch64::CASPALX; break; default: Opcode = AArch64::CASPX; break; } LLT s128 = LLT::scalar(128); auto CASDst = MRI.createGenericVirtualRegister(s128); auto CASDesired = MRI.createGenericVirtualRegister(s128); auto CASNew = MRI.createGenericVirtualRegister(s128); MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASDesired}, {}) .addUse(DesiredI->getOperand(0).getReg()) .addImm(AArch64::sube64) .addUse(DesiredI->getOperand(1).getReg()) .addImm(AArch64::subo64); MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASNew}, {}) .addUse(NewI->getOperand(0).getReg()) .addImm(AArch64::sube64) .addUse(NewI->getOperand(1).getReg()) .addImm(AArch64::subo64); CAS = MIRBuilder.buildInstr(Opcode, {CASDst}, {CASDesired, CASNew, Addr}); MIRBuilder.buildExtract({DstLo}, {CASDst}, 0); MIRBuilder.buildExtract({DstHi}, {CASDst}, 64); } else { // The -O0 CMP_SWAP_128 is friendlier to generate code for because LDXP/STXP // can take arbitrary registers so it just has the normal GPR64 operands the // rest of AArch64 is expecting. 
auto Ordering = (*MI.memoperands_begin())->getMergedOrdering(); unsigned Opcode; switch (Ordering) { case AtomicOrdering::Acquire: Opcode = AArch64::CMP_SWAP_128_ACQUIRE; break; case AtomicOrdering::Release: Opcode = AArch64::CMP_SWAP_128_RELEASE; break; case AtomicOrdering::AcquireRelease: case AtomicOrdering::SequentiallyConsistent: Opcode = AArch64::CMP_SWAP_128; break; default: Opcode = AArch64::CMP_SWAP_128_MONOTONIC; break; } auto Scratch = MRI.createVirtualRegister(&AArch64::GPR64RegClass); CAS = MIRBuilder.buildInstr(Opcode, {DstLo, DstHi, Scratch}, {Addr, DesiredI->getOperand(0), DesiredI->getOperand(1), NewI->getOperand(0), NewI->getOperand(1)}); } CAS.cloneMemRefs(MI); constrainSelectedInstRegOperands(*CAS, *ST->getInstrInfo(), *MRI.getTargetRegisterInfo(), *ST->getRegBankInfo()); MIRBuilder.buildMerge(MI.getOperand(0), {DstLo, DstHi}); MI.eraseFromParent(); return true; } bool AArch64LegalizerInfo::legalizeCTTZ(MachineInstr &MI, LegalizerHelper &Helper) const { MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); LLT Ty = MRI.getType(MI.getOperand(1).getReg()); auto BitReverse = MIRBuilder.buildBitReverse(Ty, MI.getOperand(1)); MIRBuilder.buildCTLZ(MI.getOperand(0).getReg(), BitReverse); MI.eraseFromParent(); return true; } diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h index 78fc24559d71..35456d95dc2b 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h +++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h @@ -1,65 +1,63 @@ //===- AArch64LegalizerInfo --------------------------------------*- C++ -*-==// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file declares the targeting of the Machinelegalizer class for /// AArch64. /// \todo This should be generated by TableGen. //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64MACHINELEGALIZER_H #define LLVM_LIB_TARGET_AARCH64_AARCH64MACHINELEGALIZER_H #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" namespace llvm { class LLVMContext; class AArch64Subtarget; /// This class provides the information for the target register banks. class AArch64LegalizerInfo : public LegalizerInfo { public: AArch64LegalizerInfo(const AArch64Subtarget &ST); bool legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI) const override; bool legalizeIntrinsic(LegalizerHelper &Helper, MachineInstr &MI) const override; private: - bool legalizeBSwap(MachineInstr &MI, MachineRegisterInfo &MRI, - MachineIRBuilder &MIRBuilder) const; bool legalizeVaArg(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder) const; bool legalizeLoadStore(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder, GISelChangeObserver &Observer) const; bool legalizeShlAshrLshr(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder, GISelChangeObserver &Observer) const; bool legalizeSmallCMGlobalValue(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder, GISelChangeObserver &Observer) const; bool legalizeVectorTrunc(MachineInstr &MI, LegalizerHelper &Helper) const; bool legalizeBitfieldExtract(MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const; bool legalizeRotate(MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const; 
bool legalizeCTPOP(MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const; bool legalizeAtomicCmpxchg128(MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const; bool legalizeCTTZ(MachineInstr &MI, LegalizerHelper &Helper) const; const AArch64Subtarget *ST; }; } // End llvm namespace. #endif diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp index 4cde7971e597..86cb86b19d62 100644 --- a/llvm/lib/Target/X86/X86FrameLowering.cpp +++ b/llvm/lib/Target/X86/X86FrameLowering.cpp @@ -1,3719 +1,3723 @@ //===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file contains the X86 implementation of TargetFrameLowering class. 
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
// NOTE(review): the header name of the following include was lost in
// formatting -- TODO confirm against upstream and restore it.
#include 

#define DEBUG_TYPE "x86-fl"

// Statistics counters referenced by the inline-stack-probe emission code
// further down in this file.
STATISTIC(NumFrameLoopProbe, "Number of loop stack probes used in prologue");
STATISTIC(NumFrameExtraProbe,
          "Number of extra stack probes generated in prologue");

using namespace llvm;

/// Construct the X86 frame lowering and cache frequently used, per-subtarget
/// frame predicates (slot size, 64-bit-ness, LP64, stack register) so later
/// queries do not have to re-derive them from the subtarget.
X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
                                   MaybeAlign StackAlignOverride)
    : TargetFrameLowering(StackGrowsDown, StackAlignOverride.valueOrOne(),
                          // Local-area offset: the return-address slot is 8
                          // bytes below SP on 64-bit targets, 4 on 32-bit.
                          STI.is64Bit() ? -8 : -4),
      STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
  // Cache a bunch of frame-related predicates for this subtarget.
  SlotSize = TRI->getSlotSize();
  Is64Bit = STI.is64Bit();
  IsLP64 = STI.isTarget64BitLP64();
  // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
  Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  StackPtr = TRI->getStackRegister();
}

/// Return true if the call frame can be statically reserved in the prologue:
/// no variable-sized objects, no push-based call sequences, and no
/// preallocated calls.
bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo().hasVarSizedObjects() &&
         !MF.getInfo()->getHasPushSequences() &&
         !MF.getInfo()->hasPreallocatedCall();
}

/// canSimplifyCallFramePseudos - If there is a reserved call frame, the call
/// frame pseudos can be simplified. Having a FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
bool X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
  return hasReservedCallFrame(MF) ||
         MF.getInfo()->hasPreallocatedCall() ||
         (hasFP(MF) && !TRI->hasStackRealignment(MF)) ||
         TRI->hasBasePointer(MF);
}

// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function. Normally, this is required only when the function
// has any stack objects. However, FI resolution actually has another job,
// not apparent from the title - it resolves callframesetup/destroy
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
bool X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
  return MF.getFrameInfo().hasStackObjects() ||
         MF.getInfo()->getHasPushSequences();
}

/// hasFP - Return true if the specified function should have a dedicated
/// frame pointer register. This is true if the function has variable sized
/// allocas or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const { const MachineFrameInfo &MFI = MF.getFrameInfo(); return (MF.getTarget().Options.DisableFramePointerElim(MF) || TRI->hasStackRealignment(MF) || MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() || MF.getInfo()->getForceFramePointer() || MF.getInfo()->hasPreallocatedCall() || MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() || MFI.hasStackMap() || MFI.hasPatchPoint() || MFI.hasCopyImplyingStackAdjustment()); } static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) { if (IsLP64) { if (isInt<8>(Imm)) return X86::SUB64ri8; return X86::SUB64ri32; } else { if (isInt<8>(Imm)) return X86::SUB32ri8; return X86::SUB32ri; } } static unsigned getADDriOpcode(bool IsLP64, int64_t Imm) { if (IsLP64) { if (isInt<8>(Imm)) return X86::ADD64ri8; return X86::ADD64ri32; } else { if (isInt<8>(Imm)) return X86::ADD32ri8; return X86::ADD32ri; } } static unsigned getSUBrrOpcode(bool IsLP64) { return IsLP64 ? X86::SUB64rr : X86::SUB32rr; } static unsigned getADDrrOpcode(bool IsLP64) { return IsLP64 ? X86::ADD64rr : X86::ADD32rr; } static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) { if (IsLP64) { if (isInt<8>(Imm)) return X86::AND64ri8; return X86::AND64ri32; } if (isInt<8>(Imm)) return X86::AND32ri8; return X86::AND32ri; } static unsigned getLEArOpcode(bool IsLP64) { return IsLP64 ? X86::LEA64r : X86::LEA32r; } static bool isEAXLiveIn(MachineBasicBlock &MBB) { for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) { unsigned Reg = RegMask.PhysReg; if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX || Reg == X86::AH || Reg == X86::AL) return true; } return false; } /// Check if the flags need to be preserved before the terminators. /// This would be the case, if the eflags is live-in of the region /// composed by the terminators or live-out of that region, without /// being defined by a terminator. 
static bool flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
  for (const MachineInstr &MI : MBB.terminators()) {
    bool BreakNext = false;
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (Reg != X86::EFLAGS)
        continue;
      // This terminator needs an eflags value that is not defined by an
      // earlier terminator:
      // EFLAGS is live-in of the region composed by the terminators.
      if (!MO.isDef())
        return true;
      // This terminator defines the eflags, i.e., we don't need to preserve
      // it. However, we still need to check this specific terminator does
      // not read a live-in value.
      BreakNext = true;
    }
    // We found a definition of the eflags, no need to preserve them.
    if (BreakNext)
      return false;
  }
  // None of the terminators use or define the eflags.
  // Check if they are live-out, that would imply we need to preserve them.
  for (const MachineBasicBlock *Succ : MBB.successors())
    if (Succ->isLiveIn(X86::EFLAGS))
      return true;
  return false;
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value. Negative \p NumBytes shrinks the stack
/// (FrameSetup); positive grows it back (FrameDestroy). Large adjustments are
/// split into <2GiB chunks, and inline stack probing (when enabled and in the
/// prologue) is delegated to a pseudo expanded later by inlineStackProbe().
void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    const DebugLoc &DL, int64_t NumBytes,
                                    bool InEpilogue) const {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  MachineInstr::MIFlag Flag =
      isSub ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy;

  // Largest adjustment doable with a single 32-bit immediate.
  uint64_t Chunk = (1LL << 31) - 1;

  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &STI = MF.getSubtarget();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

  // It's ok to not take into account large chunks when probing, as the
  // allocation is split in smaller chunks anyway.
  if (EmitInlineStackProbe && !InEpilogue) {

    // This pseudo-instruction is going to be expanded, potentially using a
    // loop, by inlineStackProbe().
    BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING)).addImm(Offset);
    return;
  } else if (Offset > Chunk) {
    // Rather than emit a long series of instructions for large offsets,
    // load the offset into a register and do one sub/add
    unsigned Reg = 0;
    unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);

    // Prefer RAX/EAX as the scratch register when it is free; otherwise fall
    // back to any dead caller-saved register (0 if none is available).
    if (isSub && !isEAXLiveIn(MBB))
      Reg = Rax;
    else
      Reg = TRI->findDeadCallerSavedReg(MBB, MBBI);

    unsigned MovRIOpc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
    unsigned AddSubRROpc =
        isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit);
    if (Reg) {
      // mov $Offset, %Reg ; sub/add %Reg, %rsp
      BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Reg)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr)
                             .addReg(StackPtr)
                             .addReg(Reg);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      return;
    } else if (Offset > 8 * Chunk) {
      // If we would need more than 8 add or sub instructions (a >16GB stack
      // frame), it's worth spilling RAX to materialize this immediate.
      //   pushq %rax
      //   movabsq +-$Offset+-SlotSize, %rax
      //   addq %rsp, %rax
      //   xchg %rax, (%rsp)
      //   movq (%rsp), %rsp
      assert(Is64Bit && "can't have 32-bit 16GB stack frame");
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
          .addReg(Rax, RegState::Kill)
          .setMIFlag(Flag);
      // Subtract is not commutative, so negate the offset and always use add.
      // Subtract 8 less and add 8 more to account for the PUSH we just did.
      if (isSub)
        Offset = -(Offset - SlotSize);
      else
        Offset = Offset + SlotSize;
      BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Rax)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax)
                             .addReg(Rax)
                             .addReg(StackPtr);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      // Exchange the new SP in RAX with the top of the stack.
      addRegOffset(
          BuildMI(MBB, MBBI, DL, TII.get(X86::XCHG64rm), Rax).addReg(Rax),
          StackPtr, false, 0);
      // Load new SP from the top of the stack into RSP.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), StackPtr),
                   StackPtr, false, 0);
      return;
    }
  }

  // Emit the adjustment in <2GiB chunks.
  while (Offset) {
    uint64_t ThisVal = std::min(Offset, Chunk);
    if (ThisVal == SlotSize) {
      // Use push / pop for slot sized adjustments as a size optimization. We
      // need to find a dead register when using pop.
      unsigned Reg = isSub ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
                           : TRI->findDeadCallerSavedReg(MBB, MBBI);
      if (Reg) {
        unsigned Opc = isSub ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
                             : (Is64Bit ? X86::POP64r : X86::POP32r);
        BuildMI(MBB, MBBI, DL, TII.get(Opc))
            .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub))
            .setMIFlag(Flag);
        Offset -= ThisVal;
        continue;
      }
    }

    BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
        .setMIFlag(Flag);

    Offset -= ThisVal;
  }
}

/// Emit one SP adjustment instruction (LEA when EFLAGS must be preserved,
/// ADD/SUB otherwise) and return its builder so the caller can tag flags.
MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
  assert(Offset != 0 && "zero offset stack adjustment requested");

  // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
  // is tricky.
  bool UseLEA;
  if (!InEpilogue) {
    // Check if inserting the prologue at the beginning
    // of MBB would require to use LEA operations.
    // We need to use LEA operations if EFLAGS is live in, because
    // it means an instruction will read it before it gets defined.
    UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
  } else {
    // If we can use LEA for SP but we shouldn't, check that none
    // of the terminators uses the eflags. Otherwise we will insert
    // a ADD that will redefine the eflags and break the condition.
    // Alternatively, we could move the ADD, but this may not be possible
    // and is an optimization anyway.
    UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
    if (UseLEA && !STI.useLeaForSP())
      UseLEA = flagsNeedToBePreservedBeforeTheTerminators(MBB);
    // If that assert breaks, that means we do not do the right thing
    // in canUseAsEpilogue.
    assert((UseLEA || !flagsNeedToBePreservedBeforeTheTerminators(MBB)) &&
           "We shouldn't have allowed this insertion point");
  }

  MachineInstrBuilder MI;
  if (UseLEA) {
    MI = addRegOffset(BuildMI(MBB, MBBI, DL,
                              TII.get(getLEArOpcode(Uses64BitFramePtr)),
                              StackPtr),
                      StackPtr, false, Offset);
  } else {
    bool IsSub = Offset < 0;
    uint64_t AbsOffset = IsSub ? -Offset : Offset;
    const unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
                               : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
    MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
             .addReg(StackPtr)
             .addImm(AbsOffset);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }
  return MI;
}

/// If the instruction adjacent to MBBI (previous when \p doMergeWithPrevious,
/// next otherwise) is an ADD/SUB/LEA on the stack pointer, erase it (and its
/// companion CFA-offset CFI instruction) and return the signed adjustment it
/// performed; return 0 when no merge is possible.
int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     bool doMergeWithPrevious) const {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;

  PI = skipDebugInstructionsBackward(PI, MBB.begin());
  // It is assumed that ADD/SUB/LEA instruction is succeeded by one CFI
  // instruction, and that there are no DBG_VALUE or other instructions between
  // ADD/SUB/LEA and its corresponding CFI instruction.
  /* TODO: Add support for the case where there are multiple CFI instructions
    below the ADD/SUB/LEA, e.g.:
    ...
    add
    cfi_def_cfa_offset
    cfi_offset
    ...
  */
  if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())
    PI = std::prev(PI);

  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 || Opc == X86::ADD32ri ||
       Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr){
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = PI->getOperand(2).getImm();
  } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
             PI->getOperand(0).getReg() == StackPtr &&
             PI->getOperand(1).getReg() == StackPtr &&
             PI->getOperand(2).getImm() == 1 &&
             PI->getOperand(3).getReg() == X86::NoRegister &&
             PI->getOperand(5).getReg() == X86::NoRegister) {
    // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
    Offset = PI->getOperand(4).getImm();
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = -PI->getOperand(2).getImm();
  } else
    return 0;

  PI = MBB.erase(PI);
  // Also erase the CFA-offset CFI instruction that accompanied the
  // adjustment, if present.
  if (PI != MBB.end() && PI->isCFIInstruction()) {
    auto CIs = MBB.getParent()->getFrameInstructions();
    MCCFIInstruction CI = CIs[PI->getOperand(0).getCFIIndex()];
    if (CI.getOperation() == MCCFIInstruction::OpDefCfaOffset ||
        CI.getOperation() == MCCFIInstruction::OpAdjustCfaOffset)
      PI = MBB.erase(PI);
  }
  if (!doMergeWithPrevious)
    MBBI = skipDebugInstructionsForward(PI, MBB.end());

  return Offset;
}

/// Append \p CFIInst to the function's frame-instruction table and emit a
/// CFI_INSTRUCTION referencing it at the given insertion point.
void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL,
                                const MCCFIInstruction &CFIInst) const {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex = MF.addFrameInst(CFIInst);
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

/// Emits Dwarf Info specifying offsets of callee saved registers and
/// frame pointer. This is called only when basic block sections are enabled.
void X86FrameLowering::emitCalleeSavedFrameMoves( MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const { MachineFunction &MF = *MBB.getParent(); if (!hasFP(MF)) { emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true); return; } const MachineModuleInfo &MMI = MF.getMMI(); const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); const Register FramePtr = TRI->getFrameRegister(MF); const Register MachineFramePtr = STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr; unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true); // Offset = space for return address + size of the frame pointer itself. unsigned Offset = (Is64Bit ? 8 : 4) + (Uses64BitFramePtr ? 8 : 4); BuildCFI(MBB, MBBI, DebugLoc{}, MCCFIInstruction::createOffset(nullptr, DwarfReg, -Offset)); emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true); } void X86FrameLowering::emitCalleeSavedFrameMoves( MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool IsPrologue) const { MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); MachineModuleInfo &MMI = MF.getMMI(); const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); // Add callee saved registers to move list. const std::vector &CSI = MFI.getCalleeSavedInfo(); if (CSI.empty()) return; // Calculate offsets. 
for (std::vector::const_iterator I = CSI.begin(), E = CSI.end(); I != E; ++I) { int64_t Offset = MFI.getObjectOffset(I->getFrameIdx()); unsigned Reg = I->getReg(); unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true); if (IsPrologue) { BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset)); } else { BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createRestore(nullptr, DwarfReg)); } } } void X86FrameLowering::emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const { const X86Subtarget &STI = MF.getSubtarget(); if (STI.isTargetWindowsCoreCLR()) { if (InProlog) { BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING)) .addImm(0 /* no explicit stack size */); } else { emitStackProbeInline(MF, MBB, MBBI, DL, false); } } else { emitStackProbeCall(MF, MBB, MBBI, DL, InProlog); } } void X86FrameLowering::inlineStackProbe(MachineFunction &MF, MachineBasicBlock &PrologMBB) const { auto Where = llvm::find_if(PrologMBB, [](MachineInstr &MI) { return MI.getOpcode() == X86::STACKALLOC_W_PROBING; }); if (Where != PrologMBB.end()) { DebugLoc DL = PrologMBB.findDebugLoc(Where); emitStackProbeInline(MF, PrologMBB, Where, DL, true); Where->eraseFromParent(); } } void X86FrameLowering::emitStackProbeInline(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const { const X86Subtarget &STI = MF.getSubtarget(); if (STI.isTargetWindowsCoreCLR() && STI.is64Bit()) emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog); else emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog); } void X86FrameLowering::emitStackProbeInlineGeneric( MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const { MachineInstr &AllocWithProbe = *MBBI; uint64_t Offset = AllocWithProbe.getOperand(0).getImm(); const X86Subtarget &STI = MF.getSubtarget(); const X86TargetLowering 
&TLI = *STI.getTargetLowering(); assert(!(STI.is64Bit() && STI.isTargetWindowsCoreCLR()) && "different expansion expected for CoreCLR 64 bit"); const uint64_t StackProbeSize = TLI.getStackProbeSize(MF); uint64_t ProbeChunk = StackProbeSize * 8; uint64_t MaxAlign = TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0; // Synthesize a loop or unroll it, depending on the number of iterations. // BuildStackAlignAND ensures that only MaxAlign % StackProbeSize bits left // between the unaligned rsp and current rsp. if (Offset > ProbeChunk) { emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset, MaxAlign % StackProbeSize); } else { emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset, MaxAlign % StackProbeSize); } } void X86FrameLowering::emitStackProbeInlineGenericBlock( MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset, uint64_t AlignOffset) const { const bool NeedsDwarfCFI = needsDwarfCFI(MF); const bool HasFP = hasFP(MF); const X86Subtarget &STI = MF.getSubtarget(); const X86TargetLowering &TLI = *STI.getTargetLowering(); const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset); const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi; const uint64_t StackProbeSize = TLI.getStackProbeSize(MF); uint64_t CurrentOffset = 0; assert(AlignOffset < StackProbeSize); // If the offset is so small it fits within a page, there's nothing to do. if (StackProbeSize < Offset + AlignOffset) { MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr) .addReg(StackPtr) .addImm(StackProbeSize - AlignOffset) .setMIFlag(MachineInstr::FrameSetup); if (!HasFP && NeedsDwarfCFI) { BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createAdjustCfaOffset( nullptr, StackProbeSize - AlignOffset)); } MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. 
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc)) .setMIFlag(MachineInstr::FrameSetup), StackPtr, false, 0) .addImm(0) .setMIFlag(MachineInstr::FrameSetup); NumFrameExtraProbe++; CurrentOffset = StackProbeSize - AlignOffset; } // For the next N - 1 pages, just probe. I tried to take advantage of // natural probes but it implies much more logic and there was very few // interesting natural probes to interleave. while (CurrentOffset + StackProbeSize < Offset) { MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr) .addReg(StackPtr) .addImm(StackProbeSize) .setMIFlag(MachineInstr::FrameSetup); MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. if (!HasFP && NeedsDwarfCFI) { BuildCFI( MBB, MBBI, DL, MCCFIInstruction::createAdjustCfaOffset(nullptr, StackProbeSize)); } addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc)) .setMIFlag(MachineInstr::FrameSetup), StackPtr, false, 0) .addImm(0) .setMIFlag(MachineInstr::FrameSetup); NumFrameExtraProbe++; CurrentOffset += StackProbeSize; } // No need to probe the tail, it is smaller than a Page. uint64_t ChunkSize = Offset - CurrentOffset; MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr) .addReg(StackPtr) .addImm(ChunkSize) .setMIFlag(MachineInstr::FrameSetup); // No need to adjust Dwarf CFA offset here, the last position of the stack has // been defined MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. } void X86FrameLowering::emitStackProbeInlineGenericLoop( MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset, uint64_t AlignOffset) const { assert(Offset && "null offset"); const X86Subtarget &STI = MF.getSubtarget(); const X86TargetLowering &TLI = *STI.getTargetLowering(); const unsigned MovMIOpc = Is64Bit ? 
X86::MOV64mi32 : X86::MOV32mi; const uint64_t StackProbeSize = TLI.getStackProbeSize(MF); if (AlignOffset) { if (AlignOffset < StackProbeSize) { // Perform a first smaller allocation followed by a probe. const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, AlignOffset); MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), StackPtr) .addReg(StackPtr) .addImm(AlignOffset) .setMIFlag(MachineInstr::FrameSetup); MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc)) .setMIFlag(MachineInstr::FrameSetup), StackPtr, false, 0) .addImm(0) .setMIFlag(MachineInstr::FrameSetup); NumFrameExtraProbe++; Offset -= AlignOffset; } } // Synthesize a loop NumFrameLoopProbe++; const BasicBlock *LLVM_BB = MBB.getBasicBlock(); MachineBasicBlock *testMBB = MF.CreateMachineBasicBlock(LLVM_BB); MachineBasicBlock *tailMBB = MF.CreateMachineBasicBlock(LLVM_BB); MachineFunction::iterator MBBIter = ++MBB.getIterator(); MF.insert(MBBIter, testMBB); MF.insert(MBBIter, tailMBB); - Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 : X86::R11D; + Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 + : Is64Bit ? 
X86::R11D + : X86::EAX; BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed) .addReg(StackPtr) .setMIFlag(MachineInstr::FrameSetup); // save loop bound { const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, Offset); BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed) .addReg(FinalStackProbed) .addImm(Offset / StackProbeSize * StackProbeSize) .setMIFlag(MachineInstr::FrameSetup); } // allocate a page { const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize); BuildMI(testMBB, DL, TII.get(SUBOpc), StackPtr) .addReg(StackPtr) .addImm(StackProbeSize) .setMIFlag(MachineInstr::FrameSetup); } // touch the page addRegOffset(BuildMI(testMBB, DL, TII.get(MovMIOpc)) .setMIFlag(MachineInstr::FrameSetup), StackPtr, false, 0) .addImm(0) .setMIFlag(MachineInstr::FrameSetup); // cmp with stack pointer bound BuildMI(testMBB, DL, TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr)) .addReg(StackPtr) .addReg(FinalStackProbed) .setMIFlag(MachineInstr::FrameSetup); // jump BuildMI(testMBB, DL, TII.get(X86::JCC_1)) .addMBB(testMBB) .addImm(X86::COND_NE) .setMIFlag(MachineInstr::FrameSetup); testMBB->addSuccessor(testMBB); testMBB->addSuccessor(tailMBB); // BB management tailMBB->splice(tailMBB->end(), &MBB, MBBI, MBB.end()); tailMBB->transferSuccessorsAndUpdatePHIs(&MBB); MBB.addSuccessor(testMBB); // handle tail unsigned TailOffset = Offset % StackProbeSize; if (TailOffset) { const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, TailOffset); BuildMI(*tailMBB, tailMBB->begin(), DL, TII.get(Opc), StackPtr) .addReg(StackPtr) .addImm(TailOffset) .setMIFlag(MachineInstr::FrameSetup); } // Update Live In information recomputeLiveIns(*testMBB); recomputeLiveIns(*tailMBB); } void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64( MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const { const X86Subtarget &STI = MF.getSubtarget(); assert(STI.is64Bit() && "different expansion 
needed for 32 bit"); assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR"); const TargetInstrInfo &TII = *STI.getInstrInfo(); const BasicBlock *LLVM_BB = MBB.getBasicBlock(); // RAX contains the number of bytes of desired stack adjustment. // The handling here assumes this value has already been updated so as to // maintain stack alignment. // // We need to exit with RSP modified by this amount and execute suitable // page touches to notify the OS that we're growing the stack responsibly. // All stack probing must be done without modifying RSP. // // MBB: // SizeReg = RAX; // ZeroReg = 0 // CopyReg = RSP // Flags, TestReg = CopyReg - SizeReg // FinalReg = !Flags.Ovf ? TestReg : ZeroReg // LimitReg = gs magic thread env access // if FinalReg >= LimitReg goto ContinueMBB // RoundBB: // RoundReg = page address of FinalReg // LoopMBB: // LoopReg = PHI(LimitReg,ProbeReg) // ProbeReg = LoopReg - PageSize // [ProbeReg] = 0 // if (ProbeReg > RoundReg) goto LoopMBB // ContinueMBB: // RSP = RSP - RAX // [rest of original MBB] // Set up the new basic blocks MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB); MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB); MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB); MachineFunction::iterator MBBIter = std::next(MBB.getIterator()); MF.insert(MBBIter, RoundMBB); MF.insert(MBBIter, LoopMBB); MF.insert(MBBIter, ContinueMBB); // Split MBB and move the tail portion down to ContinueMBB. MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI); ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end()); ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB); // Some useful constants const int64_t ThreadEnvironmentStackLimit = 0x10; const int64_t PageSize = 0x1000; const int64_t PageMask = ~(PageSize - 1); // Registers we need. For the normal case we use virtual // registers. For the prolog expansion we use RAX, RCX and RDX. 
MachineRegisterInfo &MRI = MF.getRegInfo(); const TargetRegisterClass *RegClass = &X86::GR64RegClass; const Register SizeReg = InProlog ? X86::RAX : MRI.createVirtualRegister(RegClass), ZeroReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass), CopyReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass), TestReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass), FinalReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass), RoundedReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass), LimitReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass), JoinReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass), ProbeReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass); // SP-relative offsets where we can save RCX and RDX. int64_t RCXShadowSlot = 0; int64_t RDXShadowSlot = 0; // If inlining in the prolog, save RCX and RDX. if (InProlog) { // Compute the offsets. We need to account for things already // pushed onto the stack at this point: return address, frame // pointer (if used), and callee saves. X86MachineFunctionInfo *X86FI = MF.getInfo(); const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize(); const bool HasFP = hasFP(MF); // Check if we need to spill RCX and/or RDX. // Here we assume that no earlier prologue instruction changes RCX and/or // RDX, so checking the block live-ins is enough. const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX); const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX); int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0); // Assign the initial slot to both registers, then change RDX's slot if both // need to be spilled. if (IsRCXLiveIn) RCXShadowSlot = InitSlot; if (IsRDXLiveIn) RDXShadowSlot = InitSlot; if (IsRDXLiveIn && IsRCXLiveIn) RDXShadowSlot += 8; // Emit the saves if needed. 
if (IsRCXLiveIn) addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false, RCXShadowSlot) .addReg(X86::RCX); if (IsRDXLiveIn) addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false, RDXShadowSlot) .addReg(X86::RDX); } else { // Not in the prolog. Copy RAX to a virtual reg. BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX); } // Add code to MBB to check for overflow and set the new target stack pointer // to zero if so. BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg) .addReg(ZeroReg, RegState::Undef) .addReg(ZeroReg, RegState::Undef); BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP); BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg) .addReg(CopyReg) .addReg(SizeReg); BuildMI(&MBB, DL, TII.get(X86::CMOV64rr), FinalReg) .addReg(TestReg) .addReg(ZeroReg) .addImm(X86::COND_B); // FinalReg now holds final stack pointer value, or zero if // allocation would overflow. Compare against the current stack // limit from the thread environment block. Note this limit is the // lowest touched page on the stack, not the point at which the OS // will cause an overflow exception, so this is just an optimization // to avoid unnecessarily touching pages that are below the current // SP but already committed to the stack by the OS. BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg) .addReg(0) .addImm(1) .addReg(0) .addImm(ThreadEnvironmentStackLimit) .addReg(X86::GS); BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg); // Jump if the desired stack pointer is at or above the stack limit. BuildMI(&MBB, DL, TII.get(X86::JCC_1)).addMBB(ContinueMBB).addImm(X86::COND_AE); // Add code to roundMBB to round the final stack pointer to a page boundary. 
RoundMBB->addLiveIn(FinalReg); BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg) .addReg(FinalReg) .addImm(PageMask); BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB); // LimitReg now holds the current stack limit, RoundedReg page-rounded // final RSP value. Add code to loopMBB to decrement LimitReg page-by-page // and probe until we reach RoundedReg. if (!InProlog) { BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg) .addReg(LimitReg) .addMBB(RoundMBB) .addReg(ProbeReg) .addMBB(LoopMBB); } LoopMBB->addLiveIn(JoinReg); addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg, false, -PageSize); // Probe by storing a byte onto the stack. BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi)) .addReg(ProbeReg) .addImm(1) .addReg(0) .addImm(0) .addReg(0) .addImm(0); LoopMBB->addLiveIn(RoundedReg); BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr)) .addReg(RoundedReg) .addReg(ProbeReg); BuildMI(LoopMBB, DL, TII.get(X86::JCC_1)).addMBB(LoopMBB).addImm(X86::COND_NE); MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI(); // If in prolog, restore RDX and RCX. if (InProlog) { if (RCXShadowSlot) // It means we spilled RCX in the prologue. addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::MOV64rm), X86::RCX), X86::RSP, false, RCXShadowSlot); if (RDXShadowSlot) // It means we spilled RDX in the prologue. addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::MOV64rm), X86::RDX), X86::RSP, false, RDXShadowSlot); } // Now that the probing is done, add code to continueMBB to update // the stack pointer for real. ContinueMBB->addLiveIn(SizeReg); BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP) .addReg(X86::RSP) .addReg(SizeReg); // Add the control flow edges we need. MBB.addSuccessor(ContinueMBB); MBB.addSuccessor(RoundMBB); RoundMBB->addSuccessor(LoopMBB); LoopMBB->addSuccessor(ContinueMBB); LoopMBB->addSuccessor(LoopMBB); // Mark all the instructions added to the prolog as frame setup. 
if (InProlog) {
    // Everything inserted by the probe expansion belongs to the prologue:
    // flag the instructions left in MBB after the split point, plus every
    // instruction in the round/loop blocks and the continue block's prefix.
    for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
      BeforeMBBI->setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *RoundMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *LoopMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin();
         CMBBI != ContinueMBBI; ++CMBBI) {
      CMBBI->setFlag(MachineInstr::FrameSetup);
    }
  }
}

/// Emit a call to the target's stack-probe symbol (e.g. __chkstk /
/// ___chkstk_ms / _alloca). The probe amount is expected in AX/EAX/RAX; on
/// ABIs whose probe routine does not adjust SP itself, a SUB of AX from SP
/// is emitted after the call. When \p InProlog is set, every inserted
/// instruction is flagged as FrameSetup.
void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
                                          MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          const DebugLoc &DL,
                                          bool InProlog) const {
  bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;

  // FIXME: Add indirect thunk support and remove this.
  if (Is64Bit && IsLargeCodeModel && STI.useIndirectThunkCalls())
    report_fatal_error("Emitting stack probe calls on 64-bit with the large "
                       "code model and indirect thunks not yet implemented.");

  unsigned CallOp;
  if (Is64Bit)
    CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
  else
    CallOp = X86::CALLpcrel32;

  StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);

  MachineInstrBuilder CI;
  // Remember the instruction just before the expansion so the FrameSetup
  // flag can be applied to exactly the inserted range afterwards.
  MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);

  // All current stack probes take AX and SP as input, clobber flags, and
  // preserve all registers. x86_64 probes leave RSP unmodified.
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // For the large code model, we have to call through a register. Use R11,
    // as it is scratch in all supported calling conventions.
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
        .addExternalSymbol(MF.createExternalSymbolName(Symbol));
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
  } else {
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp))
             .addExternalSymbol(MF.createExternalSymbolName(Symbol));
  }

  unsigned AX = Uses64BitFramePtr ? X86::RAX : X86::EAX;
  unsigned SP = Uses64BitFramePtr ? X86::RSP : X86::ESP;
  // The probe reads AX/SP and (re)defines them, and clobbers EFLAGS.
  CI.addReg(AX, RegState::Implicit)
      .addReg(SP, RegState::Implicit)
      .addReg(AX, RegState::Define | RegState::Implicit)
      .addReg(SP, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

  if (STI.isTargetWin64() || !STI.isOSWindows()) {
    // MSVC x32's _chkstk and cygwin/mingw's _alloca adjust %esp themselves.
    // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
    // themselves. They also do not clobber %rax so we can reuse it when
    // adjusting %rsp.
    // All other platforms do not specify a particular ABI for the stack probe
    // function, so we arbitrarily define it to not adjust %esp/%rsp itself.
    BuildMI(MBB, MBBI, DL, TII.get(getSUBrrOpcode(Uses64BitFramePtr)), SP)
        .addReg(SP)
        .addReg(AX);
  }

  if (InProlog) {
    // Apply the frame setup flag to all inserted instrs.
    for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
      ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
  }
}

/// Compute the SEH frame-pointer offset for UWOP_SET_FPREG from the total SP
/// adjustment: clamped to 128 and rounded down to a 16-byte boundary.
static unsigned calculateSetFPREG(uint64_t SPAdjust) {
  // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
  // and might require smaller successive adjustments.
  const uint64_t Win64MaxSEHOffset = 128;
  uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
  // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
  return SEHFrameOffset & -16;
}

// If we're forcing a stack realignment we can't rely on just the frame
// info, we need to know the ABI stack alignment as well in case we
// have a call out. Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
uint64_t
X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  Align MaxAlign = MFI.getMaxAlign(); // Desired stack alignment.
  Align StackAlign = getStackAlign();
  if (MF.getFunction().hasFnAttribute("stackrealign")) {
    if (MFI.hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ?
StackAlign : MaxAlign; else if (MaxAlign < SlotSize) MaxAlign = Align(SlotSize); } return MaxAlign.value(); } void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned Reg, uint64_t MaxAlign) const { uint64_t Val = -MaxAlign; unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val); MachineFunction &MF = *MBB.getParent(); const X86Subtarget &STI = MF.getSubtarget(); const X86TargetLowering &TLI = *STI.getTargetLowering(); const uint64_t StackProbeSize = TLI.getStackProbeSize(MF); const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF); // We want to make sure that (in worst case) less than StackProbeSize bytes // are not probed after the AND. This assumption is used in // emitStackProbeInlineGeneric. if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) { { NumFrameLoopProbe++; MachineBasicBlock *entryMBB = MF.CreateMachineBasicBlock(MBB.getBasicBlock()); MachineBasicBlock *headMBB = MF.CreateMachineBasicBlock(MBB.getBasicBlock()); MachineBasicBlock *bodyMBB = MF.CreateMachineBasicBlock(MBB.getBasicBlock()); MachineBasicBlock *footMBB = MF.CreateMachineBasicBlock(MBB.getBasicBlock()); MachineFunction::iterator MBBIter = MBB.getIterator(); MF.insert(MBBIter, entryMBB); MF.insert(MBBIter, headMBB); MF.insert(MBBIter, bodyMBB); MF.insert(MBBIter, footMBB); const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi; - Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 : X86::R11D; + Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 + : Is64Bit ? 
X86::R11D + : X86::EAX; // Setup entry block { entryMBB->splice(entryMBB->end(), &MBB, MBB.begin(), MBBI); BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed) .addReg(StackPtr) .setMIFlag(MachineInstr::FrameSetup); MachineInstr *MI = BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed) .addReg(FinalStackProbed) .addImm(Val) .setMIFlag(MachineInstr::FrameSetup); // The EFLAGS implicit def is dead. MI->getOperand(3).setIsDead(); BuildMI(entryMBB, DL, TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr)) .addReg(FinalStackProbed) .addReg(StackPtr) .setMIFlag(MachineInstr::FrameSetup); BuildMI(entryMBB, DL, TII.get(X86::JCC_1)) .addMBB(&MBB) .addImm(X86::COND_E) .setMIFlag(MachineInstr::FrameSetup); entryMBB->addSuccessor(headMBB); entryMBB->addSuccessor(&MBB); } // Loop entry block { const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize); BuildMI(headMBB, DL, TII.get(SUBOpc), StackPtr) .addReg(StackPtr) .addImm(StackProbeSize) .setMIFlag(MachineInstr::FrameSetup); BuildMI(headMBB, DL, TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr)) .addReg(FinalStackProbed) .addReg(StackPtr) .setMIFlag(MachineInstr::FrameSetup); // jump BuildMI(headMBB, DL, TII.get(X86::JCC_1)) .addMBB(footMBB) .addImm(X86::COND_B) .setMIFlag(MachineInstr::FrameSetup); headMBB->addSuccessor(bodyMBB); headMBB->addSuccessor(footMBB); } // setup loop body { addRegOffset(BuildMI(bodyMBB, DL, TII.get(MovMIOpc)) .setMIFlag(MachineInstr::FrameSetup), StackPtr, false, 0) .addImm(0) .setMIFlag(MachineInstr::FrameSetup); const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize); BuildMI(bodyMBB, DL, TII.get(SUBOpc), StackPtr) .addReg(StackPtr) .addImm(StackProbeSize) .setMIFlag(MachineInstr::FrameSetup); // cmp with stack pointer bound BuildMI(bodyMBB, DL, TII.get(Uses64BitFramePtr ? 
X86::CMP64rr : X86::CMP32rr)) .addReg(FinalStackProbed) .addReg(StackPtr) .setMIFlag(MachineInstr::FrameSetup); // jump BuildMI(bodyMBB, DL, TII.get(X86::JCC_1)) .addMBB(bodyMBB) .addImm(X86::COND_B) .setMIFlag(MachineInstr::FrameSetup); bodyMBB->addSuccessor(bodyMBB); bodyMBB->addSuccessor(footMBB); } // setup loop footer { BuildMI(footMBB, DL, TII.get(TargetOpcode::COPY), StackPtr) .addReg(FinalStackProbed) .setMIFlag(MachineInstr::FrameSetup); addRegOffset(BuildMI(footMBB, DL, TII.get(MovMIOpc)) .setMIFlag(MachineInstr::FrameSetup), StackPtr, false, 0) .addImm(0) .setMIFlag(MachineInstr::FrameSetup); footMBB->addSuccessor(&MBB); } recomputeLiveIns(*headMBB); recomputeLiveIns(*bodyMBB); recomputeLiveIns(*footMBB); recomputeLiveIns(MBB); } } else { MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg) .addReg(Reg) .addImm(Val) .setMIFlag(MachineInstr::FrameSetup); // The EFLAGS implicit def is dead. MI->getOperand(3).setIsDead(); } } bool X86FrameLowering::has128ByteRedZone(const MachineFunction& MF) const { // x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be // clobbered by any interrupt handler. assert(&STI == &MF.getSubtarget() && "MF used frame lowering for wrong subtarget"); const Function &Fn = MF.getFunction(); const bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv()); return Is64Bit && !IsWin64CC && !Fn.hasFnAttribute(Attribute::NoRedZone); } bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const { return MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); } bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const { return !isWin64Prologue(MF) && MF.needsFrameMoves(); } /// emitPrologue - Push callee-saved registers onto the stack, which /// automatically adjust the stack pointer. Adjust the stack pointer to allocate /// space for local variables. Also emit labels used by the exception handler to /// generate the exception handling frames. 
/* Here's a gist of what gets emitted:

     ; Establish frame pointer, if needed
     [if needs FP]
         push  %rbp
         .cfi_def_cfa_offset 16
         .cfi_offset %rbp, -16
         .seh_pushreg %rbp
         mov  %rsp, %rbp
         .cfi_def_cfa_register %rbp

     ; Spill general-purpose registers
     [for all callee-saved GPRs]
         pushq %<reg>
         [if not needs FP]
             .cfi_def_cfa_offset (offset from RETADDR)
         .seh_pushreg %<reg>

     ; If the required stack alignment > default stack alignment
     ; rsp needs to be re-aligned.  This creates a "re-alignment gap"
     ; of unknown size in the stack frame.
     [if stack needs re-alignment]
         and  $MASK, %rsp

     ; Allocate space for locals
     [if target is Windows and allocated space > 4096 bytes]
         ; Windows needs special care for allocations larger
         ; than one page.
         mov $NNN, %rax
         call ___chkstk_ms/___chkstk
         sub  %rax, %rsp
     [else]
         sub  $NNN, %rsp

     [if needs FP]
         .seh_stackalloc (size of XMM spill slots)
         .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
     [else]
         .seh_stackalloc NNN

     ; Spill XMMs
     ; Note, that while only Windows 64 ABI specifies XMMs as callee-preserved,
     ; they may get spilled on any platform, if the current function
     ; calls @llvm.eh.unwind.init
     [if needs FP]
         [for all callee-saved XMM registers]
             movaps  %<xmm reg>, -MMM(%rbp)
         [for all callee-saved XMM registers]
             .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
             ; i.e. the offset relative to (%rbp - SEHFrameOffset)
     [else]
         [for all callee-saved XMM registers]
             movaps  %<xmm reg>, KKK(%rsp)
         [for all callee-saved XMM registers]
             .seh_savexmm %<xmm reg>, KKK

     .seh_endprologue

     [if needs base pointer]
         mov  %rsp, %rbx
         [if needs to restore base pointer]
             mov %rsp, -MMM(%rbp)

     ; Emit CFI info
     [if needs FP]
         [for all callee-saved registers]
             .cfi_offset %<reg>, (offset from %rbp)
     [else]
         .cfi_def_cfa_offset (offset from RETADDR)
         [for all callee-saved registers]
             .cfi_offset %<reg>, (offset from %rsp)

     Notes:
     - .seh directives are emitted only for Windows 64 ABI
     - .cv_fpo directives are emitted on win32 when emitting CodeView
     - .cfi directives are emitted for all other ABIs
     - for 32-bit code, substitute %e?? registers for %r??
*/ void X86FrameLowering::emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const { assert(&STI == &MF.getSubtarget() && "MF used frame lowering for wrong subtarget"); MachineBasicBlock::iterator MBBI = MBB.begin(); MachineFrameInfo &MFI = MF.getFrameInfo(); const Function &Fn = MF.getFunction(); MachineModuleInfo &MMI = MF.getMMI(); X86MachineFunctionInfo *X86FI = MF.getInfo(); uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment. uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate. bool IsFunclet = MBB.isEHFuncletEntry(); EHPersonality Personality = EHPersonality::Unknown; if (Fn.hasPersonalityFn()) Personality = classifyEHPersonality(Fn.getPersonalityFn()); bool FnHasClrFunclet = MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR; bool IsClrFunclet = IsFunclet && FnHasClrFunclet; bool HasFP = hasFP(MF); bool IsWin64Prologue = isWin64Prologue(MF); bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry(); // FIXME: Emit FPO data for EH funclets. bool NeedsWinFPO = !IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag(); bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO; bool NeedsDwarfCFI = needsDwarfCFI(MF); Register FramePtr = TRI->getFrameRegister(MF); const Register MachineFramePtr = STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr; Register BasePtr = TRI->getBaseRegister(); bool HasWinCFI = false; // Debug location must be unknown since the first debug location is used // to determine the end of the prologue. DebugLoc DL; // Add RETADDR move area to callee saved frame size. 
int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta(); if (TailCallReturnAddrDelta && IsWin64Prologue) report_fatal_error("Can't handle guaranteed tail call under win64 yet"); if (TailCallReturnAddrDelta < 0) X86FI->setCalleeSavedFrameSize( X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta); const bool EmitStackProbeCall = STI.getTargetLowering()->hasStackProbeSymbol(MF); unsigned StackProbeSize = STI.getTargetLowering()->getStackProbeSize(MF); if (HasFP && X86FI->hasSwiftAsyncContext()) { BuildMI(MBB, MBBI, DL, TII.get(X86::BTS64ri8), MachineFramePtr) .addUse(MachineFramePtr) .addImm(60) .setMIFlag(MachineInstr::FrameSetup); } // Re-align the stack on 64-bit if the x86-interrupt calling convention is // used and an error code was pushed, since the x86-64 ABI requires a 16-byte // stack alignment. if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit && Fn.arg_size() == 2) { StackSize += 8; MFI.setStackSize(StackSize); emitSPUpdate(MBB, MBBI, DL, -8, /*InEpilogue=*/false); } // If this is x86-64 and the Red Zone is not disabled, if we are a leaf // function, and use up to 128 bytes of stack space, don't have a frame // pointer, calls, or dynamic alloca then we do not need to adjust the // stack pointer (we fit in the Red Zone). We also check that we don't // push and pop from the stack. if (has128ByteRedZone(MF) && !TRI->hasStackRealignment(MF) && !MFI.hasVarSizedObjects() && // No dynamic alloca. !MFI.adjustsStack() && // No calls. !EmitStackProbeCall && // No stack probes. !MFI.hasCopyImplyingStackAdjustment() && // Don't push and pop. !MF.shouldSplitStack()) { // Regular stack uint64_t MinSize = X86FI->getCalleeSavedFrameSize(); if (HasFP) MinSize += SlotSize; X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0); StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0); MFI.setStackSize(StackSize); } // Insert stack pointer adjustment for later moving of return addr. 
Only // applies to tail call optimized functions where the callee argument stack // size is bigger than the callers. if (TailCallReturnAddrDelta < 0) { BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta, /*InEpilogue=*/false) .setMIFlag(MachineInstr::FrameSetup); } // Mapping for machine moves: // // DST: VirtualFP AND // SRC: VirtualFP => DW_CFA_def_cfa_offset // ELSE => DW_CFA_def_cfa // // SRC: VirtualFP AND // DST: Register => DW_CFA_def_cfa_register // // ELSE // OFFSET < 0 => DW_CFA_offset_extended_sf // REG < 64 => DW_CFA_offset + Reg // ELSE => DW_CFA_offset_extended uint64_t NumBytes = 0; int stackGrowth = -SlotSize; // Find the funclet establisher parameter Register Establisher = X86::NoRegister; if (IsClrFunclet) Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX; else if (IsFunclet) Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX; if (IsWin64Prologue && IsFunclet && !IsClrFunclet) { // Immediately spill establisher into the home slot. // The runtime cares about this. // MOV64mr %rdx, 16(%rsp) unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr; addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16) .addReg(Establisher) .setMIFlag(MachineInstr::FrameSetup); MBB.addLiveIn(Establisher); } if (HasFP) { assert(MF.getRegInfo().isReserved(MachineFramePtr) && "FP reserved"); // Calculate required stack adjustment. uint64_t FrameSize = StackSize - SlotSize; // If required, include space for extra hidden slot for stashing base pointer. if (X86FI->getRestoreBasePointer()) FrameSize += SlotSize; NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize(); // Callee-saved registers are pushed on stack before the stack is realigned. if (TRI->hasStackRealignment(MF) && !IsWin64Prologue) NumBytes = alignTo(NumBytes, MaxAlign); // Save EBP/RBP into the appropriate stack slot. BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? 
X86::PUSH64r : X86::PUSH32r)) .addReg(MachineFramePtr, RegState::Kill) .setMIFlag(MachineInstr::FrameSetup); if (NeedsDwarfCFI) { // Mark the place where EBP/RBP was saved. // Define the current CFA rule to use the provided offset. assert(StackSize); BuildCFI(MBB, MBBI, DL, MCCFIInstruction::cfiDefCfaOffset(nullptr, -2 * stackGrowth)); // Change the rule for the FramePtr to be an "offset" rule. unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true); BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset( nullptr, DwarfFramePtr, 2 * stackGrowth)); } if (NeedsWinCFI) { HasWinCFI = true; BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)) .addImm(FramePtr) .setMIFlag(MachineInstr::FrameSetup); } if (!IsFunclet) { if (X86FI->hasSwiftAsyncContext()) { const auto &Attrs = MF.getFunction().getAttributes(); // Before we update the live frame pointer we have to ensure there's a // valid (or null) asynchronous context in its slot just before FP in // the frame record, so store it now. if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) { // We have an initial context in r14, store it just before the frame // pointer. MBB.addLiveIn(X86::R14); BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r)) .addReg(X86::R14) .setMIFlag(MachineInstr::FrameSetup); } else { // No initial context, store null so that there's no pointer that // could be misused. BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64i8)) .addImm(0) .setMIFlag(MachineInstr::FrameSetup); } if (NeedsWinCFI) { HasWinCFI = true; BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)) .addImm(X86::R14) .setMIFlag(MachineInstr::FrameSetup); } BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr) .addUse(X86::RSP) .addImm(1) .addUse(X86::NoRegister) .addImm(8) .addUse(X86::NoRegister) .setMIFlag(MachineInstr::FrameSetup); BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64ri8), X86::RSP) .addUse(X86::RSP) .addImm(8) .setMIFlag(MachineInstr::FrameSetup); } if (!IsWin64Prologue && !IsFunclet) { // Update EBP with the new base value. 
if (!X86FI->hasSwiftAsyncContext()) BuildMI(MBB, MBBI, DL, TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), FramePtr) .addReg(StackPtr) .setMIFlag(MachineInstr::FrameSetup); if (NeedsDwarfCFI) { // Mark effective beginning of when frame pointer becomes valid. // Define the current CFA to use the EBP/RBP register. unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true); BuildCFI( MBB, MBBI, DL, MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr)); } if (NeedsWinFPO) { // .cv_fpo_setframe $FramePtr HasWinCFI = true; BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame)) .addImm(FramePtr) .addImm(0) .setMIFlag(MachineInstr::FrameSetup); } } } } else { assert(!IsFunclet && "funclets without FPs not yet implemented"); NumBytes = StackSize - X86FI->getCalleeSavedFrameSize(); } // Update the offset adjustment, which is mainly used by codeview to translate // from ESP to VFRAME relative local variable offsets. if (!IsFunclet) { if (HasFP && TRI->hasStackRealignment(MF)) MFI.setOffsetAdjustment(-NumBytes); else MFI.setOffsetAdjustment(-StackSize); } // For EH funclets, only allocate enough space for outgoing calls. Save the // NumBytes value that we would've used for the parent frame. unsigned ParentFrameNumBytes = NumBytes; if (IsFunclet) NumBytes = getWinEHFuncletFrameSize(MF); // Skip the callee-saved push instructions. bool PushedRegs = false; int StackOffset = 2 * stackGrowth; while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup) && (MBBI->getOpcode() == X86::PUSH32r || MBBI->getOpcode() == X86::PUSH64r)) { PushedRegs = true; Register Reg = MBBI->getOperand(0).getReg(); ++MBBI; if (!HasFP && NeedsDwarfCFI) { // Mark callee-saved push instruction. // Define the current CFA rule to use the provided offset. 
assert(StackSize); BuildCFI(MBB, MBBI, DL, MCCFIInstruction::cfiDefCfaOffset(nullptr, -StackOffset)); StackOffset += stackGrowth; } if (NeedsWinCFI) { HasWinCFI = true; BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)) .addImm(Reg) .setMIFlag(MachineInstr::FrameSetup); } } // Realign stack after we pushed callee-saved registers (so that we'll be // able to calculate their offsets from the frame pointer). // Don't do this for Win64, it needs to realign the stack after the prologue. if (!IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF)) { assert(HasFP && "There should be a frame pointer if stack is realigned."); BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign); if (NeedsWinCFI) { HasWinCFI = true; BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlign)) .addImm(MaxAlign) .setMIFlag(MachineInstr::FrameSetup); } } // If there is an SUB32ri of ESP immediately before this instruction, merge // the two. This can be the case when tail call elimination is enabled and // the callee has more arguments then the caller. NumBytes -= mergeSPUpdates(MBB, MBBI, true); // Adjust stack pointer: ESP -= numbytes. // Windows and cygwin/mingw require a prologue helper routine when allocating // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the // stack and adjust the stack pointer in one go. The 64-bit version of // __chkstk is only responsible for probing the stack. The 64-bit prologue is // responsible for adjusting the stack pointer. Touching the stack at 4K // increments is necessary to ensure that the guard pages used by the OS // virtual memory manager are allocated in correct sequence. 
uint64_t AlignedNumBytes = NumBytes; if (IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF)) AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign); if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) { assert(!X86FI->getUsesRedZone() && "The Red Zone is not accounted for in stack probes"); // Check whether EAX is livein for this block. bool isEAXAlive = isEAXLiveIn(MBB); if (isEAXAlive) { if (Is64Bit) { // Save RAX BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r)) .addReg(X86::RAX, RegState::Kill) .setMIFlag(MachineInstr::FrameSetup); } else { // Save EAX BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r)) .addReg(X86::EAX, RegState::Kill) .setMIFlag(MachineInstr::FrameSetup); } } if (Is64Bit) { // Handle the 64-bit Windows ABI case where we need to call __chkstk. // Function prologue is responsible for adjusting the stack pointer. int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes; if (isUInt<32>(Alloc)) { BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX) .addImm(Alloc) .setMIFlag(MachineInstr::FrameSetup); } else if (isInt<32>(Alloc)) { BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX) .addImm(Alloc) .setMIFlag(MachineInstr::FrameSetup); } else { BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX) .addImm(Alloc) .setMIFlag(MachineInstr::FrameSetup); } } else { // Allocate NumBytes-4 bytes on stack in case of isEAXAlive. // We'll also use 4 already allocated bytes for EAX. BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX) .addImm(isEAXAlive ? NumBytes - 4 : NumBytes) .setMIFlag(MachineInstr::FrameSetup); } // Call __chkstk, __chkstk_ms, or __alloca. 
emitStackProbe(MF, MBB, MBBI, DL, true); if (isEAXAlive) { // Restore RAX/EAX MachineInstr *MI; if (Is64Bit) MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV64rm), X86::RAX), StackPtr, false, NumBytes - 8); else MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX), StackPtr, false, NumBytes - 4); MI->setFlag(MachineInstr::FrameSetup); MBB.insert(MBBI, MI); } } else if (NumBytes) { emitSPUpdate(MBB, MBBI, DL, -(int64_t)NumBytes, /*InEpilogue=*/false); } if (NeedsWinCFI && NumBytes) { HasWinCFI = true; BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc)) .addImm(NumBytes) .setMIFlag(MachineInstr::FrameSetup); } int SEHFrameOffset = 0; unsigned SPOrEstablisher; if (IsFunclet) { if (IsClrFunclet) { // The establisher parameter passed to a CLR funclet is actually a pointer // to the (mostly empty) frame of its nearest enclosing funclet; we have // to find the root function establisher frame by loading the PSPSym from // the intermediate frame. unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF); MachinePointerInfo NoInfo; MBB.addLiveIn(Establisher); addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher), Establisher, false, PSPSlotOffset) .addMemOperand(MF.getMachineMemOperand( NoInfo, MachineMemOperand::MOLoad, SlotSize, Align(SlotSize))); ; // Save the root establisher back into the current funclet's (mostly // empty) frame, in case a sub-funclet or the GC needs it. addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false, PSPSlotOffset) .addReg(Establisher) .addMemOperand(MF.getMachineMemOperand( NoInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile, SlotSize, Align(SlotSize))); } SPOrEstablisher = Establisher; } else { SPOrEstablisher = StackPtr; } if (IsWin64Prologue && HasFP) { // Set RBP to a small fixed offset from RSP. In the funclet case, we base // this calculation on the incoming establisher, which holds the value of // RSP from the parent frame at the end of the prologue. 
SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes); if (SEHFrameOffset) addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr), SPOrEstablisher, false, SEHFrameOffset); else BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr) .addReg(SPOrEstablisher); // If this is not a funclet, emit the CFI describing our frame pointer. if (NeedsWinCFI && !IsFunclet) { assert(!NeedsWinFPO && "this setframe incompatible with FPO data"); HasWinCFI = true; BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame)) .addImm(FramePtr) .addImm(SEHFrameOffset) .setMIFlag(MachineInstr::FrameSetup); if (isAsynchronousEHPersonality(Personality)) MF.getWinEHFuncInfo()->SEHSetFrameOffset = SEHFrameOffset; } } else if (IsFunclet && STI.is32Bit()) { // Reset EBP / ESI to something good for funclets. MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL); // If we're a catch funclet, we can be returned to via catchret. Save ESP // into the registration node so that the runtime will restore it for us. if (!MBB.isCleanupFuncletEntry()) { assert(Personality == EHPersonality::MSVC_CXX); Register FrameReg; int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex; int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg).getFixed(); // ESP is the first field, so no extra displacement is needed. 
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg, false, EHRegOffset) .addReg(X86::ESP); } } while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) { const MachineInstr &FrameInstr = *MBBI; ++MBBI; if (NeedsWinCFI) { int FI; if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) { if (X86::FR64RegClass.contains(Reg)) { int Offset; Register IgnoredFrameReg; if (IsWin64Prologue && IsFunclet) Offset = getWin64EHFrameIndexRef(MF, FI, IgnoredFrameReg); else Offset = getFrameIndexReference(MF, FI, IgnoredFrameReg).getFixed() + SEHFrameOffset; HasWinCFI = true; assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data"); BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM)) .addImm(Reg) .addImm(Offset) .setMIFlag(MachineInstr::FrameSetup); } } } } if (NeedsWinCFI && HasWinCFI) BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue)) .setMIFlag(MachineInstr::FrameSetup); if (FnHasClrFunclet && !IsFunclet) { // Save the so-called Initial-SP (i.e. the value of the stack pointer // immediately after the prolog) into the PSPSlot so that funclets // and the GC can recover it. unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF); auto PSPInfo = MachinePointerInfo::getFixedStack( MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx); addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false, PSPSlotOffset) .addReg(StackPtr) .addMemOperand(MF.getMachineMemOperand( PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile, SlotSize, Align(SlotSize))); } // Realign stack after we spilled callee-saved registers (so that we'll be // able to calculate their offsets from the frame pointer). // Win64 requires aligning the stack after the prologue. if (IsWin64Prologue && TRI->hasStackRealignment(MF)) { assert(HasFP && "There should be a frame pointer if stack is realigned."); BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign); } // We already dealt with stack realignment and funclets above. 
if (IsFunclet && STI.is32Bit()) return; // If we need a base pointer, set it up here. It's whatever the value // of the stack pointer is at this point. Any variable size objects // will be allocated after this, so we can still use the base pointer // to reference locals. if (TRI->hasBasePointer(MF)) { // Update the base pointer with the current stack pointer. unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr; BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr) .addReg(SPOrEstablisher) .setMIFlag(MachineInstr::FrameSetup); if (X86FI->getRestoreBasePointer()) { // Stash value of base pointer. Saving RSP instead of EBP shortens // dependence chain. Used by SjLj EH. unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr; addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), FramePtr, true, X86FI->getRestoreBasePointerOffset()) .addReg(SPOrEstablisher) .setMIFlag(MachineInstr::FrameSetup); } if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) { // Stash the value of the frame pointer relative to the base pointer for // Win32 EH. This supports Win32 EH, which does the inverse of the above: // it recovers the frame pointer from the base pointer rather than the // other way around. unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr; Register UsedReg; int Offset = getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg) .getFixed(); assert(UsedReg == BasePtr); addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset) .addReg(FramePtr) .setMIFlag(MachineInstr::FrameSetup); } } if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) { // Mark end of stack pointer adjustment. if (!HasFP && NumBytes) { // Define the current CFA rule to use the provided offset. assert(StackSize); BuildCFI( MBB, MBBI, DL, MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize - stackGrowth)); } // Emit DWARF info specifying the offsets of the callee-saved registers. 
emitCalleeSavedFrameMoves(MBB, MBBI, DL, true); } // X86 Interrupt handling function cannot assume anything about the direction // flag (DF in EFLAGS register). Clear this flag by creating "cld" instruction // in each prologue of interrupt handler function. // // FIXME: Create "cld" instruction only in these cases: // 1. The interrupt handling function uses any of the "rep" instructions. // 2. Interrupt handling function calls another function. // if (Fn.getCallingConv() == CallingConv::X86_INTR) BuildMI(MBB, MBBI, DL, TII.get(X86::CLD)) .setMIFlag(MachineInstr::FrameSetup); // At this point we know if the function has WinCFI or not. MF.setHasWinCFI(HasWinCFI); } bool X86FrameLowering::canUseLEAForSPInEpilogue( const MachineFunction &MF) const { // We can't use LEA instructions for adjusting the stack pointer if we don't // have a frame pointer in the Win64 ABI. Only ADD instructions may be used // to deallocate the stack. // This means that we can use LEA for SP in two situations: // 1. We *aren't* using the Win64 ABI which means we are free to use LEA. // 2. We *have* a frame pointer which means we are permitted to use LEA. return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF); } static bool isFuncletReturnInstr(MachineInstr &MI) { switch (MI.getOpcode()) { case X86::CATCHRET: case X86::CLEANUPRET: return true; default: return false; } llvm_unreachable("impossible"); } // CLR funclets use a special "Previous Stack Pointer Symbol" slot on the // stack. It holds a pointer to the bottom of the root function frame. The // establisher frame pointer passed to a nested funclet may point to the // (mostly empty) frame of its parent funclet, but it will need to find // the frame of the root function to access locals. To facilitate this, // every funclet copies the pointer to the bottom of the root function // frame into a PSPSym slot in its own (mostly empty) stack frame. 
Using the // same offset for the PSPSym in the root function frame that's used in the // funclets' frames allows each funclet to dynamically accept any ancestor // frame as its establisher argument (the runtime doesn't guarantee the // immediate parent for some reason lost to history), and also allows the GC, // which uses the PSPSym for some bookkeeping, to find it in any funclet's // frame with only a single offset reported for the entire method. unsigned X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const { const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo(); Register SPReg; int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg, /*IgnoreSPUpdates*/ true) .getFixed(); assert(Offset >= 0 && SPReg == TRI->getStackRegister()); return static_cast(Offset); } unsigned X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const { const X86MachineFunctionInfo *X86FI = MF.getInfo(); // This is the size of the pushed CSRs. unsigned CSSize = X86FI->getCalleeSavedFrameSize(); // This is the size of callee saved XMMs. const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo(); unsigned XMMSize = WinEHXMMSlotInfo.size() * TRI->getSpillSize(X86::VR128RegClass); // This is the amount of stack a funclet needs to allocate. unsigned UsedSize; EHPersonality Personality = classifyEHPersonality(MF.getFunction().getPersonalityFn()); if (Personality == EHPersonality::CoreCLR) { // CLR funclets need to hold enough space to include the PSPSym, at the // same offset from the stack pointer (immediately after the prolog) as it // resides at in the main function. UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize; } else { // Other funclets just need enough stack for outgoing call arguments. UsedSize = MF.getFrameInfo().getMaxCallFrameSize(); } // RBP is not included in the callee saved register block. After pushing RBP, // everything is 16 byte aligned. Everything we allocate before an outgoing // call must also be 16 byte aligned. 
unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlign());
  // Subtract out the size of the callee saved registers. This is how much stack
  // each funclet will allocate.
  return FrameSizeMinusRBP + XMMSize - CSSize;
}

/// Return true if \p Opc is one of the x86 tail-call (TCRETURN) pseudos.
static bool isTailCallOpcode(unsigned Opc) {
  return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
         Opc == X86::TCRETURNmi || Opc == X86::TCRETURNri64 ||
         Opc == X86::TCRETURNdi64 || Opc == X86::TCRETURNmi64;
}

/// Emit the function epilogue: pop the frame pointer and callee-saved
/// registers, undo the stack allocation, and emit matching CFI/SEH unwind
/// directives. Instruction insertion order here is load-bearing.
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // NOTE(review): template argument appears lost in extraction — upstream is
  // MF.getInfo<X86MachineFunctionInfo>(); confirm against the original file.
  X86MachineFunctionInfo *X86FI = MF.getInfo();
  MachineBasicBlock::iterator Terminator = MBB.getFirstTerminator();
  MachineBasicBlock::iterator MBBI = Terminator;
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
  const bool Is64BitILP32 = STI.isTarget64BitILP32();
  Register FramePtr = TRI->getFrameRegister(MF);
  Register MachineFramePtr =
      Is64BitILP32 ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr;

  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool NeedsWin64CFI =
      IsWin64Prologue && MF.getFunction().needsUnwindTableEntry();
  bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI.getStackSize();
  uint64_t MaxAlign = calculateMaxStackAlign(MF);
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  bool HasFP = hasFP(MF);
  uint64_t NumBytes = 0;

  // Darwin and Windows use compact-unwind / WinEH instead of DWARF CFI here.
  bool NeedsDwarfCFI = (!MF.getTarget().getTargetTriple().isOSDarwin() &&
                        !MF.getTarget().getTargetTriple().isOSWindows()) &&
                       MF.needsFrameMoves();

  if (IsFunclet) {
    assert(HasFP && "EH funclets without FP not yet implemented");
    NumBytes = getWinEHFuncletFrameSize(MF);
  } else if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    NumBytes = FrameSize - CSSize;

    // Callee-saved registers were pushed on stack before the stack was
    // realigned.
    if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = alignTo(FrameSize, MaxAlign);
  } else {
    NumBytes = StackSize - CSSize;
  }
  uint64_t SEHStackAllocAmt = NumBytes;

  // AfterPop is the position to insert .cfi_restore.
  MachineBasicBlock::iterator AfterPop = MBBI;
  if (HasFP) {
    if (X86FI->hasSwiftAsyncContext()) {
      // Discard the context.
      int Offset = 16 + mergeSPUpdates(MBB, MBBI, true);
      emitSPUpdate(MBB, MBBI, DL, Offset, /*InEpilogue*/true);
    }
    // Pop EBP.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
            MachineFramePtr)
        .setMIFlag(MachineInstr::FrameDestroy);

    // We need to reset FP to its untagged state on return. Bit 60 is currently
    // used to show the presence of an extended frame.
    if (X86FI->hasSwiftAsyncContext()) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::BTR64ri8), MachineFramePtr)
          .addUse(MachineFramePtr)
          .addImm(60)
          .setMIFlag(MachineInstr::FrameDestroy);
    }

    if (NeedsDwarfCFI) {
      // After popping RBP the CFA is again SP-relative.
      unsigned DwarfStackPtr =
          TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfa(nullptr, DwarfStackPtr, SlotSize));
      if (!MBB.succ_empty() && !MBB.isReturnBlock()) {
        unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
        BuildCFI(MBB, AfterPop, DL,
                 MCCFIInstruction::createRestore(nullptr, DwarfFramePtr));
        --MBBI;
        --AfterPop;
      }
      --MBBI;
    }
  }

  MachineBasicBlock::iterator FirstCSPop = MBBI;
  // Skip the callee-saved pop instructions.
  // Only instructions explicitly flagged FrameDestroy are treated as part of
  // the CSR-restore sequence; anything else terminates the scan.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = std::prev(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
      if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
          (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
          (Opc != X86::BTR64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)) &&
          (Opc != X86::ADD64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)))
        break;
      FirstCSPop = PI;
    }

    --MBBI;
  }
  MBBI = FirstCSPop;

  if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
    emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI.hasVarSizedObjects())
    NumBytes += mergeSPUpdates(MBB, MBBI, true);

  // If dynamic alloca is used, then reset esp to point to the last callee-saved
  // slot before popping them off! Same applies for the case, when stack was
  // realigned. Don't do this if this was a funclet epilogue, since the funclets
  // will not do realignment or dynamic stack allocation.
  if (((TRI->hasStackRealignment(MF)) || MFI.hasVarSizedObjects()) &&
      !IsFunclet) {
    if (TRI->hasStackRealignment(MF))
      MBBI = FirstCSPop;
    unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
    uint64_t LEAAmount =
        IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;

    if (X86FI->hasSwiftAsyncContext())
      LEAAmount -= 16;

    // There are only two legal forms of epilogue:
    // - add SEHAllocationSize, %rsp
    // - lea SEHAllocationSize(%FramePtr), %rsp
    //
    // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
    // However, we may use this sequence if we have a frame pointer because the
    // effects of the prologue can safely be undone.
    if (LEAAmount != 0) {
      unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                   FramePtr, false, LEAAmount);
      --MBBI;
    } else {
      unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(FramePtr);
      --MBBI;
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, DL, NumBytes, /*InEpilogue=*/true);
    if (!hasFP(MF) && NeedsDwarfCFI) {
      // Define the current CFA rule to use the provided offset.
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfaOffset(nullptr, CSSize + SlotSize));
    }
    --MBBI;
  }

  // Windows unwinder will not invoke function's exception handler if IP is
  // either in prologue or in epilogue.  This behavior causes a problem when a
  // call immediately precedes an epilogue, because the return address points
  // into the epilogue.  To cope with that, we insert an epilogue marker here,
  // then replace it with a 'nop' if it ends up immediately after a CALL in the
  // final emitted code.
  if (NeedsWin64CFI && MF.hasWinCFI())
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));

  if (!hasFP(MF) && NeedsDwarfCFI) {
    MBBI = FirstCSPop;
    int64_t Offset = -CSSize - SlotSize;
    // Mark callee-saved pop instruction.
    // Define the current CFA rule to use the provided offset.
    while (MBBI != MBB.end()) {
      MachineBasicBlock::iterator PI = MBBI;
      unsigned Opc = PI->getOpcode();
      ++MBBI;
      if (Opc == X86::POP32r || Opc == X86::POP64r) {
        Offset += SlotSize;
        BuildCFI(MBB, MBBI, DL,
                 MCCFIInstruction::cfiDefCfaOffset(nullptr, -Offset));
      }
    }
  }

  // Emit DWARF info specifying the restores of the callee-saved registers.
  // For epilogue with return inside or being other block without successor,
  // no need to generate .cfi_restore for callee-saved registers.
  if (NeedsDwarfCFI && !MBB.succ_empty() && !MBB.isReturnBlock()) {
    emitCalleeSavedFrameMoves(MBB, AfterPop, DL, false);
  }

  if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) {
    // Add the return addr area delta back since we are not tail calling.
    int Offset = -1 * X86FI->getTCReturnAddrDelta();
    assert(Offset >= 0 && "TCDelta should never be positive");
    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, Terminator, true);
      emitSPUpdate(MBB, Terminator, DL, Offset, /*InEpilogue=*/true);
    }
  }

  // Emit tilerelease for AMX kernel.
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterClass *RC = TRI->getRegClass(X86::TILERegClassID);
  for (unsigned I = 0; I < RC->getNumRegs(); I++)
    if (!MRI.reg_nodbg_empty(X86::TMM0 + I)) {
      BuildMI(MBB, Terminator, DL, TII.get(X86::TILERELEASE));
      break;
    }
}

/// Compute the offset of frame index \p FI from the register that should be
/// used to address it, and report that register in \p FrameReg.
StackOffset X86FrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                     int FI,
                                                     Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  bool IsFixed = MFI.isFixedObjectIndex(FI);
  // We can't calculate offset from frame pointer if the stack is realigned,
  // so enforce usage of stack/base pointer.  The base pointer is used when we
  // have dynamic allocas in addition to dynamic realignment.
  if (TRI->hasBasePointer(MF))
    FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
  else if (TRI->hasStackRealignment(MF))
    FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
  else
    FrameReg = TRI->getFrameRegister(MF);

  // Offset will hold the offset from the stack pointer at function entry to the
  // object.
  // We need to factor in additional offsets applied during the prologue to the
  // frame, base, and stack pointer depending on which is used.
int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
  // NOTE(review): template argument appears lost in extraction — upstream is
  // MF.getInfo<X86MachineFunctionInfo>(); confirm against the original file.
  const X86MachineFunctionInfo *X86FI = MF.getInfo();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t StackSize = MFI.getStackSize();
  bool HasFP = hasFP(MF);
  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  int64_t FPDelta = 0;

  // In an x86 interrupt, remove the offset we added to account for the return
  // address from any stack object allocated in the caller's frame. Interrupts
  // do not have a standard return address. Fixed objects in the current frame,
  // such as SSE register spills, should not get this treatment.
  if (MF.getFunction().getCallingConv() == CallingConv::X86_INTR &&
      Offset >= 0) {
    Offset += getOffsetOfLocalArea();
  }

  if (IsWin64Prologue) {
    assert(!MFI.hasCalls() || (StackSize % 16) == 8);

    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for extra hidden slot for stashing base
    // pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;
    uint64_t NumBytes = FrameSize - CSSize;

    uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
    if (FI && FI == X86FI->getFAIndex())
      return StackOffset::getFixed(-SEHFrameOffset);

    // FPDelta is the offset from the "traditional" FP location of the old base
    // pointer followed by return address and the location required by the
    // restricted Win64 prologue.
    // Add FPDelta to all offsets below that go through the frame pointer.
    FPDelta = FrameSize - SEHFrameOffset;
    assert((!MFI.hasCalls() || (FPDelta % 16) == 0) &&
           "FPDelta isn't aligned per the Win64 ABI!");
  }

  if (TRI->hasBasePointer(MF)) {
    assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
    if (FI < 0) {
      // Skip the saved EBP.
      return StackOffset::getFixed(Offset + SlotSize + FPDelta);
    } else {
      assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize)));
      return StackOffset::getFixed(Offset + StackSize);
    }
  } else if (TRI->hasStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      return StackOffset::getFixed(Offset + SlotSize + FPDelta);
    } else {
      assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize)));
      return StackOffset::getFixed(Offset + StackSize);
    }
    // FIXME: Support tail calls
  } else {
    if (!HasFP)
      return StackOffset::getFixed(Offset + StackSize);

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return StackOffset::getFixed(Offset + FPDelta);
}

/// Return the SP-relative offset for a Win64 EH XMM spill slot for \p FI,
/// falling back to the generic frame-index lowering for non-XMM slots.
int X86FrameLowering::getWin64EHFrameIndexRef(const MachineFunction &MF, int FI,
                                              Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // NOTE(review): template argument appears lost in extraction — upstream is
  // MF.getInfo<X86MachineFunctionInfo>(); confirm against the original file.
  const X86MachineFunctionInfo *X86FI = MF.getInfo();
  const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
  const auto it = WinEHXMMSlotInfo.find(FI);

  if (it == WinEHXMMSlotInfo.end())
    return getFrameIndexReference(MF, FI, FrameReg).getFixed();

  FrameReg = TRI->getStackRegister();
  return alignDown(MFI.getMaxCallFrameSize(), getStackAlign().value()) +
         it->second;
}

/// Address frame index \p FI relative to the stack pointer, applying the given
/// extra \p Adjustment.
StackOffset
X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF, int FI,
                                           Register &FrameReg,
                                           int Adjustment) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  FrameReg = TRI->getStackRegister();
  return StackOffset::getFixed(MFI.getObjectOffset(FI) -
                               getOffsetOfLocalArea() + Adjustment);
}

/// Like getFrameIndexReference, but prefers addressing via the stack pointer
/// whenever that is legal for this frame layout.
StackOffset
X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
                                                 int FI, Register &FrameReg,
                                                 bool IgnoreSPUpdates) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // Does not include any dynamic realign.
  const uint64_t StackSize = MFI.getStackSize();

  // LLVM arranges the stack as follows:
  //   ...
  //   ARG2
  //   ARG1
  //   RETADDR
  //   PUSH RBP   <-- RBP points here
  //   PUSH CSRs
  //   ~~~~~~~    <-- possible stack realignment (non-win64)
  //   ...
  //   STACK OBJECTS
  //   ...        <-- RSP after prologue points here
  //   ~~~~~~~    <-- possible stack realignment (win64)
  //
  // if (hasVarSizedObjects()):
  //   ...        <-- "base pointer" (ESI/RBX) points here
  //   DYNAMIC ALLOCAS
  //   ...        <-- RSP points here
  //
  // Case 1: In the simple case of no stack realignment and no dynamic
  // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
  // with fixed offsets from RSP.
  //
  // Case 2: In the case of stack realignment with no dynamic allocas, fixed
  // stack objects are addressed with RBP and regular stack objects with RSP.
  //
  // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
  // to address stack arguments for outgoing calls and nothing else. The "base
  // pointer" points to local variables, and RBP points to fixed objects.
  //
  // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
  // answer we give is relative to the SP after the prologue, and not the
  // SP in the middle of the function.

  if (MFI.isFixedObjectIndex(FI) && TRI->hasStackRealignment(MF) &&
      !STI.isTargetWin64())
    return getFrameIndexReference(MF, FI, FrameReg);

  // If !hasReservedCallFrame the function might have SP adjustement in the
  // body.  So, even though the offset is statically known, it depends on where
  // we are in the function.
  if (!IgnoreSPUpdates && !hasReservedCallFrame(MF))
    return getFrameIndexReference(MF, FI, FrameReg);

  // We don't handle tail calls, and shouldn't be seeing them either.
  // NOTE(review): template argument appears lost in extraction — upstream is
  // MF.getInfo<X86MachineFunctionInfo>(); confirm against the original file.
  assert(MF.getInfo()->getTCReturnAddrDelta() >= 0 &&
         "we don't handle this case!");

  // This is how the math works out:
  //
  //  %rsp grows (i.e. gets lower) left to right. Each box below is
  //  one word (eight bytes). Obj0 is the stack slot we're trying to
  //  get to.
  //
  //    ----------------------------------
  //    | BP | Obj0 | Obj1 | ... | ObjN |
  //    ----------------------------------
  //    ^    ^      ^                   ^
  //    A    B      C                   E
  //
  // A is the incoming stack pointer.
  // (B - A) is the local area offset (-8 for x86-64) [1]
  // (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2]
  //
  // |(E - B)| is the StackSize (absolute value, positive).  For a
  // stack that grows down, this works out to be (B - E). [3]
  //
  // E is also the value of %rsp after stack has been set up, and we
  // want (C - E) -- the value we can add to %rsp to get to Obj0.  Now
  // (C - E) == (C - A) - (B - A) + (B - E)
  //            { Using [1], [2] and [3] above }
  //         == getObjectOffset - LocalAreaOffset + StackSize
  return getFrameIndexReferenceSP(MF, FI, FrameReg, StackSize);
}

bool X86FrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector &CSI) const {
  // NOTE(review): the CSI parameter above lost its template argument in
  // extraction — upstream is std::vector<CalleeSavedInfo> &CSI.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // NOTE(review): template argument appears lost in extraction — upstream is
  // MF.getInfo<X86MachineFunctionInfo>(); confirm against the original file.
  X86MachineFunctionInfo *X86FI = MF.getInfo();

  unsigned CalleeSavedFrameSize = 0;
  unsigned XMMCalleeSavedFrameSize = 0;
  auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
  int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();

  int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI.CreateFixedObject(-TailCallReturnAddrDelta,
                          TailCallReturnAddrDelta - SlotSize, true);
  }

  // Spill the BasePtr if it's used.
  if (this->TRI->hasBasePointer(MF)) {
    // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
    if (MF.hasEHFunclets()) {
      int FI = MFI.CreateSpillStackObject(SlotSize, Align(SlotSize));
      X86FI->setHasSEHFramePtrSave(true);
      X86FI->setSEHFramePtrSaveIndex(FI);
    }
  }

  if (hasFP(MF)) {
    // emitPrologue always spills frame register the first thing.
    SpillSlotOffset -= SlotSize;
    MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);

    // The async context lives directly before the frame pointer, and we
    // allocate a second slot to preserve stack alignment.
if (X86FI->hasSwiftAsyncContext()) {
      SpillSlotOffset -= SlotSize;
      MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
      SpillSlotOffset -= SlotSize;
    }

    // Since emitPrologue and emitEpilogue will handle spilling and restoring of
    // the frame register, we can delete it from CSI list and not have to worry
    // about avoiding it later.
    Register FPReg = TRI->getFrameRegister(MF);
    for (unsigned i = 0; i < CSI.size(); ++i) {
      if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
        CSI.erase(CSI.begin() + i);
        break;
      }
    }
  }

  // Assign slots for GPRs. It increases frame size.
  // Iterate in reverse so the slots mirror the push order of the prologue.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();

    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;

    SpillSlotOffset -= SlotSize;
    CalleeSavedFrameSize += SlotSize;

    int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
    CSI[i - 1].setFrameIdx(SlotIndex);
  }

  X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
  MFI.setCVBytesOfCalleeSavedRegisters(CalleeSavedFrameSize);

  // Assign slots for XMMs.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();
    if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
      continue;

    // If this is k-register make sure we lookup via the largest legal type.
    MVT VT = MVT::Other;
    if (X86::VK16RegClass.contains(Reg))
      VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
    unsigned Size = TRI->getSpillSize(*RC);
    Align Alignment = TRI->getSpillAlign(*RC);
    // ensure alignment
    assert(SpillSlotOffset < 0 && "SpillSlotOffset should always < 0 on X86");
    SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment);

    // spill into slot
    SpillSlotOffset -= Size;
    int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset);
    CSI[i - 1].setFrameIdx(SlotIndex);
    MFI.ensureMaxAlignment(Alignment);

    // Save the start offset and size of XMM in stack frame for funclets.
    if (X86::VR128RegClass.contains(Reg)) {
      WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;
      XMMCalleeSavedFrameSize += Size;
    }
  }

  return true;
}

bool X86FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef CSI, const TargetRegisterInfo *TRI) const {
  // NOTE(review): the CSI parameter above lost its template argument in
  // extraction — upstream is ArrayRef<CalleeSavedInfo> CSI.
  DebugLoc DL = MBB.findDebugLoc(MI);

  // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
  // for us, and there are no XMM CSRs on Win32.
  if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
    return true;

  // Push GPRs. It increases frame size.
  const MachineFunction &MF = *MBB.getParent();
  unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();

    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    bool isLiveIn = MRI.isLiveIn(Reg);
    if (!isLiveIn)
      MBB.addLiveIn(Reg);

    // Decide whether we can add a kill flag to the use.
    bool CanKill = !isLiveIn;
    // Check if any subregister is live-in
    if (CanKill) {
      for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
        if (MRI.isLiveIn(*AReg)) {
          CanKill = false;
          break;
        }
      }
    }

    // Do not set a kill flag on values that are also marked as live-in. This
    // happens with the @llvm-returnaddress intrinsic and with arguments
    // passed in callee saved registers.
    // Omitting the kill flags is conservatively correct even if the live-in
    // is not used after all.
    BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
      .setMIFlag(MachineInstr::FrameSetup);
  }

  // Make XMM regs spilled. X86 does not have ability of push/pop XMM.
  // It can be done by spilling XMMs to stack frame.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
      continue;

    // If this is k-register make sure we lookup via the largest legal type.
    MVT VT = MVT::Other;
    if (X86::VK16RegClass.contains(Reg))
      VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;

    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
                            TRI);
    // Flag the just-inserted spill instruction as part of frame setup.
    --MI;
    MI->setFlag(MachineInstr::FrameSetup);
    ++MI;
  }

  return true;
}

/// Materialize the CATCHRET target address into EAX/RAX, as required by the
/// (non-SEH) Windows EH runtime.
void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MBBI,
                                               MachineInstr *CatchRet) const {
  // SEH shouldn't use catchret.
  assert(!isAsynchronousEHPersonality(classifyEHPersonality(
             MBB.getParent()->getFunction().getPersonalityFn())) &&
         "SEH should not use CATCHRET");
  const DebugLoc &DL = CatchRet->getDebugLoc();
  MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();

  // Fill EAX/RAX with the address of the target block.
  if (STI.is64Bit()) {
    // LEA64r CatchRetTarget(%rip), %rax
    BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), X86::RAX)
        .addReg(X86::RIP)
        .addImm(0)
        .addReg(0)
        .addMBB(CatchRetTarget)
        .addReg(0);
  } else {
    // MOV32ri $CatchRetTarget, %eax
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addMBB(CatchRetTarget);
  }

  // Record that we've taken the address of CatchRetTarget and no longer just
  // reference it in a terminator.
  CatchRetTarget->setHasAddressTaken();
}

bool X86FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    MutableArrayRef CSI, const TargetRegisterInfo *TRI) const {
  // NOTE(review): the CSI parameter above lost its template argument in
  // extraction — upstream is MutableArrayRef<CalleeSavedInfo> CSI.
  if (CSI.empty())
    return false;

  if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
    // Don't restore CSRs in 32-bit EH funclets. Matches
    // spillCalleeSavedRegisters.
    if (STI.is32Bit())
      return true;
    // Don't restore CSRs before an SEH catchret. SEH except blocks do not form
    // funclets. emitEpilogue transforms these to normal jumps.
    if (MI->getOpcode() == X86::CATCHRET) {
      const Function &F = MBB.getParent()->getFunction();
      bool IsSEH = isAsynchronousEHPersonality(
          classifyEHPersonality(F.getPersonalityFn()));
      if (IsSEH)
        return true;
    }
  }

  DebugLoc DL = MBB.findDebugLoc(MI);

  // Reload XMMs from stack frame.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;

    // If this is k-register make sure we lookup via the largest legal type.
    MVT VT = MVT::Other;
    if (X86::VK16RegClass.contains(Reg))
      VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
    TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
  }

  // POP GPRs.
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;

    BuildMI(MBB, MI, DL, TII.get(Opc), Reg)
        .setMIFlag(MachineInstr::FrameDestroy);
  }
  return true;
}

void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
                                            BitVector &SavedRegs,
                                            RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  // Spill the BasePtr if it's used.
  if (TRI->hasBasePointer(MF)){
    Register BasePtr = TRI->getBaseRegister();
    if (STI.isTarget64BitILP32())
      BasePtr = getX86SubSuperRegister(BasePtr, 64);
    SavedRegs.set(BasePtr);
  }
}

/// Return true if the function takes a 'nest' argument that is actually used.
static bool HasNestArgument(const MachineFunction *MF) {
  const Function &F = MF->getFunction();
  for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
       I != E; I++) {
    if (I->hasNestAttr() && !I->use_empty())
      return true;
  }
  return false;
}

/// GetScratchRegister - Get a temp register for performing work in the
/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
/// and the properties of the function either one or two registers will be
/// needed.
/// Set primary to true for the first register, false for the second.
static unsigned GetScratchRegister(bool Is64Bit, bool IsLP64,
                                   const MachineFunction &MF, bool Primary) {
  CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();

  // Erlang stuff.
  if (CallingConvention == CallingConv::HiPE) {
    if (Is64Bit)
      return Primary ? X86::R14 : X86::R13;
    else
      return Primary ? X86::EBX : X86::EDI;
  }

  if (Is64Bit) {
    if (IsLP64)
      return Primary ? X86::R11 : X86::R12;
    else
      return Primary ? X86::R11D : X86::R12D;
  }

  bool IsNested = HasNestArgument(&MF);

  if (CallingConvention == CallingConv::X86_FastCall ||
      CallingConvention == CallingConv::Fast ||
      CallingConvention == CallingConv::Tail) {
    if (IsNested)
      report_fatal_error("Segmented stacks does not support fastcall with "
                         "nested function.");
    return Primary ? X86::EAX : X86::ECX;
  }
  if (IsNested)
    return Primary ? X86::EDX : X86::EAX;
  return Primary ? X86::ECX : X86::EAX;
}

// The stack limit in the TCB is set to this many bytes above the actual stack
// limit.
static const uint64_t kSplitStackAvailable = 256;

/// Insert the split-stack (segmented stack) check-and-grow blocks in front of
/// the prologue: compare SP against the TLS stacklet limit and call
/// __morestack when more stack is needed.
void X86FrameLowering::adjustForSegmentedStacks(
    MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize;
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;

  // To support shrink-wrapping we would need to insert the new blocks
  // at the right place and update the branches to PrologueMBB.
  assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");

  unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction().isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
      !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
      !STI.isTargetDragonFly())
    report_fatal_error("Segmented stacks not supported on this platform.");

  // Eventually StackSize will be calculated by a link-time pass; which will
  // also decide whether checking code needs to be injected into this particular
  // prologue.
  StackSize = MFI.getStackSize();

  // Do not generate a prologue for leaf functions with a stack of size zero.
  // For non-leaf functions we have to allow for the possibility that the
  // call is to a non-split function, as in PR37807. This function could also
  // take the address of a non-split function. When the linker tries to adjust
  // its non-existent prologue, it would fail with an error. Mark the object
  // file so that such failures are not errors. See this Go language bug-report
  // https://go-review.googlesource.com/c/go/+/148819/
  if (StackSize == 0 && !MFI.hasTailCall()) {
    MF.getMMI().setHasNosplitStack(true);
    return;
  }

  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  // NOTE(review): template argument appears lost in extraction — upstream is
  // MF.getInfo<X86MachineFunctionInfo>(); confirm against the original file.
  X86MachineFunctionInfo *X86FI = MF.getInfo();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64 bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit in
  // allocMBB needs to be last (terminating) instruction.

  for (const auto &LI : PrologueMBB.liveins()) {
    allocMBB->addLiveIn(LI);
    checkMBB->addLiveIn(LI);
  }

  if (IsNested)
    allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);

  MF.push_front(allocMBB);
  MF.push_front(checkMBB);

  // When the frame size is less than 256 we just compare the stack
  // boundary directly to the value of the stack pointer, per gcc.
  bool CompareStackPointer = StackSize < kSplitStackAvailable;

  // Read the limit off the current stacklet off the stack_guard location.
  if (Is64Bit) {
    if (STI.isTargetLinux()) {
      TlsReg = X86::FS;
      TlsOffset = IsLP64 ? 0x70 : 0x40;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
    } else if (STI.isTargetWin64()) {
      TlsReg = X86::GS;
      TlsOffset = 0x28; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      TlsReg = X86::FS;
      TlsOffset = 0x18;
    } else if (STI.isTargetDragonFly()) {
      TlsReg = X86::FS;
      TlsOffset = 0x20; // use tls_tcb.tcb_segstack
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r),
              ScratchReg).addReg(X86::RSP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm))
      .addReg(ScratchReg)
      .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  } else {
    if (STI.isTargetLinux()) {
      TlsReg = X86::GS;
      TlsOffset = 0x30;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x48 + 90*4;
    } else if (STI.isTargetWin32()) {
      TlsReg = X86::FS;
      TlsOffset = 0x14; // pvArbitrary, reserved for application use
    } else if (STI.isTargetDragonFly()) {
      TlsReg = X86::FS;
      TlsOffset = 0x10; // use tls_tcb.tcb_segstack
    } else if (STI.isTargetFreeBSD()) {
      report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
        STI.isTargetDragonFly()) {
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
        .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
    } else if (STI.isTargetDarwin()) {

      // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
      unsigned ScratchReg2;
      bool SaveScratch2;
      if (CompareStackPointer) {
        // The primary scratch register is available for holding the TLS offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
        SaveScratch2 = false;
      } else {
        // Need to use a second register to hold the TLS offset
        ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);

        // Unfortunately, with fastcc the second scratch register may hold an
        // argument.
        SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
      }

      // If Scratch2 is live-in then it needs to be saved.
      assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
             "Scratch register is live-in and not saved");

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
          .addReg(ScratchReg2, RegState::Kill);

      BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
        .addImm(TlsOffset);
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
        .addReg(ScratchReg)
        .addReg(ScratchReg2).addImm(1).addReg(0)
        .addImm(0)
        .addReg(TlsReg);

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
    }
  }

  // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
  // It jumps to normal execution of the function body.
  BuildMI(checkMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_A);

  // On 32 bit we first push the arguments size and then the frame size. On 64
  // bit, we pass the stack frame size in r10 and the argument size in r11.
  if (Is64Bit) {
    // Functions with nested arguments use R10, so it needs to be saved across
    // the call to _morestack

    const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
    const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
    const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
    const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
    const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;

    if (IsNested)
      BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);

    BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
      .addImm(StackSize);
    BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
      .addImm(X86FI->getArgumentStackSize());
  } else {
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(X86FI->getArgumentStackSize());
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(StackSize);
  }

  // __morestack is in libgcc
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // Under the large code model, we cannot assume that __morestack lives
    // within 2^31 bytes of the call site, so we cannot use pc-relative
    // addressing. We cannot perform the call via a temporary register,
    // as the rax register may be used to store the static chain, and all
    // other suitable registers may be either callee-save or used for
    // parameter passing. We cannot use the stack at this point either
    // because __morestack manipulates the stack directly.
    //
    // To avoid these issues, perform an indirect call via a read-only memory
    // location containing the address.
    //
    // This solution is not perfect, as it assumes that the .rodata section
    // is laid out within 2^31 bytes of each function body, but this seems
    // to be sufficient for JIT.
    // FIXME: Add retpoline support and remove the error here..
    if (STI.useIndirectThunkCalls())
      report_fatal_error("Emitting morestack calls on 64-bit with the large "
                         "code model and thunks not yet implemented.");
    BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
        .addReg(X86::RIP)
        .addImm(0)
        .addReg(0)
        .addExternalSymbol("__morestack_addr")
        .addReg(0);
    MF.getMMI().setUsesMorestackAddr(true);
  } else {
    if (Is64Bit)
      BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
        .addExternalSymbol("__morestack");
    else
      BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("__morestack");
  }

  if (IsNested)
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
  else
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));

  allocMBB->addSuccessor(&PrologueMBB);

  checkMBB->addSuccessor(allocMBB, BranchProbability::getZero());
  checkMBB->addSuccessor(&PrologueMBB, BranchProbability::getOne());

#ifdef EXPENSIVE_CHECKS
  MF.verify();
#endif
}

/// Lookup an ERTS parameter in the !hipe.literals named metadata node.
/// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
/// to fields it needs, through a named metadata node "hipe.literals" containing
/// name-value pairs.
static unsigned getHiPELiteral( NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) { for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) { MDNode *Node = HiPELiteralsMD->getOperand(i); if (Node->getNumOperands() != 2) continue; MDString *NodeName = dyn_cast(Node->getOperand(0)); ValueAsMetadata *NodeVal = dyn_cast(Node->getOperand(1)); if (!NodeName || !NodeVal) continue; ConstantInt *ValConst = dyn_cast_or_null(NodeVal->getValue()); if (ValConst && NodeName->getString() == LiteralName) { return ValConst->getZExtValue(); } } report_fatal_error("HiPE literal " + LiteralName + " required but not provided"); } // Return true if there are no non-ehpad successors to MBB and there are no // non-meta instructions between MBBI and MBB.end(). static bool blockEndIsUnreachable(const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI) { return llvm::all_of( MBB.successors(), [](const MachineBasicBlock *Succ) { return Succ->isEHPad(); }) && std::all_of(MBBI, MBB.end(), [](const MachineInstr &MI) { return MI.isMetaInstruction(); }); } /// Erlang programs may need a special prologue to handle the stack size they /// might need at runtime. That is because Erlang/OTP does not implement a C /// stack but uses a custom implementation of hybrid stack/heap architecture. /// (for more information see Eric Stenman's Ph.D. thesis: /// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf) /// /// CheckStack: /// temp0 = sp - MaxStack /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart /// OldStart: /// ... 
/// IncStack: /// call inc_stack # doubles the stack space /// temp0 = sp - MaxStack /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart void X86FrameLowering::adjustForHiPEPrologue( MachineFunction &MF, MachineBasicBlock &PrologueMBB) const { MachineFrameInfo &MFI = MF.getFrameInfo(); DebugLoc DL; // To support shrink-wrapping we would need to insert the new blocks // at the right place and update the branches to PrologueMBB. assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet"); // HiPE-specific values NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule() ->getNamedMetadata("hipe.literals"); if (!HiPELiteralsMD) report_fatal_error( "Can't generate HiPE prologue without runtime parameters"); const unsigned HipeLeafWords = getHiPELiteral(HiPELiteralsMD, Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS"); const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5; const unsigned Guaranteed = HipeLeafWords * SlotSize; unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ? MF.getFunction().arg_size() - CCRegisteredArgs : 0; unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize; assert(STI.isTargetLinux() && "HiPE prologue is only supported on Linux operating systems."); // Compute the largest caller's frame that is needed to fit the callees' // frames. This 'MaxStack' is computed from: // // a) the fixed frame size, which is the space needed for all spilled temps, // b) outgoing on-stack parameter areas, and // c) the minimum stack space this function needs to make available for the // functions it calls (a tunable ABI property). if (MFI.hasCalls()) { unsigned MoreStackForCalls = 0; for (auto &MBB : MF) { for (auto &MI : MBB) { if (!MI.isCall()) continue; // Get callee operand. const MachineOperand &MO = MI.getOperand(0); // Only take account of global function calls (no closures etc.). 
if (!MO.isGlobal()) continue; const Function *F = dyn_cast(MO.getGlobal()); if (!F) continue; // Do not update 'MaxStack' for primitive and built-in functions // (encoded with names either starting with "erlang."/"bif_" or not // having a ".", such as a simple .., or an // "_", such as the BIF "suspend_0") as they are executed on another // stack. if (F->getName().find("erlang.") != StringRef::npos || F->getName().find("bif_") != StringRef::npos || F->getName().find_first_of("._") == StringRef::npos) continue; unsigned CalleeStkArity = F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0; if (HipeLeafWords - 1 > CalleeStkArity) MoreStackForCalls = std::max(MoreStackForCalls, (HipeLeafWords - 1 - CalleeStkArity) * SlotSize); } } MaxStack += MoreStackForCalls; } // If the stack frame needed is larger than the guaranteed then runtime checks // and calls to "inc_stack_0" BIF should be inserted in the assembly prologue. if (MaxStack > Guaranteed) { MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock(); MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock(); for (const auto &LI : PrologueMBB.liveins()) { stackCheckMBB->addLiveIn(LI); incStackMBB->addLiveIn(LI); } MF.push_front(incStackMBB); MF.push_front(stackCheckMBB); unsigned ScratchReg, SPReg, PReg, SPLimitOffset; unsigned LEAop, CMPop, CALLop; SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT"); if (Is64Bit) { SPReg = X86::RSP; PReg = X86::RBP; LEAop = X86::LEA64r; CMPop = X86::CMP64rm; CALLop = X86::CALL64pcrel32; } else { SPReg = X86::ESP; PReg = X86::EBP; LEAop = X86::LEA32r; CMPop = X86::CMP32rm; CALLop = X86::CALLpcrel32; } ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true); assert(!MF.getRegInfo().isLiveIn(ScratchReg) && "HiPE prologue scratch register is live-in"); // Create new MBB for StackCheck: addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg), SPReg, false, -MaxStack); // SPLimitOffset is in a fixed heap location (pointed by BP). 
addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop)) .addReg(ScratchReg), PReg, false, SPLimitOffset); BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_AE); // Create new MBB for IncStack: BuildMI(incStackMBB, DL, TII.get(CALLop)). addExternalSymbol("inc_stack_0"); addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg), SPReg, false, -MaxStack); addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop)) .addReg(ScratchReg), PReg, false, SPLimitOffset); BuildMI(incStackMBB, DL, TII.get(X86::JCC_1)).addMBB(incStackMBB).addImm(X86::COND_LE); stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100}); stackCheckMBB->addSuccessor(incStackMBB, {1, 100}); incStackMBB->addSuccessor(&PrologueMBB, {99, 100}); incStackMBB->addSuccessor(incStackMBB, {1, 100}); } #ifdef EXPENSIVE_CHECKS MF.verify(); #endif } bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int Offset) const { if (Offset <= 0) return false; if (Offset % SlotSize) return false; int NumPops = Offset / SlotSize; // This is only worth it if we have at most 2 pops. if (NumPops != 1 && NumPops != 2) return false; // Handle only the trivial case where the adjustment directly follows // a call. This is the most common one, anyway. if (MBBI == MBB.begin()) return false; MachineBasicBlock::iterator Prev = std::prev(MBBI); if (!Prev->isCall() || !Prev->getOperand(1).isRegMask()) return false; unsigned Regs[2]; unsigned FoundRegs = 0; const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); const MachineOperand &RegMask = Prev->getOperand(1); auto &RegClass = Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass; // Try to find up to NumPops free registers. for (auto Candidate : RegClass) { // Poor man's liveness: // Since we're immediately after a call, any register that is clobbered // by the call and not defined by it can be considered dead. 
if (!RegMask.clobbersPhysReg(Candidate)) continue; // Don't clobber reserved registers if (MRI.isReserved(Candidate)) continue; bool IsDef = false; for (const MachineOperand &MO : Prev->implicit_operands()) { if (MO.isReg() && MO.isDef() && TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) { IsDef = true; break; } } if (IsDef) continue; Regs[FoundRegs++] = Candidate; if (FoundRegs == (unsigned)NumPops) break; } if (FoundRegs == 0) return false; // If we found only one free register, but need two, reuse the same one twice. while (FoundRegs < (unsigned)NumPops) Regs[FoundRegs++] = Regs[0]; for (int i = 0; i < NumPops; ++i) BuildMI(MBB, MBBI, DL, TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]); return true; } MachineBasicBlock::iterator X86FrameLowering:: eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const { bool reserveCallFrame = hasReservedCallFrame(MF); unsigned Opcode = I->getOpcode(); bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode(); DebugLoc DL = I->getDebugLoc(); // copy DebugLoc as I will be erased. uint64_t Amount = TII.getFrameSize(*I); uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0; I = MBB.erase(I); auto InsertPos = skipDebugInstructionsForward(I, MBB.end()); // Try to avoid emitting dead SP adjustments if the block end is unreachable, // typically because the function is marked noreturn (abort, throw, // assert_fail, etc). if (isDestroy && blockEndIsUnreachable(MBB, I)) return I; if (!reserveCallFrame) { // If the stack pointer can be changed after prologue, turn the // adjcallstackup instruction into a 'sub ESP, ' and the // adjcallstackdown instruction into 'add ESP, ' // We need to keep the stack aligned properly. To do this, we round the // amount of space needed for the outgoing arguments up to the next // alignment boundary. 
Amount = alignTo(Amount, getStackAlign()); const Function &F = MF.getFunction(); bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); bool DwarfCFI = !WindowsCFI && MF.needsFrameMoves(); // If we have any exception handlers in this function, and we adjust // the SP before calls, we may need to indicate this to the unwinder // using GNU_ARGS_SIZE. Note that this may be necessary even when // Amount == 0, because the preceding function may have set a non-0 // GNU_ARGS_SIZE. // TODO: We don't need to reset this between subsequent functions, // if it didn't change. bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty(); if (HasDwarfEHHandlers && !isDestroy && MF.getInfo()->getHasPushSequences()) BuildCFI(MBB, InsertPos, DL, MCCFIInstruction::createGnuArgsSize(nullptr, Amount)); if (Amount == 0) return I; // Factor out the amount that gets handled inside the sequence // (Pushes of argument for frame setup, callee pops for frame destroy) Amount -= InternalAmt; // TODO: This is needed only if we require precise CFA. // If this is a callee-pop calling convention, emit a CFA adjust for // the amount the callee popped. if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF)) BuildCFI(MBB, InsertPos, DL, MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt)); // Add Amount to SP to destroy a frame, or subtract to setup. int64_t StackAdjustment = isDestroy ? Amount : -Amount; if (StackAdjustment) { // Merge with any previous or following adjustment instruction. Note: the // instructions merged with here do not have CFI, so their stack // adjustments do not feed into CfaAdjustment. 
StackAdjustment += mergeSPUpdates(MBB, InsertPos, true); StackAdjustment += mergeSPUpdates(MBB, InsertPos, false); if (StackAdjustment) { if (!(F.hasMinSize() && adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment))) BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment, /*InEpilogue=*/false); } } if (DwarfCFI && !hasFP(MF)) { // If we don't have FP, but need to generate unwind information, // we need to set the correct CFA offset after the stack adjustment. // How much we adjust the CFA offset depends on whether we're emitting // CFI only for EH purposes or for debugging. EH only requires the CFA // offset to be correct at each call site, while for debugging we want // it to be more precise. int64_t CfaAdjustment = -StackAdjustment; // TODO: When not using precise CFA, we also need to adjust for the // InternalAmt here. if (CfaAdjustment) { BuildCFI(MBB, InsertPos, DL, MCCFIInstruction::createAdjustCfaOffset(nullptr, CfaAdjustment)); } } return I; } if (InternalAmt) { MachineBasicBlock::iterator CI = I; MachineBasicBlock::iterator B = MBB.begin(); while (CI != B && !std::prev(CI)->isCall()) --CI; BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false); } return I; } bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const { assert(MBB.getParent() && "Block is not attached to a function!"); const MachineFunction &MF = *MBB.getParent(); if (!MBB.isLiveIn(X86::EFLAGS)) return true; const X86MachineFunctionInfo *X86FI = MF.getInfo(); return !TRI->hasStackRealignment(MF) && !X86FI->hasSwiftAsyncContext(); } bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const { assert(MBB.getParent() && "Block is not attached to a function!"); // Win64 has strict requirements in terms of epilogue and we are // not taking a chance at messing with them. // I.e., unless this block is already an exit block, we can't use // it as an epilogue. 
if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock()) return false; // Swift async context epilogue has a BTR instruction that clobbers parts of // EFLAGS. const MachineFunction &MF = *MBB.getParent(); if (MF.getInfo()->hasSwiftAsyncContext()) return !flagsNeedToBePreservedBeforeTheTerminators(MBB); if (canUseLEAForSPInEpilogue(*MBB.getParent())) return true; // If we cannot use LEA to adjust SP, we may need to use ADD, which // clobbers the EFLAGS. Check that we do not need to preserve it, // otherwise, conservatively assume this is not // safe to insert the epilogue here. return !flagsNeedToBePreservedBeforeTheTerminators(MBB); } bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const { // If we may need to emit frameless compact unwind information, give // up as this is currently broken: PR25614. bool CompactUnwind = MF.getMMI().getContext().getObjectFileInfo()->getCompactUnwindSection() != nullptr; return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF) || !CompactUnwind) && // The lowering of segmented stack and HiPE only support entry // blocks as prologue blocks: PR26107. 
This limitation may be // lifted if we fix: // - adjustForSegmentedStacks // - adjustForHiPEPrologue MF.getFunction().getCallingConv() != CallingConv::HiPE && !MF.shouldSplitStack(); } MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers( MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool RestoreSP) const { assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env"); assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32"); assert(STI.is32Bit() && !Uses64BitFramePtr && "restoring EBP/ESI on non-32-bit target"); MachineFunction &MF = *MBB.getParent(); Register FramePtr = TRI->getFrameRegister(MF); Register BasePtr = TRI->getBaseRegister(); WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo(); X86MachineFunctionInfo *X86FI = MF.getInfo(); MachineFrameInfo &MFI = MF.getFrameInfo(); // FIXME: Don't set FrameSetup flag in catchret case. int FI = FuncInfo.EHRegNodeFrameIndex; int EHRegSize = MFI.getObjectSize(FI); if (RestoreSP) { // MOV32rm -EHRegSize(%ebp), %esp addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP), X86::EBP, true, -EHRegSize) .setMIFlag(MachineInstr::FrameSetup); } Register UsedReg; int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg).getFixed(); int EndOffset = -EHRegOffset - EHRegSize; FuncInfo.EHRegNodeEndOffset = EndOffset; if (UsedReg == FramePtr) { // ADD $offset, %ebp unsigned ADDri = getADDriOpcode(false, EndOffset); BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr) .addReg(FramePtr) .addImm(EndOffset) .setMIFlag(MachineInstr::FrameSetup) ->getOperand(3) .setIsDead(); assert(EndOffset >= 0 && "end of registration object above normal EBP position!"); } else if (UsedReg == BasePtr) { // LEA offset(%ebp), %esi addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr), FramePtr, false, EndOffset) .setMIFlag(MachineInstr::FrameSetup); // MOV32rm SavedEBPOffset(%esi), %ebp assert(X86FI->getHasSEHFramePtrSave()); int Offset = 
getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg) .getFixed(); assert(UsedReg == BasePtr); addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr), UsedReg, true, Offset) .setMIFlag(MachineInstr::FrameSetup); } else { llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr"); } return MBBI; } int X86FrameLowering::getInitialCFAOffset(const MachineFunction &MF) const { return TRI->getSlotSize(); } Register X86FrameLowering::getInitialCFARegister(const MachineFunction &MF) const { return TRI->getDwarfRegNum(StackPtr, true); } namespace { // Struct used by orderFrameObjects to help sort the stack objects. struct X86FrameSortingObject { bool IsValid = false; // true if we care about this Object. unsigned ObjectIndex = 0; // Index of Object into MFI list. unsigned ObjectSize = 0; // Size of Object in bytes. Align ObjectAlignment = Align(1); // Alignment of Object in bytes. unsigned ObjectNumUses = 0; // Object static number of uses. }; // The comparison function we use for std::sort to order our local // stack symbols. The current algorithm is to use an estimated // "density". This takes into consideration the size and number of // uses each object has in order to roughly minimize code size. // So, for example, an object of size 16B that is referenced 5 times // will get higher priority than 4 4B objects referenced 1 time each. // It's not perfect and we may be able to squeeze a few more bytes out of // it (for example : 0(esp) requires fewer bytes, symbols allocated at the // fringe end can have special consideration, given their size is less // important, etc.), but the algorithmic complexity grows too much to be // worth the extra gains we get. This gets us pretty close. // The final order leaves us with objects with highest priority going // at the end of our list. 
struct X86FrameSortingComparator { inline bool operator()(const X86FrameSortingObject &A, const X86FrameSortingObject &B) const { uint64_t DensityAScaled, DensityBScaled; // For consistency in our comparison, all invalid objects are placed // at the end. This also allows us to stop walking when we hit the // first invalid item after it's all sorted. if (!A.IsValid) return false; if (!B.IsValid) return true; // The density is calculated by doing : // (double)DensityA = A.ObjectNumUses / A.ObjectSize // (double)DensityB = B.ObjectNumUses / B.ObjectSize // Since this approach may cause inconsistencies in // the floating point <, >, == comparisons, depending on the floating // point model with which the compiler was built, we're going // to scale both sides by multiplying with // A.ObjectSize * B.ObjectSize. This ends up factoring away // the division and, with it, the need for any floating point // arithmetic. DensityAScaled = static_cast(A.ObjectNumUses) * static_cast(B.ObjectSize); DensityBScaled = static_cast(B.ObjectNumUses) * static_cast(A.ObjectSize); // If the two densities are equal, prioritize highest alignment // objects. This allows for similar alignment objects // to be packed together (given the same density). // There's room for improvement here, also, since we can pack // similar alignment (different density) objects next to each // other to save padding. This will also require further // complexity/iterations, and the overall gain isn't worth it, // in general. Something to keep in mind, though. if (DensityAScaled == DensityBScaled) return A.ObjectAlignment < B.ObjectAlignment; return DensityAScaled < DensityBScaled; } }; } // namespace // Order the symbols in the local stack. // We want to place the local stack objects in some sort of sensible order. // The heuristic we use is to try and pack them according to static number // of uses and size of object in order to minimize code size. 
void X86FrameLowering::orderFrameObjects( const MachineFunction &MF, SmallVectorImpl &ObjectsToAllocate) const { const MachineFrameInfo &MFI = MF.getFrameInfo(); // Don't waste time if there's nothing to do. if (ObjectsToAllocate.empty()) return; // Create an array of all MFI objects. We won't need all of these // objects, but we're going to create a full array of them to make // it easier to index into when we're counting "uses" down below. // We want to be able to easily/cheaply access an object by simply // indexing into it, instead of having to search for it every time. std::vector SortingObjects(MFI.getObjectIndexEnd()); // Walk the objects we care about and mark them as such in our working // struct. for (auto &Obj : ObjectsToAllocate) { SortingObjects[Obj].IsValid = true; SortingObjects[Obj].ObjectIndex = Obj; SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlign(Obj); // Set the size. int ObjectSize = MFI.getObjectSize(Obj); if (ObjectSize == 0) // Variable size. Just use 4. SortingObjects[Obj].ObjectSize = 4; else SortingObjects[Obj].ObjectSize = ObjectSize; } // Count the number of uses for each object. for (auto &MBB : MF) { for (auto &MI : MBB) { if (MI.isDebugInstr()) continue; for (const MachineOperand &MO : MI.operands()) { // Check to see if it's a local stack symbol. if (!MO.isFI()) continue; int Index = MO.getIndex(); // Check to see if it falls within our range, and is tagged // to require ordering. if (Index >= 0 && Index < MFI.getObjectIndexEnd() && SortingObjects[Index].IsValid) SortingObjects[Index].ObjectNumUses++; } } } // Sort the objects using X86FrameSortingAlgorithm (see its comment for // info). llvm::stable_sort(SortingObjects, X86FrameSortingComparator()); // Now modify the original list to represent the final order that // we want. The order will depend on whether we're going to access them // from the stack pointer or the frame pointer. 
For SP, the list should // end up with the END containing objects that we want with smaller offsets. // For FP, it should be flipped. int i = 0; for (auto &Obj : SortingObjects) { // All invalid items are sorted at the end, so it's safe to stop. if (!Obj.IsValid) break; ObjectsToAllocate[i++] = Obj.ObjectIndex; } // Flip it if we're accessing off of the FP. if (!TRI->hasStackRealignment(MF) && hasFP(MF)) std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end()); } unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const { // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue. unsigned Offset = 16; // RBP is immediately pushed. Offset += SlotSize; // All callee-saved registers are then pushed. Offset += MF.getInfo()->getCalleeSavedFrameSize(); // Every funclet allocates enough stack space for the largest outgoing call. Offset += getWinEHFuncletFrameSize(MF); return Offset; } void X86FrameLowering::processFunctionBeforeFrameFinalized( MachineFunction &MF, RegScavenger *RS) const { // Mark the function as not having WinCFI. We will set it back to true in // emitPrologue if it gets called and emits CFI. MF.setHasWinCFI(false); // If we are using Windows x64 CFI, ensure that the stack is always 8 byte // aligned. The format doesn't support misaligned stack adjustments. if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) MF.getFrameInfo().ensureMaxAlignment(Align(SlotSize)); // If this function isn't doing Win64-style C++ EH, we don't need to do // anything. if (STI.is64Bit() && MF.hasEHFunclets() && classifyEHPersonality(MF.getFunction().getPersonalityFn()) == EHPersonality::MSVC_CXX) { adjustFrameForMsvcCxxEh(MF); } } void X86FrameLowering::adjustFrameForMsvcCxxEh(MachineFunction &MF) const { // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset // relative to RSP after the prologue. Find the offset of the last fixed // object, so that we can allocate a slot immediately following it. 
If there // were no fixed objects, use offset -SlotSize, which is immediately after the // return address. Fixed objects have negative frame indices. MachineFrameInfo &MFI = MF.getFrameInfo(); WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo(); int64_t MinFixedObjOffset = -SlotSize; for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I)); for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) { for (WinEHHandlerType &H : TBME.HandlerArray) { int FrameIndex = H.CatchObj.FrameIndex; if (FrameIndex != INT_MAX) { // Ensure alignment. unsigned Align = MFI.getObjectAlign(FrameIndex).value(); MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align; MinFixedObjOffset -= MFI.getObjectSize(FrameIndex); MFI.setObjectOffset(FrameIndex, MinFixedObjOffset); } } } // Ensure alignment. MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8; int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize; int UnwindHelpFI = MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*IsImmutable=*/false); EHInfo.UnwindHelpFrameIdx = UnwindHelpFI; // Store -2 into UnwindHelp on function entry. We have to scan forwards past // other frame setup instructions. MachineBasicBlock &MBB = MF.front(); auto MBBI = MBB.begin(); while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) ++MBBI; DebugLoc DL = MBB.findDebugLoc(MBBI); addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)), UnwindHelpFI) .addImm(-2); } void X86FrameLowering::processFunctionBeforeFrameIndicesReplaced( MachineFunction &MF, RegScavenger *RS) const { if (STI.is32Bit() && MF.hasEHFunclets()) restoreWinEHStackPointersInParent(MF); } void X86FrameLowering::restoreWinEHStackPointersInParent( MachineFunction &MF) const { // 32-bit functions have to restore stack pointers when control is transferred // back to the parent function. These blocks are identified as eh pads that // are not funclet entries. 
bool IsSEH = isAsynchronousEHPersonality( classifyEHPersonality(MF.getFunction().getPersonalityFn())); for (MachineBasicBlock &MBB : MF) { bool NeedsRestore = MBB.isEHPad() && !MBB.isEHFuncletEntry(); if (NeedsRestore) restoreWin32EHStackPointers(MBB, MBB.begin(), DebugLoc(), /*RestoreSP=*/IsSEH); } } diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp index d6b97915ede6..75eec25f5807 100644 --- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp +++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp @@ -1,4407 +1,4422 @@ //===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // OpenMP specific optimizations: // // - Deduplication of runtime calls, e.g., omp_get_thread_num. // - Replacing globalized device memory with stack memory. // - Replacing globalized device memory with shared memory. // - Parallel region merging. // - Transforming generic-mode device kernels to SPMD mode. // - Specializing the state machine for generic-mode device kernels. 
// //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO/OpenMPOpt.h" #include "llvm/ADT/EnumeratedArray.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/CallGraphSCCPass.h" #include "llvm/Analysis/OptimizationRemarkEmitter.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" #include "llvm/IR/Assumptions.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/InitializePasses.h" #include "llvm/Support/CommandLine.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/IPO/Attributor.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/CallGraphUpdater.h" #include "llvm/Transforms/Utils/CodeExtractor.h" using namespace llvm; using namespace omp; #define DEBUG_TYPE "openmp-opt" static cl::opt DisableOpenMPOptimizations( "openmp-opt-disable", cl::ZeroOrMore, cl::desc("Disable OpenMP specific optimizations."), cl::Hidden, cl::init(false)); static cl::opt EnableParallelRegionMerging( "openmp-opt-enable-merging", cl::ZeroOrMore, cl::desc("Enable the OpenMP region merging optimization."), cl::Hidden, cl::init(false)); static cl::opt DisableInternalization("openmp-opt-disable-internalization", cl::ZeroOrMore, cl::desc("Disable function internalization."), cl::Hidden, cl::init(false)); static cl::opt PrintICVValues("openmp-print-icv-values", cl::init(false), cl::Hidden); static cl::opt PrintOpenMPKernels("openmp-print-gpu-kernels", cl::init(false), cl::Hidden); static cl::opt HideMemoryTransferLatency( "openmp-hide-memory-transfer-latency", cl::desc("[WIP] Tries to hide the latency of host to device memory" " transfers"), cl::Hidden, cl::init(false)); STATISTIC(NumOpenMPRuntimeCallsDeduplicated, "Number of OpenMP 
runtime calls deduplicated"); STATISTIC(NumOpenMPParallelRegionsDeleted, "Number of OpenMP parallel regions deleted"); STATISTIC(NumOpenMPRuntimeFunctionsIdentified, "Number of OpenMP runtime functions identified"); STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified, "Number of OpenMP runtime function uses identified"); STATISTIC(NumOpenMPTargetRegionKernels, "Number of OpenMP target region entry points (=kernels) identified"); STATISTIC(NumOpenMPTargetRegionKernelsSPMD, "Number of OpenMP target region entry points (=kernels) executed in " "SPMD-mode instead of generic-mode"); STATISTIC(NumOpenMPTargetRegionKernelsWithoutStateMachine, "Number of OpenMP target region entry points (=kernels) executed in " "generic-mode without a state machines"); STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback, "Number of OpenMP target region entry points (=kernels) executed in " "generic-mode with customized state machines with fallback"); STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback, "Number of OpenMP target region entry points (=kernels) executed in " "generic-mode with customized state machines without fallback"); STATISTIC( NumOpenMPParallelRegionsReplacedInGPUStateMachine, "Number of OpenMP parallel regions replaced with ID in GPU state machines"); STATISTIC(NumOpenMPParallelRegionsMerged, "Number of OpenMP parallel regions merged"); STATISTIC(NumBytesMovedToSharedMemory, "Amount of memory pushed to shared memory"); #if !defined(NDEBUG) static constexpr auto TAG = "[" DEBUG_TYPE "]"; #endif namespace { enum class AddressSpace : unsigned { Generic = 0, Global = 1, Shared = 3, Constant = 4, Local = 5, }; struct AAHeapToShared; struct AAICVTracker; /// OpenMP specific information. For now, stores RFIs and ICVs also needed for /// Attributor runs. 
// NOTE(review): throughout this chunk the angle-bracket template-argument
// lists appear to have been stripped by whatever produced this text (e.g.
// "SmallVector ArgumentTypes;", "dyn_cast(U.getUser())"). The tokens are kept
// exactly as found; restore the argument lists from the upstream source
// before attempting to compile. -- TODO confirm against upstream.
struct OMPInformationCache : public InformationCache {
  // Builds the cache: wires up the OpenMP IR builder and populates the
  // runtime-function and ICV tables from OMPKinds.def.
  OMPInformationCache(Module &M, AnalysisGetter &AG,
                      BumpPtrAllocator &Allocator, SetVector &CGSCC,
                      SmallPtrSetImpl &Kernels)
      : InformationCache(M, AG, Allocator, &CGSCC), OMPBuilder(M),
        Kernels(Kernels) {
    OMPBuilder.initialize();
    initializeRuntimeFunctions();
    initializeInternalControlVars();
  }

  /// Generic information that describes an internal control variable.
  struct InternalControlVarInfo {
    /// The kind, as described by InternalControlVar enum.
    InternalControlVar Kind;

    /// The name of the ICV.
    StringRef Name;

    /// Environment variable associated with this ICV.
    StringRef EnvVarName;

    /// Initial value kind.
    ICVInitValue InitKind;

    /// Initial value.
    ConstantInt *InitValue;

    /// Setter RTL function associated with this ICV.
    RuntimeFunction Setter;

    /// Getter RTL function associated with this ICV.
    RuntimeFunction Getter;

    /// RTL Function corresponding to the override clause of this ICV
    RuntimeFunction Clause;
  };

  /// Generic information that describes a runtime function
  struct RuntimeFunctionInfo {

    /// The kind, as described by the RuntimeFunction enum.
    RuntimeFunction Kind;

    /// The name of the function.
    StringRef Name;

    /// Flag to indicate a variadic function.
    bool IsVarArg;

    /// The return type of the function.
    Type *ReturnType;

    /// The argument types of the function.
    SmallVector ArgumentTypes;

    /// The declaration if available.
    Function *Declaration = nullptr;

    /// Uses of this runtime function per function containing the use.
    using UseVector = SmallVector;

    /// Clear UsesMap for runtime function.
    void clearUsesMap() { UsesMap.clear(); }

    /// Boolean conversion that is true if the runtime function was found.
    operator bool() const { return Declaration; }

    /// Return the vector of uses in function \p F.
    UseVector &getOrCreateUseVector(Function *F) {
      // Lazily create the per-function use vector; shared_ptr keeps it stable
      // across map rehashing.
      std::shared_ptr &UV = UsesMap[F];
      if (!UV)
        UV = std::make_shared();
      return *UV;
    }

    /// Return the vector of uses in function \p F or `nullptr` if there are
    /// none.
    const UseVector *getUseVector(Function &F) const {
      auto I = UsesMap.find(&F);
      if (I != UsesMap.end())
        return I->second.get();
      return nullptr;
    }

    /// Return how many functions contain uses of this runtime function.
    size_t getNumFunctionsWithUses() const { return UsesMap.size(); }

    /// Return the number of arguments (or the minimal number for variadic
    /// functions).
    size_t getNumArgs() const { return ArgumentTypes.size(); }

    /// Run the callback \p CB on each use and forget the use if the result is
    /// true. The callback will be fed the function in which the use was
    /// encountered as second argument.
    void foreachUse(SmallVectorImpl &SCC, function_ref CB) {
      for (Function *F : SCC)
        foreachUse(CB, F);
    }

    /// Run the callback \p CB on each use within the function \p F and forget
    /// the use if the result is true.
    void foreachUse(function_ref CB, Function *F) {
      SmallVector ToBeDeleted;
      ToBeDeleted.clear();

      unsigned Idx = 0;
      UseVector &UV = getOrCreateUseVector(F);

      for (Use *U : UV) {
        if (CB(*U, *F))
          ToBeDeleted.push_back(Idx);
        ++Idx;
      }

      // Remove the to-be-deleted indices in reverse order as prior
      // modifications will not modify the smaller indices.
      // (Deletion is swap-with-last + pop, so order within UV is not
      // preserved.)
      while (!ToBeDeleted.empty()) {
        unsigned Idx = ToBeDeleted.pop_back_val();
        UV[Idx] = UV.back();
        UV.pop_back();
      }
    }

  private:
    /// Map from functions to all uses of this runtime function contained in
    /// them.
    DenseMap> UsesMap;

  public:
    /// Iterators for the uses of this runtime function.
    decltype(UsesMap)::iterator begin() { return UsesMap.begin(); }
    decltype(UsesMap)::iterator end() { return UsesMap.end(); }
  };

  /// An OpenMP-IR-Builder instance
  OpenMPIRBuilder OMPBuilder;

  /// Map from runtime function kind to the runtime function description.
  EnumeratedArray RFIs;

  /// Map from function declarations/definitions to their runtime enum type.
  DenseMap RuntimeFunctionIDMap;

  /// Map from ICV kind to the ICV description.
  EnumeratedArray ICVs;

  /// Helper to initialize all internal control variable information for those
  /// defined in OMPKinds.def.
  void initializeInternalControlVars() {
#define ICV_RT_SET(_Name, RTL)                                                 \
  {                                                                            \
    auto &ICV = ICVs[_Name];                                                   \
    ICV.Setter = RTL;                                                          \
  }
#define ICV_RT_GET(Name, RTL)                                                  \
  {                                                                            \
    auto &ICV = ICVs[Name];                                                    \
    ICV.Getter = RTL;                                                          \
  }
#define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init)                           \
  {                                                                            \
    auto &ICV = ICVs[Enum];                                                    \
    ICV.Name = _Name;                                                          \
    ICV.Kind = Enum;                                                           \
    ICV.InitKind = Init;                                                       \
    ICV.EnvVarName = _EnvVarName;                                              \
    switch (ICV.InitKind) {                                                    \
    case ICV_IMPLEMENTATION_DEFINED:                                           \
      ICV.InitValue = nullptr;                                                 \
      break;                                                                   \
    case ICV_ZERO:                                                             \
      ICV.InitValue = ConstantInt::get(                                        \
          Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0);                \
      break;                                                                   \
    case ICV_FALSE:                                                            \
      ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext());    \
      break;                                                                   \
    case ICV_LAST:                                                             \
      break;                                                                   \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  }

  /// Returns true if the function declaration \p F matches the runtime
  /// function types, that is, return type \p RTFRetType, and argument types
  /// \p RTFArgTypes.
  static bool declMatchesRTFTypes(Function *F, Type *RTFRetType,
                                  SmallVector &RTFArgTypes) {
    // TODO: We should output information to the user (under debug output
    //       and via remarks).

    if (!F)
      return false;
    if (F->getReturnType() != RTFRetType)
      return false;
    if (F->arg_size() != RTFArgTypes.size())
      return false;

    auto RTFTyIt = RTFArgTypes.begin();
    for (Argument &Arg : F->args()) {
      if (Arg.getType() != *RTFTyIt)
        return false;

      ++RTFTyIt;
    }

    return true;
  }

  // Helper to collect all uses of the declaration in the UsesMap.
  // Returns the number of uses recorded. Uses whose user is not an
  // Instruction (e.g. constant expressions) are bucketed under the nullptr
  // key.
  unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) {
    unsigned NumUses = 0;
    if (!RFI.Declaration)
      return NumUses;
    OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);

    if (CollectStats) {
      NumOpenMPRuntimeFunctionsIdentified += 1;
      NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();
    }

    // TODO: We directly convert uses into proper calls and unknown uses.
    for (Use &U : RFI.Declaration->uses()) {
      if (Instruction *UserI = dyn_cast(U.getUser())) {
        // Only track uses inside the module slice this cache covers.
        if (ModuleSlice.count(UserI->getFunction())) {
          RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U);
          ++NumUses;
        }
      } else {
        RFI.getOrCreateUseVector(nullptr).push_back(&U);
        ++NumUses;
      }
    }
    return NumUses;
  }

  // Helper function to recollect uses of a runtime function.
  void recollectUsesForFunction(RuntimeFunction RTF) {
    auto &RFI = RFIs[RTF];
    RFI.clearUsesMap();
    collectUses(RFI, /*CollectStats*/ false);
  }

  // Helper function to recollect uses of all runtime functions.
  void recollectUses() {
    for (int Idx = 0; Idx < RFIs.size(); ++Idx)
      recollectUsesForFunction(static_cast(Idx));
  }

  /// Helper to initialize all runtime function information for those defined
  /// in OpenMPKinds.def.
  void initializeRuntimeFunctions() {
    Module &M = *((*ModuleSlice.begin())->getParent());

    // Helper macros for handling __VA_ARGS__ in OMP_RTL
#define OMP_TYPE(VarName, ...)                                                 \
  Type *VarName = OMPBuilder.VarName;                                          \
  (void)VarName;

#define OMP_ARRAY_TYPE(VarName, ...)                                           \
  ArrayType *VarName##Ty = OMPBuilder.VarName##Ty;                             \
  (void)VarName##Ty;                                                           \
  PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy;                     \
  (void)VarName##PtrTy;

#define OMP_FUNCTION_TYPE(VarName, ...)                                        \
  FunctionType *VarName = OMPBuilder.VarName;                                  \
  (void)VarName;                                                               \
  PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
  (void)VarName##Ptr;

#define OMP_STRUCT_TYPE(VarName, ...)                                          \
  StructType *VarName = OMPBuilder.VarName;                                    \
  (void)VarName;                                                               \
  PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
  (void)VarName##Ptr;

#define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
  {                                                                            \
    SmallVector ArgsTypes({__VA_ARGS__});                                      \
    Function *F = M.getFunction(_Name);                                        \
    RTLFunctions.insert(F);                                                    \
    if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) {           \
      RuntimeFunctionIDMap[F] = _Enum;                                         \
      F->removeFnAttr(Attribute::NoInline);                                    \
      auto &RFI = RFIs[_Enum];                                                 \
      RFI.Kind = _Enum;                                                        \
      RFI.Name = _Name;                                                        \
      RFI.IsVarArg = _IsVarArg;                                                \
      RFI.ReturnType = OMPBuilder._ReturnType;                                 \
      RFI.ArgumentTypes = std::move(ArgsTypes);                                \
      RFI.Declaration = F;                                                     \
      unsigned NumUses = collectUses(RFI);                                     \
      (void)NumUses;                                                           \
      LLVM_DEBUG({                                                             \
        dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")           \
               << " found\n";                                                  \
        if (RFI.Declaration)                                                   \
          dbgs() << TAG << "-> got " << NumUses << " uses in "                 \
                 << RFI.getNumFunctionsWithUses()                              \
                 << " different functions.\n";                                 \
      });                                                                      \
    }                                                                          \
  }
#include "llvm/Frontend/OpenMP/OMPKinds.def"

    // TODO: We should attach the attributes defined in OMPKinds.def.
  }

  /// Collection of known kernels (\see Kernel) in the module.
  SmallPtrSetImpl &Kernels;

  /// Collection of known OpenMP runtime functions.
  DenseSet RTLFunctions;
};

// A BooleanState augmented with a SetVector of tracked elements. Insertion
// pessimizes the boolean state when InsertInvalidates is set (see insert()).
template struct BooleanStateWithSetVector : public BooleanState {
  bool contains(const Ty &Elem) const { return Set.contains(Elem); }
  bool insert(const Ty &Elem) {
    if (InsertInvalidates)
      BooleanState::indicatePessimisticFixpoint();
    return Set.insert(Elem);
  }

  const Ty &operator[](int Idx) const { return Set[Idx]; }
  bool operator==(const BooleanStateWithSetVector &RHS) const {
    return BooleanState::operator==(RHS) && Set == RHS.Set;
  }
  bool operator!=(const BooleanStateWithSetVector &RHS) const {
    return !(*this == RHS);
  }

  bool empty() const { return Set.empty(); }
  size_t size() const { return Set.size(); }

  /// "Clamp" this state with \p RHS.
  BooleanStateWithSetVector &operator^=(const BooleanStateWithSetVector &RHS) {
    BooleanState::operator^=(RHS);
    Set.insert(RHS.Set.begin(), RHS.Set.end());
    return *this;
  }

private:
  /// A set to keep track of elements.
  SetVector Set;

public:
  typename decltype(Set)::iterator begin() { return Set.begin(); }
  typename decltype(Set)::iterator end() { return Set.end(); }
  typename decltype(Set)::const_iterator begin() const { return Set.begin(); }
  typename decltype(Set)::const_iterator end() const { return Set.end(); }
};

// Convenience alias for the pointer-element variant of the state above.
template using BooleanStateWithPtrSetVector = BooleanStateWithSetVector;

// Abstract state describing everything the Attributor learns about an OpenMP
// kernel (reachable parallel regions, SPMD compatibility, init/deinit calls).
struct KernelInfoState : AbstractState {
  /// Flag to track if we reached a fixpoint.
  bool IsAtFixpoint = false;

  /// The parallel regions (identified by the outlined parallel functions) that
  /// can be reached from the associated function.
  BooleanStateWithPtrSetVector ReachedKnownParallelRegions;

  /// State to track what parallel region we might reach.
  BooleanStateWithPtrSetVector ReachedUnknownParallelRegions;

  /// State to track if we are in SPMD-mode, assumed or know, and why we decided
  /// we cannot be. If it is assumed, then RequiresFullRuntime should also be
  /// false.
  BooleanStateWithPtrSetVector SPMDCompatibilityTracker;

  /// The __kmpc_target_init call in this kernel, if any. If we find more than
  /// one we abort as the kernel is malformed.
  CallBase *KernelInitCB = nullptr;

  /// The __kmpc_target_deinit call in this kernel, if any. If we find more than
  /// one we abort as the kernel is malformed.
  CallBase *KernelDeinitCB = nullptr;

  /// Flag to indicate if the associated function is a kernel entry.
  bool IsKernelEntry = false;

  /// State to track what kernel entries can reach the associated function.
  BooleanStateWithPtrSetVector ReachingKernelEntries;

  /// State to indicate if we can track parallel level of the associated
  /// function. We will give up tracking if we encounter unknown caller or the
  /// caller is __kmpc_parallel_51.
  BooleanStateWithSetVector ParallelLevels;

  /// Abstract State interface
  ///{

  KernelInfoState() {}
  KernelInfoState(bool BestState) {
    if (!BestState)
      indicatePessimisticFixpoint();
  }

  /// See AbstractState::isValidState(...)
  bool isValidState() const override { return true; }

  /// See AbstractState::isAtFixpoint(...)
  bool isAtFixpoint() const override { return IsAtFixpoint; }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    IsAtFixpoint = true;
    SPMDCompatibilityTracker.indicatePessimisticFixpoint();
    ReachedUnknownParallelRegions.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    IsAtFixpoint = true;
    return ChangeStatus::UNCHANGED;
  }

  /// Return the assumed state
  KernelInfoState &getAssumed() { return *this; }
  const KernelInfoState &getAssumed() const { return *this; }

  bool operator==(const KernelInfoState &RHS) const {
    if (SPMDCompatibilityTracker != RHS.SPMDCompatibilityTracker)
      return false;
    if (ReachedKnownParallelRegions != RHS.ReachedKnownParallelRegions)
      return false;
    if (ReachedUnknownParallelRegions != RHS.ReachedUnknownParallelRegions)
      return false;
    if (ReachingKernelEntries != RHS.ReachingKernelEntries)
      return false;
    return true;
  }

  /// Return empty set as the best state of potential values.
  static KernelInfoState getBestState() { return KernelInfoState(true); }

  static KernelInfoState getBestState(KernelInfoState &KIS) {
    return getBestState();
  }

  /// Return full set as the worst state of potential values.
  static KernelInfoState getWorstState() { return KernelInfoState(false); }

  /// "Clamp" this state with \p KIS.
  KernelInfoState operator^=(const KernelInfoState &KIS) {
    // Do not merge two different _init and _deinit call sites.
    if (KIS.KernelInitCB) {
      if (KernelInitCB && KernelInitCB != KIS.KernelInitCB)
        indicatePessimisticFixpoint();
      KernelInitCB = KIS.KernelInitCB;
    }
    if (KIS.KernelDeinitCB) {
      if (KernelDeinitCB && KernelDeinitCB != KIS.KernelDeinitCB)
        indicatePessimisticFixpoint();
      KernelDeinitCB = KIS.KernelDeinitCB;
    }
    SPMDCompatibilityTracker ^= KIS.SPMDCompatibilityTracker;
    ReachedKnownParallelRegions ^= KIS.ReachedKnownParallelRegions;
    ReachedUnknownParallelRegions ^= KIS.ReachedUnknownParallelRegions;
    return *this;
  }

  KernelInfoState operator&=(const KernelInfoState &KIS) {
    return (*this ^= KIS);
  }

  ///}
};

/// Used to map the values physically (in the IR) stored in an offload
/// array, to a vector in memory.
struct OffloadArray {
  /// Physical array (in the IR).
  AllocaInst *Array = nullptr;
  /// Mapped values.
  SmallVector StoredValues;
  /// Last stores made in the offload array.
  SmallVector LastAccesses;

  OffloadArray() = default;

  /// Initializes the OffloadArray with the values stored in \p Array before
  /// instruction \p Before is reached. Returns false if the initialization
  /// fails.
  /// This MUST be used immediately after the construction of the object.
  bool initialize(AllocaInst &Array, Instruction &Before) {
    if (!Array.getAllocatedType()->isArrayTy())
      return false;

    if (!getValues(Array, Before))
      return false;

    this->Array = &Array;
    return true;
  }

  // Argument positions of the offload arrays in a
  // __tgt_target_data_begin_mapper call.
  static const unsigned DeviceIDArgNum = 1;
  static const unsigned BasePtrsArgNum = 3;
  static const unsigned PtrsArgNum = 4;
  static const unsigned SizesArgNum = 5;

private:
  /// Traverses the BasicBlock where \p Array is, collecting the stores made to
  /// \p Array, leaving StoredValues with the values stored before the
  /// instruction \p Before is reached.
  bool getValues(AllocaInst &Array, Instruction &Before) {
    // Initialize container.
    const uint64_t NumValues = Array.getAllocatedType()->getArrayNumElements();
    StoredValues.assign(NumValues, nullptr);
    LastAccesses.assign(NumValues, nullptr);

    // TODO: This assumes the instruction \p Before is in the same
    //  BasicBlock as Array. Make it general, for any control flow graph.
    BasicBlock *BB = Array.getParent();
    if (BB != Before.getParent())
      return false;

    const DataLayout &DL = Array.getModule()->getDataLayout();
    const unsigned int PointerSize = DL.getPointerSize();

    for (Instruction &I : *BB) {
      if (&I == &Before)
        break;

      if (!isa(&I))
        continue;

      auto *S = cast(&I);
      int64_t Offset = -1;
      auto *Dst =
          GetPointerBaseWithConstantOffset(S->getPointerOperand(), Offset, DL);
      if (Dst == &Array) {
        // Byte offset into the array -> element index (elements are
        // pointer-sized here).
        int64_t Idx = Offset / PointerSize;
        StoredValues[Idx] = getUnderlyingObject(S->getValueOperand());
        LastAccesses[Idx] = S;
      }
    }

    return isFilled();
  }

  /// Returns true if all values in StoredValues and
  /// LastAccesses are not nullptrs.
  bool isFilled() {
    const unsigned NumValues = StoredValues.size();
    for (unsigned I = 0; I < NumValues; ++I) {
      if (!StoredValues[I] || !LastAccesses[I])
        return false;
    }

    return true;
  }
};

// Driver object for the OpenMP optimizations performed on an SCC / module
// slice. NOTE(review): the struct definition continues past this chunk.
struct OpenMPOpt {

  using OptimizationRemarkGetter = function_ref;

  OpenMPOpt(SmallVectorImpl &SCC, CallGraphUpdater &CGUpdater,
            OptimizationRemarkGetter OREGetter,
            OMPInformationCache &OMPInfoCache, Attributor &A)
      : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater),
        OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {}

  /// Check if any remarks are enabled for openmp-opt
  bool remarksEnabled() {
    auto &Ctx = M.getContext();
    return Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(DEBUG_TYPE);
  }

  /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice.
// NOTE(review): as in the rest of this chunk, template-argument lists have
// been stripped from the text (e.g. "dyn_cast(...)", "SmallVector Args;");
// tokens are preserved as found -- restore the argument lists from the
// upstream source before compiling.

// Top-level driver: dispatches to the module-pass or CGSCC-pass pipeline of
// OpenMP optimizations and reports whether anything changed.
bool run(bool IsModulePass) {
  if (SCC.empty())
    return false;

  bool Changed = false;

  LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
                    << " functions in a slice with "
                    << OMPInfoCache.ModuleSlice.size() << " functions\n");

  if (IsModulePass) {
    Changed |= runAttributor(IsModulePass);

    // Recollect uses, in case Attributor deleted any.
    OMPInfoCache.recollectUses();

    // TODO: This should be folded into buildCustomStateMachine.
    Changed |= rewriteDeviceCodeStateMachine();

    if (remarksEnabled())
      analysisGlobalization();
  } else {
    if (PrintICVValues)
      printICVs();
    if (PrintOpenMPKernels)
      printKernels();

    Changed |= runAttributor(IsModulePass);

    // Recollect uses, in case Attributor deleted any.
    OMPInfoCache.recollectUses();

    Changed |= deleteParallelRegions();
    if (HideMemoryTransferLatency)
      Changed |= hideMemTransfersLatency();
    Changed |= deduplicateRuntimeCalls();
    if (EnableParallelRegionMerging) {
      if (mergeParallelRegions()) {
        // Merging introduces new runtime calls; deduplicate again.
        deduplicateRuntimeCalls();
        Changed = true;
      }
    }
  }

  return Changed;
}

/// Print initial ICV values for testing.
/// FIXME: This should be done from the Attributor once it is added.
void printICVs() const {
  InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel,
                               ICV_proc_bind};

  for (Function *F : OMPInfoCache.ModuleSlice) {
    for (auto ICV : ICVs) {
      auto ICVInfo = OMPInfoCache.ICVs[ICV];
      auto Remark = [&](OptimizationRemarkAnalysis ORA) {
        return ORA << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name)
                   << " Value: "
                   << (ICVInfo.InitValue
                           ? toString(ICVInfo.InitValue->getValue(), 10, true)
                           : "IMPLEMENTATION_DEFINED");
      };

      emitRemark(F, "OpenMPICVTracker", Remark);
    }
  }
}

/// Print OpenMP GPU kernels for testing.
void printKernels() const {
  for (Function *F : SCC) {
    if (!OMPInfoCache.Kernels.count(F))
      continue;

    auto Remark = [&](OptimizationRemarkAnalysis ORA) {
      return ORA << "OpenMP GPU kernel "
                 << ore::NV("OpenMPGPUKernel", F->getName()) << "\n";
    };

    emitRemark(F, "OpenMPGPU", Remark);
  }
}

/// Return the call if \p U is a callee use in a regular call. If \p RFI is
/// given it has to be the callee or a nullptr is returned.
static CallInst *getCallIfRegularCall(
    Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
  CallInst *CI = dyn_cast(U.getUser());
  if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
      (!RFI ||
       (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
    return CI;
  return nullptr;
}

/// Return the call if \p V is a regular call. If \p RFI is given it has to be
/// the callee or a nullptr is returned.
static CallInst *getCallIfRegularCall(
    Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
  CallInst *CI = dyn_cast(&V);
  if (CI && !CI->hasOperandBundles() &&
      (!RFI ||
       (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
    return CI;
  return nullptr;
}

private:
/// Merge parallel regions when it is safe.
bool mergeParallelRegions() {
  // Operand positions inside a __kmpc_fork_call: the outlined callback and
  // its first forwarded argument.
  const unsigned CallbackCalleeOperand = 2;
  const unsigned CallbackFirstArgOperand = 3;

  using InsertPointTy = OpenMPIRBuilder::InsertPointTy;

  // Check if there are any __kmpc_fork_call calls to merge.
  OMPInformationCache::RuntimeFunctionInfo &RFI =
      OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

  if (!RFI.Declaration)
    return false;

  // Unmergable calls that prevent merging a parallel region.
  OMPInformationCache::RuntimeFunctionInfo UnmergableCallsInfo[] = {
      OMPInfoCache.RFIs[OMPRTL___kmpc_push_proc_bind],
      OMPInfoCache.RFIs[OMPRTL___kmpc_push_num_threads],
  };

  bool Changed = false;
  LoopInfo *LI = nullptr;
  DominatorTree *DT = nullptr;

  // Map from basic block to the set of parallel-region fork calls it hosts.
  SmallDenseMap> BB2PRMap;

  BasicBlock *StartBB = nullptr, *EndBB = nullptr;
  // Body callback for createParallel: splice the isolated [StartBB, EndBB]
  // region into the outlined body.
  auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                       BasicBlock &ContinuationIP) {
    BasicBlock *CGStartBB = CodeGenIP.getBlock();
    BasicBlock *CGEndBB =
        SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
    assert(StartBB != nullptr && "StartBB should not be null");
    CGStartBB->getTerminator()->setSuccessor(0, StartBB);
    assert(EndBB != nullptr && "EndBB should not be null");
    EndBB->getTerminator()->setSuccessor(0, CGEndBB);
  };

  // Privatization callback: values stay shared (no per-thread copies).
  auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &,
                    Value &Inner, Value *&ReplacementValue) -> InsertPointTy {
    ReplacementValue = &Inner;
    return CodeGenIP;
  };

  auto FiniCB = [&](InsertPointTy CodeGenIP) {};

  /// Create a sequential execution region within a merged parallel region,
  /// encapsulated in a master construct with a barrier for synchronization.
  auto CreateSequentialRegion = [&](Function *OuterFn,
                                    BasicBlock *OuterPredBB,
                                    Instruction *SeqStartI,
                                    Instruction *SeqEndI) {
    // Isolate the instructions of the sequential region to a separate
    // block.
    BasicBlock *ParentBB = SeqStartI->getParent();
    BasicBlock *SeqEndBB =
        SplitBlock(ParentBB, SeqEndI->getNextNode(), DT, LI);
    BasicBlock *SeqAfterBB =
        SplitBlock(SeqEndBB, &*SeqEndBB->getFirstInsertionPt(), DT, LI);
    BasicBlock *SeqStartBB =
        SplitBlock(ParentBB, SeqStartI, DT, LI, nullptr, "seq.par.merged");

    assert(ParentBB->getUniqueSuccessor() == SeqStartBB &&
           "Expected a different CFG");
    const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc();
    ParentBB->getTerminator()->eraseFromParent();

    // Body callback for createMaster: splice the sequential block range into
    // the master region.
    auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                         BasicBlock &ContinuationIP) {
      BasicBlock *CGStartBB = CodeGenIP.getBlock();
      BasicBlock *CGEndBB =
          SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
      assert(SeqStartBB != nullptr && "SeqStartBB should not be null");
      CGStartBB->getTerminator()->setSuccessor(0, SeqStartBB);
      assert(SeqEndBB != nullptr && "SeqEndBB should not be null");
      SeqEndBB->getTerminator()->setSuccessor(0, CGEndBB);
    };
    auto FiniCB = [&](InsertPointTy CodeGenIP) {};

    // Find outputs from the sequential region to outside users and
    // broadcast their values to them.
    for (Instruction &I : *SeqStartBB) {
      SmallPtrSet OutsideUsers;
      for (User *Usr : I.users()) {
        Instruction &UsrI = *cast(Usr);
        // Ignore outputs to LT intrinsics, code extraction for the merged
        // parallel region will fix them.
        if (UsrI.isLifetimeStartOrEnd())
          continue;

        if (UsrI.getParent() != SeqStartBB)
          OutsideUsers.insert(&UsrI);
      }

      if (OutsideUsers.empty())
        continue;

      // Emit an alloca in the outer region to store the broadcasted
      // value.
      const DataLayout &DL = M.getDataLayout();
      AllocaInst *AllocaI = new AllocaInst(
          I.getType(), DL.getAllocaAddrSpace(), nullptr,
          I.getName() + ".seq.output.alloc", &OuterFn->front().front());

      // Emit a store instruction in the sequential BB to update the
      // value.
      new StoreInst(&I, AllocaI, SeqStartBB->getTerminator());

      // Emit a load instruction and replace the use of the output value
      // with it.
      for (Instruction *UsrI : OutsideUsers) {
        LoadInst *LoadI = new LoadInst(
            I.getType(), AllocaI, I.getName() + ".seq.output.load", UsrI);
        UsrI->replaceUsesOfWith(&I, LoadI);
      }
    }

    OpenMPIRBuilder::LocationDescription Loc(
        InsertPointTy(ParentBB, ParentBB->end()), DL);
    InsertPointTy SeqAfterIP =
        OMPInfoCache.OMPBuilder.createMaster(Loc, BodyGenCB, FiniCB);

    OMPInfoCache.OMPBuilder.createBarrier(SeqAfterIP, OMPD_parallel);

    BranchInst::Create(SeqAfterBB, SeqAfterIP.getBlock());

    LLVM_DEBUG(dbgs() << TAG << "After sequential inlining " << *OuterFn
                      << "\n");
  };

  // Helper to merge the __kmpc_fork_call calls in MergableCIs. They are all
  // contained in BB and only separated by instructions that can be
  // redundantly executed in parallel. The block BB is split before the first
  // call (in MergableCIs) and after the last so the entire region we merge
  // into a single parallel region is contained in a single basic block
  // without any other instructions. We use the OpenMPIRBuilder to outline
  // that block and call the resulting function via __kmpc_fork_call.
  auto Merge = [&](SmallVectorImpl &MergableCIs, BasicBlock *BB) {
    // TODO: Change the interface to allow single CIs expanded, e.g, to
    // include an outer loop.
    assert(MergableCIs.size() > 1 && "Assumed multiple mergable CIs");

    auto Remark = [&](OptimizationRemark OR) {
      OR << "Parallel region merged with parallel region"
         << (MergableCIs.size() > 2 ? "s" : "") << " at ";
      for (auto *CI : llvm::drop_begin(MergableCIs)) {
        OR << ore::NV("OpenMPParallelMerge", CI->getDebugLoc());
        if (CI != MergableCIs.back())
          OR << ", ";
      }
      return OR << ".";
    };

    emitRemark(MergableCIs.front(), "OMP150", Remark);

    Function *OriginalFn = BB->getParent();
    LLVM_DEBUG(dbgs() << TAG << "Merge " << MergableCIs.size()
                      << " parallel regions in " << OriginalFn->getName()
                      << "\n");

    // Isolate the calls to merge in a separate block.
    EndBB = SplitBlock(BB, MergableCIs.back()->getNextNode(), DT, LI);
    BasicBlock *AfterBB =
        SplitBlock(EndBB, &*EndBB->getFirstInsertionPt(), DT, LI);
    StartBB = SplitBlock(BB, MergableCIs.front(), DT, LI, nullptr,
                         "omp.par.merged");

    assert(BB->getUniqueSuccessor() == StartBB && "Expected a different CFG");
    const DebugLoc DL = BB->getTerminator()->getDebugLoc();
    BB->getTerminator()->eraseFromParent();

    // Create sequential regions for sequential instructions that are
    // in-between mergable parallel regions.
    for (auto *It = MergableCIs.begin(), *End = MergableCIs.end() - 1;
         It != End; ++It) {
      Instruction *ForkCI = *It;
      Instruction *NextForkCI = *(It + 1);

      // Continue if there are not in-between instructions.
      if (ForkCI->getNextNode() == NextForkCI)
        continue;

      CreateSequentialRegion(OriginalFn, BB, ForkCI->getNextNode(),
                             NextForkCI->getPrevNode());
    }

    OpenMPIRBuilder::LocationDescription Loc(InsertPointTy(BB, BB->end()),
                                             DL);
    IRBuilder<>::InsertPoint AllocaIP(
        &OriginalFn->getEntryBlock(),
        OriginalFn->getEntryBlock().getFirstInsertionPt());
    // Create the merged parallel region with default proc binding, to
    // avoid overriding binding settings, and without explicit cancellation.
    InsertPointTy AfterIP = OMPInfoCache.OMPBuilder.createParallel(
        Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, nullptr, nullptr,
        OMP_PROC_BIND_default, /* IsCancellable */ false);
    BranchInst::Create(AfterBB, AfterIP.getBlock());

    // Perform the actual outlining.
    OMPInfoCache.OMPBuilder.finalize(OriginalFn,
                                     /* AllowExtractorSinking */ true);

    Function *OutlinedFn = MergableCIs.front()->getCaller();

    // Replace the __kmpc_fork_call calls with direct calls to the outlined
    // callbacks.
    SmallVector Args;
    for (auto *CI : MergableCIs) {
      Value *Callee =
          CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts();
      FunctionType *FT =
          cast(Callee->getType()->getPointerElementType());
      Args.clear();
      // Arguments 0 and 1 of the outlined function are the global tid and
      // bound tid supplied by the runtime; forward the outlined function's
      // own copies.
      Args.push_back(OutlinedFn->getArg(0));
      Args.push_back(OutlinedFn->getArg(1));
      for (unsigned U = CallbackFirstArgOperand, E = CI->getNumArgOperands();
           U < E; ++U)
        Args.push_back(CI->getArgOperand(U));

      CallInst *NewCI = CallInst::Create(FT, Callee, Args, "", CI);
      if (CI->getDebugLoc())
        NewCI->setDebugLoc(CI->getDebugLoc());

      // Forward parameter attributes from the callback to the callee.
      for (unsigned U = CallbackFirstArgOperand, E = CI->getNumArgOperands();
           U < E; ++U)
        for (const Attribute &A : CI->getAttributes().getParamAttributes(U))
          NewCI->addParamAttr(
              U - (CallbackFirstArgOperand - CallbackCalleeOperand), A);

      // Emit an explicit barrier to replace the implicit fork-join barrier.
      if (CI != MergableCIs.back()) {
        // TODO: Remove barrier if the merged parallel region includes the
        // 'nowait' clause.
        OMPInfoCache.OMPBuilder.createBarrier(
            InsertPointTy(NewCI->getParent(),
                          NewCI->getNextNode()->getIterator()),
            OMPD_parallel);
      }

      CI->eraseFromParent();
    }

    assert(OutlinedFn != OriginalFn && "Outlining failed");
    CGUpdater.registerOutlinedFunction(*OriginalFn, *OutlinedFn);
    CGUpdater.reanalyzeFunction(*OriginalFn);

    NumOpenMPParallelRegionsMerged += MergableCIs.size();

    return true;
  };

  // Helper function that identifies sequences of
  // __kmpc_fork_call uses in a basic block.
  auto DetectPRsCB = [&](Use &U, Function &F) {
    CallInst *CI = getCallIfRegularCall(U, &RFI);
    BB2PRMap[CI->getParent()].insert(CI);

    return false;
  };

  BB2PRMap.clear();
  RFI.foreachUse(SCC, DetectPRsCB);
  SmallVector, 4> MergableCIsVector;
  // Find mergable parallel regions within a basic block that are
  // safe to merge, that is any in-between instructions can safely
  // execute in parallel after merging.
  // TODO: support merging across basic-blocks.
  for (auto &It : BB2PRMap) {
    auto &CIs = It.getSecond();
    if (CIs.size() < 2)
      continue;

    BasicBlock *BB = It.getFirst();
    SmallVector MergableCIs;

    /// Returns true if the instruction is mergable, false otherwise.
    /// A terminator instruction is unmergable by definition since merging
    /// works within a BB. Instructions before the mergable region are
    /// mergable if they are not calls to OpenMP runtime functions that may
    /// set different execution parameters for subsequent parallel regions.
    /// Instructions in-between parallel regions are mergable if they are not
    /// calls to any non-intrinsic function since that may call a non-mergable
    /// OpenMP runtime function.
    auto IsMergable = [&](Instruction &I, bool IsBeforeMergableRegion) {
      // We do not merge across BBs, hence return false (unmergable) if the
      // instruction is a terminator.
      if (I.isTerminator())
        return false;

      if (!isa(&I))
        return true;

      CallInst *CI = cast(&I);
      if (IsBeforeMergableRegion) {
        Function *CalledFunction = CI->getCalledFunction();
        if (!CalledFunction)
          return false;
        // Return false (unmergable) if the call before the parallel
        // region calls an explicit affinity (proc_bind) or number of
        // threads (num_threads) compiler-generated function. Those settings
        // may be incompatible with following parallel regions.
        // TODO: ICV tracking to detect compatibility.
        for (const auto &RFI : UnmergableCallsInfo) {
          if (CalledFunction == RFI.Declaration)
            return false;
        }
      } else {
        // Return false (unmergable) if there is a call instruction
        // in-between parallel regions when it is not an intrinsic. It
        // may call an unmergable OpenMP runtime function in its callpath.
        // TODO: Keep track of possible OpenMP calls in the callpath.
        if (!isa(CI))
          return false;
      }

      return true;
    };

    // Find maximal number of parallel region CIs that are safe to merge.
    for (auto It = BB->begin(), End = BB->end(); It != End;) {
      Instruction &I = *It;
      ++It;

      if (CIs.count(&I)) {
        MergableCIs.push_back(cast(&I));
        continue;
      }

      // Continue expanding if the instruction is mergable.
      if (IsMergable(I, MergableCIs.empty()))
        continue;

      // Forward the instruction iterator to skip the next parallel region
      // since there is an unmergable instruction which can affect it.
      for (; It != End; ++It) {
        Instruction &SkipI = *It;
        if (CIs.count(&SkipI)) {
          LLVM_DEBUG(dbgs() << TAG << "Skip parallel region " << SkipI
                            << " due to " << I << "\n");
          ++It;
          break;
        }
      }

      // Store mergable regions found.
      if (MergableCIs.size() > 1) {
        MergableCIsVector.push_back(MergableCIs);
        LLVM_DEBUG(dbgs() << TAG << "Found " << MergableCIs.size()
                          << " parallel regions in block " << BB->getName()
                          << " of function " << BB->getParent()->getName()
                          << "\n";);
      }

      MergableCIs.clear();
    }

    if (!MergableCIsVector.empty()) {
      Changed = true;

      for (auto &MergableCIs : MergableCIsVector)
        Merge(MergableCIs, BB);
      MergableCIsVector.clear();
    }
  }

  if (Changed) {
    /// Re-collect use for fork calls, emitted barrier calls, and
    /// any emitted master/end_master calls.
    OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_fork_call);
    OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_barrier);
    OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_master);
    OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_end_master);
  }

  return Changed;
}

/// Try to delete parallel regions if possible.
bool deleteParallelRegions() {
  const unsigned CallbackCalleeOperand = 2;

  OMPInformationCache::RuntimeFunctionInfo &RFI =
      OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];

  if (!RFI.Declaration)
    return false;

  bool Changed = false;
  auto DeleteCallCB = [&](Use &U, Function &) {
    CallInst *CI = getCallIfRegularCall(U);
    if (!CI)
      return false;
    // The fork call is removable only if the outlined body provably has no
    // observable effect: read-only and guaranteed to return.
    auto *Fn = dyn_cast(
        CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
    if (!Fn)
      return false;
    if (!Fn->onlyReadsMemory())
      return false;
    if (!Fn->hasFnAttribute(Attribute::WillReturn))
      return false;

    LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
                      << CI->getCaller()->getName() << "\n");

    auto Remark = [&](OptimizationRemark OR) {
      return OR << "Removing parallel region with no side-effects.";
    };
    emitRemark(CI, "OMP160", Remark);

    CGUpdater.removeCallSite(*CI);
    CI->eraseFromParent();
    Changed = true;
    ++NumOpenMPParallelRegionsDeleted;
    return true;
  };

  RFI.foreachUse(SCC, DeleteCallCB);

  return Changed;
}

/// Try to eliminate runtime calls by reusing existing ones.
bool deduplicateRuntimeCalls() {
  bool Changed = false;

  // Pure query functions whose repeated calls within one function can be
  // collapsed into a single call.
  RuntimeFunction DeduplicableRuntimeCallIDs[] = {
      OMPRTL_omp_get_num_threads,
      OMPRTL_omp_in_parallel,
      OMPRTL_omp_get_cancellation,
      OMPRTL_omp_get_thread_limit,
      OMPRTL_omp_get_supported_active_levels,
      OMPRTL_omp_get_level,
      OMPRTL_omp_get_ancestor_thread_num,
      OMPRTL_omp_get_team_size,
      OMPRTL_omp_get_active_level,
      OMPRTL_omp_in_final,
      OMPRTL_omp_get_proc_bind,
      OMPRTL_omp_get_num_places,
      OMPRTL_omp_get_num_procs,
      OMPRTL_omp_get_place_num,
      OMPRTL_omp_get_partition_num_places,
      OMPRTL_omp_get_partition_place_nums};

  // Global-tid is handled separately.
  SmallSetVector GTIdArgs;
  collectGlobalThreadIdArguments(GTIdArgs);
  LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
                    << " global thread ID arguments\n");

  for (Function *F : SCC) {
    for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
      Changed |= deduplicateRuntimeCalls(
          *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);

    // __kmpc_global_thread_num is special as we can replace it with an
    // argument in enough cases to make it worth trying.
    Value *GTIdArg = nullptr;
    for (Argument &Arg : F->args())
      if (GTIdArgs.count(&Arg)) {
        GTIdArg = &Arg;
        break;
      }
    Changed |= deduplicateRuntimeCalls(
        *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
  }

  return Changed;
}

/// Tries to hide the latency of runtime calls that involve host to
/// device memory transfers by splitting them into their "issue" and "wait"
/// versions. The "issue" is moved upwards as much as possible. The "wait" is
/// moved downards as much as possible. The "issue" issues the memory transfer
/// asynchronously, returning a handle. The "wait" waits in the returned
/// handle for the memory transfer to finish.
bool hideMemTransfersLatency() {
  auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper];
  bool Changed = false;
  auto SplitMemTransfers = [&](Use &U, Function &Decl) {
    auto *RTCall = getCallIfRegularCall(U, &RFI);
    if (!RTCall)
      return false;

    OffloadArray OffloadArrays[3];
    if (!getValuesInOffloadArrays(*RTCall, OffloadArrays))
      return false;

    LLVM_DEBUG(dumpValuesInOffloadArrays(OffloadArrays));

    // TODO: Check if can be moved upwards.
    bool WasSplit = false;
    Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall);
    if (WaitMovementPoint)
      WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint);

    Changed |= WasSplit;
    return WasSplit;
  };
  RFI.foreachUse(SCC, SplitMemTransfers);

  return Changed;
}

// Emit a missed-optimization remark for every __kmpc_alloc_shared use found,
// flagging data globalization on the GPU.
void analysisGlobalization() {
  auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];

  auto CheckGlobalization = [&](Use &U, Function &Decl) {
    if (CallInst *CI = getCallIfRegularCall(U, &RFI)) {
      auto Remark = [&](OptimizationRemarkMissed ORM) {
        return ORM
               << "Found thread data sharing on the GPU. "
               << "Expect degraded performance due to data globalization.";
      };
      emitRemark(CI, "OMP112", Remark);
    }

    return false;
  };

  RFI.foreachUse(SCC, CheckGlobalization);
}

/// Maps the values stored in the offload arrays passed as arguments to
/// \p RuntimeCall into the offload arrays in \p OAs.
bool getValuesInOffloadArrays(CallInst &RuntimeCall,
                              MutableArrayRef OAs) {
  assert(OAs.size() == 3 && "Need space for three offload arrays!");

  // A runtime call that involves memory offloading looks something like:
  // call void @__tgt_target_data_begin_mapper(arg0, arg1,
  //   i8** %offload_baseptrs, i8** %offload_ptrs, i64* %offload_sizes,
  // ...)
  // So, the idea is to access the allocas that allocate space for these
  // offload arrays, offload_baseptrs, offload_ptrs, offload_sizes.
  // Therefore:
  // i8** %offload_baseptrs.
  Value *BasePtrsArg =
      RuntimeCall.getArgOperand(OffloadArray::BasePtrsArgNum);
  // i8** %offload_ptrs.
  Value *PtrsArg = RuntimeCall.getArgOperand(OffloadArray::PtrsArgNum);
  // i8** %offload_sizes.
  Value *SizesArg = RuntimeCall.getArgOperand(OffloadArray::SizesArgNum);

  // Get values stored in **offload_baseptrs.
  auto *V = getUnderlyingObject(BasePtrsArg);
  if (!isa(V))
    return false;
  auto *BasePtrsArray = cast(V);
  if (!OAs[0].initialize(*BasePtrsArray, RuntimeCall))
    return false;

  // Get values stored in **offload_ptrs.
  V = getUnderlyingObject(PtrsArg);
  if (!isa(V))
    return false;
  auto *PtrsArray = cast(V);
  if (!OAs[1].initialize(*PtrsArray, RuntimeCall))
    return false;

  // Get values stored in **offload_sizes.
  V = getUnderlyingObject(SizesArg);
  // If it's a [constant] global array don't analyze it.
  if (isa(V))
    return isa(V);
  if (!isa(V))
    return false;
  auto *SizesArray = cast(V);
  if (!OAs[2].initialize(*SizesArray, RuntimeCall))
    return false;

  return true;
}

/// Prints the values in the OffloadArrays \p OAs using LLVM_DEBUG.
/// For now this is a way to test that the function getValuesInOffloadArrays
/// is working properly.
/// TODO: Move this to a unittest when unittests are available for OpenMPOpt.
void dumpValuesInOffloadArrays(ArrayRef OAs) {
  assert(OAs.size() == 3 && "There are three offload arrays to debug!");

  LLVM_DEBUG(dbgs() << TAG << " Successfully got offload values:\n");
  std::string ValuesStr;
  raw_string_ostream Printer(ValuesStr);
  std::string Separator = " --- ";

  for (auto *BP : OAs[0].StoredValues) {
    BP->print(Printer);
    Printer << Separator;
  }
  LLVM_DEBUG(dbgs() << "\t\toffload_baseptrs: " << Printer.str() << "\n");
  ValuesStr.clear();

  for (auto *P : OAs[1].StoredValues) {
    P->print(Printer);
    Printer << Separator;
  }
  LLVM_DEBUG(dbgs() << "\t\toffload_ptrs: " << Printer.str() << "\n");
  ValuesStr.clear();

  for (auto *S : OAs[2].StoredValues) {
    S->print(Printer);
    Printer << Separator;
  }
  LLVM_DEBUG(dbgs() << "\t\toffload_sizes: " << Printer.str() << "\n");
}

/// Returns the instruction where the "wait" counterpart \p RuntimeCall can be
/// moved. Returns nullptr if the movement is not possible, or not worth it.
Instruction *canBeMovedDownwards(CallInst &RuntimeCall) {
  // FIXME: This traverses only the BasicBlock where RuntimeCall is.
  //  Make it traverse the CFG.
Instruction *CurrentI = &RuntimeCall; bool IsWorthIt = false; while ((CurrentI = CurrentI->getNextNode())) { // TODO: Once we detect the regions to be offloaded we should use the // alias analysis manager to check if CurrentI may modify one of // the offloaded regions. if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) { if (IsWorthIt) return CurrentI; return nullptr; } // FIXME: For now if we move it over anything without side effect // is worth it. IsWorthIt = true; } // Return end of BasicBlock. return RuntimeCall.getParent()->getTerminator(); } /// Splits \p RuntimeCall into its "issue" and "wait" counterparts. bool splitTargetDataBeginRTC(CallInst &RuntimeCall, Instruction &WaitMovementPoint) { // Create stack allocated handle (__tgt_async_info) at the beginning of the // function. Used for storing information of the async transfer, allowing to // wait on it later. auto &IRBuilder = OMPInfoCache.OMPBuilder; auto *F = RuntimeCall.getCaller(); Instruction *FirstInst = &(F->getEntryBlock().front()); AllocaInst *Handle = new AllocaInst( IRBuilder.AsyncInfo, F->getAddressSpace(), "handle", FirstInst); // Add "issue" runtime call declaration: // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32, // i8**, i8**, i64*, i64*) FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction( M, OMPRTL___tgt_target_data_begin_mapper_issue); // Change RuntimeCall call site for its asynchronous version. 
SmallVector Args; for (auto &Arg : RuntimeCall.args()) Args.push_back(Arg.get()); Args.push_back(Handle); CallInst *IssueCallsite = CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall); RuntimeCall.eraseFromParent(); // Add "wait" runtime call declaration: // declare void @__tgt_target_data_begin_wait(i64, %struct.__tgt_async_info) FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction( M, OMPRTL___tgt_target_data_begin_mapper_wait); Value *WaitParams[2] = { IssueCallsite->getArgOperand( OffloadArray::DeviceIDArgNum), // device_id. Handle // handle to wait on. }; CallInst::Create(WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint); return true; } static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent, bool GlobalOnly, bool &SingleChoice) { if (CurrentIdent == NextIdent) return CurrentIdent; // TODO: Figure out how to actually combine multiple debug locations. For // now we just keep an existing one if there is a single choice. if (!GlobalOnly || isa(NextIdent)) { SingleChoice = !CurrentIdent; return NextIdent; } return nullptr; } /// Return an `struct ident_t*` value that represents the ones used in the /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not /// return a local `struct ident_t*`. For now, if we cannot find a suitable /// return value we create one from scratch. We also do not yet combine /// information, e.g., the source locations, see combinedIdentStruct. 
Value * getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI, Function &F, bool GlobalOnly) { bool SingleChoice = true; Value *Ident = nullptr; auto CombineIdentStruct = [&](Use &U, Function &Caller) { CallInst *CI = getCallIfRegularCall(U, &RFI); if (!CI || &F != &Caller) return false; Ident = combinedIdentStruct(Ident, CI->getArgOperand(0), /* GlobalOnly */ true, SingleChoice); return false; }; RFI.foreachUse(SCC, CombineIdentStruct); if (!Ident || !SingleChoice) { // The IRBuilder uses the insertion block to get to the module, this is // unfortunate but we work around it for now. if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock()) OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy( &F.getEntryBlock(), F.getEntryBlock().begin())); // Create a fallback location if non was found. // TODO: Use the debug locations of the calls instead. Constant *Loc = OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr(); Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc); } return Ident; } /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or /// \p ReplVal if given. bool deduplicateRuntimeCalls(Function &F, OMPInformationCache::RuntimeFunctionInfo &RFI, Value *ReplVal = nullptr) { auto *UV = RFI.getUseVector(F); if (!UV || UV->size() + (ReplVal != nullptr) < 2) return false; LLVM_DEBUG( dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name << (ReplVal ? " with an existing value\n" : "\n") << "\n"); assert((!ReplVal || (isa(ReplVal) && cast(ReplVal)->getParent() == &F)) && "Unexpected replacement value!"); // TODO: Use dominance to find a good position instead. 
auto CanBeMoved = [this](CallBase &CB) { unsigned NumArgs = CB.getNumArgOperands(); if (NumArgs == 0) return true; if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr) return false; for (unsigned u = 1; u < NumArgs; ++u) if (isa(CB.getArgOperand(u))) return false; return true; }; if (!ReplVal) { for (Use *U : *UV) if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) { if (!CanBeMoved(*CI)) continue; // If the function is a kernel, dedup will move // the runtime call right after the kernel init callsite. Otherwise, // it will move it to the beginning of the caller function. if (isKernel(F)) { auto &KernelInitRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init]; auto *KernelInitUV = KernelInitRFI.getUseVector(F); if (KernelInitUV->empty()) continue; assert(KernelInitUV->size() == 1 && "Expected a single __kmpc_target_init in kernel\n"); CallInst *KernelInitCI = getCallIfRegularCall(*KernelInitUV->front(), &KernelInitRFI); assert(KernelInitCI && "Expected a call to __kmpc_target_init in kernel\n"); CI->moveAfter(KernelInitCI); } else CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt()); ReplVal = CI; break; } if (!ReplVal) return false; } // If we use a call as a replacement value we need to make sure the ident is // valid at the new location. For now we just pick a global one, either // existing and used by one of the calls, or created from scratch. 
if (CallBase *CI = dyn_cast(ReplVal)) { if (CI->getNumArgOperands() > 0 && CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) { Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F, /* GlobalOnly */ true); CI->setArgOperand(0, Ident); } } bool Changed = false; auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) { CallInst *CI = getCallIfRegularCall(U, &RFI); if (!CI || CI == ReplVal || &F != &Caller) return false; assert(CI->getCaller() == &F && "Unexpected call!"); auto Remark = [&](OptimizationRemark OR) { return OR << "OpenMP runtime call " << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated."; }; if (CI->getDebugLoc()) emitRemark(CI, "OMP170", Remark); else emitRemark(&F, "OMP170", Remark); CGUpdater.removeCallSite(*CI); CI->replaceAllUsesWith(ReplVal); CI->eraseFromParent(); ++NumOpenMPRuntimeCallsDeduplicated; Changed = true; return true; }; RFI.foreachUse(SCC, ReplaceAndDeleteCB); return Changed; } /// Collect arguments that represent the global thread id in \p GTIdArgs. void collectGlobalThreadIdArguments(SmallSetVector >IdArgs) { // TODO: Below we basically perform a fixpoint iteration with a pessimistic // initialization. We could define an AbstractAttribute instead and // run the Attributor here once it can be run as an SCC pass. // Helper to check the argument \p ArgNo at all call sites of \p F for // a GTId. auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) { if (!F.hasLocalLinkage()) return false; for (Use &U : F.uses()) { if (CallInst *CI = getCallIfRegularCall(U)) { Value *ArgOp = CI->getArgOperand(ArgNo); if (CI == &RefCI || GTIdArgs.count(ArgOp) || getCallIfRegularCall( *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num])) continue; } return false; } return true; }; // Helper to identify uses of a GTId as GTId arguments. 
auto AddUserArgs = [&](Value >Id) { for (Use &U : GTId.uses()) if (CallInst *CI = dyn_cast(U.getUser())) if (CI->isArgOperand(&U)) if (Function *Callee = CI->getCalledFunction()) if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI)) GTIdArgs.insert(Callee->getArg(U.getOperandNo())); }; // The argument users of __kmpc_global_thread_num calls are GTIds. OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]; GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) { if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI)) AddUserArgs(*CI); return false; }); // Transitively search for more arguments by looking at the users of the // ones we know already. During the search the GTIdArgs vector is extended // so we cannot cache the size nor can we use a range based for. for (unsigned u = 0; u < GTIdArgs.size(); ++u) AddUserArgs(*GTIdArgs[u]); } /// Kernel (=GPU) optimizations and utility functions /// ///{{ /// Check if \p F is a kernel, hence entry point for target offloading. bool isKernel(Function &F) { return OMPInfoCache.Kernels.count(&F); } /// Cache to remember the unique kernel for a function. DenseMap> UniqueKernelMap; /// Find the unique kernel that will execute \p F, if any. Kernel getUniqueKernelFor(Function &F); /// Find the unique kernel that will execute \p I, if any. Kernel getUniqueKernelFor(Instruction &I) { return getUniqueKernelFor(*I.getFunction()); } /// Rewrite the device (=GPU) code state machine create in non-SPMD mode in /// the cases we can avoid taking the address of a function. bool rewriteDeviceCodeStateMachine(); /// ///}} /// Emit a remark generically /// /// This template function can be used to generically emit a remark. 
The /// RemarkKind should be one of the following: /// - OptimizationRemark to indicate a successful optimization attempt /// - OptimizationRemarkMissed to report a failed optimization attempt /// - OptimizationRemarkAnalysis to provide additional information about an /// optimization attempt /// /// The remark is built using a callback function provided by the caller that /// takes a RemarkKind as input and returns a RemarkKind. template void emitRemark(Instruction *I, StringRef RemarkName, RemarkCallBack &&RemarkCB) const { Function *F = I->getParent()->getParent(); auto &ORE = OREGetter(F); if (RemarkName.startswith("OMP")) ORE.emit([&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)) << " [" << RemarkName << "]"; }); else ORE.emit( [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)); }); } /// Emit a remark on a function. template void emitRemark(Function *F, StringRef RemarkName, RemarkCallBack &&RemarkCB) const { auto &ORE = OREGetter(F); if (RemarkName.startswith("OMP")) ORE.emit([&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)) << " [" << RemarkName << "]"; }); else ORE.emit( [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)); }); } /// RAII struct to temporarily change an RTL function's linkage to external. /// This prevents it from being mistakenly removed by other optimizations. struct ExternalizationRAII { ExternalizationRAII(OMPInformationCache &OMPInfoCache, RuntimeFunction RFKind) : Declaration(OMPInfoCache.RFIs[RFKind].Declaration) { if (!Declaration) return; LinkageType = Declaration->getLinkage(); Declaration->setLinkage(GlobalValue::ExternalLinkage); } ~ExternalizationRAII() { if (!Declaration) return; Declaration->setLinkage(LinkageType); } Function *Declaration; GlobalValue::LinkageTypes LinkageType; }; /// The underlying module. Module &M; /// The SCC we are operating on. 
SmallVectorImpl &SCC; /// Callback to update the call graph, the first argument is a removed call, /// the second an optional replacement call. CallGraphUpdater &CGUpdater; /// Callback to get an OptimizationRemarkEmitter from a Function * OptimizationRemarkGetter OREGetter; /// OpenMP-specific information cache. Also Used for Attributor runs. OMPInformationCache &OMPInfoCache; /// Attributor instance. Attributor &A; /// Helper function to run Attributor on SCC. bool runAttributor(bool IsModulePass) { if (SCC.empty()) return false; // Temporarily make these function have external linkage so the Attributor // doesn't remove them when we try to look them up later. ExternalizationRAII Parallel(OMPInfoCache, OMPRTL___kmpc_kernel_parallel); ExternalizationRAII EndParallel(OMPInfoCache, OMPRTL___kmpc_kernel_end_parallel); ExternalizationRAII BarrierSPMD(OMPInfoCache, OMPRTL___kmpc_barrier_simple_spmd); registerAAs(IsModulePass); ChangeStatus Changed = A.run(); LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size() << " functions, result: " << Changed << ".\n"); return Changed == ChangeStatus::CHANGED; } void registerFoldRuntimeCall(RuntimeFunction RF); /// Populate the Attributor with abstract attribute opportunities in the /// function. void registerAAs(bool IsModulePass); }; Kernel OpenMPOpt::getUniqueKernelFor(Function &F) { if (!OMPInfoCache.ModuleSlice.count(&F)) return nullptr; // Use a scope to keep the lifetime of the CachedKernel short. { Optional &CachedKernel = UniqueKernelMap[&F]; if (CachedKernel) return *CachedKernel; // TODO: We should use an AA to create an (optimistic and callback // call-aware) call graph. For now we stick to simple patterns that // are less powerful, basically the worst fixpoint. 
if (isKernel(F)) { CachedKernel = Kernel(&F); return *CachedKernel; } CachedKernel = nullptr; if (!F.hasLocalLinkage()) { // See https://openmp.llvm.org/remarks/OptimizationRemarks.html auto Remark = [&](OptimizationRemarkAnalysis ORA) { return ORA << "Potentially unknown OpenMP target region caller."; }; emitRemark(&F, "OMP100", Remark); return nullptr; } } auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel { if (auto *Cmp = dyn_cast(U.getUser())) { // Allow use in equality comparisons. if (Cmp->isEquality()) return getUniqueKernelFor(*Cmp); return nullptr; } if (auto *CB = dyn_cast(U.getUser())) { // Allow direct calls. if (CB->isCallee(&U)) return getUniqueKernelFor(*CB); OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; // Allow the use in __kmpc_parallel_51 calls. if (OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI)) return getUniqueKernelFor(*CB); return nullptr; } // Disallow every other use. return nullptr; }; // TODO: In the future we want to track more than just a unique kernel. SmallPtrSet PotentialKernels; OMPInformationCache::foreachUse(F, [&](const Use &U) { PotentialKernels.insert(GetUniqueKernelForUse(U)); }); Kernel K = nullptr; if (PotentialKernels.size() == 1) K = *PotentialKernels.begin(); // Cache the result. UniqueKernelMap[&F] = K; return K; } bool OpenMPOpt::rewriteDeviceCodeStateMachine() { OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; bool Changed = false; if (!KernelParallelRFI) return Changed; for (Function *F : SCC) { // Check if the function is a use in a __kmpc_parallel_51 call at // all. 
bool UnknownUse = false; bool KernelParallelUse = false; unsigned NumDirectCalls = 0; SmallVector ToBeReplacedStateMachineUses; OMPInformationCache::foreachUse(*F, [&](Use &U) { if (auto *CB = dyn_cast(U.getUser())) if (CB->isCallee(&U)) { ++NumDirectCalls; return; } if (isa(U.getUser())) { ToBeReplacedStateMachineUses.push_back(&U); return; } // Find wrapper functions that represent parallel kernels. CallInst *CI = OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI); const unsigned int WrapperFunctionArgNo = 6; if (!KernelParallelUse && CI && CI->getArgOperandNo(&U) == WrapperFunctionArgNo) { KernelParallelUse = true; ToBeReplacedStateMachineUses.push_back(&U); return; } UnknownUse = true; }); // Do not emit a remark if we haven't seen a __kmpc_parallel_51 // use. if (!KernelParallelUse) continue; // If this ever hits, we should investigate. // TODO: Checking the number of uses is not a necessary restriction and // should be lifted. if (UnknownUse || NumDirectCalls != 1 || ToBeReplacedStateMachineUses.size() > 2) { auto Remark = [&](OptimizationRemarkAnalysis ORA) { return ORA << "Parallel region is used in " << (UnknownUse ? "unknown" : "unexpected") << " ways. Will not attempt to rewrite the state machine."; }; emitRemark(F, "OMP101", Remark); continue; } // Even if we have __kmpc_parallel_51 calls, we (for now) give // up if the function is not called from a unique kernel. Kernel K = getUniqueKernelFor(*F); if (!K) { auto Remark = [&](OptimizationRemarkAnalysis ORA) { return ORA << "Parallel region is not called from a unique kernel. " "Will not attempt to rewrite the state machine."; }; emitRemark(F, "OMP102", Remark); continue; } // We now know F is a parallel body function called only from the kernel K. // We also identified the state machine uses in which we replace the // function pointer by a new global symbol for identification purposes. This // ensures only direct calls to the function are left. 
Module &M = *F->getParent(); Type *Int8Ty = Type::getInt8Ty(M.getContext()); auto *ID = new GlobalVariable( M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage, UndefValue::get(Int8Ty), F->getName() + ".ID"); for (Use *U : ToBeReplacedStateMachineUses) - U->set(ConstantExpr::getBitCast(ID, U->get()->getType())); + U->set(ConstantExpr::getPointerBitCastOrAddrSpaceCast( + ID, U->get()->getType())); ++NumOpenMPParallelRegionsReplacedInGPUStateMachine; Changed = true; } return Changed; } /// Abstract Attribute for tracking ICV values. struct AAICVTracker : public StateWrapper { using Base = StateWrapper; AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {} void initialize(Attributor &A) override { Function *F = getAnchorScope(); if (!F || !A.isFunctionIPOAmendable(*F)) indicatePessimisticFixpoint(); } /// Returns true if value is assumed to be tracked. bool isAssumedTracked() const { return getAssumed(); } /// Returns true if value is known to be tracked. bool isKnownTracked() const { return getAssumed(); } /// Create an abstract attribute biew for the position \p IRP. static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A); /// Return the value with which \p I can be replaced for specific \p ICV. virtual Optional getReplacementValue(InternalControlVar ICV, const Instruction *I, Attributor &A) const { return None; } /// Return an assumed unique ICV value if a single candidate is found. If /// there cannot be one, return a nullptr. If it is not clear yet, return the /// Optional::NoneType. virtual Optional getUniqueReplacementValue(InternalControlVar ICV) const = 0; // Currently only nthreads is being tracked. // this array will only grow with time. 
InternalControlVar TrackableICVs[1] = {ICV_nthreads}; /// See AbstractAttribute::getName() const std::string getName() const override { return "AAICVTracker"; } /// See AbstractAttribute::getIdAddr() const char *getIdAddr() const override { return &ID; } /// This function should return true if the type of the \p AA is AAICVTracker static bool classof(const AbstractAttribute *AA) { return (AA->getIdAddr() == &ID); } static const char ID; }; struct AAICVTrackerFunction : public AAICVTracker { AAICVTrackerFunction(const IRPosition &IRP, Attributor &A) : AAICVTracker(IRP, A) {} // FIXME: come up with better string. const std::string getAsStr() const override { return "ICVTrackerFunction"; } // FIXME: come up with some stats. void trackStatistics() const override {} /// We don't manifest anything for this AA. ChangeStatus manifest(Attributor &A) override { return ChangeStatus::UNCHANGED; } // Map of ICV to their values at specific program point. EnumeratedArray, InternalControlVar, InternalControlVar::ICV___last> ICVReplacementValuesMap; ChangeStatus updateImpl(Attributor &A) override { ChangeStatus HasChanged = ChangeStatus::UNCHANGED; Function *F = getAnchorScope(); auto &OMPInfoCache = static_cast(A.getInfoCache()); for (InternalControlVar ICV : TrackableICVs) { auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter]; auto &ValuesMap = ICVReplacementValuesMap[ICV]; auto TrackValues = [&](Use &U, Function &) { CallInst *CI = OpenMPOpt::getCallIfRegularCall(U); if (!CI) return false; // FIXME: handle setters with more that 1 arguments. /// Track new value. if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second) HasChanged = ChangeStatus::CHANGED; return false; }; auto CallCheck = [&](Instruction &I) { Optional ReplVal = getValueForCall(A, &I, ICV); if (ReplVal.hasValue() && ValuesMap.insert(std::make_pair(&I, *ReplVal)).second) HasChanged = ChangeStatus::CHANGED; return true; }; // Track all changes of an ICV. 
SetterRFI.foreachUse(TrackValues, F); bool UsedAssumedInformation = false; A.checkForAllInstructions(CallCheck, *this, {Instruction::Call}, UsedAssumedInformation, /* CheckBBLivenessOnly */ true); /// TODO: Figure out a way to avoid adding entry in /// ICVReplacementValuesMap Instruction *Entry = &F->getEntryBlock().front(); if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry)) ValuesMap.insert(std::make_pair(Entry, nullptr)); } return HasChanged; } /// Hepler to check if \p I is a call and get the value for it if it is /// unique. Optional getValueForCall(Attributor &A, const Instruction *I, InternalControlVar &ICV) const { const auto *CB = dyn_cast(I); if (!CB || CB->hasFnAttr("no_openmp") || CB->hasFnAttr("no_openmp_routines")) return None; auto &OMPInfoCache = static_cast(A.getInfoCache()); auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter]; auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter]; Function *CalledFunction = CB->getCalledFunction(); // Indirect call, assume ICV changes. if (CalledFunction == nullptr) return nullptr; if (CalledFunction == GetterRFI.Declaration) return None; if (CalledFunction == SetterRFI.Declaration) { if (ICVReplacementValuesMap[ICV].count(I)) return ICVReplacementValuesMap[ICV].lookup(I); return nullptr; } // Since we don't know, assume it changes the ICV. if (CalledFunction->isDeclaration()) return nullptr; const auto &ICVTrackingAA = A.getAAFor( *this, IRPosition::callsite_returned(*CB), DepClassTy::REQUIRED); if (ICVTrackingAA.isAssumedTracked()) return ICVTrackingAA.getUniqueReplacementValue(ICV); // If we don't know, assume it changes. return nullptr; } // We don't check unique value for a function, so return None. Optional getUniqueReplacementValue(InternalControlVar ICV) const override { return None; } /// Return the value with which \p I can be replaced for specific \p ICV. 
Optional getReplacementValue(InternalControlVar ICV, const Instruction *I, Attributor &A) const override { const auto &ValuesMap = ICVReplacementValuesMap[ICV]; if (ValuesMap.count(I)) return ValuesMap.lookup(I); SmallVector Worklist; SmallPtrSet Visited; Worklist.push_back(I); Optional ReplVal; while (!Worklist.empty()) { const Instruction *CurrInst = Worklist.pop_back_val(); if (!Visited.insert(CurrInst).second) continue; const BasicBlock *CurrBB = CurrInst->getParent(); // Go up and look for all potential setters/calls that might change the // ICV. while ((CurrInst = CurrInst->getPrevNode())) { if (ValuesMap.count(CurrInst)) { Optional NewReplVal = ValuesMap.lookup(CurrInst); // Unknown value, track new. if (!ReplVal.hasValue()) { ReplVal = NewReplVal; break; } // If we found a new value, we can't know the icv value anymore. if (NewReplVal.hasValue()) if (ReplVal != NewReplVal) return nullptr; break; } Optional NewReplVal = getValueForCall(A, CurrInst, ICV); if (!NewReplVal.hasValue()) continue; // Unknown value, track new. if (!ReplVal.hasValue()) { ReplVal = NewReplVal; break; } // if (NewReplVal.hasValue()) // We found a new value, we can't know the icv value anymore. if (ReplVal != NewReplVal) return nullptr; } // If we are in the same BB and we have a value, we are done. if (CurrBB == I->getParent() && ReplVal.hasValue()) return ReplVal; // Go through all predecessors and add terminators for analysis. for (const BasicBlock *Pred : predecessors(CurrBB)) if (const Instruction *Terminator = Pred->getTerminator()) Worklist.push_back(Terminator); } return ReplVal; } }; struct AAICVTrackerFunctionReturned : AAICVTracker { AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A) : AAICVTracker(IRP, A) {} // FIXME: come up with better string. const std::string getAsStr() const override { return "ICVTrackerFunctionReturned"; } // FIXME: come up with some stats. void trackStatistics() const override {} /// We don't manifest anything for this AA. 
ChangeStatus manifest(Attributor &A) override { return ChangeStatus::UNCHANGED; } // Map of ICV to their values at specific program point. EnumeratedArray, InternalControlVar, InternalControlVar::ICV___last> ICVReplacementValuesMap; /// Return the value with which \p I can be replaced for specific \p ICV. Optional getUniqueReplacementValue(InternalControlVar ICV) const override { return ICVReplacementValuesMap[ICV]; } ChangeStatus updateImpl(Attributor &A) override { ChangeStatus Changed = ChangeStatus::UNCHANGED; const auto &ICVTrackingAA = A.getAAFor( *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); if (!ICVTrackingAA.isAssumedTracked()) return indicatePessimisticFixpoint(); for (InternalControlVar ICV : TrackableICVs) { Optional &ReplVal = ICVReplacementValuesMap[ICV]; Optional UniqueICVValue; auto CheckReturnInst = [&](Instruction &I) { Optional NewReplVal = ICVTrackingAA.getReplacementValue(ICV, &I, A); // If we found a second ICV value there is no unique returned value. if (UniqueICVValue.hasValue() && UniqueICVValue != NewReplVal) return false; UniqueICVValue = NewReplVal; return true; }; bool UsedAssumedInformation = false; if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}, UsedAssumedInformation, /* CheckBBLivenessOnly */ true)) UniqueICVValue = nullptr; if (UniqueICVValue == ReplVal) continue; ReplVal = UniqueICVValue; Changed = ChangeStatus::CHANGED; } return Changed; } }; struct AAICVTrackerCallSite : AAICVTracker { AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A) : AAICVTracker(IRP, A) {} void initialize(Attributor &A) override { Function *F = getAnchorScope(); if (!F || !A.isFunctionIPOAmendable(*F)) indicatePessimisticFixpoint(); // We only initialize this AA for getters, so we need to know which ICV it // gets. 
auto &OMPInfoCache = static_cast(A.getInfoCache()); for (InternalControlVar ICV : TrackableICVs) { auto ICVInfo = OMPInfoCache.ICVs[ICV]; auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter]; if (Getter.Declaration == getAssociatedFunction()) { AssociatedICV = ICVInfo.Kind; return; } } /// Unknown ICV. indicatePessimisticFixpoint(); } ChangeStatus manifest(Attributor &A) override { if (!ReplVal.hasValue() || !ReplVal.getValue()) return ChangeStatus::UNCHANGED; A.changeValueAfterManifest(*getCtxI(), **ReplVal); A.deleteAfterManifest(*getCtxI()); return ChangeStatus::CHANGED; } // FIXME: come up with better string. const std::string getAsStr() const override { return "ICVTrackerCallSite"; } // FIXME: come up with some stats. void trackStatistics() const override {} InternalControlVar AssociatedICV; Optional ReplVal; ChangeStatus updateImpl(Attributor &A) override { const auto &ICVTrackingAA = A.getAAFor( *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); // We don't have any information, so we assume it changes the ICV. if (!ICVTrackingAA.isAssumedTracked()) return indicatePessimisticFixpoint(); Optional NewReplVal = ICVTrackingAA.getReplacementValue(AssociatedICV, getCtxI(), A); if (ReplVal == NewReplVal) return ChangeStatus::UNCHANGED; ReplVal = NewReplVal; return ChangeStatus::CHANGED; } // Return the value with which associated value can be replaced for specific // \p ICV. Optional getUniqueReplacementValue(InternalControlVar ICV) const override { return ReplVal; } }; struct AAICVTrackerCallSiteReturned : AAICVTracker { AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A) : AAICVTracker(IRP, A) {} // FIXME: come up with better string. const std::string getAsStr() const override { return "ICVTrackerCallSiteReturned"; } // FIXME: come up with some stats. void trackStatistics() const override {} /// We don't manifest anything for this AA. 
ChangeStatus manifest(Attributor &A) override { return ChangeStatus::UNCHANGED; } // Map of ICV to their values at specific program point. EnumeratedArray, InternalControlVar, InternalControlVar::ICV___last> ICVReplacementValuesMap; /// Return the value with which associated value can be replaced for specific /// \p ICV. Optional getUniqueReplacementValue(InternalControlVar ICV) const override { return ICVReplacementValuesMap[ICV]; } ChangeStatus updateImpl(Attributor &A) override { ChangeStatus Changed = ChangeStatus::UNCHANGED; const auto &ICVTrackingAA = A.getAAFor( *this, IRPosition::returned(*getAssociatedFunction()), DepClassTy::REQUIRED); // We don't have any information, so we assume it changes the ICV. if (!ICVTrackingAA.isAssumedTracked()) return indicatePessimisticFixpoint(); for (InternalControlVar ICV : TrackableICVs) { Optional &ReplVal = ICVReplacementValuesMap[ICV]; Optional NewReplVal = ICVTrackingAA.getUniqueReplacementValue(ICV); if (ReplVal == NewReplVal) continue; ReplVal = NewReplVal; Changed = ChangeStatus::CHANGED; } return Changed; } }; struct AAExecutionDomainFunction : public AAExecutionDomain { AAExecutionDomainFunction(const IRPosition &IRP, Attributor &A) : AAExecutionDomain(IRP, A) {} const std::string getAsStr() const override { return "[AAExecutionDomain] " + std::to_string(SingleThreadedBBs.size()) + "/" + std::to_string(NumBBs) + " BBs thread 0 only."; } /// See AbstractAttribute::trackStatistics(). 
void trackStatistics() const override {} void initialize(Attributor &A) override { Function *F = getAnchorScope(); for (const auto &BB : *F) SingleThreadedBBs.insert(&BB); NumBBs = SingleThreadedBBs.size(); } ChangeStatus manifest(Attributor &A) override { LLVM_DEBUG({ for (const BasicBlock *BB : SingleThreadedBBs) dbgs() << TAG << " Basic block @" << getAnchorScope()->getName() << " " << BB->getName() << " is executed by a single thread.\n"; }); return ChangeStatus::UNCHANGED; } ChangeStatus updateImpl(Attributor &A) override; /// Check if an instruction is executed by a single thread. bool isExecutedByInitialThreadOnly(const Instruction &I) const override { return isExecutedByInitialThreadOnly(*I.getParent()); } bool isExecutedByInitialThreadOnly(const BasicBlock &BB) const override { return isValidState() && SingleThreadedBBs.contains(&BB); } /// Set of basic blocks that are executed by a single thread. DenseSet SingleThreadedBBs; /// Total number of basic blocks in this function. long unsigned NumBBs; }; ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) { Function *F = getAnchorScope(); ReversePostOrderTraversal RPOT(F); auto NumSingleThreadedBBs = SingleThreadedBBs.size(); bool AllCallSitesKnown; auto PredForCallSite = [&](AbstractCallSite ACS) { const auto &ExecutionDomainAA = A.getAAFor( *this, IRPosition::function(*ACS.getInstruction()->getFunction()), DepClassTy::REQUIRED); return ACS.isDirectCall() && ExecutionDomainAA.isExecutedByInitialThreadOnly( *ACS.getInstruction()); }; if (!A.checkForAllCallSites(PredForCallSite, *this, /* RequiresAllCallSites */ true, AllCallSitesKnown)) SingleThreadedBBs.erase(&F->getEntryBlock()); auto &OMPInfoCache = static_cast(A.getInfoCache()); auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init]; // Check if the edge into the successor block compares the __kmpc_target_init // result with -1. If we are in non-SPMD-mode that signals only the main // thread will execute the edge. 
auto IsInitialThreadOnly = [&](BranchInst *Edge, BasicBlock *SuccessorBB) { if (!Edge || !Edge->isConditional()) return false; if (Edge->getSuccessor(0) != SuccessorBB) return false; auto *Cmp = dyn_cast(Edge->getCondition()); if (!Cmp || !Cmp->isTrueWhenEqual() || !Cmp->isEquality()) return false; ConstantInt *C = dyn_cast(Cmp->getOperand(1)); if (!C) return false; // Match: -1 == __kmpc_target_init (for non-SPMD kernels only!) if (C->isAllOnesValue()) { auto *CB = dyn_cast(Cmp->getOperand(0)); CB = CB ? OpenMPOpt::getCallIfRegularCall(*CB, &RFI) : nullptr; if (!CB) return false; const int InitIsSPMDArgNo = 1; auto *IsSPMDModeCI = dyn_cast(CB->getOperand(InitIsSPMDArgNo)); return IsSPMDModeCI && IsSPMDModeCI->isZero(); } return false; }; // Merge all the predecessor states into the current basic block. A basic // block is executed by a single thread if all of its predecessors are. auto MergePredecessorStates = [&](BasicBlock *BB) { if (pred_begin(BB) == pred_end(BB)) return SingleThreadedBBs.contains(BB); bool IsInitialThread = true; for (auto PredBB = pred_begin(BB), PredEndBB = pred_end(BB); PredBB != PredEndBB; ++PredBB) { if (!IsInitialThreadOnly(dyn_cast((*PredBB)->getTerminator()), BB)) IsInitialThread &= SingleThreadedBBs.contains(*PredBB); } return IsInitialThread; }; for (auto *BB : RPOT) { if (!MergePredecessorStates(BB)) SingleThreadedBBs.erase(BB); } return (NumSingleThreadedBBs == SingleThreadedBBs.size()) ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED; } /// Try to replace memory allocation calls called by a single thread with a /// static buffer of shared memory. struct AAHeapToShared : public StateWrapper { using Base = StateWrapper; AAHeapToShared(const IRPosition &IRP, Attributor &A) : Base(IRP) {} /// Create an abstract attribute view for the position \p IRP. static AAHeapToShared &createForPosition(const IRPosition &IRP, Attributor &A); /// Returns true if HeapToShared conversion is assumed to be possible. 
virtual bool isAssumedHeapToShared(CallBase &CB) const = 0; /// Returns true if HeapToShared conversion is assumed and the CB is a /// callsite to a free operation to be removed. virtual bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const = 0; /// See AbstractAttribute::getName(). const std::string getName() const override { return "AAHeapToShared"; } /// See AbstractAttribute::getIdAddr(). const char *getIdAddr() const override { return &ID; } /// This function should return true if the type of the \p AA is /// AAHeapToShared. static bool classof(const AbstractAttribute *AA) { return (AA->getIdAddr() == &ID); } /// Unique ID (due to the unique address) static const char ID; }; struct AAHeapToSharedFunction : public AAHeapToShared { AAHeapToSharedFunction(const IRPosition &IRP, Attributor &A) : AAHeapToShared(IRP, A) {} const std::string getAsStr() const override { return "[AAHeapToShared] " + std::to_string(MallocCalls.size()) + " malloc calls eligible."; } /// See AbstractAttribute::trackStatistics(). void trackStatistics() const override {} /// This functions finds free calls that will be removed by the /// HeapToShared transformation. void findPotentialRemovedFreeCalls(Attributor &A) { auto &OMPInfoCache = static_cast(A.getInfoCache()); auto &FreeRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared]; PotentialRemovedFreeCalls.clear(); // Update free call users of found malloc calls. 
for (CallBase *CB : MallocCalls) { SmallVector FreeCalls; for (auto *U : CB->users()) { CallBase *C = dyn_cast(U); if (C && C->getCalledFunction() == FreeRFI.Declaration) FreeCalls.push_back(C); } if (FreeCalls.size() != 1) continue; PotentialRemovedFreeCalls.insert(FreeCalls.front()); } } void initialize(Attributor &A) override { auto &OMPInfoCache = static_cast(A.getInfoCache()); auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; for (User *U : RFI.Declaration->users()) if (CallBase *CB = dyn_cast(U)) MallocCalls.insert(CB); findPotentialRemovedFreeCalls(A); } bool isAssumedHeapToShared(CallBase &CB) const override { return isValidState() && MallocCalls.count(&CB); } bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const override { return isValidState() && PotentialRemovedFreeCalls.count(&CB); } ChangeStatus manifest(Attributor &A) override { if (MallocCalls.empty()) return ChangeStatus::UNCHANGED; auto &OMPInfoCache = static_cast(A.getInfoCache()); auto &FreeCall = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared]; Function *F = getAnchorScope(); auto *HS = A.lookupAAFor(IRPosition::function(*F), this, DepClassTy::OPTIONAL); ChangeStatus Changed = ChangeStatus::UNCHANGED; for (CallBase *CB : MallocCalls) { // Skip replacing this if HeapToStack has already claimed it. if (HS && HS->isAssumedHeapToStack(*CB)) continue; // Find the unique free call to remove it. SmallVector FreeCalls; for (auto *U : CB->users()) { CallBase *C = dyn_cast(U); if (C && C->getCalledFunction() == FreeCall.Declaration) FreeCalls.push_back(C); } if (FreeCalls.size() != 1) continue; ConstantInt *AllocSize = dyn_cast(CB->getArgOperand(0)); LLVM_DEBUG(dbgs() << TAG << "Replace globalization call in " << CB->getCaller()->getName() << " with " << AllocSize->getZExtValue() << " bytes of shared memory\n"); // Create a new shared memory buffer of the same size as the allocation // and replace all the uses of the original allocation with it. 
Module *M = CB->getModule(); Type *Int8Ty = Type::getInt8Ty(M->getContext()); Type *Int8ArrTy = ArrayType::get(Int8Ty, AllocSize->getZExtValue()); auto *SharedMem = new GlobalVariable( *M, Int8ArrTy, /* IsConstant */ false, GlobalValue::InternalLinkage, UndefValue::get(Int8ArrTy), CB->getName(), nullptr, GlobalValue::NotThreadLocal, static_cast(AddressSpace::Shared)); auto *NewBuffer = ConstantExpr::getPointerCast(SharedMem, Int8Ty->getPointerTo()); auto Remark = [&](OptimizationRemark OR) { return OR << "Replaced globalized variable with " << ore::NV("SharedMemory", AllocSize->getZExtValue()) << ((AllocSize->getZExtValue() != 1) ? " bytes " : " byte ") << "of shared memory."; }; A.emitRemark(CB, "OMP111", Remark); SharedMem->setAlignment(MaybeAlign(32)); A.changeValueAfterManifest(*CB, *NewBuffer); A.deleteAfterManifest(*CB); A.deleteAfterManifest(*FreeCalls.front()); NumBytesMovedToSharedMemory += AllocSize->getZExtValue(); Changed = ChangeStatus::CHANGED; } return Changed; } ChangeStatus updateImpl(Attributor &A) override { auto &OMPInfoCache = static_cast(A.getInfoCache()); auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; Function *F = getAnchorScope(); auto NumMallocCalls = MallocCalls.size(); // Only consider malloc calls executed by a single thread with a constant. for (User *U : RFI.Declaration->users()) { const auto &ED = A.getAAFor( *this, IRPosition::function(*F), DepClassTy::REQUIRED); if (CallBase *CB = dyn_cast(U)) if (!dyn_cast(CB->getArgOperand(0)) || !ED.isExecutedByInitialThreadOnly(*CB)) MallocCalls.erase(CB); } findPotentialRemovedFreeCalls(A); if (NumMallocCalls != MallocCalls.size()) return ChangeStatus::CHANGED; return ChangeStatus::UNCHANGED; } /// Collection of all malloc calls in a function. SmallPtrSet MallocCalls; /// Collection of potentially removed free calls in a function. 
SmallPtrSet PotentialRemovedFreeCalls; }; struct AAKernelInfo : public StateWrapper { using Base = StateWrapper; AAKernelInfo(const IRPosition &IRP, Attributor &A) : Base(IRP) {} /// Statistics are tracked as part of manifest for now. void trackStatistics() const override {} /// See AbstractAttribute::getAsStr() const std::string getAsStr() const override { if (!isValidState()) return ""; return std::string(SPMDCompatibilityTracker.isAssumed() ? "SPMD" : "generic") + std::string(SPMDCompatibilityTracker.isAtFixpoint() ? " [FIX]" : "") + std::string(" #PRs: ") + std::to_string(ReachedKnownParallelRegions.size()) + ", #Unknown PRs: " + std::to_string(ReachedUnknownParallelRegions.size()); } /// Create an abstract attribute biew for the position \p IRP. static AAKernelInfo &createForPosition(const IRPosition &IRP, Attributor &A); /// See AbstractAttribute::getName() const std::string getName() const override { return "AAKernelInfo"; } /// See AbstractAttribute::getIdAddr() const char *getIdAddr() const override { return &ID; } /// This function should return true if the type of the \p AA is AAKernelInfo static bool classof(const AbstractAttribute *AA) { return (AA->getIdAddr() == &ID); } static const char ID; }; /// The function kernel info abstract attribute, basically, what can we say /// about a function with regards to the KernelInfoState. struct AAKernelInfoFunction : AAKernelInfo { AAKernelInfoFunction(const IRPosition &IRP, Attributor &A) : AAKernelInfo(IRP, A) {} /// See AbstractAttribute::initialize(...). void initialize(Attributor &A) override { // This is a high-level transform that might change the constant arguments // of the init and dinit calls. We need to tell the Attributor about this // to avoid other parts using the current constant value for simpliication. auto &OMPInfoCache = static_cast(A.getInfoCache()); Function *Fn = getAnchorScope(); if (!OMPInfoCache.Kernels.count(Fn)) return; // Add itself to the reaching kernel and set IsKernelEntry. 
ReachingKernelEntries.insert(Fn); IsKernelEntry = true; OMPInformationCache::RuntimeFunctionInfo &InitRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init]; OMPInformationCache::RuntimeFunctionInfo &DeinitRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_deinit]; // For kernels we perform more initialization work, first we find the init // and deinit calls. auto StoreCallBase = [](Use &U, OMPInformationCache::RuntimeFunctionInfo &RFI, CallBase *&Storage) { CallBase *CB = OpenMPOpt::getCallIfRegularCall(U, &RFI); assert(CB && "Unexpected use of __kmpc_target_init or __kmpc_target_deinit!"); assert(!Storage && "Multiple uses of __kmpc_target_init or __kmpc_target_deinit!"); Storage = CB; return false; }; InitRFI.foreachUse( [&](Use &U, Function &) { StoreCallBase(U, InitRFI, KernelInitCB); return false; }, Fn); DeinitRFI.foreachUse( [&](Use &U, Function &) { StoreCallBase(U, DeinitRFI, KernelDeinitCB); return false; }, Fn); assert((KernelInitCB && KernelDeinitCB) && "Kernel without __kmpc_target_init or __kmpc_target_deinit!"); // For kernels we might need to initialize/finalize the IsSPMD state and // we need to register a simplification callback so that the Attributor // knows the constant arguments to __kmpc_target_init and // __kmpc_target_deinit might actually change. Attributor::SimplifictionCallbackTy StateMachineSimplifyCB = [&](const IRPosition &IRP, const AbstractAttribute *AA, bool &UsedAssumedInformation) -> Optional { // IRP represents the "use generic state machine" argument of an // __kmpc_target_init call. We will answer this one with the internal // state. As long as we are not in an invalid state, we will create a // custom state machine so the value should be a `i1 false`. If we are // in an invalid state, we won't change the value that is in the IR. 
if (!isValidState()) return nullptr; if (AA) A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); UsedAssumedInformation = !isAtFixpoint(); auto *FalseVal = ConstantInt::getBool(IRP.getAnchorValue().getContext(), 0); return FalseVal; }; Attributor::SimplifictionCallbackTy IsSPMDModeSimplifyCB = [&](const IRPosition &IRP, const AbstractAttribute *AA, bool &UsedAssumedInformation) -> Optional { // IRP represents the "SPMDCompatibilityTracker" argument of an // __kmpc_target_init or // __kmpc_target_deinit call. We will answer this one with the internal // state. if (!SPMDCompatibilityTracker.isValidState()) return nullptr; if (!SPMDCompatibilityTracker.isAtFixpoint()) { if (AA) A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); UsedAssumedInformation = true; } else { UsedAssumedInformation = false; } auto *Val = ConstantInt::getBool(IRP.getAnchorValue().getContext(), SPMDCompatibilityTracker.isAssumed()); return Val; }; Attributor::SimplifictionCallbackTy IsGenericModeSimplifyCB = [&](const IRPosition &IRP, const AbstractAttribute *AA, bool &UsedAssumedInformation) -> Optional { // IRP represents the "RequiresFullRuntime" argument of an // __kmpc_target_init or __kmpc_target_deinit call. We will answer this // one with the internal state of the SPMDCompatibilityTracker, so if // generic then true, if SPMD then false. 
if (!SPMDCompatibilityTracker.isValidState()) return nullptr; if (!SPMDCompatibilityTracker.isAtFixpoint()) { if (AA) A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); UsedAssumedInformation = true; } else { UsedAssumedInformation = false; } auto *Val = ConstantInt::getBool(IRP.getAnchorValue().getContext(), !SPMDCompatibilityTracker.isAssumed()); return Val; }; constexpr const int InitIsSPMDArgNo = 1; constexpr const int DeinitIsSPMDArgNo = 1; constexpr const int InitUseStateMachineArgNo = 2; constexpr const int InitRequiresFullRuntimeArgNo = 3; constexpr const int DeinitRequiresFullRuntimeArgNo = 2; A.registerSimplificationCallback( IRPosition::callsite_argument(*KernelInitCB, InitUseStateMachineArgNo), StateMachineSimplifyCB); A.registerSimplificationCallback( IRPosition::callsite_argument(*KernelInitCB, InitIsSPMDArgNo), IsSPMDModeSimplifyCB); A.registerSimplificationCallback( IRPosition::callsite_argument(*KernelDeinitCB, DeinitIsSPMDArgNo), IsSPMDModeSimplifyCB); A.registerSimplificationCallback( IRPosition::callsite_argument(*KernelInitCB, InitRequiresFullRuntimeArgNo), IsGenericModeSimplifyCB); A.registerSimplificationCallback( IRPosition::callsite_argument(*KernelDeinitCB, DeinitRequiresFullRuntimeArgNo), IsGenericModeSimplifyCB); // Check if we know we are in SPMD-mode already. ConstantInt *IsSPMDArg = dyn_cast(KernelInitCB->getArgOperand(InitIsSPMDArgNo)); if (IsSPMDArg && !IsSPMDArg->isZero()) SPMDCompatibilityTracker.indicateOptimisticFixpoint(); } /// Modify the IR based on the KernelInfoState as the fixpoint iteration is /// finished now. ChangeStatus manifest(Attributor &A) override { // If we are not looking at a kernel with __kmpc_target_init and // __kmpc_target_deinit call we cannot actually manifest the information. if (!KernelInitCB || !KernelDeinitCB) return ChangeStatus::UNCHANGED; // Known SPMD-mode kernels need no manifest changes. 
if (SPMDCompatibilityTracker.isKnown()) return ChangeStatus::UNCHANGED; // If we can we change the execution mode to SPMD-mode otherwise we build a // custom state machine. if (!changeToSPMDMode(A)) buildCustomStateMachine(A); return ChangeStatus::CHANGED; } bool changeToSPMDMode(Attributor &A) { auto &OMPInfoCache = static_cast(A.getInfoCache()); if (!SPMDCompatibilityTracker.isAssumed()) { for (Instruction *NonCompatibleI : SPMDCompatibilityTracker) { if (!NonCompatibleI) continue; // Skip diagnostics on calls to known OpenMP runtime functions for now. if (auto *CB = dyn_cast(NonCompatibleI)) if (OMPInfoCache.RTLFunctions.contains(CB->getCalledFunction())) continue; auto Remark = [&](OptimizationRemarkAnalysis ORA) { ORA << "Value has potential side effects preventing SPMD-mode " "execution"; if (isa(NonCompatibleI)) { ORA << ". Add `__attribute__((assume(\"ompx_spmd_amenable\")))` to " "the called function to override"; } return ORA << "."; }; A.emitRemark(NonCompatibleI, "OMP121", Remark); LLVM_DEBUG(dbgs() << TAG << "SPMD-incompatible side-effect: " << *NonCompatibleI << "\n"); } return false; } // Adjust the global exec mode flag that tells the runtime what mode this // kernel is executed in. Function *Kernel = getAnchorScope(); GlobalVariable *ExecMode = Kernel->getParent()->getGlobalVariable( (Kernel->getName() + "_exec_mode").str()); assert(ExecMode && "Kernel without exec mode?"); assert(ExecMode->getInitializer() && ExecMode->getInitializer()->isOneValue() && "Initially non-SPMD kernel has SPMD exec mode!"); // Set the global exec mode flag to indicate SPMD-Generic mode. constexpr int SPMDGeneric = 2; if (!ExecMode->getInitializer()->isZeroValue()) ExecMode->setInitializer( ConstantInt::get(ExecMode->getInitializer()->getType(), SPMDGeneric)); // Next rewrite the init and deinit calls to indicate we use SPMD-mode now. 
const int InitIsSPMDArgNo = 1; const int DeinitIsSPMDArgNo = 1; const int InitUseStateMachineArgNo = 2; const int InitRequiresFullRuntimeArgNo = 3; const int DeinitRequiresFullRuntimeArgNo = 2; auto &Ctx = getAnchorValue().getContext(); A.changeUseAfterManifest(KernelInitCB->getArgOperandUse(InitIsSPMDArgNo), *ConstantInt::getBool(Ctx, 1)); A.changeUseAfterManifest( KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), *ConstantInt::getBool(Ctx, 0)); A.changeUseAfterManifest( KernelDeinitCB->getArgOperandUse(DeinitIsSPMDArgNo), *ConstantInt::getBool(Ctx, 1)); A.changeUseAfterManifest( KernelInitCB->getArgOperandUse(InitRequiresFullRuntimeArgNo), *ConstantInt::getBool(Ctx, 0)); A.changeUseAfterManifest( KernelDeinitCB->getArgOperandUse(DeinitRequiresFullRuntimeArgNo), *ConstantInt::getBool(Ctx, 0)); ++NumOpenMPTargetRegionKernelsSPMD; auto Remark = [&](OptimizationRemark OR) { return OR << "Transformed generic-mode kernel to SPMD-mode."; }; A.emitRemark(KernelInitCB, "OMP120", Remark); return true; }; ChangeStatus buildCustomStateMachine(Attributor &A) { assert(ReachedKnownParallelRegions.isValidState() && "Custom state machine with invalid parallel region states?"); const int InitIsSPMDArgNo = 1; const int InitUseStateMachineArgNo = 2; // Check if the current configuration is non-SPMD and generic state machine. // If we already have SPMD mode or a custom state machine we do not need to // go any further. If it is anything but a constant something is weird and // we give up. ConstantInt *UseStateMachine = dyn_cast( KernelInitCB->getArgOperand(InitUseStateMachineArgNo)); ConstantInt *IsSPMD = dyn_cast(KernelInitCB->getArgOperand(InitIsSPMDArgNo)); // If we are stuck with generic mode, try to create a custom device (=GPU) // state machine which is specialized for the parallel regions that are // reachable by the kernel. 
if (!UseStateMachine || UseStateMachine->isZero() || !IsSPMD || !IsSPMD->isZero()) return ChangeStatus::UNCHANGED; // If not SPMD mode, indicate we use a custom state machine now. auto &Ctx = getAnchorValue().getContext(); auto *FalseVal = ConstantInt::getBool(Ctx, 0); A.changeUseAfterManifest( KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), *FalseVal); // If we don't actually need a state machine we are done here. This can // happen if there simply are no parallel regions. In the resulting kernel // all worker threads will simply exit right away, leaving the main thread // to do the work alone. if (ReachedKnownParallelRegions.empty() && ReachedUnknownParallelRegions.empty()) { ++NumOpenMPTargetRegionKernelsWithoutStateMachine; auto Remark = [&](OptimizationRemark OR) { return OR << "Removing unused state machine from generic-mode kernel."; }; A.emitRemark(KernelInitCB, "OMP130", Remark); return ChangeStatus::CHANGED; } // Keep track in the statistics of our new shiny custom state machine. if (ReachedUnknownParallelRegions.empty()) { ++NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback; auto Remark = [&](OptimizationRemark OR) { return OR << "Rewriting generic-mode kernel with a customized state " "machine."; }; A.emitRemark(KernelInitCB, "OMP131", Remark); } else { ++NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback; auto Remark = [&](OptimizationRemarkAnalysis OR) { return OR << "Generic-mode kernel is executed with a customized state " "machine that requires a fallback."; }; A.emitRemark(KernelInitCB, "OMP132", Remark); // Tell the user why we ended up with a fallback. for (CallBase *UnknownParallelRegionCB : ReachedUnknownParallelRegions) { if (!UnknownParallelRegionCB) continue; auto Remark = [&](OptimizationRemarkAnalysis ORA) { return ORA << "Call may contain unknown parallel regions. 
Use " << "`__attribute__((assume(\"omp_no_parallelism\")))` to " "override."; }; A.emitRemark(UnknownParallelRegionCB, "OMP133", Remark); } } // Create all the blocks: // // InitCB = __kmpc_target_init(...) // bool IsWorker = InitCB >= 0; // if (IsWorker) { // SMBeginBB: __kmpc_barrier_simple_spmd(...); // void *WorkFn; // bool Active = __kmpc_kernel_parallel(&WorkFn); // if (!WorkFn) return; // SMIsActiveCheckBB: if (Active) { // SMIfCascadeCurrentBB: if (WorkFn == ) // ParFn0(...); // SMIfCascadeCurrentBB: else if (WorkFn == ) // ParFn1(...); // ... // SMIfCascadeCurrentBB: else // ((WorkFnTy*)WorkFn)(...); // SMEndParallelBB: __kmpc_kernel_end_parallel(...); // } // SMDoneBB: __kmpc_barrier_simple_spmd(...); // goto SMBeginBB; // } // UserCodeEntryBB: // user code // __kmpc_target_deinit(...) // Function *Kernel = getAssociatedFunction(); assert(Kernel && "Expected an associated function!"); BasicBlock *InitBB = KernelInitCB->getParent(); BasicBlock *UserCodeEntryBB = InitBB->splitBasicBlock( KernelInitCB->getNextNode(), "thread.user_code.check"); BasicBlock *StateMachineBeginBB = BasicBlock::Create( Ctx, "worker_state_machine.begin", Kernel, UserCodeEntryBB); BasicBlock *StateMachineFinishedBB = BasicBlock::Create( Ctx, "worker_state_machine.finished", Kernel, UserCodeEntryBB); BasicBlock *StateMachineIsActiveCheckBB = BasicBlock::Create( Ctx, "worker_state_machine.is_active.check", Kernel, UserCodeEntryBB); BasicBlock *StateMachineIfCascadeCurrentBB = BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check", Kernel, UserCodeEntryBB); BasicBlock *StateMachineEndParallelBB = BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.end", Kernel, UserCodeEntryBB); BasicBlock *StateMachineDoneBarrierBB = BasicBlock::Create( Ctx, "worker_state_machine.done.barrier", Kernel, UserCodeEntryBB); A.registerManifestAddedBasicBlock(*InitBB); A.registerManifestAddedBasicBlock(*UserCodeEntryBB); A.registerManifestAddedBasicBlock(*StateMachineBeginBB); 
A.registerManifestAddedBasicBlock(*StateMachineFinishedBB); A.registerManifestAddedBasicBlock(*StateMachineIsActiveCheckBB); A.registerManifestAddedBasicBlock(*StateMachineIfCascadeCurrentBB); A.registerManifestAddedBasicBlock(*StateMachineEndParallelBB); A.registerManifestAddedBasicBlock(*StateMachineDoneBarrierBB); const DebugLoc &DLoc = KernelInitCB->getDebugLoc(); ReturnInst::Create(Ctx, StateMachineFinishedBB)->setDebugLoc(DLoc); InitBB->getTerminator()->eraseFromParent(); Instruction *IsWorker = ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_NE, KernelInitCB, ConstantInt::get(KernelInitCB->getType(), -1), "thread.is_worker", InitBB); IsWorker->setDebugLoc(DLoc); BranchInst::Create(StateMachineBeginBB, UserCodeEntryBB, IsWorker, InitBB); + Module &M = *Kernel->getParent(); + // Create local storage for the work function pointer. + const DataLayout &DL = M.getDataLayout(); Type *VoidPtrTy = Type::getInt8PtrTy(Ctx); - AllocaInst *WorkFnAI = new AllocaInst(VoidPtrTy, 0, "worker.work_fn.addr", - &Kernel->getEntryBlock().front()); + Instruction *WorkFnAI = + new AllocaInst(VoidPtrTy, DL.getAllocaAddrSpace(), nullptr, + "worker.work_fn.addr", &Kernel->getEntryBlock().front()); WorkFnAI->setDebugLoc(DLoc); auto &OMPInfoCache = static_cast(A.getInfoCache()); OMPInfoCache.OMPBuilder.updateToLocation( OpenMPIRBuilder::LocationDescription( IRBuilder<>::InsertPoint(StateMachineBeginBB, StateMachineBeginBB->end()), DLoc)); Value *Ident = KernelInitCB->getArgOperand(0); Value *GTid = KernelInitCB; - Module &M = *Kernel->getParent(); FunctionCallee BarrierFn = OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( M, OMPRTL___kmpc_barrier_simple_spmd); CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineBeginBB) ->setDebugLoc(DLoc); + if (WorkFnAI->getType()->getPointerAddressSpace() != + (unsigned int)AddressSpace::Generic) { + WorkFnAI = new AddrSpaceCastInst( + WorkFnAI, + PointerType::getWithSamePointeeType( + cast(WorkFnAI->getType()), + (unsigned 
int)AddressSpace::Generic), + WorkFnAI->getName() + ".generic", StateMachineBeginBB); + WorkFnAI->setDebugLoc(DLoc); + } + FunctionCallee KernelParallelFn = OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( M, OMPRTL___kmpc_kernel_parallel); Instruction *IsActiveWorker = CallInst::Create( KernelParallelFn, {WorkFnAI}, "worker.is_active", StateMachineBeginBB); IsActiveWorker->setDebugLoc(DLoc); Instruction *WorkFn = new LoadInst(VoidPtrTy, WorkFnAI, "worker.work_fn", StateMachineBeginBB); WorkFn->setDebugLoc(DLoc); FunctionType *ParallelRegionFnTy = FunctionType::get( Type::getVoidTy(Ctx), {Type::getInt16Ty(Ctx), Type::getInt32Ty(Ctx)}, false); Value *WorkFnCast = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( WorkFn, ParallelRegionFnTy->getPointerTo(), "worker.work_fn.addr_cast", StateMachineBeginBB); Instruction *IsDone = ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFn, Constant::getNullValue(VoidPtrTy), "worker.is_done", StateMachineBeginBB); IsDone->setDebugLoc(DLoc); BranchInst::Create(StateMachineFinishedBB, StateMachineIsActiveCheckBB, IsDone, StateMachineBeginBB) ->setDebugLoc(DLoc); BranchInst::Create(StateMachineIfCascadeCurrentBB, StateMachineDoneBarrierBB, IsActiveWorker, StateMachineIsActiveCheckBB) ->setDebugLoc(DLoc); Value *ZeroArg = Constant::getNullValue(ParallelRegionFnTy->getParamType(0)); // Now that we have most of the CFG skeleton it is time for the if-cascade // that checks the function pointer we got from the runtime against the // parallel regions we expect, if there are any. 
for (int i = 0, e = ReachedKnownParallelRegions.size(); i < e; ++i) { auto *ParallelRegion = ReachedKnownParallelRegions[i]; BasicBlock *PRExecuteBB = BasicBlock::Create( Ctx, "worker_state_machine.parallel_region.execute", Kernel, StateMachineEndParallelBB); CallInst::Create(ParallelRegion, {ZeroArg, GTid}, "", PRExecuteBB) ->setDebugLoc(DLoc); BranchInst::Create(StateMachineEndParallelBB, PRExecuteBB) ->setDebugLoc(DLoc); BasicBlock *PRNextBB = BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check", Kernel, StateMachineEndParallelBB); // Check if we need to compare the pointer at all or if we can just // call the parallel region function. Value *IsPR; if (i + 1 < e || !ReachedUnknownParallelRegions.empty()) { Instruction *CmpI = ICmpInst::Create( ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFnCast, ParallelRegion, "worker.check_parallel_region", StateMachineIfCascadeCurrentBB); CmpI->setDebugLoc(DLoc); IsPR = CmpI; } else { IsPR = ConstantInt::getTrue(Ctx); } BranchInst::Create(PRExecuteBB, PRNextBB, IsPR, StateMachineIfCascadeCurrentBB) ->setDebugLoc(DLoc); StateMachineIfCascadeCurrentBB = PRNextBB; } // At the end of the if-cascade we place the indirect function pointer call // in case we might need it, that is if there can be parallel regions we // have not handled in the if-cascade above. 
if (!ReachedUnknownParallelRegions.empty()) { StateMachineIfCascadeCurrentBB->setName( "worker_state_machine.parallel_region.fallback.execute"); CallInst::Create(ParallelRegionFnTy, WorkFnCast, {ZeroArg, GTid}, "", StateMachineIfCascadeCurrentBB) ->setDebugLoc(DLoc); } BranchInst::Create(StateMachineEndParallelBB, StateMachineIfCascadeCurrentBB) ->setDebugLoc(DLoc); CallInst::Create(OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction( M, OMPRTL___kmpc_kernel_end_parallel), {}, "", StateMachineEndParallelBB) ->setDebugLoc(DLoc); BranchInst::Create(StateMachineDoneBarrierBB, StateMachineEndParallelBB) ->setDebugLoc(DLoc); CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineDoneBarrierBB) ->setDebugLoc(DLoc); BranchInst::Create(StateMachineBeginBB, StateMachineDoneBarrierBB) ->setDebugLoc(DLoc); return ChangeStatus::CHANGED; } /// Fixpoint iteration update function. Will be called every time a dependence /// changed its state (and in the beginning). ChangeStatus updateImpl(Attributor &A) override { KernelInfoState StateBefore = getState(); // Callback to check a read/write instruction. auto CheckRWInst = [&](Instruction &I) { // We handle calls later. if (isa(I)) return true; // We only care about write effects. if (!I.mayWriteToMemory()) return true; if (auto *SI = dyn_cast(&I)) { SmallVector Objects; getUnderlyingObjects(SI->getPointerOperand(), Objects); if (llvm::all_of(Objects, [](const Value *Obj) { return isa(Obj); })) return true; } // For now we give up on everything but stores. SPMDCompatibilityTracker.insert(&I); return true; }; bool UsedAssumedInformationInCheckRWInst = false; if (!SPMDCompatibilityTracker.isAtFixpoint()) if (!A.checkForAllReadWriteInstructions( CheckRWInst, *this, UsedAssumedInformationInCheckRWInst)) SPMDCompatibilityTracker.indicatePessimisticFixpoint(); if (!IsKernelEntry) { updateReachingKernelEntries(A); updateParallelLevels(A); } // Callback to check a call instruction. 
bool AllSPMDStatesWereFixed = true; auto CheckCallInst = [&](Instruction &I) { auto &CB = cast(I); auto &CBAA = A.getAAFor( *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL); getState() ^= CBAA.getState(); AllSPMDStatesWereFixed &= CBAA.SPMDCompatibilityTracker.isAtFixpoint(); return true; }; bool UsedAssumedInformationInCheckCallInst = false; if (!A.checkForAllCallLikeInstructions( CheckCallInst, *this, UsedAssumedInformationInCheckCallInst)) return indicatePessimisticFixpoint(); // If we haven't used any assumed information for the SPMD state we can fix // it. if (!UsedAssumedInformationInCheckRWInst && !UsedAssumedInformationInCheckCallInst && AllSPMDStatesWereFixed) SPMDCompatibilityTracker.indicateOptimisticFixpoint(); return StateBefore == getState() ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED; } private: /// Update info regarding reaching kernels. void updateReachingKernelEntries(Attributor &A) { auto PredCallSite = [&](AbstractCallSite ACS) { Function *Caller = ACS.getInstruction()->getFunction(); assert(Caller && "Caller is nullptr"); auto &CAA = A.getOrCreateAAFor( IRPosition::function(*Caller), this, DepClassTy::REQUIRED); if (CAA.ReachingKernelEntries.isValidState()) { ReachingKernelEntries ^= CAA.ReachingKernelEntries; return true; } // We lost track of the caller of the associated function, any kernel // could reach now. ReachingKernelEntries.indicatePessimisticFixpoint(); return true; }; bool AllCallSitesKnown; if (!A.checkForAllCallSites(PredCallSite, *this, true /* RequireAllCallSites */, AllCallSitesKnown)) ReachingKernelEntries.indicatePessimisticFixpoint(); } /// Update info regarding parallel levels. 
void updateParallelLevels(Attributor &A) { auto &OMPInfoCache = static_cast(A.getInfoCache()); OMPInformationCache::RuntimeFunctionInfo &Parallel51RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51]; auto PredCallSite = [&](AbstractCallSite ACS) { Function *Caller = ACS.getInstruction()->getFunction(); assert(Caller && "Caller is nullptr"); auto &CAA = A.getOrCreateAAFor(IRPosition::function(*Caller)); if (CAA.ParallelLevels.isValidState()) { // Any function that is called by `__kmpc_parallel_51` will not be // folded as the parallel level in the function is updated. In order to // get it right, all the analysis would depend on the implentation. That // said, if in the future any change to the implementation, the analysis // could be wrong. As a consequence, we are just conservative here. if (Caller == Parallel51RFI.Declaration) { ParallelLevels.indicatePessimisticFixpoint(); return true; } ParallelLevels ^= CAA.ParallelLevels; return true; } // We lost track of the caller of the associated function, any kernel // could reach now. ParallelLevels.indicatePessimisticFixpoint(); return true; }; bool AllCallSitesKnown = true; if (!A.checkForAllCallSites(PredCallSite, *this, true /* RequireAllCallSites */, AllCallSitesKnown)) ParallelLevels.indicatePessimisticFixpoint(); } }; /// The call site kernel info abstract attribute, basically, what can we say /// about a call site with regards to the KernelInfoState. For now this simply /// forwards the information from the callee. struct AAKernelInfoCallSite : AAKernelInfo { AAKernelInfoCallSite(const IRPosition &IRP, Attributor &A) : AAKernelInfo(IRP, A) {} /// See AbstractAttribute::initialize(...). void initialize(Attributor &A) override { AAKernelInfo::initialize(A); CallBase &CB = cast(getAssociatedValue()); Function *Callee = getAssociatedFunction(); // Helper to lookup an assumption string. 
auto HasAssumption = [](Function *Fn, StringRef AssumptionStr) { return Fn && hasAssumption(*Fn, AssumptionStr); }; // Check for SPMD-mode assumptions. if (HasAssumption(Callee, "ompx_spmd_amenable")) SPMDCompatibilityTracker.indicateOptimisticFixpoint(); // First weed out calls we do not care about, that is readonly/readnone // calls, intrinsics, and "no_openmp" calls. Neither of these can reach a // parallel region or anything else we are looking for. if (!CB.mayWriteToMemory() || isa(CB)) { indicateOptimisticFixpoint(); return; } // Next we check if we know the callee. If it is a known OpenMP function // we will handle them explicitly in the switch below. If it is not, we // will use an AAKernelInfo object on the callee to gather information and // merge that into the current state. The latter happens in the updateImpl. auto &OMPInfoCache = static_cast(A.getInfoCache()); const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee); if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) { // Unknown caller or declarations are not analyzable, we give up. if (!Callee || !A.isFunctionIPOAmendable(*Callee)) { // Unknown callees might contain parallel regions, except if they have // an appropriate assumption attached. if (!(HasAssumption(Callee, "omp_no_openmp") || HasAssumption(Callee, "omp_no_parallelism"))) ReachedUnknownParallelRegions.insert(&CB); // If SPMDCompatibilityTracker is not fixed, we need to give up on the // idea we can run something unknown in SPMD-mode. if (!SPMDCompatibilityTracker.isAtFixpoint()) SPMDCompatibilityTracker.insert(&CB); // We have updated the state for this unknown call properly, there won't // be any change so we indicate a fixpoint. indicateOptimisticFixpoint(); } // If the callee is known and can be used in IPO, we will update the state // based on the callee state in updateImpl. 
return; } const unsigned int WrapperFunctionArgNo = 6; RuntimeFunction RF = It->getSecond(); switch (RF) { // All the functions we know are compatible with SPMD mode. case OMPRTL___kmpc_is_spmd_exec_mode: case OMPRTL___kmpc_for_static_fini: case OMPRTL___kmpc_global_thread_num: case OMPRTL___kmpc_get_hardware_num_threads_in_block: case OMPRTL___kmpc_get_hardware_num_blocks: case OMPRTL___kmpc_single: case OMPRTL___kmpc_end_single: case OMPRTL___kmpc_master: case OMPRTL___kmpc_end_master: case OMPRTL___kmpc_barrier: break; case OMPRTL___kmpc_for_static_init_4: case OMPRTL___kmpc_for_static_init_4u: case OMPRTL___kmpc_for_static_init_8: case OMPRTL___kmpc_for_static_init_8u: { // Check the schedule and allow static schedule in SPMD mode. unsigned ScheduleArgOpNo = 2; auto *ScheduleTypeCI = dyn_cast(CB.getArgOperand(ScheduleArgOpNo)); unsigned ScheduleTypeVal = ScheduleTypeCI ? ScheduleTypeCI->getZExtValue() : 0; switch (OMPScheduleType(ScheduleTypeVal)) { case OMPScheduleType::Static: case OMPScheduleType::StaticChunked: case OMPScheduleType::Distribute: case OMPScheduleType::DistributeChunked: break; default: SPMDCompatibilityTracker.insert(&CB); break; }; } break; case OMPRTL___kmpc_target_init: KernelInitCB = &CB; break; case OMPRTL___kmpc_target_deinit: KernelDeinitCB = &CB; break; case OMPRTL___kmpc_parallel_51: if (auto *ParallelRegion = dyn_cast( CB.getArgOperand(WrapperFunctionArgNo)->stripPointerCasts())) { ReachedKnownParallelRegions.insert(ParallelRegion); break; } // The condition above should usually get the parallel region function // pointer and record it. In the off chance it doesn't we assume the // worst. ReachedUnknownParallelRegions.insert(&CB); break; case OMPRTL___kmpc_omp_task: // We do not look into tasks right now, just give up. 
SPMDCompatibilityTracker.insert(&CB); ReachedUnknownParallelRegions.insert(&CB); break; case OMPRTL___kmpc_alloc_shared: case OMPRTL___kmpc_free_shared: // Return without setting a fixpoint, to be resolved in updateImpl. return; default: // Unknown OpenMP runtime calls cannot be executed in SPMD-mode, // generally. SPMDCompatibilityTracker.insert(&CB); break; } // All other OpenMP runtime calls will not reach parallel regions so they // can be safely ignored for now. Since it is a known OpenMP runtime call we // have now modeled all effects and there is no need for any update. indicateOptimisticFixpoint(); } ChangeStatus updateImpl(Attributor &A) override { // TODO: Once we have call site specific value information we can provide // call site specific liveness information and then it makes // sense to specialize attributes for call sites arguments instead of // redirecting requests to the callee argument. Function *F = getAssociatedFunction(); auto &OMPInfoCache = static_cast(A.getInfoCache()); const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(F); // If F is not a runtime function, propagate the AAKernelInfo of the callee. if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) { const IRPosition &FnPos = IRPosition::function(*F); auto &FnAA = A.getAAFor(*this, FnPos, DepClassTy::REQUIRED); if (getState() == FnAA.getState()) return ChangeStatus::UNCHANGED; getState() = FnAA.getState(); return ChangeStatus::CHANGED; } // F is a runtime function that allocates or frees memory, check // AAHeapToStack and AAHeapToShared. 
KernelInfoState StateBefore = getState(); assert((It->getSecond() == OMPRTL___kmpc_alloc_shared || It->getSecond() == OMPRTL___kmpc_free_shared) && "Expected a __kmpc_alloc_shared or __kmpc_free_shared runtime call"); CallBase &CB = cast(getAssociatedValue()); auto &HeapToStackAA = A.getAAFor( *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL); auto &HeapToSharedAA = A.getAAFor( *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL); RuntimeFunction RF = It->getSecond(); switch (RF) { // If neither HeapToStack nor HeapToShared assume the call is removed, // assume SPMD incompatibility. case OMPRTL___kmpc_alloc_shared: if (!HeapToStackAA.isAssumedHeapToStack(CB) && !HeapToSharedAA.isAssumedHeapToShared(CB)) SPMDCompatibilityTracker.insert(&CB); break; case OMPRTL___kmpc_free_shared: if (!HeapToStackAA.isAssumedHeapToStackRemovedFree(CB) && !HeapToSharedAA.isAssumedHeapToSharedRemovedFree(CB)) SPMDCompatibilityTracker.insert(&CB); break; default: SPMDCompatibilityTracker.insert(&CB); } return StateBefore == getState() ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED; } }; struct AAFoldRuntimeCall : public StateWrapper { using Base = StateWrapper; AAFoldRuntimeCall(const IRPosition &IRP, Attributor &A) : Base(IRP) {} /// Statistics are tracked as part of manifest for now. void trackStatistics() const override {} /// Create an abstract attribute biew for the position \p IRP. 
static AAFoldRuntimeCall &createForPosition(const IRPosition &IRP, Attributor &A); /// See AbstractAttribute::getName() const std::string getName() const override { return "AAFoldRuntimeCall"; } /// See AbstractAttribute::getIdAddr() const char *getIdAddr() const override { return &ID; } /// This function should return true if the type of the \p AA is /// AAFoldRuntimeCall static bool classof(const AbstractAttribute *AA) { return (AA->getIdAddr() == &ID); } static const char ID; }; struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall { AAFoldRuntimeCallCallSiteReturned(const IRPosition &IRP, Attributor &A) : AAFoldRuntimeCall(IRP, A) {} /// See AbstractAttribute::getAsStr() const std::string getAsStr() const override { if (!isValidState()) return ""; std::string Str("simplified value: "); if (!SimplifiedValue.hasValue()) return Str + std::string("none"); if (!SimplifiedValue.getValue()) return Str + std::string("nullptr"); if (ConstantInt *CI = dyn_cast(SimplifiedValue.getValue())) return Str + std::to_string(CI->getSExtValue()); return Str + std::string("unknown"); } void initialize(Attributor &A) override { Function *Callee = getAssociatedFunction(); auto &OMPInfoCache = static_cast(A.getInfoCache()); const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee); assert(It != OMPInfoCache.RuntimeFunctionIDMap.end() && "Expected a known OpenMP runtime function"); RFKind = It->getSecond(); CallBase &CB = cast(getAssociatedValue()); A.registerSimplificationCallback( IRPosition::callsite_returned(CB), [&](const IRPosition &IRP, const AbstractAttribute *AA, bool &UsedAssumedInformation) -> Optional { assert((isValidState() || (SimplifiedValue.hasValue() && SimplifiedValue.getValue() == nullptr)) && "Unexpected invalid state!"); if (!isAtFixpoint()) { UsedAssumedInformation = true; if (AA) A.recordDependence(*this, *AA, DepClassTy::OPTIONAL); } return SimplifiedValue; }); } ChangeStatus updateImpl(Attributor &A) override { ChangeStatus Changed = 
ChangeStatus::UNCHANGED; switch (RFKind) { case OMPRTL___kmpc_is_spmd_exec_mode: Changed |= foldIsSPMDExecMode(A); break; case OMPRTL___kmpc_is_generic_main_thread_id: Changed |= foldIsGenericMainThread(A); break; case OMPRTL___kmpc_parallel_level: Changed |= foldParallelLevel(A); break; case OMPRTL___kmpc_get_hardware_num_threads_in_block: Changed = Changed | foldKernelFnAttribute(A, "omp_target_thread_limit"); break; case OMPRTL___kmpc_get_hardware_num_blocks: Changed = Changed | foldKernelFnAttribute(A, "omp_target_num_teams"); break; default: llvm_unreachable("Unhandled OpenMP runtime function!"); } return Changed; } ChangeStatus manifest(Attributor &A) override { ChangeStatus Changed = ChangeStatus::UNCHANGED; if (SimplifiedValue.hasValue() && SimplifiedValue.getValue()) { Instruction &CB = *getCtxI(); A.changeValueAfterManifest(CB, **SimplifiedValue); A.deleteAfterManifest(CB); LLVM_DEBUG(dbgs() << TAG << "Folding runtime call: " << CB << " with " << **SimplifiedValue << "\n"); Changed = ChangeStatus::CHANGED; } return Changed; } ChangeStatus indicatePessimisticFixpoint() override { SimplifiedValue = nullptr; return AAFoldRuntimeCall::indicatePessimisticFixpoint(); } private: /// Fold __kmpc_is_spmd_exec_mode into a constant if possible. 
ChangeStatus foldIsSPMDExecMode(Attributor &A) { Optional SimplifiedValueBefore = SimplifiedValue; unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0; unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0; auto &CallerKernelInfoAA = A.getAAFor( *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) return indicatePessimisticFixpoint(); for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { auto &AA = A.getAAFor(*this, IRPosition::function(*K), DepClassTy::REQUIRED); if (!AA.isValidState()) { SimplifiedValue = nullptr; return indicatePessimisticFixpoint(); } if (AA.SPMDCompatibilityTracker.isAssumed()) { if (AA.SPMDCompatibilityTracker.isAtFixpoint()) ++KnownSPMDCount; else ++AssumedSPMDCount; } else { if (AA.SPMDCompatibilityTracker.isAtFixpoint()) ++KnownNonSPMDCount; else ++AssumedNonSPMDCount; } } if ((AssumedSPMDCount + KnownSPMDCount) && (AssumedNonSPMDCount + KnownNonSPMDCount)) return indicatePessimisticFixpoint(); auto &Ctx = getAnchorValue().getContext(); if (KnownSPMDCount || AssumedSPMDCount) { assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 && "Expected only SPMD kernels!"); // All reaching kernels are in SPMD mode. Update all function calls to // __kmpc_is_spmd_exec_mode to 1. SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true); } else if (KnownNonSPMDCount || AssumedNonSPMDCount) { assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 && "Expected only non-SPMD kernels!"); // All reaching kernels are in non-SPMD mode. Update all function // calls to __kmpc_is_spmd_exec_mode to 0. SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), false); } else { // We have empty reaching kernels, therefore we cannot tell if the // associated call site can be folded. At this moment, SimplifiedValue // must be none. assert(!SimplifiedValue.hasValue() && "SimplifiedValue should be none"); } return SimplifiedValue == SimplifiedValueBefore ? 
ChangeStatus::UNCHANGED : ChangeStatus::CHANGED; } /// Fold __kmpc_is_generic_main_thread_id into a constant if possible. ChangeStatus foldIsGenericMainThread(Attributor &A) { Optional SimplifiedValueBefore = SimplifiedValue; CallBase &CB = cast(getAssociatedValue()); Function *F = CB.getFunction(); const auto &ExecutionDomainAA = A.getAAFor( *this, IRPosition::function(*F), DepClassTy::REQUIRED); if (!ExecutionDomainAA.isValidState()) return indicatePessimisticFixpoint(); auto &Ctx = getAnchorValue().getContext(); if (ExecutionDomainAA.isExecutedByInitialThreadOnly(CB)) SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true); else return indicatePessimisticFixpoint(); return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED; } /// Fold __kmpc_parallel_level into a constant if possible. ChangeStatus foldParallelLevel(Attributor &A) { Optional SimplifiedValueBefore = SimplifiedValue; auto &CallerKernelInfoAA = A.getAAFor( *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); if (!CallerKernelInfoAA.ParallelLevels.isValidState()) return indicatePessimisticFixpoint(); if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) return indicatePessimisticFixpoint(); if (CallerKernelInfoAA.ReachingKernelEntries.empty()) { assert(!SimplifiedValue.hasValue() && "SimplifiedValue should keep none at this point"); return ChangeStatus::UNCHANGED; } unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0; unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0; for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { auto &AA = A.getAAFor(*this, IRPosition::function(*K), DepClassTy::REQUIRED); if (!AA.SPMDCompatibilityTracker.isValidState()) return indicatePessimisticFixpoint(); if (AA.SPMDCompatibilityTracker.isAssumed()) { if (AA.SPMDCompatibilityTracker.isAtFixpoint()) ++KnownSPMDCount; else ++AssumedSPMDCount; } else { if (AA.SPMDCompatibilityTracker.isAtFixpoint()) ++KnownNonSPMDCount; else 
++AssumedNonSPMDCount; } } if ((AssumedSPMDCount + KnownSPMDCount) && (AssumedNonSPMDCount + KnownNonSPMDCount)) return indicatePessimisticFixpoint(); auto &Ctx = getAnchorValue().getContext(); // If the caller can only be reached by SPMD kernel entries, the parallel // level is 1. Similarly, if the caller can only be reached by non-SPMD // kernel entries, it is 0. if (AssumedSPMDCount || KnownSPMDCount) { assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 && "Expected only SPMD kernels!"); SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 1); } else { assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 && "Expected only non-SPMD kernels!"); SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 0); } return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED; } ChangeStatus foldKernelFnAttribute(Attributor &A, llvm::StringRef Attr) { // Specialize only if all the calls agree with the attribute constant value int32_t CurrentAttrValue = -1; Optional SimplifiedValueBefore = SimplifiedValue; auto &CallerKernelInfoAA = A.getAAFor( *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState()) return indicatePessimisticFixpoint(); // Iterate over the kernels that reach this function for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) { int32_t NextAttrVal = -1; if (K->hasFnAttribute(Attr)) NextAttrVal = std::stoi(K->getFnAttribute(Attr).getValueAsString().str()); if (NextAttrVal == -1 || (CurrentAttrValue != -1 && CurrentAttrValue != NextAttrVal)) return indicatePessimisticFixpoint(); CurrentAttrValue = NextAttrVal; } if (CurrentAttrValue != -1) { auto &Ctx = getAnchorValue().getContext(); SimplifiedValue = ConstantInt::get(Type::getInt32Ty(Ctx), CurrentAttrValue); } return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED; } /// An optional value the associated value is assumed to fold to. 
That is, we /// assume the associated value (which is a call) can be replaced by this /// simplified value. Optional SimplifiedValue; /// The runtime function kind of the callee of the associated call site. RuntimeFunction RFKind; }; } // namespace /// Register folding callsite void OpenMPOpt::registerFoldRuntimeCall(RuntimeFunction RF) { auto &RFI = OMPInfoCache.RFIs[RF]; RFI.foreachUse(SCC, [&](Use &U, Function &F) { CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &RFI); if (!CI) return false; A.getOrCreateAAFor( IRPosition::callsite_returned(*CI), /* QueryingAA */ nullptr, DepClassTy::NONE, /* ForceUpdate */ false, /* UpdateAfterInit */ false); return false; }); } void OpenMPOpt::registerAAs(bool IsModulePass) { if (SCC.empty()) return; if (IsModulePass) { // Ensure we create the AAKernelInfo AAs first and without triggering an // update. This will make sure we register all value simplification // callbacks before any other AA has the chance to create an AAValueSimplify // or similar. for (Function *Kernel : OMPInfoCache.Kernels) A.getOrCreateAAFor( IRPosition::function(*Kernel), /* QueryingAA */ nullptr, DepClassTy::NONE, /* ForceUpdate */ false, /* UpdateAfterInit */ false); registerFoldRuntimeCall(OMPRTL___kmpc_is_generic_main_thread_id); registerFoldRuntimeCall(OMPRTL___kmpc_is_spmd_exec_mode); registerFoldRuntimeCall(OMPRTL___kmpc_parallel_level); registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_threads_in_block); registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_blocks); } // Create CallSite AA for all Getters. 
for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) { auto ICVInfo = OMPInfoCache.ICVs[static_cast(Idx)]; auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter]; auto CreateAA = [&](Use &U, Function &Caller) { CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI); if (!CI) return false; auto &CB = cast(*CI); IRPosition CBPos = IRPosition::callsite_function(CB); A.getOrCreateAAFor(CBPos); return false; }; GetterRFI.foreachUse(SCC, CreateAA); } auto &GlobalizationRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared]; auto CreateAA = [&](Use &U, Function &F) { A.getOrCreateAAFor(IRPosition::function(F)); return false; }; GlobalizationRFI.foreachUse(SCC, CreateAA); // Create an ExecutionDomain AA for every function and a HeapToStack AA for // every function if there is a device kernel. if (!isOpenMPDevice(M)) return; for (auto *F : SCC) { if (F->isDeclaration()) continue; A.getOrCreateAAFor(IRPosition::function(*F)); A.getOrCreateAAFor(IRPosition::function(*F)); for (auto &I : instructions(*F)) { if (auto *LI = dyn_cast(&I)) { bool UsedAssumedInformation = false; A.getAssumedSimplified(IRPosition::value(*LI), /* AA */ nullptr, UsedAssumedInformation); } } } } const char AAICVTracker::ID = 0; const char AAKernelInfo::ID = 0; const char AAExecutionDomain::ID = 0; const char AAHeapToShared::ID = 0; const char AAFoldRuntimeCall::ID = 0; AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP, Attributor &A) { AAICVTracker *AA = nullptr; switch (IRP.getPositionKind()) { case IRPosition::IRP_INVALID: case IRPosition::IRP_FLOAT: case IRPosition::IRP_ARGUMENT: case IRPosition::IRP_CALL_SITE_ARGUMENT: llvm_unreachable("ICVTracker can only be created for function position!"); case IRPosition::IRP_RETURNED: AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A); break; case IRPosition::IRP_CALL_SITE_RETURNED: AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A); break; case IRPosition::IRP_CALL_SITE: AA = new (A.Allocator) 
AAICVTrackerCallSite(IRP, A); break; case IRPosition::IRP_FUNCTION: AA = new (A.Allocator) AAICVTrackerFunction(IRP, A); break; } return *AA; } AAExecutionDomain &AAExecutionDomain::createForPosition(const IRPosition &IRP, Attributor &A) { AAExecutionDomainFunction *AA = nullptr; switch (IRP.getPositionKind()) { case IRPosition::IRP_INVALID: case IRPosition::IRP_FLOAT: case IRPosition::IRP_ARGUMENT: case IRPosition::IRP_CALL_SITE_ARGUMENT: case IRPosition::IRP_RETURNED: case IRPosition::IRP_CALL_SITE_RETURNED: case IRPosition::IRP_CALL_SITE: llvm_unreachable( "AAExecutionDomain can only be created for function position!"); case IRPosition::IRP_FUNCTION: AA = new (A.Allocator) AAExecutionDomainFunction(IRP, A); break; } return *AA; } AAHeapToShared &AAHeapToShared::createForPosition(const IRPosition &IRP, Attributor &A) { AAHeapToSharedFunction *AA = nullptr; switch (IRP.getPositionKind()) { case IRPosition::IRP_INVALID: case IRPosition::IRP_FLOAT: case IRPosition::IRP_ARGUMENT: case IRPosition::IRP_CALL_SITE_ARGUMENT: case IRPosition::IRP_RETURNED: case IRPosition::IRP_CALL_SITE_RETURNED: case IRPosition::IRP_CALL_SITE: llvm_unreachable( "AAHeapToShared can only be created for function position!"); case IRPosition::IRP_FUNCTION: AA = new (A.Allocator) AAHeapToSharedFunction(IRP, A); break; } return *AA; } AAKernelInfo &AAKernelInfo::createForPosition(const IRPosition &IRP, Attributor &A) { AAKernelInfo *AA = nullptr; switch (IRP.getPositionKind()) { case IRPosition::IRP_INVALID: case IRPosition::IRP_FLOAT: case IRPosition::IRP_ARGUMENT: case IRPosition::IRP_RETURNED: case IRPosition::IRP_CALL_SITE_RETURNED: case IRPosition::IRP_CALL_SITE_ARGUMENT: llvm_unreachable("KernelInfo can only be created for function position!"); case IRPosition::IRP_CALL_SITE: AA = new (A.Allocator) AAKernelInfoCallSite(IRP, A); break; case IRPosition::IRP_FUNCTION: AA = new (A.Allocator) AAKernelInfoFunction(IRP, A); break; } return *AA; } AAFoldRuntimeCall 
&AAFoldRuntimeCall::createForPosition(const IRPosition &IRP, Attributor &A) { AAFoldRuntimeCall *AA = nullptr; switch (IRP.getPositionKind()) { case IRPosition::IRP_INVALID: case IRPosition::IRP_FLOAT: case IRPosition::IRP_ARGUMENT: case IRPosition::IRP_RETURNED: case IRPosition::IRP_FUNCTION: case IRPosition::IRP_CALL_SITE: case IRPosition::IRP_CALL_SITE_ARGUMENT: llvm_unreachable("KernelInfo can only be created for call site position!"); case IRPosition::IRP_CALL_SITE_RETURNED: AA = new (A.Allocator) AAFoldRuntimeCallCallSiteReturned(IRP, A); break; } return *AA; } PreservedAnalyses OpenMPOptPass::run(Module &M, ModuleAnalysisManager &AM) { if (!containsOpenMP(M)) return PreservedAnalyses::all(); if (DisableOpenMPOptimizations) return PreservedAnalyses::all(); FunctionAnalysisManager &FAM = AM.getResult(M).getManager(); KernelSet Kernels = getDeviceKernels(M); auto IsCalled = [&](Function &F) { if (Kernels.contains(&F)) return true; for (const User *U : F.users()) if (!isa(U)) return true; return false; }; auto EmitRemark = [&](Function &F) { auto &ORE = FAM.getResult(F); ORE.emit([&]() { OptimizationRemarkAnalysis ORA(DEBUG_TYPE, "OMP140", &F); return ORA << "Could not internalize function. " << "Some optimizations may not be possible. [OMP140]"; }); }; // Create internal copies of each function if this is a kernel Module. This // allows iterprocedural passes to see every call edge. DenseMap InternalizedMap; if (isOpenMPDevice(M)) { SmallPtrSet InternalizeFns; for (Function &F : M) if (!F.isDeclaration() && !Kernels.contains(&F) && IsCalled(F) && !DisableInternalization) { if (Attributor::isInternalizable(F)) { InternalizeFns.insert(&F); } else if (!F.hasLocalLinkage() && !F.hasFnAttribute(Attribute::Cold)) { EmitRemark(F); } } Attributor::internalizeFunctions(InternalizeFns, InternalizedMap); } // Look at every function in the Module unless it was internalized. 
SmallVector SCC; for (Function &F : M) if (!F.isDeclaration() && !InternalizedMap.lookup(&F)) SCC.push_back(&F); if (SCC.empty()) return PreservedAnalyses::all(); AnalysisGetter AG(FAM); auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & { return FAM.getResult(*F); }; BumpPtrAllocator Allocator; CallGraphUpdater CGUpdater; SetVector Functions(SCC.begin(), SCC.end()); OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ Functions, Kernels); unsigned MaxFixpointIterations = (isOpenMPDevice(M)) ? 128 : 32; Attributor A(Functions, InfoCache, CGUpdater, nullptr, true, false, MaxFixpointIterations, OREGetter, DEBUG_TYPE); OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A); bool Changed = OMPOpt.run(true); if (Changed) return PreservedAnalyses::none(); return PreservedAnalyses::all(); } PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR) { if (!containsOpenMP(*C.begin()->getFunction().getParent())) return PreservedAnalyses::all(); if (DisableOpenMPOptimizations) return PreservedAnalyses::all(); SmallVector SCC; // If there are kernels in the module, we have to run on all SCC's. for (LazyCallGraph::Node &N : C) { Function *Fn = &N.getFunction(); SCC.push_back(Fn); } if (SCC.empty()) return PreservedAnalyses::all(); Module &M = *C.begin()->getFunction().getParent(); KernelSet Kernels = getDeviceKernels(M); FunctionAnalysisManager &FAM = AM.getResult(C, CG).getManager(); AnalysisGetter AG(FAM); auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & { return FAM.getResult(*F); }; BumpPtrAllocator Allocator; CallGraphUpdater CGUpdater; CGUpdater.initialize(CG, C, AM, UR); SetVector Functions(SCC.begin(), SCC.end()); OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator, /*CGSCC*/ Functions, Kernels); unsigned MaxFixpointIterations = (isOpenMPDevice(M)) ? 
128 : 32; Attributor A(Functions, InfoCache, CGUpdater, nullptr, false, true, MaxFixpointIterations, OREGetter, DEBUG_TYPE); OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A); bool Changed = OMPOpt.run(false); if (Changed) return PreservedAnalyses::none(); return PreservedAnalyses::all(); } namespace { struct OpenMPOptCGSCCLegacyPass : public CallGraphSCCPass { CallGraphUpdater CGUpdater; static char ID; OpenMPOptCGSCCLegacyPass() : CallGraphSCCPass(ID) { initializeOpenMPOptCGSCCLegacyPassPass(*PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override { CallGraphSCCPass::getAnalysisUsage(AU); } bool runOnSCC(CallGraphSCC &CGSCC) override { if (!containsOpenMP(CGSCC.getCallGraph().getModule())) return false; if (DisableOpenMPOptimizations || skipSCC(CGSCC)) return false; SmallVector SCC; // If there are kernels in the module, we have to run on all SCC's. for (CallGraphNode *CGN : CGSCC) { Function *Fn = CGN->getFunction(); if (!Fn || Fn->isDeclaration()) continue; SCC.push_back(Fn); } if (SCC.empty()) return false; Module &M = CGSCC.getCallGraph().getModule(); KernelSet Kernels = getDeviceKernels(M); CallGraph &CG = getAnalysis().getCallGraph(); CGUpdater.initialize(CG, CGSCC); // Maintain a map of functions to avoid rebuilding the ORE DenseMap> OREMap; auto OREGetter = [&OREMap](Function *F) -> OptimizationRemarkEmitter & { std::unique_ptr &ORE = OREMap[F]; if (!ORE) ORE = std::make_unique(F); return *ORE; }; AnalysisGetter AG; SetVector Functions(SCC.begin(), SCC.end()); BumpPtrAllocator Allocator; OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator, /*CGSCC*/ Functions, Kernels); unsigned MaxFixpointIterations = (isOpenMPDevice(M)) ? 
128 : 32; Attributor A(Functions, InfoCache, CGUpdater, nullptr, false, true, MaxFixpointIterations, OREGetter, DEBUG_TYPE); OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A); return OMPOpt.run(false); } bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); } }; } // end anonymous namespace KernelSet llvm::omp::getDeviceKernels(Module &M) { // TODO: Create a more cross-platform way of determining device kernels. NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations"); KernelSet Kernels; if (!MD) return Kernels; for (auto *Op : MD->operands()) { if (Op->getNumOperands() < 2) continue; MDString *KindID = dyn_cast(Op->getOperand(1)); if (!KindID || KindID->getString() != "kernel") continue; Function *KernelFn = mdconst::dyn_extract_or_null(Op->getOperand(0)); if (!KernelFn) continue; ++NumOpenMPTargetRegionKernels; Kernels.insert(KernelFn); } return Kernels; } bool llvm::omp::containsOpenMP(Module &M) { Metadata *MD = M.getModuleFlag("openmp"); if (!MD) return false; return true; } bool llvm::omp::isOpenMPDevice(Module &M) { Metadata *MD = M.getModuleFlag("openmp-device"); if (!MD) return false; return true; } char OpenMPOptCGSCCLegacyPass::ID = 0; INITIALIZE_PASS_BEGIN(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc", "OpenMP specific optimizations", false, false) INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass) INITIALIZE_PASS_END(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc", "OpenMP specific optimizations", false, false) Pass *llvm::createOpenMPOptCGSCCLegacyPass() { return new OpenMPOptCGSCCLegacyPass(); } diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp index 2e36c50b75fc..9afbe0e9a2a5 100644 --- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp @@ -1,1794 +1,1810 @@ //===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM 
Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This pass performs various transformations related to eliminating memcpy // calls, or transforming sets of stores into memset's. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar/MemCpyOptimizer.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/None.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/GlobalsModRef.h" #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/MemoryDependenceAnalysis.h" #include "llvm/Analysis/MemoryLocation.h" #include "llvm/Analysis/MemorySSA.h" #include "llvm/Analysis/MemorySSAUpdater.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Argument.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/IR/PassManager.h" #include "llvm/IR/Type.h" #include "llvm/IR/User.h" #include "llvm/IR/Value.h" #include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Debug.h" #include "llvm/Support/MathExtras.h" #include 
"llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Local.h" #include #include #include #include using namespace llvm; #define DEBUG_TYPE "memcpyopt" static cl::opt EnableMemorySSA("enable-memcpyopt-memoryssa", cl::init(true), cl::Hidden, cl::desc("Use MemorySSA-backed MemCpyOpt.")); STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted"); STATISTIC(NumMemSetInfer, "Number of memsets inferred"); STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy"); STATISTIC(NumCpyToSet, "Number of memcpys converted to memset"); STATISTIC(NumCallSlot, "Number of call slot optimizations performed"); namespace { /// Represents a range of memset'd bytes with the ByteVal value. /// This allows us to analyze stores like: /// store 0 -> P+1 /// store 0 -> P+0 /// store 0 -> P+3 /// store 0 -> P+2 /// which sometimes happens with stores to arrays of structs etc. When we see /// the first store, we make a range [1, 2). The second store extends the range /// to [0, 2). The third makes a new range [2, 3). The fourth store joins the /// two ranges into [0, 3) which is memset'able. struct MemsetRange { // Start/End - A semi range that describes the span that this range covers. // The range is closed at the start and open at the end: [Start, End). int64_t Start, End; /// StartPtr - The getelementptr instruction that points to the start of the /// range. Value *StartPtr; /// Alignment - The known alignment of the first store. unsigned Alignment; /// TheStores - The actual stores that make up this range. SmallVector TheStores; bool isProfitableToUseMemset(const DataLayout &DL) const; }; } // end anonymous namespace bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const { // If we found more than 4 stores to merge or 16 bytes, use memset. if (TheStores.size() >= 4 || End-Start >= 16) return true; // If there is nothing to merge, don't do anything. 
if (TheStores.size() < 2) return false; // If any of the stores are a memset, then it is always good to extend the // memset. for (Instruction *SI : TheStores) if (!isa(SI)) return true; // Assume that the code generator is capable of merging pairs of stores // together if it wants to. if (TheStores.size() == 2) return false; // If we have fewer than 8 stores, it can still be worthwhile to do this. // For example, merging 4 i8 stores into an i32 store is useful almost always. // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the // memset will be split into 2 32-bit stores anyway) and doing so can // pessimize the llvm optimizer. // // Since we don't have perfect knowledge here, make some assumptions: assume // the maximum GPR width is the same size as the largest legal integer // size. If so, check to see whether we will end up actually reducing the // number of stores used. unsigned Bytes = unsigned(End-Start); unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8; if (MaxIntSize == 0) MaxIntSize = 1; unsigned NumPointerStores = Bytes / MaxIntSize; // Assume the remaining bytes if any are done a byte at a time. unsigned NumByteStores = Bytes % MaxIntSize; // If we will reduce the # stores (according to this heuristic), do the // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32 // etc. return TheStores.size() > NumPointerStores+NumByteStores; } namespace { class MemsetRanges { using range_iterator = SmallVectorImpl::iterator; /// A sorted list of the memset ranges. 
SmallVector Ranges; const DataLayout &DL; public: MemsetRanges(const DataLayout &DL) : DL(DL) {} using const_iterator = SmallVectorImpl::const_iterator; const_iterator begin() const { return Ranges.begin(); } const_iterator end() const { return Ranges.end(); } bool empty() const { return Ranges.empty(); } void addInst(int64_t OffsetFromFirst, Instruction *Inst) { if (StoreInst *SI = dyn_cast(Inst)) addStore(OffsetFromFirst, SI); else addMemSet(OffsetFromFirst, cast(Inst)); } void addStore(int64_t OffsetFromFirst, StoreInst *SI) { - int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType()); - - addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(), + TypeSize StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType()); + assert(!StoreSize.isScalable() && "Can't track scalable-typed stores"); + addRange(OffsetFromFirst, StoreSize.getFixedSize(), SI->getPointerOperand(), SI->getAlign().value(), SI); } void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) { int64_t Size = cast(MSI->getLength())->getZExtValue(); addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(), MSI); } void addRange(int64_t Start, int64_t Size, Value *Ptr, unsigned Alignment, Instruction *Inst); }; } // end anonymous namespace /// Add a new store to the MemsetRanges data structure. This adds a /// new range for the specified store at the specified offset, merging into /// existing ranges as appropriate. void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr, unsigned Alignment, Instruction *Inst) { int64_t End = Start+Size; range_iterator I = partition_point( Ranges, [=](const MemsetRange &O) { return O.End < Start; }); // We now know that I == E, in which case we didn't find anything to merge // with, or that Start <= I->End. If End < I->Start or I == E, then we need // to insert a new range. Handle this now. 
if (I == Ranges.end() || End < I->Start) { MemsetRange &R = *Ranges.insert(I, MemsetRange()); R.Start = Start; R.End = End; R.StartPtr = Ptr; R.Alignment = Alignment; R.TheStores.push_back(Inst); return; } // This store overlaps with I, add it. I->TheStores.push_back(Inst); // At this point, we may have an interval that completely contains our store. // If so, just add it to the interval and return. if (I->Start <= Start && I->End >= End) return; // Now we know that Start <= I->End and End >= I->Start so the range overlaps // but is not entirely contained within the range. // See if the range extends the start of the range. In this case, it couldn't // possibly cause it to join the prior range, because otherwise we would have // stopped on *it*. if (Start < I->Start) { I->Start = Start; I->StartPtr = Ptr; I->Alignment = Alignment; } // Now we know that Start <= I->End and Start >= I->Start (so the startpoint // is in or right at the end of I), and that End >= I->Start. Extend I out to // End. if (End > I->End) { I->End = End; range_iterator NextI = I; while (++NextI != Ranges.end() && End >= NextI->Start) { // Merge the range in. 
I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end()); if (NextI->End > I->End) I->End = NextI->End; Ranges.erase(NextI); NextI = I; } } } //===----------------------------------------------------------------------===// // MemCpyOptLegacyPass Pass //===----------------------------------------------------------------------===// namespace { class MemCpyOptLegacyPass : public FunctionPass { MemCpyOptPass Impl; public: static char ID; // Pass identification, replacement for typeid MemCpyOptLegacyPass() : FunctionPass(ID) { initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; private: // This transformation requires dominator postdominator info void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); AU.addRequired(); AU.addRequired(); AU.addPreserved(); AU.addPreserved(); AU.addRequired(); if (!EnableMemorySSA) AU.addRequired(); AU.addPreserved(); AU.addRequired(); AU.addPreserved(); if (EnableMemorySSA) AU.addRequired(); AU.addPreserved(); } }; } // end anonymous namespace char MemCpyOptLegacyPass::ID = 0; /// The public interface to this file... FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); } INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization", false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization", false, false) // Check that V is either not accessible by the caller, or unwinding cannot // occur between Start and End. 
static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start, Instruction *End) { assert(Start->getParent() == End->getParent() && "Must be in same block"); if (!Start->getFunction()->doesNotThrow() && !isa(getUnderlyingObject(V))) { for (const Instruction &I : make_range(Start->getIterator(), End->getIterator())) { if (I.mayThrow()) return true; } } return false; } void MemCpyOptPass::eraseInstruction(Instruction *I) { if (MSSAU) MSSAU->removeMemoryAccess(I); if (MD) MD->removeInstruction(I); I->eraseFromParent(); } // Check for mod or ref of Loc between Start and End, excluding both boundaries. // Start and End must be in the same block static bool accessedBetween(AliasAnalysis &AA, MemoryLocation Loc, const MemoryUseOrDef *Start, const MemoryUseOrDef *End) { assert(Start->getBlock() == End->getBlock() && "Only local supported"); for (const MemoryAccess &MA : make_range(++Start->getIterator(), End->getIterator())) { if (isModOrRefSet(AA.getModRefInfo(cast(MA).getMemoryInst(), Loc))) return true; } return false; } // Check for mod of Loc between Start and End, excluding both boundaries. // Start and End can be in different blocks. static bool writtenBetween(MemorySSA *MSSA, MemoryLocation Loc, const MemoryUseOrDef *Start, const MemoryUseOrDef *End) { // TODO: Only walk until we hit Start. MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( End->getDefiningAccess(), Loc); return !MSSA->dominates(Clobber, Start); } /// When scanning forward over instructions, we look for some other patterns to /// fold away. In particular, this looks for stores to neighboring locations of /// memory. If it sees enough consecutive ones, it attempts to merge them /// together into a memcpy/memset. 
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst, Value *StartPtr, Value *ByteVal) { const DataLayout &DL = StartInst->getModule()->getDataLayout(); + // We can't track scalable types + if (StoreInst *SI = dyn_cast(StartInst)) + if (DL.getTypeStoreSize(SI->getOperand(0)->getType()).isScalable()) + return nullptr; + // Okay, so we now have a single store that can be splatable. Scan to find // all subsequent stores of the same value to offset from the same pointer. // Join these together into ranges, so we can decide whether contiguous blocks // are stored. MemsetRanges Ranges(DL); BasicBlock::iterator BI(StartInst); // Keeps track of the last memory use or def before the insertion point for // the new memset. The new MemoryDef for the inserted memsets will be inserted // after MemInsertPoint. It points to either LastMemDef or to the last user // before the insertion point of the memset, if there are any such users. MemoryUseOrDef *MemInsertPoint = nullptr; // Keeps track of the last MemoryDef between StartInst and the insertion point // for the new memset. This will become the defining access of the inserted // memsets. MemoryDef *LastMemDef = nullptr; for (++BI; !BI->isTerminator(); ++BI) { if (MSSAU) { auto *CurrentAcc = cast_or_null( MSSAU->getMemorySSA()->getMemoryAccess(&*BI)); if (CurrentAcc) { MemInsertPoint = CurrentAcc; if (auto *CurrentDef = dyn_cast(CurrentAcc)) LastMemDef = CurrentDef; } } // Calls that only access inaccessible memory do not block merging // accessible stores. if (auto *CB = dyn_cast(BI)) { if (CB->onlyAccessesInaccessibleMemory()) continue; } if (!isa(BI) && !isa(BI)) { // If the instruction is readnone, ignore it, otherwise bail out. We // don't even allow readonly here because we don't want something like: // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A). 
if (BI->mayWriteToMemory() || BI->mayReadFromMemory()) break; continue; } if (StoreInst *NextStore = dyn_cast(BI)) { // If this is a store, see if we can merge it in. if (!NextStore->isSimple()) break; Value *StoredVal = NextStore->getValueOperand(); // Don't convert stores of non-integral pointer types to memsets (which // stores integers). if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType())) break; + // We can't track ranges involving scalable types. + if (DL.getTypeStoreSize(StoredVal->getType()).isScalable()) + break; + // Check to see if this stored value is of the same byte-splattable value. Value *StoredByte = isBytewiseValue(StoredVal, DL); if (isa(ByteVal) && StoredByte) ByteVal = StoredByte; if (ByteVal != StoredByte) break; // Check to see if this store is to a constant offset from the start ptr. Optional Offset = isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL); if (!Offset) break; Ranges.addStore(*Offset, NextStore); } else { MemSetInst *MSI = cast(BI); if (MSI->isVolatile() || ByteVal != MSI->getValue() || !isa(MSI->getLength())) break; // Check to see if this store is to a constant offset from the start ptr. Optional Offset = isPointerOffset(StartPtr, MSI->getDest(), DL); if (!Offset) break; Ranges.addMemSet(*Offset, MSI); } } // If we have no ranges, then we just had a single store with nothing that // could be merged in. This is a very common case of course. if (Ranges.empty()) return nullptr; // If we had at least one store that could be merged in, add the starting // store as well. We try to avoid this unless there is at least something // interesting as a small compile-time optimization. Ranges.addInst(0, StartInst); // If we create any memsets, we put it right before the first instruction that // isn't part of the memset block. This ensure that the memset is dominated // by any addressing instruction needed by the start of the block. 
IRBuilder<> Builder(&*BI); // Now that we have full information about ranges, loop over the ranges and // emit memset's for anything big enough to be worthwhile. Instruction *AMemSet = nullptr; for (const MemsetRange &Range : Ranges) { if (Range.TheStores.size() == 1) continue; // If it is profitable to lower this range to memset, do so now. if (!Range.isProfitableToUseMemset(DL)) continue; // Otherwise, we do want to transform this! Create a new memset. // Get the starting pointer of the block. StartPtr = Range.StartPtr; AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start, MaybeAlign(Range.Alignment)); LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI : Range.TheStores) dbgs() << *SI << '\n'; dbgs() << "With: " << *AMemSet << '\n'); if (!Range.TheStores.empty()) AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc()); if (MSSAU) { assert(LastMemDef && MemInsertPoint && "Both LastMemDef and MemInsertPoint need to be set"); auto *NewDef = cast(MemInsertPoint->getMemoryInst() == &*BI ? MSSAU->createMemoryAccessBefore( AMemSet, LastMemDef, MemInsertPoint) : MSSAU->createMemoryAccessAfter( AMemSet, LastMemDef, MemInsertPoint)); MSSAU->insertDef(NewDef, /*RenameUses=*/true); LastMemDef = NewDef; MemInsertPoint = NewDef; } // Zap all the stores. for (Instruction *SI : Range.TheStores) eraseInstruction(SI); ++NumMemSetInfer; } return AMemSet; } // This method try to lift a store instruction before position P. // It will lift the store and its argument + that anything that // may alias with these. // The method returns true if it was successful. bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) { // If the store alias this position, early bail out. MemoryLocation StoreLoc = MemoryLocation::get(SI); if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc))) return false; // Keep track of the arguments of all instruction we plan to lift // so we can make sure to lift them as well if appropriate. 
DenseSet Args; if (auto *Ptr = dyn_cast(SI->getPointerOperand())) if (Ptr->getParent() == SI->getParent()) Args.insert(Ptr); // Instruction to lift before P. SmallVector ToLift{SI}; // Memory locations of lifted instructions. SmallVector MemLocs{StoreLoc}; // Lifted calls. SmallVector Calls; const MemoryLocation LoadLoc = MemoryLocation::get(LI); for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) { auto *C = &*I; // Make sure hoisting does not perform a store that was not guaranteed to // happen. if (!isGuaranteedToTransferExecutionToSuccessor(C)) return false; bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None)); bool NeedLift = false; if (Args.erase(C)) NeedLift = true; else if (MayAlias) { NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) { return isModOrRefSet(AA->getModRefInfo(C, ML)); }); if (!NeedLift) NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) { return isModOrRefSet(AA->getModRefInfo(C, Call)); }); } if (!NeedLift) continue; if (MayAlias) { // Since LI is implicitly moved downwards past the lifted instructions, // none of them may modify its source. if (isModSet(AA->getModRefInfo(C, LoadLoc))) return false; else if (const auto *Call = dyn_cast(C)) { // If we can't lift this before P, it's game over. if (isModOrRefSet(AA->getModRefInfo(P, Call))) return false; Calls.push_back(Call); } else if (isa(C) || isa(C) || isa(C)) { // If we can't lift this before P, it's game over. auto ML = MemoryLocation::get(C); if (isModOrRefSet(AA->getModRefInfo(P, ML))) return false; MemLocs.push_back(ML); } else // We don't know how to lift this instruction. return false; } ToLift.push_back(C); for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k) if (auto *A = dyn_cast(C->getOperand(k))) { if (A->getParent() == SI->getParent()) { // Cannot hoist user of P above P if(A == P) return false; Args.insert(A); } } } // Find MSSA insertion point. 
Normally P will always have a corresponding // memory access before which we can insert. However, with non-standard AA // pipelines, there may be a mismatch between AA and MSSA, in which case we // will scan for a memory access before P. In either case, we know for sure // that at least the load will have a memory access. // TODO: Simplify this once P will be determined by MSSA, in which case the // discrepancy can no longer occur. MemoryUseOrDef *MemInsertPoint = nullptr; if (MSSAU) { if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) { MemInsertPoint = cast(--MA->getIterator()); } else { const Instruction *ConstP = P; for (const Instruction &I : make_range(++ConstP->getReverseIterator(), ++LI->getReverseIterator())) { if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) { MemInsertPoint = MA; break; } } } } // We made it, we need to lift. for (auto *I : llvm::reverse(ToLift)) { LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n"); I->moveBefore(P); if (MSSAU) { assert(MemInsertPoint && "Must have found insert point"); if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) { MSSAU->moveAfter(MA, MemInsertPoint); MemInsertPoint = MA; } } } return true; } bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) { if (!SI->isSimple()) return false; // Avoid merging nontemporal stores since the resulting // memcpy/memset would not be able to preserve the nontemporal hint. // In theory we could teach how to propagate the !nontemporal metadata to // memset calls. However, that change would force the backend to // conservatively expand !nontemporal memset calls back to sequences of // store instructions (effectively undoing the merging). 
if (SI->getMetadata(LLVMContext::MD_nontemporal)) return false; const DataLayout &DL = SI->getModule()->getDataLayout(); Value *StoredVal = SI->getValueOperand(); // Not all the transforms below are correct for non-integral pointers, bail // until we've audited the individual pieces. if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType())) return false; // Load to store forwarding can be interpreted as memcpy. if (LoadInst *LI = dyn_cast(StoredVal)) { if (LI->isSimple() && LI->hasOneUse() && LI->getParent() == SI->getParent()) { auto *T = LI->getType(); if (T->isAggregateType()) { MemoryLocation LoadLoc = MemoryLocation::get(LI); // We use alias analysis to check if an instruction may store to // the memory we load from in between the load and the store. If // such an instruction is found, we try to promote there instead // of at the store position. // TODO: Can use MSSA for this. Instruction *P = SI; for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) { if (isModSet(AA->getModRefInfo(&I, LoadLoc))) { P = &I; break; } } // We found an instruction that may write to the loaded memory. // We can try to promote at this position instead of the store // position if nothing aliases the store memory after this and the store // destination is not in the range. if (P && P != SI) { if (!moveUp(SI, P, LI)) P = nullptr; } // If a valid insertion position is found, then we can promote // the load/store pair to a memcpy. if (P) { // If we load from memory that may alias the memory we store to, // memmove must be used to preserve semantic. If not, memcpy can // be used. 
bool UseMemMove = false; if (!AA->isNoAlias(MemoryLocation::get(SI), LoadLoc)) UseMemMove = true; uint64_t Size = DL.getTypeStoreSize(T); IRBuilder<> Builder(P); Instruction *M; if (UseMemMove) M = Builder.CreateMemMove( SI->getPointerOperand(), SI->getAlign(), LI->getPointerOperand(), LI->getAlign(), Size); else M = Builder.CreateMemCpy( SI->getPointerOperand(), SI->getAlign(), LI->getPointerOperand(), LI->getAlign(), Size); LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => " << *M << "\n"); if (MSSAU) { auto *LastDef = cast(MSSAU->getMemorySSA()->getMemoryAccess(SI)); auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef); MSSAU->insertDef(cast(NewAccess), /*RenameUses=*/true); } eraseInstruction(SI); eraseInstruction(LI); ++NumMemCpyInstr; // Make sure we do not invalidate the iterator. BBI = M->getIterator(); return true; } } // Detect cases where we're performing call slot forwarding, but // happen to be using a load-store pair to implement it, rather than // a memcpy. CallInst *C = nullptr; if (EnableMemorySSA) { if (auto *LoadClobber = dyn_cast( MSSA->getWalker()->getClobberingMemoryAccess(LI))) { // The load most post-dom the call. Limit to the same block for now. // TODO: Support non-local call-slot optimization? if (LoadClobber->getBlock() == SI->getParent()) C = dyn_cast_or_null(LoadClobber->getMemoryInst()); } } else { MemDepResult ldep = MD->getDependency(LI); if (ldep.isClobber() && !isa(ldep.getInst())) C = dyn_cast(ldep.getInst()); } if (C) { // Check that nothing touches the dest of the "copy" between // the call and the store. 
MemoryLocation StoreLoc = MemoryLocation::get(SI); if (EnableMemorySSA) { if (accessedBetween(*AA, StoreLoc, MSSA->getMemoryAccess(C), MSSA->getMemoryAccess(SI))) C = nullptr; } else { for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator(); I != E; --I) { if (isModOrRefSet(AA->getModRefInfo(&*I, StoreLoc))) { C = nullptr; break; } } } } if (C) { bool changed = performCallSlotOptzn( LI, SI, SI->getPointerOperand()->stripPointerCasts(), LI->getPointerOperand()->stripPointerCasts(), DL.getTypeStoreSize(SI->getOperand(0)->getType()), commonAlignment(SI->getAlign(), LI->getAlign()), C); if (changed) { eraseInstruction(SI); eraseInstruction(LI); ++NumMemCpyInstr; return true; } } } } // There are two cases that are interesting for this code to handle: memcpy // and memset. Right now we only handle memset. // Ensure that the value being stored is something that can be memset'able a // byte at a time like "0" or "-1" or any width, as well as things like // 0xA0A0A0A0 and 0.0. auto *V = SI->getOperand(0); if (Value *ByteVal = isBytewiseValue(V, DL)) { if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(), ByteVal)) { BBI = I->getIterator(); // Don't invalidate iterator. return true; } // If we have an aggregate, we try to promote it to memset regardless // of opportunity for merging as it can expose optimization opportunities // in subsequent passes. 
auto *T = V->getType(); if (T->isAggregateType()) { uint64_t Size = DL.getTypeStoreSize(T); IRBuilder<> Builder(SI); auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size, SI->getAlign()); LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n"); if (MSSAU) { assert(isa(MSSAU->getMemorySSA()->getMemoryAccess(SI))); auto *LastDef = cast(MSSAU->getMemorySSA()->getMemoryAccess(SI)); auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef); MSSAU->insertDef(cast(NewAccess), /*RenameUses=*/true); } eraseInstruction(SI); NumMemSetInfer++; // Make sure we do not invalidate the iterator. BBI = M->getIterator(); return true; } } return false; } bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) { // See if there is another memset or store neighboring this memset which // allows us to widen out the memset to do a single larger store. if (isa(MSI->getLength()) && !MSI->isVolatile()) if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(), MSI->getValue())) { BBI = I->getIterator(); // Don't invalidate iterator. return true; } return false; } /// Takes a memcpy and a call that it depends on, /// and checks for the possibility of a call slot optimization by having /// the call write its result directly into the destination of the memcpy. bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad, Instruction *cpyStore, Value *cpyDest, - Value *cpySrc, uint64_t cpyLen, + Value *cpySrc, TypeSize cpySize, Align cpyAlign, CallInst *C) { // The general transformation to keep in mind is // // call @func(..., src, ...) // memcpy(dest, src, ...) // // -> // // memcpy(dest, src, ...) // call @func(..., dest, ...) // // Since moving the memcpy is technically awkward, we additionally check that // src only holds uninitialized values at the moment of the call, meaning that // the memcpy can be discarded rather than moved. + // We can't optimize scalable types. 
+ if (cpySize.isScalable()) + return false; + // Lifetime marks shouldn't be operated on. if (Function *F = C->getCalledFunction()) if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start) return false; // Require that src be an alloca. This simplifies the reasoning considerably. AllocaInst *srcAlloca = dyn_cast(cpySrc); if (!srcAlloca) return false; ConstantInt *srcArraySize = dyn_cast(srcAlloca->getArraySize()); if (!srcArraySize) return false; const DataLayout &DL = cpyLoad->getModule()->getDataLayout(); uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) * srcArraySize->getZExtValue(); - if (cpyLen < srcSize) + if (cpySize < srcSize) return false; // Check that accessing the first srcSize bytes of dest will not cause a // trap. Otherwise the transform is invalid since it might cause a trap // to occur earlier than it otherwise would. - if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpyLen), + if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpySize), DL, C, DT)) return false; // Make sure that nothing can observe cpyDest being written early. There are // a number of cases to consider: // 1. cpyDest cannot be accessed between C and cpyStore as a precondition of // the transform. // 2. C itself may not access cpyDest (prior to the transform). This is // checked further below. // 3. If cpyDest is accessible to the caller of this function (potentially // captured and not based on an alloca), we need to ensure that we cannot // unwind between C and cpyStore. This is checked here. // 4. If cpyDest is potentially captured, there may be accesses to it from // another thread. In this case, we need to check that cpyStore is // guaranteed to be executed if C is. As it is a non-atomic access, it // renders accesses from other threads undefined. // TODO: This is currently not checked. 
if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore)) return false; // Check that dest points to memory that is at least as aligned as src. Align srcAlign = srcAlloca->getAlign(); bool isDestSufficientlyAligned = srcAlign <= cpyAlign; // If dest is not aligned enough and we can't increase its alignment then // bail out. if (!isDestSufficientlyAligned && !isa(cpyDest)) return false; // Check that src is not accessed except via the call and the memcpy. This // guarantees that it holds only undefined values when passed in (so the final // memcpy can be dropped), that it is not read or written between the call and // the memcpy, and that writing beyond the end of it is undefined. SmallVector srcUseList(srcAlloca->users()); while (!srcUseList.empty()) { User *U = srcUseList.pop_back_val(); if (isa(U) || isa(U)) { append_range(srcUseList, U->users()); continue; } if (GetElementPtrInst *G = dyn_cast(U)) { if (!G->hasAllZeroIndices()) return false; append_range(srcUseList, U->users()); continue; } if (const IntrinsicInst *IT = dyn_cast(U)) if (IT->isLifetimeStartOrEnd()) continue; if (U != C && U != cpyLoad) return false; } // Check that src isn't captured by the called function since the // transformation can cause aliasing issues in that case. for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI) if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI)) return false; // Since we're changing the parameter to the callsite, we need to make sure // that what would be the new parameter dominates the callsite. if (!DT->dominates(cpyDest, C)) { // Support moving a constant index GEP before the call. 
auto *GEP = dyn_cast(cpyDest); if (GEP && GEP->hasAllConstantIndices() && DT->dominates(GEP->getPointerOperand(), C)) GEP->moveBefore(C); else return false; } // In addition to knowing that the call does not access src in some // unexpected manner, for example via a global, which we deduce from // the use analysis, we also need to know that it does not sneakily // access dest. We rely on AA to figure this out for us. ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize)); // If necessary, perform additional analysis. if (isModOrRefSet(MR)) MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT); if (isModOrRefSet(MR)) return false; // We can't create address space casts here because we don't know if they're // safe for the target. if (cpySrc->getType()->getPointerAddressSpace() != cpyDest->getType()->getPointerAddressSpace()) return false; for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI) if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc && cpySrc->getType()->getPointerAddressSpace() != C->getArgOperand(ArgI)->getType()->getPointerAddressSpace()) return false; // All the checks have passed, so do the transformation. bool changedArgument = false; for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI) if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) { Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(), cpyDest->getName(), C); changedArgument = true; if (C->getArgOperand(ArgI)->getType() == Dest->getType()) C->setArgOperand(ArgI, Dest); else C->setArgOperand(ArgI, CastInst::CreatePointerCast( Dest, C->getArgOperand(ArgI)->getType(), Dest->getName(), C)); } if (!changedArgument) return false; // If the destination wasn't sufficiently aligned then increase its alignment. 
if (!isDestSufficientlyAligned) { assert(isa(cpyDest) && "Can only increase alloca alignment!"); cast(cpyDest)->setAlignment(srcAlign); } // Drop any cached information about the call, because we may have changed // its dependence information by changing its parameter. if (MD) MD->removeInstruction(C); // Update AA metadata // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be // handled here, but combineMetadata doesn't support them yet unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, LLVMContext::MD_noalias, LLVMContext::MD_invariant_group, LLVMContext::MD_access_group}; combineMetadata(C, cpyLoad, KnownIDs, true); ++NumCallSlot; return true; } /// We've found that the (upward scanning) memory dependence of memcpy 'M' is /// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can. bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep) { // We can only transforms memcpy's where the dest of one is the source of the // other. if (M->getSource() != MDep->getDest() || MDep->isVolatile()) return false; // If dep instruction is reading from our current input, then it is a noop // transfer and substituting the input won't change this instruction. Just // ignore the input and let someone else zap MDep. This handles cases like: // memcpy(a <- a) // memcpy(b <- a) if (M->getSource() == MDep->getSource()) return false; // Second, the length of the memcpy's must be the same, or the preceding one // must be larger than the following one. if (MDep->getLength() != M->getLength()) { ConstantInt *MDepLen = dyn_cast(MDep->getLength()); ConstantInt *MLen = dyn_cast(M->getLength()); if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue()) return false; } // Verify that the copied-from memory doesn't change in between the two // transfers. For example, in: // memcpy(a <- b) // *b = 42; // memcpy(c <- a) // It would be invalid to transform the second memcpy into memcpy(c <- b). 
// // TODO: If the code between M and MDep is transparent to the destination "c", // then we could still perform the xform by moving M up to the first memcpy. if (EnableMemorySSA) { // TODO: It would be sufficient to check the MDep source up to the memcpy // size of M, rather than MDep. if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep), MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M))) return false; } else { // NOTE: This is conservative, it will stop on any read from the source loc, // not just the defining memcpy. MemDepResult SourceDep = MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false, M->getIterator(), M->getParent()); if (!SourceDep.isClobber() || SourceDep.getInst() != MDep) return false; } // If the dest of the second might alias the source of the first, then the // source and dest might overlap. We still want to eliminate the intermediate // value, but we have to generate a memmove instead of memcpy. bool UseMemMove = false; if (!AA->isNoAlias(MemoryLocation::getForDest(M), MemoryLocation::getForSource(MDep))) UseMemMove = true; // If all checks passed, then we can transform M. LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n" << *MDep << '\n' << *M << '\n'); // TODO: Is this worth it if we're creating a less aligned memcpy? For // example we could be moving from movaps -> movq on x86. IRBuilder<> Builder(M); Instruction *NewM; if (UseMemMove) NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(), MDep->getRawSource(), MDep->getSourceAlign(), M->getLength(), M->isVolatile()); else if (isa(M)) { // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is // never allowed since that would allow the latter to be lowered as a call // to an external function. 
NewM = Builder.CreateMemCpyInline( M->getRawDest(), M->getDestAlign(), MDep->getRawSource(), MDep->getSourceAlign(), M->getLength(), M->isVolatile()); } else NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(), MDep->getRawSource(), MDep->getSourceAlign(), M->getLength(), M->isVolatile()); if (MSSAU) { assert(isa(MSSAU->getMemorySSA()->getMemoryAccess(M))); auto *LastDef = cast(MSSAU->getMemorySSA()->getMemoryAccess(M)); auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); MSSAU->insertDef(cast(NewAccess), /*RenameUses=*/true); } // Remove the instruction we're replacing. eraseInstruction(M); ++NumMemCpyInstr; return true; } /// We've found that the (upward scanning) memory dependence of \p MemCpy is /// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that /// weren't copied over by \p MemCpy. /// /// In other words, transform: /// \code /// memset(dst, c, dst_size); /// memcpy(dst, src, src_size); /// \endcode /// into: /// \code /// memcpy(dst, src, src_size); /// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size); /// \endcode bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy, MemSetInst *MemSet) { // We can only transform memset/memcpy with the same destination. if (!AA->isMustAlias(MemSet->getDest(), MemCpy->getDest())) return false; // Check that src and dst of the memcpy aren't the same. While memcpy // operands cannot partially overlap, exact equality is allowed. if (!AA->isNoAlias(MemoryLocation(MemCpy->getSource(), LocationSize::precise(1)), MemoryLocation(MemCpy->getDest(), LocationSize::precise(1)))) return false; if (EnableMemorySSA) { // We know that dst up to src_size is not written. We now need to make sure // that dst up to dst_size is not accessed. (If we did not move the memset, // checking for reads would be sufficient.) 
if (accessedBetween(*AA, MemoryLocation::getForDest(MemSet), MSSA->getMemoryAccess(MemSet), MSSA->getMemoryAccess(MemCpy))) { return false; } } else { // We have already checked that dst up to src_size is not accessed. We // need to make sure that there are no accesses up to dst_size either. MemDepResult DstDepInfo = MD->getPointerDependencyFrom( MemoryLocation::getForDest(MemSet), false, MemCpy->getIterator(), MemCpy->getParent()); if (DstDepInfo.getInst() != MemSet) return false; } // Use the same i8* dest as the memcpy, killing the memset dest if different. Value *Dest = MemCpy->getRawDest(); Value *DestSize = MemSet->getLength(); Value *SrcSize = MemCpy->getLength(); if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy)) return false; // If the sizes are the same, simply drop the memset instead of generating // a replacement with zero size. if (DestSize == SrcSize) { eraseInstruction(MemSet); return true; } // By default, create an unaligned memset. unsigned Align = 1; // If Dest is aligned, and SrcSize is constant, use the minimum alignment // of the sum. const unsigned DestAlign = std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment()); if (DestAlign > 1) if (ConstantInt *SrcSizeC = dyn_cast(SrcSize)) Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign); IRBuilder<> Builder(MemCpy); // If the sizes have different types, zext the smaller one. 
if (DestSize->getType() != SrcSize->getType()) { if (DestSize->getType()->getIntegerBitWidth() > SrcSize->getType()->getIntegerBitWidth()) SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType()); else DestSize = Builder.CreateZExt(DestSize, SrcSize->getType()); } Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize); Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize); Value *MemsetLen = Builder.CreateSelect( Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff); unsigned DestAS = Dest->getType()->getPointerAddressSpace(); Instruction *NewMemSet = Builder.CreateMemSet( Builder.CreateGEP(Builder.getInt8Ty(), Builder.CreatePointerCast(Dest, Builder.getInt8PtrTy(DestAS)), SrcSize), MemSet->getOperand(1), MemsetLen, MaybeAlign(Align)); if (MSSAU) { assert(isa(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) && "MemCpy must be a MemoryDef"); // The new memset is inserted after the memcpy, but it is known that its // defining access is the memset about to be removed which immediately // precedes the memcpy. auto *LastDef = cast(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)); auto *NewAccess = MSSAU->createMemoryAccessBefore( NewMemSet, LastDef->getDefiningAccess(), LastDef); MSSAU->insertDef(cast(NewAccess), /*RenameUses=*/true); } eraseInstruction(MemSet); return true; } /// Determine whether the instruction has undefined content for the given Size, /// either because it was freshly alloca'd or started its lifetime. 
static bool hasUndefContents(Instruction *I, Value *Size) { if (isa(I)) return true; if (ConstantInt *CSize = dyn_cast(Size)) { if (IntrinsicInst *II = dyn_cast(I)) if (II->getIntrinsicID() == Intrinsic::lifetime_start) if (ConstantInt *LTSize = dyn_cast(II->getArgOperand(0))) if (LTSize->getZExtValue() >= CSize->getZExtValue()) return true; } return false; } static bool hasUndefContentsMSSA(MemorySSA *MSSA, AliasAnalysis *AA, Value *V, MemoryDef *Def, Value *Size) { if (MSSA->isLiveOnEntryDef(Def)) return isa(getUnderlyingObject(V)); if (IntrinsicInst *II = dyn_cast_or_null(Def->getMemoryInst())) { if (II->getIntrinsicID() == Intrinsic::lifetime_start) { ConstantInt *LTSize = cast(II->getArgOperand(0)); if (ConstantInt *CSize = dyn_cast(Size)) { if (AA->isMustAlias(V, II->getArgOperand(1)) && LTSize->getZExtValue() >= CSize->getZExtValue()) return true; } // If the lifetime.start covers a whole alloca (as it almost always // does) and we're querying a pointer based on that alloca, then we know // the memory is definitely undef, regardless of how exactly we alias. // The size also doesn't matter, as an out-of-bounds access would be UB. AllocaInst *Alloca = dyn_cast(getUnderlyingObject(V)); if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) { const DataLayout &DL = Alloca->getModule()->getDataLayout(); if (Optional AllocaSize = Alloca->getAllocationSizeInBits(DL)) if (*AllocaSize == LTSize->getValue() * 8) return true; } } } return false; } /// Transform memcpy to memset when its source was just memset. /// In other words, turn: /// \code /// memset(dst1, c, dst1_size); /// memcpy(dst2, dst1, dst2_size); /// \endcode /// into: /// \code /// memset(dst1, c, dst1_size); /// memset(dst2, c, dst2_size); /// \endcode /// When dst2_size <= dst1_size. bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy, MemSetInst *MemSet) { // Make sure that memcpy(..., memset(...), ...), that is we are memsetting and // memcpying from the same address. 
Otherwise it is hard to reason about. if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource())) return false; Value *MemSetSize = MemSet->getLength(); Value *CopySize = MemCpy->getLength(); if (MemSetSize != CopySize) { // Make sure the memcpy doesn't read any more than what the memset wrote. // Don't worry about sizes larger than i64. // A known memset size is required. ConstantInt *CMemSetSize = dyn_cast(MemSetSize); if (!CMemSetSize) return false; // A known memcpy size is also required. ConstantInt *CCopySize = dyn_cast(CopySize); if (!CCopySize) return false; if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) { // If the memcpy is larger than the memset, but the memory was undef prior // to the memset, we can just ignore the tail. Technically we're only // interested in the bytes from MemSetSize..CopySize here, but as we can't // easily represent this location, we use the full 0..CopySize range. MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy); bool CanReduceSize = false; if (EnableMemorySSA) { MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet); MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( MemSetAccess->getDefiningAccess(), MemCpyLoc); if (auto *MD = dyn_cast(Clobber)) if (hasUndefContentsMSSA(MSSA, AA, MemCpy->getSource(), MD, CopySize)) CanReduceSize = true; } else { MemDepResult DepInfo = MD->getPointerDependencyFrom( MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent()); if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize)) CanReduceSize = true; } if (!CanReduceSize) return false; CopySize = MemSetSize; } } IRBuilder<> Builder(MemCpy); Instruction *NewM = Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1), CopySize, MaybeAlign(MemCpy->getDestAlignment())); if (MSSAU) { auto *LastDef = cast(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)); auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); MSSAU->insertDef(cast(NewAccess), 
/*RenameUses=*/true); } return true; } /// Perform simplification of memcpy's. If we have memcpy A /// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite /// B to be a memcpy from X to Z (or potentially a memmove, depending on /// circumstances). This allows later passes to remove the first memcpy /// altogether. bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) { // We can only optimize non-volatile memcpy's. if (M->isVolatile()) return false; // If the source and destination of the memcpy are the same, then zap it. if (M->getSource() == M->getDest()) { ++BBI; eraseInstruction(M); return true; } // If copying from a constant, try to turn the memcpy into a memset. if (GlobalVariable *GV = dyn_cast(M->getSource())) if (GV->isConstant() && GV->hasDefinitiveInitializer()) if (Value *ByteVal = isBytewiseValue(GV->getInitializer(), M->getModule()->getDataLayout())) { IRBuilder<> Builder(M); Instruction *NewM = Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(), MaybeAlign(M->getDestAlignment()), false); if (MSSAU) { auto *LastDef = cast(MSSAU->getMemorySSA()->getMemoryAccess(M)); auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); MSSAU->insertDef(cast(NewAccess), /*RenameUses=*/true); } eraseInstruction(M); ++NumCpyToSet; return true; } if (EnableMemorySSA) { MemoryUseOrDef *MA = MSSA->getMemoryAccess(M); MemoryAccess *AnyClobber = MSSA->getWalker()->getClobberingMemoryAccess(MA); MemoryLocation DestLoc = MemoryLocation::getForDest(M); const MemoryAccess *DestClobber = MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc); // Try to turn a partially redundant memset + memcpy into // memcpy + smaller memset. We don't need the memcpy size for this. // The memcpy most post-dom the memset, so limit this to the same basic // block. A non-local generalization is likely not worthwhile. 
if (auto *MD = dyn_cast(DestClobber)) if (auto *MDep = dyn_cast_or_null(MD->getMemoryInst())) if (DestClobber->getBlock() == M->getParent()) if (processMemSetMemCpyDependence(M, MDep)) return true; MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess( AnyClobber, MemoryLocation::getForSource(M)); // There are four possible optimizations we can do for memcpy: // a) memcpy-memcpy xform which exposes redundance for DSE. // b) call-memcpy xform for return slot optimization. // c) memcpy from freshly alloca'd space or space that has just started // its lifetime copies undefined data, and we can therefore eliminate // the memcpy in favor of the data that was already at the destination. // d) memcpy from a just-memset'd source can be turned into memset. if (auto *MD = dyn_cast(SrcClobber)) { if (Instruction *MI = MD->getMemoryInst()) { if (ConstantInt *CopySize = dyn_cast(M->getLength())) { if (auto *C = dyn_cast(MI)) { // The memcpy must post-dom the call. Limit to the same block for // now. Additionally, we need to ensure that there are no accesses // to dest between the call and the memcpy. Accesses to src will be // checked by performCallSlotOptzn(). // TODO: Support non-local call-slot optimization? if (C->getParent() == M->getParent() && !accessedBetween(*AA, DestLoc, MD, MA)) { // FIXME: Can we pass in either of dest/src alignment here instead // of conservatively taking the minimum? 
Align Alignment = std::min(M->getDestAlign().valueOrOne(), M->getSourceAlign().valueOrOne()); - if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(), - CopySize->getZExtValue(), Alignment, - C)) { + if (performCallSlotOptzn( + M, M, M->getDest(), M->getSource(), + TypeSize::getFixed(CopySize->getZExtValue()), Alignment, + C)) { LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n" << " call: " << *C << "\n" << " memcpy: " << *M << "\n"); eraseInstruction(M); ++NumMemCpyInstr; return true; } } } } if (auto *MDep = dyn_cast(MI)) return processMemCpyMemCpyDependence(M, MDep); if (auto *MDep = dyn_cast(MI)) { if (performMemCpyToMemSetOptzn(M, MDep)) { LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n"); eraseInstruction(M); ++NumCpyToSet; return true; } } } if (hasUndefContentsMSSA(MSSA, AA, M->getSource(), MD, M->getLength())) { LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n"); eraseInstruction(M); ++NumMemCpyInstr; return true; } } } else { MemDepResult DepInfo = MD->getDependency(M); // Try to turn a partially redundant memset + memcpy into // memcpy + smaller memset. We don't need the memcpy size for this. if (DepInfo.isClobber()) if (MemSetInst *MDep = dyn_cast(DepInfo.getInst())) if (processMemSetMemCpyDependence(M, MDep)) return true; // There are four possible optimizations we can do for memcpy: // a) memcpy-memcpy xform which exposes redundance for DSE. // b) call-memcpy xform for return slot optimization. // c) memcpy from freshly alloca'd space or space that has just started // its lifetime copies undefined data, and we can therefore eliminate // the memcpy in favor of the data that was already at the destination. // d) memcpy from a just-memset'd source can be turned into memset. if (ConstantInt *CopySize = dyn_cast(M->getLength())) { if (DepInfo.isClobber()) { if (CallInst *C = dyn_cast(DepInfo.getInst())) { // FIXME: Can we pass in either of dest/src alignment here instead // of conservatively taking the minimum? 
Align Alignment = std::min(M->getDestAlign().valueOrOne(), M->getSourceAlign().valueOrOne()); if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(), - CopySize->getZExtValue(), Alignment, C)) { + TypeSize::getFixed(CopySize->getZExtValue()), + Alignment, C)) { eraseInstruction(M); ++NumMemCpyInstr; return true; } } } } MemoryLocation SrcLoc = MemoryLocation::getForSource(M); MemDepResult SrcDepInfo = MD->getPointerDependencyFrom( SrcLoc, true, M->getIterator(), M->getParent()); if (SrcDepInfo.isClobber()) { if (MemCpyInst *MDep = dyn_cast(SrcDepInfo.getInst())) return processMemCpyMemCpyDependence(M, MDep); } else if (SrcDepInfo.isDef()) { if (hasUndefContents(SrcDepInfo.getInst(), M->getLength())) { eraseInstruction(M); ++NumMemCpyInstr; return true; } } if (SrcDepInfo.isClobber()) if (MemSetInst *MDep = dyn_cast(SrcDepInfo.getInst())) if (performMemCpyToMemSetOptzn(M, MDep)) { eraseInstruction(M); ++NumCpyToSet; return true; } } return false; } /// Transforms memmove calls to memcpy calls when the src/dst are guaranteed /// not to alias. bool MemCpyOptPass::processMemMove(MemMoveInst *M) { if (!TLI->has(LibFunc_memmove)) return false; // See if the pointers alias. if (!AA->isNoAlias(MemoryLocation::getForDest(M), MemoryLocation::getForSource(M))) return false; LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M << "\n"); // If not, then we know we can transform this. Type *ArgTys[3] = { M->getRawDest()->getType(), M->getRawSource()->getType(), M->getLength()->getType() }; M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(), Intrinsic::memcpy, ArgTys)); // For MemorySSA nothing really changes (except that memcpy may imply stricter // aliasing guarantees). // MemDep may have over conservative information about this instruction, just // conservatively flush it from the cache. if (MD) MD->removeInstruction(M); ++NumMoveToCpy; return true; } /// This is called on every byval argument in call sites. 
bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) { const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout(); // Find out what feeds this byval argument. Value *ByValArg = CB.getArgOperand(ArgNo); Type *ByValTy = CB.getParamByValType(ArgNo); - uint64_t ByValSize = DL.getTypeAllocSize(ByValTy); + TypeSize ByValSize = DL.getTypeAllocSize(ByValTy); MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize)); MemCpyInst *MDep = nullptr; if (EnableMemorySSA) { MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB); if (!CallAccess) return false; MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( CallAccess->getDefiningAccess(), Loc); if (auto *MD = dyn_cast(Clobber)) MDep = dyn_cast_or_null(MD->getMemoryInst()); } else { MemDepResult DepInfo = MD->getPointerDependencyFrom( Loc, true, CB.getIterator(), CB.getParent()); if (!DepInfo.isClobber()) return false; MDep = dyn_cast(DepInfo.getInst()); } // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by // a memcpy, see if we can byval from the source of the memcpy instead of the // result. if (!MDep || MDep->isVolatile() || ByValArg->stripPointerCasts() != MDep->getDest()) return false; // The length of the memcpy must be larger or equal to the size of the byval. ConstantInt *C1 = dyn_cast(MDep->getLength()); - if (!C1 || C1->getValue().getZExtValue() < ByValSize) + if (!C1 || !TypeSize::isKnownGE( + TypeSize::getFixed(C1->getValue().getZExtValue()), ByValSize)) return false; // Get the alignment of the byval. If the call doesn't specify the alignment, // then it is some target specific value that we can't know. MaybeAlign ByValAlign = CB.getParamAlign(ArgNo); if (!ByValAlign) return false; // If it is greater than the memcpy, then we check to see if we can force the // source of the memcpy to the alignment we need. If we fail, we bail out. 
MaybeAlign MemDepAlign = MDep->getSourceAlign(); if ((!MemDepAlign || *MemDepAlign < *ByValAlign) && getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC, DT) < *ByValAlign) return false; // The address space of the memcpy source must match the byval argument if (MDep->getSource()->getType()->getPointerAddressSpace() != ByValArg->getType()->getPointerAddressSpace()) return false; // Verify that the copied-from memory doesn't change in between the memcpy and // the byval call. // memcpy(a <- b) // *b = 42; // foo(*a) // It would be invalid to transform the second memcpy into foo(*b). if (EnableMemorySSA) { if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep), MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(&CB))) return false; } else { // NOTE: This is conservative, it will stop on any read from the source loc, // not just the defining memcpy. MemDepResult SourceDep = MD->getPointerDependencyFrom( MemoryLocation::getForSource(MDep), false, CB.getIterator(), MDep->getParent()); if (!SourceDep.isClobber() || SourceDep.getInst() != MDep) return false; } Value *TmpCast = MDep->getSource(); if (MDep->getSource()->getType() != ByValArg->getType()) { BitCastInst *TmpBitCast = new BitCastInst(MDep->getSource(), ByValArg->getType(), "tmpcast", &CB); // Set the tmpcast's DebugLoc to MDep's TmpBitCast->setDebugLoc(MDep->getDebugLoc()); TmpCast = TmpBitCast; } LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n" << " " << *MDep << "\n" << " " << CB << "\n"); // Otherwise we're good! Update the byval argument. CB.setArgOperand(ArgNo, TmpCast); ++NumMemCpyInstr; return true; } /// Executes one iteration of MemCpyOptPass. bool MemCpyOptPass::iterateOnFunction(Function &F) { bool MadeChange = false; // Walk all instruction in the function. for (BasicBlock &BB : F) { // Skip unreachable blocks. 
For example processStore assumes that an // instruction in a BB can't be dominated by a later instruction in the // same BB (which is a scenario that can happen for an unreachable BB that // has itself as a predecessor). if (!DT->isReachableFromEntry(&BB)) continue; for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) { // Avoid invalidating the iterator. Instruction *I = &*BI++; bool RepeatInstruction = false; if (StoreInst *SI = dyn_cast(I)) MadeChange |= processStore(SI, BI); else if (MemSetInst *M = dyn_cast(I)) RepeatInstruction = processMemSet(M, BI); else if (MemCpyInst *M = dyn_cast(I)) RepeatInstruction = processMemCpy(M, BI); else if (MemMoveInst *M = dyn_cast(I)) RepeatInstruction = processMemMove(M); else if (auto *CB = dyn_cast(I)) { for (unsigned i = 0, e = CB->arg_size(); i != e; ++i) if (CB->isByValArgument(i)) MadeChange |= processByValArgument(*CB, i); } // Reprocess the instruction if desired. if (RepeatInstruction) { if (BI != BB.begin()) --BI; MadeChange = true; } } } return MadeChange; } PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) { auto *MD = !EnableMemorySSA ? &AM.getResult(F) : AM.getCachedResult(F); auto &TLI = AM.getResult(F); auto *AA = &AM.getResult(F); auto *AC = &AM.getResult(F); auto *DT = &AM.getResult(F); auto *MSSA = EnableMemorySSA ? &AM.getResult(F) : AM.getCachedResult(F); bool MadeChange = runImpl(F, MD, &TLI, AA, AC, DT, MSSA ? &MSSA->getMSSA() : nullptr); if (!MadeChange) return PreservedAnalyses::all(); PreservedAnalyses PA; PA.preserveSet(); if (MD) PA.preserve(); if (MSSA) PA.preserve(); return PA; } bool MemCpyOptPass::runImpl(Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_, AliasAnalysis *AA_, AssumptionCache *AC_, DominatorTree *DT_, MemorySSA *MSSA_) { bool MadeChange = false; MD = MD_; TLI = TLI_; AA = AA_; AC = AC_; DT = DT_; MSSA = MSSA_; MemorySSAUpdater MSSAU_(MSSA_); MSSAU = MSSA_ ? 
&MSSAU_ : nullptr; // If we don't have at least memset and memcpy, there is little point of doing // anything here. These are required by a freestanding implementation, so if // even they are disabled, there is no point in trying hard. if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy)) return false; while (true) { if (!iterateOnFunction(F)) break; MadeChange = true; } if (MSSA_ && VerifyMemorySSA) MSSA_->verifyMemorySSA(); MD = nullptr; return MadeChange; } /// This is the main transformation entry point for a function. bool MemCpyOptLegacyPass::runOnFunction(Function &F) { if (skipFunction(F)) return false; auto *MDWP = !EnableMemorySSA ? &getAnalysis() : getAnalysisIfAvailable(); auto *TLI = &getAnalysis().getTLI(F); auto *AA = &getAnalysis().getAAResults(); auto *AC = &getAnalysis().getAssumptionCache(F); auto *DT = &getAnalysis().getDomTree(); auto *MSSAWP = EnableMemorySSA ? &getAnalysis() : getAnalysisIfAvailable(); return Impl.runImpl(F, MDWP ? & MDWP->getMemDep() : nullptr, TLI, AA, AC, DT, MSSAWP ? &MSSAWP->getMSSA() : nullptr); } diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp index b9cccc2af309..b1c105258027 100644 --- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp +++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp @@ -1,3213 +1,3232 @@ ///===- SimpleLoopUnswitch.cpp - Hoist loop-invariant control flow ---------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar/SimpleLoopUnswitch.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Sequence.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/Twine.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CFG.h" #include "llvm/Analysis/CodeMetrics.h" #include "llvm/Analysis/GuardUtils.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopAnalysisManager.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopIterator.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/MemorySSA.h" #include "llvm/Analysis/MemorySSAUpdater.h" #include "llvm/Analysis/MustExecute.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/PatternMatch.h" #include "llvm/IR/Use.h" #include "llvm/IR/Value.h" #include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include "llvm/Support/Casting.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/GenericDomTree.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar/SimpleLoopUnswitch.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" #include "llvm/Transforms/Utils/ValueMapper.h" #include #include #include #include #include #define 
DEBUG_TYPE "simple-loop-unswitch" using namespace llvm; using namespace llvm::PatternMatch; STATISTIC(NumBranches, "Number of branches unswitched"); STATISTIC(NumSwitches, "Number of switches unswitched"); STATISTIC(NumGuards, "Number of guards turned into branches for unswitching"); STATISTIC(NumTrivial, "Number of unswitches that are trivial"); STATISTIC( NumCostMultiplierSkipped, "Number of unswitch candidates that had their cost multiplier skipped"); static cl::opt EnableNonTrivialUnswitch( "enable-nontrivial-unswitch", cl::init(false), cl::Hidden, cl::desc("Forcibly enables non-trivial loop unswitching rather than " "following the configuration passed into the pass.")); static cl::opt UnswitchThreshold("unswitch-threshold", cl::init(50), cl::Hidden, cl::desc("The cost threshold for unswitching a loop.")); static cl::opt EnableUnswitchCostMultiplier( "enable-unswitch-cost-multiplier", cl::init(true), cl::Hidden, cl::desc("Enable unswitch cost multiplier that prohibits exponential " "explosion in nontrivial unswitch.")); static cl::opt UnswitchSiblingsToplevelDiv( "unswitch-siblings-toplevel-div", cl::init(2), cl::Hidden, cl::desc("Toplevel siblings divisor for cost multiplier.")); static cl::opt UnswitchNumInitialUnscaledCandidates( "unswitch-num-initial-unscaled-candidates", cl::init(8), cl::Hidden, cl::desc("Number of unswitch candidates that are ignored when calculating " "cost multiplier.")); static cl::opt UnswitchGuards( "simple-loop-unswitch-guards", cl::init(true), cl::Hidden, cl::desc("If enabled, simple loop unswitching will also consider " "llvm.experimental.guard intrinsics as unswitch candidates.")); static cl::opt DropNonTrivialImplicitNullChecks( "simple-loop-unswitch-drop-non-trivial-implicit-null-checks", cl::init(false), cl::Hidden, cl::desc("If enabled, drop make.implicit metadata in unswitched implicit " "null checks to save time analyzing if we can keep it.")); static cl::opt MSSAThreshold("simple-loop-unswitch-memoryssa-threshold", 
cl::desc("Max number of memory uses to explore during " "partial unswitching analysis"), cl::init(100), cl::Hidden); /// Collect all of the loop invariant input values transitively used by the /// homogeneous instruction graph from a given root. /// /// This essentially walks from a root recursively through loop variant operands /// which have the exact same opcode and finds all inputs which are loop /// invariant. For some operations these can be re-associated and unswitched out /// of the loop entirely. static TinyPtrVector collectHomogenousInstGraphLoopInvariants(Loop &L, Instruction &Root, LoopInfo &LI) { assert(!L.isLoopInvariant(&Root) && "Only need to walk the graph if root itself is not invariant."); TinyPtrVector Invariants; bool IsRootAnd = match(&Root, m_LogicalAnd()); bool IsRootOr = match(&Root, m_LogicalOr()); // Build a worklist and recurse through operators collecting invariants. SmallVector Worklist; SmallPtrSet Visited; Worklist.push_back(&Root); Visited.insert(&Root); do { Instruction &I = *Worklist.pop_back_val(); for (Value *OpV : I.operand_values()) { // Skip constants as unswitching isn't interesting for them. if (isa(OpV)) continue; // Add it to our result if loop invariant. if (L.isLoopInvariant(OpV)) { Invariants.push_back(OpV); continue; } // If not an instruction with the same opcode, nothing we can do. Instruction *OpI = dyn_cast(OpV); if (OpI && ((IsRootAnd && match(OpI, m_LogicalAnd())) || (IsRootOr && match(OpI, m_LogicalOr())))) { // Visit this operand. if (Visited.insert(OpI).second) Worklist.push_back(OpI); } } } while (!Worklist.empty()); return Invariants; } static void replaceLoopInvariantUses(Loop &L, Value *Invariant, Constant &Replacement) { assert(!isa(Invariant) && "Why are we unswitching on a constant?"); // Replace uses of LIC in the loop with the given constant. // We use make_early_inc_range as set invalidates the iterator. 
for (Use &U : llvm::make_early_inc_range(Invariant->uses())) { Instruction *UserI = dyn_cast(U.getUser()); // Replace this use within the loop body. if (UserI && L.contains(UserI)) U.set(&Replacement); } } /// Check that all the LCSSA PHI nodes in the loop exit block have trivial /// incoming values along this edge. static bool areLoopExitPHIsLoopInvariant(Loop &L, BasicBlock &ExitingBB, BasicBlock &ExitBB) { for (Instruction &I : ExitBB) { auto *PN = dyn_cast(&I); if (!PN) // No more PHIs to check. return true; // If the incoming value for this edge isn't loop invariant the unswitch // won't be trivial. if (!L.isLoopInvariant(PN->getIncomingValueForBlock(&ExitingBB))) return false; } llvm_unreachable("Basic blocks should never be empty!"); } /// Copy a set of loop invariant values \p ToDuplicate and insert them at the /// end of \p BB and conditionally branch on the copied condition. We only /// branch on a single value. static void buildPartialUnswitchConditionalBranch(BasicBlock &BB, ArrayRef Invariants, bool Direction, BasicBlock &UnswitchedSucc, BasicBlock &NormalSucc) { IRBuilder<> IRB(&BB); Value *Cond = Direction ? IRB.CreateOr(Invariants) : IRB.CreateAnd(Invariants); IRB.CreateCondBr(Cond, Direction ? &UnswitchedSucc : &NormalSucc, Direction ? &NormalSucc : &UnswitchedSucc); } /// Copy a set of loop invariant values, and conditionally branch on them. 
static void buildPartialInvariantUnswitchConditionalBranch( BasicBlock &BB, ArrayRef ToDuplicate, bool Direction, BasicBlock &UnswitchedSucc, BasicBlock &NormalSucc, Loop &L, MemorySSAUpdater *MSSAU) { ValueToValueMapTy VMap; for (auto *Val : reverse(ToDuplicate)) { Instruction *Inst = cast(Val); Instruction *NewInst = Inst->clone(); BB.getInstList().insert(BB.end(), NewInst); RemapInstruction(NewInst, VMap, RF_NoModuleLevelChanges | RF_IgnoreMissingLocals); VMap[Val] = NewInst; if (!MSSAU) continue; MemorySSA *MSSA = MSSAU->getMemorySSA(); if (auto *MemUse = dyn_cast_or_null(MSSA->getMemoryAccess(Inst))) { auto *DefiningAccess = MemUse->getDefiningAccess(); // Get the first defining access before the loop. while (L.contains(DefiningAccess->getBlock())) { // If the defining access is a MemoryPhi, get the incoming // value for the pre-header as defining access. if (auto *MemPhi = dyn_cast(DefiningAccess)) DefiningAccess = MemPhi->getIncomingValueForBlock(L.getLoopPreheader()); else DefiningAccess = cast(DefiningAccess)->getDefiningAccess(); } MSSAU->createMemoryAccessInBB(NewInst, DefiningAccess, NewInst->getParent(), MemorySSA::BeforeTerminator); } } IRBuilder<> IRB(&BB); Value *Cond = VMap[ToDuplicate[0]]; IRB.CreateCondBr(Cond, Direction ? &UnswitchedSucc : &NormalSucc, Direction ? &NormalSucc : &UnswitchedSucc); } /// Rewrite the PHI nodes in an unswitched loop exit basic block. /// /// Requires that the loop exit and unswitched basic block are the same, and /// that the exiting block was a unique predecessor of that block. Rewrites the /// PHI nodes in that block such that what were LCSSA PHI nodes become trivial /// PHI nodes from the old preheader that now contains the unswitched /// terminator. static void rewritePHINodesForUnswitchedExitBlock(BasicBlock &UnswitchedBB, BasicBlock &OldExitingBB, BasicBlock &OldPH) { for (PHINode &PN : UnswitchedBB.phis()) { // When the loop exit is directly unswitched we just need to update the // incoming basic block. 
We loop to handle weird cases with repeated // incoming blocks, but expect to typically only have one operand here. for (auto i : seq(0, PN.getNumOperands())) { assert(PN.getIncomingBlock(i) == &OldExitingBB && "Found incoming block different from unique predecessor!"); PN.setIncomingBlock(i, &OldPH); } } } /// Rewrite the PHI nodes in the loop exit basic block and the split off /// unswitched block. /// /// Because the exit block remains an exit from the loop, this rewrites the /// LCSSA PHI nodes in it to remove the unswitched edge and introduces PHI /// nodes into the unswitched basic block to select between the value in the /// old preheader and the loop exit. static void rewritePHINodesForExitAndUnswitchedBlocks(BasicBlock &ExitBB, BasicBlock &UnswitchedBB, BasicBlock &OldExitingBB, BasicBlock &OldPH, bool FullUnswitch) { assert(&ExitBB != &UnswitchedBB && "Must have different loop exit and unswitched blocks!"); Instruction *InsertPt = &*UnswitchedBB.begin(); for (PHINode &PN : ExitBB.phis()) { auto *NewPN = PHINode::Create(PN.getType(), /*NumReservedValues*/ 2, PN.getName() + ".split", InsertPt); // Walk backwards over the old PHI node's inputs to minimize the cost of // removing each one. We have to do this weird loop manually so that we // create the same number of new incoming edges in the new PHI as we expect // each case-based edge to be included in the unswitched switch in some // cases. // FIXME: This is really, really gross. It would be much cleaner if LLVM // allowed us to create a single entry for a predecessor block without // having separate entries for each "edge" even though these edges are // required to produce identical results. for (int i = PN.getNumIncomingValues() - 1; i >= 0; --i) { if (PN.getIncomingBlock(i) != &OldExitingBB) continue; Value *Incoming = PN.getIncomingValue(i); if (FullUnswitch) // No more edge from the old exiting block to the exit block. 
PN.removeIncomingValue(i); NewPN->addIncoming(Incoming, &OldPH); } // Now replace the old PHI with the new one and wire the old one in as an // input to the new one. PN.replaceAllUsesWith(NewPN); NewPN->addIncoming(&PN, &ExitBB); } } /// Hoist the current loop up to the innermost loop containing a remaining exit. /// /// Because we've removed an exit from the loop, we may have changed the set of /// loops reachable and need to move the current loop up the loop nest or even /// to an entirely separate nest. static void hoistLoopToNewParent(Loop &L, BasicBlock &Preheader, DominatorTree &DT, LoopInfo &LI, MemorySSAUpdater *MSSAU, ScalarEvolution *SE) { // If the loop is already at the top level, we can't hoist it anywhere. Loop *OldParentL = L.getParentLoop(); if (!OldParentL) return; SmallVector Exits; L.getExitBlocks(Exits); Loop *NewParentL = nullptr; for (auto *ExitBB : Exits) if (Loop *ExitL = LI.getLoopFor(ExitBB)) if (!NewParentL || NewParentL->contains(ExitL)) NewParentL = ExitL; if (NewParentL == OldParentL) return; // The new parent loop (if different) should always contain the old one. if (NewParentL) assert(NewParentL->contains(OldParentL) && "Can only hoist this loop up the nest!"); // The preheader will need to move with the body of this loop. However, // because it isn't in this loop we also need to update the primary loop map. assert(OldParentL == LI.getLoopFor(&Preheader) && "Parent loop of this loop should contain this loop's preheader!"); LI.changeLoopFor(&Preheader, NewParentL); // Remove this loop from its old parent. OldParentL->removeChildLoop(&L); // Add the loop either to the new parent or as a top-level loop. if (NewParentL) NewParentL->addChildLoop(&L); else LI.addTopLevelLoop(&L); // Remove this loops blocks from the old parent and every other loop up the // nest until reaching the new parent. Also update all of these // no-longer-containing loops to reflect the nesting change. 
for (Loop *OldContainingL = OldParentL; OldContainingL != NewParentL; OldContainingL = OldContainingL->getParentLoop()) { llvm::erase_if(OldContainingL->getBlocksVector(), [&](const BasicBlock *BB) { return BB == &Preheader || L.contains(BB); }); OldContainingL->getBlocksSet().erase(&Preheader); for (BasicBlock *BB : L.blocks()) OldContainingL->getBlocksSet().erase(BB); // Because we just hoisted a loop out of this one, we have essentially // created new exit paths from it. That means we need to form LCSSA PHI // nodes for values used in the no-longer-nested loop. formLCSSA(*OldContainingL, DT, &LI, SE); // We shouldn't need to form dedicated exits because the exit introduced // here is the (just split by unswitching) preheader. However, after trivial // unswitching it is possible to get new non-dedicated exits out of parent // loop so let's conservatively form dedicated exit blocks and figure out // if we can optimize later. formDedicatedExitBlocks(OldContainingL, &DT, &LI, MSSAU, /*PreserveLCSSA*/ true); } } // Return the top-most loop containing ExitBB and having ExitBB as exiting block // or the loop containing ExitBB, if there is no parent loop containing ExitBB // as exiting block. static Loop *getTopMostExitingLoop(BasicBlock *ExitBB, LoopInfo &LI) { Loop *TopMost = LI.getLoopFor(ExitBB); Loop *Current = TopMost; while (Current) { if (Current->isLoopExiting(ExitBB)) TopMost = Current; Current = Current->getParentLoop(); } return TopMost; } /// Unswitch a trivial branch if the condition is loop invariant. /// /// This routine should only be called when loop code leading to the branch has /// been validated as trivial (no side effects). This routine checks if the /// condition is invariant and one of the successors is a loop exit. This /// allows us to unswitch without duplicating the loop, making it trivial. /// /// If this routine fails to unswitch the branch it returns false. 
/// /// If the branch can be unswitched, this routine splits the preheader and /// hoists the branch above that split. Preserves loop simplified form /// (splitting the exit block as necessary). It simplifies the branch within /// the loop to an unconditional branch but doesn't remove it entirely. Further /// cleanup can be done with some simplifycfg like pass. /// /// If `SE` is not null, it will be updated based on the potential loop SCEVs /// invalidated by this. static bool unswitchTrivialBranch(Loop &L, BranchInst &BI, DominatorTree &DT, LoopInfo &LI, ScalarEvolution *SE, MemorySSAUpdater *MSSAU) { assert(BI.isConditional() && "Can only unswitch a conditional branch!"); LLVM_DEBUG(dbgs() << " Trying to unswitch branch: " << BI << "\n"); // The loop invariant values that we want to unswitch. TinyPtrVector Invariants; // When true, we're fully unswitching the branch rather than just unswitching // some input conditions to the branch. bool FullUnswitch = false; if (L.isLoopInvariant(BI.getCondition())) { Invariants.push_back(BI.getCondition()); FullUnswitch = true; } else { if (auto *CondInst = dyn_cast(BI.getCondition())) Invariants = collectHomogenousInstGraphLoopInvariants(L, *CondInst, LI); if (Invariants.empty()) { LLVM_DEBUG(dbgs() << " Couldn't find invariant inputs!\n"); return false; } } // Check that one of the branch's successors exits, and which one. 
bool ExitDirection = true; int LoopExitSuccIdx = 0; auto *LoopExitBB = BI.getSuccessor(0); if (L.contains(LoopExitBB)) { ExitDirection = false; LoopExitSuccIdx = 1; LoopExitBB = BI.getSuccessor(1); if (L.contains(LoopExitBB)) { LLVM_DEBUG(dbgs() << " Branch doesn't exit the loop!\n"); return false; } } auto *ContinueBB = BI.getSuccessor(1 - LoopExitSuccIdx); auto *ParentBB = BI.getParent(); if (!areLoopExitPHIsLoopInvariant(L, *ParentBB, *LoopExitBB)) { LLVM_DEBUG(dbgs() << " Loop exit PHI's aren't loop-invariant!\n"); return false; } // When unswitching only part of the branch's condition, we need the exit // block to be reached directly from the partially unswitched input. This can // be done when the exit block is along the true edge and the branch condition // is a graph of `or` operations, or the exit block is along the false edge // and the condition is a graph of `and` operations. if (!FullUnswitch) { if (ExitDirection ? !match(BI.getCondition(), m_LogicalOr()) : !match(BI.getCondition(), m_LogicalAnd())) { LLVM_DEBUG(dbgs() << " Branch condition is in improper form for " "non-full unswitch!\n"); return false; } } LLVM_DEBUG({ dbgs() << " unswitching trivial invariant conditions for: " << BI << "\n"; for (Value *Invariant : Invariants) { dbgs() << " " << *Invariant << " == true"; if (Invariant != Invariants.back()) dbgs() << " ||"; dbgs() << "\n"; } }); // If we have scalar evolutions, we need to invalidate them including this // loop, the loop containing the exit block and the topmost parent loop // exiting via LoopExitBB. if (SE) { if (Loop *ExitL = getTopMostExitingLoop(LoopExitBB, LI)) SE->forgetLoop(ExitL); else // Forget the entire nest as this exits the entire nest. SE->forgetTopmostLoop(&L); } if (MSSAU && VerifyMemorySSA) MSSAU->getMemorySSA()->verifyMemorySSA(); // Split the preheader, so that we know that there is a safe place to insert // the conditional branch. We will change the preheader to have a conditional // branch on LoopCond. 
BasicBlock *OldPH = L.getLoopPreheader(); BasicBlock *NewPH = SplitEdge(OldPH, L.getHeader(), &DT, &LI, MSSAU); // Now that we have a place to insert the conditional branch, create a place // to branch to: this is the exit block out of the loop that we are // unswitching. We need to split this if there are other loop predecessors. // Because the loop is in simplified form, *any* other predecessor is enough. BasicBlock *UnswitchedBB; if (FullUnswitch && LoopExitBB->getUniquePredecessor()) { assert(LoopExitBB->getUniquePredecessor() == BI.getParent() && "A branch's parent isn't a predecessor!"); UnswitchedBB = LoopExitBB; } else { UnswitchedBB = SplitBlock(LoopExitBB, &LoopExitBB->front(), &DT, &LI, MSSAU); } if (MSSAU && VerifyMemorySSA) MSSAU->getMemorySSA()->verifyMemorySSA(); // Actually move the invariant uses into the unswitched position. If possible, // we do this by moving the instructions, but when doing partial unswitching // we do it by building a new merge of the values in the unswitched position. OldPH->getTerminator()->eraseFromParent(); if (FullUnswitch) { // If fully unswitching, we can use the existing branch instruction. // Splice it into the old PH to gate reaching the new preheader and re-point // its successors. OldPH->getInstList().splice(OldPH->end(), BI.getParent()->getInstList(), BI); if (MSSAU) { // Temporarily clone the terminator, to make MSSA update cheaper by // separating "insert edge" updates from "remove edge" ones. ParentBB->getInstList().push_back(BI.clone()); } else { // Create a new unconditional branch that will continue the loop as a new // terminator. BranchInst::Create(ContinueBB, ParentBB); } BI.setSuccessor(LoopExitSuccIdx, UnswitchedBB); BI.setSuccessor(1 - LoopExitSuccIdx, NewPH); } else { // Only unswitching a subset of inputs to the condition, so we will need to // build a new branch that merges the invariant inputs. 
if (ExitDirection) assert(match(BI.getCondition(), m_LogicalOr()) && "Must have an `or` of `i1`s or `select i1 X, true, Y`s for the " "condition!"); else assert(match(BI.getCondition(), m_LogicalAnd()) && "Must have an `and` of `i1`s or `select i1 X, Y, false`s for the" " condition!"); buildPartialUnswitchConditionalBranch(*OldPH, Invariants, ExitDirection, *UnswitchedBB, *NewPH); } // Update the dominator tree with the added edge. DT.insertEdge(OldPH, UnswitchedBB); // After the dominator tree was updated with the added edge, update MemorySSA // if available. if (MSSAU) { SmallVector Updates; Updates.push_back({cfg::UpdateKind::Insert, OldPH, UnswitchedBB}); MSSAU->applyInsertUpdates(Updates, DT); } // Finish updating dominator tree and memory ssa for full unswitch. if (FullUnswitch) { if (MSSAU) { // Remove the cloned branch instruction. ParentBB->getTerminator()->eraseFromParent(); // Create unconditional branch now. BranchInst::Create(ContinueBB, ParentBB); MSSAU->removeEdge(ParentBB, LoopExitBB); } DT.deleteEdge(ParentBB, LoopExitBB); } if (MSSAU && VerifyMemorySSA) MSSAU->getMemorySSA()->verifyMemorySSA(); // Rewrite the relevant PHI nodes. if (UnswitchedBB == LoopExitBB) rewritePHINodesForUnswitchedExitBlock(*UnswitchedBB, *ParentBB, *OldPH); else rewritePHINodesForExitAndUnswitchedBlocks(*LoopExitBB, *UnswitchedBB, *ParentBB, *OldPH, FullUnswitch); // The constant we can replace all of our invariants with inside the loop // body. If any of the invariants have a value other than this the loop won't // be entered. ConstantInt *Replacement = ExitDirection ? ConstantInt::getFalse(BI.getContext()) : ConstantInt::getTrue(BI.getContext()); // Since this is an i1 condition we can also trivially replace uses of it // within the loop with a constant. 
for (Value *Invariant : Invariants) replaceLoopInvariantUses(L, Invariant, *Replacement); // If this was full unswitching, we may have changed the nesting relationship // for this loop so hoist it to its correct parent if needed. if (FullUnswitch) hoistLoopToNewParent(L, *NewPH, DT, LI, MSSAU, SE); if (MSSAU && VerifyMemorySSA) MSSAU->getMemorySSA()->verifyMemorySSA(); LLVM_DEBUG(dbgs() << " done: unswitching trivial branch...\n"); ++NumTrivial; ++NumBranches; return true; } /// Unswitch a trivial switch if the condition is loop invariant. /// /// This routine should only be called when loop code leading to the switch has /// been validated as trivial (no side effects). This routine checks if the /// condition is invariant and that at least one of the successors is a loop /// exit. This allows us to unswitch without duplicating the loop, making it /// trivial. /// /// If this routine fails to unswitch the switch it returns false. /// /// If the switch can be unswitched, this routine splits the preheader and /// copies the switch above that split. If the default case is one of the /// exiting cases, it copies the non-exiting cases and points them at the new /// preheader. If the default case is not exiting, it copies the exiting cases /// and points the default at the preheader. It preserves loop simplified form /// (splitting the exit blocks as necessary). It simplifies the switch within /// the loop by removing now-dead cases. If the default case is one of those /// unswitched, it replaces its destination with a new basic block containing /// only unreachable. Such basic blocks, while technically loop exits, are not /// considered for unswitching so this is a stable transform and the same /// switch will not be revisited. If after unswitching there is only a single /// in-loop successor, the switch is further simplified to an unconditional /// branch. Still more cleanup can be done with some simplifycfg like pass. 
/// /// If `SE` is not null, it will be updated based on the potential loop SCEVs /// invalidated by this. static bool unswitchTrivialSwitch(Loop &L, SwitchInst &SI, DominatorTree &DT, LoopInfo &LI, ScalarEvolution *SE, MemorySSAUpdater *MSSAU) { LLVM_DEBUG(dbgs() << " Trying to unswitch switch: " << SI << "\n"); Value *LoopCond = SI.getCondition(); // If this isn't switching on an invariant condition, we can't unswitch it. if (!L.isLoopInvariant(LoopCond)) return false; auto *ParentBB = SI.getParent(); // The same check must be used both for the default and the exit cases. We // should never leave edges from the switch instruction to a basic block that // we are unswitching, hence the condition used to determine the default case // needs to also be used to populate ExitCaseIndices, which is then used to // remove cases from the switch. auto IsTriviallyUnswitchableExitBlock = [&](BasicBlock &BBToCheck) { // BBToCheck is not an exit block if it is inside loop L. if (L.contains(&BBToCheck)) return false; // BBToCheck is not trivial to unswitch if its phis aren't loop invariant. if (!areLoopExitPHIsLoopInvariant(L, *ParentBB, BBToCheck)) return false; // We do not unswitch a block that only has an unreachable statement, as // it's possible this is a previously unswitched block. Only unswitch if // either the terminator is not unreachable, or, if it is, it's not the only // instruction in the block. 
auto *TI = BBToCheck.getTerminator(); bool isUnreachable = isa(TI); return !isUnreachable || (isUnreachable && (BBToCheck.getFirstNonPHIOrDbg() != TI)); }; SmallVector ExitCaseIndices; for (auto Case : SI.cases()) if (IsTriviallyUnswitchableExitBlock(*Case.getCaseSuccessor())) ExitCaseIndices.push_back(Case.getCaseIndex()); BasicBlock *DefaultExitBB = nullptr; SwitchInstProfUpdateWrapper::CaseWeightOpt DefaultCaseWeight = SwitchInstProfUpdateWrapper::getSuccessorWeight(SI, 0); if (IsTriviallyUnswitchableExitBlock(*SI.getDefaultDest())) { DefaultExitBB = SI.getDefaultDest(); } else if (ExitCaseIndices.empty()) return false; LLVM_DEBUG(dbgs() << " unswitching trivial switch...\n"); if (MSSAU && VerifyMemorySSA) MSSAU->getMemorySSA()->verifyMemorySSA(); // We may need to invalidate SCEVs for the outermost loop reached by any of // the exits. Loop *OuterL = &L; if (DefaultExitBB) { // Clear out the default destination temporarily to allow accurate // predecessor lists to be examined below. SI.setDefaultDest(nullptr); // Check the loop containing this exit. Loop *ExitL = LI.getLoopFor(DefaultExitBB); if (!ExitL || ExitL->contains(OuterL)) OuterL = ExitL; } // Store the exit cases into a separate data structure and remove them from // the switch. SmallVector, 4> ExitCases; ExitCases.reserve(ExitCaseIndices.size()); SwitchInstProfUpdateWrapper SIW(SI); // We walk the case indices backwards so that we remove the last case first // and don't disrupt the earlier indices. for (unsigned Index : reverse(ExitCaseIndices)) { auto CaseI = SI.case_begin() + Index; // Compute the outer loop from this exit. Loop *ExitL = LI.getLoopFor(CaseI->getCaseSuccessor()); if (!ExitL || ExitL->contains(OuterL)) OuterL = ExitL; // Save the value of this case. auto W = SIW.getSuccessorWeight(CaseI->getSuccessorIndex()); ExitCases.emplace_back(CaseI->getCaseValue(), CaseI->getCaseSuccessor(), W); // Delete the unswitched cases. 
SIW.removeCase(CaseI); } if (SE) { if (OuterL) SE->forgetLoop(OuterL); else SE->forgetTopmostLoop(&L); } // Check if after this all of the remaining cases point at the same // successor. BasicBlock *CommonSuccBB = nullptr; if (SI.getNumCases() > 0 && all_of(drop_begin(SI.cases()), [&SI](const SwitchInst::CaseHandle &Case) { return Case.getCaseSuccessor() == SI.case_begin()->getCaseSuccessor(); })) CommonSuccBB = SI.case_begin()->getCaseSuccessor(); if (!DefaultExitBB) { // If we're not unswitching the default, we need it to match any cases to // have a common successor or if we have no cases it is the common // successor. if (SI.getNumCases() == 0) CommonSuccBB = SI.getDefaultDest(); else if (SI.getDefaultDest() != CommonSuccBB) CommonSuccBB = nullptr; } // Split the preheader, so that we know that there is a safe place to insert // the switch. BasicBlock *OldPH = L.getLoopPreheader(); BasicBlock *NewPH = SplitEdge(OldPH, L.getHeader(), &DT, &LI, MSSAU); OldPH->getTerminator()->eraseFromParent(); // Now add the unswitched switch. auto *NewSI = SwitchInst::Create(LoopCond, NewPH, ExitCases.size(), OldPH); SwitchInstProfUpdateWrapper NewSIW(*NewSI); // Rewrite the IR for the unswitched basic blocks. This requires two steps. // First, we split any exit blocks with remaining in-loop predecessors. Then // we update the PHIs in one of two ways depending on if there was a split. // We walk in reverse so that we split in the same order as the cases // appeared. This is purely for convenience of reading the resulting IR, but // it doesn't cost anything really. SmallPtrSet UnswitchedExitBBs; SmallDenseMap SplitExitBBMap; // Handle the default exit if necessary. // FIXME: It'd be great if we could merge this with the loop below but LLVM's // ranges aren't quite powerful enough yet. 
if (DefaultExitBB) { if (pred_empty(DefaultExitBB)) { UnswitchedExitBBs.insert(DefaultExitBB); rewritePHINodesForUnswitchedExitBlock(*DefaultExitBB, *ParentBB, *OldPH); } else { auto *SplitBB = SplitBlock(DefaultExitBB, &DefaultExitBB->front(), &DT, &LI, MSSAU); rewritePHINodesForExitAndUnswitchedBlocks(*DefaultExitBB, *SplitBB, *ParentBB, *OldPH, /*FullUnswitch*/ true); DefaultExitBB = SplitExitBBMap[DefaultExitBB] = SplitBB; } } // Note that we must use a reference in the for loop so that we update the // container. for (auto &ExitCase : reverse(ExitCases)) { // Grab a reference to the exit block in the pair so that we can update it. BasicBlock *ExitBB = std::get<1>(ExitCase); // If this case is the last edge into the exit block, we can simply reuse it // as it will no longer be a loop exit. No mapping necessary. if (pred_empty(ExitBB)) { // Only rewrite once. if (UnswitchedExitBBs.insert(ExitBB).second) rewritePHINodesForUnswitchedExitBlock(*ExitBB, *ParentBB, *OldPH); continue; } // Otherwise we need to split the exit block so that we retain an exit // block from the loop and a target for the unswitched condition. BasicBlock *&SplitExitBB = SplitExitBBMap[ExitBB]; if (!SplitExitBB) { // If this is the first time we see this, do the split and remember it. SplitExitBB = SplitBlock(ExitBB, &ExitBB->front(), &DT, &LI, MSSAU); rewritePHINodesForExitAndUnswitchedBlocks(*ExitBB, *SplitExitBB, *ParentBB, *OldPH, /*FullUnswitch*/ true); } // Update the case pair to point to the split block. std::get<1>(ExitCase) = SplitExitBB; } // Now add the unswitched cases. We do this in reverse order as we built them // in reverse order. for (auto &ExitCase : reverse(ExitCases)) { ConstantInt *CaseVal = std::get<0>(ExitCase); BasicBlock *UnswitchedBB = std::get<1>(ExitCase); NewSIW.addCase(CaseVal, UnswitchedBB, std::get<2>(ExitCase)); } // If the default was unswitched, re-point it and add explicit cases for // entering the loop. 
if (DefaultExitBB) { NewSIW->setDefaultDest(DefaultExitBB); NewSIW.setSuccessorWeight(0, DefaultCaseWeight); // We removed all the exit cases, so we just copy the cases to the // unswitched switch. for (const auto &Case : SI.cases()) NewSIW.addCase(Case.getCaseValue(), NewPH, SIW.getSuccessorWeight(Case.getSuccessorIndex())); } else if (DefaultCaseWeight) { // We have to set branch weight of the default case. uint64_t SW = *DefaultCaseWeight; for (const auto &Case : SI.cases()) { auto W = SIW.getSuccessorWeight(Case.getSuccessorIndex()); assert(W && "case weight must be defined as default case weight is defined"); SW += *W; } NewSIW.setSuccessorWeight(0, SW); } // If we ended up with a common successor for every path through the switch // after unswitching, rewrite it to an unconditional branch to make it easy // to recognize. Otherwise we potentially have to recognize the default case // pointing at unreachable and other complexity. if (CommonSuccBB) { BasicBlock *BB = SI.getParent(); // We may have had multiple edges to this common successor block, so remove // them as predecessors. We skip the first one, either the default or the // actual first case. bool SkippedFirst = DefaultExitBB == nullptr; for (auto Case : SI.cases()) { assert(Case.getCaseSuccessor() == CommonSuccBB && "Non-common successor!"); (void)Case; if (!SkippedFirst) { SkippedFirst = true; continue; } CommonSuccBB->removePredecessor(BB, /*KeepOneInputPHIs*/ true); } // Now nuke the switch and replace it with a direct branch. SIW.eraseFromParent(); BranchInst::Create(CommonSuccBB, BB); } else if (DefaultExitBB) { assert(SI.getNumCases() > 0 && "If we had no cases we'd have a common successor!"); // Move the last case to the default successor. This is valid as if the // default got unswitched it cannot be reached. This has the advantage of // being simple and keeping the number of edges from this switch to // successors the same, and avoiding any PHI update complexity. 
auto LastCaseI = std::prev(SI.case_end()); SI.setDefaultDest(LastCaseI->getCaseSuccessor()); SIW.setSuccessorWeight( 0, SIW.getSuccessorWeight(LastCaseI->getSuccessorIndex())); SIW.removeCase(LastCaseI); } // Walk the unswitched exit blocks and the unswitched split blocks and update // the dominator tree based on the CFG edits. While we are walking unordered // containers here, the API for applyUpdates takes an unordered list of // updates and requires them to not contain duplicates. SmallVector DTUpdates; for (auto *UnswitchedExitBB : UnswitchedExitBBs) { DTUpdates.push_back({DT.Delete, ParentBB, UnswitchedExitBB}); DTUpdates.push_back({DT.Insert, OldPH, UnswitchedExitBB}); } for (auto SplitUnswitchedPair : SplitExitBBMap) { DTUpdates.push_back({DT.Delete, ParentBB, SplitUnswitchedPair.first}); DTUpdates.push_back({DT.Insert, OldPH, SplitUnswitchedPair.second}); } if (MSSAU) { MSSAU->applyUpdates(DTUpdates, DT, /*UpdateDT=*/true); if (VerifyMemorySSA) MSSAU->getMemorySSA()->verifyMemorySSA(); } else { DT.applyUpdates(DTUpdates); } assert(DT.verify(DominatorTree::VerificationLevel::Fast)); // We may have changed the nesting relationship for this loop so hoist it to // its correct parent if needed. hoistLoopToNewParent(L, *NewPH, DT, LI, MSSAU, SE); if (MSSAU && VerifyMemorySSA) MSSAU->getMemorySSA()->verifyMemorySSA(); ++NumTrivial; ++NumSwitches; LLVM_DEBUG(dbgs() << " done: unswitching trivial switch...\n"); return true; } /// This routine scans the loop to find a branch or switch which occurs before /// any side effects occur. These can potentially be unswitched without /// duplicating the loop. If a branch or switch is successfully unswitched the /// scanning continues to see if subsequent branches or switches have become /// trivial. Once all trivial candidates have been unswitched, this routine /// returns. /// /// The return value indicates whether anything was unswitched (and therefore /// changed). 
/// /// If `SE` is not null, it will be updated based on the potential loop SCEVs /// invalidated by this. static bool unswitchAllTrivialConditions(Loop &L, DominatorTree &DT, LoopInfo &LI, ScalarEvolution *SE, MemorySSAUpdater *MSSAU) { bool Changed = false; // If loop header has only one reachable successor we should keep looking for // trivial condition candidates in the successor as well. An alternative is // to constant fold conditions and merge successors into loop header (then we // only need to check header's terminator). The reason for not doing this in // LoopUnswitch pass is that it could potentially break LoopPassManager's // invariants. Folding dead branches could either eliminate the current loop // or make other loops unreachable. LCSSA form might also not be preserved // after deleting branches. The following code keeps traversing loop header's // successors until it finds the trivial condition candidate (condition that // is not a constant). Since unswitching generates branches with constant // conditions, this scenario could be very common in practice. BasicBlock *CurrentBB = L.getHeader(); SmallPtrSet Visited; Visited.insert(CurrentBB); do { // Check if there are any side-effecting instructions (e.g. stores, calls, // volatile loads) in the part of the loop that the code *would* execute // without unswitching. if (MSSAU) // Possible early exit with MSSA if (auto *Defs = MSSAU->getMemorySSA()->getBlockDefs(CurrentBB)) if (!isa(*Defs->begin()) || (++Defs->begin() != Defs->end())) return Changed; if (llvm::any_of(*CurrentBB, [](Instruction &I) { return I.mayHaveSideEffects(); })) return Changed; Instruction *CurrentTerm = CurrentBB->getTerminator(); if (auto *SI = dyn_cast(CurrentTerm)) { // Don't bother trying to unswitch past a switch with a constant // condition. This should be removed prior to running this pass by // simplifycfg. 
if (isa(SI->getCondition())) return Changed; if (!unswitchTrivialSwitch(L, *SI, DT, LI, SE, MSSAU)) // Couldn't unswitch this one so we're done. return Changed; // Mark that we managed to unswitch something. Changed = true; // If unswitching turned the terminator into an unconditional branch then // we can continue. The unswitching logic specifically works to fold any // cases it can into an unconditional branch to make it easier to // recognize here. auto *BI = dyn_cast(CurrentBB->getTerminator()); if (!BI || BI->isConditional()) return Changed; CurrentBB = BI->getSuccessor(0); continue; } auto *BI = dyn_cast(CurrentTerm); if (!BI) // We do not understand other terminator instructions. return Changed; // Don't bother trying to unswitch past an unconditional branch or a branch // with a constant value. These should be removed by simplifycfg prior to // running this pass. if (!BI->isConditional() || isa(BI->getCondition())) return Changed; // Found a trivial condition candidate: non-foldable conditional branch. If // we fail to unswitch this, we can't do anything else that is trivial. if (!unswitchTrivialBranch(L, *BI, DT, LI, SE, MSSAU)) return Changed; // Mark that we managed to unswitch something. Changed = true; // If we only unswitched some of the conditions feeding the branch, we won't // have collapsed it to a single successor. BI = cast(CurrentBB->getTerminator()); if (BI->isConditional()) return Changed; // Follow the newly unconditional branch into its successor. CurrentBB = BI->getSuccessor(0); // When continuing, if we exit the loop or reach a previous visited block, // then we can not reach any trivial condition candidates (unfoldable // branch instructions or switch instructions) and no unswitch can happen. } while (L.contains(CurrentBB) && Visited.insert(CurrentBB).second); return Changed; } /// Build the cloned blocks for an unswitched copy of the given loop. 
/// /// The cloned blocks are inserted before the loop preheader (`LoopPH`) and /// after the split block (`SplitBB`) that will be used to select between the /// cloned and original loop. /// /// This routine handles cloning all of the necessary loop blocks and exit /// blocks including rewriting their instructions and the relevant PHI nodes. /// Any loop blocks or exit blocks which are dominated by a different successor /// than the one for this clone of the loop blocks can be trivially skipped. We /// use the `DominatingSucc` map to determine whether a block satisfies that /// property with a simple map lookup. /// /// It also correctly creates the unconditional branch in the cloned /// unswitched parent block to only point at the unswitched successor. /// /// This does not handle most of the necessary updates to `LoopInfo`. Only exit /// block splitting is correctly reflected in `LoopInfo`, essentially all of /// the cloned blocks (and their loops) are left without full `LoopInfo` /// updates. This also doesn't fully update `DominatorTree`. It adds the cloned /// blocks to them but doesn't create the cloned `DominatorTree` structure and /// instead the caller must recompute an accurate DT. It *does* correctly /// update the `AssumptionCache` provided in `AC`. static BasicBlock *buildClonedLoopBlocks( Loop &L, BasicBlock *LoopPH, BasicBlock *SplitBB, ArrayRef ExitBlocks, BasicBlock *ParentBB, BasicBlock *UnswitchedSuccBB, BasicBlock *ContinueSuccBB, const SmallDenseMap &DominatingSucc, ValueToValueMapTy &VMap, SmallVectorImpl &DTUpdates, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI, MemorySSAUpdater *MSSAU) { SmallVector NewBlocks; NewBlocks.reserve(L.getNumBlocks() + ExitBlocks.size()); // We will need to clone a bunch of blocks, wrap up the clone operation in // a helper. auto CloneBlock = [&](BasicBlock *OldBB) { // Clone the basic block and insert it before the new preheader. 
BasicBlock *NewBB = CloneBasicBlock(OldBB, VMap, ".us", OldBB->getParent()); NewBB->moveBefore(LoopPH); // Record this block and the mapping. NewBlocks.push_back(NewBB); VMap[OldBB] = NewBB; return NewBB; }; // We skip cloning blocks when they have a dominating succ that is not the // succ we are cloning for. auto SkipBlock = [&](BasicBlock *BB) { auto It = DominatingSucc.find(BB); return It != DominatingSucc.end() && It->second != UnswitchedSuccBB; }; // First, clone the preheader. auto *ClonedPH = CloneBlock(LoopPH); // Then clone all the loop blocks, skipping the ones that aren't necessary. for (auto *LoopBB : L.blocks()) if (!SkipBlock(LoopBB)) CloneBlock(LoopBB); // Split all the loop exit edges so that when we clone the exit blocks, if // any of the exit blocks are *also* a preheader for some other loop, we // don't create multiple predecessors entering the loop header. for (auto *ExitBB : ExitBlocks) { if (SkipBlock(ExitBB)) continue; // When we are going to clone an exit, we don't need to clone all the // instructions in the exit block and we want to ensure we have an easy // place to merge the CFG, so split the exit first. This is always safe to // do because there cannot be any non-loop predecessors of a loop exit in // loop simplified form. auto *MergeBB = SplitBlock(ExitBB, &ExitBB->front(), &DT, &LI, MSSAU); // Rearrange the names to make it easier to write test cases by having the // exit block carry the suffix rather than the merge block carrying the // suffix. MergeBB->takeName(ExitBB); ExitBB->setName(Twine(MergeBB->getName()) + ".split"); // Now clone the original exit block. auto *ClonedExitBB = CloneBlock(ExitBB); assert(ClonedExitBB->getTerminator()->getNumSuccessors() == 1 && "Exit block should have been split to have one successor!"); assert(ClonedExitBB->getTerminator()->getSuccessor(0) == MergeBB && "Cloned exit block has the wrong successor!"); // Remap any cloned instructions and create a merge phi node for them. 
for (auto ZippedInsts : llvm::zip_first( llvm::make_range(ExitBB->begin(), std::prev(ExitBB->end())), llvm::make_range(ClonedExitBB->begin(), std::prev(ClonedExitBB->end())))) { Instruction &I = std::get<0>(ZippedInsts); Instruction &ClonedI = std::get<1>(ZippedInsts); // The only instructions in the exit block should be PHI nodes and // potentially a landing pad. assert( (isa(I) || isa(I) || isa(I)) && "Bad instruction in exit block!"); // We should have a value map between the instruction and its clone. assert(VMap.lookup(&I) == &ClonedI && "Mismatch in the value map!"); auto *MergePN = PHINode::Create(I.getType(), /*NumReservedValues*/ 2, ".us-phi", &*MergeBB->getFirstInsertionPt()); I.replaceAllUsesWith(MergePN); MergePN->addIncoming(&I, ExitBB); MergePN->addIncoming(&ClonedI, ClonedExitBB); } } // Rewrite the instructions in the cloned blocks to refer to the instructions // in the cloned blocks. We have to do this as a second pass so that we have // everything available. Also, we have inserted new instructions which may // include assume intrinsics, so we update the assumption cache while // processing this. for (auto *ClonedBB : NewBlocks) for (Instruction &I : *ClonedBB) { RemapInstruction(&I, VMap, RF_NoModuleLevelChanges | RF_IgnoreMissingLocals); if (auto *II = dyn_cast(&I)) AC.registerAssumption(II); } // Update any PHI nodes in the cloned successors of the skipped blocks to not // have spurious incoming values. for (auto *LoopBB : L.blocks()) if (SkipBlock(LoopBB)) for (auto *SuccBB : successors(LoopBB)) if (auto *ClonedSuccBB = cast_or_null(VMap.lookup(SuccBB))) for (PHINode &PN : ClonedSuccBB->phis()) PN.removeIncomingValue(LoopBB, /*DeletePHIIfEmpty*/ false); // Remove the cloned parent as a predecessor of any successor we ended up // cloning other than the unswitched one. 
auto *ClonedParentBB = cast(VMap.lookup(ParentBB)); for (auto *SuccBB : successors(ParentBB)) { if (SuccBB == UnswitchedSuccBB) continue; auto *ClonedSuccBB = cast_or_null(VMap.lookup(SuccBB)); if (!ClonedSuccBB) continue; ClonedSuccBB->removePredecessor(ClonedParentBB, /*KeepOneInputPHIs*/ true); } // Replace the cloned branch with an unconditional branch to the cloned // unswitched successor. auto *ClonedSuccBB = cast(VMap.lookup(UnswitchedSuccBB)); Instruction *ClonedTerminator = ClonedParentBB->getTerminator(); // Trivial Simplification. If Terminator is a conditional branch and // condition becomes dead - erase it. Value *ClonedConditionToErase = nullptr; if (auto *BI = dyn_cast(ClonedTerminator)) ClonedConditionToErase = BI->getCondition(); else if (auto *SI = dyn_cast(ClonedTerminator)) ClonedConditionToErase = SI->getCondition(); ClonedTerminator->eraseFromParent(); BranchInst::Create(ClonedSuccBB, ClonedParentBB); if (ClonedConditionToErase) RecursivelyDeleteTriviallyDeadInstructions(ClonedConditionToErase, nullptr, MSSAU); // If there are duplicate entries in the PHI nodes because of multiple edges // to the unswitched successor, we need to nuke all but one as we replaced it // with a direct branch. for (PHINode &PN : ClonedSuccBB->phis()) { bool Found = false; // Loop over the incoming operands backwards so we can easily delete as we // go without invalidating the index. for (int i = PN.getNumOperands() - 1; i >= 0; --i) { if (PN.getIncomingBlock(i) != ClonedParentBB) continue; if (!Found) { Found = true; continue; } PN.removeIncomingValue(i, /*DeletePHIIfEmpty*/ false); } } // Record the domtree updates for the new blocks. SmallPtrSet SuccSet; for (auto *ClonedBB : NewBlocks) { for (auto *SuccBB : successors(ClonedBB)) if (SuccSet.insert(SuccBB).second) DTUpdates.push_back({DominatorTree::Insert, ClonedBB, SuccBB}); SuccSet.clear(); } return ClonedPH; } /// Recursively clone the specified loop and all of its children. 
/// /// The target parent loop for the clone should be provided, or can be null if /// the clone is a top-level loop. While cloning, all the blocks are mapped /// with the provided value map. The entire original loop must be present in /// the value map. The cloned loop is returned. static Loop *cloneLoopNest(Loop &OrigRootL, Loop *RootParentL, const ValueToValueMapTy &VMap, LoopInfo &LI) { auto AddClonedBlocksToLoop = [&](Loop &OrigL, Loop &ClonedL) { assert(ClonedL.getBlocks().empty() && "Must start with an empty loop!"); ClonedL.reserveBlocks(OrigL.getNumBlocks()); for (auto *BB : OrigL.blocks()) { auto *ClonedBB = cast(VMap.lookup(BB)); ClonedL.addBlockEntry(ClonedBB); if (LI.getLoopFor(BB) == &OrigL) LI.changeLoopFor(ClonedBB, &ClonedL); } }; // We specially handle the first loop because it may get cloned into // a different parent and because we most commonly are cloning leaf loops. Loop *ClonedRootL = LI.AllocateLoop(); if (RootParentL) RootParentL->addChildLoop(ClonedRootL); else LI.addTopLevelLoop(ClonedRootL); AddClonedBlocksToLoop(OrigRootL, *ClonedRootL); if (OrigRootL.isInnermost()) return ClonedRootL; // If we have a nest, we can quickly clone the entire loop nest using an // iterative approach because it is a tree. We keep the cloned parent in the // data structure to avoid repeatedly querying through a map to find it. SmallVector, 16> LoopsToClone; // Build up the loops to clone in reverse order as we'll clone them from the // back. for (Loop *ChildL : llvm::reverse(OrigRootL)) LoopsToClone.push_back({ClonedRootL, ChildL}); do { Loop *ClonedParentL, *L; std::tie(ClonedParentL, L) = LoopsToClone.pop_back_val(); Loop *ClonedL = LI.AllocateLoop(); ClonedParentL->addChildLoop(ClonedL); AddClonedBlocksToLoop(*L, *ClonedL); for (Loop *ChildL : llvm::reverse(*L)) LoopsToClone.push_back({ClonedL, ChildL}); } while (!LoopsToClone.empty()); return ClonedRootL; } /// Build the cloned loops of an original loop from unswitching. 
/// /// Because unswitching simplifies the CFG of the loop, this isn't a trivial /// operation. We need to re-verify that there even is a loop (as the backedge /// may not have been cloned), and even if there are remaining backedges the /// backedge set may be different. However, we know that each child loop is /// undisturbed, we only need to find where to place each child loop within /// either any parent loop or within a cloned version of the original loop. /// /// Because child loops may end up cloned outside of any cloned version of the /// original loop, multiple cloned sibling loops may be created. All of them /// are returned so that the newly introduced loop nest roots can be /// identified. static void buildClonedLoops(Loop &OrigL, ArrayRef ExitBlocks, const ValueToValueMapTy &VMap, LoopInfo &LI, SmallVectorImpl &NonChildClonedLoops) { Loop *ClonedL = nullptr; auto *OrigPH = OrigL.getLoopPreheader(); auto *OrigHeader = OrigL.getHeader(); auto *ClonedPH = cast(VMap.lookup(OrigPH)); auto *ClonedHeader = cast(VMap.lookup(OrigHeader)); // We need to know the loops of the cloned exit blocks to even compute the // accurate parent loop. If we only clone exits to some parent of the // original parent, we want to clone into that outer loop. We also keep track // of the loops that our cloned exit blocks participate in. 
Loop *ParentL = nullptr; SmallVector ClonedExitsInLoops; SmallDenseMap ExitLoopMap; ClonedExitsInLoops.reserve(ExitBlocks.size()); for (auto *ExitBB : ExitBlocks) if (auto *ClonedExitBB = cast_or_null(VMap.lookup(ExitBB))) if (Loop *ExitL = LI.getLoopFor(ExitBB)) { ExitLoopMap[ClonedExitBB] = ExitL; ClonedExitsInLoops.push_back(ClonedExitBB); if (!ParentL || (ParentL != ExitL && ParentL->contains(ExitL))) ParentL = ExitL; } assert((!ParentL || ParentL == OrigL.getParentLoop() || ParentL->contains(OrigL.getParentLoop())) && "The computed parent loop should always contain (or be) the parent of " "the original loop."); // We build the set of blocks dominated by the cloned header from the set of // cloned blocks out of the original loop. While not all of these will // necessarily be in the cloned loop, it is enough to establish that they // aren't in unreachable cycles, etc. SmallSetVector ClonedLoopBlocks; for (auto *BB : OrigL.blocks()) if (auto *ClonedBB = cast_or_null(VMap.lookup(BB))) ClonedLoopBlocks.insert(ClonedBB); // Rebuild the set of blocks that will end up in the cloned loop. We may have // skipped cloning some region of this loop which can in turn skip some of // the backedges so we have to rebuild the blocks in the loop based on the // backedges that remain after cloning. SmallVector Worklist; SmallPtrSet BlocksInClonedLoop; for (auto *Pred : predecessors(ClonedHeader)) { // The only possible non-loop header predecessor is the preheader because // we know we cloned the loop in simplified form. if (Pred == ClonedPH) continue; // Because the loop was in simplified form, the only non-loop predecessor // should be the preheader. assert(ClonedLoopBlocks.count(Pred) && "Found a predecessor of the loop " "header other than the preheader " "that is not part of the loop!"); // Insert this block into the loop set and on the first visit (and if it // isn't the header we're currently walking) put it into the worklist to // recurse through. 
if (BlocksInClonedLoop.insert(Pred).second && Pred != ClonedHeader) Worklist.push_back(Pred); } // If we had any backedges then there *is* a cloned loop. Put the header into // the loop set and then walk the worklist backwards to find all the blocks // that remain within the loop after cloning. if (!BlocksInClonedLoop.empty()) { BlocksInClonedLoop.insert(ClonedHeader); while (!Worklist.empty()) { BasicBlock *BB = Worklist.pop_back_val(); assert(BlocksInClonedLoop.count(BB) && "Didn't put block into the loop set!"); // Insert any predecessors that are in the possible set into the cloned // set, and if the insert is successful, add them to the worklist. Note // that we filter on the blocks that are definitely reachable via the // backedge to the loop header so we may prune out dead code within the // cloned loop. for (auto *Pred : predecessors(BB)) if (ClonedLoopBlocks.count(Pred) && BlocksInClonedLoop.insert(Pred).second) Worklist.push_back(Pred); } ClonedL = LI.AllocateLoop(); if (ParentL) { ParentL->addBasicBlockToLoop(ClonedPH, LI); ParentL->addChildLoop(ClonedL); } else { LI.addTopLevelLoop(ClonedL); } NonChildClonedLoops.push_back(ClonedL); ClonedL->reserveBlocks(BlocksInClonedLoop.size()); // We don't want to just add the cloned loop blocks based on how we // discovered them. The original order of blocks was carefully built in // a way that doesn't rely on predecessor ordering. Rather than re-invent // that logic, we just re-walk the original blocks (and those of the child // loops) and filter them as we add them into the cloned loop. for (auto *BB : OrigL.blocks()) { auto *ClonedBB = cast_or_null(VMap.lookup(BB)); if (!ClonedBB || !BlocksInClonedLoop.count(ClonedBB)) continue; // Directly add the blocks that are only in this loop. if (LI.getLoopFor(BB) == &OrigL) { ClonedL->addBasicBlockToLoop(ClonedBB, LI); continue; } // We want to manually add it to this loop and parents. 
// Registering it with LoopInfo will happen when we clone the top // loop for this block. for (Loop *PL = ClonedL; PL; PL = PL->getParentLoop()) PL->addBlockEntry(ClonedBB); } // Now add each child loop whose header remains within the cloned loop. All // of the blocks within the loop must satisfy the same constraints as the // header so once we pass the header checks we can just clone the entire // child loop nest. for (Loop *ChildL : OrigL) { auto *ClonedChildHeader = cast_or_null(VMap.lookup(ChildL->getHeader())); if (!ClonedChildHeader || !BlocksInClonedLoop.count(ClonedChildHeader)) continue; #ifndef NDEBUG // We should never have a cloned child loop header but fail to have // all of the blocks for that child loop. for (auto *ChildLoopBB : ChildL->blocks()) assert(BlocksInClonedLoop.count( cast(VMap.lookup(ChildLoopBB))) && "Child cloned loop has a header within the cloned outer " "loop but not all of its blocks!"); #endif cloneLoopNest(*ChildL, ClonedL, VMap, LI); } } // Now that we've handled all the components of the original loop that were // cloned into a new loop, we still need to handle anything from the original // loop that wasn't in a cloned loop. // Figure out what blocks are left to place within any loop nest containing // the unswitched loop. If we never formed a loop, the cloned PH is one of // them. SmallPtrSet UnloopedBlockSet; if (BlocksInClonedLoop.empty()) UnloopedBlockSet.insert(ClonedPH); for (auto *ClonedBB : ClonedLoopBlocks) if (!BlocksInClonedLoop.count(ClonedBB)) UnloopedBlockSet.insert(ClonedBB); // Copy the cloned exits and sort them in ascending loop depth, we'll work // backwards across these to process them inside out. The order shouldn't // matter as we're just trying to build up the map from inside-out; we use // the map in a more stably ordered way below. 
auto OrderedClonedExitsInLoops = ClonedExitsInLoops; llvm::sort(OrderedClonedExitsInLoops, [&](BasicBlock *LHS, BasicBlock *RHS) { return ExitLoopMap.lookup(LHS)->getLoopDepth() < ExitLoopMap.lookup(RHS)->getLoopDepth(); }); // Populate the existing ExitLoopMap with everything reachable from each // exit, starting from the inner most exit. while (!UnloopedBlockSet.empty() && !OrderedClonedExitsInLoops.empty()) { assert(Worklist.empty() && "Didn't clear worklist!"); BasicBlock *ExitBB = OrderedClonedExitsInLoops.pop_back_val(); Loop *ExitL = ExitLoopMap.lookup(ExitBB); // Walk the CFG back until we hit the cloned PH adding everything reachable // and in the unlooped set to this exit block's loop. Worklist.push_back(ExitBB); do { BasicBlock *BB = Worklist.pop_back_val(); // We can stop recursing at the cloned preheader (if we get there). if (BB == ClonedPH) continue; for (BasicBlock *PredBB : predecessors(BB)) { // If this pred has already been moved to our set or is part of some // (inner) loop, no update needed. if (!UnloopedBlockSet.erase(PredBB)) { assert( (BlocksInClonedLoop.count(PredBB) || ExitLoopMap.count(PredBB)) && "Predecessor not mapped to a loop!"); continue; } // We just insert into the loop set here. We'll add these blocks to the // exit loop after we build up the set in an order that doesn't rely on // predecessor order (which in turn relies on use list order). bool Inserted = ExitLoopMap.insert({PredBB, ExitL}).second; (void)Inserted; assert(Inserted && "Should only visit an unlooped block once!"); // And recurse through to its predecessors. Worklist.push_back(PredBB); } } while (!Worklist.empty()); } // Now that the ExitLoopMap gives as mapping for all the non-looping cloned // blocks to their outer loops, walk the cloned blocks and the cloned exits // in their original order adding them to the correct loop. // We need a stable insertion order. We use the order of the original loop // order and map into the correct parent loop. 
for (auto *BB : llvm::concat( makeArrayRef(ClonedPH), ClonedLoopBlocks, ClonedExitsInLoops)) if (Loop *OuterL = ExitLoopMap.lookup(BB)) OuterL->addBasicBlockToLoop(BB, LI); #ifndef NDEBUG for (auto &BBAndL : ExitLoopMap) { auto *BB = BBAndL.first; auto *OuterL = BBAndL.second; assert(LI.getLoopFor(BB) == OuterL && "Failed to put all blocks into outer loops!"); } #endif // Now that all the blocks are placed into the correct containing loop in the // absence of child loops, find all the potentially cloned child loops and // clone them into whatever outer loop we placed their header into. for (Loop *ChildL : OrigL) { auto *ClonedChildHeader = cast_or_null(VMap.lookup(ChildL->getHeader())); if (!ClonedChildHeader || BlocksInClonedLoop.count(ClonedChildHeader)) continue; #ifndef NDEBUG for (auto *ChildLoopBB : ChildL->blocks()) assert(VMap.count(ChildLoopBB) && "Cloned a child loop header but not all of that loops blocks!"); #endif NonChildClonedLoops.push_back(cloneLoopNest( *ChildL, ExitLoopMap.lookup(ClonedChildHeader), VMap, LI)); } } static void deleteDeadClonedBlocks(Loop &L, ArrayRef ExitBlocks, ArrayRef> VMaps, DominatorTree &DT, MemorySSAUpdater *MSSAU) { // Find all the dead clones, and remove them from their successors. SmallVector DeadBlocks; for (BasicBlock *BB : llvm::concat(L.blocks(), ExitBlocks)) for (auto &VMap : VMaps) if (BasicBlock *ClonedBB = cast_or_null(VMap->lookup(BB))) if (!DT.isReachableFromEntry(ClonedBB)) { for (BasicBlock *SuccBB : successors(ClonedBB)) SuccBB->removePredecessor(ClonedBB); DeadBlocks.push_back(ClonedBB); } // Remove all MemorySSA in the dead blocks if (MSSAU) { SmallSetVector DeadBlockSet(DeadBlocks.begin(), DeadBlocks.end()); MSSAU->removeBlocks(DeadBlockSet); } // Drop any remaining references to break cycles. for (BasicBlock *BB : DeadBlocks) BB->dropAllReferences(); // Erase them from the IR. 
for (BasicBlock *BB : DeadBlocks) BB->eraseFromParent(); } -static void deleteDeadBlocksFromLoop(Loop &L, - SmallVectorImpl &ExitBlocks, - DominatorTree &DT, LoopInfo &LI, - MemorySSAUpdater *MSSAU) { +static void +deleteDeadBlocksFromLoop(Loop &L, + SmallVectorImpl &ExitBlocks, + DominatorTree &DT, LoopInfo &LI, + MemorySSAUpdater *MSSAU, + function_ref DestroyLoopCB) { // Find all the dead blocks tied to this loop, and remove them from their // successors. SmallSetVector DeadBlockSet; // Start with loop/exit blocks and get a transitive closure of reachable dead // blocks. SmallVector DeathCandidates(ExitBlocks.begin(), ExitBlocks.end()); DeathCandidates.append(L.blocks().begin(), L.blocks().end()); while (!DeathCandidates.empty()) { auto *BB = DeathCandidates.pop_back_val(); if (!DeadBlockSet.count(BB) && !DT.isReachableFromEntry(BB)) { for (BasicBlock *SuccBB : successors(BB)) { SuccBB->removePredecessor(BB); DeathCandidates.push_back(SuccBB); } DeadBlockSet.insert(BB); } } // Remove all MemorySSA in the dead blocks if (MSSAU) MSSAU->removeBlocks(DeadBlockSet); // Filter out the dead blocks from the exit blocks list so that it can be // used in the caller. llvm::erase_if(ExitBlocks, [&](BasicBlock *BB) { return DeadBlockSet.count(BB); }); // Walk from this loop up through its parents removing all of the dead blocks. for (Loop *ParentL = &L; ParentL; ParentL = ParentL->getParentLoop()) { for (auto *BB : DeadBlockSet) ParentL->getBlocksSet().erase(BB); llvm::erase_if(ParentL->getBlocksVector(), [&](BasicBlock *BB) { return DeadBlockSet.count(BB); }); } // Now delete the dead child loops. This raw delete will clear them // recursively. 
llvm::erase_if(L.getSubLoopsVector(), [&](Loop *ChildL) { if (!DeadBlockSet.count(ChildL->getHeader())) return false; assert(llvm::all_of(ChildL->blocks(), [&](BasicBlock *ChildBB) { return DeadBlockSet.count(ChildBB); }) && "If the child loop header is dead all blocks in the child loop must " "be dead as well!"); + DestroyLoopCB(*ChildL, ChildL->getName()); LI.destroy(ChildL); return true; }); // Remove the loop mappings for the dead blocks and drop all the references // from these blocks to others to handle cyclic references as we start // deleting the blocks themselves. for (auto *BB : DeadBlockSet) { // Check that the dominator tree has already been updated. assert(!DT.getNode(BB) && "Should already have cleared domtree!"); LI.changeLoopFor(BB, nullptr); // Drop all uses of the instructions to make sure we won't have dangling // uses in other blocks. for (auto &I : *BB) if (!I.use_empty()) I.replaceAllUsesWith(UndefValue::get(I.getType())); BB->dropAllReferences(); } // Actually delete the blocks now that they've been fully unhooked from the // IR. for (auto *BB : DeadBlockSet) BB->eraseFromParent(); } /// Recompute the set of blocks in a loop after unswitching. /// /// This walks from the original headers predecessors to rebuild the loop. We /// take advantage of the fact that new blocks can't have been added, and so we /// filter by the original loop's blocks. This also handles potentially /// unreachable code that we don't want to explore but might be found examining /// the predecessors of the header. /// /// If the original loop is no longer a loop, this will return an empty set. If /// it remains a loop, all the blocks within it will be added to the set /// (including those blocks in inner loops). static SmallPtrSet recomputeLoopBlockSet(Loop &L, LoopInfo &LI) { SmallPtrSet LoopBlockSet; auto *PH = L.getLoopPreheader(); auto *Header = L.getHeader(); // A worklist to use while walking backwards from the header. 
SmallVector Worklist; // First walk the predecessors of the header to find the backedges. This will // form the basis of our walk. for (auto *Pred : predecessors(Header)) { // Skip the preheader. if (Pred == PH) continue; // Because the loop was in simplified form, the only non-loop predecessor // is the preheader. assert(L.contains(Pred) && "Found a predecessor of the loop header other " "than the preheader that is not part of the " "loop!"); // Insert this block into the loop set and on the first visit and, if it // isn't the header we're currently walking, put it into the worklist to // recurse through. if (LoopBlockSet.insert(Pred).second && Pred != Header) Worklist.push_back(Pred); } // If no backedges were found, we're done. if (LoopBlockSet.empty()) return LoopBlockSet; // We found backedges, recurse through them to identify the loop blocks. while (!Worklist.empty()) { BasicBlock *BB = Worklist.pop_back_val(); assert(LoopBlockSet.count(BB) && "Didn't put block into the loop set!"); // No need to walk past the header. if (BB == Header) continue; // Because we know the inner loop structure remains valid we can use the // loop structure to jump immediately across the entire nested loop. // Further, because it is in loop simplified form, we can directly jump // to its preheader afterward. if (Loop *InnerL = LI.getLoopFor(BB)) if (InnerL != &L) { assert(L.contains(InnerL) && "Should not reach a loop *outside* this loop!"); // The preheader is the only possible predecessor of the loop so // insert it into the set and check whether it was already handled. auto *InnerPH = InnerL->getLoopPreheader(); assert(L.contains(InnerPH) && "Cannot contain an inner loop block " "but not contain the inner loop " "preheader!"); if (!LoopBlockSet.insert(InnerPH).second) // The only way to reach the preheader is through the loop body // itself so if it has been visited the loop is already handled. 
continue; // Insert all of the blocks (other than those already present) into // the loop set. We expect at least the block that led us to find the // inner loop to be in the block set, but we may also have other loop // blocks if they were already enqueued as predecessors of some other // outer loop block. for (auto *InnerBB : InnerL->blocks()) { if (InnerBB == BB) { assert(LoopBlockSet.count(InnerBB) && "Block should already be in the set!"); continue; } LoopBlockSet.insert(InnerBB); } // Add the preheader to the worklist so we will continue past the // loop body. Worklist.push_back(InnerPH); continue; } // Insert any predecessors that were in the original loop into the new // set, and if the insert is successful, add them to the worklist. for (auto *Pred : predecessors(BB)) if (L.contains(Pred) && LoopBlockSet.insert(Pred).second) Worklist.push_back(Pred); } assert(LoopBlockSet.count(Header) && "Cannot fail to add the header!"); // We've found all the blocks participating in the loop, return our completed // set. return LoopBlockSet; } /// Rebuild a loop after unswitching removes some subset of blocks and edges. /// /// The removal may have removed some child loops entirely but cannot have /// disturbed any remaining child loops. However, they may need to be hoisted /// to the parent loop (or to be top-level loops). The original loop may be /// completely removed. /// /// The sibling loops resulting from this update are returned. If the original /// loop remains a valid loop, it will be the first entry in this list with all /// of the newly sibling loops following it. /// /// Returns true if the loop remains a loop after unswitching, and false if it /// is no longer a loop after unswitching (and should not continue to be /// referenced). static bool rebuildLoopAfterUnswitch(Loop &L, ArrayRef ExitBlocks, LoopInfo &LI, SmallVectorImpl &HoistedLoops) { auto *PH = L.getLoopPreheader(); // Compute the actual parent loop from the exit blocks. 
Because we may have // pruned some exits the loop may be different from the original parent. Loop *ParentL = nullptr; SmallVector ExitLoops; SmallVector ExitsInLoops; ExitsInLoops.reserve(ExitBlocks.size()); for (auto *ExitBB : ExitBlocks) if (Loop *ExitL = LI.getLoopFor(ExitBB)) { ExitLoops.push_back(ExitL); ExitsInLoops.push_back(ExitBB); if (!ParentL || (ParentL != ExitL && ParentL->contains(ExitL))) ParentL = ExitL; } // Recompute the blocks participating in this loop. This may be empty if it // is no longer a loop. auto LoopBlockSet = recomputeLoopBlockSet(L, LI); // If we still have a loop, we need to re-set the loop's parent as the exit // block set changing may have moved it within the loop nest. Note that this // can only happen when this loop has a parent as it can only hoist the loop // *up* the nest. if (!LoopBlockSet.empty() && L.getParentLoop() != ParentL) { // Remove this loop's (original) blocks from all of the intervening loops. for (Loop *IL = L.getParentLoop(); IL != ParentL; IL = IL->getParentLoop()) { IL->getBlocksSet().erase(PH); for (auto *BB : L.blocks()) IL->getBlocksSet().erase(BB); llvm::erase_if(IL->getBlocksVector(), [&](BasicBlock *BB) { return BB == PH || L.contains(BB); }); } LI.changeLoopFor(PH, ParentL); L.getParentLoop()->removeChildLoop(&L); if (ParentL) ParentL->addChildLoop(&L); else LI.addTopLevelLoop(&L); } // Now we update all the blocks which are no longer within the loop. auto &Blocks = L.getBlocksVector(); auto BlocksSplitI = LoopBlockSet.empty() ? Blocks.begin() : std::stable_partition( Blocks.begin(), Blocks.end(), [&](BasicBlock *BB) { return LoopBlockSet.count(BB); }); // Before we erase the list of unlooped blocks, build a set of them. SmallPtrSet UnloopedBlocks(BlocksSplitI, Blocks.end()); if (LoopBlockSet.empty()) UnloopedBlocks.insert(PH); // Now erase these blocks from the loop. 
  // Drop the trailing (no-longer-in-loop) blocks from both the loop's
  // membership set and its block vector.
  for (auto *BB : make_range(BlocksSplitI, Blocks.end()))
    L.getBlocksSet().erase(BB);
  Blocks.erase(BlocksSplitI, Blocks.end());

  // Sort the exits in ascending loop depth, we'll work backwards across these
  // to process them inside out.
  llvm::stable_sort(ExitsInLoops, [&](BasicBlock *LHS, BasicBlock *RHS) {
    return LI.getLoopDepth(LHS) < LI.getLoopDepth(RHS);
  });

  // We'll build up a set for each exit loop.
  SmallPtrSet<BasicBlock *, 16> NewExitLoopBlocks;
  Loop *PrevExitL = L.getParentLoop(); // The deepest possible exit loop.

  // Helper that strips a set of unlooped blocks out of one loop's bookkeeping
  // (both the fast membership set and the ordered block vector).
  auto RemoveUnloopedBlocksFromLoop =
      [](Loop &L, SmallPtrSetImpl<BasicBlock *> &UnloopedBlocks) {
        for (auto *BB : UnloopedBlocks)
          L.getBlocksSet().erase(BB);
        llvm::erase_if(L.getBlocksVector(), [&](BasicBlock *BB) {
          return UnloopedBlocks.count(BB);
        });
      };

  SmallVector<BasicBlock *, 16> Worklist;
  while (!UnloopedBlocks.empty() && !ExitsInLoops.empty()) {
    assert(Worklist.empty() && "Didn't clear worklist!");
    assert(NewExitLoopBlocks.empty() && "Didn't clear loop set!");

    // Grab the next exit block, in decreasing loop depth order.
    BasicBlock *ExitBB = ExitsInLoops.pop_back_val();
    Loop &ExitL = *LI.getLoopFor(ExitBB);
    assert(ExitL.contains(&L) && "Exit loop must contain the inner loop!");

    // Erase all of the unlooped blocks from the loops between the previous
    // exit loop and this exit loop. This works because the ExitInLoops list is
    // sorted in increasing order of loop depth and thus we visit loops in
    // decreasing order of loop depth.
    for (; PrevExitL != &ExitL; PrevExitL = PrevExitL->getParentLoop())
      RemoveUnloopedBlocksFromLoop(*PrevExitL, UnloopedBlocks);

    // Walk the CFG back until we hit the cloned PH adding everything reachable
    // and in the unlooped set to this exit block's loop.
    Worklist.push_back(ExitBB);
    do {
      BasicBlock *BB = Worklist.pop_back_val();
      // We can stop recursing at the cloned preheader (if we get there).
      if (BB == PH)
        continue;

      for (BasicBlock *PredBB : predecessors(BB)) {
        // If this pred has already been moved to our set or is part of some
        // (inner) loop, no update needed.
        if (!UnloopedBlocks.erase(PredBB)) {
          assert((NewExitLoopBlocks.count(PredBB) ||
                  ExitL.contains(LI.getLoopFor(PredBB))) &&
                 "Predecessor not in a nested loop (or already visited)!");
          continue;
        }

        // We just insert into the loop set here. We'll add these blocks to the
        // exit loop after we build up the set in a deterministic order rather
        // than the predecessor-influenced visit order.
        bool Inserted = NewExitLoopBlocks.insert(PredBB).second;
        (void)Inserted;
        assert(Inserted && "Should only visit an unlooped block once!");

        // And recurse through to its predecessors.
        Worklist.push_back(PredBB);
      }
    } while (!Worklist.empty());

    // If blocks in this exit loop were directly part of the original loop (as
    // opposed to a child loop) update the map to point to this exit loop. This
    // just updates a map and so the fact that the order is unstable is fine.
    for (auto *BB : NewExitLoopBlocks)
      if (Loop *BBL = LI.getLoopFor(BB))
        if (BBL == &L || !L.contains(BBL))
          LI.changeLoopFor(BB, &ExitL);

    // We will remove the remaining unlooped blocks from this loop in the next
    // iteration or below.
    NewExitLoopBlocks.clear();
  }

  // Any remaining unlooped blocks are no longer part of any loop unless they
  // are part of some child loop.
  for (; PrevExitL; PrevExitL = PrevExitL->getParentLoop())
    RemoveUnloopedBlocksFromLoop(*PrevExitL, UnloopedBlocks);
  for (auto *BB : UnloopedBlocks)
    if (Loop *BBL = LI.getLoopFor(BB))
      if (BBL == &L || !L.contains(BBL))
        LI.changeLoopFor(BB, nullptr);

  // Sink all the child loops whose headers are no longer in the loop set to
  // the parent (or to be top level loops). We reach into the loop and directly
  // update its subloop vector to make this batch update efficient.
  auto &SubLoops = L.getSubLoopsVector();
  auto SubLoopsSplitI =
      LoopBlockSet.empty()
          ? SubLoops.begin()
          : std::stable_partition(
                SubLoops.begin(), SubLoops.end(), [&](Loop *SubL) {
                  return LoopBlockSet.count(SubL->getHeader());
                });
  for (auto *HoistedL : make_range(SubLoopsSplitI, SubLoops.end())) {
    HoistedLoops.push_back(HoistedL);
    HoistedL->setParentLoop(nullptr);

    // To compute the new parent of this hoisted loop we look at where we
    // placed the preheader above. We can't lookup the header itself because we
    // retained the mapping from the header to the hoisted loop. But the
    // preheader and header should have the exact same new parent computed
    // based on the set of exit blocks from the original loop as the preheader
    // is a predecessor of the header and so reached in the reverse walk. And
    // because the loops were all in simplified form the preheader of the
    // hoisted loop can't be part of some *other* loop.
    if (auto *NewParentL = LI.getLoopFor(HoistedL->getLoopPreheader()))
      NewParentL->addChildLoop(HoistedL);
    else
      LI.addTopLevelLoop(HoistedL);
  }
  SubLoops.erase(SubLoopsSplitI, SubLoops.end());

  // Actually delete the loop if nothing remained within it.
  if (Blocks.empty()) {
    assert(SubLoops.empty() &&
           "Failed to remove all subloops from the original loop!");
    if (Loop *ParentL = L.getParentLoop())
      ParentL->removeChildLoop(llvm::find(*ParentL, &L));
    else
      LI.removeLoop(llvm::find(LI, &L));
    // markLoopAsDeleted for L should be triggered by the caller (it is
    // typically done by using the UnswitchCB callback).
    LI.destroy(&L);
    return false;
  }

  return true;
}

/// Helper to visit a dominator subtree, invoking a callable on each node.
///
/// Returning false at any point will stop walking past that node of the tree.
template <typename CallableT>
void visitDomSubTree(DominatorTree &DT, BasicBlock *BB, CallableT Callable) {
  SmallVector<DomTreeNode *, 4> DomWorklist;
  DomWorklist.push_back(DT[BB]);
#ifndef NDEBUG
  SmallPtrSet<DomTreeNode *, 4> Visited;
  Visited.insert(DT[BB]);
#endif
  do {
    DomTreeNode *N = DomWorklist.pop_back_val();

    // Visit this node.
    if (!Callable(N->getBlock()))
      continue;

    // Accumulate the child nodes.
    for (DomTreeNode *ChildN : *N) {
      assert(Visited.insert(ChildN).second &&
             "Cannot visit a node twice when walking a tree!");
      DomWorklist.push_back(ChildN);
    }
  } while (!DomWorklist.empty());
}

static void unswitchNontrivialInvariants(
    Loop &L, Instruction &TI, ArrayRef<Value *> Invariants,
    SmallVectorImpl<BasicBlock *> &ExitBlocks, IVConditionInfo &PartialIVInfo,
    DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC,
    function_ref<void(bool, bool, ArrayRef<Loop *>)> UnswitchCB,
    ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
    function_ref<void(Loop &, StringRef)> DestroyLoopCB) {
  auto *ParentBB = TI.getParent();
  BranchInst *BI = dyn_cast<BranchInst>(&TI);
  SwitchInst *SI = BI ? nullptr : cast<SwitchInst>(&TI);

  // We can only unswitch switches, conditional branches with an invariant
  // condition, or combining invariant conditions with an instruction or
  // partially invariant instructions.
  assert((SI || (BI && BI->isConditional())) &&
         "Can only unswitch switches and conditional branch!");
  bool PartiallyInvariant = !PartialIVInfo.InstToDuplicate.empty();
  bool FullUnswitch =
      SI || (BI->getCondition() == Invariants[0] && !PartiallyInvariant);
  if (FullUnswitch)
    assert(Invariants.size() == 1 &&
           "Cannot have other invariants with full unswitching!");
  else
    assert(isa<Instruction>(BI->getCondition()) &&
           "Partial unswitching requires an instruction as the condition!");

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Constant and BBs tracking the cloned and continuing successor. When we are
  // unswitching the entire condition, this can just be trivially chosen to
  // unswitch towards `true`. However, when we are unswitching a set of
  // invariants combined with `and` or `or` or partially invariant instructions,
  // the combining operation determines the best direction to unswitch: we want
  // to unswitch the direction that will collapse the branch.
  bool Direction = true;
  int ClonedSucc = 0;
  if (!FullUnswitch) {
    Value *Cond = BI->getCondition();
    (void)Cond;
    assert(((match(Cond, m_LogicalAnd()) ^ match(Cond, m_LogicalOr())) ||
            PartiallyInvariant) &&
           "Only `or`, `and`, an `select`, partially invariant instructions "
           "can combine invariants being unswitched.");
    if (!match(BI->getCondition(), m_LogicalOr())) {
      if (match(BI->getCondition(), m_LogicalAnd()) ||
          (PartiallyInvariant && !PartialIVInfo.KnownValue->isOneValue())) {
        Direction = false;
        ClonedSucc = 1;
      }
    }
  }

  BasicBlock *RetainedSuccBB =
      BI ? BI->getSuccessor(1 - ClonedSucc) : SI->getDefaultDest();
  SmallSetVector<BasicBlock *, 4> UnswitchedSuccBBs;
  if (BI)
    UnswitchedSuccBBs.insert(BI->getSuccessor(ClonedSucc));
  else
    for (auto Case : SI->cases())
      if (Case.getCaseSuccessor() != RetainedSuccBB)
        UnswitchedSuccBBs.insert(Case.getCaseSuccessor());

  assert(!UnswitchedSuccBBs.count(RetainedSuccBB) &&
         "Should not unswitch the same successor we are retaining!");

  // The branch should be in this exact loop. Any inner loop's invariant branch
  // should be handled by unswitching that inner loop. The caller of this
  // routine should filter out any candidates that remain (but were skipped for
  // whatever reason).
  assert(LI.getLoopFor(ParentBB) == &L && "Branch in an inner loop!");

  // Compute the parent loop now before we start hacking on things.
  Loop *ParentL = L.getParentLoop();
  // Get blocks in RPO order for MSSA update, before changing the CFG.
  LoopBlocksRPO LBRPO(&L);
  if (MSSAU)
    LBRPO.perform(&LI);

  // Compute the outer-most loop containing one of our exit blocks. This is the
  // furthest up our loopnest which can be mutated, which we will use below to
  // update things.
  Loop *OuterExitL = &L;
  for (auto *ExitBB : ExitBlocks) {
    Loop *NewOuterExitL = LI.getLoopFor(ExitBB);
    if (!NewOuterExitL) {
      // We exited the entire nest with this block, so we're done.
      OuterExitL = nullptr;
      break;
    }
    if (NewOuterExitL != OuterExitL && NewOuterExitL->contains(OuterExitL))
      OuterExitL = NewOuterExitL;
  }

  // At this point, we're definitely going to unswitch something so invalidate
  // any cached information in ScalarEvolution for the outer most loop
  // containing an exit block and all nested loops.
  if (SE) {
    if (OuterExitL)
      SE->forgetLoop(OuterExitL);
    else
      SE->forgetTopmostLoop(&L);
  }

  // If the edge from this terminator to a successor dominates that successor,
  // store a map from each block in its dominator subtree to it. This lets us
  // tell when cloning for a particular successor if a block is dominated by
  // some *other* successor with a single data structure. We use this to
  // significantly reduce cloning.
  SmallDenseMap<BasicBlock *, BasicBlock *, 16> DominatingSucc;
  for (auto *SuccBB : llvm::concat<BasicBlock *const>(
           makeArrayRef(RetainedSuccBB), UnswitchedSuccBBs))
    if (SuccBB->getUniquePredecessor() ||
        llvm::all_of(predecessors(SuccBB), [&](BasicBlock *PredBB) {
          return PredBB == ParentBB || DT.dominates(SuccBB, PredBB);
        }))
      visitDomSubTree(DT, SuccBB, [&](BasicBlock *BB) {
        DominatingSucc[BB] = SuccBB;
        return true;
      });

  // Split the preheader, so that we know that there is a safe place to insert
  // the conditional branch. We will change the preheader to have a conditional
  // branch on LoopCond. The original preheader will become the split point
  // between the unswitched versions, and we will have a new preheader for the
  // original loop.
  BasicBlock *SplitBB = L.getLoopPreheader();
  BasicBlock *LoopPH = SplitEdge(SplitBB, L.getHeader(), &DT, &LI, MSSAU);

  // Keep track of the dominator tree updates needed.
  SmallVector<DominatorTree::UpdateType, 4> DTUpdates;

  // Clone the loop for each unswitched successor.
  SmallVector<std::unique_ptr<ValueToValueMapTy>, 4> VMaps;
  VMaps.reserve(UnswitchedSuccBBs.size());
  SmallDenseMap<BasicBlock *, BasicBlock *, 4> ClonedPHs;
  for (auto *SuccBB : UnswitchedSuccBBs) {
    VMaps.emplace_back(new ValueToValueMapTy());
    ClonedPHs[SuccBB] = buildClonedLoopBlocks(
        L, LoopPH, SplitBB, ExitBlocks, ParentBB, SuccBB, RetainedSuccBB,
        DominatingSucc, *VMaps.back(), DTUpdates, AC, DT, LI, MSSAU);
  }

  // Drop metadata if we may break its semantics by moving this instr into the
  // split block.
  if (TI.getMetadata(LLVMContext::MD_make_implicit)) {
    if (DropNonTrivialImplicitNullChecks)
      // Do not spend time trying to understand if we can keep it, just drop it
      // to save compile time.
      TI.setMetadata(LLVMContext::MD_make_implicit, nullptr);
    else {
      // It is only legal to preserve make.implicit metadata if we are
      // guaranteed no reach implicit null check after following this branch.
      ICFLoopSafetyInfo SafetyInfo;
      SafetyInfo.computeLoopSafetyInfo(&L);
      if (!SafetyInfo.isGuaranteedToExecute(TI, &DT, &L))
        TI.setMetadata(LLVMContext::MD_make_implicit, nullptr);
    }
  }

  // The stitching of the branched code back together depends on whether we're
  // doing full unswitching or not with the exception that we always want to
  // nuke the initial terminator placed in the split block.
  SplitBB->getTerminator()->eraseFromParent();
  if (FullUnswitch) {
    // Splice the terminator from the original loop and rewrite its
    // successors.
    SplitBB->getInstList().splice(SplitBB->end(), ParentBB->getInstList(), TI);

    // Keep a clone of the terminator for MSSA updates.
    Instruction *NewTI = TI.clone();
    ParentBB->getInstList().push_back(NewTI);

    // First wire up the moved terminator to the preheaders.
    if (BI) {
      BasicBlock *ClonedPH = ClonedPHs.begin()->second;
      BI->setSuccessor(ClonedSucc, ClonedPH);
      BI->setSuccessor(1 - ClonedSucc, LoopPH);
      DTUpdates.push_back({DominatorTree::Insert, SplitBB, ClonedPH});
    } else {
      assert(SI && "Must either be a branch or switch!");

      // Walk the cases and directly update their successors.
      assert(SI->getDefaultDest() == RetainedSuccBB &&
             "Not retaining default successor!");
      SI->setDefaultDest(LoopPH);
      for (auto &Case : SI->cases())
        if (Case.getCaseSuccessor() == RetainedSuccBB)
          Case.setSuccessor(LoopPH);
        else
          Case.setSuccessor(ClonedPHs.find(Case.getCaseSuccessor())->second);

      // We need to use the set to populate domtree updates as even when there
      // are multiple cases pointing at the same successor we only want to
      // remove and insert one edge in the domtree.
      for (BasicBlock *SuccBB : UnswitchedSuccBBs)
        DTUpdates.push_back(
            {DominatorTree::Insert, SplitBB, ClonedPHs.find(SuccBB)->second});
    }

    if (MSSAU) {
      DT.applyUpdates(DTUpdates);
      DTUpdates.clear();

      // Remove all but one edge to the retained block and all unswitched
      // blocks. This is to avoid having duplicate entries in the cloned Phis,
      // when we know we only keep a single edge for each case.
      MSSAU->removeDuplicatePhiEdgesBetween(ParentBB, RetainedSuccBB);
      for (BasicBlock *SuccBB : UnswitchedSuccBBs)
        MSSAU->removeDuplicatePhiEdgesBetween(ParentBB, SuccBB);

      for (auto &VMap : VMaps)
        MSSAU->updateForClonedLoop(LBRPO, ExitBlocks, *VMap,
                                   /*IgnoreIncomingWithNoClones=*/true);
      MSSAU->updateExitBlocksForClonedLoop(ExitBlocks, VMaps, DT);

      // Remove all edges to unswitched blocks.
      for (BasicBlock *SuccBB : UnswitchedSuccBBs)
        MSSAU->removeEdge(ParentBB, SuccBB);
    }

    // Now unhook the successor relationship as we'll be replacing
    // the terminator with a direct branch. This is much simpler for branches
    // than switches so we handle those first.
    if (BI) {
      // Remove the parent as a predecessor of the unswitched successor.
      assert(UnswitchedSuccBBs.size() == 1 &&
             "Only one possible unswitched block for a branch!");
      BasicBlock *UnswitchedSuccBB = *UnswitchedSuccBBs.begin();
      UnswitchedSuccBB->removePredecessor(ParentBB,
                                          /*KeepOneInputPHIs*/ true);
      DTUpdates.push_back({DominatorTree::Delete, ParentBB, UnswitchedSuccBB});
    } else {
      // Note that we actually want to remove the parent block as a predecessor
      // of *every* case successor. The case successor is either unswitched,
      // completely eliminating an edge from the parent to that successor, or it
      // is a duplicate edge to the retained successor as the retained successor
      // is always the default successor and as we'll replace this with a direct
      // branch we no longer need the duplicate entries in the PHI nodes.
      SwitchInst *NewSI = cast<SwitchInst>(NewTI);
      assert(NewSI->getDefaultDest() == RetainedSuccBB &&
             "Not retaining default successor!");
      for (auto &Case : NewSI->cases())
        Case.getCaseSuccessor()->removePredecessor(
            ParentBB,
            /*KeepOneInputPHIs*/ true);

      // We need to use the set to populate domtree updates as even when there
      // are multiple cases pointing at the same successor we only want to
      // remove and insert one edge in the domtree.
      for (BasicBlock *SuccBB : UnswitchedSuccBBs)
        DTUpdates.push_back({DominatorTree::Delete, ParentBB, SuccBB});
    }

    // After MSSAU update, remove the cloned terminator instruction NewTI.
    ParentBB->getTerminator()->eraseFromParent();

    // Create a new unconditional branch to the continuing block (as opposed to
    // the one cloned).
    BranchInst::Create(RetainedSuccBB, ParentBB);
  } else {
    assert(BI && "Only branches have partial unswitching.");
    assert(UnswitchedSuccBBs.size() == 1 &&
           "Only one possible unswitched block for a branch!");
    BasicBlock *ClonedPH = ClonedPHs.begin()->second;
    // When doing a partial unswitch, we have to do a bit more work to build up
    // the branch in the split block.
    if (PartiallyInvariant)
      buildPartialInvariantUnswitchConditionalBranch(
          *SplitBB, Invariants, Direction, *ClonedPH, *LoopPH, L, MSSAU);
    else
      buildPartialUnswitchConditionalBranch(*SplitBB, Invariants, Direction,
                                            *ClonedPH, *LoopPH);
    DTUpdates.push_back({DominatorTree::Insert, SplitBB, ClonedPH});

    if (MSSAU) {
      DT.applyUpdates(DTUpdates);
      DTUpdates.clear();

      // Perform MSSA cloning updates.
      for (auto &VMap : VMaps)
        MSSAU->updateForClonedLoop(LBRPO, ExitBlocks, *VMap,
                                   /*IgnoreIncomingWithNoClones=*/true);
      MSSAU->updateExitBlocksForClonedLoop(ExitBlocks, VMaps, DT);
    }
  }

  // Apply the updates accumulated above to get an up-to-date dominator tree.
  DT.applyUpdates(DTUpdates);

  // Now that we have an accurate dominator tree, first delete the dead cloned
  // blocks so that we can accurately build any cloned loops. It is important to
  // not delete the blocks from the original loop yet because we still want to
  // reference the original loop to understand the cloned loop's structure.
  deleteDeadClonedBlocks(L, ExitBlocks, VMaps, DT, MSSAU);

  // Build the cloned loop structure itself. This may be substantially
  // different from the original structure due to the simplified CFG. This also
  // handles inserting all the cloned blocks into the correct loops.
  SmallVector<Loop *, 4> NonChildClonedLoops;
  for (std::unique_ptr<ValueToValueMapTy> &VMap : VMaps)
    buildClonedLoops(L, ExitBlocks, *VMap, LI, NonChildClonedLoops);

  // Now that our cloned loops have been built, we can update the original loop.
  // First we delete the dead blocks from it and then we rebuild the loop
  // structure taking these deletions into account.
  deleteDeadBlocksFromLoop(L, ExitBlocks, DT, LI, MSSAU, DestroyLoopCB);

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  SmallVector<Loop *, 4> HoistedLoops;
  bool IsStillLoop = rebuildLoopAfterUnswitch(L, ExitBlocks, LI, HoistedLoops);

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // This transformation has a high risk of corrupting the dominator tree, and
  // the below steps to rebuild loop structures will result in hard to debug
  // errors in that case so verify that the dominator tree is sane first.
  // FIXME: Remove this when the bugs stop showing up and rely on existing
  // verification steps.
  assert(DT.verify(DominatorTree::VerificationLevel::Fast));

  if (BI && !PartiallyInvariant) {
    // If we unswitched a branch which collapses the condition to a known
    // constant we want to replace all the uses of the invariants within both
    // the original and cloned blocks. We do this here so that we can use the
    // now updated dominator tree to identify which side the users are on.
    assert(UnswitchedSuccBBs.size() == 1 &&
           "Only one possible unswitched block for a branch!");
    BasicBlock *ClonedPH = ClonedPHs.begin()->second;

    // When considering multiple partially-unswitched invariants
    // we cant just go replace them with constants in both branches.
    //
    // For 'AND' we infer that true branch ("continue") means true
    // for each invariant operand.
    // For 'OR' we can infer that false branch ("continue") means false
    // for each invariant operand.
    // So it happens that for multiple-partial case we dont replace
    // in the unswitched branch.
    bool ReplaceUnswitched =
        FullUnswitch || (Invariants.size() == 1) || PartiallyInvariant;

    ConstantInt *UnswitchedReplacement =
        Direction ? ConstantInt::getTrue(BI->getContext())
                  : ConstantInt::getFalse(BI->getContext());
    ConstantInt *ContinueReplacement =
        Direction ? ConstantInt::getFalse(BI->getContext())
                  : ConstantInt::getTrue(BI->getContext());
    for (Value *Invariant : Invariants)
      // Use make_early_inc_range here as set invalidates the iterator.
      for (Use &U : llvm::make_early_inc_range(Invariant->uses())) {
        Instruction *UserI = dyn_cast<Instruction>(U.getUser());
        if (!UserI)
          continue;

        // Replace it with the 'continue' side if in the main loop body, and the
        // unswitched if in the cloned blocks.
        if (DT.dominates(LoopPH, UserI->getParent()))
          U.set(ContinueReplacement);
        else if (ReplaceUnswitched &&
                 DT.dominates(ClonedPH, UserI->getParent()))
          U.set(UnswitchedReplacement);
      }
  }

  // We can change which blocks are exit blocks of all the cloned sibling
  // loops, the current loop, and any parent loops which shared exit blocks
  // with the current loop. As a consequence, we need to re-form LCSSA for
  // them. But we shouldn't need to re-form LCSSA for any child loops.
  // FIXME: This could be made more efficient by tracking which exit blocks are
  // new, and focusing on them, but that isn't likely to be necessary.
  //
  // In order to reasonably rebuild LCSSA we need to walk inside-out across the
  // loop nest and update every loop that could have had its exits changed. We
  // also need to cover any intervening loops. We add all of these loops to
  // a list and sort them by loop depth to achieve this without updating
  // unnecessary loops.
  auto UpdateLoop = [&](Loop &UpdateL) {
#ifndef NDEBUG
    UpdateL.verifyLoop();
    for (Loop *ChildL : UpdateL) {
      ChildL->verifyLoop();
      assert(ChildL->isRecursivelyLCSSAForm(DT, LI) &&
             "Perturbed a child loop's LCSSA form!");
    }
#endif
    // First build LCSSA for this loop so that we can preserve it when
    // forming dedicated exits. We don't want to perturb some other loop's
    // LCSSA while doing that CFG edit.
    formLCSSA(UpdateL, DT, &LI, SE);

    // For loops reached by this loop's original exit blocks we may have
    // introduced new, non-dedicated exits. At least try to re-form dedicated
    // exits for these loops. This may fail if they couldn't have dedicated
    // exits to start with.
    formDedicatedExitBlocks(&UpdateL, &DT, &LI, MSSAU, /*PreserveLCSSA*/ true);
  };

  // For non-child cloned loops and hoisted loops, we just need to update LCSSA
  // and we can do it in any order as they don't nest relative to each other.
  //
  // Also check if any of the loops we have updated have become top-level loops
  // as that will necessitate widening the outer loop scope.
  for (Loop *UpdatedL :
       llvm::concat<Loop *>(NonChildClonedLoops, HoistedLoops)) {
    UpdateLoop(*UpdatedL);
    if (UpdatedL->isOutermost())
      OuterExitL = nullptr;
  }
  if (IsStillLoop) {
    UpdateLoop(L);
    if (L.isOutermost())
      OuterExitL = nullptr;
  }

  // If the original loop had exit blocks, walk up through the outer most loop
  // of those exit blocks to update LCSSA and form updated dedicated exits.
  if (OuterExitL != &L)
    for (Loop *OuterL = ParentL; OuterL != OuterExitL;
         OuterL = OuterL->getParentLoop())
      UpdateLoop(*OuterL);

#ifndef NDEBUG
  // Verify the entire loop structure to catch any incorrect updates before we
  // progress in the pass pipeline.
  LI.verify(DT);
#endif

  // Now that we've unswitched something, make callbacks to report the changes.
  // For that we need to merge together the updated loops and the cloned loops
  // and check whether the original loop survived.
  SmallVector<Loop *, 4> SibLoops;
  for (Loop *UpdatedL : llvm::concat<Loop *>(NonChildClonedLoops, HoistedLoops))
    if (UpdatedL->getParentLoop() == ParentL)
      SibLoops.push_back(UpdatedL);
  UnswitchCB(IsStillLoop, PartiallyInvariant, SibLoops);

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  if (BI)
    ++NumBranches;
  else
    ++NumSwitches;
}

/// Recursively compute the cost of a dominator subtree based on the per-block
/// cost map provided.
///
/// The recursive computation is memoized into the provided DT-indexed cost map
/// to allow querying it for most nodes in the domtree without it becoming
/// quadratic.
static InstructionCost computeDomSubtreeCost(
    DomTreeNode &N,
    const SmallDenseMap<BasicBlock *, InstructionCost, 4> &BBCostMap,
    SmallDenseMap<DomTreeNode *, InstructionCost, 4> &DTCostMap) {
  // Blocks absent from the per-block cost map are not part of the duplication
  // cost under consideration, so neither they nor their dominator children
  // contribute anything.
  auto CostIt = BBCostMap.find(N.getBlock());
  if (CostIt == BBCostMap.end())
    return 0;

  // Reuse a previously memoized result for this node if we have one.
  auto MemoIt = DTCostMap.find(&N);
  if (MemoIt != DTCostMap.end())
    return MemoIt->second;

  // Otherwise accumulate this block's own cost plus the cost of every child
  // subtree. We deliberately do not insert a placeholder entry before
  // recursing: the recursive calls may insert into the map themselves and
  // would invalidate any reference we held.
  InstructionCost SubtreeCost = CostIt->second;
  for (DomTreeNode *ChildN : N)
    SubtreeCost += computeDomSubtreeCost(*ChildN, BBCostMap, DTCostMap);

  bool Inserted = DTCostMap.insert({&N, SubtreeCost}).second;
  (void)Inserted;
  assert(Inserted && "Should not insert a node while visiting children!");
  return SubtreeCost;
}

/// Turns a llvm.experimental.guard intrinsic into implicit control flow branch,
/// making the following replacement:
///
/// --code before guard--
/// call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
/// --code after guard--
///
/// into
///
/// --code before guard--
/// br i1 %cond, label %guarded, label %deopt
///
/// guarded:
/// --code after guard--
///
/// deopt:
/// call void (i1, ...) @llvm.experimental.guard(i1 false) [ "deopt"() ]
/// unreachable
///
/// It also makes all relevant DT and LI updates, so that all structures are in
/// valid state after this transform.
static BranchInst *
turnGuardIntoBranch(IntrinsicInst *GI, Loop &L,
                    SmallVectorImpl<BasicBlock *> &ExitBlocks,
                    DominatorTree &DT, LoopInfo &LI, MemorySSAUpdater *MSSAU) {
  SmallVector<DominatorTree::UpdateType, 4> DTUpdates;
  LLVM_DEBUG(dbgs() << "Turning " << *GI << " into a branch.\n");
  BasicBlock *CheckBB = GI->getParent();

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Remove all CheckBB's successors from DomTree. A block can be seen among
  // successors more than once, but for DomTree it should be added only once.
  SmallPtrSet<BasicBlock *, 4> Successors;
  for (auto *Succ : successors(CheckBB))
    if (Successors.insert(Succ).second)
      DTUpdates.push_back({DominatorTree::Delete, CheckBB, Succ});

  Instruction *DeoptBlockTerm =
      SplitBlockAndInsertIfThen(GI->getArgOperand(0), GI, true);
  BranchInst *CheckBI = cast<BranchInst>(CheckBB->getTerminator());
  // SplitBlockAndInsertIfThen inserts control flow that branches to
  // DeoptBlockTerm if the condition is true. We want the opposite.
  CheckBI->swapSuccessors();

  BasicBlock *GuardedBlock = CheckBI->getSuccessor(0);
  GuardedBlock->setName("guarded");
  CheckBI->getSuccessor(1)->setName("deopt");
  BasicBlock *DeoptBlock = CheckBI->getSuccessor(1);

  // We now have a new exit block.
  ExitBlocks.push_back(CheckBI->getSuccessor(1));

  if (MSSAU)
    MSSAU->moveAllAfterSpliceBlocks(CheckBB, GuardedBlock, GI);

  // Move the guard itself into the deopt block and pin its condition to
  // false: along this path the guard has definitely failed.
  GI->moveBefore(DeoptBlockTerm);
  GI->setArgOperand(0, ConstantInt::getFalse(GI->getContext()));

  // Add new successors of CheckBB into DomTree.
  for (auto *Succ : successors(CheckBB))
    DTUpdates.push_back({DominatorTree::Insert, CheckBB, Succ});

  // Now the blocks that used to be CheckBB's successors are GuardedBlock's
  // successors.
  for (auto *Succ : Successors)
    DTUpdates.push_back({DominatorTree::Insert, GuardedBlock, Succ});

  // Make proper changes to DT.
  DT.applyUpdates(DTUpdates);
  // Inform LI of a new loop block.
  L.addBasicBlockToLoop(GuardedBlock, LI);

  if (MSSAU) {
    MemoryDef *MD = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(GI));
    MSSAU->moveToPlace(MD, DeoptBlock, MemorySSA::BeforeTerminator);
    if (VerifyMemorySSA)
      MSSAU->getMemorySSA()->verifyMemorySSA();
  }

  ++NumGuards;
  return CheckBI;
}

/// Cost multiplier is a way to limit potentially exponential behavior
/// of loop-unswitch. Cost is multipied in proportion of 2^number of unswitch
/// candidates available. Also accounting for the number of "sibling" loops with
/// the idea to account for previous unswitches that already happened on this
/// cluster of loops. There was an attempt to keep this formula simple,
/// just enough to limit the worst case behavior. Even if it is not that simple
/// now it is still not an attempt to provide a detailed heuristic size
/// prediction.
///
/// TODO: Make a proper accounting of "explosion" effect for all kinds of
/// unswitch candidates, making adequate predictions instead of wild guesses.
/// That requires knowing not just the number of "remaining" candidates but
/// also costs of unswitching for each of these candidates.
static int CalculateUnswitchCostMultiplier(
    Instruction &TI, Loop &L, LoopInfo &LI, DominatorTree &DT,
    ArrayRef<std::pair<Instruction *, TinyPtrVector<Value *>>>
        UnswitchCandidates) {

  // Guards and other exiting conditions do not contribute to exponential
  // explosion as soon as they dominate the latch (otherwise there might be
  // another path to the latch remaining that does not allow to eliminate the
  // loop copy on unswitch).
  BasicBlock *Latch = L.getLoopLatch();
  BasicBlock *CondBlock = TI.getParent();
  if (DT.dominates(CondBlock, Latch) &&
      (isGuard(&TI) ||
       llvm::count_if(successors(&TI), [&L](BasicBlock *SuccBB) {
         return L.contains(SuccBB);
       }) <= 1)) {
    NumCostMultiplierSkipped++;
    return 1;
  }

  auto *ParentL = L.getParentLoop();
  int SiblingsCount = (ParentL ? ParentL->getSubLoopsVector().size()
                               : std::distance(LI.begin(), LI.end()));
  // Count amount of clones that all the candidates might cause during
  // unswitching. Branch/guard counts as 1, switch counts as log2 of its cases.
  int UnswitchedClones = 0;
  for (auto Candidate : UnswitchCandidates) {
    Instruction *CI = Candidate.first;
    BasicBlock *CondBlock = CI->getParent();
    bool SkipExitingSuccessors = DT.dominates(CondBlock, Latch);
    if (isGuard(CI)) {
      if (!SkipExitingSuccessors)
        UnswitchedClones++;
      continue;
    }
    int NonExitingSuccessors = llvm::count_if(
        successors(CondBlock), [SkipExitingSuccessors, &L](BasicBlock *SuccBB) {
          return !SkipExitingSuccessors || L.contains(SuccBB);
        });
    UnswitchedClones += Log2_32(NonExitingSuccessors);
  }

  // Ignore up to the "unscaled candidates" number of unswitch candidates
  // when calculating the power-of-two scaling of the cost. The main idea
  // with this control is to allow a small number of unswitches to happen
  // and rely more on siblings multiplier (see below) when the number
  // of candidates is small.
  unsigned ClonesPower =
      std::max(UnswitchedClones - (int)UnswitchNumInitialUnscaledCandidates, 0);

  // Allowing top-level loops to spread a bit more than nested ones.
  int SiblingsMultiplier =
      std::max((ParentL ? SiblingsCount
                        : SiblingsCount / (int)UnswitchSiblingsToplevelDiv),
               1);
  // Compute the cost multiplier in a way that won't overflow by saturating
  // at an upper bound.
  int CostMultiplier;
  if (ClonesPower > Log2_32(UnswitchThreshold) ||
      SiblingsMultiplier > UnswitchThreshold)
    CostMultiplier = UnswitchThreshold;
  else
    CostMultiplier = std::min(SiblingsMultiplier * (1 << ClonesPower),
                              (int)UnswitchThreshold);

  LLVM_DEBUG(dbgs() << "  Computed multiplier  " << CostMultiplier
                    << " (siblings " << SiblingsMultiplier << " * clones "
                    << (1 << ClonesPower) << ")"
                    << " for unswitch candidate: " << TI << "\n");
  return CostMultiplier;
}

static bool unswitchBestCondition(
    Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC,
    AAResults &AA, TargetTransformInfo &TTI,
    function_ref<void(bool, bool, ArrayRef<Loop *>)> UnswitchCB,
    ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
    function_ref<void(Loop &, StringRef)> DestroyLoopCB) {
  // Collect all invariant conditions within this loop (as opposed to an inner
  // loop which would be handled when visiting that inner loop).
  SmallVector<std::pair<Instruction *, TinyPtrVector<Value *>>, 4>
      UnswitchCandidates;

  // Whether or not we should also collect guards in the loop.
  bool CollectGuards = false;
  if (UnswitchGuards) {
    auto *GuardDecl = L.getHeader()->getParent()->getParent()->getFunction(
        Intrinsic::getName(Intrinsic::experimental_guard));
    if (GuardDecl && !GuardDecl->use_empty())
      CollectGuards = true;
  }

  IVConditionInfo PartialIVInfo;
  for (auto *BB : L.blocks()) {
    if (LI.getLoopFor(BB) != &L)
      continue;

    if (CollectGuards)
      for (auto &I : *BB)
        if (isGuard(&I)) {
          auto *Cond = cast<IntrinsicInst>(&I)->getArgOperand(0);
          // TODO: Support AND, OR conditions and partial unswitching.
          if (!isa<Constant>(Cond) && L.isLoopInvariant(Cond))
            UnswitchCandidates.push_back({&I, {Cond}});
        }

    if (auto *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
      // We can only consider fully loop-invariant switch conditions as we need
      // to completely eliminate the switch after unswitching.
      if (!isa<Constant>(SI->getCondition()) &&
          L.isLoopInvariant(SI->getCondition()) && !BB->getUniqueSuccessor())
        UnswitchCandidates.push_back({SI, {SI->getCondition()}});
      continue;
    }

    auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isConditional() || isa<Constant>(BI->getCondition()) ||
        BI->getSuccessor(0) == BI->getSuccessor(1))
      continue;

    // If BI's condition is 'select _, true, false', simplify it to confuse
    // matchers
    Value *Cond = BI->getCondition(), *CondNext;
    while (match(Cond, m_Select(m_Value(CondNext), m_One(), m_Zero())))
      Cond = CondNext;
    BI->setCondition(Cond);

    if (L.isLoopInvariant(BI->getCondition())) {
      UnswitchCandidates.push_back({BI, {BI->getCondition()}});
      continue;
    }

    Instruction &CondI = *cast<Instruction>(BI->getCondition());
    if (match(&CondI, m_CombineOr(m_LogicalAnd(), m_LogicalOr()))) {
      TinyPtrVector<Value *> Invariants =
          collectHomogenousInstGraphLoopInvariants(L, CondI, LI);
      if (Invariants.empty())
        continue;

      UnswitchCandidates.push_back({BI, std::move(Invariants)});
      continue;
    }
  }

  Instruction *PartialIVCondBranch = nullptr;
  if (MSSAU && !findOptionMDForLoop(&L, "llvm.loop.unswitch.partial.disable") &&
      !any_of(UnswitchCandidates, [&L](auto &TerminatorAndInvariants) {
        return TerminatorAndInvariants.first == L.getHeader()->getTerminator();
      })) {
    MemorySSA *MSSA = MSSAU->getMemorySSA();
    if (auto Info = hasPartialIVCondition(L, MSSAThreshold, *MSSA, AA)) {
      LLVM_DEBUG(
          dbgs() << "simple-loop-unswitch: Found partially invariant condition "
                 << *Info->InstToDuplicate[0] << "\n");
      PartialIVInfo = *Info;
      PartialIVCondBranch = L.getHeader()->getTerminator();
      TinyPtrVector<Value *> ValsToDuplicate;
      for (auto *Inst : Info->InstToDuplicate)
        ValsToDuplicate.push_back(Inst);
      UnswitchCandidates.push_back(
          {L.getHeader()->getTerminator(), std::move(ValsToDuplicate)});
    }
  }

  // If we didn't find any candidates, we're done.
  if (UnswitchCandidates.empty())
    return false;

  // Check if there are irreducible CFG cycles in this loop. If so, we cannot
  // easily unswitch non-trivial edges out of the loop.
Doing so might turn the // irreducible control flow into reducible control flow and introduce new // loops "out of thin air". If we ever discover important use cases for doing // this, we can add support to loop unswitch, but it is a lot of complexity // for what seems little or no real world benefit. LoopBlocksRPO RPOT(&L); RPOT.perform(&LI); if (containsIrreducibleCFG(RPOT, LI)) return false; SmallVector ExitBlocks; L.getUniqueExitBlocks(ExitBlocks); // We cannot unswitch if exit blocks contain a cleanuppad/catchswitch // instruction as we don't know how to split those exit blocks. // FIXME: We should teach SplitBlock to handle this and remove this // restriction. for (auto *ExitBB : ExitBlocks) { auto *I = ExitBB->getFirstNonPHI(); if (isa(I) || isa(I)) { LLVM_DEBUG(dbgs() << "Cannot unswitch because of cleanuppad/catchswitch " "in exit block\n"); return false; } } LLVM_DEBUG( dbgs() << "Considering " << UnswitchCandidates.size() << " non-trivial loop invariant conditions for unswitching.\n"); // Given that unswitching these terminators will require duplicating parts of // the loop, so we need to be able to model that cost. Compute the ephemeral // values and set up a data structure to hold per-BB costs. We cache each // block's cost so that we don't recompute this when considering different // subsets of the loop for duplication during unswitching. SmallPtrSet EphValues; CodeMetrics::collectEphemeralValues(&L, &AC, EphValues); SmallDenseMap BBCostMap; // Compute the cost of each block, as well as the total loop cost. Also, bail // out if we see instructions which are incompatible with loop unswitching // (convergent, noduplicate, or cross-basic-block tokens). // FIXME: We might be able to safely handle some of these in non-duplicated // regions. TargetTransformInfo::TargetCostKind CostKind = L.getHeader()->getParent()->hasMinSize() ? 
TargetTransformInfo::TCK_CodeSize : TargetTransformInfo::TCK_SizeAndLatency; InstructionCost LoopCost = 0; for (auto *BB : L.blocks()) { InstructionCost Cost = 0; for (auto &I : *BB) { if (EphValues.count(&I)) continue; if (I.getType()->isTokenTy() && I.isUsedOutsideOfBlock(BB)) return false; if (auto *CB = dyn_cast(&I)) if (CB->isConvergent() || CB->cannotDuplicate()) return false; Cost += TTI.getUserCost(&I, CostKind); } assert(Cost >= 0 && "Must not have negative costs!"); LoopCost += Cost; assert(LoopCost >= 0 && "Must not have negative loop costs!"); BBCostMap[BB] = Cost; } LLVM_DEBUG(dbgs() << " Total loop cost: " << LoopCost << "\n"); // Now we find the best candidate by searching for the one with the following // properties in order: // // 1) An unswitching cost below the threshold // 2) The smallest number of duplicated unswitch candidates (to avoid // creating redundant subsequent unswitching) // 3) The smallest cost after unswitching. // // We prioritize reducing fanout of unswitch candidates provided the cost // remains below the threshold because this has a multiplicative effect. // // This requires memoizing each dominator subtree to avoid redundant work. // // FIXME: Need to actually do the number of candidates part above. SmallDenseMap DTCostMap; // Given a terminator which might be unswitched, computes the non-duplicated // cost for that terminator. auto ComputeUnswitchedCost = [&](Instruction &TI, bool FullUnswitch) -> InstructionCost { BasicBlock &BB = *TI.getParent(); SmallPtrSet Visited; InstructionCost Cost = 0; for (BasicBlock *SuccBB : successors(&BB)) { // Don't count successors more than once. if (!Visited.insert(SuccBB).second) continue; // If this is a partial unswitch candidate, then it must be a conditional // branch with a condition of either `or`, `and`, their corresponding // select forms or partially invariant instructions. In that case, one of // the successors is necessarily duplicated, so don't even try to remove // its cost. 
if (!FullUnswitch) { auto &BI = cast(TI); if (match(BI.getCondition(), m_LogicalAnd())) { if (SuccBB == BI.getSuccessor(1)) continue; } else if (match(BI.getCondition(), m_LogicalOr())) { if (SuccBB == BI.getSuccessor(0)) continue; } else if ((PartialIVInfo.KnownValue->isOneValue() && SuccBB == BI.getSuccessor(0)) || (!PartialIVInfo.KnownValue->isOneValue() && SuccBB == BI.getSuccessor(1))) continue; } // This successor's domtree will not need to be duplicated after // unswitching if the edge to the successor dominates it (and thus the // entire tree). This essentially means there is no other path into this // subtree and so it will end up live in only one clone of the loop. if (SuccBB->getUniquePredecessor() || llvm::all_of(predecessors(SuccBB), [&](BasicBlock *PredBB) { return PredBB == &BB || DT.dominates(SuccBB, PredBB); })) { Cost += computeDomSubtreeCost(*DT[SuccBB], BBCostMap, DTCostMap); assert(Cost <= LoopCost && "Non-duplicated cost should never exceed total loop cost!"); } } // Now scale the cost by the number of unique successors minus one. We // subtract one because there is already at least one copy of the entire // loop. This is computing the new cost of unswitching a condition. // Note that guards always have 2 unique successors that are implicit and // will be materialized if we decide to unswitch it. int SuccessorsCount = isGuard(&TI) ? 
2 : Visited.size(); assert(SuccessorsCount > 1 && "Cannot unswitch a condition without multiple distinct successors!"); return (LoopCost - Cost) * (SuccessorsCount - 1); }; Instruction *BestUnswitchTI = nullptr; InstructionCost BestUnswitchCost = 0; ArrayRef BestUnswitchInvariants; for (auto &TerminatorAndInvariants : UnswitchCandidates) { Instruction &TI = *TerminatorAndInvariants.first; ArrayRef Invariants = TerminatorAndInvariants.second; BranchInst *BI = dyn_cast(&TI); InstructionCost CandidateCost = ComputeUnswitchedCost( TI, /*FullUnswitch*/ !BI || (Invariants.size() == 1 && Invariants[0] == BI->getCondition())); // Calculate cost multiplier which is a tool to limit potentially // exponential behavior of loop-unswitch. if (EnableUnswitchCostMultiplier) { int CostMultiplier = CalculateUnswitchCostMultiplier(TI, L, LI, DT, UnswitchCandidates); assert( (CostMultiplier > 0 && CostMultiplier <= UnswitchThreshold) && "cost multiplier needs to be in the range of 1..UnswitchThreshold"); CandidateCost *= CostMultiplier; LLVM_DEBUG(dbgs() << " Computed cost of " << CandidateCost << " (multiplier: " << CostMultiplier << ")" << " for unswitch candidate: " << TI << "\n"); } else { LLVM_DEBUG(dbgs() << " Computed cost of " << CandidateCost << " for unswitch candidate: " << TI << "\n"); } if (!BestUnswitchTI || CandidateCost < BestUnswitchCost) { BestUnswitchTI = &TI; BestUnswitchCost = CandidateCost; BestUnswitchInvariants = Invariants; } } assert(BestUnswitchTI && "Failed to find loop unswitch candidate"); if (BestUnswitchCost >= UnswitchThreshold) { LLVM_DEBUG(dbgs() << "Cannot unswitch, lowest cost found: " << BestUnswitchCost << "\n"); return false; } if (BestUnswitchTI != PartialIVCondBranch) PartialIVInfo.InstToDuplicate.clear(); // If the best candidate is a guard, turn it into a branch. 
if (isGuard(BestUnswitchTI)) BestUnswitchTI = turnGuardIntoBranch(cast(BestUnswitchTI), L, ExitBlocks, DT, LI, MSSAU); LLVM_DEBUG(dbgs() << " Unswitching non-trivial (cost = " << BestUnswitchCost << ") terminator: " << *BestUnswitchTI << "\n"); unswitchNontrivialInvariants(L, *BestUnswitchTI, BestUnswitchInvariants, ExitBlocks, PartialIVInfo, DT, LI, AC, - UnswitchCB, SE, MSSAU); + UnswitchCB, SE, MSSAU, DestroyLoopCB); return true; } /// Unswitch control flow predicated on loop invariant conditions. /// /// This first hoists all branches or switches which are trivial (IE, do not /// require duplicating any part of the loop) out of the loop body. It then /// looks at other loop invariant control flows and tries to unswitch those as /// well by cloning the loop if the result is small enough. /// /// The `DT`, `LI`, `AC`, `AA`, `TTI` parameters are required analyses that are /// also updated based on the unswitch. The `MSSA` analysis is also updated if /// valid (i.e. its use is enabled). /// /// If either `NonTrivial` is true or the flag `EnableNonTrivialUnswitch` is /// true, we will attempt to do non-trivial unswitching as well as trivial /// unswitching. /// /// The `UnswitchCB` callback provided will be run after unswitching is /// complete, with the first parameter set to `true` if the provided loop /// remains a loop, and a list of new sibling loops created. /// /// If `SE` is non-null, we will update that analysis based on the unswitching /// done. static bool unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC, AAResults &AA, TargetTransformInfo &TTI, bool Trivial, bool NonTrivial, function_ref)> UnswitchCB, - ScalarEvolution *SE, MemorySSAUpdater *MSSAU) { + ScalarEvolution *SE, MemorySSAUpdater *MSSAU, + function_ref DestroyLoopCB) { assert(L.isRecursivelyLCSSAForm(DT, LI) && "Loops must be in LCSSA form before unswitching."); // Must be in loop simplified form: we need a preheader and dedicated exits. 
if (!L.isLoopSimplifyForm()) return false; // Try trivial unswitch first before loop over other basic blocks in the loop. if (Trivial && unswitchAllTrivialConditions(L, DT, LI, SE, MSSAU)) { // If we unswitched successfully we will want to clean up the loop before // processing it further so just mark it as unswitched and return. UnswitchCB(/*CurrentLoopValid*/ true, false, {}); return true; } // Check whether we should continue with non-trivial conditions. // EnableNonTrivialUnswitch: Global variable that forces non-trivial // unswitching for testing and debugging. // NonTrivial: Parameter that enables non-trivial unswitching for this // invocation of the transform. But this should be allowed only // for targets without branch divergence. // // FIXME: If divergence analysis becomes available to a loop // transform, we should allow unswitching for non-trivial uniform // branches even on targets that have divergence. // https://bugs.llvm.org/show_bug.cgi?id=48819 bool ContinueWithNonTrivial = EnableNonTrivialUnswitch || (NonTrivial && !TTI.hasBranchDivergence()); if (!ContinueWithNonTrivial) return false; // Skip non-trivial unswitching for optsize functions. if (L.getHeader()->getParent()->hasOptSize()) return false; // Skip non-trivial unswitching for loops that cannot be cloned. if (!L.isSafeToClone()) return false; // For non-trivial unswitching, because it often creates new loops, we rely on // the pass manager to iterate on the loops rather than trying to immediately // reach a fixed point. There is no substantial advantage to iterating // internally, and if any of the new loops are simplified enough to contain // trivial unswitching we want to prefer those. // Try to unswitch the best invariant condition. We prefer this full unswitch to // a partial unswitch when possible below the threshold. 
- if (unswitchBestCondition(L, DT, LI, AC, AA, TTI, UnswitchCB, SE, MSSAU)) + if (unswitchBestCondition(L, DT, LI, AC, AA, TTI, UnswitchCB, SE, MSSAU, + DestroyLoopCB)) return true; // No other opportunities to unswitch. return false; } PreservedAnalyses SimpleLoopUnswitchPass::run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U) { Function &F = *L.getHeader()->getParent(); (void)F; LLVM_DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << L << "\n"); // Save the current loop name in a variable so that we can report it even // after it has been deleted. std::string LoopName = std::string(L.getName()); auto UnswitchCB = [&L, &U, &LoopName](bool CurrentLoopValid, bool PartiallyInvariant, ArrayRef NewLoops) { // If we did a non-trivial unswitch, we have added new (cloned) loops. if (!NewLoops.empty()) U.addSiblingLoops(NewLoops); // If the current loop remains valid, we should revisit it to catch any // other unswitch opportunities. Otherwise, we need to mark it as deleted. if (CurrentLoopValid) { if (PartiallyInvariant) { // Mark the new loop as partially unswitched, to avoid unswitching on // the same condition again. auto &Context = L.getHeader()->getContext(); MDNode *DisableUnswitchMD = MDNode::get( Context, MDString::get(Context, "llvm.loop.unswitch.partial.disable")); MDNode *NewLoopID = makePostTransformationMetadata( Context, L.getLoopID(), {"llvm.loop.unswitch.partial"}, {DisableUnswitchMD}); L.setLoopID(NewLoopID); } else U.revisitCurrentLoop(); } else U.markLoopAsDeleted(L, LoopName); }; + auto DestroyLoopCB = [&U](Loop &L, StringRef Name) { + U.markLoopAsDeleted(L, Name); + }; + Optional MSSAU; if (AR.MSSA) { MSSAU = MemorySSAUpdater(AR.MSSA); if (VerifyMemorySSA) AR.MSSA->verifyMemorySSA(); } if (!unswitchLoop(L, AR.DT, AR.LI, AR.AC, AR.AA, AR.TTI, Trivial, NonTrivial, UnswitchCB, &AR.SE, - MSSAU.hasValue() ? MSSAU.getPointer() : nullptr)) + MSSAU.hasValue() ? 
MSSAU.getPointer() : nullptr, + DestroyLoopCB)) return PreservedAnalyses::all(); if (AR.MSSA && VerifyMemorySSA) AR.MSSA->verifyMemorySSA(); // Historically this pass has had issues with the dominator tree so verify it // in asserts builds. assert(AR.DT.verify(DominatorTree::VerificationLevel::Fast)); auto PA = getLoopPassPreservedAnalyses(); if (AR.MSSA) PA.preserve(); return PA; } namespace { class SimpleLoopUnswitchLegacyPass : public LoopPass { bool NonTrivial; public: static char ID; // Pass ID, replacement for typeid explicit SimpleLoopUnswitchLegacyPass(bool NonTrivial = false) : LoopPass(ID), NonTrivial(NonTrivial) { initializeSimpleLoopUnswitchLegacyPassPass( *PassRegistry::getPassRegistry()); } bool runOnLoop(Loop *L, LPPassManager &LPM) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); AU.addRequired(); if (EnableMSSALoopDependency) { AU.addRequired(); AU.addPreserved(); } getLoopAnalysisUsage(AU); } }; } // end anonymous namespace bool SimpleLoopUnswitchLegacyPass::runOnLoop(Loop *L, LPPassManager &LPM) { if (skipLoop(L)) return false; Function &F = *L->getHeader()->getParent(); LLVM_DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << *L << "\n"); auto &DT = getAnalysis().getDomTree(); auto &LI = getAnalysis().getLoopInfo(); auto &AC = getAnalysis().getAssumptionCache(F); auto &AA = getAnalysis().getAAResults(); auto &TTI = getAnalysis().getTTI(F); MemorySSA *MSSA = nullptr; Optional MSSAU; if (EnableMSSALoopDependency) { MSSA = &getAnalysis().getMSSA(); MSSAU = MemorySSAUpdater(MSSA); } auto *SEWP = getAnalysisIfAvailable(); auto *SE = SEWP ? &SEWP->getSE() : nullptr; auto UnswitchCB = [&L, &LPM](bool CurrentLoopValid, bool PartiallyInvariant, ArrayRef NewLoops) { // If we did a non-trivial unswitch, we have added new (cloned) loops. for (auto *NewL : NewLoops) LPM.addLoop(*NewL); // If the current loop remains valid, re-add it to the queue. 
This is // a little wasteful as we'll finish processing the current loop as well, // but it is the best we can do in the old PM. if (CurrentLoopValid) { // If the current loop has been unswitched using a partially invariant // condition, we should not re-add the current loop to avoid unswitching // on the same condition again. if (!PartiallyInvariant) LPM.addLoop(*L); } else LPM.markLoopAsDeleted(*L); }; + auto DestroyLoopCB = [&LPM](Loop &L, StringRef /* Name */) { + LPM.markLoopAsDeleted(L); + }; + if (MSSA && VerifyMemorySSA) MSSA->verifyMemorySSA(); bool Changed = unswitchLoop(*L, DT, LI, AC, AA, TTI, true, NonTrivial, UnswitchCB, SE, - MSSAU.hasValue() ? MSSAU.getPointer() : nullptr); + MSSAU.hasValue() ? MSSAU.getPointer() : nullptr, + DestroyLoopCB); if (MSSA && VerifyMemorySSA) MSSA->verifyMemorySSA(); // Historically this pass has had issues with the dominator tree so verify it // in asserts builds. assert(DT.verify(DominatorTree::VerificationLevel::Fast)); return Changed; } char SimpleLoopUnswitchLegacyPass::ID = 0; INITIALIZE_PASS_BEGIN(SimpleLoopUnswitchLegacyPass, "simple-loop-unswitch", "Simple unswitch loops", false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopPass) INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_END(SimpleLoopUnswitchLegacyPass, "simple-loop-unswitch", "Simple unswitch loops", false, false) Pass *llvm::createSimpleLoopUnswitchLegacyPass(bool NonTrivial) { return new SimpleLoopUnswitchLegacyPass(NonTrivial); } diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp index 583bb379488e..d86ecbb6db00 100644 --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -1,6662 +1,6680 @@ //===- SimplifyCFG.cpp - Code to perform CFG 
simplification ---------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // Peephole optimize the CFG. // //===----------------------------------------------------------------------===// #include "llvm/ADT/APInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/ScopeExit.h" #include "llvm/ADT/Sequence.h" #include "llvm/ADT/SetOperations.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringRef.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/EHPersonalities.h" #include "llvm/Analysis/GuardUtils.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/MemorySSA.h" #include "llvm/Analysis/MemorySSAUpdater.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constant.h" #include "llvm/IR/ConstantRange.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" #include "llvm/IR/NoFolder.h" #include 
"llvm/IR/Operator.h" #include "llvm/IR/PatternMatch.h" #include "llvm/IR/PseudoProbe.h" #include "llvm/IR/Type.h" #include "llvm/IR/Use.h" #include "llvm/IR/User.h" #include "llvm/IR/Value.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/BranchProbability.h" #include "llvm/Support/Casting.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/KnownBits.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include "llvm/Transforms/Utils/ValueMapper.h" #include #include #include #include #include #include #include #include #include #include #include using namespace llvm; using namespace PatternMatch; #define DEBUG_TYPE "simplifycfg" cl::opt llvm::RequireAndPreserveDomTree( "simplifycfg-require-and-preserve-domtree", cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::desc("Temorary development switch used to gradually uplift SimplifyCFG " "into preserving DomTree,")); // Chosen as 2 so as to be cheap, but still to have enough power to fold // a select, so the "clamp" idiom (of a min followed by a max) will be caught. // To catch this, we need to fold a compare and a select, hence '2' being the // minimum reasonable default. 
static cl::opt PHINodeFoldingThreshold( "phi-node-folding-threshold", cl::Hidden, cl::init(2), cl::desc( "Control the amount of phi node folding to perform (default = 2)")); static cl::opt TwoEntryPHINodeFoldingThreshold( "two-entry-phi-node-folding-threshold", cl::Hidden, cl::init(4), cl::desc("Control the maximal total instruction cost that we are willing " "to speculatively execute to fold a 2-entry PHI node into a " "select (default = 4)")); static cl::opt HoistCommon("simplifycfg-hoist-common", cl::Hidden, cl::init(true), cl::desc("Hoist common instructions up to the parent block")); static cl::opt SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true), cl::desc("Sink common instructions down to the end block")); static cl::opt HoistCondStores( "simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true), cl::desc("Hoist conditional stores if an unconditional store precedes")); static cl::opt MergeCondStores( "simplifycfg-merge-cond-stores", cl::Hidden, cl::init(true), cl::desc("Hoist conditional stores even if an unconditional store does not " "precede - hoist multiple conditional stores into a single " "predicated store")); static cl::opt MergeCondStoresAggressively( "simplifycfg-merge-cond-stores-aggressively", cl::Hidden, cl::init(false), cl::desc("When merging conditional stores, do so even if the resultant " "basic blocks are unlikely to be if-converted as a result")); static cl::opt SpeculateOneExpensiveInst( "speculate-one-expensive-inst", cl::Hidden, cl::init(true), cl::desc("Allow exactly one expensive instruction to be speculatively " "executed")); static cl::opt MaxSpeculationDepth( "max-speculation-depth", cl::Hidden, cl::init(10), cl::desc("Limit maximum recursion depth when calculating costs of " "speculatively executed instructions")); static cl::opt MaxSmallBlockSize("simplifycfg-max-small-block-size", cl::Hidden, cl::init(10), cl::desc("Max size of a block which is still considered " "small enough to thread through")); // Two is 
chosen to allow one negation and a logical combine. static cl::opt BranchFoldThreshold("simplifycfg-branch-fold-threshold", cl::Hidden, cl::init(2), cl::desc("Maximum cost of combining conditions when " "folding branches")); STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps"); STATISTIC(NumLinearMaps, "Number of switch instructions turned into linear mapping"); STATISTIC(NumLookupTables, "Number of switch instructions turned into lookup tables"); STATISTIC( NumLookupTablesHoles, "Number of switch instructions turned into lookup tables (holes checked)"); STATISTIC(NumTableCmpReuses, "Number of reused switch table lookup compares"); STATISTIC(NumFoldValueComparisonIntoPredecessors, "Number of value comparisons folded into predecessor basic blocks"); STATISTIC(NumFoldBranchToCommonDest, "Number of branches folded into predecessor basic block"); STATISTIC( NumHoistCommonCode, "Number of common instruction 'blocks' hoisted up to the begin block"); STATISTIC(NumHoistCommonInstrs, "Number of common instructions hoisted up to the begin block"); STATISTIC(NumSinkCommonCode, "Number of common instruction 'blocks' sunk down to the end block"); STATISTIC(NumSinkCommonInstrs, "Number of common instructions sunk down to the end block"); STATISTIC(NumSpeculations, "Number of speculative executed instructions"); STATISTIC(NumInvokes, "Number of invokes with empty resume blocks simplified into calls"); namespace { // The first field contains the value that the switch produces when a certain // case group is selected, and the second field is a vector containing the // cases composing the case group. using SwitchCaseResultVectorTy = SmallVector>, 2>; // The first field contains the phi node that generates a result of the switch // and the second field contains the value generated for a certain case in the // switch for that PHI. using SwitchCaseResultsTy = SmallVector, 4>; /// ValueEqualityComparisonCase - Represents a case of a switch. 
struct ValueEqualityComparisonCase { ConstantInt *Value; BasicBlock *Dest; ValueEqualityComparisonCase(ConstantInt *Value, BasicBlock *Dest) : Value(Value), Dest(Dest) {} bool operator<(ValueEqualityComparisonCase RHS) const { // Comparing pointers is ok as we only rely on the order for uniquing. return Value < RHS.Value; } bool operator==(BasicBlock *RHSDest) const { return Dest == RHSDest; } }; class SimplifyCFGOpt { const TargetTransformInfo &TTI; DomTreeUpdater *DTU; const DataLayout &DL; ArrayRef LoopHeaders; const SimplifyCFGOptions &Options; bool Resimplify; Value *isValueEqualityComparison(Instruction *TI); BasicBlock *GetValueEqualityComparisonCases( Instruction *TI, std::vector &Cases); bool SimplifyEqualityComparisonWithOnlyPredecessor(Instruction *TI, BasicBlock *Pred, IRBuilder<> &Builder); bool PerformValueComparisonIntoPredecessorFolding(Instruction *TI, Value *&CV, Instruction *PTI, IRBuilder<> &Builder); bool FoldValueComparisonIntoPredecessors(Instruction *TI, IRBuilder<> &Builder); bool simplifyResume(ResumeInst *RI, IRBuilder<> &Builder); bool simplifySingleResume(ResumeInst *RI); bool simplifyCommonResume(ResumeInst *RI); bool simplifyCleanupReturn(CleanupReturnInst *RI); bool simplifyUnreachable(UnreachableInst *UI); bool simplifySwitch(SwitchInst *SI, IRBuilder<> &Builder); bool simplifyIndirectBr(IndirectBrInst *IBI); bool simplifyBranch(BranchInst *Branch, IRBuilder<> &Builder); bool simplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder); bool simplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder); bool tryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI, IRBuilder<> &Builder); bool HoistThenElseCodeToIf(BranchInst *BI, const TargetTransformInfo &TTI, bool EqTermsOnly); bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB, const TargetTransformInfo &TTI); bool SimplifyTerminatorOnSelect(Instruction *OldTerm, Value *Cond, BasicBlock *TrueBB, BasicBlock *FalseBB, uint32_t TrueWeight, uint32_t FalseWeight); bool 
SimplifyBranchOnICmpChain(BranchInst *BI, IRBuilder<> &Builder, const DataLayout &DL); bool SimplifySwitchOnSelect(SwitchInst *SI, SelectInst *Select); bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI); bool TurnSwitchRangeIntoICmp(SwitchInst *SI, IRBuilder<> &Builder); public: SimplifyCFGOpt(const TargetTransformInfo &TTI, DomTreeUpdater *DTU, const DataLayout &DL, ArrayRef LoopHeaders, const SimplifyCFGOptions &Opts) : TTI(TTI), DTU(DTU), DL(DL), LoopHeaders(LoopHeaders), Options(Opts) { assert((!DTU || !DTU->hasPostDomTree()) && "SimplifyCFG is not yet capable of maintaining validity of a " "PostDomTree, so don't ask for it."); } bool simplifyOnce(BasicBlock *BB); bool simplifyOnceImpl(BasicBlock *BB); bool run(BasicBlock *BB); // Helper to set Resimplify and return change indication. bool requestResimplify() { Resimplify = true; return true; } }; } // end anonymous namespace /// Return true if it is safe to merge these two /// terminator instructions together. static bool SafeToMergeTerminators(Instruction *SI1, Instruction *SI2, SmallSetVector *FailBlocks = nullptr) { if (SI1 == SI2) return false; // Can't merge with self! // It is not safe to merge these two switch instructions if they have a common // successor, and if that successor has a PHI node, and if *that* PHI node has // conflicting incoming values from the two switch blocks. BasicBlock *SI1BB = SI1->getParent(); BasicBlock *SI2BB = SI2->getParent(); SmallPtrSet SI1Succs(succ_begin(SI1BB), succ_end(SI1BB)); bool Fail = false; for (BasicBlock *Succ : successors(SI2BB)) if (SI1Succs.count(Succ)) for (BasicBlock::iterator BBI = Succ->begin(); isa(BBI); ++BBI) { PHINode *PN = cast(BBI); if (PN->getIncomingValueForBlock(SI1BB) != PN->getIncomingValueForBlock(SI2BB)) { if (FailBlocks) FailBlocks->insert(Succ); Fail = true; } } return !Fail; } /// Update PHI nodes in Succ to indicate that there will now be entries in it /// from the 'NewPred' block. 
The values that will be flowing into the PHI nodes /// will be the same as those coming in from ExistPred, an existing predecessor /// of Succ. static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred, BasicBlock *ExistPred, MemorySSAUpdater *MSSAU = nullptr) { for (PHINode &PN : Succ->phis()) PN.addIncoming(PN.getIncomingValueForBlock(ExistPred), NewPred); if (MSSAU) if (auto *MPhi = MSSAU->getMemorySSA()->getMemoryAccess(Succ)) MPhi->addIncoming(MPhi->getIncomingValueForBlock(ExistPred), NewPred); } /// Compute an abstract "cost" of speculating the given instruction, /// which is assumed to be safe to speculate. TCC_Free means cheap, /// TCC_Basic means less cheap, and TCC_Expensive means prohibitively /// expensive. static InstructionCost computeSpeculationCost(const User *I, const TargetTransformInfo &TTI) { assert(isSafeToSpeculativelyExecute(I) && "Instruction is not safe to speculatively execute!"); return TTI.getUserCost(I, TargetTransformInfo::TCK_SizeAndLatency); } /// If we have a merge point of an "if condition" as accepted above, /// return true if the specified value dominates the block. We /// don't handle the true generality of domination here, just a special case /// which works well enough for us. /// /// If AggressiveInsts is non-null, and if V does not dominate BB, we check to /// see if V (which must be an instruction) and its recursive operands /// that do not dominate BB have a combined cost lower than Budget and /// are non-trapping. If both are true, the instruction is inserted into the /// set and true is returned. /// /// The cost for most non-trapping instructions is defined as 1 except for /// Select whose cost is 2. /// /// After this function returns, Cost is increased by the cost of /// V plus its non-dominating operands. If that cost is greater than /// Budget, false is returned and Cost is undefined. 
static bool dominatesMergePoint(Value *V, BasicBlock *BB, SmallPtrSetImpl &AggressiveInsts, InstructionCost &Cost, InstructionCost Budget, const TargetTransformInfo &TTI, unsigned Depth = 0) { // It is possible to hit a zero-cost cycle (phi/gep instructions for example), // so limit the recursion depth. // TODO: While this recursion limit does prevent pathological behavior, it // would be better to track visited instructions to avoid cycles. if (Depth == MaxSpeculationDepth) return false; Instruction *I = dyn_cast(V); if (!I) { // Non-instructions all dominate instructions, but not all constantexprs // can be executed unconditionally. if (ConstantExpr *C = dyn_cast(V)) if (C->canTrap()) return false; return true; } BasicBlock *PBB = I->getParent(); // We don't want to allow weird loops that might have the "if condition" in // the bottom of this block. if (PBB == BB) return false; // If this instruction is defined in a block that contains an unconditional // branch to BB, then it must be in the 'conditional' part of the "if // statement". If not, it definitely dominates the region. BranchInst *BI = dyn_cast(PBB->getTerminator()); if (!BI || BI->isConditional() || BI->getSuccessor(0) != BB) return true; // If we have seen this instruction before, don't count it again. if (AggressiveInsts.count(I)) return true; // Okay, it looks like the instruction IS in the "condition". Check to // see if it's a cheap instruction to unconditionally compute, and if it // only uses stuff defined outside of the condition. If so, hoist it out. if (!isSafeToSpeculativelyExecute(I)) return false; Cost += computeSpeculationCost(I, TTI); // Allow exactly one instruction to be speculated regardless of its cost // (as long as it is safe to do so). // This is intended to flatten the CFG even if the instruction is a division // or other expensive operation. 
The speculation of an expensive instruction // is expected to be undone in CodeGenPrepare if the speculation has not // enabled further IR optimizations. if (Cost > Budget && (!SpeculateOneExpensiveInst || !AggressiveInsts.empty() || Depth > 0 || !Cost.isValid())) return false; // Okay, we can only really hoist these out if their operands do // not take us over the cost threshold. for (Use &Op : I->operands()) if (!dominatesMergePoint(Op, BB, AggressiveInsts, Cost, Budget, TTI, Depth + 1)) return false; // Okay, it's safe to do this! Remember this instruction. AggressiveInsts.insert(I); return true; } /// Extract ConstantInt from value, looking through IntToPtr /// and PointerNullValue. Return NULL if value is not a constant int. static ConstantInt *GetConstantInt(Value *V, const DataLayout &DL) { // Normal constant int. ConstantInt *CI = dyn_cast(V); if (CI || !isa(V) || !V->getType()->isPointerTy()) return CI; // This is some kind of pointer constant. Turn it into a pointer-sized // ConstantInt if possible. IntegerType *PtrTy = cast(DL.getIntPtrType(V->getType())); // Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*). if (isa(V)) return ConstantInt::get(PtrTy, 0); // IntToPtr const int. if (ConstantExpr *CE = dyn_cast(V)) if (CE->getOpcode() == Instruction::IntToPtr) if (ConstantInt *CI = dyn_cast(CE->getOperand(0))) { // The constant is very likely to have the right type already. if (CI->getType() == PtrTy) return CI; else return cast( ConstantExpr::getIntegerCast(CI, PtrTy, /*isSigned=*/false)); } return nullptr; } namespace { /// Given a chain of or (||) or and (&&) comparison of a value against a /// constant, this will try to recover the information required for a switch /// structure. /// It will depth-first traverse the chain of comparison, seeking for patterns /// like %a == 12 or %a < 4 and combine them to produce a set of integer /// representing the different cases for the switch. 
/// Note that if the chain is composed of '||' it will build the set of elements /// that matches the comparisons (i.e. any of this value validate the chain) /// while for a chain of '&&' it will build the set elements that make the test /// fail. struct ConstantComparesGatherer { const DataLayout &DL; /// Value found for the switch comparison Value *CompValue = nullptr; /// Extra clause to be checked before the switch Value *Extra = nullptr; /// Set of integers to match in switch SmallVector Vals; /// Number of comparisons matched in the and/or chain unsigned UsedICmps = 0; /// Construct and compute the result for the comparison instruction Cond ConstantComparesGatherer(Instruction *Cond, const DataLayout &DL) : DL(DL) { gather(Cond); } ConstantComparesGatherer(const ConstantComparesGatherer &) = delete; ConstantComparesGatherer & operator=(const ConstantComparesGatherer &) = delete; private: /// Try to set the current value used for the comparison, it succeeds only if /// it wasn't set before or if the new value is the same as the old one bool setValueOnce(Value *NewVal) { if (CompValue && CompValue != NewVal) return false; CompValue = NewVal; return (CompValue != nullptr); } /// Try to match Instruction "I" as a comparison against a constant and /// populates the array Vals with the set of values that match (or do not /// match depending on isEQ). /// Return false on failure. On success, the Value the comparison matched /// against is placed in CompValue. /// If CompValue is already set, the function is expected to fail if a match /// is found but the value compared to is different. bool matchInstruction(Instruction *I, bool isEQ) { // If this is an icmp against a constant, handle this as one of the cases. 
ICmpInst *ICI; ConstantInt *C; if (!((ICI = dyn_cast(I)) && (C = GetConstantInt(I->getOperand(1), DL)))) { return false; } Value *RHSVal; const APInt *RHSC; // Pattern match a special case // (x & ~2^z) == y --> x == y || x == y|2^z // This undoes a transformation done by instcombine to fuse 2 compares. if (ICI->getPredicate() == (isEQ ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) { // It's a little bit hard to see why the following transformations are // correct. Here is a CVC3 program to verify them for 64-bit values: /* ONE : BITVECTOR(64) = BVZEROEXTEND(0bin1, 63); x : BITVECTOR(64); y : BITVECTOR(64); z : BITVECTOR(64); mask : BITVECTOR(64) = BVSHL(ONE, z); QUERY( (y & ~mask = y) => ((x & ~mask = y) <=> (x = y OR x = (y | mask))) ); QUERY( (y | mask = y) => ((x | mask = y) <=> (x = y OR x = (y & ~mask))) ); */ // Please note that each pattern must be a dual implication (<--> or // iff). One directional implication can create spurious matches. If the // implication is only one-way, an unsatisfiable condition on the left // side can imply a satisfiable condition on the right side. Dual // implication ensures that satisfiable conditions are transformed to // other satisfiable conditions and unsatisfiable conditions are // transformed to other unsatisfiable conditions. // Here is a concrete example of a unsatisfiable condition on the left // implying a satisfiable condition on the right: // // mask = (1 << z) // (x & ~mask) == y --> (x == y || x == (y | mask)) // // Substituting y = 3, z = 0 yields: // (x & -2) == 3 --> (x == 3 || x == 2) // Pattern match a special case: /* QUERY( (y & ~mask = y) => ((x & ~mask = y) <=> (x = y OR x = (y | mask))) ); */ if (match(ICI->getOperand(0), m_And(m_Value(RHSVal), m_APInt(RHSC)))) { APInt Mask = ~*RHSC; if (Mask.isPowerOf2() && (C->getValue() & ~Mask) == C->getValue()) { // If we already have a value for the switch, it has to match! 
if (!setValueOnce(RHSVal)) return false; Vals.push_back(C); Vals.push_back( ConstantInt::get(C->getContext(), C->getValue() | Mask)); UsedICmps++; return true; } } // Pattern match a special case: /* QUERY( (y | mask = y) => ((x | mask = y) <=> (x = y OR x = (y & ~mask))) ); */ if (match(ICI->getOperand(0), m_Or(m_Value(RHSVal), m_APInt(RHSC)))) { APInt Mask = *RHSC; if (Mask.isPowerOf2() && (C->getValue() | Mask) == C->getValue()) { // If we already have a value for the switch, it has to match! if (!setValueOnce(RHSVal)) return false; Vals.push_back(C); Vals.push_back(ConstantInt::get(C->getContext(), C->getValue() & ~Mask)); UsedICmps++; return true; } } // If we already have a value for the switch, it has to match! if (!setValueOnce(ICI->getOperand(0))) return false; UsedICmps++; Vals.push_back(C); return ICI->getOperand(0); } // If we have "x ult 3", for example, then we can add 0,1,2 to the set. ConstantRange Span = ConstantRange::makeExactICmpRegion(ICI->getPredicate(), C->getValue()); // Shift the range if the compare is fed by an add. This is the range // compare idiom as emitted by instcombine. Value *CandidateVal = I->getOperand(0); if (match(I->getOperand(0), m_Add(m_Value(RHSVal), m_APInt(RHSC)))) { Span = Span.subtract(*RHSC); CandidateVal = RHSVal; } // If this is an and/!= check, then we are looking to build the set of // value that *don't* pass the and chain. I.e. to turn "x ugt 2" into // x != 0 && x != 1. if (!isEQ) Span = Span.inverse(); // If there are a ton of values, we don't want to make a ginormous switch. if (Span.isSizeLargerThan(8) || Span.isEmptySet()) { return false; } // If we already have a value for the switch, it has to match! 
if (!setValueOnce(CandidateVal)) return false; // Add all values from the range to the set for (APInt Tmp = Span.getLower(); Tmp != Span.getUpper(); ++Tmp) Vals.push_back(ConstantInt::get(I->getContext(), Tmp)); UsedICmps++; return true; } /// Given a potentially 'or'd or 'and'd together collection of icmp /// eq/ne/lt/gt instructions that compare a value against a constant, extract /// the value being compared, and stick the list constants into the Vals /// vector. /// One "Extra" case is allowed to differ from the other. void gather(Value *V) { bool isEQ = match(V, m_LogicalOr(m_Value(), m_Value())); // Keep a stack (SmallVector for efficiency) for depth-first traversal SmallVector DFT; SmallPtrSet Visited; // Initialize Visited.insert(V); DFT.push_back(V); while (!DFT.empty()) { V = DFT.pop_back_val(); if (Instruction *I = dyn_cast(V)) { // If it is a || (or && depending on isEQ), process the operands. Value *Op0, *Op1; if (isEQ ? match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))) : match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) { if (Visited.insert(Op1).second) DFT.push_back(Op1); if (Visited.insert(Op0).second) DFT.push_back(Op0); continue; } // Try to match the current instruction if (matchInstruction(I, isEQ)) // Match succeed, continue the loop continue; } // One element of the sequence of || (or &&) could not be match as a // comparison against the same value as the others. 
// We allow only one "Extra" case to be checked before the switch if (!Extra) { Extra = V; continue; } // Failed to parse a proper sequence, abort now CompValue = nullptr; break; } } }; } // end anonymous namespace static void EraseTerminatorAndDCECond(Instruction *TI, MemorySSAUpdater *MSSAU = nullptr) { Instruction *Cond = nullptr; if (SwitchInst *SI = dyn_cast(TI)) { Cond = dyn_cast(SI->getCondition()); } else if (BranchInst *BI = dyn_cast(TI)) { if (BI->isConditional()) Cond = dyn_cast(BI->getCondition()); } else if (IndirectBrInst *IBI = dyn_cast(TI)) { Cond = dyn_cast(IBI->getAddress()); } TI->eraseFromParent(); if (Cond) RecursivelyDeleteTriviallyDeadInstructions(Cond, nullptr, MSSAU); } /// Return true if the specified terminator checks /// to see if a value is equal to constant integer value. Value *SimplifyCFGOpt::isValueEqualityComparison(Instruction *TI) { Value *CV = nullptr; if (SwitchInst *SI = dyn_cast(TI)) { // Do not permit merging of large switch instructions into their // predecessors unless there is only one predecessor. if (!SI->getParent()->hasNPredecessorsOrMore(128 / SI->getNumSuccessors())) CV = SI->getCondition(); } else if (BranchInst *BI = dyn_cast(TI)) if (BI->isConditional() && BI->getCondition()->hasOneUse()) if (ICmpInst *ICI = dyn_cast(BI->getCondition())) { if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), DL)) CV = ICI->getOperand(0); } // Unwrap any lossless ptrtoint cast. if (CV) { if (PtrToIntInst *PTII = dyn_cast(CV)) { Value *Ptr = PTII->getPointerOperand(); if (PTII->getType() == DL.getIntPtrType(Ptr->getType())) CV = Ptr; } } return CV; } /// Given a value comparison instruction, /// decode all of the 'cases' that it represents and return the 'default' block. 
BasicBlock *SimplifyCFGOpt::GetValueEqualityComparisonCases( Instruction *TI, std::vector &Cases) { if (SwitchInst *SI = dyn_cast(TI)) { Cases.reserve(SI->getNumCases()); for (auto Case : SI->cases()) Cases.push_back(ValueEqualityComparisonCase(Case.getCaseValue(), Case.getCaseSuccessor())); return SI->getDefaultDest(); } BranchInst *BI = cast(TI); ICmpInst *ICI = cast(BI->getCondition()); BasicBlock *Succ = BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_NE); Cases.push_back(ValueEqualityComparisonCase( GetConstantInt(ICI->getOperand(1), DL), Succ)); return BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_EQ); } /// Given a vector of bb/value pairs, remove any entries /// in the list that match the specified block. static void EliminateBlockCases(BasicBlock *BB, std::vector &Cases) { llvm::erase_value(Cases, BB); } /// Return true if there are any keys in C1 that exist in C2 as well. static bool ValuesOverlap(std::vector &C1, std::vector &C2) { std::vector *V1 = &C1, *V2 = &C2; // Make V1 be smaller than V2. if (V1->size() > V2->size()) std::swap(V1, V2); if (V1->empty()) return false; if (V1->size() == 1) { // Just scan V2. ConstantInt *TheVal = (*V1)[0].Value; for (unsigned i = 0, e = V2->size(); i != e; ++i) if (TheVal == (*V2)[i].Value) return true; } // Otherwise, just sort both lists and compare element by element. array_pod_sort(V1->begin(), V1->end()); array_pod_sort(V2->begin(), V2->end()); unsigned i1 = 0, i2 = 0, e1 = V1->size(), e2 = V2->size(); while (i1 != e1 && i2 != e2) { if ((*V1)[i1].Value == (*V2)[i2].Value) return true; if ((*V1)[i1].Value < (*V2)[i2].Value) ++i1; else ++i2; } return false; } // Set branch weights on SwitchInst. This sets the metadata if there is at // least one non-zero weight. static void setBranchWeights(SwitchInst *SI, ArrayRef Weights) { // Check that there is at least one non-zero weight. Otherwise, pass // nullptr to setMetadata which will erase the existing metadata. 
MDNode *N = nullptr; if (llvm::any_of(Weights, [](uint32_t W) { return W != 0; })) N = MDBuilder(SI->getParent()->getContext()).createBranchWeights(Weights); SI->setMetadata(LLVMContext::MD_prof, N); } // Similar to the above, but for branch and select instructions that take // exactly 2 weights. static void setBranchWeights(Instruction *I, uint32_t TrueWeight, uint32_t FalseWeight) { assert(isa(I) || isa(I)); // Check that there is at least one non-zero weight. Otherwise, pass // nullptr to setMetadata which will erase the existing metadata. MDNode *N = nullptr; if (TrueWeight || FalseWeight) N = MDBuilder(I->getParent()->getContext()) .createBranchWeights(TrueWeight, FalseWeight); I->setMetadata(LLVMContext::MD_prof, N); } /// If TI is known to be a terminator instruction and its block is known to /// only have a single predecessor block, check to see if that predecessor is /// also a value comparison with the same value, and if that comparison /// determines the outcome of this comparison. If so, simplify TI. This does a /// very limited form of jump threading. bool SimplifyCFGOpt::SimplifyEqualityComparisonWithOnlyPredecessor( Instruction *TI, BasicBlock *Pred, IRBuilder<> &Builder) { Value *PredVal = isValueEqualityComparison(Pred->getTerminator()); if (!PredVal) return false; // Not a value comparison in predecessor. Value *ThisVal = isValueEqualityComparison(TI); assert(ThisVal && "This isn't a value comparison!!"); if (ThisVal != PredVal) return false; // Different predicates. // TODO: Preserve branch weight metadata, similarly to how // FoldValueComparisonIntoPredecessors preserves it. // Find out information about when control will move from Pred to TI's block. std::vector PredCases; BasicBlock *PredDef = GetValueEqualityComparisonCases(Pred->getTerminator(), PredCases); EliminateBlockCases(PredDef, PredCases); // Remove default from cases. // Find information about how control leaves this block. 
std::vector ThisCases; BasicBlock *ThisDef = GetValueEqualityComparisonCases(TI, ThisCases); EliminateBlockCases(ThisDef, ThisCases); // Remove default from cases. // If TI's block is the default block from Pred's comparison, potentially // simplify TI based on this knowledge. if (PredDef == TI->getParent()) { // If we are here, we know that the value is none of those cases listed in // PredCases. If there are any cases in ThisCases that are in PredCases, we // can simplify TI. if (!ValuesOverlap(PredCases, ThisCases)) return false; if (isa(TI)) { // Okay, one of the successors of this condbr is dead. Convert it to a // uncond br. assert(ThisCases.size() == 1 && "Branch can only have one case!"); // Insert the new branch. Instruction *NI = Builder.CreateBr(ThisDef); (void)NI; // Remove PHI node entries for the dead edge. ThisCases[0].Dest->removePredecessor(PredDef); LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator() << "Through successor TI: " << *TI << "Leaving: " << *NI << "\n"); EraseTerminatorAndDCECond(TI); if (DTU) DTU->applyUpdates( {{DominatorTree::Delete, PredDef, ThisCases[0].Dest}}); return true; } SwitchInstProfUpdateWrapper SI = *cast(TI); // Okay, TI has cases that are statically dead, prune them away. 
SmallPtrSet DeadCases; for (unsigned i = 0, e = PredCases.size(); i != e; ++i) DeadCases.insert(PredCases[i].Value); LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator() << "Through successor TI: " << *TI); SmallDenseMap NumPerSuccessorCases; for (SwitchInst::CaseIt i = SI->case_end(), e = SI->case_begin(); i != e;) { --i; auto *Successor = i->getCaseSuccessor(); if (DTU) ++NumPerSuccessorCases[Successor]; if (DeadCases.count(i->getCaseValue())) { Successor->removePredecessor(PredDef); SI.removeCase(i); if (DTU) --NumPerSuccessorCases[Successor]; } } if (DTU) { std::vector Updates; for (const std::pair &I : NumPerSuccessorCases) if (I.second == 0) Updates.push_back({DominatorTree::Delete, PredDef, I.first}); DTU->applyUpdates(Updates); } LLVM_DEBUG(dbgs() << "Leaving: " << *TI << "\n"); return true; } // Otherwise, TI's block must correspond to some matched value. Find out // which value (or set of values) this is. ConstantInt *TIV = nullptr; BasicBlock *TIBB = TI->getParent(); for (unsigned i = 0, e = PredCases.size(); i != e; ++i) if (PredCases[i].Dest == TIBB) { if (TIV) return false; // Cannot handle multiple values coming to this block. TIV = PredCases[i].Value; } assert(TIV && "No edge from pred to succ?"); // Okay, we found the one constant that our value can be if we get into TI's // BB. Find out which successor will unconditionally be branched to. BasicBlock *TheRealDest = nullptr; for (unsigned i = 0, e = ThisCases.size(); i != e; ++i) if (ThisCases[i].Value == TIV) { TheRealDest = ThisCases[i].Dest; break; } // If not handled by any explicit cases, it is handled by the default case. if (!TheRealDest) TheRealDest = ThisDef; SmallPtrSet RemovedSuccs; // Remove PHI node entries for dead edges. BasicBlock *CheckEdge = TheRealDest; for (BasicBlock *Succ : successors(TIBB)) if (Succ != CheckEdge) { if (Succ != TheRealDest) RemovedSuccs.insert(Succ); Succ->removePredecessor(TIBB); } else CheckEdge = nullptr; // Insert the new branch. 
Instruction *NI = Builder.CreateBr(TheRealDest); (void)NI; LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator() << "Through successor TI: " << *TI << "Leaving: " << *NI << "\n"); EraseTerminatorAndDCECond(TI); if (DTU) { SmallVector Updates; Updates.reserve(RemovedSuccs.size()); for (auto *RemovedSucc : RemovedSuccs) Updates.push_back({DominatorTree::Delete, TIBB, RemovedSucc}); DTU->applyUpdates(Updates); } return true; } namespace { /// This class implements a stable ordering of constant /// integers that does not depend on their address. This is important for /// applications that sort ConstantInt's to ensure uniqueness. struct ConstantIntOrdering { bool operator()(const ConstantInt *LHS, const ConstantInt *RHS) const { return LHS->getValue().ult(RHS->getValue()); } }; } // end anonymous namespace static int ConstantIntSortPredicate(ConstantInt *const *P1, ConstantInt *const *P2) { const ConstantInt *LHS = *P1; const ConstantInt *RHS = *P2; if (LHS == RHS) return 0; return LHS->getValue().ult(RHS->getValue()) ? 1 : -1; } static inline bool HasBranchWeights(const Instruction *I) { MDNode *ProfMD = I->getMetadata(LLVMContext::MD_prof); if (ProfMD && ProfMD->getOperand(0)) if (MDString *MDS = dyn_cast(ProfMD->getOperand(0))) return MDS->getString().equals("branch_weights"); return false; } /// Get Weights of a given terminator, the default weight is at the front /// of the vector. If TI is a conditional eq, we need to swap the branch-weight /// metadata. static void GetBranchWeights(Instruction *TI, SmallVectorImpl &Weights) { MDNode *MD = TI->getMetadata(LLVMContext::MD_prof); assert(MD); for (unsigned i = 1, e = MD->getNumOperands(); i < e; ++i) { ConstantInt *CI = mdconst::extract(MD->getOperand(i)); Weights.push_back(CI->getValue().getZExtValue()); } // If TI is a conditional eq, the default case is the false case, // and the corresponding branch-weight data is at index 2. We swap the // default weight to be the first entry. 
if (BranchInst *BI = dyn_cast(TI)) { assert(Weights.size() == 2); ICmpInst *ICI = cast(BI->getCondition()); if (ICI->getPredicate() == ICmpInst::ICMP_EQ) std::swap(Weights.front(), Weights.back()); } } /// Keep halving the weights until all can fit in uint32_t. static void FitWeights(MutableArrayRef Weights) { uint64_t Max = *std::max_element(Weights.begin(), Weights.end()); if (Max > UINT_MAX) { unsigned Offset = 32 - countLeadingZeros(Max); for (uint64_t &I : Weights) I >>= Offset; } } static void CloneInstructionsIntoPredecessorBlockAndUpdateSSAUses( BasicBlock *BB, BasicBlock *PredBlock, ValueToValueMapTy &VMap) { Instruction *PTI = PredBlock->getTerminator(); // If we have bonus instructions, clone them into the predecessor block. // Note that there may be multiple predecessor blocks, so we cannot move // bonus instructions to a predecessor block. for (Instruction &BonusInst : *BB) { if (isa(BonusInst) || BonusInst.isTerminator()) continue; Instruction *NewBonusInst = BonusInst.clone(); if (PTI->getDebugLoc() != NewBonusInst->getDebugLoc()) { // Unless the instruction has the same !dbg location as the original // branch, drop it. When we fold the bonus instructions we want to make // sure we reset their debug locations in order to avoid stepping on // dead code caused by folding dead branches. NewBonusInst->setDebugLoc(DebugLoc()); } RemapInstruction(NewBonusInst, VMap, RF_NoModuleLevelChanges | RF_IgnoreMissingLocals); VMap[&BonusInst] = NewBonusInst; // If we moved a load, we cannot any longer claim any knowledge about // its potential value. The previous information might have been valid // only given the branch precondition. // For an analogous reason, we must also drop all the metadata whose // semantics we don't understand. We *can* preserve !annotation, because // it is tied to the instruction itself, not the value or position. // Similarly strip attributes on call parameters that may cause UB in // location the call is moved to. 
NewBonusInst->dropUndefImplyingAttrsAndUnknownMetadata( LLVMContext::MD_annotation); PredBlock->getInstList().insert(PTI->getIterator(), NewBonusInst); NewBonusInst->takeName(&BonusInst); BonusInst.setName(NewBonusInst->getName() + ".old"); // Update (liveout) uses of bonus instructions, // now that the bonus instruction has been cloned into predecessor. - SSAUpdater SSAUpdate; - SSAUpdate.Initialize(BonusInst.getType(), - (NewBonusInst->getName() + ".merge").str()); - SSAUpdate.AddAvailableValue(BB, &BonusInst); - SSAUpdate.AddAvailableValue(PredBlock, NewBonusInst); + // Note that we expect to be in a block-closed SSA form for this to work! for (Use &U : make_early_inc_range(BonusInst.uses())) { auto *UI = cast(U.getUser()); - if (UI->getParent() != PredBlock) - SSAUpdate.RewriteUseAfterInsertions(U); - else // Use is in the same block as, and comes before, NewBonusInst. - SSAUpdate.RewriteUse(U); + auto *PN = dyn_cast(UI); + if (!PN) { + assert(UI->getParent() == BB && BonusInst.comesBefore(UI) && + "If the user is not a PHI node, then it should be in the same " + "block as, and come after, the original bonus instruction."); + continue; // Keep using the original bonus instruction. + } + // Is this the block-closed SSA form PHI node? + if (PN->getIncomingBlock(U) == BB) + continue; // Great, keep using the original bonus instruction. + // The only other alternative is an "use" when coming from + // the predecessor block - here we should refer to the cloned bonus instr. + assert(PN->getIncomingBlock(U) == PredBlock && + "Not in block-closed SSA form?"); + U.set(NewBonusInst); } } } bool SimplifyCFGOpt::PerformValueComparisonIntoPredecessorFolding( Instruction *TI, Value *&CV, Instruction *PTI, IRBuilder<> &Builder) { BasicBlock *BB = TI->getParent(); BasicBlock *Pred = PTI->getParent(); SmallVector Updates; // Figure out which 'cases' to copy from SI to PSI. 
std::vector BBCases; BasicBlock *BBDefault = GetValueEqualityComparisonCases(TI, BBCases); std::vector PredCases; BasicBlock *PredDefault = GetValueEqualityComparisonCases(PTI, PredCases); // Based on whether the default edge from PTI goes to BB or not, fill in // PredCases and PredDefault with the new switch cases we would like to // build. SmallMapVector NewSuccessors; // Update the branch weight metadata along the way SmallVector Weights; bool PredHasWeights = HasBranchWeights(PTI); bool SuccHasWeights = HasBranchWeights(TI); if (PredHasWeights) { GetBranchWeights(PTI, Weights); // branch-weight metadata is inconsistent here. if (Weights.size() != 1 + PredCases.size()) PredHasWeights = SuccHasWeights = false; } else if (SuccHasWeights) // If there are no predecessor weights but there are successor weights, // populate Weights with 1, which will later be scaled to the sum of // successor's weights Weights.assign(1 + PredCases.size(), 1); SmallVector SuccWeights; if (SuccHasWeights) { GetBranchWeights(TI, SuccWeights); // branch-weight metadata is inconsistent here. if (SuccWeights.size() != 1 + BBCases.size()) PredHasWeights = SuccHasWeights = false; } else if (PredHasWeights) SuccWeights.assign(1 + BBCases.size(), 1); if (PredDefault == BB) { // If this is the default destination from PTI, only the edges in TI // that don't occur in PTI, or that branch to BB will be activated. std::set PTIHandled; for (unsigned i = 0, e = PredCases.size(); i != e; ++i) if (PredCases[i].Dest != BB) PTIHandled.insert(PredCases[i].Value); else { // The default destination is BB, we don't need explicit targets. std::swap(PredCases[i], PredCases.back()); if (PredHasWeights || SuccHasWeights) { // Increase weight for the default case. Weights[0] += Weights[i + 1]; std::swap(Weights[i + 1], Weights.back()); Weights.pop_back(); } PredCases.pop_back(); --i; --e; } // Reconstruct the new switch statement we will be building. 
if (PredDefault != BBDefault) { PredDefault->removePredecessor(Pred); if (DTU && PredDefault != BB) Updates.push_back({DominatorTree::Delete, Pred, PredDefault}); PredDefault = BBDefault; ++NewSuccessors[BBDefault]; } unsigned CasesFromPred = Weights.size(); uint64_t ValidTotalSuccWeight = 0; for (unsigned i = 0, e = BBCases.size(); i != e; ++i) if (!PTIHandled.count(BBCases[i].Value) && BBCases[i].Dest != BBDefault) { PredCases.push_back(BBCases[i]); ++NewSuccessors[BBCases[i].Dest]; if (SuccHasWeights || PredHasWeights) { // The default weight is at index 0, so weight for the ith case // should be at index i+1. Scale the cases from successor by // PredDefaultWeight (Weights[0]). Weights.push_back(Weights[0] * SuccWeights[i + 1]); ValidTotalSuccWeight += SuccWeights[i + 1]; } } if (SuccHasWeights || PredHasWeights) { ValidTotalSuccWeight += SuccWeights[0]; // Scale the cases from predecessor by ValidTotalSuccWeight. for (unsigned i = 1; i < CasesFromPred; ++i) Weights[i] *= ValidTotalSuccWeight; // Scale the default weight by SuccDefaultWeight (SuccWeights[0]). Weights[0] *= SuccWeights[0]; } } else { // If this is not the default destination from PSI, only the edges // in SI that occur in PSI with a destination of BB will be // activated. std::set PTIHandled; std::map WeightsForHandled; for (unsigned i = 0, e = PredCases.size(); i != e; ++i) if (PredCases[i].Dest == BB) { PTIHandled.insert(PredCases[i].Value); if (PredHasWeights || SuccHasWeights) { WeightsForHandled[PredCases[i].Value] = Weights[i + 1]; std::swap(Weights[i + 1], Weights.back()); Weights.pop_back(); } std::swap(PredCases[i], PredCases.back()); PredCases.pop_back(); --i; --e; } // Okay, now we know which constants were sent to BB from the // predecessor. Figure out where they will all go now. for (unsigned i = 0, e = BBCases.size(); i != e; ++i) if (PTIHandled.count(BBCases[i].Value)) { // If this is one we are capable of getting... 
if (PredHasWeights || SuccHasWeights) Weights.push_back(WeightsForHandled[BBCases[i].Value]); PredCases.push_back(BBCases[i]); ++NewSuccessors[BBCases[i].Dest]; PTIHandled.erase(BBCases[i].Value); // This constant is taken care of } // If there are any constants vectored to BB that TI doesn't handle, // they must go to the default destination of TI. for (ConstantInt *I : PTIHandled) { if (PredHasWeights || SuccHasWeights) Weights.push_back(WeightsForHandled[I]); PredCases.push_back(ValueEqualityComparisonCase(I, BBDefault)); ++NewSuccessors[BBDefault]; } } // Okay, at this point, we know which new successor Pred will get. Make // sure we update the number of entries in the PHI nodes for these // successors. SmallPtrSet SuccsOfPred; if (DTU) { SuccsOfPred = {succ_begin(Pred), succ_end(Pred)}; Updates.reserve(Updates.size() + NewSuccessors.size()); } for (const std::pair &NewSuccessor : NewSuccessors) { for (auto I : seq(0, NewSuccessor.second)) { (void)I; AddPredecessorToBlock(NewSuccessor.first, Pred, BB); } if (DTU && !SuccsOfPred.contains(NewSuccessor.first)) Updates.push_back({DominatorTree::Insert, Pred, NewSuccessor.first}); } Builder.SetInsertPoint(PTI); // Convert pointer to int before we switch. if (CV->getType()->isPointerTy()) { CV = Builder.CreatePtrToInt(CV, DL.getIntPtrType(CV->getType()), "magicptr"); } // Now that the successors are updated, create the new Switch instruction. SwitchInst *NewSI = Builder.CreateSwitch(CV, PredDefault, PredCases.size()); NewSI->setDebugLoc(PTI->getDebugLoc()); for (ValueEqualityComparisonCase &V : PredCases) NewSI->addCase(V.Value, V.Dest); if (PredHasWeights || SuccHasWeights) { // Halve the weights if any of them cannot fit in an uint32_t FitWeights(Weights); SmallVector MDWeights(Weights.begin(), Weights.end()); setBranchWeights(NewSI, MDWeights); } EraseTerminatorAndDCECond(PTI); // Okay, last check. If BB is still a successor of PSI, then we must // have an infinite loop case. 
If so, add an infinitely looping block // to handle the case to preserve the behavior of the code. BasicBlock *InfLoopBlock = nullptr; for (unsigned i = 0, e = NewSI->getNumSuccessors(); i != e; ++i) if (NewSI->getSuccessor(i) == BB) { if (!InfLoopBlock) { // Insert it at the end of the function, because it's either code, // or it won't matter if it's hot. :) InfLoopBlock = BasicBlock::Create(BB->getContext(), "infloop", BB->getParent()); BranchInst::Create(InfLoopBlock, InfLoopBlock); if (DTU) Updates.push_back( {DominatorTree::Insert, InfLoopBlock, InfLoopBlock}); } NewSI->setSuccessor(i, InfLoopBlock); } if (DTU) { if (InfLoopBlock) Updates.push_back({DominatorTree::Insert, Pred, InfLoopBlock}); Updates.push_back({DominatorTree::Delete, Pred, BB}); DTU->applyUpdates(Updates); } ++NumFoldValueComparisonIntoPredecessors; return true; } /// The specified terminator is a value equality comparison instruction /// (either a switch or a branch on "X == c"). /// See if any of the predecessors of the terminator block are value comparisons /// on the same value. If so, and if safe to do so, fold them together. bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(Instruction *TI, IRBuilder<> &Builder) { BasicBlock *BB = TI->getParent(); Value *CV = isValueEqualityComparison(TI); // CondVal assert(CV && "Not a comparison?"); bool Changed = false; SmallSetVector Preds(pred_begin(BB), pred_end(BB)); while (!Preds.empty()) { BasicBlock *Pred = Preds.pop_back_val(); Instruction *PTI = Pred->getTerminator(); // Don't try to fold into itself. if (Pred == BB) continue; // See if the predecessor is a comparison with the same value. 
Value *PCV = isValueEqualityComparison(PTI); // PredCondVal if (PCV != CV) continue; SmallSetVector FailBlocks; if (!SafeToMergeTerminators(TI, PTI, &FailBlocks)) { for (auto *Succ : FailBlocks) { if (!SplitBlockPredecessors(Succ, TI->getParent(), ".fold.split", DTU)) return false; } } PerformValueComparisonIntoPredecessorFolding(TI, CV, PTI, Builder); Changed = true; } return Changed; } // If we would need to insert a select that uses the value of this invoke // (comments in HoistThenElseCodeToIf explain why we would need to do this), we // can't hoist the invoke, as there is nowhere to put the select in this case. static bool isSafeToHoistInvoke(BasicBlock *BB1, BasicBlock *BB2, Instruction *I1, Instruction *I2) { for (BasicBlock *Succ : successors(BB1)) { for (const PHINode &PN : Succ->phis()) { Value *BB1V = PN.getIncomingValueForBlock(BB1); Value *BB2V = PN.getIncomingValueForBlock(BB2); if (BB1V != BB2V && (BB1V == I1 || BB2V == I2)) { return false; } } } return true; } static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I, bool PtrValueMayBeModified = false); /// Given a conditional branch that goes to BB1 and BB2, hoist any common code /// in the two blocks up into the branch block. The caller of this function /// guarantees that BI's block dominates BB1 and BB2. If EqTermsOnly is given, /// only perform hoisting in case both blocks only contain a terminator. In that /// case, only the original BI will be replaced and selects for PHIs are added. bool SimplifyCFGOpt::HoistThenElseCodeToIf(BranchInst *BI, const TargetTransformInfo &TTI, bool EqTermsOnly) { // This does very trivial matching, with limited scanning, to find identical // instructions in the two blocks. In particular, we don't want to get into // O(M*N) situations here where M and N are the sizes of BB1 and BB2. As // such, we currently just scan for obviously identical instructions in an // identical order. BasicBlock *BB1 = BI->getSuccessor(0); // The true destination. 
BasicBlock *BB2 = BI->getSuccessor(1); // The false destination // If either of the blocks has it's address taken, then we can't do this fold, // because the code we'd hoist would no longer run when we jump into the block // by it's address. if (BB1->hasAddressTaken() || BB2->hasAddressTaken()) return false; BasicBlock::iterator BB1_Itr = BB1->begin(); BasicBlock::iterator BB2_Itr = BB2->begin(); Instruction *I1 = &*BB1_Itr++, *I2 = &*BB2_Itr++; // Skip debug info if it is not identical. DbgInfoIntrinsic *DBI1 = dyn_cast(I1); DbgInfoIntrinsic *DBI2 = dyn_cast(I2); if (!DBI1 || !DBI2 || !DBI1->isIdenticalToWhenDefined(DBI2)) { while (isa(I1)) I1 = &*BB1_Itr++; while (isa(I2)) I2 = &*BB2_Itr++; } // FIXME: Can we define a safety predicate for CallBr? if (isa(I1) || !I1->isIdenticalToWhenDefined(I2) || (isa(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2)) || isa(I1)) return false; BasicBlock *BIParent = BI->getParent(); bool Changed = false; auto _ = make_scope_exit([&]() { if (Changed) ++NumHoistCommonCode; }); // Check if only hoisting terminators is allowed. This does not add new // instructions to the hoist location. if (EqTermsOnly) { // Skip any debug intrinsics, as they are free to hoist. auto *I1NonDbg = &*skipDebugIntrinsics(I1->getIterator()); auto *I2NonDbg = &*skipDebugIntrinsics(I2->getIterator()); if (!I1NonDbg->isIdenticalToWhenDefined(I2NonDbg)) return false; if (!I1NonDbg->isTerminator()) return false; // Now we know that we only need to hoist debug instrinsics and the // terminator. Let the loop below handle those 2 cases. } do { // If we are hoisting the terminator instruction, don't move one (making a // broken BB), instead clone it, and remove BI. if (I1->isTerminator()) goto HoistTerminator; // If we're going to hoist a call, make sure that the two instructions we're // commoning/hoisting are both marked with musttail, or neither of them is // marked as such. 
Otherwise, we might end up in a situation where we hoist // from a block where the terminator is a `ret` to a block where the terminator // is a `br`, and `musttail` calls expect to be followed by a return. auto *C1 = dyn_cast(I1); auto *C2 = dyn_cast(I2); if (C1 && C2) if (C1->isMustTailCall() != C2->isMustTailCall()) return Changed; if (!TTI.isProfitableToHoist(I1) || !TTI.isProfitableToHoist(I2)) return Changed; // If any of the two call sites has nomerge attribute, stop hoisting. if (const auto *CB1 = dyn_cast(I1)) if (CB1->cannotMerge()) return Changed; if (const auto *CB2 = dyn_cast(I2)) if (CB2->cannotMerge()) return Changed; if (isa(I1) || isa(I2)) { assert (isa(I1) && isa(I2)); // The debug location is an integral part of a debug info intrinsic // and can't be separated from it or replaced. Instead of attempting // to merge locations, simply hoist both copies of the intrinsic. BIParent->getInstList().splice(BI->getIterator(), BB1->getInstList(), I1); BIParent->getInstList().splice(BI->getIterator(), BB2->getInstList(), I2); Changed = true; } else { // For a normal instruction, we just move one to right before the branch, // then replace all uses of the other with the first. Finally, we remove // the now redundant second instruction. BIParent->getInstList().splice(BI->getIterator(), BB1->getInstList(), I1); if (!I2->use_empty()) I2->replaceAllUsesWith(I1); I1->andIRFlags(I2); unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_range, LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load, LLVMContext::MD_nonnull, LLVMContext::MD_invariant_group, LLVMContext::MD_align, LLVMContext::MD_dereferenceable, LLVMContext::MD_dereferenceable_or_null, LLVMContext::MD_mem_parallel_loop_access, LLVMContext::MD_access_group, LLVMContext::MD_preserve_access_index}; combineMetadata(I1, I2, KnownIDs, true); // I1 and I2 are being combined into a single instruction. Its debug // location is the merged locations of the original instructions. 
I1->applyMergedLocation(I1->getDebugLoc(), I2->getDebugLoc()); I2->eraseFromParent(); Changed = true; } ++NumHoistCommonInstrs; I1 = &*BB1_Itr++; I2 = &*BB2_Itr++; // Skip debug info if it is not identical. DbgInfoIntrinsic *DBI1 = dyn_cast(I1); DbgInfoIntrinsic *DBI2 = dyn_cast(I2); if (!DBI1 || !DBI2 || !DBI1->isIdenticalToWhenDefined(DBI2)) { while (isa(I1)) I1 = &*BB1_Itr++; while (isa(I2)) I2 = &*BB2_Itr++; } } while (I1->isIdenticalToWhenDefined(I2)); return true; HoistTerminator: // It may not be possible to hoist an invoke. // FIXME: Can we define a safety predicate for CallBr? if (isa(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2)) return Changed; // TODO: callbr hoisting currently disabled pending further study. if (isa(I1)) return Changed; for (BasicBlock *Succ : successors(BB1)) { for (PHINode &PN : Succ->phis()) { Value *BB1V = PN.getIncomingValueForBlock(BB1); Value *BB2V = PN.getIncomingValueForBlock(BB2); if (BB1V == BB2V) continue; // Check for passingValueIsAlwaysUndefined here because we would rather // eliminate undefined control flow then converting it to a select. if (passingValueIsAlwaysUndefined(BB1V, &PN) || passingValueIsAlwaysUndefined(BB2V, &PN)) return Changed; if (isa(BB1V) && !isSafeToSpeculativelyExecute(BB1V)) return Changed; if (isa(BB2V) && !isSafeToSpeculativelyExecute(BB2V)) return Changed; } } // Okay, it is safe to hoist the terminator. Instruction *NT = I1->clone(); BIParent->getInstList().insert(BI->getIterator(), NT); if (!NT->getType()->isVoidTy()) { I1->replaceAllUsesWith(NT); I2->replaceAllUsesWith(NT); NT->takeName(I1); } Changed = true; ++NumHoistCommonInstrs; // Ensure terminator gets a debug location, even an unknown one, in case // it involves inlinable calls. NT->applyMergedLocation(I1->getDebugLoc(), I2->getDebugLoc()); // PHIs created below will adopt NT's merged DebugLoc. IRBuilder Builder(NT); // Hoisting one of the terminators from our successor is a great thing. 
// Unfortunately, the successors of the if/else blocks may have PHI nodes in // them. If they do, all PHI entries for BB1/BB2 must agree for all PHI // nodes, so we insert select instruction to compute the final result. std::map, SelectInst *> InsertedSelects; for (BasicBlock *Succ : successors(BB1)) { for (PHINode &PN : Succ->phis()) { Value *BB1V = PN.getIncomingValueForBlock(BB1); Value *BB2V = PN.getIncomingValueForBlock(BB2); if (BB1V == BB2V) continue; // These values do not agree. Insert a select instruction before NT // that determines the right value. SelectInst *&SI = InsertedSelects[std::make_pair(BB1V, BB2V)]; if (!SI) { // Propagate fast-math-flags from phi node to its replacement select. IRBuilder<>::FastMathFlagGuard FMFGuard(Builder); if (isa(PN)) Builder.setFastMathFlags(PN.getFastMathFlags()); SI = cast( Builder.CreateSelect(BI->getCondition(), BB1V, BB2V, BB1V->getName() + "." + BB2V->getName(), BI)); } // Make the PHI node use the select for all incoming values for BB1/BB2 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) if (PN.getIncomingBlock(i) == BB1 || PN.getIncomingBlock(i) == BB2) PN.setIncomingValue(i, SI); } } SmallVector Updates; // Update any PHI nodes in our new successors. for (BasicBlock *Succ : successors(BB1)) { AddPredecessorToBlock(Succ, BIParent, BB1); if (DTU) Updates.push_back({DominatorTree::Insert, BIParent, Succ}); } if (DTU) for (BasicBlock *Succ : successors(BI)) Updates.push_back({DominatorTree::Delete, BIParent, Succ}); EraseTerminatorAndDCECond(BI); if (DTU) DTU->applyUpdates(Updates); return Changed; } // Check lifetime markers. static bool isLifeTimeMarker(const Instruction *I) { if (auto II = dyn_cast(I)) { switch (II->getIntrinsicID()) { default: break; case Intrinsic::lifetime_start: case Intrinsic::lifetime_end: return true; } } return false; } // TODO: Refine this. This should avoid cases like turning constant memcpy sizes // into variables. 
static bool replacingOperandWithVariableIsCheap(const Instruction *I, int OpIdx) { return !isa(I); } // All instructions in Insts belong to different blocks that all unconditionally // branch to a common successor. Analyze each instruction and return true if it // would be possible to sink them into their successor, creating one common // instruction instead. For every value that would be required to be provided by // PHI node (because an operand varies in each input block), add to PHIOperands. static bool canSinkInstructions( ArrayRef Insts, DenseMap> &PHIOperands) { // Prune out obviously bad instructions to move. Each instruction must have // exactly zero or one use, and we check later that use is by a single, common // PHI instruction in the successor. bool HasUse = !Insts.front()->user_empty(); for (auto *I : Insts) { // These instructions may change or break semantics if moved. if (isa(I) || I->isEHPad() || isa(I) || I->getType()->isTokenTy()) return false; // Do not try to sink an instruction in an infinite loop - it can cause // this algorithm to infinite loop. if (I->getParent()->getSingleSuccessor() == I->getParent()) return false; // Conservatively return false if I is an inline-asm instruction. Sinking // and merging inline-asm instructions can potentially create arguments // that cannot satisfy the inline-asm constraints. // If the instruction has nomerge attribute, return false. if (const auto *C = dyn_cast(I)) if (C->isInlineAsm() || C->cannotMerge()) return false; // Each instruction must have zero or one use. if (HasUse && !I->hasOneUse()) return false; if (!HasUse && !I->user_empty()) return false; } const Instruction *I0 = Insts.front(); for (auto *I : Insts) if (!I->isSameOperationAs(I0)) return false; // All instructions in Insts are known to be the same opcode. 
If they have a // use, check that the only user is a PHI or in the same block as the // instruction, because if a user is in the same block as an instruction we're // contemplating sinking, it must already be determined to be sinkable. if (HasUse) { auto *PNUse = dyn_cast(*I0->user_begin()); auto *Succ = I0->getParent()->getTerminator()->getSuccessor(0); if (!all_of(Insts, [&PNUse,&Succ](const Instruction *I) -> bool { auto *U = cast(*I->user_begin()); return (PNUse && PNUse->getParent() == Succ && PNUse->getIncomingValueForBlock(I->getParent()) == I) || U->getParent() == I->getParent(); })) return false; } // Because SROA can't handle speculating stores of selects, try not to sink // loads, stores or lifetime markers of allocas when we'd have to create a // PHI for the address operand. Also, because it is likely that loads or // stores of allocas will disappear when Mem2Reg/SROA is run, don't sink // them. // This can cause code churn which can have unintended consequences down // the line - see https://llvm.org/bugs/show_bug.cgi?id=30244. // FIXME: This is a workaround for a deficiency in SROA - see // https://llvm.org/bugs/show_bug.cgi?id=30188 if (isa(I0) && any_of(Insts, [](const Instruction *I) { return isa(I->getOperand(1)->stripPointerCasts()); })) return false; if (isa(I0) && any_of(Insts, [](const Instruction *I) { return isa(I->getOperand(0)->stripPointerCasts()); })) return false; if (isLifeTimeMarker(I0) && any_of(Insts, [](const Instruction *I) { return isa(I->getOperand(1)->stripPointerCasts()); })) return false; // For calls to be sinkable, they must all be indirect, or have same callee. // I.e. if we have two direct calls to different callees, we don't want to // turn that into an indirect call. Likewise, if we have an indirect call, // and a direct call, we don't actually want to have a single indirect call. 
if (isa(I0)) { auto IsIndirectCall = [](const Instruction *I) { return cast(I)->isIndirectCall(); }; bool HaveIndirectCalls = any_of(Insts, IsIndirectCall); bool AllCallsAreIndirect = all_of(Insts, IsIndirectCall); if (HaveIndirectCalls) { if (!AllCallsAreIndirect) return false; } else { // All callees must be identical. Value *Callee = nullptr; for (const Instruction *I : Insts) { Value *CurrCallee = cast(I)->getCalledOperand(); if (!Callee) Callee = CurrCallee; else if (Callee != CurrCallee) return false; } } } for (unsigned OI = 0, OE = I0->getNumOperands(); OI != OE; ++OI) { Value *Op = I0->getOperand(OI); if (Op->getType()->isTokenTy()) // Don't touch any operand of token type. return false; auto SameAsI0 = [&I0, OI](const Instruction *I) { assert(I->getNumOperands() == I0->getNumOperands()); return I->getOperand(OI) == I0->getOperand(OI); }; if (!all_of(Insts, SameAsI0)) { if ((isa(Op) && !replacingOperandWithVariableIsCheap(I0, OI)) || !canReplaceOperandWithVariable(I0, OI)) // We can't create a PHI from this GEP. return false; for (auto *I : Insts) PHIOperands[I].push_back(I->getOperand(OI)); } } return true; } // Assuming canSinkInstructions(Blocks) has returned true, sink the last // instruction of every block in Blocks to their common successor, commoning // into one instruction. static bool sinkLastInstruction(ArrayRef Blocks) { auto *BBEnd = Blocks[0]->getTerminator()->getSuccessor(0); // canSinkInstructions returning true guarantees that every block has at // least one non-terminator instruction. SmallVector Insts; for (auto *BB : Blocks) { Instruction *I = BB->getTerminator(); do { I = I->getPrevNode(); } while (isa(I) && I != &BB->front()); if (!isa(I)) Insts.push_back(I); } // The only checking we need to do now is that all users of all instructions // are the same PHI node. canSinkInstructions should have checked this but // it is slightly over-aggressive - it gets confused by commutative // instructions so double-check it here. 
Instruction *I0 = Insts.front(); if (!I0->user_empty()) { auto *PNUse = dyn_cast(*I0->user_begin()); if (!all_of(Insts, [&PNUse](const Instruction *I) -> bool { auto *U = cast(*I->user_begin()); return U == PNUse; })) return false; } // We don't need to do any more checking here; canSinkInstructions should // have done it all for us. SmallVector NewOperands; for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O) { // This check is different to that in canSinkInstructions. There, we // cared about the global view once simplifycfg (and instcombine) have // completed - it takes into account PHIs that become trivially // simplifiable. However here we need a more local view; if an operand // differs we create a PHI and rely on instcombine to clean up the very // small mess we may make. bool NeedPHI = any_of(Insts, [&I0, O](const Instruction *I) { return I->getOperand(O) != I0->getOperand(O); }); if (!NeedPHI) { NewOperands.push_back(I0->getOperand(O)); continue; } // Create a new PHI in the successor block and populate it. auto *Op = I0->getOperand(O); assert(!Op->getType()->isTokenTy() && "Can't PHI tokens!"); auto *PN = PHINode::Create(Op->getType(), Insts.size(), Op->getName() + ".sink", &BBEnd->front()); for (auto *I : Insts) PN->addIncoming(I->getOperand(O), I->getParent()); NewOperands.push_back(PN); } // Arbitrarily use I0 as the new "common" instruction; remap its operands // and move it to the start of the successor block. for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O) I0->getOperandUse(O).set(NewOperands[O]); I0->moveBefore(&*BBEnd->getFirstInsertionPt()); // Update metadata and IR flags, and merge debug locations. for (auto *I : Insts) if (I != I0) { // The debug location for the "common" instruction is the merged locations // of all the commoned instructions. We start with the original location // of the "common" instruction and iteratively merge each location in the // loop below. 
// This is an N-way merge, which will be inefficient if I0 is a CallInst. // However, as N-way merge for CallInst is rare, so we use simplified API // instead of using complex API for N-way merge. I0->applyMergedLocation(I0->getDebugLoc(), I->getDebugLoc()); combineMetadataForCSE(I0, I, true); I0->andIRFlags(I); } if (!I0->user_empty()) { // canSinkLastInstruction checked that all instructions were used by // one and only one PHI node. Find that now, RAUW it to our common // instruction and nuke it. auto *PN = cast(*I0->user_begin()); PN->replaceAllUsesWith(I0); PN->eraseFromParent(); } // Finally nuke all instructions apart from the common instruction. for (auto *I : Insts) if (I != I0) I->eraseFromParent(); return true; } namespace { // LockstepReverseIterator - Iterates through instructions // in a set of blocks in reverse order from the first non-terminator. // For example (assume all blocks have size n): // LockstepReverseIterator I([B1, B2, B3]); // *I-- = [B1[n], B2[n], B3[n]]; // *I-- = [B1[n-1], B2[n-1], B3[n-1]]; // *I-- = [B1[n-2], B2[n-2], B3[n-2]]; // ... class LockstepReverseIterator { ArrayRef Blocks; SmallVector Insts; bool Fail; public: LockstepReverseIterator(ArrayRef Blocks) : Blocks(Blocks) { reset(); } void reset() { Fail = false; Insts.clear(); for (auto *BB : Blocks) { Instruction *Inst = BB->getTerminator(); for (Inst = Inst->getPrevNode(); Inst && isa(Inst);) Inst = Inst->getPrevNode(); if (!Inst) { // Block wasn't big enough. Fail = true; return; } Insts.push_back(Inst); } } bool isValid() const { return !Fail; } void operator--() { if (Fail) return; for (auto *&Inst : Insts) { for (Inst = Inst->getPrevNode(); Inst && isa(Inst);) Inst = Inst->getPrevNode(); // Already at beginning of block. if (!Inst) { Fail = true; return; } } } void operator++() { if (Fail) return; for (auto *&Inst : Insts) { for (Inst = Inst->getNextNode(); Inst && isa(Inst);) Inst = Inst->getNextNode(); // Already at end of block. 
if (!Inst) { Fail = true; return; } } } ArrayRef operator * () const { return Insts; } }; } // end anonymous namespace /// Check whether BB's predecessors end with unconditional branches. If it is /// true, sink any common code from the predecessors to BB. static bool SinkCommonCodeFromPredecessors(BasicBlock *BB, DomTreeUpdater *DTU) { // We support two situations: // (1) all incoming arcs are unconditional // (2) there are non-unconditional incoming arcs // // (2) is very common in switch defaults and // else-if patterns; // // if (a) f(1); // else if (b) f(2); // // produces: // // [if] // / \ // [f(1)] [if] // | | \ // | | | // | [f(2)]| // \ | / // [ end ] // // [end] has two unconditional predecessor arcs and one conditional. The // conditional refers to the implicit empty 'else' arc. This conditional // arc can also be caused by an empty default block in a switch. // // In this case, we attempt to sink code from all *unconditional* arcs. // If we can sink instructions from these arcs (determined during the scan // phase below) we insert a common successor for all unconditional arcs and // connect that to [end], to enable sinking: // // [if] // / \ // [x(1)] [if] // | | \ // | | \ // | [x(2)] | // \ / | // [sink.split] | // \ / // [ end ] // SmallVector UnconditionalPreds; bool HaveNonUnconditionalPredecessors = false; for (auto *PredBB : predecessors(BB)) { auto *PredBr = dyn_cast(PredBB->getTerminator()); if (PredBr && PredBr->isUnconditional()) UnconditionalPreds.push_back(PredBB); else HaveNonUnconditionalPredecessors = true; } if (UnconditionalPreds.size() < 2) return false; // We take a two-step approach to tail sinking. First we scan from the end of // each block upwards in lockstep. If the n'th instruction from the end of each // block can be sunk, those instructions are added to ValuesToSink and we // carry on. 
If we can sink an instruction but need to PHI-merge some operands // (because they're not identical in each instruction) we add these to // PHIOperands. int ScanIdx = 0; SmallPtrSet InstructionsToSink; DenseMap> PHIOperands; LockstepReverseIterator LRI(UnconditionalPreds); while (LRI.isValid() && canSinkInstructions(*LRI, PHIOperands)) { LLVM_DEBUG(dbgs() << "SINK: instruction can be sunk: " << *(*LRI)[0] << "\n"); InstructionsToSink.insert((*LRI).begin(), (*LRI).end()); ++ScanIdx; --LRI; } // If no instructions can be sunk, early-return. if (ScanIdx == 0) return false; // Okay, we *could* sink last ScanIdx instructions. But how many can we // actually sink before encountering instruction that is unprofitable to sink? auto ProfitableToSinkInstruction = [&](LockstepReverseIterator &LRI) { unsigned NumPHIdValues = 0; for (auto *I : *LRI) for (auto *V : PHIOperands[I]) { if (InstructionsToSink.count(V) == 0) ++NumPHIdValues; // FIXME: this check is overly optimistic. We may end up not sinking // said instruction, due to the very same profitability check. // See @creating_too_many_phis in sink-common-code.ll. } LLVM_DEBUG(dbgs() << "SINK: #phid values: " << NumPHIdValues << "\n"); unsigned NumPHIInsts = NumPHIdValues / UnconditionalPreds.size(); if ((NumPHIdValues % UnconditionalPreds.size()) != 0) NumPHIInsts++; return NumPHIInsts <= 1; }; // We've determined that we are going to sink last ScanIdx instructions, // and recorded them in InstructionsToSink. Now, some instructions may be // unprofitable to sink. But that determination depends on the instructions // that we are going to sink. // First, forward scan: find the first instruction unprofitable to sink, // recording all the ones that are profitable to sink. // FIXME: would it be better, after we detect that not all are profitable. // to either record the profitable ones, or erase the unprofitable ones? // Maybe we need to choose (at runtime) the one that will touch least instrs? 
LRI.reset(); int Idx = 0; SmallPtrSet InstructionsProfitableToSink; while (Idx < ScanIdx) { if (!ProfitableToSinkInstruction(LRI)) { // Too many PHIs would be created. LLVM_DEBUG( dbgs() << "SINK: stopping here, too many PHIs would be created!\n"); break; } InstructionsProfitableToSink.insert((*LRI).begin(), (*LRI).end()); --LRI; ++Idx; } // If no instructions can be sunk, early-return. if (Idx == 0) return false; // Did we determine that (only) some instructions are unprofitable to sink? if (Idx < ScanIdx) { // Okay, some instructions are unprofitable. ScanIdx = Idx; InstructionsToSink = InstructionsProfitableToSink; // But, that may make other instructions unprofitable, too. // So, do a backward scan, do any earlier instructions become unprofitable? assert(!ProfitableToSinkInstruction(LRI) && "We already know that the last instruction is unprofitable to sink"); ++LRI; --Idx; while (Idx >= 0) { // If we detect that an instruction becomes unprofitable to sink, // all earlier instructions won't be sunk either, // so preemptively keep InstructionsProfitableToSink in sync. // FIXME: is this the most performant approach? for (auto *I : *LRI) InstructionsProfitableToSink.erase(I); if (!ProfitableToSinkInstruction(LRI)) { // Everything starting with this instruction won't be sunk. ScanIdx = Idx; InstructionsToSink = InstructionsProfitableToSink; } ++LRI; --Idx; } } // If no instructions can be sunk, early-return. if (ScanIdx == 0) return false; bool Changed = false; if (HaveNonUnconditionalPredecessors) { // It is always legal to sink common instructions from unconditional // predecessors. However, if not all predecessors are unconditional, // this transformation might be pessimizing. So as a rule of thumb, // don't do it unless we'd sink at least one non-speculatable instruction. 
// See https://bugs.llvm.org/show_bug.cgi?id=30244 LRI.reset(); int Idx = 0; bool Profitable = false; while (Idx < ScanIdx) { if (!isSafeToSpeculativelyExecute((*LRI)[0])) { Profitable = true; break; } --LRI; ++Idx; } if (!Profitable) return false; LLVM_DEBUG(dbgs() << "SINK: Splitting edge\n"); // We have a conditional edge and we're going to sink some instructions. // Insert a new block postdominating all blocks we're going to sink from. if (!SplitBlockPredecessors(BB, UnconditionalPreds, ".sink.split", DTU)) // Edges couldn't be split. return false; Changed = true; } // Now that we've analyzed all potential sinking candidates, perform the // actual sink. We iteratively sink the last non-terminator of the source // blocks into their common successor unless doing so would require too // many PHI instructions to be generated (currently only one PHI is allowed // per sunk instruction). // // We can use InstructionsToSink to discount values needing PHI-merging that will // actually be sunk in a later iteration. This allows us to be more // aggressive in what we sink. This does allow a false positive where we // sink presuming a later value will also be sunk, but stop half way through // and never actually sink it which means we produce more PHIs than intended. // This is unlikely in practice though. int SinkIdx = 0; for (; SinkIdx != ScanIdx; ++SinkIdx) { LLVM_DEBUG(dbgs() << "SINK: Sink: " << *UnconditionalPreds[0]->getTerminator()->getPrevNode() << "\n"); // Because we've sunk every instruction in turn, the current instruction to // sink is always at index 0. LRI.reset(); if (!sinkLastInstruction(UnconditionalPreds)) { LLVM_DEBUG( dbgs() << "SINK: stopping here, failed to actually sink instruction!\n"); break; } NumSinkCommonInstrs++; Changed = true; } if (SinkIdx != 0) ++NumSinkCommonCode; return Changed; } /// Determine if we can hoist sink a sole store instruction out of a /// conditional block. 
/// /// We are looking for code like the following: /// BrBB: /// store i32 %add, i32* %arrayidx2 /// ... // No other stores or function calls (we could be calling a memory /// ... // function). /// %cmp = icmp ult %x, %y /// br i1 %cmp, label %EndBB, label %ThenBB /// ThenBB: /// store i32 %add5, i32* %arrayidx2 /// br label EndBB /// EndBB: /// ... /// We are going to transform this into: /// BrBB: /// store i32 %add, i32* %arrayidx2 /// ... // /// %cmp = icmp ult %x, %y /// %add.add5 = select i1 %cmp, i32 %add, %add5 /// store i32 %add.add5, i32* %arrayidx2 /// ... /// /// \return The pointer to the value of the previous store if the store can be /// hoisted into the predecessor block. 0 otherwise. static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB, BasicBlock *StoreBB, BasicBlock *EndBB) { StoreInst *StoreToHoist = dyn_cast(I); if (!StoreToHoist) return nullptr; // Volatile or atomic. if (!StoreToHoist->isSimple()) return nullptr; Value *StorePtr = StoreToHoist->getPointerOperand(); Type *StoreTy = StoreToHoist->getValueOperand()->getType(); // Look for a store to the same pointer in BrBB. unsigned MaxNumInstToLookAt = 9; // Skip pseudo probe intrinsic calls which are not really killing any memory // accesses. for (Instruction &CurI : reverse(BrBB->instructionsWithoutDebug(true))) { if (!MaxNumInstToLookAt) break; --MaxNumInstToLookAt; // Could be calling an instruction that affects memory like free(). if (CurI.mayWriteToMemory() && !isa(CurI)) return nullptr; if (auto *SI = dyn_cast(&CurI)) { // Found the previous store to same location and type. Make sure it is // simple, to avoid introducing a spurious non-atomic write after an // atomic write. if (SI->getPointerOperand() == StorePtr && SI->getValueOperand()->getType() == StoreTy && SI->isSimple()) // Found the previous store, return its value operand. return SI->getValueOperand(); return nullptr; // Unknown store. 
} } return nullptr; } /// Estimate the cost of the insertion(s) and check that the PHI nodes can be /// converted to selects. static bool validateAndCostRequiredSelects(BasicBlock *BB, BasicBlock *ThenBB, BasicBlock *EndBB, unsigned &SpeculatedInstructions, InstructionCost &Cost, const TargetTransformInfo &TTI) { TargetTransformInfo::TargetCostKind CostKind = BB->getParent()->hasMinSize() ? TargetTransformInfo::TCK_CodeSize : TargetTransformInfo::TCK_SizeAndLatency; bool HaveRewritablePHIs = false; for (PHINode &PN : EndBB->phis()) { Value *OrigV = PN.getIncomingValueForBlock(BB); Value *ThenV = PN.getIncomingValueForBlock(ThenBB); // FIXME: Try to remove some of the duplication with HoistThenElseCodeToIf. // Skip PHIs which are trivial. if (ThenV == OrigV) continue; Cost += TTI.getCmpSelInstrCost(Instruction::Select, PN.getType(), nullptr, CmpInst::BAD_ICMP_PREDICATE, CostKind); // Don't convert to selects if we could remove undefined behavior instead. if (passingValueIsAlwaysUndefined(OrigV, &PN) || passingValueIsAlwaysUndefined(ThenV, &PN)) return false; HaveRewritablePHIs = true; ConstantExpr *OrigCE = dyn_cast(OrigV); ConstantExpr *ThenCE = dyn_cast(ThenV); if (!OrigCE && !ThenCE) continue; // Known safe and cheap. if ((ThenCE && !isSafeToSpeculativelyExecute(ThenCE)) || (OrigCE && !isSafeToSpeculativelyExecute(OrigCE))) return false; InstructionCost OrigCost = OrigCE ? computeSpeculationCost(OrigCE, TTI) : 0; InstructionCost ThenCost = ThenCE ? computeSpeculationCost(ThenCE, TTI) : 0; InstructionCost MaxCost = 2 * PHINodeFoldingThreshold * TargetTransformInfo::TCC_Basic; if (OrigCost + ThenCost > MaxCost) return false; // Account for the cost of an unfolded ConstantExpr which could end up // getting expanded into Instructions. // FIXME: This doesn't account for how many operations are combined in the // constant expression. 
++SpeculatedInstructions; if (SpeculatedInstructions > 1) return false; } return HaveRewritablePHIs; } /// Speculate a conditional basic block flattening the CFG. /// /// Note that this is a very risky transform currently. Speculating /// instructions like this is most often not desirable. Instead, there is an MI /// pass which can do it with full awareness of the resource constraints. /// However, some cases are "obvious" and we should do directly. An example of /// this is speculating a single, reasonably cheap instruction. /// /// There is only one distinct advantage to flattening the CFG at the IR level: /// it makes very common but simplistic optimizations such as are common in /// instcombine and the DAG combiner more powerful by removing CFG edges and /// modeling their effects with easier to reason about SSA value graphs. /// /// /// An illustration of this transform is turning this IR: /// \code /// BB: /// %cmp = icmp ult %x, %y /// br i1 %cmp, label %EndBB, label %ThenBB /// ThenBB: /// %sub = sub %x, %y /// br label BB2 /// EndBB: /// %phi = phi [ %sub, %ThenBB ], [ 0, %EndBB ] /// ... /// \endcode /// /// Into this IR: /// \code /// BB: /// %cmp = icmp ult %x, %y /// %sub = sub %x, %y /// %cond = select i1 %cmp, 0, %sub /// ... /// \endcode /// /// \returns true if the conditional block is removed. bool SimplifyCFGOpt::SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB, const TargetTransformInfo &TTI) { // Be conservative for now. FP select instruction can often be expensive. Value *BrCond = BI->getCondition(); if (isa(BrCond)) return false; BasicBlock *BB = BI->getParent(); BasicBlock *EndBB = ThenBB->getTerminator()->getSuccessor(0); InstructionCost Budget = PHINodeFoldingThreshold * TargetTransformInfo::TCC_Basic; // If ThenBB is actually on the false edge of the conditional branch, remember // to swap the select operands later. 
bool Invert = false; if (ThenBB != BI->getSuccessor(0)) { assert(ThenBB == BI->getSuccessor(1) && "No edge from 'if' block?"); Invert = true; } assert(EndBB == BI->getSuccessor(!Invert) && "No edge from to end block"); // If the branch is non-unpredictable, and is predicted to *not* branch to // the `then` block, then avoid speculating it. if (!BI->getMetadata(LLVMContext::MD_unpredictable)) { uint64_t TWeight, FWeight; if (BI->extractProfMetadata(TWeight, FWeight) && (TWeight + FWeight) != 0) { uint64_t EndWeight = Invert ? TWeight : FWeight; BranchProbability BIEndProb = BranchProbability::getBranchProbability(EndWeight, TWeight + FWeight); BranchProbability Likely = TTI.getPredictableBranchThreshold(); if (BIEndProb >= Likely) return false; } } // Keep a count of how many times instructions are used within ThenBB when // they are candidates for sinking into ThenBB. Specifically: // - They are defined in BB, and // - They have no side effects, and // - All of their uses are in ThenBB. SmallDenseMap SinkCandidateUseCounts; SmallVector SpeculatedDbgIntrinsics; unsigned SpeculatedInstructions = 0; Value *SpeculatedStoreValue = nullptr; StoreInst *SpeculatedStore = nullptr; for (BasicBlock::iterator BBI = ThenBB->begin(), BBE = std::prev(ThenBB->end()); BBI != BBE; ++BBI) { Instruction *I = &*BBI; // Skip debug info. if (isa(I)) { SpeculatedDbgIntrinsics.push_back(I); continue; } // Skip pseudo probes. The consequence is we lose track of the branch // probability for ThenBB, which is fine since the optimization here takes // place regardless of the branch probability. if (isa(I)) { // The probe should be deleted so that it will not be over-counted when // the samples collected on the non-conditional path are counted towards // the conditional path. We leave it for the counts inference algorithm to // figure out a proper count for an unknown probe. 
SpeculatedDbgIntrinsics.push_back(I); continue; } // Only speculatively execute a single instruction (not counting the // terminator) for now. ++SpeculatedInstructions; if (SpeculatedInstructions > 1) return false; // Don't hoist the instruction if it's unsafe or expensive. if (!isSafeToSpeculativelyExecute(I) && !(HoistCondStores && (SpeculatedStoreValue = isSafeToSpeculateStore( I, BB, ThenBB, EndBB)))) return false; if (!SpeculatedStoreValue && computeSpeculationCost(I, TTI) > PHINodeFoldingThreshold * TargetTransformInfo::TCC_Basic) return false; // Store the store speculation candidate. if (SpeculatedStoreValue) SpeculatedStore = cast(I); // Do not hoist the instruction if any of its operands are defined but not // used in BB. The transformation will prevent the operand from // being sunk into the use block. for (Use &Op : I->operands()) { Instruction *OpI = dyn_cast(Op); if (!OpI || OpI->getParent() != BB || OpI->mayHaveSideEffects()) continue; // Not a candidate for sinking. ++SinkCandidateUseCounts[OpI]; } } // Consider any sink candidates which are only used in ThenBB as costs for // speculation. Note, while we iterate over a DenseMap here, we are summing // and so iteration order isn't significant. for (SmallDenseMap::iterator I = SinkCandidateUseCounts.begin(), E = SinkCandidateUseCounts.end(); I != E; ++I) if (I->first->hasNUses(I->second)) { ++SpeculatedInstructions; if (SpeculatedInstructions > 1) return false; } // Check that we can insert the selects and that it's not too expensive to do // so. bool Convert = SpeculatedStore != nullptr; InstructionCost Cost = 0; Convert |= validateAndCostRequiredSelects(BB, ThenBB, EndBB, SpeculatedInstructions, Cost, TTI); if (!Convert || Cost > Budget) return false; // If we get here, we can hoist the instruction and if-convert. LLVM_DEBUG(dbgs() << "SPECULATIVELY EXECUTING BB" << *ThenBB << "\n";); // Insert a select of the value of the speculated store. 
if (SpeculatedStoreValue) { IRBuilder Builder(BI); Value *TrueV = SpeculatedStore->getValueOperand(); Value *FalseV = SpeculatedStoreValue; if (Invert) std::swap(TrueV, FalseV); Value *S = Builder.CreateSelect( BrCond, TrueV, FalseV, "spec.store.select", BI); SpeculatedStore->setOperand(0, S); SpeculatedStore->applyMergedLocation(BI->getDebugLoc(), SpeculatedStore->getDebugLoc()); } // Metadata can be dependent on the condition we are hoisting above. // Conservatively strip all metadata on the instruction. Drop the debug loc // to avoid making it appear as if the condition is a constant, which would // be misleading while debugging. // Similarly strip attributes that maybe dependent on condition we are // hoisting above. for (auto &I : *ThenBB) { if (!SpeculatedStoreValue || &I != SpeculatedStore) I.setDebugLoc(DebugLoc()); I.dropUndefImplyingAttrsAndUnknownMetadata(); } // Hoist the instructions. BB->getInstList().splice(BI->getIterator(), ThenBB->getInstList(), ThenBB->begin(), std::prev(ThenBB->end())); // Insert selects and rewrite the PHI operands. IRBuilder Builder(BI); for (PHINode &PN : EndBB->phis()) { unsigned OrigI = PN.getBasicBlockIndex(BB); unsigned ThenI = PN.getBasicBlockIndex(ThenBB); Value *OrigV = PN.getIncomingValue(OrigI); Value *ThenV = PN.getIncomingValue(ThenI); // Skip PHIs which are trivial. if (OrigV == ThenV) continue; // Create a select whose true value is the speculatively executed value and // false value is the pre-existing value. Swap them if the branch // destinations were inverted. Value *TrueV = ThenV, *FalseV = OrigV; if (Invert) std::swap(TrueV, FalseV); Value *V = Builder.CreateSelect(BrCond, TrueV, FalseV, "spec.select", BI); PN.setIncomingValue(OrigI, V); PN.setIncomingValue(ThenI, V); } // Remove speculated dbg intrinsics. // FIXME: Is it possible to do this in a more elegant way? Moving/merging the // dbg value for the different flows and inserting it after the select. 
for (Instruction *I : SpeculatedDbgIntrinsics) I->eraseFromParent(); ++NumSpeculations; return true; } /// Return true if we can thread a branch across this block. static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) { int Size = 0; SmallPtrSet EphValues; auto IsEphemeral = [&](const Value *V) { if (isa(V)) return true; return isSafeToSpeculativelyExecute(V) && all_of(V->users(), [&](const User *U) { return EphValues.count(U); }); }; // Walk the loop in reverse so that we can identify ephemeral values properly // (values only feeding assumes). for (Instruction &I : reverse(BB->instructionsWithoutDebug())) { // Can't fold blocks that contain noduplicate or convergent calls. if (CallInst *CI = dyn_cast(&I)) if (CI->cannotDuplicate() || CI->isConvergent()) return false; // Ignore ephemeral values which are deleted during codegen. if (IsEphemeral(&I)) EphValues.insert(&I); // We will delete Phis while threading, so Phis should not be accounted in // block's size. else if (!isa(I)) { if (Size++ > MaxSmallBlockSize) return false; // Don't clone large BB's. } // We can only support instructions that do not define values that are // live outside of the current basic block. for (User *U : I.users()) { Instruction *UI = cast(U); if (UI->getParent() != BB || isa(UI)) return false; } // Looks ok, continue checking. } return true; } /// If we have a conditional branch on a PHI node value that is defined in the /// same block as the branch and if any PHI entries are constants, thread edges /// corresponding to that entry to be branches to their ultimate destination. static bool FoldCondBranchOnPHI(BranchInst *BI, DomTreeUpdater *DTU, const DataLayout &DL, AssumptionCache *AC) { BasicBlock *BB = BI->getParent(); PHINode *PN = dyn_cast(BI->getCondition()); // NOTE: we currently cannot transform this case if the PHI node is used // outside of the block. if (!PN || PN->getParent() != BB || !PN->hasOneUse()) return false; // Degenerate case of a single entry PHI. 
if (PN->getNumIncomingValues() == 1) { FoldSingleEntryPHINodes(PN->getParent()); return true; } // Now we know that this block has multiple preds and two succs. if (!BlockIsSimpleEnoughToThreadThrough(BB)) return false; // Okay, this is a simple enough basic block. See if any phi values are // constants. for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { ConstantInt *CB = dyn_cast(PN->getIncomingValue(i)); if (!CB || !CB->getType()->isIntegerTy(1)) continue; // Okay, we now know that all edges from PredBB should be revectored to // branch to RealDest. BasicBlock *PredBB = PN->getIncomingBlock(i); BasicBlock *RealDest = BI->getSuccessor(!CB->getZExtValue()); if (RealDest == BB) continue; // Skip self loops. // Skip if the predecessor's terminator is an indirect branch. if (isa(PredBB->getTerminator())) continue; SmallVector Updates; // The dest block might have PHI nodes, other predecessors and other // difficult cases. Instead of being smart about this, just insert a new // block that jumps to the destination block, effectively splitting // the edge we are about to create. BasicBlock *EdgeBB = BasicBlock::Create(BB->getContext(), RealDest->getName() + ".critedge", RealDest->getParent(), RealDest); BranchInst *CritEdgeBranch = BranchInst::Create(RealDest, EdgeBB); if (DTU) Updates.push_back({DominatorTree::Insert, EdgeBB, RealDest}); CritEdgeBranch->setDebugLoc(BI->getDebugLoc()); // Update PHI nodes. AddPredecessorToBlock(RealDest, EdgeBB, BB); // BB may have instructions that are being threaded over. Clone these // instructions into EdgeBB. We know that there will be no uses of the // cloned instructions outside of EdgeBB. BasicBlock::iterator InsertPt = EdgeBB->begin(); DenseMap TranslateMap; // Track translated values. for (BasicBlock::iterator BBI = BB->begin(); &*BBI != BI; ++BBI) { if (PHINode *PN = dyn_cast(BBI)) { TranslateMap[PN] = PN->getIncomingValueForBlock(PredBB); continue; } // Clone the instruction. 
Instruction *N = BBI->clone(); if (BBI->hasName()) N->setName(BBI->getName() + ".c"); // Update operands due to translation. for (Use &Op : N->operands()) { DenseMap::iterator PI = TranslateMap.find(Op); if (PI != TranslateMap.end()) Op = PI->second; } // Check for trivial simplification. if (Value *V = SimplifyInstruction(N, {DL, nullptr, nullptr, AC})) { if (!BBI->use_empty()) TranslateMap[&*BBI] = V; if (!N->mayHaveSideEffects()) { N->deleteValue(); // Instruction folded away, don't need actual inst N = nullptr; } } else { if (!BBI->use_empty()) TranslateMap[&*BBI] = N; } if (N) { // Insert the new instruction into its new home. EdgeBB->getInstList().insert(InsertPt, N); // Register the new instruction with the assumption cache if necessary. if (auto *Assume = dyn_cast(N)) if (AC) AC->registerAssumption(Assume); } } // Loop over all of the edges from PredBB to BB, changing them to branch // to EdgeBB instead. Instruction *PredBBTI = PredBB->getTerminator(); for (unsigned i = 0, e = PredBBTI->getNumSuccessors(); i != e; ++i) if (PredBBTI->getSuccessor(i) == BB) { BB->removePredecessor(PredBB); PredBBTI->setSuccessor(i, EdgeBB); } if (DTU) { Updates.push_back({DominatorTree::Insert, PredBB, EdgeBB}); Updates.push_back({DominatorTree::Delete, PredBB, BB}); DTU->applyUpdates(Updates); } // Recurse, simplifying any other constants. return FoldCondBranchOnPHI(BI, DTU, DL, AC) || true; } return false; } /// Given a BB that starts with the specified two-entry PHI node, /// see if we can eliminate it. static bool FoldTwoEntryPHINode(PHINode *PN, const TargetTransformInfo &TTI, DomTreeUpdater *DTU, const DataLayout &DL) { // Ok, this is a two entry PHI node. Check to see if this is a simple "if // statement", which has a very simple dominance structure. Basically, we // are trying to find the condition that is being branched on, which // subsequently causes this merge to happen. 
We really want control // dependence information for this check, but simplifycfg can't keep it up // to date, and this catches most of the cases we care about anyway. BasicBlock *BB = PN->getParent(); BasicBlock *IfTrue, *IfFalse; BranchInst *DomBI = GetIfCondition(BB, IfTrue, IfFalse); if (!DomBI) return false; Value *IfCond = DomBI->getCondition(); // Don't bother if the branch will be constant folded trivially. if (isa(IfCond)) return false; BasicBlock *DomBlock = DomBI->getParent(); SmallVector IfBlocks; llvm::copy_if( PN->blocks(), std::back_inserter(IfBlocks), [](BasicBlock *IfBlock) { return cast(IfBlock->getTerminator())->isUnconditional(); }); assert((IfBlocks.size() == 1 || IfBlocks.size() == 2) && "Will have either one or two blocks to speculate."); // If the branch is non-unpredictable, see if we either predictably jump to // the merge bb (if we have only a single 'then' block), or if we predictably // jump to one specific 'then' block (if we have two of them). // It isn't beneficial to speculatively execute the code // from the block that we know is predictably not entered. if (!DomBI->getMetadata(LLVMContext::MD_unpredictable)) { uint64_t TWeight, FWeight; if (DomBI->extractProfMetadata(TWeight, FWeight) && (TWeight + FWeight) != 0) { BranchProbability BITrueProb = BranchProbability::getBranchProbability(TWeight, TWeight + FWeight); BranchProbability Likely = TTI.getPredictableBranchThreshold(); BranchProbability BIFalseProb = BITrueProb.getCompl(); if (IfBlocks.size() == 1) { BranchProbability BIBBProb = DomBI->getSuccessor(0) == BB ? BITrueProb : BIFalseProb; if (BIBBProb >= Likely) return false; } else { if (BITrueProb >= Likely || BIFalseProb >= Likely) return false; } } } // Don't try to fold an unreachable block. For example, the phi node itself // can't be the candidate if-condition for a select that we want to form. 
if (auto *IfCondPhiInst = dyn_cast(IfCond)) if (IfCondPhiInst->getParent() == BB) return false; // Okay, we found that we can merge this two-entry phi node into a select. // Doing so would require us to fold *all* two entry phi nodes in this block. // At some point this becomes non-profitable (particularly if the target // doesn't support cmov's). Only do this transformation if there are two or // fewer PHI nodes in this block. unsigned NumPhis = 0; for (BasicBlock::iterator I = BB->begin(); isa(I); ++NumPhis, ++I) if (NumPhis > 2) return false; // Loop over the PHI's seeing if we can promote them all to select // instructions. While we are at it, keep track of the instructions // that need to be moved to the dominating block. SmallPtrSet AggressiveInsts; InstructionCost Cost = 0; InstructionCost Budget = TwoEntryPHINodeFoldingThreshold * TargetTransformInfo::TCC_Basic; bool Changed = false; for (BasicBlock::iterator II = BB->begin(); isa(II);) { PHINode *PN = cast(II++); if (Value *V = SimplifyInstruction(PN, {DL, PN})) { PN->replaceAllUsesWith(V); PN->eraseFromParent(); Changed = true; continue; } if (!dominatesMergePoint(PN->getIncomingValue(0), BB, AggressiveInsts, Cost, Budget, TTI) || !dominatesMergePoint(PN->getIncomingValue(1), BB, AggressiveInsts, Cost, Budget, TTI)) return Changed; } // If we folded the first phi, PN dangles at this point. Refresh it. If // we ran out of PHIs then we simplified them all. PN = dyn_cast(BB->begin()); if (!PN) return true; // Return true if at least one of these is a 'not', and another is either // a 'not' too, or a constant. 
auto CanHoistNotFromBothValues = [](Value *V0, Value *V1) { if (!match(V0, m_Not(m_Value()))) std::swap(V0, V1); auto Invertible = m_CombineOr(m_Not(m_Value()), m_AnyIntegralConstant()); return match(V0, m_Not(m_Value())) && match(V1, Invertible); }; // Don't fold i1 branches on PHIs which contain binary operators or // (possibly inverted) select form of or/ands, unless one of // the incoming values is an 'not' and another one is freely invertible. // These can often be turned into switches and other things. auto IsBinOpOrAnd = [](Value *V) { return match( V, m_CombineOr( m_BinOp(), m_CombineOr(m_Select(m_Value(), m_ImmConstant(), m_Value()), m_Select(m_Value(), m_Value(), m_ImmConstant())))); }; if (PN->getType()->isIntegerTy(1) && (IsBinOpOrAnd(PN->getIncomingValue(0)) || IsBinOpOrAnd(PN->getIncomingValue(1)) || IsBinOpOrAnd(IfCond)) && !CanHoistNotFromBothValues(PN->getIncomingValue(0), PN->getIncomingValue(1))) return Changed; // If all PHI nodes are promotable, check to make sure that all instructions // in the predecessor blocks can be promoted as well. If not, we won't be able // to get rid of the control flow, so it's not worth promoting to select // instructions. for (BasicBlock *IfBlock : IfBlocks) for (BasicBlock::iterator I = IfBlock->begin(); !I->isTerminator(); ++I) if (!AggressiveInsts.count(&*I) && !isa(I) && !isa(I)) { // This is not an aggressive instruction that we can promote. // Because of this, we won't be able to get rid of the control flow, so // the xform is not worth it. return Changed; } // If either of the blocks has it's address taken, we can't do this fold. if (any_of(IfBlocks, [](BasicBlock *IfBlock) { return IfBlock->hasAddressTaken(); })) return Changed; LLVM_DEBUG(dbgs() << "FOUND IF CONDITION! " << *IfCond << " T: " << IfTrue->getName() << " F: " << IfFalse->getName() << "\n"); // If we can still promote the PHI nodes after this gauntlet of tests, // do all of the PHI's now. 
// Move all 'aggressive' instructions, which are defined in the // conditional parts of the if's up to the dominating block. for (BasicBlock *IfBlock : IfBlocks) hoistAllInstructionsInto(DomBlock, DomBI, IfBlock); IRBuilder Builder(DomBI); // Propagate fast-math-flags from phi nodes to replacement selects. IRBuilder<>::FastMathFlagGuard FMFGuard(Builder); while (PHINode *PN = dyn_cast(BB->begin())) { if (isa(PN)) Builder.setFastMathFlags(PN->getFastMathFlags()); // Change the PHI node into a select instruction. Value *TrueVal = PN->getIncomingValueForBlock(IfTrue); Value *FalseVal = PN->getIncomingValueForBlock(IfFalse); Value *Sel = Builder.CreateSelect(IfCond, TrueVal, FalseVal, "", DomBI); PN->replaceAllUsesWith(Sel); Sel->takeName(PN); PN->eraseFromParent(); } // At this point, all IfBlocks are empty, so our if statement // has been flattened. Change DomBlock to jump directly to our new block to // avoid other simplifycfg's kicking in on the diamond. Builder.CreateBr(BB); SmallVector Updates; if (DTU) { Updates.push_back({DominatorTree::Insert, DomBlock, BB}); for (auto *Successor : successors(DomBlock)) Updates.push_back({DominatorTree::Delete, DomBlock, Successor}); } DomBI->eraseFromParent(); if (DTU) DTU->applyUpdates(Updates); return true; } static Value *createLogicalOp(IRBuilderBase &Builder, Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name = "") { // Try to relax logical op to binary op. if (impliesPoison(RHS, LHS)) return Builder.CreateBinOp(Opc, LHS, RHS, Name); if (Opc == Instruction::And) return Builder.CreateLogicalAnd(LHS, RHS, Name); if (Opc == Instruction::Or) return Builder.CreateLogicalOr(LHS, RHS, Name); llvm_unreachable("Invalid logical opcode"); } /// Return true if either PBI or BI has branch weight available, and store /// the weights in {Pred|Succ}{True|False}Weight. If one of PBI and BI does /// not have branch weight, use 1:1 as its weight. 
static bool extractPredSuccWeights(BranchInst *PBI, BranchInst *BI, uint64_t &PredTrueWeight, uint64_t &PredFalseWeight, uint64_t &SuccTrueWeight, uint64_t &SuccFalseWeight) { bool PredHasWeights = PBI->extractProfMetadata(PredTrueWeight, PredFalseWeight); bool SuccHasWeights = BI->extractProfMetadata(SuccTrueWeight, SuccFalseWeight); if (PredHasWeights || SuccHasWeights) { if (!PredHasWeights) PredTrueWeight = PredFalseWeight = 1; if (!SuccHasWeights) SuccTrueWeight = SuccFalseWeight = 1; return true; } else { return false; } } /// Determine if the two branches share a common destination and deduce a glue /// that joins the branches' conditions to arrive at the common destination if /// that would be profitable. static Optional> shouldFoldCondBranchesToCommonDestination(BranchInst *BI, BranchInst *PBI, const TargetTransformInfo *TTI) { assert(BI && PBI && BI->isConditional() && PBI->isConditional() && "Both blocks must end with a conditional branches."); assert(is_contained(predecessors(BI->getParent()), PBI->getParent()) && "PredBB must be a predecessor of BB."); // We have the potential to fold the conditions together, but if the // predecessor branch is predictable, we may not want to merge them. uint64_t PTWeight, PFWeight; BranchProbability PBITrueProb, Likely; if (TTI && !PBI->getMetadata(LLVMContext::MD_unpredictable) && PBI->extractProfMetadata(PTWeight, PFWeight) && (PTWeight + PFWeight) != 0) { PBITrueProb = BranchProbability::getBranchProbability(PTWeight, PTWeight + PFWeight); Likely = TTI->getPredictableBranchThreshold(); } if (PBI->getSuccessor(0) == BI->getSuccessor(0)) { // Speculate the 2nd condition unless the 1st is probably true. if (PBITrueProb.isUnknown() || PBITrueProb < Likely) return {{Instruction::Or, false}}; } else if (PBI->getSuccessor(1) == BI->getSuccessor(1)) { // Speculate the 2nd condition unless the 1st is probably false. 
if (PBITrueProb.isUnknown() || PBITrueProb.getCompl() < Likely) return {{Instruction::And, false}}; } else if (PBI->getSuccessor(0) == BI->getSuccessor(1)) { // Speculate the 2nd condition unless the 1st is probably true. if (PBITrueProb.isUnknown() || PBITrueProb < Likely) return {{Instruction::And, true}}; } else if (PBI->getSuccessor(1) == BI->getSuccessor(0)) { // Speculate the 2nd condition unless the 1st is probably false. if (PBITrueProb.isUnknown() || PBITrueProb.getCompl() < Likely) return {{Instruction::Or, true}}; } return None; } static bool performBranchToCommonDestFolding(BranchInst *BI, BranchInst *PBI, DomTreeUpdater *DTU, MemorySSAUpdater *MSSAU, const TargetTransformInfo *TTI) { BasicBlock *BB = BI->getParent(); BasicBlock *PredBlock = PBI->getParent(); // Determine if the two branches share a common destination. Instruction::BinaryOps Opc; bool InvertPredCond; std::tie(Opc, InvertPredCond) = *shouldFoldCondBranchesToCommonDestination(BI, PBI, TTI); LLVM_DEBUG(dbgs() << "FOLDING BRANCH TO COMMON DEST:\n" << *PBI << *BB); IRBuilder<> Builder(PBI); // The builder is used to create instructions to eliminate the branch in BB. // If BB's terminator has !annotation metadata, add it to the new // instructions. Builder.CollectMetadataToCopy(BB->getTerminator(), {LLVMContext::MD_annotation}); // If we need to invert the condition in the pred block to match, do so now. if (InvertPredCond) { Value *NewCond = PBI->getCondition(); if (NewCond->hasOneUse() && isa(NewCond)) { CmpInst *CI = cast(NewCond); CI->setPredicate(CI->getInversePredicate()); } else { NewCond = Builder.CreateNot(NewCond, PBI->getCondition()->getName() + ".not"); } PBI->setCondition(NewCond); PBI->swapSuccessors(); } BasicBlock *UniqueSucc = PBI->getSuccessor(0) == BB ? BI->getSuccessor(0) : BI->getSuccessor(1); // Before cloning instructions, notify the successor basic block that it // is about to have a new predecessor. 
This will update PHI nodes, // which will allow us to update live-out uses of bonus instructions. AddPredecessorToBlock(UniqueSucc, PredBlock, BB, MSSAU); // Try to update branch weights. uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight; if (extractPredSuccWeights(PBI, BI, PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight)) { SmallVector NewWeights; if (PBI->getSuccessor(0) == BB) { // PBI: br i1 %x, BB, FalseDest // BI: br i1 %y, UniqueSucc, FalseDest // TrueWeight is TrueWeight for PBI * TrueWeight for BI. NewWeights.push_back(PredTrueWeight * SuccTrueWeight); // FalseWeight is FalseWeight for PBI * TotalWeight for BI + // TrueWeight for PBI * FalseWeight for BI. // We assume that total weights of a BranchInst can fit into 32 bits. // Therefore, we will not have overflow using 64-bit arithmetic. NewWeights.push_back(PredFalseWeight * (SuccFalseWeight + SuccTrueWeight) + PredTrueWeight * SuccFalseWeight); } else { // PBI: br i1 %x, TrueDest, BB // BI: br i1 %y, TrueDest, UniqueSucc // TrueWeight is TrueWeight for PBI * TotalWeight for BI + // FalseWeight for PBI * TrueWeight for BI. NewWeights.push_back(PredTrueWeight * (SuccFalseWeight + SuccTrueWeight) + PredFalseWeight * SuccTrueWeight); // FalseWeight is FalseWeight for PBI * FalseWeight for BI. NewWeights.push_back(PredFalseWeight * SuccFalseWeight); } // Halve the weights if any of them cannot fit in an uint32_t FitWeights(NewWeights); SmallVector MDWeights(NewWeights.begin(), NewWeights.end()); setBranchWeights(PBI, MDWeights[0], MDWeights[1]); // TODO: If BB is reachable from all paths through PredBlock, then we // could replace PBI's branch probabilities with BI's. } else PBI->setMetadata(LLVMContext::MD_prof, nullptr); // Now, update the CFG. 
PBI->setSuccessor(PBI->getSuccessor(0) != BB, UniqueSucc); if (DTU) DTU->applyUpdates({{DominatorTree::Insert, PredBlock, UniqueSucc}, {DominatorTree::Delete, PredBlock, BB}}); // If BI was a loop latch, it may have had associated loop metadata. // We need to copy it to the new latch, that is, PBI. if (MDNode *LoopMD = BI->getMetadata(LLVMContext::MD_loop)) PBI->setMetadata(LLVMContext::MD_loop, LoopMD); ValueToValueMapTy VMap; // maps original values to cloned values CloneInstructionsIntoPredecessorBlockAndUpdateSSAUses(BB, PredBlock, VMap); // Now that the Cond was cloned into the predecessor basic block, // or/and the two conditions together. Value *BICond = VMap[BI->getCondition()]; PBI->setCondition( createLogicalOp(Builder, Opc, PBI->getCondition(), BICond, "or.cond")); // Copy any debug value intrinsics into the end of PredBlock. for (Instruction &I : *BB) { if (isa(I)) { Instruction *NewI = I.clone(); RemapInstruction(NewI, VMap, RF_NoModuleLevelChanges | RF_IgnoreMissingLocals); NewI->insertBefore(PBI); } } ++NumFoldBranchToCommonDest; return true; } /// If this basic block is simple enough, and if a predecessor branches to us /// and one of our successors, fold the block into the predecessor and use /// logical operations to pick the right destination. bool llvm::FoldBranchToCommonDest(BranchInst *BI, DomTreeUpdater *DTU, MemorySSAUpdater *MSSAU, const TargetTransformInfo *TTI, unsigned BonusInstThreshold) { // If this block ends with an unconditional branch, // let SpeculativelyExecuteBB() deal with it. if (!BI->isConditional()) return false; BasicBlock *BB = BI->getParent(); TargetTransformInfo::TargetCostKind CostKind = BB->getParent()->hasMinSize() ? TargetTransformInfo::TCK_CodeSize : TargetTransformInfo::TCK_SizeAndLatency; Instruction *Cond = dyn_cast(BI->getCondition()); if (!Cond || (!isa(Cond) && !isa(Cond)) || Cond->getParent() != BB || !Cond->hasOneUse()) return false; // Cond is known to be a compare or binary operator. 
Check to make sure that // neither operand is a potentially-trapping constant expression. if (ConstantExpr *CE = dyn_cast(Cond->getOperand(0))) if (CE->canTrap()) return false; if (ConstantExpr *CE = dyn_cast(Cond->getOperand(1))) if (CE->canTrap()) return false; // Finally, don't infinitely unroll conditional loops. if (is_contained(successors(BB), BB)) return false; // With which predecessors will we want to deal with? SmallVector Preds; for (BasicBlock *PredBlock : predecessors(BB)) { BranchInst *PBI = dyn_cast(PredBlock->getTerminator()); // Check that we have two conditional branches. If there is a PHI node in // the common successor, verify that the same value flows in from both // blocks. if (!PBI || PBI->isUnconditional() || !SafeToMergeTerminators(BI, PBI)) continue; // Determine if the two branches share a common destination. Instruction::BinaryOps Opc; bool InvertPredCond; if (auto Recipe = shouldFoldCondBranchesToCommonDestination(BI, PBI, TTI)) std::tie(Opc, InvertPredCond) = *Recipe; else continue; // Check the cost of inserting the necessary logic before performing the // transformation. if (TTI) { Type *Ty = BI->getCondition()->getType(); InstructionCost Cost = TTI->getArithmeticInstrCost(Opc, Ty, CostKind); if (InvertPredCond && (!PBI->getCondition()->hasOneUse() || !isa(PBI->getCondition()))) Cost += TTI->getArithmeticInstrCost(Instruction::Xor, Ty, CostKind); if (Cost > BranchFoldThreshold) continue; } // Ok, we do want to deal with this predecessor. Record it. Preds.emplace_back(PredBlock); } // If there aren't any predecessors into which we can fold, // don't bother checking the cost. if (Preds.empty()) return false; // Only allow this transformation if computing the condition doesn't involve // too many instructions and these involved instructions can be executed // unconditionally. 
We denote all involved instructions except the condition // as "bonus instructions", and only allow this transformation when the // number of the bonus instructions we'll need to create when cloning into // each predecessor does not exceed a certain threshold. unsigned NumBonusInsts = 0; const unsigned PredCount = Preds.size(); for (Instruction &I : *BB) { // Don't check the branch condition comparison itself. if (&I == Cond) continue; // Ignore dbg intrinsics, and the terminator. if (isa(I) || isa(I)) continue; // I must be safe to execute unconditionally. if (!isSafeToSpeculativelyExecute(&I)) return false; // Account for the cost of duplicating this instruction into each // predecessor. NumBonusInsts += PredCount; // Early exits once we reach the limit. if (NumBonusInsts > BonusInstThreshold) return false; + + auto IsBCSSAUse = [BB, &I](Use &U) { + auto *UI = cast(U.getUser()); + if (auto *PN = dyn_cast(UI)) + return PN->getIncomingBlock(U) == BB; + return UI->getParent() == BB && I.comesBefore(UI); + }; + + // Does this instruction require rewriting of uses? + if (!all_of(I.uses(), IsBCSSAUse)) + return false; } // Ok, we have the budget. Perform the transformation. for (BasicBlock *PredBlock : Preds) { auto *PBI = cast(PredBlock->getTerminator()); return performBranchToCommonDestFolding(BI, PBI, DTU, MSSAU, TTI); } return false; } // If there is only one store in BB1 and BB2, return it, otherwise return // nullptr. static StoreInst *findUniqueStoreInBlocks(BasicBlock *BB1, BasicBlock *BB2) { StoreInst *S = nullptr; for (auto *BB : {BB1, BB2}) { if (!BB) continue; for (auto &I : *BB) if (auto *SI = dyn_cast(&I)) { if (S) // Multiple stores seen. return nullptr; else S = SI; } } return S; } static Value *ensureValueAvailableInSuccessor(Value *V, BasicBlock *BB, Value *AlternativeV = nullptr) { // PHI is going to be a PHI node that allows the value V that is defined in // BB to be referenced in BB's only successor. 
// // If AlternativeV is nullptr, the only value we care about in PHI is V. It // doesn't matter to us what the other operand is (it'll never get used). We // could just create a new PHI with an undef incoming value, but that could // increase register pressure if EarlyCSE/InstCombine can't fold it with some // other PHI. So here we directly look for some PHI in BB's successor with V // as an incoming operand. If we find one, we use it, else we create a new // one. // // If AlternativeV is not nullptr, we care about both incoming values in PHI. // PHI must be exactly: phi [ %BB, %V ], [ %OtherBB, %AlternativeV] // where OtherBB is the single other predecessor of BB's only successor. PHINode *PHI = nullptr; BasicBlock *Succ = BB->getSingleSuccessor(); for (auto I = Succ->begin(); isa(I); ++I) if (cast(I)->getIncomingValueForBlock(BB) == V) { PHI = cast(I); if (!AlternativeV) break; assert(Succ->hasNPredecessors(2)); auto PredI = pred_begin(Succ); BasicBlock *OtherPredBB = *PredI == BB ? *++PredI : *PredI; if (PHI->getIncomingValueForBlock(OtherPredBB) == AlternativeV) break; PHI = nullptr; } if (PHI) return PHI; // If V is not an instruction defined in BB, just return it. if (!AlternativeV && (!isa(V) || cast(V)->getParent() != BB)) return V; PHI = PHINode::Create(V->getType(), 2, "simplifycfg.merge", &Succ->front()); PHI->addIncoming(V, BB); for (BasicBlock *PredBB : predecessors(Succ)) if (PredBB != BB) PHI->addIncoming( AlternativeV ? AlternativeV : UndefValue::get(V->getType()), PredBB); return PHI; } static bool mergeConditionalStoreToAddress( BasicBlock *PTB, BasicBlock *PFB, BasicBlock *QTB, BasicBlock *QFB, BasicBlock *PostBB, Value *Address, bool InvertPCond, bool InvertQCond, DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI) { // For every pointer, there must be exactly two stores, one coming from // PTB or PFB, and the other from QTB or QFB. We don't support more than one // store (to any address) in PTB,PFB or QTB,QFB. 
// FIXME: We could relax this restriction with a bit more work and performance // testing. StoreInst *PStore = findUniqueStoreInBlocks(PTB, PFB); StoreInst *QStore = findUniqueStoreInBlocks(QTB, QFB); if (!PStore || !QStore) return false; // Now check the stores are compatible. if (!QStore->isUnordered() || !PStore->isUnordered()) return false; // Check that sinking the store won't cause program behavior changes. Sinking // the store out of the Q blocks won't change any behavior as we're sinking // from a block to its unconditional successor. But we're moving a store from // the P blocks down through the middle block (QBI) and past both QFB and QTB. // So we need to check that there are no aliasing loads or stores in // QBI, QTB and QFB. We also need to check there are no conflicting memory // operations between PStore and the end of its parent block. // // The ideal way to do this is to query AliasAnalysis, but we don't // preserve AA currently so that is dangerous. Be super safe and just // check there are no other memory operations at all. for (auto &I : *QFB->getSinglePredecessor()) if (I.mayReadOrWriteMemory()) return false; for (auto &I : *QFB) if (&I != QStore && I.mayReadOrWriteMemory()) return false; if (QTB) for (auto &I : *QTB) if (&I != QStore && I.mayReadOrWriteMemory()) return false; for (auto I = BasicBlock::iterator(PStore), E = PStore->getParent()->end(); I != E; ++I) if (&*I != PStore && I->mayReadOrWriteMemory()) return false; // If we're not in aggressive mode, we only optimize if we have some // confidence that by optimizing we'll allow P and/or Q to be if-converted. auto IsWorthwhile = [&](BasicBlock *BB, ArrayRef FreeStores) { if (!BB) return true; // Heuristic: if the block can be if-converted/phi-folded and the // instructions inside are all cheap (arithmetic/GEPs), it's worthwhile to // thread this store. 
InstructionCost Cost = 0; InstructionCost Budget = PHINodeFoldingThreshold * TargetTransformInfo::TCC_Basic; for (auto &I : BB->instructionsWithoutDebug()) { // Consider terminator instruction to be free. if (I.isTerminator()) continue; // If this is one the stores that we want to speculate out of this BB, // then don't count it's cost, consider it to be free. if (auto *S = dyn_cast(&I)) if (llvm::find(FreeStores, S)) continue; // Else, we have a white-list of instructions that we are ak speculating. if (!isa(I) && !isa(I)) return false; // Not in white-list - not worthwhile folding. // And finally, if this is a non-free instruction that we are okay // speculating, ensure that we consider the speculation budget. Cost += TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency); if (Cost > Budget) return false; // Eagerly refuse to fold as soon as we're out of budget. } assert(Cost <= Budget && "When we run out of budget we will eagerly return from within the " "per-instruction loop."); return true; }; const std::array FreeStores = {PStore, QStore}; if (!MergeCondStoresAggressively && (!IsWorthwhile(PTB, FreeStores) || !IsWorthwhile(PFB, FreeStores) || !IsWorthwhile(QTB, FreeStores) || !IsWorthwhile(QFB, FreeStores))) return false; // If PostBB has more than two predecessors, we need to split it so we can // sink the store. if (std::next(pred_begin(PostBB), 2) != pred_end(PostBB)) { // We know that QFB's only successor is PostBB. And QFB has a single // predecessor. If QTB exists, then its only successor is also PostBB. // If QTB does not exist, then QFB's only predecessor has a conditional // branch to QFB and PostBB. BasicBlock *TruePred = QTB ? QTB : QFB->getSinglePredecessor(); BasicBlock *NewBB = SplitBlockPredecessors(PostBB, {QFB, TruePred}, "condstore.split", DTU); if (!NewBB) return false; PostBB = NewBB; } // OK, we're going to sink the stores to PostBB. The store has to be // conditional though, so first create the predicate. 
Value *PCond = cast(PFB->getSinglePredecessor()->getTerminator()) ->getCondition(); Value *QCond = cast(QFB->getSinglePredecessor()->getTerminator()) ->getCondition(); Value *PPHI = ensureValueAvailableInSuccessor(PStore->getValueOperand(), PStore->getParent()); Value *QPHI = ensureValueAvailableInSuccessor(QStore->getValueOperand(), QStore->getParent(), PPHI); IRBuilder<> QB(&*PostBB->getFirstInsertionPt()); Value *PPred = PStore->getParent() == PTB ? PCond : QB.CreateNot(PCond); Value *QPred = QStore->getParent() == QTB ? QCond : QB.CreateNot(QCond); if (InvertPCond) PPred = QB.CreateNot(PPred); if (InvertQCond) QPred = QB.CreateNot(QPred); Value *CombinedPred = QB.CreateOr(PPred, QPred); auto *T = SplitBlockAndInsertIfThen(CombinedPred, &*QB.GetInsertPoint(), /*Unreachable=*/false, /*BranchWeights=*/nullptr, DTU); QB.SetInsertPoint(T); StoreInst *SI = cast(QB.CreateStore(QPHI, Address)); AAMDNodes AAMD; PStore->getAAMetadata(AAMD, /*Merge=*/false); PStore->getAAMetadata(AAMD, /*Merge=*/true); SI->setAAMetadata(AAMD); // Choose the minimum alignment. If we could prove both stores execute, we // could use biggest one. In this case, though, we only know that one of the // stores executes. And we don't know it's safe to take the alignment from a // store that doesn't execute. SI->setAlignment(std::min(PStore->getAlign(), QStore->getAlign())); QStore->eraseFromParent(); PStore->eraseFromParent(); return true; } static bool mergeConditionalStores(BranchInst *PBI, BranchInst *QBI, DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI) { // The intention here is to find diamonds or triangles (see below) where each // conditional block contains a store to the same address. Both of these // stores are conditional, so they can't be unconditionally sunk. But it may // be profitable to speculatively sink the stores into one merged store at the // end, and predicate the merged store on the union of the two conditions of // PBI and QBI. 
// // This can reduce the number of stores executed if both of the conditions are // true, and can allow the blocks to become small enough to be if-converted. // This optimization will also chain, so that ladders of test-and-set // sequences can be if-converted away. // // We only deal with simple diamonds or triangles: // // PBI or PBI or a combination of the two // / \ | \ // PTB PFB | PFB // \ / | / // QBI QBI // / \ | \ // QTB QFB | QFB // \ / | / // PostBB PostBB // // We model triangles as a type of diamond with a nullptr "true" block. // Triangles are canonicalized so that the fallthrough edge is represented by // a true condition, as in the diagram above. BasicBlock *PTB = PBI->getSuccessor(0); BasicBlock *PFB = PBI->getSuccessor(1); BasicBlock *QTB = QBI->getSuccessor(0); BasicBlock *QFB = QBI->getSuccessor(1); BasicBlock *PostBB = QFB->getSingleSuccessor(); // Make sure we have a good guess for PostBB. If QTB's only successor is // QFB, then QFB is a better PostBB. if (QTB->getSingleSuccessor() == QFB) PostBB = QFB; // If we couldn't find a good PostBB, stop. if (!PostBB) return false; bool InvertPCond = false, InvertQCond = false; // Canonicalize fallthroughs to the true branches. if (PFB == QBI->getParent()) { std::swap(PFB, PTB); InvertPCond = true; } if (QFB == PostBB) { std::swap(QFB, QTB); InvertQCond = true; } // From this point on we can assume PTB or QTB may be fallthroughs but PFB // and QFB may not. Model fallthroughs as a nullptr block. if (PTB == QBI->getParent()) PTB = nullptr; if (QTB == PostBB) QTB = nullptr; // Legality bailouts. We must have at least the non-fallthrough blocks and // the post-dominating block, and the non-fallthroughs must only have one // predecessor. 
auto HasOnePredAndOneSucc = [](BasicBlock *BB, BasicBlock *P, BasicBlock *S) { return BB->getSinglePredecessor() == P && BB->getSingleSuccessor() == S; }; if (!HasOnePredAndOneSucc(PFB, PBI->getParent(), QBI->getParent()) || !HasOnePredAndOneSucc(QFB, QBI->getParent(), PostBB)) return false; if ((PTB && !HasOnePredAndOneSucc(PTB, PBI->getParent(), QBI->getParent())) || (QTB && !HasOnePredAndOneSucc(QTB, QBI->getParent(), PostBB))) return false; if (!QBI->getParent()->hasNUses(2)) return false; // OK, this is a sequence of two diamonds or triangles. // Check if there are stores in PTB or PFB that are repeated in QTB or QFB. SmallPtrSet PStoreAddresses, QStoreAddresses; for (auto *BB : {PTB, PFB}) { if (!BB) continue; for (auto &I : *BB) if (StoreInst *SI = dyn_cast(&I)) PStoreAddresses.insert(SI->getPointerOperand()); } for (auto *BB : {QTB, QFB}) { if (!BB) continue; for (auto &I : *BB) if (StoreInst *SI = dyn_cast(&I)) QStoreAddresses.insert(SI->getPointerOperand()); } set_intersect(PStoreAddresses, QStoreAddresses); // set_intersect mutates PStoreAddresses in place. Rename it here to make it // clear what it contains. auto &CommonAddresses = PStoreAddresses; bool Changed = false; for (auto *Address : CommonAddresses) Changed |= mergeConditionalStoreToAddress(PTB, PFB, QTB, QFB, PostBB, Address, InvertPCond, InvertQCond, DTU, DL, TTI); return Changed; } /// If the previous block ended with a widenable branch, determine if reusing /// the target block is profitable and legal. This will have the effect of /// "widening" PBI, but doesn't require us to reason about hosting safety. static bool tryWidenCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI, DomTreeUpdater *DTU) { // TODO: This can be generalized in two important ways: // 1) We can allow phi nodes in IfFalseBB and simply reuse all the input // values from the PBI edge. 
// 2) We can sink side effecting instructions into BI's fallthrough // successor provided they doesn't contribute to computation of // BI's condition. Value *CondWB, *WC; BasicBlock *IfTrueBB, *IfFalseBB; if (!parseWidenableBranch(PBI, CondWB, WC, IfTrueBB, IfFalseBB) || IfTrueBB != BI->getParent() || !BI->getParent()->getSinglePredecessor()) return false; if (!IfFalseBB->phis().empty()) return false; // TODO // Use lambda to lazily compute expensive condition after cheap ones. auto NoSideEffects = [](BasicBlock &BB) { return !llvm::any_of(BB, [](const Instruction &I) { return I.mayWriteToMemory() || I.mayHaveSideEffects(); }); }; if (BI->getSuccessor(1) != IfFalseBB && // no inf looping BI->getSuccessor(1)->getTerminatingDeoptimizeCall() && // profitability NoSideEffects(*BI->getParent())) { auto *OldSuccessor = BI->getSuccessor(1); OldSuccessor->removePredecessor(BI->getParent()); BI->setSuccessor(1, IfFalseBB); if (DTU) DTU->applyUpdates( {{DominatorTree::Insert, BI->getParent(), IfFalseBB}, {DominatorTree::Delete, BI->getParent(), OldSuccessor}}); return true; } if (BI->getSuccessor(0) != IfFalseBB && // no inf looping BI->getSuccessor(0)->getTerminatingDeoptimizeCall() && // profitability NoSideEffects(*BI->getParent())) { auto *OldSuccessor = BI->getSuccessor(0); OldSuccessor->removePredecessor(BI->getParent()); BI->setSuccessor(0, IfFalseBB); if (DTU) DTU->applyUpdates( {{DominatorTree::Insert, BI->getParent(), IfFalseBB}, {DominatorTree::Delete, BI->getParent(), OldSuccessor}}); return true; } return false; } /// If we have a conditional branch as a predecessor of another block, /// this function tries to simplify it. We know /// that PBI and BI are both conditional branches, and BI is in one of the /// successor blocks of PBI - PBI branches to BI. 
static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI, DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI) { assert(PBI->isConditional() && BI->isConditional()); BasicBlock *BB = BI->getParent(); // If this block ends with a branch instruction, and if there is a // predecessor that ends on a branch of the same condition, make // this conditional branch redundant. if (PBI->getCondition() == BI->getCondition() && PBI->getSuccessor(0) != PBI->getSuccessor(1)) { // Okay, the outcome of this conditional branch is statically // knowable. If this block had a single pred, handle specially. if (BB->getSinglePredecessor()) { // Turn this into a branch on constant. bool CondIsTrue = PBI->getSuccessor(0) == BB; BI->setCondition( ConstantInt::get(Type::getInt1Ty(BB->getContext()), CondIsTrue)); return true; // Nuke the branch on constant. } // Otherwise, if there are multiple predecessors, insert a PHI that merges // in the constant and simplify the block result. Subsequent passes of // simplifycfg will thread the block. if (BlockIsSimpleEnoughToThreadThrough(BB)) { pred_iterator PB = pred_begin(BB), PE = pred_end(BB); PHINode *NewPN = PHINode::Create( Type::getInt1Ty(BB->getContext()), std::distance(PB, PE), BI->getCondition()->getName() + ".pr", &BB->front()); // Okay, we're going to insert the PHI node. Since PBI is not the only // predecessor, compute the PHI'd conditional value for all of the preds. // Any predecessor where the condition is not computable we keep symbolic. 
for (pred_iterator PI = PB; PI != PE; ++PI) { BasicBlock *P = *PI; if ((PBI = dyn_cast(P->getTerminator())) && PBI != BI && PBI->isConditional() && PBI->getCondition() == BI->getCondition() && PBI->getSuccessor(0) != PBI->getSuccessor(1)) { bool CondIsTrue = PBI->getSuccessor(0) == BB; NewPN->addIncoming( ConstantInt::get(Type::getInt1Ty(BB->getContext()), CondIsTrue), P); } else { NewPN->addIncoming(BI->getCondition(), P); } } BI->setCondition(NewPN); return true; } } // If the previous block ended with a widenable branch, determine if reusing // the target block is profitable and legal. This will have the effect of // "widening" PBI, but doesn't require us to reason about hosting safety. if (tryWidenCondBranchToCondBranch(PBI, BI, DTU)) return true; if (auto *CE = dyn_cast(BI->getCondition())) if (CE->canTrap()) return false; // If both branches are conditional and both contain stores to the same // address, remove the stores from the conditionals and create a conditional // merged store at the end. if (MergeCondStores && mergeConditionalStores(PBI, BI, DTU, DL, TTI)) return true; // If this is a conditional branch in an empty block, and if any // predecessors are a conditional branch to one of our destinations, // fold the conditions into logical ops and one cond br. // Ignore dbg intrinsics. if (&*BB->instructionsWithoutDebug().begin() != BI) return false; int PBIOp, BIOp; if (PBI->getSuccessor(0) == BI->getSuccessor(0)) { PBIOp = 0; BIOp = 0; } else if (PBI->getSuccessor(0) == BI->getSuccessor(1)) { PBIOp = 0; BIOp = 1; } else if (PBI->getSuccessor(1) == BI->getSuccessor(0)) { PBIOp = 1; BIOp = 0; } else if (PBI->getSuccessor(1) == BI->getSuccessor(1)) { PBIOp = 1; BIOp = 1; } else { return false; } // Check to make sure that the other destination of this branch // isn't BB itself. If so, this is an infinite loop that will // keep getting unwound. 
if (PBI->getSuccessor(PBIOp) == BB) return false; // Do not perform this transformation if it would require // insertion of a large number of select instructions. For targets // without predication/cmovs, this is a big pessimization. // Also do not perform this transformation if any phi node in the common // destination block can trap when reached by BB or PBB (PR17073). In that // case, it would be unsafe to hoist the operation into a select instruction. BasicBlock *CommonDest = PBI->getSuccessor(PBIOp); BasicBlock *RemovedDest = PBI->getSuccessor(PBIOp ^ 1); unsigned NumPhis = 0; for (BasicBlock::iterator II = CommonDest->begin(); isa(II); ++II, ++NumPhis) { if (NumPhis > 2) // Disable this xform. return false; PHINode *PN = cast(II); Value *BIV = PN->getIncomingValueForBlock(BB); if (ConstantExpr *CE = dyn_cast(BIV)) if (CE->canTrap()) return false; unsigned PBBIdx = PN->getBasicBlockIndex(PBI->getParent()); Value *PBIV = PN->getIncomingValue(PBBIdx); if (ConstantExpr *CE = dyn_cast(PBIV)) if (CE->canTrap()) return false; } // Finally, if everything is ok, fold the branches to logical ops. BasicBlock *OtherDest = BI->getSuccessor(BIOp ^ 1); LLVM_DEBUG(dbgs() << "FOLDING BRs:" << *PBI->getParent() << "AND: " << *BI->getParent()); SmallVector Updates; // If OtherDest *is* BB, then BB is a basic block with a single conditional // branch in it, where one edge (OtherDest) goes back to itself but the other // exits. We don't *know* that the program avoids the infinite loop // (even though that seems likely). If we do this xform naively, we'll end up // recursively unpeeling the loop. Since we know that (after the xform is // done) that the block *is* infinite if reached, we just make it an obviously // infinite loop with no cond branch. if (OtherDest == BB) { // Insert it at the end of the function, because it's either code, // or it won't matter if it's hot. 
:) BasicBlock *InfLoopBlock = BasicBlock::Create(BB->getContext(), "infloop", BB->getParent()); BranchInst::Create(InfLoopBlock, InfLoopBlock); if (DTU) Updates.push_back({DominatorTree::Insert, InfLoopBlock, InfLoopBlock}); OtherDest = InfLoopBlock; } LLVM_DEBUG(dbgs() << *PBI->getParent()->getParent()); // BI may have other predecessors. Because of this, we leave // it alone, but modify PBI. // Make sure we get to CommonDest on True&True directions. Value *PBICond = PBI->getCondition(); IRBuilder Builder(PBI); if (PBIOp) PBICond = Builder.CreateNot(PBICond, PBICond->getName() + ".not"); Value *BICond = BI->getCondition(); if (BIOp) BICond = Builder.CreateNot(BICond, BICond->getName() + ".not"); // Merge the conditions. Value *Cond = createLogicalOp(Builder, Instruction::Or, PBICond, BICond, "brmerge"); // Modify PBI to branch on the new condition to the new dests. PBI->setCondition(Cond); PBI->setSuccessor(0, CommonDest); PBI->setSuccessor(1, OtherDest); if (DTU) { Updates.push_back({DominatorTree::Insert, PBI->getParent(), OtherDest}); Updates.push_back({DominatorTree::Delete, PBI->getParent(), RemovedDest}); DTU->applyUpdates(Updates); } // Update branch weight for PBI. uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight; uint64_t PredCommon, PredOther, SuccCommon, SuccOther; bool HasWeights = extractPredSuccWeights(PBI, BI, PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight); if (HasWeights) { PredCommon = PBIOp ? PredFalseWeight : PredTrueWeight; PredOther = PBIOp ? PredTrueWeight : PredFalseWeight; SuccCommon = BIOp ? SuccFalseWeight : SuccTrueWeight; SuccOther = BIOp ? SuccTrueWeight : SuccFalseWeight; // The weight to CommonDest should be PredCommon * SuccTotal + // PredOther * SuccCommon. // The weight to OtherDest should be PredOther * SuccOther. 
uint64_t NewWeights[2] = {PredCommon * (SuccCommon + SuccOther) + PredOther * SuccCommon, PredOther * SuccOther}; // Halve the weights if any of them cannot fit in an uint32_t FitWeights(NewWeights); setBranchWeights(PBI, NewWeights[0], NewWeights[1]); } // OtherDest may have phi nodes. If so, add an entry from PBI's // block that are identical to the entries for BI's block. AddPredecessorToBlock(OtherDest, PBI->getParent(), BB); // We know that the CommonDest already had an edge from PBI to // it. If it has PHIs though, the PHIs may have different // entries for BB and PBI's BB. If so, insert a select to make // them agree. for (PHINode &PN : CommonDest->phis()) { Value *BIV = PN.getIncomingValueForBlock(BB); unsigned PBBIdx = PN.getBasicBlockIndex(PBI->getParent()); Value *PBIV = PN.getIncomingValue(PBBIdx); if (BIV != PBIV) { // Insert a select in PBI to pick the right value. SelectInst *NV = cast( Builder.CreateSelect(PBICond, PBIV, BIV, PBIV->getName() + ".mux")); PN.setIncomingValue(PBBIdx, NV); // Although the select has the same condition as PBI, the original branch // weights for PBI do not apply to the new select because the select's // 'logical' edges are incoming edges of the phi that is eliminated, not // the outgoing edges of PBI. if (HasWeights) { uint64_t PredCommon = PBIOp ? PredFalseWeight : PredTrueWeight; uint64_t PredOther = PBIOp ? PredTrueWeight : PredFalseWeight; uint64_t SuccCommon = BIOp ? SuccFalseWeight : SuccTrueWeight; uint64_t SuccOther = BIOp ? SuccTrueWeight : SuccFalseWeight; // The weight to PredCommonDest should be PredCommon * SuccTotal. // The weight to PredOtherDest should be PredOther * SuccCommon. uint64_t NewWeights[2] = {PredCommon * (SuccCommon + SuccOther), PredOther * SuccCommon}; FitWeights(NewWeights); setBranchWeights(NV, NewWeights[0], NewWeights[1]); } } } LLVM_DEBUG(dbgs() << "INTO: " << *PBI->getParent()); LLVM_DEBUG(dbgs() << *PBI->getParent()->getParent()); // This basic block is probably dead. 
We know it has at least // one fewer predecessor. return true; } // Simplifies a terminator by replacing it with a branch to TrueBB if Cond is // true or to FalseBB if Cond is false. // Takes care of updating the successors and removing the old terminator. // Also makes sure not to introduce new successors by assuming that edges to // non-successor TrueBBs and FalseBBs aren't reachable. bool SimplifyCFGOpt::SimplifyTerminatorOnSelect(Instruction *OldTerm, Value *Cond, BasicBlock *TrueBB, BasicBlock *FalseBB, uint32_t TrueWeight, uint32_t FalseWeight) { auto *BB = OldTerm->getParent(); // Remove any superfluous successor edges from the CFG. // First, figure out which successors to preserve. // If TrueBB and FalseBB are equal, only try to preserve one copy of that // successor. BasicBlock *KeepEdge1 = TrueBB; BasicBlock *KeepEdge2 = TrueBB != FalseBB ? FalseBB : nullptr; SmallPtrSet RemovedSuccessors; // Then remove the rest. for (BasicBlock *Succ : successors(OldTerm)) { // Make sure only to keep exactly one copy of each edge. if (Succ == KeepEdge1) KeepEdge1 = nullptr; else if (Succ == KeepEdge2) KeepEdge2 = nullptr; else { Succ->removePredecessor(BB, /*KeepOneInputPHIs=*/true); if (Succ != TrueBB && Succ != FalseBB) RemovedSuccessors.insert(Succ); } } IRBuilder<> Builder(OldTerm); Builder.SetCurrentDebugLocation(OldTerm->getDebugLoc()); // Insert an appropriate new terminator. if (!KeepEdge1 && !KeepEdge2) { if (TrueBB == FalseBB) { // We were only looking for one successor, and it was present. // Create an unconditional branch to it. Builder.CreateBr(TrueBB); } else { // We found both of the successors we were looking for. // Create a conditional branch sharing the condition of the select. 
BranchInst *NewBI = Builder.CreateCondBr(Cond, TrueBB, FalseBB); if (TrueWeight != FalseWeight) setBranchWeights(NewBI, TrueWeight, FalseWeight); } } else if (KeepEdge1 && (KeepEdge2 || TrueBB == FalseBB)) { // Neither of the selected blocks were successors, so this // terminator must be unreachable. new UnreachableInst(OldTerm->getContext(), OldTerm); } else { // One of the selected values was a successor, but the other wasn't. // Insert an unconditional branch to the one that was found; // the edge to the one that wasn't must be unreachable. if (!KeepEdge1) { // Only TrueBB was found. Builder.CreateBr(TrueBB); } else { // Only FalseBB was found. Builder.CreateBr(FalseBB); } } EraseTerminatorAndDCECond(OldTerm); if (DTU) { SmallVector Updates; Updates.reserve(RemovedSuccessors.size()); for (auto *RemovedSuccessor : RemovedSuccessors) Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor}); DTU->applyUpdates(Updates); } return true; } // Replaces // (switch (select cond, X, Y)) on constant X, Y // with a branch - conditional if X and Y lead to distinct BBs, // unconditional otherwise. bool SimplifyCFGOpt::SimplifySwitchOnSelect(SwitchInst *SI, SelectInst *Select) { // Check for constant integer values in the select. ConstantInt *TrueVal = dyn_cast(Select->getTrueValue()); ConstantInt *FalseVal = dyn_cast(Select->getFalseValue()); if (!TrueVal || !FalseVal) return false; // Find the relevant condition and destinations. Value *Condition = Select->getCondition(); BasicBlock *TrueBB = SI->findCaseValue(TrueVal)->getCaseSuccessor(); BasicBlock *FalseBB = SI->findCaseValue(FalseVal)->getCaseSuccessor(); // Get weight for TrueBB and FalseBB. 
uint32_t TrueWeight = 0, FalseWeight = 0; SmallVector Weights; bool HasWeights = HasBranchWeights(SI); if (HasWeights) { GetBranchWeights(SI, Weights); if (Weights.size() == 1 + SI->getNumCases()) { TrueWeight = (uint32_t)Weights[SI->findCaseValue(TrueVal)->getSuccessorIndex()]; FalseWeight = (uint32_t)Weights[SI->findCaseValue(FalseVal)->getSuccessorIndex()]; } } // Perform the actual simplification. return SimplifyTerminatorOnSelect(SI, Condition, TrueBB, FalseBB, TrueWeight, FalseWeight); } // Replaces // (indirectbr (select cond, blockaddress(@fn, BlockA), // blockaddress(@fn, BlockB))) // with // (br cond, BlockA, BlockB). bool SimplifyCFGOpt::SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI) { // Check that both operands of the select are block addresses. BlockAddress *TBA = dyn_cast(SI->getTrueValue()); BlockAddress *FBA = dyn_cast(SI->getFalseValue()); if (!TBA || !FBA) return false; // Extract the actual blocks. BasicBlock *TrueBB = TBA->getBasicBlock(); BasicBlock *FalseBB = FBA->getBasicBlock(); // Perform the actual simplification. return SimplifyTerminatorOnSelect(IBI, SI->getCondition(), TrueBB, FalseBB, 0, 0); } /// This is called when we find an icmp instruction /// (a seteq/setne with a constant) as the only instruction in a /// block that ends with an uncond branch. We are looking for a very specific /// pattern that occurs when "A == 1 || A == 2 || A == 3" gets simplified. In /// this case, we merge the first two "or's of icmp" into a switch, but then the /// default value goes to an uncond block with a seteq in it, we get something /// like: /// /// switch i8 %A, label %DEFAULT [ i8 1, label %end i8 2, label %end ] /// DEFAULT: /// %tmp = icmp eq i8 %A, 92 /// br label %end /// end: /// ... = phi i1 [ true, %entry ], [ %tmp, %DEFAULT ], [ true, %entry ] /// /// We prefer to split the edge to 'end' so that there is a true/false entry to /// the PHI, merging the third icmp into the switch. 
bool SimplifyCFGOpt::tryToSimplifyUncondBranchWithICmpInIt( ICmpInst *ICI, IRBuilder<> &Builder) { BasicBlock *BB = ICI->getParent(); // If the block has any PHIs in it or the icmp has multiple uses, it is too // complex. if (isa(BB->begin()) || !ICI->hasOneUse()) return false; Value *V = ICI->getOperand(0); ConstantInt *Cst = cast(ICI->getOperand(1)); // The pattern we're looking for is where our only predecessor is a switch on // 'V' and this block is the default case for the switch. In this case we can // fold the compared value into the switch to simplify things. BasicBlock *Pred = BB->getSinglePredecessor(); if (!Pred || !isa(Pred->getTerminator())) return false; SwitchInst *SI = cast(Pred->getTerminator()); if (SI->getCondition() != V) return false; // If BB is reachable on a non-default case, then we simply know the value of // V in this block. Substitute it and constant fold the icmp instruction // away. if (SI->getDefaultDest() != BB) { ConstantInt *VVal = SI->findCaseDest(BB); assert(VVal && "Should have a unique destination value"); ICI->setOperand(0, VVal); if (Value *V = SimplifyInstruction(ICI, {DL, ICI})) { ICI->replaceAllUsesWith(V); ICI->eraseFromParent(); } // BB is now empty, so it is likely to simplify away. return requestResimplify(); } // Ok, the block is reachable from the default dest. If the constant we're // comparing exists in one of the other edges, then we can constant fold ICI // and zap it. if (SI->findCaseValue(Cst) != SI->case_default()) { Value *V; if (ICI->getPredicate() == ICmpInst::ICMP_EQ) V = ConstantInt::getFalse(BB->getContext()); else V = ConstantInt::getTrue(BB->getContext()); ICI->replaceAllUsesWith(V); ICI->eraseFromParent(); // BB is now empty, so it is likely to simplify away. return requestResimplify(); } // The use of the icmp has to be in the 'end' block, by the only PHI node in // the block. 
BasicBlock *SuccBlock = BB->getTerminator()->getSuccessor(0); PHINode *PHIUse = dyn_cast(ICI->user_back()); if (PHIUse == nullptr || PHIUse != &SuccBlock->front() || isa(++BasicBlock::iterator(PHIUse))) return false; // If the icmp is a SETEQ, then the default dest gets false, the new edge gets // true in the PHI. Constant *DefaultCst = ConstantInt::getTrue(BB->getContext()); Constant *NewCst = ConstantInt::getFalse(BB->getContext()); if (ICI->getPredicate() == ICmpInst::ICMP_EQ) std::swap(DefaultCst, NewCst); // Replace ICI (which is used by the PHI for the default value) with true or // false depending on if it is EQ or NE. ICI->replaceAllUsesWith(DefaultCst); ICI->eraseFromParent(); SmallVector Updates; // Okay, the switch goes to this block on a default value. Add an edge from // the switch to the merge point on the compared value. BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "switch.edge", BB->getParent(), BB); { SwitchInstProfUpdateWrapper SIW(*SI); auto W0 = SIW.getSuccessorWeight(0); SwitchInstProfUpdateWrapper::CaseWeightOpt NewW; if (W0) { NewW = ((uint64_t(*W0) + 1) >> 1); SIW.setSuccessorWeight(0, *NewW); } SIW.addCase(Cst, NewBB, NewW); if (DTU) Updates.push_back({DominatorTree::Insert, Pred, NewBB}); } // NewBB branches to the phi block, add the uncond branch and the phi entry. Builder.SetInsertPoint(NewBB); Builder.SetCurrentDebugLocation(SI->getDebugLoc()); Builder.CreateBr(SuccBlock); PHIUse->addIncoming(NewCst, NewBB); if (DTU) { Updates.push_back({DominatorTree::Insert, NewBB, SuccBlock}); DTU->applyUpdates(Updates); } return true; } /// The specified branch is a conditional branch. /// Check to see if it is branching on an or/and chain of icmp instructions, and /// fold it into a switch instruction if so. 
bool SimplifyCFGOpt::SimplifyBranchOnICmpChain(BranchInst *BI, IRBuilder<> &Builder, const DataLayout &DL) { Instruction *Cond = dyn_cast(BI->getCondition()); if (!Cond) return false; // Change br (X == 0 | X == 1), T, F into a switch instruction. // If this is a bunch of seteq's or'd together, or if it's a bunch of // 'setne's and'ed together, collect them. // Try to gather values from a chain of and/or to be turned into a switch ConstantComparesGatherer ConstantCompare(Cond, DL); // Unpack the result SmallVectorImpl &Values = ConstantCompare.Vals; Value *CompVal = ConstantCompare.CompValue; unsigned UsedICmps = ConstantCompare.UsedICmps; Value *ExtraCase = ConstantCompare.Extra; // If we didn't have a multiply compared value, fail. if (!CompVal) return false; // Avoid turning single icmps into a switch. if (UsedICmps <= 1) return false; bool TrueWhenEqual = match(Cond, m_LogicalOr(m_Value(), m_Value())); // There might be duplicate constants in the list, which the switch // instruction can't handle, remove them now. array_pod_sort(Values.begin(), Values.end(), ConstantIntSortPredicate); Values.erase(std::unique(Values.begin(), Values.end()), Values.end()); // If Extra was used, we require at least two switch values to do the // transformation. A switch with one value is just a conditional branch. if (ExtraCase && Values.size() < 2) return false; // TODO: Preserve branch weight metadata, similarly to how // FoldValueComparisonIntoPredecessors preserves it. // Figure out which block is which destination. BasicBlock *DefaultBB = BI->getSuccessor(1); BasicBlock *EdgeBB = BI->getSuccessor(0); if (!TrueWhenEqual) std::swap(DefaultBB, EdgeBB); BasicBlock *BB = BI->getParent(); LLVM_DEBUG(dbgs() << "Converting 'icmp' chain with " << Values.size() << " cases into SWITCH. BB is:\n" << *BB); SmallVector Updates; // If there are any extra values that couldn't be folded into the switch // then we evaluate them with an explicit branch first. 
Split the block // right before the condbr to handle it. if (ExtraCase) { BasicBlock *NewBB = SplitBlock(BB, BI, DTU, /*LI=*/nullptr, /*MSSAU=*/nullptr, "switch.early.test"); // Remove the uncond branch added to the old block. Instruction *OldTI = BB->getTerminator(); Builder.SetInsertPoint(OldTI); // There can be an unintended UB if extra values are Poison. Before the // transformation, extra values may not be evaluated according to the // condition, and it will not raise UB. But after transformation, we are // evaluating extra values before checking the condition, and it will raise // UB. It can be solved by adding freeze instruction to extra values. AssumptionCache *AC = Options.AC; if (!isGuaranteedNotToBeUndefOrPoison(ExtraCase, AC, BI, nullptr)) ExtraCase = Builder.CreateFreeze(ExtraCase); if (TrueWhenEqual) Builder.CreateCondBr(ExtraCase, EdgeBB, NewBB); else Builder.CreateCondBr(ExtraCase, NewBB, EdgeBB); OldTI->eraseFromParent(); if (DTU) Updates.push_back({DominatorTree::Insert, BB, EdgeBB}); // If there are PHI nodes in EdgeBB, then we need to add a new entry to them // for the edge we just added. AddPredecessorToBlock(EdgeBB, BB, NewBB); LLVM_DEBUG(dbgs() << " ** 'icmp' chain unhandled condition: " << *ExtraCase << "\nEXTRABB = " << *BB); BB = NewBB; } Builder.SetInsertPoint(BI); // Convert pointer to int before we switch. if (CompVal->getType()->isPointerTy()) { CompVal = Builder.CreatePtrToInt( CompVal, DL.getIntPtrType(CompVal->getType()), "magicptr"); } // Create the new switch instruction now. SwitchInst *New = Builder.CreateSwitch(CompVal, DefaultBB, Values.size()); // Add all of the 'cases' to the switch instruction. for (unsigned i = 0, e = Values.size(); i != e; ++i) New->addCase(Values[i], EdgeBB); // We added edges from PI to the EdgeBB. As such, if there were any // PHI nodes in EdgeBB, they need entries to be added corresponding to // the number of edges added. 
for (BasicBlock::iterator BBI = EdgeBB->begin(); isa(BBI); ++BBI) { PHINode *PN = cast(BBI); Value *InVal = PN->getIncomingValueForBlock(BB); for (unsigned i = 0, e = Values.size() - 1; i != e; ++i) PN->addIncoming(InVal, BB); } // Erase the old branch instruction. EraseTerminatorAndDCECond(BI); if (DTU) DTU->applyUpdates(Updates); LLVM_DEBUG(dbgs() << " ** 'icmp' chain result is:\n" << *BB << '\n'); return true; } bool SimplifyCFGOpt::simplifyResume(ResumeInst *RI, IRBuilder<> &Builder) { if (isa(RI->getValue())) return simplifyCommonResume(RI); else if (isa(RI->getParent()->getFirstNonPHI()) && RI->getValue() == RI->getParent()->getFirstNonPHI()) // The resume must unwind the exception that caused control to branch here. return simplifySingleResume(RI); return false; } // Check if cleanup block is empty static bool isCleanupBlockEmpty(iterator_range R) { for (Instruction &I : R) { auto *II = dyn_cast(&I); if (!II) return false; Intrinsic::ID IntrinsicID = II->getIntrinsicID(); switch (IntrinsicID) { case Intrinsic::dbg_declare: case Intrinsic::dbg_value: case Intrinsic::dbg_label: case Intrinsic::lifetime_end: break; default: return false; } } return true; } // Simplify resume that is shared by several landing pads (phi of landing pad). bool SimplifyCFGOpt::simplifyCommonResume(ResumeInst *RI) { BasicBlock *BB = RI->getParent(); // Check that there are no other instructions except for debug and lifetime // intrinsics between the phi's and resume instruction. if (!isCleanupBlockEmpty( make_range(RI->getParent()->getFirstNonPHI(), BB->getTerminator()))) return false; SmallSetVector TrivialUnwindBlocks; auto *PhiLPInst = cast(RI->getValue()); // Check incoming blocks to see if any of them are trivial. 
for (unsigned Idx = 0, End = PhiLPInst->getNumIncomingValues(); Idx != End; Idx++) { auto *IncomingBB = PhiLPInst->getIncomingBlock(Idx); auto *IncomingValue = PhiLPInst->getIncomingValue(Idx); // If the block has other successors, we can not delete it because // it has other dependents. if (IncomingBB->getUniqueSuccessor() != BB) continue; auto *LandingPad = dyn_cast(IncomingBB->getFirstNonPHI()); // Not the landing pad that caused the control to branch here. if (IncomingValue != LandingPad) continue; if (isCleanupBlockEmpty( make_range(LandingPad->getNextNode(), IncomingBB->getTerminator()))) TrivialUnwindBlocks.insert(IncomingBB); } // If no trivial unwind blocks, don't do any simplifications. if (TrivialUnwindBlocks.empty()) return false; // Turn all invokes that unwind here into calls. for (auto *TrivialBB : TrivialUnwindBlocks) { // Blocks that will be simplified should be removed from the phi node. // Note there could be multiple edges to the resume block, and we need // to remove them all. while (PhiLPInst->getBasicBlockIndex(TrivialBB) != -1) BB->removePredecessor(TrivialBB, true); for (BasicBlock *Pred : llvm::make_early_inc_range(predecessors(TrivialBB))) { removeUnwindEdge(Pred, DTU); ++NumInvokes; } // In each SimplifyCFG run, only the current processed block can be erased. // Otherwise, it will break the iteration of SimplifyCFG pass. So instead // of erasing TrivialBB, we only remove the branch to the common resume // block so that we can later erase the resume block since it has no // predecessors. TrivialBB->getTerminator()->eraseFromParent(); new UnreachableInst(RI->getContext(), TrivialBB); if (DTU) DTU->applyUpdates({{DominatorTree::Delete, TrivialBB, BB}}); } // Delete the resume block if all its predecessors have been removed. if (pred_empty(BB)) DeleteDeadBlock(BB, DTU); return !TrivialUnwindBlocks.empty(); } // Simplify resume that is only used by a single (non-phi) landing pad. 
bool SimplifyCFGOpt::simplifySingleResume(ResumeInst *RI) { BasicBlock *BB = RI->getParent(); auto *LPInst = cast(BB->getFirstNonPHI()); assert(RI->getValue() == LPInst && "Resume must unwind the exception that caused control to here"); // Check that there are no other instructions except for debug intrinsics. if (!isCleanupBlockEmpty( make_range(LPInst->getNextNode(), RI))) return false; // Turn all invokes that unwind here into calls and delete the basic block. for (BasicBlock *Pred : llvm::make_early_inc_range(predecessors(BB))) { removeUnwindEdge(Pred, DTU); ++NumInvokes; } // The landingpad is now unreachable. Zap it. DeleteDeadBlock(BB, DTU); return true; } static bool removeEmptyCleanup(CleanupReturnInst *RI, DomTreeUpdater *DTU) { // If this is a trivial cleanup pad that executes no instructions, it can be // eliminated. If the cleanup pad continues to the caller, any predecessor // that is an EH pad will be updated to continue to the caller and any // predecessor that terminates with an invoke instruction will have its invoke // instruction converted to a call instruction. If the cleanup pad being // simplified does not continue to the caller, each predecessor will be // updated to continue to the unwind destination of the cleanup pad being // simplified. BasicBlock *BB = RI->getParent(); CleanupPadInst *CPInst = RI->getCleanupPad(); if (CPInst->getParent() != BB) // This isn't an empty cleanup. return false; // We cannot kill the pad if it has multiple uses. This typically arises // from unreachable basic blocks. if (!CPInst->hasOneUse()) return false; // Check that there are no other instructions except for benign intrinsics. if (!isCleanupBlockEmpty( make_range(CPInst->getNextNode(), RI))) return false; // If the cleanup return we are simplifying unwinds to the caller, this will // set UnwindDest to nullptr. BasicBlock *UnwindDest = RI->getUnwindDest(); Instruction *DestEHPad = UnwindDest ? 
UnwindDest->getFirstNonPHI() : nullptr; // We're about to remove BB from the control flow. Before we do, sink any // PHINodes into the unwind destination. Doing this before changing the // control flow avoids some potentially slow checks, since we can currently // be certain that UnwindDest and BB have no common predecessors (since they // are both EH pads). if (UnwindDest) { // First, go through the PHI nodes in UnwindDest and update any nodes that // reference the block we are removing for (PHINode &DestPN : UnwindDest->phis()) { int Idx = DestPN.getBasicBlockIndex(BB); // Since BB unwinds to UnwindDest, it has to be in the PHI node. assert(Idx != -1); // This PHI node has an incoming value that corresponds to a control // path through the cleanup pad we are removing. If the incoming // value is in the cleanup pad, it must be a PHINode (because we // verified above that the block is otherwise empty). Otherwise, the // value is either a constant or a value that dominates the cleanup // pad being removed. // // Because BB and UnwindDest are both EH pads, all of their // predecessors must unwind to these blocks, and since no instruction // can have multiple unwind destinations, there will be no overlap in // incoming blocks between SrcPN and DestPN. Value *SrcVal = DestPN.getIncomingValue(Idx); PHINode *SrcPN = dyn_cast(SrcVal); bool NeedPHITranslation = SrcPN && SrcPN->getParent() == BB; for (auto *Pred : predecessors(BB)) { Value *Incoming = NeedPHITranslation ? SrcPN->getIncomingValueForBlock(Pred) : SrcVal; DestPN.addIncoming(Incoming, Pred); } } // Sink any remaining PHI nodes directly into UnwindDest. Instruction *InsertPt = DestEHPad; for (PHINode &PN : make_early_inc_range(BB->phis())) { if (PN.use_empty() || !PN.isUsedOutsideOfBlock(BB)) // If the PHI node has no uses or all of its uses are in this basic // block (meaning they are debug or lifetime intrinsics), just leave // it. It will be erased when we erase BB below. 
continue; // Otherwise, sink this PHI node into UnwindDest. // Any predecessors to UnwindDest which are not already represented // must be back edges which inherit the value from the path through // BB. In this case, the PHI value must reference itself. for (auto *pred : predecessors(UnwindDest)) if (pred != BB) PN.addIncoming(&PN, pred); PN.moveBefore(InsertPt); // Also, add a dummy incoming value for the original BB itself, // so that the PHI is well-formed until we drop said predecessor. PN.addIncoming(UndefValue::get(PN.getType()), BB); } } std::vector Updates; // We use make_early_inc_range here because we will remove all predecessors. for (BasicBlock *PredBB : llvm::make_early_inc_range(predecessors(BB))) { if (UnwindDest == nullptr) { if (DTU) { DTU->applyUpdates(Updates); Updates.clear(); } removeUnwindEdge(PredBB, DTU); ++NumInvokes; } else { BB->removePredecessor(PredBB); Instruction *TI = PredBB->getTerminator(); TI->replaceUsesOfWith(BB, UnwindDest); if (DTU) { Updates.push_back({DominatorTree::Insert, PredBB, UnwindDest}); Updates.push_back({DominatorTree::Delete, PredBB, BB}); } } } if (DTU) DTU->applyUpdates(Updates); DeleteDeadBlock(BB, DTU); return true; } // Try to merge two cleanuppads together. static bool mergeCleanupPad(CleanupReturnInst *RI) { // Skip any cleanuprets which unwind to caller, there is nothing to merge // with. BasicBlock *UnwindDest = RI->getUnwindDest(); if (!UnwindDest) return false; // This cleanupret isn't the only predecessor of this cleanuppad, it wouldn't // be safe to merge without code duplication. if (UnwindDest->getSinglePredecessor() != RI->getParent()) return false; // Verify that our cleanuppad's unwind destination is another cleanuppad. 
auto *SuccessorCleanupPad = dyn_cast(&UnwindDest->front()); if (!SuccessorCleanupPad) return false; CleanupPadInst *PredecessorCleanupPad = RI->getCleanupPad(); // Replace any uses of the successor cleanupad with the predecessor pad // The only cleanuppad uses should be this cleanupret, it's cleanupret and // funclet bundle operands. SuccessorCleanupPad->replaceAllUsesWith(PredecessorCleanupPad); // Remove the old cleanuppad. SuccessorCleanupPad->eraseFromParent(); // Now, we simply replace the cleanupret with a branch to the unwind // destination. BranchInst::Create(UnwindDest, RI->getParent()); RI->eraseFromParent(); return true; } bool SimplifyCFGOpt::simplifyCleanupReturn(CleanupReturnInst *RI) { // It is possible to transiantly have an undef cleanuppad operand because we // have deleted some, but not all, dead blocks. // Eventually, this block will be deleted. if (isa(RI->getOperand(0))) return false; if (mergeCleanupPad(RI)) return true; if (removeEmptyCleanup(RI, DTU)) return true; return false; } // WARNING: keep in sync with InstCombinerImpl::visitUnreachableInst()! bool SimplifyCFGOpt::simplifyUnreachable(UnreachableInst *UI) { BasicBlock *BB = UI->getParent(); bool Changed = false; // If there are any instructions immediately before the unreachable that can // be removed, do so. while (UI->getIterator() != BB->begin()) { BasicBlock::iterator BBI = UI->getIterator(); --BBI; if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI)) break; // Can not drop any more instructions. We're done here. // Otherwise, this instruction can be freely erased, // even if it is not side-effect free. // Note that deleting EH's here is in fact okay, although it involves a bit // of subtle reasoning. If this inst is an EH, all the predecessors of this // block will be the unwind edges of Invoke/CatchSwitch/CleanupReturn, // and we can therefore guarantee this block will be erased. 
// Delete this instruction (any uses are guaranteed to be dead) BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType())); BBI->eraseFromParent(); Changed = true; } // If the unreachable instruction is the first in the block, take a gander // at all of the predecessors of this instruction, and simplify them. if (&BB->front() != UI) return Changed; std::vector Updates; SmallSetVector Preds(pred_begin(BB), pred_end(BB)); for (unsigned i = 0, e = Preds.size(); i != e; ++i) { auto *Predecessor = Preds[i]; Instruction *TI = Predecessor->getTerminator(); IRBuilder<> Builder(TI); if (auto *BI = dyn_cast(TI)) { // We could either have a proper unconditional branch, // or a degenerate conditional branch with matching destinations. if (all_of(BI->successors(), [BB](auto *Successor) { return Successor == BB; })) { new UnreachableInst(TI->getContext(), TI); TI->eraseFromParent(); Changed = true; } else { assert(BI->isConditional() && "Can't get here with an uncond branch."); Value* Cond = BI->getCondition(); assert(BI->getSuccessor(0) != BI->getSuccessor(1) && "The destinations are guaranteed to be different here."); if (BI->getSuccessor(0) == BB) { Builder.CreateAssumption(Builder.CreateNot(Cond)); Builder.CreateBr(BI->getSuccessor(1)); } else { assert(BI->getSuccessor(1) == BB && "Incorrect CFG"); Builder.CreateAssumption(Cond); Builder.CreateBr(BI->getSuccessor(0)); } EraseTerminatorAndDCECond(BI); Changed = true; } if (DTU) Updates.push_back({DominatorTree::Delete, Predecessor, BB}); } else if (auto *SI = dyn_cast(TI)) { SwitchInstProfUpdateWrapper SU(*SI); for (auto i = SU->case_begin(), e = SU->case_end(); i != e;) { if (i->getCaseSuccessor() != BB) { ++i; continue; } BB->removePredecessor(SU->getParent()); i = SU.removeCase(i); e = SU->case_end(); Changed = true; } // Note that the default destination can't be removed! 
if (DTU && SI->getDefaultDest() != BB) Updates.push_back({DominatorTree::Delete, Predecessor, BB}); } else if (auto *II = dyn_cast(TI)) { if (II->getUnwindDest() == BB) { if (DTU) { DTU->applyUpdates(Updates); Updates.clear(); } removeUnwindEdge(TI->getParent(), DTU); Changed = true; } } else if (auto *CSI = dyn_cast(TI)) { if (CSI->getUnwindDest() == BB) { if (DTU) { DTU->applyUpdates(Updates); Updates.clear(); } removeUnwindEdge(TI->getParent(), DTU); Changed = true; continue; } for (CatchSwitchInst::handler_iterator I = CSI->handler_begin(), E = CSI->handler_end(); I != E; ++I) { if (*I == BB) { CSI->removeHandler(I); --I; --E; Changed = true; } } if (DTU) Updates.push_back({DominatorTree::Delete, Predecessor, BB}); if (CSI->getNumHandlers() == 0) { if (CSI->hasUnwindDest()) { // Redirect all predecessors of the block containing CatchSwitchInst // to instead branch to the CatchSwitchInst's unwind destination. if (DTU) { for (auto *PredecessorOfPredecessor : predecessors(Predecessor)) { Updates.push_back({DominatorTree::Insert, PredecessorOfPredecessor, CSI->getUnwindDest()}); Updates.push_back({DominatorTree::Delete, PredecessorOfPredecessor, Predecessor}); } } Predecessor->replaceAllUsesWith(CSI->getUnwindDest()); } else { // Rewrite all preds to unwind to caller (or from invoke to call). if (DTU) { DTU->applyUpdates(Updates); Updates.clear(); } SmallVector EHPreds(predecessors(Predecessor)); for (BasicBlock *EHPred : EHPreds) removeUnwindEdge(EHPred, DTU); } // The catchswitch is no longer reachable. 
new UnreachableInst(CSI->getContext(), CSI); CSI->eraseFromParent(); Changed = true; } } else if (auto *CRI = dyn_cast(TI)) { (void)CRI; assert(CRI->hasUnwindDest() && CRI->getUnwindDest() == BB && "Expected to always have an unwind to BB."); if (DTU) Updates.push_back({DominatorTree::Delete, Predecessor, BB}); new UnreachableInst(TI->getContext(), TI); TI->eraseFromParent(); Changed = true; } } if (DTU) DTU->applyUpdates(Updates); // If this block is now dead, remove it. if (pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()) { DeleteDeadBlock(BB, DTU); return true; } return Changed; } static bool CasesAreContiguous(SmallVectorImpl &Cases) { assert(Cases.size() >= 1); array_pod_sort(Cases.begin(), Cases.end(), ConstantIntSortPredicate); for (size_t I = 1, E = Cases.size(); I != E; ++I) { if (Cases[I - 1]->getValue() != Cases[I]->getValue() + 1) return false; } return true; } static void createUnreachableSwitchDefault(SwitchInst *Switch, DomTreeUpdater *DTU) { LLVM_DEBUG(dbgs() << "SimplifyCFG: switch default is dead.\n"); auto *BB = Switch->getParent(); BasicBlock *NewDefaultBlock = SplitBlockPredecessors( Switch->getDefaultDest(), Switch->getParent(), "", DTU); auto *OrigDefaultBlock = Switch->getDefaultDest(); Switch->setDefaultDest(&*NewDefaultBlock); if (DTU) DTU->applyUpdates({{DominatorTree::Insert, BB, &*NewDefaultBlock}, {DominatorTree::Delete, BB, OrigDefaultBlock}}); SplitBlock(&*NewDefaultBlock, &NewDefaultBlock->front(), DTU); SmallVector Updates; if (DTU) for (auto *Successor : successors(NewDefaultBlock)) Updates.push_back({DominatorTree::Delete, NewDefaultBlock, Successor}); auto *NewTerminator = NewDefaultBlock->getTerminator(); new UnreachableInst(Switch->getContext(), NewTerminator); EraseTerminatorAndDCECond(NewTerminator); if (DTU) DTU->applyUpdates(Updates); } /// Turn a switch with two reachable destinations into an integer range /// comparison and branch. 
bool SimplifyCFGOpt::TurnSwitchRangeIntoICmp(SwitchInst *SI, IRBuilder<> &Builder) { assert(SI->getNumCases() > 1 && "Degenerate switch?"); bool HasDefault = !isa(SI->getDefaultDest()->getFirstNonPHIOrDbg()); auto *BB = SI->getParent(); // Partition the cases into two sets with different destinations. BasicBlock *DestA = HasDefault ? SI->getDefaultDest() : nullptr; BasicBlock *DestB = nullptr; SmallVector CasesA; SmallVector CasesB; for (auto Case : SI->cases()) { BasicBlock *Dest = Case.getCaseSuccessor(); if (!DestA) DestA = Dest; if (Dest == DestA) { CasesA.push_back(Case.getCaseValue()); continue; } if (!DestB) DestB = Dest; if (Dest == DestB) { CasesB.push_back(Case.getCaseValue()); continue; } return false; // More than two destinations. } assert(DestA && DestB && "Single-destination switch should have been folded."); assert(DestA != DestB); assert(DestB != SI->getDefaultDest()); assert(!CasesB.empty() && "There must be non-default cases."); assert(!CasesA.empty() || HasDefault); // Figure out if one of the sets of cases form a contiguous range. SmallVectorImpl *ContiguousCases = nullptr; BasicBlock *ContiguousDest = nullptr; BasicBlock *OtherDest = nullptr; if (!CasesA.empty() && CasesAreContiguous(CasesA)) { ContiguousCases = &CasesA; ContiguousDest = DestA; OtherDest = DestB; } else if (CasesAreContiguous(CasesB)) { ContiguousCases = &CasesB; ContiguousDest = DestB; OtherDest = DestA; } else return false; // Start building the compare and branch. Constant *Offset = ConstantExpr::getNeg(ContiguousCases->back()); Constant *NumCases = ConstantInt::get(Offset->getType(), ContiguousCases->size()); Value *Sub = SI->getCondition(); if (!Offset->isNullValue()) Sub = Builder.CreateAdd(Sub, Offset, Sub->getName() + ".off"); Value *Cmp; // If NumCases overflowed, then all possible values jump to the successor. 
if (NumCases->isNullValue() && !ContiguousCases->empty()) Cmp = ConstantInt::getTrue(SI->getContext()); else Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch"); BranchInst *NewBI = Builder.CreateCondBr(Cmp, ContiguousDest, OtherDest); // Update weight for the newly-created conditional branch. if (HasBranchWeights(SI)) { SmallVector Weights; GetBranchWeights(SI, Weights); if (Weights.size() == 1 + SI->getNumCases()) { uint64_t TrueWeight = 0; uint64_t FalseWeight = 0; for (size_t I = 0, E = Weights.size(); I != E; ++I) { if (SI->getSuccessor(I) == ContiguousDest) TrueWeight += Weights[I]; else FalseWeight += Weights[I]; } while (TrueWeight > UINT32_MAX || FalseWeight > UINT32_MAX) { TrueWeight /= 2; FalseWeight /= 2; } setBranchWeights(NewBI, TrueWeight, FalseWeight); } } // Prune obsolete incoming values off the successors' PHI nodes. for (auto BBI = ContiguousDest->begin(); isa(BBI); ++BBI) { unsigned PreviousEdges = ContiguousCases->size(); if (ContiguousDest == SI->getDefaultDest()) ++PreviousEdges; for (unsigned I = 0, E = PreviousEdges - 1; I != E; ++I) cast(BBI)->removeIncomingValue(SI->getParent()); } for (auto BBI = OtherDest->begin(); isa(BBI); ++BBI) { unsigned PreviousEdges = SI->getNumCases() - ContiguousCases->size(); if (OtherDest == SI->getDefaultDest()) ++PreviousEdges; for (unsigned I = 0, E = PreviousEdges - 1; I != E; ++I) cast(BBI)->removeIncomingValue(SI->getParent()); } // Clean up the default block - it may have phis or other instructions before // the unreachable terminator. if (!HasDefault) createUnreachableSwitchDefault(SI, DTU); auto *UnreachableDefault = SI->getDefaultDest(); // Drop the switch. SI->eraseFromParent(); if (!HasDefault && DTU) DTU->applyUpdates({{DominatorTree::Delete, BB, UnreachableDefault}}); return true; } /// Compute masked bits for the condition of a switch /// and use it to remove dead cases. 
static bool eliminateDeadSwitchCases(SwitchInst *SI, DomTreeUpdater *DTU, AssumptionCache *AC, const DataLayout &DL) { Value *Cond = SI->getCondition(); unsigned Bits = Cond->getType()->getIntegerBitWidth(); KnownBits Known = computeKnownBits(Cond, DL, 0, AC, SI); // We can also eliminate cases by determining that their values are outside of // the limited range of the condition based on how many significant (non-sign) // bits are in the condition value. unsigned ExtraSignBits = ComputeNumSignBits(Cond, DL, 0, AC, SI) - 1; unsigned MaxSignificantBitsInCond = Bits - ExtraSignBits; // Gather dead cases. SmallVector DeadCases; SmallDenseMap NumPerSuccessorCases; for (auto &Case : SI->cases()) { auto *Successor = Case.getCaseSuccessor(); if (DTU) ++NumPerSuccessorCases[Successor]; const APInt &CaseVal = Case.getCaseValue()->getValue(); if (Known.Zero.intersects(CaseVal) || !Known.One.isSubsetOf(CaseVal) || (CaseVal.getMinSignedBits() > MaxSignificantBitsInCond)) { DeadCases.push_back(Case.getCaseValue()); if (DTU) --NumPerSuccessorCases[Successor]; LLVM_DEBUG(dbgs() << "SimplifyCFG: switch case " << CaseVal << " is dead.\n"); } } // If we can prove that the cases must cover all possible values, the // default destination becomes dead and we can remove it. If we know some // of the bits in the value, we can use that to more precisely compute the // number of possible unique case values. 
bool HasDefault = !isa(SI->getDefaultDest()->getFirstNonPHIOrDbg()); const unsigned NumUnknownBits = Bits - (Known.Zero | Known.One).countPopulation(); assert(NumUnknownBits <= Bits); if (HasDefault && DeadCases.empty() && NumUnknownBits < 64 /* avoid overflow */ && SI->getNumCases() == (1ULL << NumUnknownBits)) { createUnreachableSwitchDefault(SI, DTU); return true; } if (DeadCases.empty()) return false; SwitchInstProfUpdateWrapper SIW(*SI); for (ConstantInt *DeadCase : DeadCases) { SwitchInst::CaseIt CaseI = SI->findCaseValue(DeadCase); assert(CaseI != SI->case_default() && "Case was not found. Probably mistake in DeadCases forming."); // Prune unused values from PHI nodes. CaseI->getCaseSuccessor()->removePredecessor(SI->getParent()); SIW.removeCase(CaseI); } if (DTU) { std::vector Updates; for (const std::pair &I : NumPerSuccessorCases) if (I.second == 0) Updates.push_back({DominatorTree::Delete, SI->getParent(), I.first}); DTU->applyUpdates(Updates); } return true; } /// If BB would be eligible for simplification by /// TryToSimplifyUncondBranchFromEmptyBlock (i.e. it is empty and terminated /// by an unconditional branch), look at the phi node for BB in the successor /// block and see if the incoming value is equal to CaseValue. If so, return /// the phi node, and set PhiIndex to BB's index in the phi node. static PHINode *FindPHIForConditionForwarding(ConstantInt *CaseValue, BasicBlock *BB, int *PhiIndex) { if (BB->getFirstNonPHIOrDbg() != BB->getTerminator()) return nullptr; // BB must be empty to be a candidate for simplification. if (!BB->getSinglePredecessor()) return nullptr; // BB must be dominated by the switch. BranchInst *Branch = dyn_cast(BB->getTerminator()); if (!Branch || !Branch->isUnconditional()) return nullptr; // Terminator must be unconditional branch. 
BasicBlock *Succ = Branch->getSuccessor(0); for (PHINode &PHI : Succ->phis()) { int Idx = PHI.getBasicBlockIndex(BB); assert(Idx >= 0 && "PHI has no entry for predecessor?"); Value *InValue = PHI.getIncomingValue(Idx); if (InValue != CaseValue) continue; *PhiIndex = Idx; return &PHI; } return nullptr; } /// Try to forward the condition of a switch instruction to a phi node /// dominated by the switch, if that would mean that some of the destination /// blocks of the switch can be folded away. Return true if a change is made. static bool ForwardSwitchConditionToPHI(SwitchInst *SI) { using ForwardingNodesMap = DenseMap>; ForwardingNodesMap ForwardingNodes; BasicBlock *SwitchBlock = SI->getParent(); bool Changed = false; for (auto &Case : SI->cases()) { ConstantInt *CaseValue = Case.getCaseValue(); BasicBlock *CaseDest = Case.getCaseSuccessor(); // Replace phi operands in successor blocks that are using the constant case // value rather than the switch condition variable: // switchbb: // switch i32 %x, label %default [ // i32 17, label %succ // ... // succ: // %r = phi i32 ... [ 17, %switchbb ] ... // --> // %r = phi i32 ... [ %x, %switchbb ] ... for (PHINode &Phi : CaseDest->phis()) { // This only works if there is exactly 1 incoming edge from the switch to // a phi. If there is >1, that means multiple cases of the switch map to 1 // value in the phi, and that phi value is not the switch condition. Thus, // this transform would not make sense (the phi would be invalid because // a phi can't have different incoming values from the same block). int SwitchBBIdx = Phi.getBasicBlockIndex(SwitchBlock); if (Phi.getIncomingValue(SwitchBBIdx) == CaseValue && count(Phi.blocks(), SwitchBlock) == 1) { Phi.setIncomingValue(SwitchBBIdx, SI->getCondition()); Changed = true; } } // Collect phi nodes that are indirectly using this switch's case constants. 
int PhiIdx; if (auto *Phi = FindPHIForConditionForwarding(CaseValue, CaseDest, &PhiIdx)) ForwardingNodes[Phi].push_back(PhiIdx); } for (auto &ForwardingNode : ForwardingNodes) { PHINode *Phi = ForwardingNode.first; SmallVectorImpl &Indexes = ForwardingNode.second; if (Indexes.size() < 2) continue; for (int Index : Indexes) Phi->setIncomingValue(Index, SI->getCondition()); Changed = true; } return Changed; } /// Return true if the backend will be able to handle /// initializing an array of constants like C. static bool ValidLookupTableConstant(Constant *C, const TargetTransformInfo &TTI) { if (C->isThreadDependent()) return false; if (C->isDLLImportDependent()) return false; if (!isa(C) && !isa(C) && !isa(C) && !isa(C) && !isa(C) && !isa(C)) return false; if (ConstantExpr *CE = dyn_cast(C)) { if (!CE->isGEPWithNoNotionalOverIndexing()) return false; if (!ValidLookupTableConstant(CE->getOperand(0), TTI)) return false; } if (!TTI.shouldBuildLookupTablesForConstant(C)) return false; return true; } /// If V is a Constant, return it. Otherwise, try to look up /// its constant value in ConstantPool, returning 0 if it's not there. static Constant * LookupConstant(Value *V, const SmallDenseMap &ConstantPool) { if (Constant *C = dyn_cast(V)) return C; return ConstantPool.lookup(V); } /// Try to fold instruction I into a constant. This works for /// simple instructions such as binary operations where both operands are /// constant or can be replaced by constants from the ConstantPool. Returns the /// resulting constant on success, 0 otherwise. 
static Constant * ConstantFold(Instruction *I, const DataLayout &DL, const SmallDenseMap &ConstantPool) { if (SelectInst *Select = dyn_cast(I)) { Constant *A = LookupConstant(Select->getCondition(), ConstantPool); if (!A) return nullptr; if (A->isAllOnesValue()) return LookupConstant(Select->getTrueValue(), ConstantPool); if (A->isNullValue()) return LookupConstant(Select->getFalseValue(), ConstantPool); return nullptr; } SmallVector COps; for (unsigned N = 0, E = I->getNumOperands(); N != E; ++N) { if (Constant *A = LookupConstant(I->getOperand(N), ConstantPool)) COps.push_back(A); else return nullptr; } if (CmpInst *Cmp = dyn_cast(I)) { return ConstantFoldCompareInstOperands(Cmp->getPredicate(), COps[0], COps[1], DL); } return ConstantFoldInstOperands(I, COps, DL); } /// Try to determine the resulting constant values in phi nodes /// at the common destination basic block, *CommonDest, for one of the case /// destionations CaseDest corresponding to value CaseVal (0 for the default /// case), of a switch instruction SI. static bool GetCaseResults(SwitchInst *SI, ConstantInt *CaseVal, BasicBlock *CaseDest, BasicBlock **CommonDest, SmallVectorImpl> &Res, const DataLayout &DL, const TargetTransformInfo &TTI) { // The block from which we enter the common destination. BasicBlock *Pred = SI->getParent(); // If CaseDest is empty except for some side-effect free instructions through // which we can constant-propagate the CaseVal, continue to its successor. SmallDenseMap ConstantPool; ConstantPool.insert(std::make_pair(SI->getCondition(), CaseVal)); for (Instruction &I :CaseDest->instructionsWithoutDebug()) { if (I.isTerminator()) { // If the terminator is a simple branch, continue to the next block. if (I.getNumSuccessors() != 1 || I.isExceptionalTerminator()) return false; Pred = CaseDest; CaseDest = I.getSuccessor(0); } else if (Constant *C = ConstantFold(&I, DL, ConstantPool)) { // Instruction is side-effect free and constant. 
// If the instruction has uses outside this block or a phi node slot for // the block, it is not safe to bypass the instruction since it would then // no longer dominate all its uses. for (auto &Use : I.uses()) { User *User = Use.getUser(); if (Instruction *I = dyn_cast(User)) if (I->getParent() == CaseDest) continue; if (PHINode *Phi = dyn_cast(User)) if (Phi->getIncomingBlock(Use) == CaseDest) continue; return false; } ConstantPool.insert(std::make_pair(&I, C)); } else { break; } } // If we did not have a CommonDest before, use the current one. if (!*CommonDest) *CommonDest = CaseDest; // If the destination isn't the common one, abort. if (CaseDest != *CommonDest) return false; // Get the values for this case from phi nodes in the destination block. for (PHINode &PHI : (*CommonDest)->phis()) { int Idx = PHI.getBasicBlockIndex(Pred); if (Idx == -1) continue; Constant *ConstVal = LookupConstant(PHI.getIncomingValue(Idx), ConstantPool); if (!ConstVal) return false; // Be conservative about which kinds of constants we support. if (!ValidLookupTableConstant(ConstVal, TTI)) return false; Res.push_back(std::make_pair(&PHI, ConstVal)); } return Res.size() > 0; } // Helper function used to add CaseVal to the list of cases that generate // Result. Returns the updated number of cases that generate this result. static uintptr_t MapCaseToResult(ConstantInt *CaseVal, SwitchCaseResultVectorTy &UniqueResults, Constant *Result) { for (auto &I : UniqueResults) { if (I.first == Result) { I.second.push_back(CaseVal); return I.second.size(); } } UniqueResults.push_back( std::make_pair(Result, SmallVector(1, CaseVal))); return 1; } // Helper function that initializes a map containing // results for the PHI node of the common destination block for a switch // instruction. Returns false if multiple PHI nodes have been found or if // there is not a common destination block for the switch. 
static bool InitializeUniqueCases(SwitchInst *SI, PHINode *&PHI, BasicBlock *&CommonDest, SwitchCaseResultVectorTy &UniqueResults, Constant *&DefaultResult, const DataLayout &DL, const TargetTransformInfo &TTI, uintptr_t MaxUniqueResults, uintptr_t MaxCasesPerResult) { for (auto &I : SI->cases()) { ConstantInt *CaseVal = I.getCaseValue(); // Resulting value at phi nodes for this case value. SwitchCaseResultsTy Results; if (!GetCaseResults(SI, CaseVal, I.getCaseSuccessor(), &CommonDest, Results, DL, TTI)) return false; // Only one value per case is permitted. if (Results.size() > 1) return false; // Add the case->result mapping to UniqueResults. const uintptr_t NumCasesForResult = MapCaseToResult(CaseVal, UniqueResults, Results.begin()->second); // Early out if there are too many cases for this result. if (NumCasesForResult > MaxCasesPerResult) return false; // Early out if there are too many unique results. if (UniqueResults.size() > MaxUniqueResults) return false; // Check the PHI consistency. if (!PHI) PHI = Results[0].first; else if (PHI != Results[0].first) return false; } // Find the default result value. SmallVector, 1> DefaultResults; BasicBlock *DefaultDest = SI->getDefaultDest(); GetCaseResults(SI, nullptr, SI->getDefaultDest(), &CommonDest, DefaultResults, DL, TTI); // If the default value is not found abort unless the default destination // is unreachable. DefaultResult = DefaultResults.size() == 1 ? DefaultResults.begin()->second : nullptr; if ((!DefaultResult && !isa(DefaultDest->getFirstNonPHIOrDbg()))) return false; return true; } // Helper function that checks if it is possible to transform a switch with only // two cases (or two cases + default) that produces a result into a select. 
// Example: // switch (a) { // case 10: %0 = icmp eq i32 %a, 10 // return 10; %1 = select i1 %0, i32 10, i32 4 // case 20: ----> %2 = icmp eq i32 %a, 20 // return 2; %3 = select i1 %2, i32 2, i32 %1 // default: // return 4; // } static Value *ConvertTwoCaseSwitch(const SwitchCaseResultVectorTy &ResultVector, Constant *DefaultResult, Value *Condition, IRBuilder<> &Builder) { // If we are selecting between only two cases transform into a simple // select or a two-way select if default is possible. if (ResultVector.size() == 2 && ResultVector[0].second.size() == 1 && ResultVector[1].second.size() == 1) { ConstantInt *const FirstCase = ResultVector[0].second[0]; ConstantInt *const SecondCase = ResultVector[1].second[0]; bool DefaultCanTrigger = DefaultResult; Value *SelectValue = ResultVector[1].first; if (DefaultCanTrigger) { Value *const ValueCompare = Builder.CreateICmpEQ(Condition, SecondCase, "switch.selectcmp"); SelectValue = Builder.CreateSelect(ValueCompare, ResultVector[1].first, DefaultResult, "switch.select"); } Value *const ValueCompare = Builder.CreateICmpEQ(Condition, FirstCase, "switch.selectcmp"); return Builder.CreateSelect(ValueCompare, ResultVector[0].first, SelectValue, "switch.select"); } // Handle the degenerate case where two cases have the same value. if (ResultVector.size() == 1 && ResultVector[0].second.size() == 2 && DefaultResult) { Value *Cmp1 = Builder.CreateICmpEQ( Condition, ResultVector[0].second[0], "switch.selectcmp.case1"); Value *Cmp2 = Builder.CreateICmpEQ( Condition, ResultVector[0].second[1], "switch.selectcmp.case2"); Value *Cmp = Builder.CreateOr(Cmp1, Cmp2, "switch.selectcmp"); return Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult); } return nullptr; } // Helper function to cleanup a switch instruction that has been converted into // a select, fixing up PHI nodes and basic blocks. 
static void RemoveSwitchAfterSelectConversion(SwitchInst *SI, PHINode *PHI, Value *SelectValue, IRBuilder<> &Builder, DomTreeUpdater *DTU) { std::vector Updates; BasicBlock *SelectBB = SI->getParent(); BasicBlock *DestBB = PHI->getParent(); if (DTU && !is_contained(predecessors(DestBB), SelectBB)) Updates.push_back({DominatorTree::Insert, SelectBB, DestBB}); Builder.CreateBr(DestBB); // Remove the switch. while (PHI->getBasicBlockIndex(SelectBB) >= 0) PHI->removeIncomingValue(SelectBB); PHI->addIncoming(SelectValue, SelectBB); SmallPtrSet RemovedSuccessors; for (unsigned i = 0, e = SI->getNumSuccessors(); i < e; ++i) { BasicBlock *Succ = SI->getSuccessor(i); if (Succ == DestBB) continue; Succ->removePredecessor(SelectBB); if (DTU && RemovedSuccessors.insert(Succ).second) Updates.push_back({DominatorTree::Delete, SelectBB, Succ}); } SI->eraseFromParent(); if (DTU) DTU->applyUpdates(Updates); } /// If the switch is only used to initialize one or more /// phi nodes in a common successor block with only two different /// constant values, replace the switch with select. static bool switchToSelect(SwitchInst *SI, IRBuilder<> &Builder, DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI) { Value *const Cond = SI->getCondition(); PHINode *PHI = nullptr; BasicBlock *CommonDest = nullptr; Constant *DefaultResult; SwitchCaseResultVectorTy UniqueResults; // Collect all the cases that will deliver the same value from the switch. if (!InitializeUniqueCases(SI, PHI, CommonDest, UniqueResults, DefaultResult, DL, TTI, /*MaxUniqueResults*/2, /*MaxCasesPerResult*/2)) return false; assert(PHI != nullptr && "PHI for value select not found"); Builder.SetInsertPoint(SI); Value *SelectValue = ConvertTwoCaseSwitch(UniqueResults, DefaultResult, Cond, Builder); if (SelectValue) { RemoveSwitchAfterSelectConversion(SI, PHI, SelectValue, Builder, DTU); return true; } // The switch couldn't be converted into a select. 
return false; } namespace { /// This class represents a lookup table that can be used to replace a switch. class SwitchLookupTable { public: /// Create a lookup table to use as a switch replacement with the contents /// of Values, using DefaultValue to fill any holes in the table. SwitchLookupTable( Module &M, uint64_t TableSize, ConstantInt *Offset, const SmallVectorImpl> &Values, Constant *DefaultValue, const DataLayout &DL, const StringRef &FuncName); /// Build instructions with Builder to retrieve the value at /// the position given by Index in the lookup table. Value *BuildLookup(Value *Index, IRBuilder<> &Builder); /// Return true if a table with TableSize elements of /// type ElementType would fit in a target-legal register. static bool WouldFitInRegister(const DataLayout &DL, uint64_t TableSize, Type *ElementType); private: // Depending on the contents of the table, it can be represented in // different ways. enum { // For tables where each element contains the same value, we just have to // store that single value and return it for each lookup. SingleValueKind, // For tables where there is a linear relationship between table index // and values. We calculate the result with a simple multiplication // and addition instead of a table lookup. LinearMapKind, // For small tables with integer elements, we can pack them into a bitmap // that fits into a target-legal register. Values are retrieved by // shift and mask operations. BitMapKind, // The table is stored as an array of values. Values are retrieved by load // instructions from the table. ArrayKind } Kind; // For SingleValueKind, this is the single value. Constant *SingleValue = nullptr; // For BitMapKind, this is the bitmap. ConstantInt *BitMap = nullptr; IntegerType *BitMapElementTy = nullptr; // For LinearMapKind, these are the constants used to derive the value. ConstantInt *LinearOffset = nullptr; ConstantInt *LinearMultiplier = nullptr; // For ArrayKind, this is the array. 
GlobalVariable *Array = nullptr; }; } // end anonymous namespace SwitchLookupTable::SwitchLookupTable( Module &M, uint64_t TableSize, ConstantInt *Offset, const SmallVectorImpl> &Values, Constant *DefaultValue, const DataLayout &DL, const StringRef &FuncName) { assert(Values.size() && "Can't build lookup table without values!"); assert(TableSize >= Values.size() && "Can't fit values in table!"); // If all values in the table are equal, this is that value. SingleValue = Values.begin()->second; Type *ValueType = Values.begin()->second->getType(); // Build up the table contents. SmallVector TableContents(TableSize); for (size_t I = 0, E = Values.size(); I != E; ++I) { ConstantInt *CaseVal = Values[I].first; Constant *CaseRes = Values[I].second; assert(CaseRes->getType() == ValueType); uint64_t Idx = (CaseVal->getValue() - Offset->getValue()).getLimitedValue(); TableContents[Idx] = CaseRes; if (CaseRes != SingleValue) SingleValue = nullptr; } // Fill in any holes in the table with the default result. if (Values.size() < TableSize) { assert(DefaultValue && "Need a default value to fill the lookup table holes."); assert(DefaultValue->getType() == ValueType); for (uint64_t I = 0; I < TableSize; ++I) { if (!TableContents[I]) TableContents[I] = DefaultValue; } if (DefaultValue != SingleValue) SingleValue = nullptr; } // If each element in the table contains the same value, we only need to store // that single value. if (SingleValue) { Kind = SingleValueKind; return; } // Check if we can derive the value with a linear transformation from the // table index. if (isa(ValueType)) { bool LinearMappingPossible = true; APInt PrevVal; APInt DistToPrev; assert(TableSize >= 2 && "Should be a SingleValue table."); // Check if there is the same distance between two consecutive values. for (uint64_t I = 0; I < TableSize; ++I) { ConstantInt *ConstVal = dyn_cast(TableContents[I]); if (!ConstVal) { // This is an undef. We could deal with it, but undefs in lookup tables // are very seldom. 
It's probably not worth the additional complexity. LinearMappingPossible = false; break; } const APInt &Val = ConstVal->getValue(); if (I != 0) { APInt Dist = Val - PrevVal; if (I == 1) { DistToPrev = Dist; } else if (Dist != DistToPrev) { LinearMappingPossible = false; break; } } PrevVal = Val; } if (LinearMappingPossible) { LinearOffset = cast(TableContents[0]); LinearMultiplier = ConstantInt::get(M.getContext(), DistToPrev); Kind = LinearMapKind; ++NumLinearMaps; return; } } // If the type is integer and the table fits in a register, build a bitmap. if (WouldFitInRegister(DL, TableSize, ValueType)) { IntegerType *IT = cast(ValueType); APInt TableInt(TableSize * IT->getBitWidth(), 0); for (uint64_t I = TableSize; I > 0; --I) { TableInt <<= IT->getBitWidth(); // Insert values into the bitmap. Undef values are set to zero. if (!isa(TableContents[I - 1])) { ConstantInt *Val = cast(TableContents[I - 1]); TableInt |= Val->getValue().zext(TableInt.getBitWidth()); } } BitMap = ConstantInt::get(M.getContext(), TableInt); BitMapElementTy = IT; Kind = BitMapKind; ++NumBitMaps; return; } // Store the table in an array. ArrayType *ArrayTy = ArrayType::get(ValueType, TableSize); Constant *Initializer = ConstantArray::get(ArrayTy, TableContents); Array = new GlobalVariable(M, ArrayTy, /*isConstant=*/true, GlobalVariable::PrivateLinkage, Initializer, "switch.table." + FuncName); Array->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Set the alignment to that of an array items. We will be only loading one // value out of it. Array->setAlignment(Align(DL.getPrefTypeAlignment(ValueType))); Kind = ArrayKind; } Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) { switch (Kind) { case SingleValueKind: return SingleValue; case LinearMapKind: { // Derive the result value from the input value. 
Value *Result = Builder.CreateIntCast(Index, LinearMultiplier->getType(), false, "switch.idx.cast"); if (!LinearMultiplier->isOne()) Result = Builder.CreateMul(Result, LinearMultiplier, "switch.idx.mult"); if (!LinearOffset->isZero()) Result = Builder.CreateAdd(Result, LinearOffset, "switch.offset"); return Result; } case BitMapKind: { // Type of the bitmap (e.g. i59). IntegerType *MapTy = BitMap->getType(); // Cast Index to the same type as the bitmap. // Note: The Index is <= the number of elements in the table, so // truncating it to the width of the bitmask is safe. Value *ShiftAmt = Builder.CreateZExtOrTrunc(Index, MapTy, "switch.cast"); // Multiply the shift amount by the element width. ShiftAmt = Builder.CreateMul( ShiftAmt, ConstantInt::get(MapTy, BitMapElementTy->getBitWidth()), "switch.shiftamt"); // Shift down. Value *DownShifted = Builder.CreateLShr(BitMap, ShiftAmt, "switch.downshift"); // Mask off. return Builder.CreateTrunc(DownShifted, BitMapElementTy, "switch.masked"); } case ArrayKind: { // Make sure the table index will not overflow when treated as signed. IntegerType *IT = cast(Index->getType()); uint64_t TableSize = Array->getInitializer()->getType()->getArrayNumElements(); if (TableSize > (1ULL << (IT->getBitWidth() - 1))) Index = Builder.CreateZExt( Index, IntegerType::get(IT->getContext(), IT->getBitWidth() + 1), "switch.tableidx.zext"); Value *GEPIndices[] = {Builder.getInt32(0), Index}; Value *GEP = Builder.CreateInBoundsGEP(Array->getValueType(), Array, GEPIndices, "switch.gep"); return Builder.CreateLoad( cast(Array->getValueType())->getElementType(), GEP, "switch.load"); } } llvm_unreachable("Unknown lookup table kind!"); } bool SwitchLookupTable::WouldFitInRegister(const DataLayout &DL, uint64_t TableSize, Type *ElementType) { auto *IT = dyn_cast(ElementType); if (!IT) return false; // FIXME: If the type is wider than it needs to be, e.g. i8 but all values // are <= 15, we could try to narrow the type. 
// Avoid overflow, fitsInLegalInteger uses unsigned int for the width. if (TableSize >= UINT_MAX / IT->getBitWidth()) return false; return DL.fitsInLegalInteger(TableSize * IT->getBitWidth()); } /// Determine whether a lookup table should be built for this switch, based on /// the number of cases, size of the table, and the types of the results. static bool ShouldBuildLookupTable(SwitchInst *SI, uint64_t TableSize, const TargetTransformInfo &TTI, const DataLayout &DL, const SmallDenseMap &ResultTypes) { if (SI->getNumCases() > TableSize || TableSize >= UINT64_MAX / 10) return false; // TableSize overflowed, or mul below might overflow. bool AllTablesFitInRegister = true; bool HasIllegalType = false; for (const auto &I : ResultTypes) { Type *Ty = I.second; // Saturate this flag to true. HasIllegalType = HasIllegalType || !TTI.isTypeLegal(Ty); // Saturate this flag to false. AllTablesFitInRegister = AllTablesFitInRegister && SwitchLookupTable::WouldFitInRegister(DL, TableSize, Ty); // If both flags saturate, we're done. NOTE: This *only* works with // saturating flags, and all flags have to saturate first due to the // non-deterministic behavior of iterating over a dense map. if (HasIllegalType && !AllTablesFitInRegister) break; } // If each table would fit in a register, we should build it anyway. if (AllTablesFitInRegister) return true; // Don't build a table that doesn't fit in-register if it has illegal types. if (HasIllegalType) return false; // The table density should be at least 40%. This is the same criterion as for // jump tables, see SelectionDAGBuilder::handleJTSwitchCase. // FIXME: Find the best cut-off. return SI->getNumCases() * 10 >= TableSize * 4; } /// Try to reuse the switch table index compare. Following pattern: /// \code /// if (idx < tablesize) /// r = table[idx]; // table does not contain default_value /// else /// r = default_value; /// if (r != default_value) /// ... 
/// \endcode /// Is optimized to: /// \code /// cond = idx < tablesize; /// if (cond) /// r = table[idx]; /// else /// r = default_value; /// if (cond) /// ... /// \endcode /// Jump threading will then eliminate the second if(cond). static void reuseTableCompare( User *PhiUser, BasicBlock *PhiBlock, BranchInst *RangeCheckBranch, Constant *DefaultValue, const SmallVectorImpl> &Values) { ICmpInst *CmpInst = dyn_cast(PhiUser); if (!CmpInst) return; // We require that the compare is in the same block as the phi so that jump // threading can do its work afterwards. if (CmpInst->getParent() != PhiBlock) return; Constant *CmpOp1 = dyn_cast(CmpInst->getOperand(1)); if (!CmpOp1) return; Value *RangeCmp = RangeCheckBranch->getCondition(); Constant *TrueConst = ConstantInt::getTrue(RangeCmp->getType()); Constant *FalseConst = ConstantInt::getFalse(RangeCmp->getType()); // Check if the compare with the default value is constant true or false. Constant *DefaultConst = ConstantExpr::getICmp(CmpInst->getPredicate(), DefaultValue, CmpOp1, true); if (DefaultConst != TrueConst && DefaultConst != FalseConst) return; // Check if the compare with the case values is distinct from the default // compare result. for (auto ValuePair : Values) { Constant *CaseConst = ConstantExpr::getICmp(CmpInst->getPredicate(), ValuePair.second, CmpOp1, true); if (!CaseConst || CaseConst == DefaultConst || isa(CaseConst)) return; assert((CaseConst == TrueConst || CaseConst == FalseConst) && "Expect true or false as compare result."); } // Check if the branch instruction dominates the phi node. It's a simple // dominance check, but sufficient for our needs. // Although this check is invariant in the calling loops, it's better to do it // at this late stage. Practically we do it at most once for a switch. 
BasicBlock *BranchBlock = RangeCheckBranch->getParent(); for (BasicBlock *Pred : predecessors(PhiBlock)) { if (Pred != BranchBlock && Pred->getUniquePredecessor() != BranchBlock) return; } if (DefaultConst == FalseConst) { // The compare yields the same result. We can replace it. CmpInst->replaceAllUsesWith(RangeCmp); ++NumTableCmpReuses; } else { // The compare yields the same result, just inverted. We can replace it. Value *InvertedTableCmp = BinaryOperator::CreateXor( RangeCmp, ConstantInt::get(RangeCmp->getType(), 1), "inverted.cmp", RangeCheckBranch); CmpInst->replaceAllUsesWith(InvertedTableCmp); ++NumTableCmpReuses; } } /// If the switch is only used to initialize one or more phi nodes in a common /// successor block with different constant values, replace the switch with /// lookup tables. static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder, DomTreeUpdater *DTU, const DataLayout &DL, const TargetTransformInfo &TTI) { assert(SI->getNumCases() > 1 && "Degenerate switch?"); BasicBlock *BB = SI->getParent(); Function *Fn = BB->getParent(); // Only build lookup table when we have a target that supports it or the // attribute is not set. if (!TTI.shouldBuildLookupTables() || (Fn->getFnAttribute("no-jump-tables").getValueAsBool())) return false; // FIXME: If the switch is too sparse for a lookup table, perhaps we could // split off a dense part and build a lookup table for that. // FIXME: This creates arrays of GEPs to constant strings, which means each // GEP needs a runtime relocation in PIC code. We should just build one big // string and lookup indices into that. // Ignore switches with less than three cases. Lookup tables will not make // them faster, so we don't analyze them. if (SI->getNumCases() < 3) return false; // Figure out the corresponding result for each case value and phi node in the // common destination, as well as the min and max case values. 
assert(!SI->cases().empty()); SwitchInst::CaseIt CI = SI->case_begin(); ConstantInt *MinCaseVal = CI->getCaseValue(); ConstantInt *MaxCaseVal = CI->getCaseValue(); BasicBlock *CommonDest = nullptr; using ResultListTy = SmallVector, 4>; SmallDenseMap ResultLists; SmallDenseMap DefaultResults; SmallDenseMap ResultTypes; SmallVector PHIs; for (SwitchInst::CaseIt E = SI->case_end(); CI != E; ++CI) { ConstantInt *CaseVal = CI->getCaseValue(); if (CaseVal->getValue().slt(MinCaseVal->getValue())) MinCaseVal = CaseVal; if (CaseVal->getValue().sgt(MaxCaseVal->getValue())) MaxCaseVal = CaseVal; // Resulting value at phi nodes for this case value. using ResultsTy = SmallVector, 4>; ResultsTy Results; if (!GetCaseResults(SI, CaseVal, CI->getCaseSuccessor(), &CommonDest, Results, DL, TTI)) return false; // Append the result from this case to the list for each phi. for (const auto &I : Results) { PHINode *PHI = I.first; Constant *Value = I.second; if (!ResultLists.count(PHI)) PHIs.push_back(PHI); ResultLists[PHI].push_back(std::make_pair(CaseVal, Value)); } } // Keep track of the result types. for (PHINode *PHI : PHIs) { ResultTypes[PHI] = ResultLists[PHI][0].second->getType(); } uint64_t NumResults = ResultLists[PHIs[0]].size(); APInt RangeSpread = MaxCaseVal->getValue() - MinCaseVal->getValue(); uint64_t TableSize = RangeSpread.getLimitedValue() + 1; bool TableHasHoles = (NumResults < TableSize); // If the table has holes, we need a constant result for the default case // or a bitmask that fits in a register. SmallVector, 4> DefaultResultsList; bool HasDefaultResults = GetCaseResults(SI, nullptr, SI->getDefaultDest(), &CommonDest, DefaultResultsList, DL, TTI); bool NeedMask = (TableHasHoles && !HasDefaultResults); if (NeedMask) { // As an extra penalty for the validity test we require more cases. if (SI->getNumCases() < 4) // FIXME: Find best threshold value (benchmark). 
return false; if (!DL.fitsInLegalInteger(TableSize)) return false; } for (const auto &I : DefaultResultsList) { PHINode *PHI = I.first; Constant *Result = I.second; DefaultResults[PHI] = Result; } if (!ShouldBuildLookupTable(SI, TableSize, TTI, DL, ResultTypes)) return false; std::vector Updates; // Create the BB that does the lookups. Module &Mod = *CommonDest->getParent()->getParent(); BasicBlock *LookupBB = BasicBlock::Create( Mod.getContext(), "switch.lookup", CommonDest->getParent(), CommonDest); // Compute the table index value. Builder.SetInsertPoint(SI); Value *TableIndex; if (MinCaseVal->isNullValue()) TableIndex = SI->getCondition(); else TableIndex = Builder.CreateSub(SI->getCondition(), MinCaseVal, "switch.tableidx"); // Compute the maximum table size representable by the integer type we are // switching upon. unsigned CaseSize = MinCaseVal->getType()->getPrimitiveSizeInBits(); uint64_t MaxTableSize = CaseSize > 63 ? UINT64_MAX : 1ULL << CaseSize; assert(MaxTableSize >= TableSize && "It is impossible for a switch to have more entries than the max " "representable value of its input integer type's size."); // If the default destination is unreachable, or if the lookup table covers // all values of the conditional variable, branch directly to the lookup table // BB. Otherwise, check that the condition is within the case range. const bool DefaultIsReachable = !isa(SI->getDefaultDest()->getFirstNonPHIOrDbg()); const bool GeneratingCoveredLookupTable = (MaxTableSize == TableSize); BranchInst *RangeCheckBranch = nullptr; if (!DefaultIsReachable || GeneratingCoveredLookupTable) { Builder.CreateBr(LookupBB); if (DTU) Updates.push_back({DominatorTree::Insert, BB, LookupBB}); // Note: We call removeProdecessor later since we need to be able to get the // PHI value for the default case in case we're using a bit mask. 
} else { Value *Cmp = Builder.CreateICmpULT( TableIndex, ConstantInt::get(MinCaseVal->getType(), TableSize)); RangeCheckBranch = Builder.CreateCondBr(Cmp, LookupBB, SI->getDefaultDest()); if (DTU) Updates.push_back({DominatorTree::Insert, BB, LookupBB}); } // Populate the BB that does the lookups. Builder.SetInsertPoint(LookupBB); if (NeedMask) { // Before doing the lookup, we do the hole check. The LookupBB is therefore // re-purposed to do the hole check, and we create a new LookupBB. BasicBlock *MaskBB = LookupBB; MaskBB->setName("switch.hole_check"); LookupBB = BasicBlock::Create(Mod.getContext(), "switch.lookup", CommonDest->getParent(), CommonDest); // Make the mask's bitwidth at least 8-bit and a power-of-2 to avoid // unnecessary illegal types. uint64_t TableSizePowOf2 = NextPowerOf2(std::max(7ULL, TableSize - 1ULL)); APInt MaskInt(TableSizePowOf2, 0); APInt One(TableSizePowOf2, 1); // Build bitmask; fill in a 1 bit for every case. const ResultListTy &ResultList = ResultLists[PHIs[0]]; for (size_t I = 0, E = ResultList.size(); I != E; ++I) { uint64_t Idx = (ResultList[I].first->getValue() - MinCaseVal->getValue()) .getLimitedValue(); MaskInt |= One << Idx; } ConstantInt *TableMask = ConstantInt::get(Mod.getContext(), MaskInt); // Get the TableIndex'th bit of the bitmask. // If this bit is 0 (meaning hole) jump to the default destination, // else continue with table lookup. 
IntegerType *MapTy = TableMask->getType(); Value *MaskIndex = Builder.CreateZExtOrTrunc(TableIndex, MapTy, "switch.maskindex"); Value *Shifted = Builder.CreateLShr(TableMask, MaskIndex, "switch.shifted"); Value *LoBit = Builder.CreateTrunc( Shifted, Type::getInt1Ty(Mod.getContext()), "switch.lobit"); Builder.CreateCondBr(LoBit, LookupBB, SI->getDefaultDest()); if (DTU) { Updates.push_back({DominatorTree::Insert, MaskBB, LookupBB}); Updates.push_back({DominatorTree::Insert, MaskBB, SI->getDefaultDest()}); } Builder.SetInsertPoint(LookupBB); AddPredecessorToBlock(SI->getDefaultDest(), MaskBB, BB); } if (!DefaultIsReachable || GeneratingCoveredLookupTable) { // We cached PHINodes in PHIs. To avoid accessing deleted PHINodes later, // do not delete PHINodes here. SI->getDefaultDest()->removePredecessor(BB, /*KeepOneInputPHIs=*/true); if (DTU) Updates.push_back({DominatorTree::Delete, BB, SI->getDefaultDest()}); } for (PHINode *PHI : PHIs) { const ResultListTy &ResultList = ResultLists[PHI]; // If using a bitmask, use any value to fill the lookup table holes. Constant *DV = NeedMask ? ResultLists[PHI][0].second : DefaultResults[PHI]; StringRef FuncName = Fn->getName(); SwitchLookupTable Table(Mod, TableSize, MinCaseVal, ResultList, DV, DL, FuncName); Value *Result = Table.BuildLookup(TableIndex, Builder); // Do a small peephole optimization: re-use the switch table compare if // possible. if (!TableHasHoles && HasDefaultResults && RangeCheckBranch) { BasicBlock *PhiBlock = PHI->getParent(); // Search for compare instructions which use the phi. for (auto *User : PHI->users()) { reuseTableCompare(User, PhiBlock, RangeCheckBranch, DV, ResultList); } } PHI->addIncoming(Result, LookupBB); } Builder.CreateBr(CommonDest); if (DTU) Updates.push_back({DominatorTree::Insert, LookupBB, CommonDest}); // Remove the switch. 
SmallPtrSet RemovedSuccessors; for (unsigned i = 0, e = SI->getNumSuccessors(); i < e; ++i) { BasicBlock *Succ = SI->getSuccessor(i); if (Succ == SI->getDefaultDest()) continue; Succ->removePredecessor(BB); RemovedSuccessors.insert(Succ); } SI->eraseFromParent(); if (DTU) { for (BasicBlock *RemovedSuccessor : RemovedSuccessors) Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor}); DTU->applyUpdates(Updates); } ++NumLookupTables; if (NeedMask) ++NumLookupTablesHoles; return true; } static bool isSwitchDense(ArrayRef Values) { // See also SelectionDAGBuilder::isDense(), which this function was based on. uint64_t Diff = (uint64_t)Values.back() - (uint64_t)Values.front(); uint64_t Range = Diff + 1; uint64_t NumCases = Values.size(); // 40% is the default density for building a jump table in optsize/minsize mode. uint64_t MinDensity = 40; return NumCases * 100 >= Range * MinDensity; } /// Try to transform a switch that has "holes" in it to a contiguous sequence /// of cases. /// /// A switch such as: switch(i) {case 5: case 9: case 13: case 17:} can be /// range-reduced to: switch ((i-5) / 4) {case 0: case 1: case 2: case 3:}. /// /// This converts a sparse switch into a dense switch which allows better /// lowering and could also allow transforming into a lookup table. static bool ReduceSwitchRange(SwitchInst *SI, IRBuilder<> &Builder, const DataLayout &DL, const TargetTransformInfo &TTI) { auto *CondTy = cast(SI->getCondition()->getType()); if (CondTy->getIntegerBitWidth() > 64 || !DL.fitsInLegalInteger(CondTy->getIntegerBitWidth())) return false; // Only bother with this optimization if there are more than 3 switch cases; // SDAG will only bother creating jump tables for 4 or more cases. if (SI->getNumCases() < 4) return false; // This transform is agnostic to the signedness of the input or case values. We // can treat the case values as signed or unsigned. 
We can optimize more common // cases such as a sequence crossing zero {-4,0,4,8} if we interpret case values // as signed. SmallVector Values; for (auto &C : SI->cases()) Values.push_back(C.getCaseValue()->getValue().getSExtValue()); llvm::sort(Values); // If the switch is already dense, there's nothing useful to do here. if (isSwitchDense(Values)) return false; // First, transform the values such that they start at zero and ascend. int64_t Base = Values[0]; for (auto &V : Values) V -= (uint64_t)(Base); // Now we have signed numbers that have been shifted so that, given enough // precision, there are no negative values. Since the rest of the transform // is bitwise only, we switch now to an unsigned representation. // This transform can be done speculatively because it is so cheap - it // results in a single rotate operation being inserted. // FIXME: It's possible that optimizing a switch on powers of two might also // be beneficial - flag values are often powers of two and we could use a CLZ // as the key function. // countTrailingZeros(0) returns 64. As Values is guaranteed to have more than // one element and LLVM disallows duplicate cases, Shift is guaranteed to be // less than 64. unsigned Shift = 64; for (auto &V : Values) Shift = std::min(Shift, countTrailingZeros((uint64_t)V)); assert(Shift < 64); if (Shift > 0) for (auto &V : Values) V = (int64_t)((uint64_t)V >> Shift); if (!isSwitchDense(Values)) // Transform didn't create a dense switch. return false; // The obvious transform is to shift the switch condition right and emit a // check that the condition actually cleanly divided by GCD, i.e. // C & (1 << Shift - 1) == 0 // inserting a new CFG edge to handle the case where it didn't divide cleanly. // // A cheaper way of doing this is a simple ROTR(C, Shift). This performs the // shift and puts the shifted-off bits in the uppermost bits. If any of these // are nonzero then the switch condition will be very large and will hit the // default case. 
auto *Ty = cast(SI->getCondition()->getType()); Builder.SetInsertPoint(SI); auto *ShiftC = ConstantInt::get(Ty, Shift); auto *Sub = Builder.CreateSub(SI->getCondition(), ConstantInt::get(Ty, Base)); auto *LShr = Builder.CreateLShr(Sub, ShiftC); auto *Shl = Builder.CreateShl(Sub, Ty->getBitWidth() - Shift); auto *Rot = Builder.CreateOr(LShr, Shl); SI->replaceUsesOfWith(SI->getCondition(), Rot); for (auto Case : SI->cases()) { auto *Orig = Case.getCaseValue(); auto Sub = Orig->getValue() - APInt(Ty->getBitWidth(), Base); Case.setValue( cast(ConstantInt::get(Ty, Sub.lshr(ShiftC->getValue())))); } return true; } bool SimplifyCFGOpt::simplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) { BasicBlock *BB = SI->getParent(); if (isValueEqualityComparison(SI)) { // If we only have one predecessor, and if it is a branch on this value, // see if that predecessor totally determines the outcome of this switch. if (BasicBlock *OnlyPred = BB->getSinglePredecessor()) if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred, Builder)) return requestResimplify(); Value *Cond = SI->getCondition(); if (SelectInst *Select = dyn_cast(Cond)) if (SimplifySwitchOnSelect(SI, Select)) return requestResimplify(); // If the block only contains the switch, see if we can fold the block // away into any preds. if (SI == &*BB->instructionsWithoutDebug().begin()) if (FoldValueComparisonIntoPredecessors(SI, Builder)) return requestResimplify(); } // Try to transform the switch into an icmp and a branch. if (TurnSwitchRangeIntoICmp(SI, Builder)) return requestResimplify(); // Remove unreachable cases. if (eliminateDeadSwitchCases(SI, DTU, Options.AC, DL)) return requestResimplify(); if (switchToSelect(SI, Builder, DTU, DL, TTI)) return requestResimplify(); if (Options.ForwardSwitchCondToPhi && ForwardSwitchConditionToPHI(SI)) return requestResimplify(); // The conversion from switch to lookup tables results in difficult-to-analyze // code and makes pruning branches much harder. 
This is a problem if the // switch expression itself can still be restricted as a result of inlining or // CVP. Therefore, only apply this transformation during late stages of the // optimisation pipeline. if (Options.ConvertSwitchToLookupTable && SwitchToLookupTable(SI, Builder, DTU, DL, TTI)) return requestResimplify(); if (ReduceSwitchRange(SI, Builder, DL, TTI)) return requestResimplify(); return false; } bool SimplifyCFGOpt::simplifyIndirectBr(IndirectBrInst *IBI) { BasicBlock *BB = IBI->getParent(); bool Changed = false; // Eliminate redundant destinations. SmallPtrSet Succs; SmallPtrSet RemovedSuccs; for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) { BasicBlock *Dest = IBI->getDestination(i); if (!Dest->hasAddressTaken() || !Succs.insert(Dest).second) { if (!Dest->hasAddressTaken()) RemovedSuccs.insert(Dest); Dest->removePredecessor(BB); IBI->removeDestination(i); --i; --e; Changed = true; } } if (DTU) { std::vector Updates; Updates.reserve(RemovedSuccs.size()); for (auto *RemovedSucc : RemovedSuccs) Updates.push_back({DominatorTree::Delete, BB, RemovedSucc}); DTU->applyUpdates(Updates); } if (IBI->getNumDestinations() == 0) { // If the indirectbr has no successors, change it to unreachable. new UnreachableInst(IBI->getContext(), IBI); EraseTerminatorAndDCECond(IBI); return true; } if (IBI->getNumDestinations() == 1) { // If the indirectbr has one successor, change it to a direct branch. BranchInst::Create(IBI->getDestination(0), IBI); EraseTerminatorAndDCECond(IBI); return true; } if (SelectInst *SI = dyn_cast(IBI->getAddress())) { if (SimplifyIndirectBrOnSelect(IBI, SI)) return requestResimplify(); } return Changed; } /// Given an block with only a single landing pad and a unconditional branch /// try to find another basic block which this one can be merged with. This /// handles cases where we have multiple invokes with unique landing pads, but /// a shared handler. 
/// /// We specifically choose to not worry about merging non-empty blocks /// here. That is a PRE/scheduling problem and is best solved elsewhere. In /// practice, the optimizer produces empty landing pad blocks quite frequently /// when dealing with exception dense code. (see: instcombine, gvn, if-else /// sinking in this file) /// /// This is primarily a code size optimization. We need to avoid performing /// any transform which might inhibit optimization (such as our ability to /// specialize a particular handler via tail commoning). We do this by not /// merging any blocks which require us to introduce a phi. Since the same /// values are flowing through both blocks, we don't lose any ability to /// specialize. If anything, we make such specialization more likely. /// /// TODO - This transformation could remove entries from a phi in the target /// block when the inputs in the phi are the same for the two blocks being /// merged. In some cases, this could result in removal of the PHI entirely. static bool TryToMergeLandingPad(LandingPadInst *LPad, BranchInst *BI, BasicBlock *BB, DomTreeUpdater *DTU) { auto Succ = BB->getUniqueSuccessor(); assert(Succ); // If there's a phi in the successor block, we'd likely have to introduce // a phi into the merged landing pad block. if (isa(*Succ->begin())) return false; for (BasicBlock *OtherPred : predecessors(Succ)) { if (BB == OtherPred) continue; BasicBlock::iterator I = OtherPred->begin(); LandingPadInst *LPad2 = dyn_cast(I); if (!LPad2 || !LPad2->isIdenticalTo(LPad)) continue; for (++I; isa(I); ++I) ; BranchInst *BI2 = dyn_cast(I); if (!BI2 || !BI2->isIdenticalTo(BI)) continue; std::vector Updates; // We've found an identical block. Update our predecessors to take that // path instead and make ourselves dead. 
SmallPtrSet Preds(pred_begin(BB), pred_end(BB)); for (BasicBlock *Pred : Preds) { InvokeInst *II = cast(Pred->getTerminator()); assert(II->getNormalDest() != BB && II->getUnwindDest() == BB && "unexpected successor"); II->setUnwindDest(OtherPred); if (DTU) { Updates.push_back({DominatorTree::Insert, Pred, OtherPred}); Updates.push_back({DominatorTree::Delete, Pred, BB}); } } // The debug info in OtherPred doesn't cover the merged control flow that // used to go through BB. We need to delete it or update it. for (auto I = OtherPred->begin(), E = OtherPred->end(); I != E;) { Instruction &Inst = *I; I++; if (isa(Inst)) Inst.eraseFromParent(); } SmallPtrSet Succs(succ_begin(BB), succ_end(BB)); for (BasicBlock *Succ : Succs) { Succ->removePredecessor(BB); if (DTU) Updates.push_back({DominatorTree::Delete, BB, Succ}); } IRBuilder<> Builder(BI); Builder.CreateUnreachable(); BI->eraseFromParent(); if (DTU) DTU->applyUpdates(Updates); return true; } return false; } bool SimplifyCFGOpt::simplifyBranch(BranchInst *Branch, IRBuilder<> &Builder) { return Branch->isUnconditional() ? simplifyUncondBranch(Branch, Builder) : simplifyCondBranch(Branch, Builder); } bool SimplifyCFGOpt::simplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder) { BasicBlock *BB = BI->getParent(); BasicBlock *Succ = BI->getSuccessor(0); // If the Terminator is the only non-phi instruction, simplify the block. // If LoopHeader is provided, check if the block or its successor is a loop // header. (This is for early invocations before loop simplify and // vectorization to keep canonical loop forms for nested loops. These blocks // can be eliminated when the pass is invoked later in the back-end.) // Note that if BB has only one predecessor then we do not introduce new // backedge, so we can eliminate BB. 
bool NeedCanonicalLoop = Options.NeedCanonicalLoop && (!LoopHeaders.empty() && BB->hasNPredecessorsOrMore(2) && (is_contained(LoopHeaders, BB) || is_contained(LoopHeaders, Succ))); BasicBlock::iterator I = BB->getFirstNonPHIOrDbg(true)->getIterator(); if (I->isTerminator() && BB != &BB->getParent()->getEntryBlock() && !NeedCanonicalLoop && TryToSimplifyUncondBranchFromEmptyBlock(BB, DTU)) return true; // If the only instruction in the block is a seteq/setne comparison against a // constant, try to simplify the block. if (ICmpInst *ICI = dyn_cast(I)) if (ICI->isEquality() && isa(ICI->getOperand(1))) { for (++I; isa(I); ++I) ; if (I->isTerminator() && tryToSimplifyUncondBranchWithICmpInIt(ICI, Builder)) return true; } // See if we can merge an empty landing pad block with another which is // equivalent. if (LandingPadInst *LPad = dyn_cast(I)) { for (++I; isa(I); ++I) ; if (I->isTerminator() && TryToMergeLandingPad(LPad, BI, BB, DTU)) return true; } // If this basic block is ONLY a compare and a branch, and if a predecessor // branches to us and our successor, fold the comparison into the // predecessor and use logical operations to update the incoming value // for PHI nodes in common successor. 
if (FoldBranchToCommonDest(BI, DTU, /*MSSAU=*/nullptr, &TTI, Options.BonusInstThreshold)) return requestResimplify(); return false; } static BasicBlock *allPredecessorsComeFromSameSource(BasicBlock *BB) { BasicBlock *PredPred = nullptr; for (auto *P : predecessors(BB)) { BasicBlock *PPred = P->getSinglePredecessor(); if (!PPred || (PredPred && PredPred != PPred)) return nullptr; PredPred = PPred; } return PredPred; } bool SimplifyCFGOpt::simplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { BasicBlock *BB = BI->getParent(); if (!Options.SimplifyCondBranch) return false; // Conditional branch if (isValueEqualityComparison(BI)) { // If we only have one predecessor, and if it is a branch on this value, // see if that predecessor totally determines the outcome of this // switch. if (BasicBlock *OnlyPred = BB->getSinglePredecessor()) if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred, Builder)) return requestResimplify(); // This block must be empty, except for the setcond inst, if it exists. // Ignore dbg and pseudo intrinsics. auto I = BB->instructionsWithoutDebug(true).begin(); if (&*I == BI) { if (FoldValueComparisonIntoPredecessors(BI, Builder)) return requestResimplify(); } else if (&*I == cast(BI->getCondition())) { ++I; if (&*I == BI && FoldValueComparisonIntoPredecessors(BI, Builder)) return requestResimplify(); } } // Try to turn "br (X == 0 | X == 1), T, F" into a switch instruction. if (SimplifyBranchOnICmpChain(BI, Builder, DL)) return true; // If this basic block has dominating predecessor blocks and the dominating // blocks' conditions imply BI's condition, we know the direction of BI. Optional Imp = isImpliedByDomCondition(BI->getCondition(), BI, DL); if (Imp) { // Turn this into a branch on constant. auto *OldCond = BI->getCondition(); ConstantInt *TorF = *Imp ? 
ConstantInt::getTrue(BB->getContext()) : ConstantInt::getFalse(BB->getContext()); BI->setCondition(TorF); RecursivelyDeleteTriviallyDeadInstructions(OldCond); return requestResimplify(); } // If this basic block is ONLY a compare and a branch, and if a predecessor // branches to us and one of our successors, fold the comparison into the // predecessor and use logical operations to pick the right destination. if (FoldBranchToCommonDest(BI, DTU, /*MSSAU=*/nullptr, &TTI, Options.BonusInstThreshold)) return requestResimplify(); // We have a conditional branch to two blocks that are only reachable // from BI. We know that the condbr dominates the two blocks, so see if // there is any identical code in the "then" and "else" blocks. If so, we // can hoist it up to the branching block. if (BI->getSuccessor(0)->getSinglePredecessor()) { if (BI->getSuccessor(1)->getSinglePredecessor()) { if (HoistCommon && HoistThenElseCodeToIf(BI, TTI, !Options.HoistCommonInsts)) return requestResimplify(); } else { // If Successor #1 has multiple preds, we may be able to conditionally // execute Successor #0 if it branches to Successor #1. Instruction *Succ0TI = BI->getSuccessor(0)->getTerminator(); if (Succ0TI->getNumSuccessors() == 1 && Succ0TI->getSuccessor(0) == BI->getSuccessor(1)) if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0), TTI)) return requestResimplify(); } } else if (BI->getSuccessor(1)->getSinglePredecessor()) { // If Successor #0 has multiple preds, we may be able to conditionally // execute Successor #1 if it branches to Successor #0. Instruction *Succ1TI = BI->getSuccessor(1)->getTerminator(); if (Succ1TI->getNumSuccessors() == 1 && Succ1TI->getSuccessor(0) == BI->getSuccessor(0)) if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1), TTI)) return requestResimplify(); } // If this is a branch on a phi node in the current block, thread control // through this block if any PHI node entries are constants. 
if (PHINode *PN = dyn_cast(BI->getCondition())) if (PN->getParent() == BI->getParent()) if (FoldCondBranchOnPHI(BI, DTU, DL, Options.AC)) return requestResimplify(); // Scan predecessor blocks for conditional branches. for (BasicBlock *Pred : predecessors(BB)) if (BranchInst *PBI = dyn_cast(Pred->getTerminator())) if (PBI != BI && PBI->isConditional()) if (SimplifyCondBranchToCondBranch(PBI, BI, DTU, DL, TTI)) return requestResimplify(); // Look for diamond patterns. if (MergeCondStores) if (BasicBlock *PrevBB = allPredecessorsComeFromSameSource(BB)) if (BranchInst *PBI = dyn_cast(PrevBB->getTerminator())) if (PBI != BI && PBI->isConditional()) if (mergeConditionalStores(PBI, BI, DTU, DL, TTI)) return requestResimplify(); return false; } /// Check if passing a value to an instruction will cause undefined behavior. static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I, bool PtrValueMayBeModified) { Constant *C = dyn_cast(V); if (!C) return false; if (I->use_empty()) return false; if (C->isNullValue() || isa(C)) { // Only look at the first use, avoid hurting compile time with long uselists User *Use = *I->user_begin(); // Now make sure that there are no instructions in between that can alter // control flow (eg. calls) for (BasicBlock::iterator i = ++BasicBlock::iterator(I), UI = BasicBlock::iterator(dyn_cast(Use)); i != UI; ++i) { if (i == I->getParent()->end()) return false; if (!isGuaranteedToTransferExecutionToSuccessor(&*i)) return false; } // Look through GEPs. A load from a GEP derived from NULL is still undefined if (GetElementPtrInst *GEP = dyn_cast(Use)) if (GEP->getPointerOperand() == I) { if (!GEP->isInBounds() || !GEP->hasAllZeroIndices()) PtrValueMayBeModified = true; return passingValueIsAlwaysUndefined(V, GEP, PtrValueMayBeModified); } // Look through bitcasts. if (BitCastInst *BC = dyn_cast(Use)) return passingValueIsAlwaysUndefined(V, BC, PtrValueMayBeModified); // Load from null is undefined. 
if (LoadInst *LI = dyn_cast(Use)) if (!LI->isVolatile()) return !NullPointerIsDefined(LI->getFunction(), LI->getPointerAddressSpace()); // Store to null is undefined. if (StoreInst *SI = dyn_cast(Use)) if (!SI->isVolatile()) return (!NullPointerIsDefined(SI->getFunction(), SI->getPointerAddressSpace())) && SI->getPointerOperand() == I; if (auto *CB = dyn_cast(Use)) { if (C->isNullValue() && NullPointerIsDefined(CB->getFunction())) return false; // A call to null is undefined. if (CB->getCalledOperand() == I) return true; if (C->isNullValue()) { for (const llvm::Use &Arg : CB->args()) if (Arg == I) { unsigned ArgIdx = CB->getArgOperandNo(&Arg); if (CB->isPassingUndefUB(ArgIdx) && CB->paramHasAttr(ArgIdx, Attribute::NonNull)) { // Passing null to a nonnnull+noundef argument is undefined. return !PtrValueMayBeModified; } } } else if (isa(C)) { // Passing undef to a noundef argument is undefined. for (const llvm::Use &Arg : CB->args()) if (Arg == I) { unsigned ArgIdx = CB->getArgOperandNo(&Arg); if (CB->isPassingUndefUB(ArgIdx)) { // Passing undef to a noundef argument is undefined. return true; } } } } } return false; } /// If BB has an incoming value that will always trigger undefined behavior /// (eg. null pointer dereference), remove the branch leading here. static bool removeUndefIntroducingPredecessor(BasicBlock *BB, DomTreeUpdater *DTU) { for (PHINode &PHI : BB->phis()) for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) if (passingValueIsAlwaysUndefined(PHI.getIncomingValue(i), &PHI)) { BasicBlock *Predecessor = PHI.getIncomingBlock(i); Instruction *T = Predecessor->getTerminator(); IRBuilder<> Builder(T); if (BranchInst *BI = dyn_cast(T)) { BB->removePredecessor(Predecessor); // Turn uncoditional branches into unreachables and remove the dead // destination from conditional branches. if (BI->isUnconditional()) Builder.CreateUnreachable(); else Builder.CreateBr(BI->getSuccessor(0) == BB ? 
BI->getSuccessor(1) : BI->getSuccessor(0)); BI->eraseFromParent(); if (DTU) DTU->applyUpdates({{DominatorTree::Delete, Predecessor, BB}}); return true; } // TODO: SwitchInst. } return false; } bool SimplifyCFGOpt::simplifyOnceImpl(BasicBlock *BB) { bool Changed = false; assert(BB && BB->getParent() && "Block not embedded in function!"); assert(BB->getTerminator() && "Degenerate basic block encountered!"); // Remove basic blocks that have no predecessors (except the entry block)... // or that just have themself as a predecessor. These are unreachable. if ((pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()) || BB->getSinglePredecessor() == BB) { LLVM_DEBUG(dbgs() << "Removing BB: \n" << *BB); DeleteDeadBlock(BB, DTU); return true; } // Check to see if we can constant propagate this terminator instruction // away... Changed |= ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true, /*TLI=*/nullptr, DTU); // Check for and eliminate duplicate PHI nodes in this block. Changed |= EliminateDuplicatePHINodes(BB); // Check for and remove branches that will always cause undefined behavior. Changed |= removeUndefIntroducingPredecessor(BB, DTU); // Merge basic blocks into their predecessor if there is only one distinct // pred, and if there is only one distinct successor of the predecessor, and // if there are no PHI nodes. if (MergeBlockIntoPredecessor(BB, DTU)) return true; if (SinkCommon && Options.SinkCommonInsts) if (SinkCommonCodeFromPredecessors(BB, DTU)) { // SinkCommonCodeFromPredecessors() does not automatically CSE PHI's, // so we may now how duplicate PHI's. // Let's rerun EliminateDuplicatePHINodes() first, // before FoldTwoEntryPHINode() potentially converts them into select's, // after which we'd need a whole EarlyCSE pass run to cleanup them. return true; } IRBuilder<> Builder(BB); if (Options.FoldTwoEntryPHINode) { // If there is a trivial two-entry PHI node in this basic block, and we can // eliminate it, do so now. 
if (auto *PN = dyn_cast(BB->begin())) if (PN->getNumIncomingValues() == 2) Changed |= FoldTwoEntryPHINode(PN, TTI, DTU, DL); } Instruction *Terminator = BB->getTerminator(); Builder.SetInsertPoint(Terminator); switch (Terminator->getOpcode()) { case Instruction::Br: Changed |= simplifyBranch(cast(Terminator), Builder); break; case Instruction::Resume: Changed |= simplifyResume(cast(Terminator), Builder); break; case Instruction::CleanupRet: Changed |= simplifyCleanupReturn(cast(Terminator)); break; case Instruction::Switch: Changed |= simplifySwitch(cast(Terminator), Builder); break; case Instruction::Unreachable: Changed |= simplifyUnreachable(cast(Terminator)); break; case Instruction::IndirectBr: Changed |= simplifyIndirectBr(cast(Terminator)); break; } return Changed; } bool SimplifyCFGOpt::simplifyOnce(BasicBlock *BB) { bool Changed = simplifyOnceImpl(BB); return Changed; } bool SimplifyCFGOpt::run(BasicBlock *BB) { bool Changed = false; // Repeated simplify BB as long as resimplification is requested. do { Resimplify = false; // Perform one round of simplifcation. Resimplify flag will be set if // another iteration is requested. Changed |= simplifyOnce(BB); } while (Resimplify); return Changed; } bool llvm::simplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI, DomTreeUpdater *DTU, const SimplifyCFGOptions &Options, ArrayRef LoopHeaders) { return SimplifyCFGOpt(TTI, DTU, BB->getModule()->getDataLayout(), LoopHeaders, Options) .run(BB); } diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 52b5ae083d0e..c05a8408e1fd 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1,263 +1,266 @@ //===-- VPlanTransforms.cpp - Utility VPlan to VPlan transforms -----------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// /// \file /// This file implements a set of utility VPlan to VPlan transformations. /// //===----------------------------------------------------------------------===// #include "VPlanTransforms.h" #include "llvm/ADT/PostOrderIterator.h" using namespace llvm; void VPlanTransforms::VPInstructionsToVPRecipes( Loop *OrigLoop, VPlanPtr &Plan, LoopVectorizationLegality::InductionList &Inductions, SmallPtrSetImpl &DeadInstructions, ScalarEvolution &SE) { auto *TopRegion = cast(Plan->getEntry()); ReversePostOrderTraversal RPOT(TopRegion->getEntry()); for (VPBlockBase *Base : RPOT) { // Do not widen instructions in pre-header and exit blocks. if (Base->getNumPredecessors() == 0 || Base->getNumSuccessors() == 0) continue; VPBasicBlock *VPBB = Base->getEntryBasicBlock(); // Introduce each ingredient into VPlan. for (auto I = VPBB->begin(), E = VPBB->end(); I != E;) { VPRecipeBase *Ingredient = &*I++; VPValue *VPV = Ingredient->getVPSingleValue(); Instruction *Inst = cast(VPV->getUnderlyingValue()); if (DeadInstructions.count(Inst)) { VPValue DummyValue; VPV->replaceAllUsesWith(&DummyValue); Ingredient->eraseFromParent(); continue; } VPRecipeBase *NewRecipe = nullptr; if (auto *VPPhi = dyn_cast(Ingredient)) { auto *Phi = cast(VPPhi->getUnderlyingValue()); InductionDescriptor II = Inductions.lookup(Phi); if (II.getKind() == InductionDescriptor::IK_IntInduction || II.getKind() == InductionDescriptor::IK_FpInduction) { VPValue *Start = Plan->getOrAddVPValue(II.getStartValue()); NewRecipe = new VPWidenIntOrFpInductionRecipe(Phi, Start, nullptr); } else { Plan->addVPValue(Phi, VPPhi); continue; } } else { assert(isa(Ingredient) && "only VPInstructions expected here"); assert(!isa(Inst) && "phis should be handled above"); // Create VPWidenMemoryInstructionRecipe for loads and stores. 
if (LoadInst *Load = dyn_cast(Inst)) { NewRecipe = new VPWidenMemoryInstructionRecipe( *Load, Plan->getOrAddVPValue(getLoadStorePointerOperand(Inst)), nullptr /*Mask*/); } else if (StoreInst *Store = dyn_cast(Inst)) { NewRecipe = new VPWidenMemoryInstructionRecipe( *Store, Plan->getOrAddVPValue(getLoadStorePointerOperand(Inst)), Plan->getOrAddVPValue(Store->getValueOperand()), nullptr /*Mask*/); } else if (GetElementPtrInst *GEP = dyn_cast(Inst)) { NewRecipe = new VPWidenGEPRecipe( GEP, Plan->mapToVPValues(GEP->operands()), OrigLoop); } else if (CallInst *CI = dyn_cast(Inst)) { NewRecipe = new VPWidenCallRecipe( *CI, Plan->mapToVPValues(CI->arg_operands())); } else if (SelectInst *SI = dyn_cast(Inst)) { bool InvariantCond = SE.isLoopInvariant(SE.getSCEV(SI->getOperand(0)), OrigLoop); NewRecipe = new VPWidenSelectRecipe( *SI, Plan->mapToVPValues(SI->operands()), InvariantCond); } else { NewRecipe = new VPWidenRecipe(*Inst, Plan->mapToVPValues(Inst->operands())); } } NewRecipe->insertBefore(Ingredient); if (NewRecipe->getNumDefinedValues() == 1) VPV->replaceAllUsesWith(NewRecipe->getVPSingleValue()); else assert(NewRecipe->getNumDefinedValues() == 0 && "Only recpies with zero or one defined values expected"); Ingredient->eraseFromParent(); Plan->removeVPValueFor(Inst); for (auto *Def : NewRecipe->definedValues()) { Plan->addVPValue(Inst, Def); } } } } bool VPlanTransforms::sinkScalarOperands(VPlan &Plan) { auto Iter = depth_first( VPBlockRecursiveTraversalWrapper(Plan.getEntry())); bool Changed = false; // First, collect the operands of all predicated replicate recipes as seeds // for sinking. SetVector WorkList; for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly(Iter)) { for (auto &Recipe : *VPBB) { auto *RepR = dyn_cast(&Recipe); if (!RepR || !RepR->isPredicated()) continue; WorkList.insert(RepR->op_begin(), RepR->op_end()); } } // Try to sink each replicate recipe in the worklist. 
while (!WorkList.empty()) { auto *C = WorkList.pop_back_val(); auto *SinkCandidate = dyn_cast_or_null(C->Def); if (!SinkCandidate || SinkCandidate->isUniform()) continue; // All users of SinkCandidate must be in the same block in order to perform // sinking. Therefore the destination block for sinking must match the block // containing the first user. auto *FirstUser = dyn_cast(*SinkCandidate->user_begin()); if (!FirstUser) continue; VPBasicBlock *SinkTo = FirstUser->getParent(); if (SinkCandidate->getParent() == SinkTo || SinkCandidate->mayHaveSideEffects() || SinkCandidate->mayReadOrWriteMemory()) continue; // All recipe users of the sink candidate must be in the same block SinkTo. if (any_of(SinkCandidate->users(), [SinkTo](VPUser *U) { auto *UI = dyn_cast(U); return !UI || UI->getParent() != SinkTo; })) continue; SinkCandidate->moveBefore(*SinkTo, SinkTo->getFirstNonPhi()); WorkList.insert(SinkCandidate->op_begin(), SinkCandidate->op_end()); Changed = true; } return Changed; } /// If \p R is a region with a VPBranchOnMaskRecipe in the entry block, return /// the mask. VPValue *getPredicatedMask(VPRegionBlock *R) { auto *EntryBB = dyn_cast(R->getEntry()); if (!EntryBB || EntryBB->size() != 1 || !isa(EntryBB->begin())) return nullptr; return cast(&*EntryBB->begin())->getOperand(0); } /// If \p R is a triangle region, return the 'then' block of the triangle. 
static VPBasicBlock *getPredicatedThenBlock(VPRegionBlock *R) {
  auto *EntryBB = cast<VPBasicBlock>(R->getEntry());
  if (EntryBB->getNumSuccessors() != 2)
    return nullptr;

  auto *Succ0 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[0]);
  auto *Succ1 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[1]);
  if (!Succ0 || !Succ1)
    return nullptr;

  // Exactly one successor may itself have a successor (the 'then' block);
  // the other must be the merge block.
  if (Succ0->getNumSuccessors() + Succ1->getNumSuccessors() != 1)
    return nullptr;
  if (Succ0->getSingleSuccessor() == Succ1)
    return Succ0;
  if (Succ1->getSingleSuccessor() == Succ0)
    return Succ1;
  return nullptr;
}

/// Merge adjacent replicate regions that are guarded by the same mask and
/// separated only by an empty basic block: recipes of the first region are
/// moved into the second and the first region is removed.
bool VPlanTransforms::mergeReplicateRegions(VPlan &Plan) {
  SetVector<VPRegionBlock *> DeletedRegions;
  bool Changed = false;

  // Collect region blocks to process up-front, to avoid iterator invalidation
  // issues while merging regions.
  SmallVector<VPRegionBlock *, 8> CandidateRegions(
      VPBlockUtils::blocksOnly<VPRegionBlock>(depth_first(
          VPBlockRecursiveTraversalWrapper<VPBlockBase *>(Plan.getEntry()))));

  // Check if Base is a predicated triangle, followed by an empty block,
  // followed by another predicate triangle. If that's the case, move the
  // recipes from the first to the second triangle.
  for (VPRegionBlock *Region1 : CandidateRegions) {
    if (DeletedRegions.contains(Region1))
      continue;
    auto *MiddleBasicBlock =
        dyn_cast_or_null<VPBasicBlock>(Region1->getSingleSuccessor());
    if (!MiddleBasicBlock || !MiddleBasicBlock->empty())
      continue;

    auto *Region2 =
        dyn_cast_or_null<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());
    if (!Region2)
      continue;

    VPValue *Mask1 = getPredicatedMask(Region1);
    VPValue *Mask2 = getPredicatedMask(Region2);
    if (!Mask1 || Mask1 != Mask2)
      continue;
    VPBasicBlock *Then1 = getPredicatedThenBlock(Region1);
    VPBasicBlock *Then2 = getPredicatedThenBlock(Region2);
    if (!Then1 || !Then2)
      continue;

    assert(Mask1 && Mask2 && "both region must have conditions");

    // Note: No fusion-preventing memory dependencies are expected in either
    // region. Such dependencies should be rejected during earlier dependence
    // checks, which guarantee accesses can be re-ordered for vectorization.
    //
    // Move recipes to the successor region.
    for (VPRecipeBase &ToMove : make_early_inc_range(reverse(*Then1)))
      ToMove.moveBefore(*Then2, Then2->getFirstNonPhi());

    auto *Merge1 = cast<VPBasicBlock>(Then1->getSingleSuccessor());
    auto *Merge2 = cast<VPBasicBlock>(Then2->getSingleSuccessor());

    // Move VPPredInstPHIRecipes from the merge block to the successor region's
    // merge block. Update all users inside the successor region to use the
    // original values.
    for (VPRecipeBase &Phi1ToMove : make_early_inc_range(reverse(*Merge1))) {
      VPValue *PredInst1 =
          cast<VPPredInstPHIRecipe>(&Phi1ToMove)->getOperand(0);
      VPValue *Phi1ToMoveV = Phi1ToMove.getVPSingleValue();
      // Snapshot the user list: setOperand() below mutates it, which would
      // invalidate a live iteration over users().
      SmallVector<VPUser *> Users(Phi1ToMoveV->user_begin(),
                                  Phi1ToMoveV->user_end());
      for (VPUser *U : Users) {
        auto *UI = dyn_cast<VPRecipeBase>(U);
        if (!UI || UI->getParent() != Then2)
          continue;
        for (unsigned I = 0, E = U->getNumOperands(); I != E; ++I) {
          if (Phi1ToMoveV != U->getOperand(I))
            continue;
          U->setOperand(I, PredInst1);
        }
      }
      Phi1ToMove.moveBefore(*Merge2, Merge2->begin());
    }

    // Finally, remove the first region.
    for (VPBlockBase *Pred : make_early_inc_range(Region1->getPredecessors())) {
      VPBlockUtils::disconnectBlocks(Pred, Region1);
      VPBlockUtils::connectBlocks(Pred, MiddleBasicBlock);
    }
    VPBlockUtils::disconnectBlocks(Region1, MiddleBasicBlock);
    DeletedRegions.insert(Region1);
    // Fix: record that the plan was modified; previously the function always
    // returned false even after merging regions.
    Changed = true;
  }

  for (VPRegionBlock *ToDelete : DeletedRegions)
    delete ToDelete;
  return Changed;
}