Index: head/contrib/compiler-rt
===================================================================
--- head/contrib/compiler-rt	(revision 315015)
+++ head/contrib/compiler-rt	(revision 315016)

Property changes on: head/contrib/compiler-rt
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /vendor/compiler-rt/dist:r314418-314984
Index: head/contrib/libc++
===================================================================
--- head/contrib/libc++	(revision 315015)
+++ head/contrib/libc++	(revision 315016)

Property changes on: head/contrib/libc++
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /vendor/libc++/dist:r314418-314984
Index: head/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
===================================================================
--- head/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp	(revision 315015)
+++ head/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp	(revision 315016)
@@ -1,958 +1,958 @@
//===- PassManagerBuilder.cpp - Build Standard Pass -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PassManagerBuilder class, which is used to set up a
// "standard" optimization sequence suitable for languages like C and C++.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm-c/Transforms/PassManagerBuilder.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CFLAndersAliasAnalysis.h"
#include "llvm/Analysis/CFLSteensAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/ForceFunctionAttrs.h"
#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/Transforms/IPO/InferFunctionAttrs.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Vectorize.h"

using namespace llvm;

static cl::opt<bool>
    RunLoopVectorization("vectorize-loops", cl::Hidden,
                         cl::desc("Run the Loop vectorization passes"));

static cl::opt<bool>
    RunSLPVectorization("vectorize-slp", cl::Hidden,
                        cl::desc("Run the SLP vectorization passes"));

static cl::opt<bool>
    RunBBVectorization("vectorize-slp-aggressive", cl::Hidden,
                       cl::desc("Run the BB vectorization passes"));

static cl::opt<bool> UseGVNAfterVectorization(
    "use-gvn-after-vectorization", cl::init(false), cl::Hidden,
    cl::desc("Run GVN instead of Early CSE after vectorization passes"));

static cl::opt<bool> ExtraVectorizerPasses(
    "extra-vectorizer-passes", cl::init(false), cl::Hidden,
    cl::desc("Run cleanup optimization passes after vectorization."));

static cl::opt<bool>
    RunLoopRerolling("reroll-loops", cl::Hidden,
                     cl::desc("Run the loop rerolling pass"));

static cl::opt<bool>
    RunLoadCombine("combine-loads", cl::init(false), cl::Hidden,
                   cl::desc("Run the load combining pass"));

static cl::opt<bool> RunNewGVN("enable-newgvn", cl::init(false), cl::Hidden,
                               cl::desc("Run the NewGVN pass"));

static cl::opt<bool> RunSLPAfterLoopVectorization(
    "run-slp-after-loop-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Run the SLP vectorizer (and BB vectorizer) after the Loop "
             "vectorizer instead of before"));

// Experimental option to use CFL-AA
enum class CFLAAType { None, Steensgaard, Andersen, Both };
static cl::opt<CFLAAType>
    UseCFLAA("use-cfl-aa", cl::init(CFLAAType::None), cl::Hidden,
             cl::desc("Enable the new, experimental CFL alias analysis"),
             cl::values(clEnumValN(CFLAAType::None, "none", "Disable CFL-AA"),
                        clEnumValN(CFLAAType::Steensgaard, "steens",
                                   "Enable unification-based CFL-AA"),
                        clEnumValN(CFLAAType::Andersen, "anders",
                                   "Enable inclusion-based CFL-AA"),
                        clEnumValN(CFLAAType::Both, "both",
                                   "Enable both variants of CFL-AA")));

static cl::opt<bool>
    EnableMLSM("mlsm", cl::init(true), cl::Hidden,
               cl::desc("Enable motion of merged load and store"));

static cl::opt<bool> EnableLoopInterchange(
    "enable-loopinterchange", cl::init(false), cl::Hidden,
    cl::desc("Enable the new, experimental LoopInterchange Pass"));

static cl::opt<bool> EnableNonLTOGlobalsModRef(
    "enable-non-lto-gmr", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable the GlobalsModRef AliasAnalysis outside of the LTO pipeline."));

static cl::opt<bool> EnableLoopLoadElim(
    "enable-loop-load-elim", cl::init(true), cl::Hidden,
    cl::desc("Enable the LoopLoadElimination Pass"));

static cl::opt<bool>
    EnablePrepareForThinLTO("prepare-for-thinlto", cl::init(false), cl::Hidden,
                            cl::desc("Enable preparation for ThinLTO."));

static cl::opt<bool> RunPGOInstrGen(
    "profile-generate", cl::init(false), cl::Hidden,
    cl::desc("Enable PGO instrumentation."));

static cl::opt<std::string>
    PGOOutputFile("profile-generate-file", cl::init(""), cl::Hidden,
                  cl::desc("Specify the path of profile data file."));

static cl::opt<std::string> RunPGOInstrUse(
    "profile-use", cl::init(""), cl::Hidden, cl::value_desc("filename"),
    cl::desc("Enable use phase of PGO instrumentation and specify the path "
             "of profile data file"));

static cl::opt<bool> UseLoopVersioningLICM(
    "enable-loop-versioning-licm", cl::init(false), cl::Hidden,
    cl::desc("Enable the experimental Loop Versioning LICM pass"));

static cl::opt<bool>
    DisablePreInliner("disable-preinline", cl::init(false), cl::Hidden,
                      cl::desc("Disable pre-instrumentation inliner"));

static cl::opt<int> PreInlineThreshold(
    "preinline-threshold", cl::Hidden, cl::init(75), cl::ZeroOrMore,
    cl::desc("Control the amount of inlining in pre-instrumentation inliner "
             "(default = 75)"));

static cl::opt<bool> EnableGVNHoist(
-    "enable-gvn-hoist", cl::init(true), cl::Hidden,
-    cl::desc("Enable the GVN hoisting pass (default = on)"));
+    "enable-gvn-hoist", cl::init(false), cl::Hidden,
+    cl::desc("Enable the GVN hoisting pass"));

static cl::opt<bool>
    DisableLibCallsShrinkWrap("disable-libcalls-shrinkwrap", cl::init(false),
                              cl::Hidden,
                              cl::desc("Disable shrink-wrap library calls"));
PassManagerBuilder::PassManagerBuilder() {
    OptLevel = 2;
    SizeLevel = 0;
    LibraryInfo = nullptr;
    Inliner = nullptr;
    DisableUnitAtATime = false;
    DisableUnrollLoops = false;
    BBVectorize = RunBBVectorization;
    SLPVectorize = RunSLPVectorization;
    LoopVectorize = RunLoopVectorization;
    RerollLoops = RunLoopRerolling;
    LoadCombine = RunLoadCombine;
    NewGVN = RunNewGVN;
    DisableGVNLoadPRE = false;
    VerifyInput = false;
    VerifyOutput = false;
    MergeFunctions = false;
    PrepareForLTO = false;
    EnablePGOInstrGen = RunPGOInstrGen;
    PGOInstrGen = PGOOutputFile;
    PGOInstrUse = RunPGOInstrUse;
    PrepareForThinLTO = EnablePrepareForThinLTO;
    PerformThinLTO = false;
}

PassManagerBuilder::~PassManagerBuilder() {
  delete LibraryInfo;
  delete Inliner;
}

/// Set of global extensions, automatically added as part of the standard set.
static ManagedStatic<SmallVector<std::pair<PassManagerBuilder::ExtensionPointTy,
   PassManagerBuilder::ExtensionFn>, 8> > GlobalExtensions;

void PassManagerBuilder::addGlobalExtension(
    PassManagerBuilder::ExtensionPointTy Ty,
    PassManagerBuilder::ExtensionFn Fn) {
  GlobalExtensions->push_back(std::make_pair(Ty, std::move(Fn)));
}

void PassManagerBuilder::addExtension(ExtensionPointTy Ty, ExtensionFn Fn) {
  Extensions.push_back(std::make_pair(Ty, std::move(Fn)));
}

void PassManagerBuilder::addExtensionsToPM(ExtensionPointTy ETy,
                                           legacy::PassManagerBase &PM) const {
  for (unsigned i = 0, e = GlobalExtensions->size(); i != e; ++i)
    if ((*GlobalExtensions)[i].first == ETy)
      (*GlobalExtensions)[i].second(*this, PM);
  for (unsigned i = 0, e = Extensions.size(); i != e; ++i)
    if (Extensions[i].first == ETy)
      Extensions[i].second(*this, PM);
}
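The extension mechanism above is how clients splice extra passes into the standard pipeline at fixed points. A hedged sketch of registering one; the addExtension API and the EP_Peephole extension point are the ones used throughout this file, while createMyCleanupPass is a hypothetical pass factory:

    void registerMyExtension(PassManagerBuilder &PMB) {
      PMB.addExtension(PassManagerBuilder::EP_Peephole,
                       [](const PassManagerBuilder &Builder,
                          legacy::PassManagerBase &PM) {
                         // Runs each time the builder reaches EP_Peephole.
                         PM.add(createMyCleanupPass()); // hypothetical pass
                       });
    }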
void PassManagerBuilder::addInitialAliasAnalysisPasses(
    legacy::PassManagerBase &PM) const {
  switch (UseCFLAA) {
  case CFLAAType::Steensgaard:
    PM.add(createCFLSteensAAWrapperPass());
    break;
  case CFLAAType::Andersen:
    PM.add(createCFLAndersAAWrapperPass());
    break;
  case CFLAAType::Both:
    PM.add(createCFLSteensAAWrapperPass());
    PM.add(createCFLAndersAAWrapperPass());
    break;
  default:
    break;
  }

  // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
  // BasicAliasAnalysis wins if they disagree. This is intended to help
  // support "obvious" type-punning idioms.
  PM.add(createTypeBasedAAWrapperPass());
  PM.add(createScopedNoAliasAAWrapperPass());
}

void PassManagerBuilder::addInstructionCombiningPass(
    legacy::PassManagerBase &PM) const {
  bool ExpensiveCombines = OptLevel > 2;
  PM.add(createInstructionCombiningPass(ExpensiveCombines));
}

void PassManagerBuilder::populateFunctionPassManager(
    legacy::FunctionPassManager &FPM) {
  addExtensionsToPM(EP_EarlyAsPossible, FPM);

  // Add LibraryInfo if we have some.
  if (LibraryInfo)
    FPM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));

  if (OptLevel == 0) return;

  addInitialAliasAnalysisPasses(FPM);

  FPM.add(createCFGSimplificationPass());
  FPM.add(createSROAPass());
  FPM.add(createEarlyCSEPass());
  if (EnableGVNHoist)
    FPM.add(createGVNHoistPass());
  FPM.add(createLowerExpectIntrinsicPass());
}

// Do PGO instrumentation generation or use pass as the option specified.
void PassManagerBuilder::addPGOInstrPasses(legacy::PassManagerBase &MPM) {
  if (!EnablePGOInstrGen && PGOInstrUse.empty())
    return;
  // Perform the preinline and cleanup passes for O1 and above.
  // And avoid doing them if optimizing for size.
  if (OptLevel > 0 && SizeLevel == 0 && !DisablePreInliner) {
    // Create preinline pass. We construct an InlineParams object and specify
    // the threshold here to avoid the command line options of the regular
    // inliner to influence pre-inlining. The only fields of InlineParams we
    // care about are DefaultThreshold and HintThreshold.
    InlineParams IP;
    IP.DefaultThreshold = PreInlineThreshold;
    // FIXME: The hint threshold has the same value used by the regular inliner.
    // This should probably be lowered after performance testing.
    IP.HintThreshold = 325;

    MPM.add(createFunctionInliningPass(IP));
    MPM.add(createSROAPass());
    MPM.add(createEarlyCSEPass());             // Catch trivial redundancies
    MPM.add(createCFGSimplificationPass());    // Merge & remove BBs
    MPM.add(createInstructionCombiningPass()); // Combine silly seq's
    addExtensionsToPM(EP_Peephole, MPM);
  }
  if (EnablePGOInstrGen) {
    MPM.add(createPGOInstrumentationGenLegacyPass());
    // Add the profile lowering pass.
    InstrProfOptions Options;
    if (!PGOInstrGen.empty())
      Options.InstrProfileOutput = PGOInstrGen;
    MPM.add(createInstrProfilingLegacyPass(Options));
  }
  if (!PGOInstrUse.empty())
    MPM.add(createPGOInstrumentationUseLegacyPass(PGOInstrUse));
}
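For orientation, this is roughly how a client drives the builder once it is configured. A sketch under the assumption that M is a valid Module; every API shown (the populate* methods, createFunctionInliningPass, the legacy pass managers) appears in this file or in the headers it includes:

    void optimizeModule(Module &M) {
      legacy::PassManager MPM;
      legacy::FunctionPassManager FPM(&M);

      PassManagerBuilder PMB;
      PMB.OptLevel = 2;
      PMB.SizeLevel = 0;
      PMB.Inliner = createFunctionInliningPass(); // builder takes ownership

      PMB.populateFunctionPassManager(FPM);
      PMB.populateModulePassManager(MPM);

      FPM.doInitialization();
      for (Function &F : M)
        FPM.run(F);
      FPM.doFinalization();
      MPM.run(M);
    }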
void PassManagerBuilder::addFunctionSimplificationPasses(
    legacy::PassManagerBase &MPM) {
  // Start of function pass.
  // Break up aggregate allocas, using SSAUpdater.
  MPM.add(createSROAPass());
  MPM.add(createEarlyCSEPass());              // Catch trivial redundancies
  // Speculative execution if the target has divergent branches; otherwise nop.
  MPM.add(createSpeculativeExecutionIfHasBranchDivergencePass());
  MPM.add(createJumpThreadingPass());         // Thread jumps.
  MPM.add(createCorrelatedValuePropagationPass()); // Propagate conditionals
  MPM.add(createCFGSimplificationPass());     // Merge & remove BBs
  // Combine silly seq's
  addInstructionCombiningPass(MPM);
  if (SizeLevel == 0 && !DisableLibCallsShrinkWrap)
    MPM.add(createLibCallsShrinkWrapPass());
  addExtensionsToPM(EP_Peephole, MPM);

  MPM.add(createTailCallEliminationPass());   // Eliminate tail calls
  MPM.add(createCFGSimplificationPass());     // Merge & remove BBs
  MPM.add(createReassociatePass());           // Reassociate expressions
  // Rotate Loop - disable header duplication at -Oz
  MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1));
  MPM.add(createLICMPass());                  // Hoist loop invariants
  MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3));
  MPM.add(createCFGSimplificationPass());
  addInstructionCombiningPass(MPM);
  MPM.add(createIndVarSimplifyPass());        // Canonicalize indvars
  MPM.add(createLoopIdiomPass());             // Recognize idioms like memset.
  MPM.add(createLoopDeletionPass());          // Delete dead loops
  if (EnableLoopInterchange) {
    MPM.add(createLoopInterchangePass());     // Interchange loops
    MPM.add(createCFGSimplificationPass());
  }
  if (!DisableUnrollLoops)
    MPM.add(createSimpleLoopUnrollPass());    // Unroll small loops
  addExtensionsToPM(EP_LoopOptimizerEnd, MPM);

  if (OptLevel > 1) {
    if (EnableMLSM)
      MPM.add(createMergedLoadStoreMotionPass()); // Merge ld/st in diamonds
    MPM.add(NewGVN ? createNewGVNPass()
                   : createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
  }
  MPM.add(createMemCpyOptPass());             // Remove memcpy / form memset
  MPM.add(createSCCPPass());                  // Constant prop with SCCP

  // Delete dead bit computations (instcombine runs after to fold away the dead
  // computations, and then ADCE will run later to exploit any new DCE
  // opportunities that creates).
  MPM.add(createBitTrackingDCEPass());        // Delete dead bit computations

  // Run instcombine after redundancy elimination to exploit opportunities
  // opened up by them.
  addInstructionCombiningPass(MPM);
  addExtensionsToPM(EP_Peephole, MPM);
  MPM.add(createJumpThreadingPass());         // Thread jumps
  MPM.add(createCorrelatedValuePropagationPass());
  MPM.add(createDeadStoreEliminationPass());  // Delete dead stores
  MPM.add(createLICMPass());

  addExtensionsToPM(EP_ScalarOptimizerLate, MPM);

  if (RerollLoops)
    MPM.add(createLoopRerollPass());
  if (!RunSLPAfterLoopVectorization) {
    if (SLPVectorize)
      MPM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.

    if (BBVectorize) {
      MPM.add(createBBVectorizePass());
      addInstructionCombiningPass(MPM);
      addExtensionsToPM(EP_Peephole, MPM);
      if (OptLevel > 1 && UseGVNAfterVectorization)
        MPM.add(NewGVN
                    ? createNewGVNPass()
                    : createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
      else
        MPM.add(createEarlyCSEPass());      // Catch trivial redundancies

      // BBVectorize may have significantly shortened a loop body; unroll again.
      if (!DisableUnrollLoops)
        MPM.add(createLoopUnrollPass());
    }
  }

  if (LoadCombine)
    MPM.add(createLoadCombinePass());

  MPM.add(createAggressiveDCEPass());         // Delete dead instructions
  MPM.add(createCFGSimplificationPass());     // Merge & remove BBs
  // Clean up after everything.
  addInstructionCombiningPass(MPM);
  addExtensionsToPM(EP_Peephole, MPM);
}

void PassManagerBuilder::populateModulePassManager(
    legacy::PassManagerBase &MPM) {
  if (!PGOSampleUse.empty()) {
    MPM.add(createPruneEHPass());
    MPM.add(createSampleProfileLoaderPass(PGOSampleUse));
  }

  // Allow forcing function attributes as a debugging and tuning aid.
  MPM.add(createForceFunctionAttrsLegacyPass());

  // If all optimizations are disabled, just run the always-inline pass and,
  // if enabled, the function merging pass.
  if (OptLevel == 0) {
    addPGOInstrPasses(MPM);
    if (Inliner) {
      MPM.add(Inliner);
      Inliner = nullptr;
    }

    // FIXME: The BarrierNoopPass is a HACK! The inliner pass above implicitly
    // creates a CGSCC pass manager, but we don't want to add extensions into
    // that pass manager. To prevent this we insert a no-op module pass to reset
    // the pass manager to get the same behavior as EP_OptimizerLast in non-O0
    // builds. The function merging pass is
    if (MergeFunctions)
      MPM.add(createMergeFunctionsPass());
    else if (!GlobalExtensions->empty() || !Extensions.empty())
      MPM.add(createBarrierNoopPass());

    if (PrepareForThinLTO)
      // Rename anon globals to be able to export them in the summary.
      MPM.add(createNameAnonGlobalPass());

    addExtensionsToPM(EP_EnabledOnOptLevel0, MPM);
    return;
  }

  // Add LibraryInfo if we have some.
  if (LibraryInfo)
    MPM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));

  addInitialAliasAnalysisPasses(MPM);

  // For ThinLTO there are two passes of indirect call promotion. The
  // first is during the compile phase when PerformThinLTO=false and
  // intra-module indirect call targets are promoted. The second is during
  // the ThinLTO backend when PerformThinLTO=true, when we promote imported
  // inter-module indirect calls. For that we perform indirect call promotion
  // earlier in the pass pipeline, here before globalopt. Otherwise imported
  // available_externally functions look unreferenced and are removed.
  if (PerformThinLTO)
    MPM.add(createPGOIndirectCallPromotionLegacyPass(/*InLTO = */ true));

  if (!DisableUnitAtATime) {
    // Infer attributes about declarations if possible.
    MPM.add(createInferFunctionAttrsLegacyPass());

    addExtensionsToPM(EP_ModuleOptimizerEarly, MPM);

    MPM.add(createIPSCCPPass());          // IP SCCP
    MPM.add(createGlobalOptimizerPass()); // Optimize out global vars
    // Promote any localized global vars.
    MPM.add(createPromoteMemoryToRegisterPass());

    MPM.add(createDeadArgEliminationPass()); // Dead argument elimination

    addInstructionCombiningPass(MPM); // Clean up after IPCP & DAE
    addExtensionsToPM(EP_Peephole, MPM);
    MPM.add(createCFGSimplificationPass()); // Clean up after IPCP & DAE
  }

  if (!PerformThinLTO) {
    /// PGO instrumentation is added during the compile phase for ThinLTO, do
    /// not run it a second time
    addPGOInstrPasses(MPM);
    // Indirect call promotion that promotes intra-module targets only.
    // For ThinLTO this is done earlier due to interactions with globalopt
    // for imported functions.
    MPM.add(createPGOIndirectCallPromotionLegacyPass());
  }

  if (EnableNonLTOGlobalsModRef)
    // We add a module alias analysis pass here. In part due to bugs in the
    // analysis infrastructure this "works" in that the analysis stays alive
    // for the entire SCC pass run below.
    MPM.add(createGlobalsAAWrapperPass());

  // Start of CallGraph SCC passes.
  if (!DisableUnitAtATime)
    MPM.add(createPruneEHPass()); // Remove dead EH info
  if (Inliner) {
    MPM.add(Inliner);
    Inliner = nullptr;
  }
  if (!DisableUnitAtATime)
    MPM.add(createPostOrderFunctionAttrsLegacyPass());
  if (OptLevel > 2)
    MPM.add(createArgumentPromotionPass()); // Scalarize uninlined fn args

  addExtensionsToPM(EP_CGSCCOptimizerLate, MPM);
  addFunctionSimplificationPasses(MPM);

  // FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
  // pass manager that we are specifically trying to avoid. To prevent this
  // we must insert a no-op module pass to reset the pass manager.
  MPM.add(createBarrierNoopPass());

  if (!DisableUnitAtATime && OptLevel > 1 && !PrepareForLTO &&
      !PrepareForThinLTO)
    // Remove avail extern fns and globals definitions if we aren't
    // compiling an object file for later LTO. For LTO we want to preserve
    // these so they are eligible for inlining at link-time. Note if they
    // are unreferenced they will be removed by GlobalDCE later, so
    // this only impacts referenced available externally globals.
    // Eventually they will be suppressed during codegen, but eliminating
    // here enables more opportunity for GlobalDCE as it may make
    // globals referenced by available external functions dead
    // and saves running remaining passes on the eliminated functions.
    MPM.add(createEliminateAvailableExternallyPass());

  if (!DisableUnitAtATime)
    MPM.add(createReversePostOrderFunctionAttrsPass());

  // If we are planning to perform ThinLTO later, let's not bloat the code with
  // unrolling/vectorization/... now. We'll first run the inliner + CGSCC passes
  // during ThinLTO and perform the rest of the optimizations afterward.
  if (PrepareForThinLTO) {
    // Reduce the size of the IR as much as possible.
    MPM.add(createGlobalOptimizerPass());
    // Rename anon globals to be able to export them in the summary.
    MPM.add(createNameAnonGlobalPass());
    return;
  }

  if (PerformThinLTO)
    // Optimize globals now when performing ThinLTO, this enables more
    // optimizations later.
    MPM.add(createGlobalOptimizerPass());

  // Scheduling LoopVersioningLICM when inlining is over, because after that
  // we may see more accurate aliasing. Reason to run this late is that too
  // early versioning may prevent further inlining due to increase of code
  // size. By placing it just after inlining other optimizations which runs
  // later might get benefit of no-alias assumption in clone loop.
  if (UseLoopVersioningLICM) {
    MPM.add(createLoopVersioningLICMPass()); // Do LoopVersioningLICM
    MPM.add(createLICMPass());               // Hoist loop invariants
  }

  if (EnableNonLTOGlobalsModRef)
    // We add a fresh GlobalsModRef run at this point. This is particularly
    // useful as the above will have inlined, DCE'ed, and function-attr
    // propagated everything. We should at this point have a reasonably minimal
    // and richly annotated call graph. By computing aliasing and mod/ref
    // information for all local globals here, the late loop passes and notably
    // the vectorizer will be able to use them to help recognize vectorizable
    // memory operations.
    //
    // Note that this relies on a bug in the pass manager which preserves
    // a module analysis into a function pass pipeline (and throughout it) so
    // long as the first function pass doesn't invalidate the module analysis.
    // Thus both Float2Int and LoopRotate have to preserve AliasAnalysis for
    // this to work. Fortunately, it is trivial to preserve AliasAnalysis
    // (doing nothing preserves it as it is required to be conservatively
    // correct in the face of IR changes).
    MPM.add(createGlobalsAAWrapperPass());

  MPM.add(createFloat2IntPass());

  addExtensionsToPM(EP_VectorizerStart, MPM);

  // Re-rotate loops in all our loop nests. These may have fallout out of
  // rotated form due to GVN or other transformations, and the vectorizer relies
  // on the rotated form. Disable header duplication at -Oz.
  MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1));

  // Distribute loops to allow partial vectorization. I.e. isolate dependences
  // into separate loop that would otherwise inhibit vectorization. This is
  // currently only performed for loops marked with the metadata
  // llvm.loop.distribute=true or when -enable-loop-distribute is specified.
  MPM.add(createLoopDistributePass());

  MPM.add(createLoopVectorizePass(DisableUnrollLoops, LoopVectorize));

  // Eliminate loads by forwarding stores from the previous iteration to loads
  // of the current iteration.
  if (EnableLoopLoadElim)
    MPM.add(createLoopLoadEliminationPass());

  // FIXME: Because of #pragma vectorize enable, the passes below are always
  // inserted in the pipeline, even when the vectorizer doesn't run (ex. when
  // on -O1 and no #pragma is found). Would be good to have these two passes
  // as function calls, so that we can only pass them when the vectorizer
  // changed the code.
  addInstructionCombiningPass(MPM);
  if (OptLevel > 1 && ExtraVectorizerPasses) {
    // At higher optimization levels, try to clean up any runtime overlap and
    // alignment checks inserted by the vectorizer. We want to track correllated
    // runtime checks for two inner loops in the same outer loop, fold any
    // common computations, hoist loop-invariant aspects out of any outer loop,
    // and unswitch the runtime checks if possible. Once hoisted, we may have
    // dead (or speculatable) control flows or more combining opportunities.
    MPM.add(createEarlyCSEPass());
    MPM.add(createCorrelatedValuePropagationPass());
    addInstructionCombiningPass(MPM);
    MPM.add(createLICMPass());
    MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3));
    MPM.add(createCFGSimplificationPass());
    addInstructionCombiningPass(MPM);
  }

  if (RunSLPAfterLoopVectorization) {
    if (SLPVectorize) {
      MPM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.
      if (OptLevel > 1 && ExtraVectorizerPasses) {
        MPM.add(createEarlyCSEPass());
      }
    }

    if (BBVectorize) {
      MPM.add(createBBVectorizePass());
      addInstructionCombiningPass(MPM);
      addExtensionsToPM(EP_Peephole, MPM);
      if (OptLevel > 1 && UseGVNAfterVectorization)
        MPM.add(NewGVN
                    ? createNewGVNPass()
                    : createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
      else
        MPM.add(createEarlyCSEPass());      // Catch trivial redundancies

      // BBVectorize may have significantly shortened a loop body; unroll again.
      if (!DisableUnrollLoops)
        MPM.add(createLoopUnrollPass());
    }
  }

  addExtensionsToPM(EP_Peephole, MPM);
  MPM.add(createCFGSimplificationPass());
  addInstructionCombiningPass(MPM);

  if (!DisableUnrollLoops) {
    MPM.add(createLoopUnrollPass());    // Unroll small loops

    // LoopUnroll may generate some redundency to cleanup.
    addInstructionCombiningPass(MPM);

    // Runtime unrolling will introduce runtime check in loop prologue. If the
    // unrolled loop is a inner loop, then the prologue will be inside the
    // outer loop. LICM pass can help to promote the runtime check out if the
    // checked value is loop invariant.
    MPM.add(createLICMPass());
  }

  // After vectorization and unrolling, assume intrinsics may tell us more
  // about pointer alignments.
  MPM.add(createAlignmentFromAssumptionsPass());

  if (!DisableUnitAtATime) {
    // FIXME: We shouldn't bother with this anymore.
    MPM.add(createStripDeadPrototypesPass()); // Get rid of dead prototypes

    // GlobalOpt already deletes dead functions and globals, at -O2 try a
    // late pass of GlobalDCE. It is capable of deleting dead cycles.
    if (OptLevel > 1) {
      MPM.add(createGlobalDCEPass());     // Remove dead fns and globals.
      MPM.add(createConstantMergePass()); // Merge dup global constants
    }
  }

  if (MergeFunctions)
    MPM.add(createMergeFunctionsPass());

  // LoopSink pass sinks instructions hoisted by LICM, which serves as a
  // canonicalization pass that enables other optimizations. As a result,
  // LoopSink pass needs to be a very late IR pass to avoid undoing LICM
  // result too early.
  MPM.add(createLoopSinkPass());
  // Get rid of LCSSA nodes.
  MPM.add(createInstructionSimplifierPass());
  addExtensionsToPM(EP_OptimizerLast, MPM);
}
void PassManagerBuilder::addLTOOptimizationPasses(legacy::PassManagerBase &PM) {
  // Remove unused virtual tables to improve the quality of code generated by
  // whole-program devirtualization and bitset lowering.
  PM.add(createGlobalDCEPass());

  // Provide AliasAnalysis services for optimizations.
  addInitialAliasAnalysisPasses(PM);

  // Allow forcing function attributes as a debugging and tuning aid.
  PM.add(createForceFunctionAttrsLegacyPass());

  // Infer attributes about declarations if possible.
  PM.add(createInferFunctionAttrsLegacyPass());

  if (OptLevel > 1) {
    // Indirect call promotion. This should promote all the targets that are
    // left by the earlier promotion pass that promotes intra-module targets.
    // This two-step promotion is to save the compile time. For LTO, it should
    // produce the same result as if we only do promotion here.
    PM.add(createPGOIndirectCallPromotionLegacyPass(true));

    // Propagate constants at call sites into the functions they call. This
    // opens opportunities for globalopt (and inlining) by substituting function
    // pointers passed as arguments to direct uses of functions.
    PM.add(createIPSCCPPass());
  }

  // Infer attributes about definitions. The readnone attribute in particular is
  // required for virtual constant propagation.
  PM.add(createPostOrderFunctionAttrsLegacyPass());
  PM.add(createReversePostOrderFunctionAttrsPass());

  // Split globals using inrange annotations on GEP indices. This can help
  // improve the quality of generated code when virtual constant propagation or
  // control flow integrity are enabled.
  PM.add(createGlobalSplitPass());

  // Apply whole-program devirtualization and virtual constant propagation.
  PM.add(createWholeProgramDevirtPass());

  // That's all we need at opt level 1.
  if (OptLevel == 1)
    return;

  // Now that we internalized some globals, see if we can hack on them!
  PM.add(createGlobalOptimizerPass());
  // Promote any localized global vars.
  PM.add(createPromoteMemoryToRegisterPass());

  // Linking modules together can lead to duplicated global constants, only
  // keep one copy of each constant.
  PM.add(createConstantMergePass());

  // Remove unused arguments from functions.
  PM.add(createDeadArgEliminationPass());

  // Reduce the code after globalopt and ipsccp. Both can open up significant
  // simplification opportunities, and both can propagate functions through
  // function pointers. When this happens, we often have to resolve varargs
  // calls, etc, so let instcombine do this.
  addInstructionCombiningPass(PM);
  addExtensionsToPM(EP_Peephole, PM);

  // Inline small functions
  bool RunInliner = Inliner;
  if (RunInliner) {
    PM.add(Inliner);
    Inliner = nullptr;
  }

  PM.add(createPruneEHPass()); // Remove dead EH info.

  // Optimize globals again if we ran the inliner.
  if (RunInliner)
    PM.add(createGlobalOptimizerPass());
  PM.add(createGlobalDCEPass()); // Remove dead functions.

  // If we didn't decide to inline a function, check to see if we can
  // transform it to pass arguments by value instead of by reference.
  PM.add(createArgumentPromotionPass());

  // The IPO passes may leave cruft around. Clean up after them.
  addInstructionCombiningPass(PM);
  addExtensionsToPM(EP_Peephole, PM);
  PM.add(createJumpThreadingPass());

  // Break up allocas
  PM.add(createSROAPass());

  // Run a few AA driven optimizations here and now, to cleanup the code.
  PM.add(createPostOrderFunctionAttrsLegacyPass()); // Add nocapture.
  PM.add(createGlobalsAAWrapperPass()); // IP alias analysis.

  PM.add(createLICMPass());                  // Hoist loop invariants.
  if (EnableMLSM)
    PM.add(createMergedLoadStoreMotionPass()); // Merge ld/st in diamonds.
  PM.add(NewGVN ? createNewGVNPass()
                : createGVNPass(DisableGVNLoadPRE)); // Remove redundancies.
  PM.add(createMemCpyOptPass());             // Remove dead memcpys.

  // Nuke dead stores.
  PM.add(createDeadStoreEliminationPass());

  // More loops are countable; try to optimize them.
  PM.add(createIndVarSimplifyPass());
  PM.add(createLoopDeletionPass());
  if (EnableLoopInterchange)
    PM.add(createLoopInterchangePass());

  if (!DisableUnrollLoops)
    PM.add(createSimpleLoopUnrollPass());   // Unroll small loops
  PM.add(createLoopVectorizePass(true, LoopVectorize));
  // The vectorizer may have significantly shortened a loop body; unroll again.
  if (!DisableUnrollLoops)
    PM.add(createLoopUnrollPass());

  // Now that we've optimized loops (in particular loop induction variables),
  // we may have exposed more scalar opportunities. Run parts of the scalar
  // optimizer again at this point.
  addInstructionCombiningPass(PM); // Initial cleanup
  PM.add(createCFGSimplificationPass()); // if-convert
  PM.add(createSCCPPass()); // Propagate exposed constants
  addInstructionCombiningPass(PM); // Clean up again
  PM.add(createBitTrackingDCEPass());

  // More scalar chains could be vectorized due to more alias information
  if (RunSLPAfterLoopVectorization)
    if (SLPVectorize)
      PM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.

  // After vectorization, assume intrinsics may tell us more about pointer
  // alignments.
  PM.add(createAlignmentFromAssumptionsPass());

  if (LoadCombine)
    PM.add(createLoadCombinePass());

  // Cleanup and simplify the code after the scalar optimizations.
  addInstructionCombiningPass(PM);
  addExtensionsToPM(EP_Peephole, PM);

  PM.add(createJumpThreadingPass());
}

void PassManagerBuilder::addLateLTOOptimizationPasses(
    legacy::PassManagerBase &PM) {
  // Delete basic blocks, which optimization passes may have killed.
  PM.add(createCFGSimplificationPass());

  // Drop bodies of available externally objects to improve GlobalDCE.
  PM.add(createEliminateAvailableExternallyPass());

  // Now that we have optimized the program, discard unreachable functions.
  PM.add(createGlobalDCEPass());

  // FIXME: this is profitable (for compiler time) to do at -O0 too, but
  // currently it damages debug info.
  if (MergeFunctions)
    PM.add(createMergeFunctionsPass());
}

void PassManagerBuilder::populateThinLTOPassManager(
    legacy::PassManagerBase &PM) {
  PerformThinLTO = true;

  if (VerifyInput)
    PM.add(createVerifierPass());

  populateModulePassManager(PM);

  if (VerifyOutput)
    PM.add(createVerifierPass());
  PerformThinLTO = false;
}

void PassManagerBuilder::populateLTOPassManager(legacy::PassManagerBase &PM) {
  if (LibraryInfo)
    PM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));

  if (VerifyInput)
    PM.add(createVerifierPass());

  if (OptLevel != 0)
    addLTOOptimizationPasses(PM);

  // Create a function that performs CFI checks for cross-DSO calls with targets
  // in the current module.
  PM.add(createCrossDSOCFIPass());

  // Lower type metadata and the type.test intrinsic. This pass supports Clang's
  // control flow integrity mechanisms (-fsanitize=cfi*) and needs to run at
  // link time if CFI is enabled. The pass does nothing if CFI is disabled.
  PM.add(createLowerTypeTestsPass(LowerTypeTestsSummaryAction::None,
                                  /*Summary=*/nullptr));

  if (OptLevel != 0)
    addLateLTOOptimizationPasses(PM);

  if (VerifyOutput)
    PM.add(createVerifierPass());
}

inline PassManagerBuilder *unwrap(LLVMPassManagerBuilderRef P) {
    return reinterpret_cast<PassManagerBuilder*>(P);
}

inline LLVMPassManagerBuilderRef wrap(PassManagerBuilder *P) {
  return reinterpret_cast<LLVMPassManagerBuilderRef>(P);
}

LLVMPassManagerBuilderRef LLVMPassManagerBuilderCreate() {
  PassManagerBuilder *PMB = new PassManagerBuilder();
  return wrap(PMB);
}

void LLVMPassManagerBuilderDispose(LLVMPassManagerBuilderRef PMB) {
  PassManagerBuilder *Builder = unwrap(PMB);
  delete Builder;
}

void
LLVMPassManagerBuilderSetOptLevel(LLVMPassManagerBuilderRef PMB,
                                  unsigned OptLevel) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->OptLevel = OptLevel;
}

void
LLVMPassManagerBuilderSetSizeLevel(LLVMPassManagerBuilderRef PMB,
                                   unsigned SizeLevel) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->SizeLevel = SizeLevel;
}

void
LLVMPassManagerBuilderSetDisableUnitAtATime(LLVMPassManagerBuilderRef PMB,
                                            LLVMBool Value) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->DisableUnitAtATime = Value;
}

void
LLVMPassManagerBuilderSetDisableUnrollLoops(LLVMPassManagerBuilderRef PMB,
                                            LLVMBool Value) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->DisableUnrollLoops = Value;
}

void
LLVMPassManagerBuilderSetDisableSimplifyLibCalls(LLVMPassManagerBuilderRef PMB,
                                                 LLVMBool Value) {
  // NOTE: The simplify-libcalls pass has been removed.
}

void
LLVMPassManagerBuilderUseInlinerWithThreshold(LLVMPassManagerBuilderRef PMB,
                                              unsigned Threshold) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->Inliner = createFunctionInliningPass(Threshold);
}

void
LLVMPassManagerBuilderPopulateFunctionPassManager(LLVMPassManagerBuilderRef PMB,
                                                  LLVMPassManagerRef PM) {
  PassManagerBuilder *Builder = unwrap(PMB);
  legacy::FunctionPassManager *FPM = unwrap<legacy::FunctionPassManager>(PM);
  Builder->populateFunctionPassManager(*FPM);
}

void
LLVMPassManagerBuilderPopulateModulePassManager(LLVMPassManagerBuilderRef PMB,
                                                LLVMPassManagerRef PM) {
  PassManagerBuilder *Builder = unwrap(PMB);
  legacy::PassManagerBase *MPM = unwrap(PM);
  Builder->populateModulePassManager(*MPM);
}

void LLVMPassManagerBuilderPopulateLTOPassManager(LLVMPassManagerBuilderRef PMB,
                                                  LLVMPassManagerRef PM,
                                                  LLVMBool Internalize,
                                                  LLVMBool RunInliner) {
  PassManagerBuilder *Builder = unwrap(PMB);
  legacy::PassManagerBase *LPM = unwrap(PM);

  // A small backwards compatibility hack. populateLTOPassManager used to take
  // an RunInliner option.
  if (RunInliner && !Builder->Inliner)
    Builder->Inliner = createFunctionInliningPass();

  Builder->populateLTOPassManager(*LPM);
}
Index: head/contrib/llvm/lib/Transforms/Scalar/GVNHoist.cpp
===================================================================
--- head/contrib/llvm/lib/Transforms/Scalar/GVNHoist.cpp	(revision 315015)
+++ head/contrib/llvm/lib/Transforms/Scalar/GVNHoist.cpp	(revision 315016)
@@ -1,1014 +1,1001 @@
//===- GVNHoist.cpp - Hoist scalar and load expressions -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass hoists expressions from branches to a common dominator. It uses
// GVN (global value numbering) to discover expressions computing the same
// values. The primary goals of code-hoisting are:
// 1. To reduce the code size.
// 2. In some cases reduce critical path (by exposing more ILP).
//
// Hoisting may affect the performance in some cases. To mitigate that, hoisting
// is disabled in the following cases.
// 1. Scalars across calls.
// 2. geps when corresponding load/store cannot be hoisted.
//===----------------------------------------------------------------------===//
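As a source-level illustration of the transform this header describes (not part of the pass itself), hoisting a computation that is common to both branches into the dominating block:

    // Before gvn-hoist: 'a + b' is computed on both branches.
    int before(bool c, int a, int b) {
      int x, y;
      if (c) { x = a + b; y = 0; }
      else   { x = 0; y = a + b; }
      return x + y;
    }

    // What hoisting effectively produces: one computation in the
    // common dominator of the two branches.
    int after(bool c, int a, int b) {
      int t = a + b; // hoisted
      int x, y;
      if (c) { x = t; y = 0; }
      else   { x = 0; y = t; }
      return x + y;
    }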
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemorySSA.h"

using namespace llvm;

#define DEBUG_TYPE "gvn-hoist"

STATISTIC(NumHoisted, "Number of instructions hoisted");
STATISTIC(NumRemoved, "Number of instructions removed");
STATISTIC(NumLoadsHoisted, "Number of loads hoisted");
STATISTIC(NumLoadsRemoved, "Number of loads removed");
STATISTIC(NumStoresHoisted, "Number of stores hoisted");
STATISTIC(NumStoresRemoved, "Number of stores removed");
STATISTIC(NumCallsHoisted, "Number of calls hoisted");
STATISTIC(NumCallsRemoved, "Number of calls removed");

static cl::opt<int>
    MaxHoistedThreshold("gvn-max-hoisted", cl::Hidden, cl::init(-1),
                        cl::desc("Max number of instructions to hoist "
                                 "(default unlimited = -1)"));
static cl::opt<int> MaxNumberOfBBSInPath(
    "gvn-hoist-max-bbs", cl::Hidden, cl::init(4),
    cl::desc("Max number of basic blocks on the path between "
             "hoisting locations (default = 4, unlimited = -1)"));

static cl::opt<int> MaxDepthInBB(
    "gvn-hoist-max-depth", cl::Hidden, cl::init(100),
    cl::desc("Hoist instructions from the beginning of the BB up to the "
             "maximum specified depth (default = 100, unlimited = -1)"));

static cl::opt<int>
    MaxChainLength("gvn-hoist-max-chain-length", cl::Hidden, cl::init(10),
                   cl::desc("Maximum length of dependent chains to hoist "
                            "(default = 10, unlimited = -1)"));

namespace {

// Provides a sorting function based on the execution order of two instructions.
struct SortByDFSIn {
private:
  DenseMap<const Value *, unsigned> &DFSNumber;

public:
  SortByDFSIn(DenseMap<const Value *, unsigned> &D) : DFSNumber(D) {}

  // Returns true when A executes before B.
  bool operator()(const Instruction *A, const Instruction *B) const {
    // FIXME: libc++ has a std::sort() algorithm that will call the compare
    // function on the same element.  Once PR20837 is fixed and some more years
    // pass by and all the buildbots have moved to a corrected std::sort(),
    // enable the following assert:
    //
    // assert(A != B);

    const BasicBlock *BA = A->getParent();
    const BasicBlock *BB = B->getParent();
    unsigned ADFS, BDFS;
    if (BA == BB) {
      ADFS = DFSNumber.lookup(A);
      BDFS = DFSNumber.lookup(B);
    } else {
      ADFS = DFSNumber.lookup(BA);
      BDFS = DFSNumber.lookup(BB);
    }
    assert(ADFS && BDFS);
    return ADFS < BDFS;
  }
};

// A map from a pair of VNs to all the instructions with those VNs.
typedef DenseMap<std::pair<unsigned, unsigned>, SmallVector<Instruction *, 4>>
    VNtoInsns;
// An invalid value number Used when inserting a single value number into
// VNtoInsns.
enum : unsigned { InvalidVN = ~2U };

// Records all scalar instructions candidate for code hoisting.
class InsnInfo {
  VNtoInsns VNtoScalars;

public:
  // Inserts I and its value number in VNtoScalars.
  void insert(Instruction *I, GVN::ValueTable &VN) {
    // Scalar instruction.
    unsigned V = VN.lookupOrAdd(I);
    VNtoScalars[{V, InvalidVN}].push_back(I);
  }

  const VNtoInsns &getVNTable() const { return VNtoScalars; }
};

// Records all load instructions candidate for code hoisting.
class LoadInfo {
  VNtoInsns VNtoLoads;

public:
  // Insert Load and the value number of its memory address in VNtoLoads.
  void insert(LoadInst *Load, GVN::ValueTable &VN) {
    if (Load->isSimple()) {
      unsigned V = VN.lookupOrAdd(Load->getPointerOperand());
      VNtoLoads[{V, InvalidVN}].push_back(Load);
    }
  }

  const VNtoInsns &getVNTable() const { return VNtoLoads; }
};

// Records all store instructions candidate for code hoisting.
class StoreInfo {
  VNtoInsns VNtoStores;

public:
  // Insert the Store and a hash number of the store address and the stored
  // value in VNtoStores.
  void insert(StoreInst *Store, GVN::ValueTable &VN) {
    if (!Store->isSimple())
      return;
    // Hash the store address and the stored value.
    Value *Ptr = Store->getPointerOperand();
    Value *Val = Store->getValueOperand();
    VNtoStores[{VN.lookupOrAdd(Ptr), VN.lookupOrAdd(Val)}].push_back(Store);
  }

  const VNtoInsns &getVNTable() const { return VNtoStores; }
};

// Records all call instructions candidate for code hoisting.
class CallInfo {
  VNtoInsns VNtoCallsScalars;
  VNtoInsns VNtoCallsLoads;
  VNtoInsns VNtoCallsStores;

public:
  // Insert Call and its value numbering in one of the VNtoCalls* containers.
  void insert(CallInst *Call, GVN::ValueTable &VN) {
    // A call that doesNotAccessMemory is handled as a Scalar,
    // onlyReadsMemory will be handled as a Load instruction,
    // all other calls will be handled as stores.
    unsigned V = VN.lookupOrAdd(Call);
    auto Entry = std::make_pair(V, InvalidVN);

    if (Call->doesNotAccessMemory())
      VNtoCallsScalars[Entry].push_back(Call);
    else if (Call->onlyReadsMemory())
      VNtoCallsLoads[Entry].push_back(Call);
    else
      VNtoCallsStores[Entry].push_back(Call);
  }

  const VNtoInsns &getScalarVNTable() const { return VNtoCallsScalars; }

  const VNtoInsns &getLoadVNTable() const { return VNtoCallsLoads; }

  const VNtoInsns &getStoreVNTable() const { return VNtoCallsStores; }
};

typedef DenseMap<const BasicBlock *, bool> BBSideEffectsSet;
typedef SmallVector<Instruction *, 4> SmallVecInsn;
typedef SmallVectorImpl<Instruction *> SmallVecImplInsn;

static void combineKnownMetadata(Instruction *ReplInst, Instruction *I) {
  static const unsigned KnownIDs[] = {
      LLVMContext::MD_tbaa,           LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias,        LLVMContext::MD_range,
      LLVMContext::MD_fpmath,         LLVMContext::MD_invariant_load,
      LLVMContext::MD_invariant_group};
  combineMetadata(ReplInst, I, KnownIDs);
}
// This pass hoists common computations across branches sharing common
// dominator. The primary goal is to reduce the code size, and in some
// cases reduce critical path (by exposing more ILP).
class GVNHoist {
public:
  GVNHoist(DominatorTree *DT, AliasAnalysis *AA, MemoryDependenceResults *MD,
-           MemorySSA *MSSA, bool OptForMinSize)
-      : DT(DT), AA(AA), MD(MD), MSSA(MSSA), OptForMinSize(OptForMinSize),
-        HoistingGeps(OptForMinSize), HoistedCtr(0) {
-    // Hoist as far as possible when optimizing for code-size.
-    if (OptForMinSize)
-      MaxNumberOfBBSInPath = -1;
-  }
+           MemorySSA *MSSA)
+      : DT(DT), AA(AA), MD(MD), MSSA(MSSA),
+        HoistingGeps(false),
+        HoistedCtr(0)
+  { }

  bool run(Function &F) {
    VN.setDomTree(DT);
    VN.setAliasAnalysis(AA);
    VN.setMemDep(MD);
    bool Res = false;

    // Perform DFS Numbering of instructions.
    unsigned BBI = 0;
    for (const BasicBlock *BB : depth_first(&F.getEntryBlock())) {
      DFSNumber[BB] = ++BBI;
      unsigned I = 0;
      for (auto &Inst : *BB)
        DFSNumber[&Inst] = ++I;
    }

    int ChainLength = 0;

    // FIXME: use lazy evaluation of VN to avoid the fix-point computation.
    while (1) {
      if (MaxChainLength != -1 && ++ChainLength >= MaxChainLength)
        return Res;

      auto HoistStat = hoistExpressions(F);
      if (HoistStat.first + HoistStat.second == 0)
        return Res;

      if (HoistStat.second > 0)
        // To address a limitation of the current GVN, we need to rerun the
        // hoisting after we hoisted loads or stores in order to be able to
        // hoist all scalars dependent on the hoisted ld/st.
        VN.clear();

      Res = true;
    }

    return Res;
  }

private:
  GVN::ValueTable VN;
  DominatorTree *DT;
  AliasAnalysis *AA;
  MemoryDependenceResults *MD;
  MemorySSA *MSSA;
-  const bool OptForMinSize;
  const bool HoistingGeps;
  DenseMap<const Value *, unsigned> DFSNumber;
  BBSideEffectsSet BBSideEffects;
  int HoistedCtr;

  enum InsKind { Unknown, Scalar, Load, Store };

  // Return true when there are exception handling in BB.
  bool hasEH(const BasicBlock *BB) {
    auto It = BBSideEffects.find(BB);
    if (It != BBSideEffects.end())
      return It->second;

    if (BB->isEHPad() || BB->hasAddressTaken()) {
      BBSideEffects[BB] = true;
      return true;
    }

    if (BB->getTerminator()->mayThrow()) {
      BBSideEffects[BB] = true;
      return true;
    }

    BBSideEffects[BB] = false;
    return false;
  }

  // Return true when a successor of BB dominates A.
  bool successorDominate(const BasicBlock *BB, const BasicBlock *A) {
    for (const BasicBlock *Succ : BB->getTerminator()->successors())
      if (DT->dominates(Succ, A))
        return true;

    return false;
  }

  // Return true when all paths from HoistBB to the end of the function pass
  // through one of the blocks in WL.
  bool hoistingFromAllPaths(const BasicBlock *HoistBB,
                            SmallPtrSetImpl<const BasicBlock *> &WL) {

    // Copy WL as the loop will remove elements from it.
    SmallPtrSet<const BasicBlock *, 2> WorkList(WL.begin(), WL.end());

    for (auto It = df_begin(HoistBB), E = df_end(HoistBB); It != E;) {
      // There exists a path from HoistBB to the exit of the function if we are
      // still iterating in DF traversal and we removed all instructions from
      // the work list.
      if (WorkList.empty())
        return false;

      const BasicBlock *BB = *It;
      if (WorkList.erase(BB)) {
        // Stop DFS traversal when BB is in the work list.
        It.skipChildren();
        continue;
      }

      // Check for end of function, calls that do not return, etc.
      if (!isGuaranteedToTransferExecutionToSuccessor(BB->getTerminator()))
        return false;

      // When reaching the back-edge of a loop, there may be a path through the
      // loop that does not pass through B or C before exiting the loop.
      if (successorDominate(BB, HoistBB))
        return false;

      // Increment DFS traversal when not skipping children.
      ++It;
    }

    return true;
  }
  /* Return true when I1 appears before I2 in the instructions of BB.  */
  bool firstInBB(const Instruction *I1, const Instruction *I2) {
    assert(I1->getParent() == I2->getParent());
    unsigned I1DFS = DFSNumber.lookup(I1);
    unsigned I2DFS = DFSNumber.lookup(I2);
    assert(I1DFS && I2DFS);
    return I1DFS < I2DFS;
  }

  // Return true when there are memory uses of Def in BB.
  bool hasMemoryUse(const Instruction *NewPt, MemoryDef *Def,
                    const BasicBlock *BB) {
    const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
    if (!Acc)
      return false;

    Instruction *OldPt = Def->getMemoryInst();
    const BasicBlock *OldBB = OldPt->getParent();
    const BasicBlock *NewBB = NewPt->getParent();
    bool ReachedNewPt = false;

    for (const MemoryAccess &MA : *Acc)
      if (const MemoryUse *MU = dyn_cast<MemoryUse>(&MA)) {
        Instruction *Insn = MU->getMemoryInst();

        // Do not check whether MU aliases Def when MU occurs after OldPt.
        if (BB == OldBB && firstInBB(OldPt, Insn))
          break;

        // Do not check whether MU aliases Def when MU occurs before NewPt.
        if (BB == NewBB) {
          if (!ReachedNewPt) {
            if (firstInBB(Insn, NewPt))
              continue;
            ReachedNewPt = true;
          }
        }
        if (defClobbersUseOrDef(Def, MU, *AA))
          return true;
      }

    return false;
  }

  // Return true when there are exception handling or loads of memory Def
  // between Def and NewPt. This function is only called for stores: Def is
  // the MemoryDef of the store to be hoisted.

  // Decrement by 1 NBBsOnAllPaths for each block between HoistPt and BB, and
  // return true when the counter NBBsOnAllPaths reaces 0, except when it is
  // initialized to -1 which is unlimited.
  bool hasEHOrLoadsOnPath(const Instruction *NewPt, MemoryDef *Def,
                          int &NBBsOnAllPaths) {
    const BasicBlock *NewBB = NewPt->getParent();
    const BasicBlock *OldBB = Def->getBlock();
    assert(DT->dominates(NewBB, OldBB) && "invalid path");
    assert(DT->dominates(Def->getDefiningAccess()->getBlock(), NewBB) &&
           "def does not dominate new hoisting point");

    // Walk all basic blocks reachable in depth-first iteration on the inverse
    // CFG from OldBB to NewBB. These blocks are all the blocks that may be
    // executed between the execution of NewBB and OldBB. Hoisting an expression
    // from OldBB into NewBB has to be safe on all execution paths.
    for (auto I = idf_begin(OldBB), E = idf_end(OldBB); I != E;) {
      if (*I == NewBB) {
        // Stop traversal when reaching HoistPt.
        I.skipChildren();
        continue;
      }

      // Stop walk once the limit is reached.
      if (NBBsOnAllPaths == 0)
        return true;

      // Impossible to hoist with exceptions on the path.
      if (hasEH(*I))
        return true;

      // Check that we do not move a store past loads.
      if (hasMemoryUse(NewPt, Def, *I))
        return true;

      // -1 is unlimited number of blocks on all paths.
      if (NBBsOnAllPaths != -1)
        --NBBsOnAllPaths;

      ++I;
    }

    return false;
  }

  // Return true when there are exception handling between HoistPt and BB.
  // Decrement by 1 NBBsOnAllPaths for each block between HoistPt and BB, and
  // return true when the counter NBBsOnAllPaths reaches 0, except when it is
  // initialized to -1 which is unlimited.
  bool hasEHOnPath(const BasicBlock *HoistPt, const BasicBlock *BB,
                   int &NBBsOnAllPaths) {
    assert(DT->dominates(HoistPt, BB) && "Invalid path");

    // Walk all basic blocks reachable in depth-first iteration on
    // the inverse CFG from BBInsn to NewHoistPt. These blocks are all the
    // blocks that may be executed between the execution of NewHoistPt and
    // BBInsn. Hoisting an expression from BBInsn into NewHoistPt has to be safe
    // on all execution paths.
    for (auto I = idf_begin(BB), E = idf_end(BB); I != E;) {
      if (*I == HoistPt) {
        // Stop traversal when reaching NewHoistPt.
        I.skipChildren();
        continue;
      }

      // Stop walk once the limit is reached.
      if (NBBsOnAllPaths == 0)
        return true;

      // Impossible to hoist with exceptions on the path.
      if (hasEH(*I))
        return true;

      // -1 is unlimited number of blocks on all paths.
      if (NBBsOnAllPaths != -1)
        --NBBsOnAllPaths;

      ++I;
    }

    return false;
  }

  // Return true when it is safe to hoist a memory load or store U from OldPt
  // to NewPt.
  bool safeToHoistLdSt(const Instruction *NewPt, const Instruction *OldPt,
                       MemoryUseOrDef *U, InsKind K, int &NBBsOnAllPaths) {

    // In place hoisting is safe.
    if (NewPt == OldPt)
      return true;

    const BasicBlock *NewBB = NewPt->getParent();
    const BasicBlock *OldBB = OldPt->getParent();
    const BasicBlock *UBB = U->getBlock();

    // Check for dependences on the Memory SSA.
    MemoryAccess *D = U->getDefiningAccess();
    BasicBlock *DBB = D->getBlock();
    if (DT->properlyDominates(NewBB, DBB))
      // Cannot move the load or store to NewBB above its definition in DBB.
      return false;

    if (NewBB == DBB && !MSSA->isLiveOnEntryDef(D))
      if (auto *UD = dyn_cast<MemoryUseOrDef>(D))
        if (firstInBB(NewPt, UD->getMemoryInst()))
          // Cannot move the load or store to NewPt above its definition in D.
          return false;

    // Check for unsafe hoistings due to side effects.
    if (K == InsKind::Store) {
      if (hasEHOrLoadsOnPath(NewPt, dyn_cast<MemoryDef>(U), NBBsOnAllPaths))
        return false;
    } else if (hasEHOnPath(NewBB, OldBB, NBBsOnAllPaths))
      return false;

    if (UBB == NewBB) {
      if (DT->properlyDominates(DBB, NewBB))
        return true;
      assert(UBB == DBB);
      assert(MSSA->locallyDominates(D, U));
    }

    // No side effects: it is safe to hoist.
    return true;
  }

  // Return true when it is safe to hoist scalar instructions from all blocks in
  // WL to HoistBB.
  bool safeToHoistScalar(const BasicBlock *HoistBB,
                         SmallPtrSetImpl<const BasicBlock *> &WL,
                         int &NBBsOnAllPaths) {
-    // Enable scalar hoisting at -Oz as it is safe to hoist scalars to a place
-    // where they are partially needed.
-    if (OptForMinSize)
-      return true;
-
    // Check that the hoisted expression is needed on all paths.
    if (!hoistingFromAllPaths(HoistBB, WL))
      return false;

    for (const BasicBlock *BB : WL)
      if (hasEHOnPath(HoistBB, BB, NBBsOnAllPaths))
        return false;

    return true;
  }
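The "needed on all paths" checks above guard against speculation. An illustrative C++ counterexample (not from the pass) of why an expression such as a load must not be hoisted above a branch that guards it:

    int risky(bool ok, int *p) {
      int r = 0;
      if (ok)
        r = *p; // hoisting '*p' above the branch would dereference p
      return r; // on the path where ok is false and p may be invalid
    }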
  // Each element of a hoisting list contains the basic block where to hoist and
  // a list of instructions to be hoisted.
  typedef std::pair<BasicBlock *, SmallVecInsn> HoistingPointInfo;
  typedef SmallVector<HoistingPointInfo, 4> HoistingPointList;

  // Partition InstructionsToHoist into a set of candidates which can share a
  // common hoisting point. The partitions are collected in HPL. IsScalar is
  // true when the instructions in InstructionsToHoist are scalars. IsLoad is
  // true when the InstructionsToHoist are loads, false when they are stores.
  void partitionCandidates(SmallVecImplInsn &InstructionsToHoist,
                           HoistingPointList &HPL, InsKind K) {
    // No need to sort for two instructions.
    if (InstructionsToHoist.size() > 2) {
      SortByDFSIn Pred(DFSNumber);
      std::sort(InstructionsToHoist.begin(), InstructionsToHoist.end(), Pred);
    }

    int NumBBsOnAllPaths = MaxNumberOfBBSInPath;

    SmallVecImplInsn::iterator II = InstructionsToHoist.begin();
    SmallVecImplInsn::iterator Start = II;
    Instruction *HoistPt = *II;
    BasicBlock *HoistBB = HoistPt->getParent();
    MemoryUseOrDef *UD;
    if (K != InsKind::Scalar)
      UD = MSSA->getMemoryAccess(HoistPt);

    for (++II; II != InstructionsToHoist.end(); ++II) {
      Instruction *Insn = *II;
      BasicBlock *BB = Insn->getParent();
      BasicBlock *NewHoistBB;
      Instruction *NewHoistPt;

      if (BB == HoistBB) { // Both are in the same Basic Block.
        NewHoistBB = HoistBB;
        NewHoistPt = firstInBB(Insn, HoistPt) ? Insn : HoistPt;
      } else {
        // If the hoisting point contains one of the instructions,
        // then hoist there, otherwise hoist before the terminator.
        NewHoistBB = DT->findNearestCommonDominator(HoistBB, BB);
        if (NewHoistBB == BB)
          NewHoistPt = Insn;
        else if (NewHoistBB == HoistBB)
          NewHoistPt = HoistPt;
        else
          NewHoistPt = NewHoistBB->getTerminator();
      }

      SmallPtrSet<const BasicBlock *, 2> WL;
      WL.insert(HoistBB);
      WL.insert(BB);

      if (K == InsKind::Scalar) {
        if (safeToHoistScalar(NewHoistBB, WL, NumBBsOnAllPaths)) {
          // Extend HoistPt to NewHoistPt.
          HoistPt = NewHoistPt;
          HoistBB = NewHoistBB;
          continue;
        }
      } else {
        // When NewBB already contains an instruction to be hoisted, the
        // expression is needed on all paths.
        // Check that the hoisted expression is needed on all paths: it is
        // unsafe to hoist loads to a place where there may be a path not
        // loading from the same address: for instance there may be a branch on
        // which the address of the load may not be initialized.
        if ((HoistBB == NewHoistBB || BB == NewHoistBB ||
             hoistingFromAllPaths(NewHoistBB, WL)) &&
            // Also check that it is safe to move the load or store from HoistPt
            // to NewHoistPt, and from Insn to NewHoistPt.
            safeToHoistLdSt(NewHoistPt, HoistPt, UD, K, NumBBsOnAllPaths) &&
            safeToHoistLdSt(NewHoistPt, Insn, MSSA->getMemoryAccess(Insn),
                            K, NumBBsOnAllPaths)) {
          // Extend HoistPt to NewHoistPt.
          HoistPt = NewHoistPt;
          HoistBB = NewHoistBB;
          continue;
        }
      }

      // At this point it is not safe to extend the current hoisting to
      // NewHoistPt: save the hoisting list so far.
      if (std::distance(Start, II) > 1)
        HPL.push_back({HoistBB, SmallVecInsn(Start, II)});

      // Start over from BB.
      Start = II;
      if (K != InsKind::Scalar)
        UD = MSSA->getMemoryAccess(*Start);
      HoistPt = Insn;
      HoistBB = BB;
      NumBBsOnAllPaths = MaxNumberOfBBSInPath;
    }

    // Save the last partition.
    if (std::distance(Start, II) > 1)
      HPL.push_back({HoistBB, SmallVecInsn(Start, II)});
  }

  // Initialize HPL from Map.
  void computeInsertionPoints(const VNtoInsns &Map, HoistingPointList &HPL,
                              InsKind K) {
    for (const auto &Entry : Map) {
      if (MaxHoistedThreshold != -1 && ++HoistedCtr > MaxHoistedThreshold)
        return;

      const SmallVecInsn &V = Entry.second;
      if (V.size() < 2)
        continue;

      // Compute the insertion point and the list of expressions to be hoisted.
      SmallVecInsn InstructionsToHoist;
      for (auto I : V)
        if (!hasEH(I->getParent()))
          InstructionsToHoist.push_back(I);

      if (!InstructionsToHoist.empty())
        partitionCandidates(InstructionsToHoist, HPL, K);
    }
  }

  // Return true when all operands of Instr are available at insertion point
  // HoistPt. When limiting the number of hoisted expressions, one could hoist
  // a load without hoisting its access function. So before hoisting any
  // expression, make sure that all its operands are available at insert point.
  bool allOperandsAvailable(const Instruction *I,
                            const BasicBlock *HoistPt) const {
    for (const Use &Op : I->operands())
      if (const auto *Inst = dyn_cast<Instruction>(&Op))
        if (!DT->dominates(Inst->getParent(), HoistPt))
          return false;

    return true;
  }

  // Same as allOperandsAvailable with recursive check for GEP operands.
  bool allGepOperandsAvailable(const Instruction *I,
                               const BasicBlock *HoistPt) const {
    for (const Use &Op : I->operands())
      if (const auto *Inst = dyn_cast<Instruction>(&Op))
        if (!DT->dominates(Inst->getParent(), HoistPt)) {
          if (const GetElementPtrInst *GepOp =
                  dyn_cast<GetElementPtrInst>(Inst)) {
            if (!allGepOperandsAvailable(GepOp, HoistPt))
              return false;
            // Gep is available if all operands of GepOp are available.
          } else {
            // Gep is not available if it has operands other than GEPs that are
            // defined in blocks not dominating HoistPt.
            return false;
          }
        }
    return true;
  }

  // Make all operands of the GEP available.
  void makeGepsAvailable(Instruction *Repl, BasicBlock *HoistPt,
                         const SmallVecInsn &InstructionsToHoist,
                         Instruction *Gep) const {
    assert(allGepOperandsAvailable(Gep, HoistPt) &&
           "GEP operands not available");

    Instruction *ClonedGep = Gep->clone();
    for (unsigned i = 0, e = Gep->getNumOperands(); i != e; ++i)
      if (Instruction *Op = dyn_cast<Instruction>(Gep->getOperand(i))) {

        // Check whether the operand is already available.
        if (DT->dominates(Op->getParent(), HoistPt))
          continue;

        // As a GEP can refer to other GEPs, recursively make all the operands
        // of this GEP available at HoistPt.
        if (GetElementPtrInst *GepOp = dyn_cast<GetElementPtrInst>(Op))
          makeGepsAvailable(ClonedGep, HoistPt, InstructionsToHoist, GepOp);
      }

    // Copy Gep and replace its uses in Repl with ClonedGep.
    ClonedGep->insertBefore(HoistPt->getTerminator());

    // Conservatively discard any optimization hints, they may differ on the
    // other paths.
    ClonedGep->dropUnknownNonDebugMetadata();

    // If we have optimization hints which agree with each other along different
    // paths, preserve them.
    for (const Instruction *OtherInst : InstructionsToHoist) {
      const GetElementPtrInst *OtherGep;
      if (auto *OtherLd = dyn_cast<LoadInst>(OtherInst))
        OtherGep = cast<GetElementPtrInst>(OtherLd->getPointerOperand());
      else
        OtherGep = cast<GetElementPtrInst>(
            cast<StoreInst>(OtherInst)->getPointerOperand());
      ClonedGep->andIRFlags(OtherGep);
    }

    // Replace uses of Gep with ClonedGep in Repl.
    Repl->replaceUsesOfWith(Gep, ClonedGep);
  }

  // In the case Repl is a load or a store, we make all their GEPs
  // available: GEPs are not hoisted by default to avoid the address
  // computations to be hoisted without the associated load or store.
  bool makeGepOperandsAvailable(Instruction *Repl, BasicBlock *HoistPt,
                                const SmallVecInsn &InstructionsToHoist) const {
    // Check whether the GEP of a ld/st can be synthesized at HoistPt.
    GetElementPtrInst *Gep = nullptr;
    Instruction *Val = nullptr;
    if (auto *Ld = dyn_cast<LoadInst>(Repl)) {
      Gep = dyn_cast<GetElementPtrInst>(Ld->getPointerOperand());
    } else if (auto *St = dyn_cast<StoreInst>(Repl)) {
      Gep = dyn_cast<GetElementPtrInst>(St->getPointerOperand());
      Val = dyn_cast<Instruction>(St->getValueOperand());
      // Check that the stored value is available.
      if (Val) {
        if (isa<GetElementPtrInst>(Val)) {
          // Check whether we can compute the GEP at HoistPt.
          if (!allGepOperandsAvailable(Val, HoistPt))
            return false;
        } else if (!DT->dominates(Val->getParent(), HoistPt))
          return false;
      }
    }

    // Check whether we can compute the Gep at HoistPt.
    if (!Gep || !allGepOperandsAvailable(Gep, HoistPt))
      return false;

    makeGepsAvailable(Repl, HoistPt, InstructionsToHoist, Gep);

    if (Val && isa<GetElementPtrInst>(Val))
      makeGepsAvailable(Repl, HoistPt, InstructionsToHoist, Val);

    return true;
  }
bool MoveAccess = true; if (Repl) { // Repl is already in HoistPt: it remains in place. assert(allOperandsAvailable(Repl, HoistPt) && "instruction depends on operands that are not available"); MoveAccess = false; } else { // When we do not find Repl in HoistPt, select the first in the list // and move it to HoistPt. Repl = InstructionsToHoist.front(); // We can move Repl in HoistPt only when all operands are available. // The order in which hoistings are done may influence the availability // of operands. if (!allOperandsAvailable(Repl, HoistPt)) { // When HoistingGeps there is nothing more we can do to make the // operands available: just continue. if (HoistingGeps) continue; // When not HoistingGeps we need to copy the GEPs. if (!makeGepOperandsAvailable(Repl, HoistPt, InstructionsToHoist)) continue; } // Move the instruction at the end of HoistPt. Instruction *Last = HoistPt->getTerminator(); MD->removeInstruction(Repl); Repl->moveBefore(Last); DFSNumber[Repl] = DFSNumber[Last]++; } MemoryAccess *NewMemAcc = MSSA->getMemoryAccess(Repl); if (MoveAccess) { if (MemoryUseOrDef *OldMemAcc = dyn_cast_or_null(NewMemAcc)) { // The definition of this ld/st will not change: ld/st hoisting is // legal when the ld/st is not moved past its current definition. MemoryAccess *Def = OldMemAcc->getDefiningAccess(); NewMemAcc = MSSA->createMemoryAccessInBB(Repl, Def, HoistPt, MemorySSA::End); OldMemAcc->replaceAllUsesWith(NewMemAcc); MSSA->removeMemoryAccess(OldMemAcc); } } if (isa(Repl)) ++NL; else if (isa(Repl)) ++NS; else if (isa(Repl)) ++NC; else // Scalar ++NI; // Remove and rename all other instructions. for (Instruction *I : InstructionsToHoist) if (I != Repl) { ++NR; if (auto *ReplacementLoad = dyn_cast(Repl)) { ReplacementLoad->setAlignment( std::min(ReplacementLoad->getAlignment(), cast(I)->getAlignment())); ++NumLoadsRemoved; } else if (auto *ReplacementStore = dyn_cast(Repl)) { ReplacementStore->setAlignment( std::min(ReplacementStore->getAlignment(), cast(I)->getAlignment())); ++NumStoresRemoved; } else if (auto *ReplacementAlloca = dyn_cast(Repl)) { ReplacementAlloca->setAlignment( std::max(ReplacementAlloca->getAlignment(), cast(I)->getAlignment())); } else if (isa(Repl)) { ++NumCallsRemoved; } if (NewMemAcc) { // Update the uses of the old MSSA access with NewMemAcc. MemoryAccess *OldMA = MSSA->getMemoryAccess(I); OldMA->replaceAllUsesWith(NewMemAcc); MSSA->removeMemoryAccess(OldMA); } Repl->andIRFlags(I); combineKnownMetadata(Repl, I); I->replaceAllUsesWith(Repl); // Also invalidate the Alias Analysis cache. MD->removeInstruction(I); I->eraseFromParent(); } // Remove MemorySSA phi nodes with the same arguments. if (NewMemAcc) { SmallPtrSet UsePhis; for (User *U : NewMemAcc->users()) if (MemoryPhi *Phi = dyn_cast(U)) UsePhis.insert(Phi); for (auto *Phi : UsePhis) { auto In = Phi->incoming_values(); if (all_of(In, [&](Use &U) { return U == NewMemAcc; })) { Phi->replaceAllUsesWith(NewMemAcc); MSSA->removeMemoryAccess(Phi); } } } } NumHoisted += NL + NS + NC + NI; NumRemoved += NR; NumLoadsHoisted += NL; NumStoresHoisted += NS; NumCallsHoisted += NC; return {NI, NL + NC + NS}; } // Hoist all expressions. Returns Number of scalars hoisted // and number of non-scalars hoisted. std::pair hoistExpressions(Function &F) { InsnInfo II; LoadInfo LI; StoreInfo SI; CallInfo CI; for (BasicBlock *BB : depth_first(&F.getEntryBlock())) { int InstructionNb = 0; for (Instruction &I1 : *BB) { // Only hoist the first instructions in BB up to MaxDepthInBB. 
Hoisting // deeper may increase the register pressure and compilation time. if (MaxDepthInBB != -1 && InstructionNb++ >= MaxDepthInBB) break; // Do not value number terminator instructions. if (isa(&I1)) break; if (auto *Load = dyn_cast(&I1)) LI.insert(Load, VN); else if (auto *Store = dyn_cast(&I1)) SI.insert(Store, VN); else if (auto *Call = dyn_cast(&I1)) { if (auto *Intr = dyn_cast(Call)) { if (isa(Intr) || Intr->getIntrinsicID() == Intrinsic::assume) continue; } - if (Call->mayHaveSideEffects()) { - if (!OptForMinSize) - break; - // We may continue hoisting across calls which write to memory. - if (Call->mayThrow()) - break; - } + if (Call->mayHaveSideEffects()) + break; if (Call->isConvergent()) break; CI.insert(Call, VN); } else if (HoistingGeps || !isa(&I1)) // Do not hoist scalars past calls that may write to memory because // that could result in spills later. geps are handled separately. // TODO: We can relax this for targets like AArch64 as they have more // registers than X86. II.insert(&I1, VN); } } HoistingPointList HPL; computeInsertionPoints(II.getVNTable(), HPL, InsKind::Scalar); computeInsertionPoints(LI.getVNTable(), HPL, InsKind::Load); computeInsertionPoints(SI.getVNTable(), HPL, InsKind::Store); computeInsertionPoints(CI.getScalarVNTable(), HPL, InsKind::Scalar); computeInsertionPoints(CI.getLoadVNTable(), HPL, InsKind::Load); computeInsertionPoints(CI.getStoreVNTable(), HPL, InsKind::Store); return hoist(HPL); } }; class GVNHoistLegacyPass : public FunctionPass { public: static char ID; GVNHoistLegacyPass() : FunctionPass(ID) { initializeGVNHoistLegacyPassPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override { if (skipFunction(F)) return false; auto &DT = getAnalysis().getDomTree(); auto &AA = getAnalysis().getAAResults(); auto &MD = getAnalysis().getMemDep(); auto &MSSA = getAnalysis().getMSSA(); - GVNHoist G(&DT, &AA, &MD, &MSSA, F.optForMinSize()); + GVNHoist G(&DT, &AA, &MD, &MSSA); return G.run(F); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addPreserved(); AU.addPreserved(); } }; } // namespace PreservedAnalyses GVNHoistPass::run(Function &F, FunctionAnalysisManager &AM) { DominatorTree &DT = AM.getResult(F); AliasAnalysis &AA = AM.getResult(F); MemoryDependenceResults &MD = AM.getResult(F); MemorySSA &MSSA = AM.getResult(F).getMSSA(); - GVNHoist G(&DT, &AA, &MD, &MSSA, F.optForMinSize()); + GVNHoist G(&DT, &AA, &MD, &MSSA); if (!G.run(F)) return PreservedAnalyses::all(); PreservedAnalyses PA; PA.preserve(); PA.preserve(); return PA; } char GVNHoistLegacyPass::ID = 0; INITIALIZE_PASS_BEGIN(GVNHoistLegacyPass, "gvn-hoist", "Early GVN Hoisting of Expressions", false, false) INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass) INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) INITIALIZE_PASS_END(GVNHoistLegacyPass, "gvn-hoist", "Early GVN Hoisting of Expressions", false, false) FunctionPass *llvm::createGVNHoistPass() { return new GVNHoistLegacyPass(); } Index: head/contrib/llvm/tools/clang/lib/Basic/Version.cpp =================================================================== --- head/contrib/llvm/tools/clang/lib/Basic/Version.cpp (revision 315015) +++ head/contrib/llvm/tools/clang/lib/Basic/Version.cpp (revision 315016) @@ -1,151 +1,151 @@ //===- Version.cpp - Clang Version Number -----------------------*- C++ -*-===// // // The LLVM 
Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines several version-related utility functions for Clang. // //===----------------------------------------------------------------------===// #include "clang/Basic/Version.h" #include "clang/Basic/LLVM.h" #include "clang/Config/config.h" #include "llvm/Support/raw_ostream.h" #include #include #ifdef HAVE_SVN_VERSION_INC # include "SVNVersion.inc" #endif namespace clang { std::string getClangRepositoryPath() { #if defined(CLANG_REPOSITORY_STRING) return CLANG_REPOSITORY_STRING; #else #ifdef SVN_REPOSITORY StringRef URL(SVN_REPOSITORY); #else StringRef URL(""); #endif // If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us // pick up a tag in an SVN export, for example. - StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/branches/release_40/lib/Basic/Version.cpp $"); + StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_400/final/lib/Basic/Version.cpp $"); if (URL.empty()) { URL = SVNRepository.slice(SVNRepository.find(':'), SVNRepository.find("/lib/Basic")); } // Strip off version from a build from an integration branch. URL = URL.slice(0, URL.find("/src/tools/clang")); // Trim path prefix off, assuming path came from standard cfe path. size_t Start = URL.find("cfe/"); if (Start != StringRef::npos) URL = URL.substr(Start + 4); return URL; #endif } std::string getLLVMRepositoryPath() { #ifdef LLVM_REPOSITORY StringRef URL(LLVM_REPOSITORY); #else StringRef URL(""); #endif // Trim path prefix off, assuming path came from standard llvm path. // Leave "llvm/" prefix to distinguish the following llvm revision from the // clang revision. size_t Start = URL.find("llvm/"); if (Start != StringRef::npos) URL = URL.substr(Start); return URL; } std::string getClangRevision() { #ifdef SVN_REVISION return SVN_REVISION; #else return ""; #endif } std::string getLLVMRevision() { #ifdef LLVM_REVISION return LLVM_REVISION; #else return ""; #endif } std::string getClangFullRepositoryVersion() { std::string buf; llvm::raw_string_ostream OS(buf); std::string Path = getClangRepositoryPath(); std::string Revision = getClangRevision(); if (!Path.empty() || !Revision.empty()) { OS << '('; if (!Path.empty()) OS << Path; if (!Revision.empty()) { if (!Path.empty()) OS << ' '; OS << Revision; } OS << ')'; } // Support LLVM in a separate repository. std::string LLVMRev = getLLVMRevision(); if (!LLVMRev.empty() && LLVMRev != Revision) { OS << " ("; std::string LLVMRepo = getLLVMRepositoryPath(); if (!LLVMRepo.empty()) OS << LLVMRepo << ' '; OS << LLVMRev << ')'; } return OS.str(); } std::string getClangFullVersion() { return getClangToolFullVersion("clang"); } std::string getClangToolFullVersion(StringRef ToolName) { std::string buf; llvm::raw_string_ostream OS(buf); #ifdef CLANG_VENDOR OS << CLANG_VENDOR; #endif OS << ToolName << " version " CLANG_VERSION_STRING " " << getClangFullRepositoryVersion(); // If vendor supplied, include the base LLVM version as well. #ifdef CLANG_VENDOR OS << " (based on " << BACKEND_PACKAGE_STRING << ")"; #endif return OS.str(); } std::string getClangFullCPPVersion() { // The version string we report in __VERSION__ is just a compacted version of // the one we report on the command line. 
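getClangRepositoryPath() above falls back on the expanded $URL$ keyword when no repository string was configured, slicing between the first ':' and "/lib/Basic" and then trimming everything up to "cfe/". A std::string approximation of that StringRef logic; the sample keyword value mirrors the one in this diff and the helper name is illustrative:

    #include <iostream>
    #include <string>

    std::string repositoryFromKeyword(const std::string &Keyword) {
      // slice(find(':'), find("/lib/Basic")) in the real code; note the
      // first ':' is the one right after "$URL", so the slice still
      // carries a ": https" prefix until the cfe/ trim below.
      size_t Start = Keyword.find(':');
      size_t End = Keyword.find("/lib/Basic");
      if (Start == std::string::npos || End == std::string::npos || End < Start)
        return "";
      std::string URL = Keyword.substr(Start, End - Start);
      size_t Cfe = URL.find("cfe/"); // assume the standard cfe path prefix
      return Cfe == std::string::npos ? URL : URL.substr(Cfe + 4);
    }

    int main() {
      std::cout << repositoryFromKeyword(
          "$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_400/final"
          "/lib/Basic/Version.cpp $") << '\n';
      // Prints: tags/RELEASE_400/final
    }

This is why the hunk above matters: retagging the $URL$ line from branches/release_40 to tags/RELEASE_400/final changes the path reported in clang's version banner.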
std::string buf; llvm::raw_string_ostream OS(buf); #ifdef CLANG_VENDOR OS << CLANG_VENDOR; #endif OS << "Clang " CLANG_VERSION_STRING " " << getClangFullRepositoryVersion(); return OS.str(); } } // end namespace clang Index: head/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp =================================================================== --- head/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp (revision 315015) +++ head/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp (revision 315016) @@ -1,4027 +1,4027 @@ //===--- ASTReaderDecl.cpp - Decl Deserialization ---------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the ASTReader::ReadDeclRecord method, which is the // entrypoint for loading a decl. // //===----------------------------------------------------------------------===// #include "ASTCommon.h" #include "ASTReaderInternals.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclGroup.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclVisitor.h" #include "clang/AST/Expr.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/SemaDiagnostic.h" #include "clang/Serialization/ASTReader.h" #include "llvm/Support/SaveAndRestore.h" using namespace clang; using namespace clang::serialization; //===----------------------------------------------------------------------===// // Declaration deserialization //===----------------------------------------------------------------------===// namespace clang { class ASTDeclReader : public DeclVisitor { ASTReader &Reader; ASTRecordReader &Record; ASTReader::RecordLocation Loc; const DeclID ThisDeclID; const SourceLocation ThisDeclLoc; typedef ASTReader::RecordData RecordData; TypeID TypeIDForTypeDecl; unsigned AnonymousDeclNumber; GlobalDeclID NamedDeclForTagDecl; IdentifierInfo *TypedefNameForLinkage; bool HasPendingBody; ///\brief A flag to carry the information for a decl from the entity is /// used. We use it to delay the marking of the canonical decl as used until /// the entire declaration is deserialized and merged. bool IsDeclMarkedUsed; uint64_t GetCurrentCursorOffset(); uint64_t ReadLocalOffset() { uint64_t LocalOffset = Record.readInt(); assert(LocalOffset < Loc.Offset && "offset point after current record"); return LocalOffset ? Loc.Offset - LocalOffset : 0; } uint64_t ReadGlobalOffset() { uint64_t Local = ReadLocalOffset(); return Local ? 
Record.getGlobalBitOffset(Local) : 0; } SourceLocation ReadSourceLocation() { return Record.readSourceLocation(); } SourceRange ReadSourceRange() { return Record.readSourceRange(); } TypeSourceInfo *GetTypeSourceInfo() { return Record.getTypeSourceInfo(); } serialization::DeclID ReadDeclID() { return Record.readDeclID(); } std::string ReadString() { return Record.readString(); } void ReadDeclIDList(SmallVectorImpl &IDs) { for (unsigned I = 0, Size = Record.readInt(); I != Size; ++I) IDs.push_back(ReadDeclID()); } Decl *ReadDecl() { return Record.readDecl(); } template T *ReadDeclAs() { return Record.readDeclAs(); } void ReadQualifierInfo(QualifierInfo &Info) { Record.readQualifierInfo(Info); } void ReadDeclarationNameLoc(DeclarationNameLoc &DNLoc, DeclarationName Name) { Record.readDeclarationNameLoc(DNLoc, Name); } serialization::SubmoduleID readSubmoduleID() { if (Record.getIdx() == Record.size()) return 0; return Record.getGlobalSubmoduleID(Record.readInt()); } Module *readModule() { return Record.getSubmodule(readSubmoduleID()); } void ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update); void ReadCXXDefinitionData(struct CXXRecordDecl::DefinitionData &Data); void MergeDefinitionData(CXXRecordDecl *D, struct CXXRecordDecl::DefinitionData &&NewDD); void ReadObjCDefinitionData(struct ObjCInterfaceDecl::DefinitionData &Data); void MergeDefinitionData(ObjCInterfaceDecl *D, struct ObjCInterfaceDecl::DefinitionData &&NewDD); static NamedDecl *getAnonymousDeclForMerging(ASTReader &Reader, DeclContext *DC, unsigned Index); static void setAnonymousDeclForMerging(ASTReader &Reader, DeclContext *DC, unsigned Index, NamedDecl *D); /// Results from loading a RedeclarableDecl. class RedeclarableResult { Decl *MergeWith; GlobalDeclID FirstID; bool IsKeyDecl; public: RedeclarableResult(Decl *MergeWith, GlobalDeclID FirstID, bool IsKeyDecl) : MergeWith(MergeWith), FirstID(FirstID), IsKeyDecl(IsKeyDecl) {} /// \brief Retrieve the first ID. GlobalDeclID getFirstID() const { return FirstID; } /// \brief Is this declaration a key declaration? bool isKeyDecl() const { return IsKeyDecl; } /// \brief Get a known declaration that this should be merged with, if /// any. Decl *getKnownMergeTarget() const { return MergeWith; } }; /// \brief Class used to capture the result of searching for an existing /// declaration of a specific kind and name, along with the ability /// to update the place where this result was found (the declaration /// chain hanging off an identifier or the DeclContext we searched in) /// if requested. 
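FindExistingResult, documented just above and defined next, is a move-only handle whose destructor publishes the found declaration unless the result was suppressed or moved from; the move constructor disarms the source so exactly one owner commits. A reduced sketch of that pattern, with a stand-in callback in place of the reader's bookkeeping:

    #include <functional>
    #include <iostream>
    #include <utility>

    class PendingResult {
      std::function<void()> Commit;
      bool Armed;

    public:
      explicit PendingResult(std::function<void()> C)
          : Commit(std::move(C)), Armed(true) {}
      PendingResult(PendingResult &&Other)
          : Commit(std::move(Other.Commit)), Armed(Other.Armed) {
        Other.Armed = false; // exactly one owner may commit
      }
      PendingResult &operator=(PendingResult &&) = delete;
      void suppress() { Armed = false; } // cf. FindExistingResult::suppress()
      ~PendingResult() {
        if (Armed) Commit();
      }
    };

    int main() {
      {
        PendingResult R([] { std::cout << "registered\n"; });
        PendingResult Moved = std::move(R); // R disarmed; Moved commits
      }
      {
        PendingResult R([] { std::cout << "never printed\n"; });
        R.suppress(); // e.g. when the new decl should stay invisible
      }
    }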
class FindExistingResult { ASTReader &Reader; NamedDecl *New; NamedDecl *Existing; bool AddResult; unsigned AnonymousDeclNumber; IdentifierInfo *TypedefNameForLinkage; void operator=(FindExistingResult &&) = delete; public: FindExistingResult(ASTReader &Reader) : Reader(Reader), New(nullptr), Existing(nullptr), AddResult(false), AnonymousDeclNumber(0), TypedefNameForLinkage(nullptr) {} FindExistingResult(ASTReader &Reader, NamedDecl *New, NamedDecl *Existing, unsigned AnonymousDeclNumber, IdentifierInfo *TypedefNameForLinkage) : Reader(Reader), New(New), Existing(Existing), AddResult(true), AnonymousDeclNumber(AnonymousDeclNumber), TypedefNameForLinkage(TypedefNameForLinkage) {} FindExistingResult(FindExistingResult &&Other) : Reader(Other.Reader), New(Other.New), Existing(Other.Existing), AddResult(Other.AddResult), AnonymousDeclNumber(Other.AnonymousDeclNumber), TypedefNameForLinkage(Other.TypedefNameForLinkage) { Other.AddResult = false; } ~FindExistingResult(); /// \brief Suppress the addition of this result into the known set of /// names. void suppress() { AddResult = false; } operator NamedDecl*() const { return Existing; } template operator T*() const { return dyn_cast_or_null(Existing); } }; static DeclContext *getPrimaryContextForMerging(ASTReader &Reader, DeclContext *DC); FindExistingResult findExisting(NamedDecl *D); public: ASTDeclReader(ASTReader &Reader, ASTRecordReader &Record, ASTReader::RecordLocation Loc, DeclID thisDeclID, SourceLocation ThisDeclLoc) : Reader(Reader), Record(Record), Loc(Loc), ThisDeclID(thisDeclID), ThisDeclLoc(ThisDeclLoc), TypeIDForTypeDecl(0), NamedDeclForTagDecl(0), TypedefNameForLinkage(nullptr), HasPendingBody(false), IsDeclMarkedUsed(false) {} template static Decl *getMostRecentDeclImpl(Redeclarable *D); static Decl *getMostRecentDeclImpl(...); static Decl *getMostRecentDecl(Decl *D); template static void attachPreviousDeclImpl(ASTReader &Reader, Redeclarable *D, Decl *Previous, Decl *Canon); static void attachPreviousDeclImpl(ASTReader &Reader, ...); static void attachPreviousDecl(ASTReader &Reader, Decl *D, Decl *Previous, Decl *Canon); template static void attachLatestDeclImpl(Redeclarable *D, Decl *Latest); static void attachLatestDeclImpl(...); static void attachLatestDecl(Decl *D, Decl *latest); template static void markIncompleteDeclChainImpl(Redeclarable *D); static void markIncompleteDeclChainImpl(...); /// \brief Determine whether this declaration has a pending body. 
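Stepping back to the ReadLocalOffset()/ReadGlobalOffset() helpers defined earlier in this class: offsets are stored relative to the record's own position rather than as absolute bit offsets, so zero can serve as a "none" sentinel and the stored values stay small. A sketch of both sides of that encoding; the writer half and all names are illustrative assumptions:

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Writer side: encode Target (an earlier bit position) relative to Here.
    uint64_t encodeLocalOffset(uint64_t Here, uint64_t Target) {
      if (!Target) return 0; // "no offset" sentinel
      assert(Target < Here && "offset would point after current record");
      return Here - Target;
    }

    // Reader side: mirrors ReadLocalOffset() above.
    uint64_t decodeLocalOffset(uint64_t Here, uint64_t Local) {
      assert(Local < Here && "offset point after current record");
      return Local ? Here - Local : 0;
    }

    int main() {
      uint64_t Here = 1024, Target = 640;
      uint64_t OnDisk = encodeLocalOffset(Here, Target);
      std::cout << decodeLocalOffset(Here, OnDisk) << '\n'; // 640
    }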
bool hasPendingBody() const { return HasPendingBody; } void Visit(Decl *D); void UpdateDecl(Decl *D); static void setNextObjCCategory(ObjCCategoryDecl *Cat, ObjCCategoryDecl *Next) { Cat->NextClassCategory = Next; } void VisitDecl(Decl *D); void VisitPragmaCommentDecl(PragmaCommentDecl *D); void VisitPragmaDetectMismatchDecl(PragmaDetectMismatchDecl *D); void VisitTranslationUnitDecl(TranslationUnitDecl *TU); void VisitNamedDecl(NamedDecl *ND); void VisitLabelDecl(LabelDecl *LD); void VisitNamespaceDecl(NamespaceDecl *D); void VisitUsingDirectiveDecl(UsingDirectiveDecl *D); void VisitNamespaceAliasDecl(NamespaceAliasDecl *D); void VisitTypeDecl(TypeDecl *TD); RedeclarableResult VisitTypedefNameDecl(TypedefNameDecl *TD); void VisitTypedefDecl(TypedefDecl *TD); void VisitTypeAliasDecl(TypeAliasDecl *TD); void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D); RedeclarableResult VisitTagDecl(TagDecl *TD); void VisitEnumDecl(EnumDecl *ED); RedeclarableResult VisitRecordDeclImpl(RecordDecl *RD); void VisitRecordDecl(RecordDecl *RD) { VisitRecordDeclImpl(RD); } RedeclarableResult VisitCXXRecordDeclImpl(CXXRecordDecl *D); void VisitCXXRecordDecl(CXXRecordDecl *D) { VisitCXXRecordDeclImpl(D); } RedeclarableResult VisitClassTemplateSpecializationDeclImpl( ClassTemplateSpecializationDecl *D); void VisitClassTemplateSpecializationDecl( ClassTemplateSpecializationDecl *D) { VisitClassTemplateSpecializationDeclImpl(D); } void VisitClassTemplatePartialSpecializationDecl( ClassTemplatePartialSpecializationDecl *D); void VisitClassScopeFunctionSpecializationDecl( ClassScopeFunctionSpecializationDecl *D); RedeclarableResult VisitVarTemplateSpecializationDeclImpl(VarTemplateSpecializationDecl *D); void VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D) { VisitVarTemplateSpecializationDeclImpl(D); } void VisitVarTemplatePartialSpecializationDecl( VarTemplatePartialSpecializationDecl *D); void VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D); void VisitValueDecl(ValueDecl *VD); void VisitEnumConstantDecl(EnumConstantDecl *ECD); void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D); void VisitDeclaratorDecl(DeclaratorDecl *DD); void VisitFunctionDecl(FunctionDecl *FD); void VisitCXXMethodDecl(CXXMethodDecl *D); void VisitCXXConstructorDecl(CXXConstructorDecl *D); void VisitCXXDestructorDecl(CXXDestructorDecl *D); void VisitCXXConversionDecl(CXXConversionDecl *D); void VisitFieldDecl(FieldDecl *FD); void VisitMSPropertyDecl(MSPropertyDecl *FD); void VisitIndirectFieldDecl(IndirectFieldDecl *FD); RedeclarableResult VisitVarDeclImpl(VarDecl *D); void VisitVarDecl(VarDecl *VD) { VisitVarDeclImpl(VD); } void VisitImplicitParamDecl(ImplicitParamDecl *PD); void VisitParmVarDecl(ParmVarDecl *PD); void VisitDecompositionDecl(DecompositionDecl *DD); void VisitBindingDecl(BindingDecl *BD); void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D); DeclID VisitTemplateDecl(TemplateDecl *D); RedeclarableResult VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D); void VisitClassTemplateDecl(ClassTemplateDecl *D); void VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D); void VisitVarTemplateDecl(VarTemplateDecl *D); void VisitFunctionTemplateDecl(FunctionTemplateDecl *D); void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D); void VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D); void VisitUsingDecl(UsingDecl *D); void VisitUsingPackDecl(UsingPackDecl *D); void VisitUsingShadowDecl(UsingShadowDecl *D); void 
VisitConstructorUsingShadowDecl(ConstructorUsingShadowDecl *D); void VisitLinkageSpecDecl(LinkageSpecDecl *D); void VisitExportDecl(ExportDecl *D); void VisitFileScopeAsmDecl(FileScopeAsmDecl *AD); void VisitImportDecl(ImportDecl *D); void VisitAccessSpecDecl(AccessSpecDecl *D); void VisitFriendDecl(FriendDecl *D); void VisitFriendTemplateDecl(FriendTemplateDecl *D); void VisitStaticAssertDecl(StaticAssertDecl *D); void VisitBlockDecl(BlockDecl *BD); void VisitCapturedDecl(CapturedDecl *CD); void VisitEmptyDecl(EmptyDecl *D); std::pair VisitDeclContext(DeclContext *DC); template RedeclarableResult VisitRedeclarable(Redeclarable *D); template void mergeRedeclarable(Redeclarable *D, RedeclarableResult &Redecl, DeclID TemplatePatternID = 0); template void mergeRedeclarable(Redeclarable *D, T *Existing, RedeclarableResult &Redecl, DeclID TemplatePatternID = 0); template void mergeMergeable(Mergeable *D); void mergeTemplatePattern(RedeclarableTemplateDecl *D, RedeclarableTemplateDecl *Existing, DeclID DsID, bool IsKeyDecl); ObjCTypeParamList *ReadObjCTypeParamList(); // FIXME: Reorder according to DeclNodes.td? void VisitObjCMethodDecl(ObjCMethodDecl *D); void VisitObjCTypeParamDecl(ObjCTypeParamDecl *D); void VisitObjCContainerDecl(ObjCContainerDecl *D); void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D); void VisitObjCIvarDecl(ObjCIvarDecl *D); void VisitObjCProtocolDecl(ObjCProtocolDecl *D); void VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D); void VisitObjCCategoryDecl(ObjCCategoryDecl *D); void VisitObjCImplDecl(ObjCImplDecl *D); void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D); void VisitObjCImplementationDecl(ObjCImplementationDecl *D); void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D); void VisitObjCPropertyDecl(ObjCPropertyDecl *D); void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D); void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D); void VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D); void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D); }; } // end namespace clang namespace { /// Iterator over the redeclarations of a declaration that have already /// been merged into the same redeclaration chain. template class MergedRedeclIterator { DeclT *Start, *Canonical, *Current; public: MergedRedeclIterator() : Current(nullptr) {} MergedRedeclIterator(DeclT *Start) : Start(Start), Canonical(nullptr), Current(Start) {} DeclT *operator*() { return Current; } MergedRedeclIterator &operator++() { if (Current->isFirstDecl()) { Canonical = Current; Current = Current->getMostRecentDecl(); } else Current = Current->getPreviousDecl(); // If we started in the merged portion, we'll reach our start position // eventually. Otherwise, we'll never reach it, but the second declaration // we reached was the canonical declaration, so stop when we see that one // again. if (Current == Start || Current == Canonical) Current = nullptr; return *this; } friend bool operator!=(const MergedRedeclIterator &A, const MergedRedeclIterator &B) { return A.Current != B.Current; } }; } // end anonymous namespace template static llvm::iterator_range> merged_redecls(DeclT *D) { return llvm::make_range(MergedRedeclIterator(D), MergedRedeclIterator()); } uint64_t ASTDeclReader::GetCurrentCursorOffset() { return Loc.F->DeclsCursor.GetCurrentBitNo() + Loc.F->GlobalBitOffset; } void ASTDeclReader::Visit(Decl *D) { DeclVisitor::Visit(D); // At this point we have deserialized and merged the decl and it is safe to // update its canonical decl to signal that the entire entity is used. 
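The MergedRedeclIterator defined above walks a redeclaration chain backwards and, on reaching the first declaration, jumps to the most recent one, stopping when it returns to its start or revisits the canonical declaration. A toy traversal with a stand-in Decl node (the three-node chain is an illustrative assumption):

    #include <iostream>
    #include <vector>

    struct Decl {
      int ID;
      Decl *Prev = nullptr;       // previous redeclaration (null => first decl)
      Decl *MostRecent = nullptr; // only meaningful on the first decl
      bool isFirstDecl() const { return Prev == nullptr; }
    };

    // Same stopping rule as MergedRedeclIterator::operator++ above.
    std::vector<int> mergedRedecls(Decl *Start) {
      std::vector<int> Visited;
      Decl *Canonical = nullptr;
      for (Decl *Cur = Start; Cur;) {
        Visited.push_back(Cur->ID);
        if (Cur->isFirstDecl()) {
          Canonical = Cur;
          Cur = Cur->MostRecent; // wrap to the newest declaration
        } else {
          Cur = Cur->Prev;
        }
        if (Cur == Start || Cur == Canonical)
          break;
      }
      return Visited;
    }

    int main() {
      // Chain: D1 (first) <- D2 <- D3 (most recent).
      Decl D1{1}, D2{2}, D3{3};
      D2.Prev = &D1; D3.Prev = &D2; D1.MostRecent = &D3;
      for (int ID : mergedRedecls(&D2)) // starting mid-chain
        std::cout << ID << ' ';         // prints: 2 1 3
      std::cout << '\n';
    }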
D->getCanonicalDecl()->Used |= IsDeclMarkedUsed; IsDeclMarkedUsed = false; if (DeclaratorDecl *DD = dyn_cast(D)) { if (DD->DeclInfo) { DeclaratorDecl::ExtInfo *Info = DD->DeclInfo.get(); Info->TInfo = GetTypeSourceInfo(); } else { DD->DeclInfo = GetTypeSourceInfo(); } } if (TypeDecl *TD = dyn_cast(D)) { // We have a fully initialized TypeDecl. Read its type now. TD->setTypeForDecl(Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull()); // If this is a tag declaration with a typedef name for linkage, it's safe // to load that typedef now. if (NamedDeclForTagDecl) cast(D)->TypedefNameDeclOrQualifier = cast(Reader.GetDecl(NamedDeclForTagDecl)); } else if (ObjCInterfaceDecl *ID = dyn_cast(D)) { // If we have a fully initialized TypeDecl, we can safely read its type now. ID->TypeForDecl = Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull(); } else if (FunctionDecl *FD = dyn_cast(D)) { // FunctionDecl's body was written last after all other Stmts/Exprs. // We only read it if FD doesn't already have a body (e.g., from another // module). // FIXME: Can we diagnose ODR violations somehow? if (Record.readInt()) { if (auto *CD = dyn_cast(FD)) { CD->NumCtorInitializers = Record.readInt(); if (CD->NumCtorInitializers) CD->CtorInitializers = ReadGlobalOffset(); } Reader.PendingBodies[FD] = GetCurrentCursorOffset(); HasPendingBody = true; } } } void ASTDeclReader::VisitDecl(Decl *D) { if (D->isTemplateParameter() || D->isTemplateParameterPack() || isa(D)) { // We don't want to deserialize the DeclContext of a template // parameter or of a parameter of a function template immediately. These // entities might be used in the formulation of their DeclContext (for // example, a function parameter can be used in decltype() in the trailing // return type of the function). Use the translation unit DeclContext as a // placeholder. GlobalDeclID SemaDCIDForTemplateParmDecl = ReadDeclID(); GlobalDeclID LexicalDCIDForTemplateParmDecl = ReadDeclID(); if (!LexicalDCIDForTemplateParmDecl) LexicalDCIDForTemplateParmDecl = SemaDCIDForTemplateParmDecl; Reader.addPendingDeclContextInfo(D, SemaDCIDForTemplateParmDecl, LexicalDCIDForTemplateParmDecl); D->setDeclContext(Reader.getContext().getTranslationUnitDecl()); } else { DeclContext *SemaDC = ReadDeclAs(); DeclContext *LexicalDC = ReadDeclAs(); if (!LexicalDC) LexicalDC = SemaDC; DeclContext *MergedSemaDC = Reader.MergedDeclContexts.lookup(SemaDC); // Avoid calling setLexicalDeclContext() directly because it uses // Decl::getASTContext() internally which is unsafe during deserialization. D->setDeclContextsImpl(MergedSemaDC ? MergedSemaDC : SemaDC, LexicalDC, Reader.getContext()); } D->setLocation(ThisDeclLoc); D->setInvalidDecl(Record.readInt()); if (Record.readInt()) { // hasAttrs AttrVec Attrs; Record.readAttributes(Attrs); // Avoid calling setAttrs() directly because it uses Decl::getASTContext() // internally which is unsafe during deserialization. D->setAttrsImpl(Attrs, Reader.getContext()); } D->setImplicit(Record.readInt()); D->Used = Record.readInt(); IsDeclMarkedUsed |= D->Used; D->setReferenced(Record.readInt()); D->setTopLevelDeclInObjCContainer(Record.readInt()); D->setAccess((AccessSpecifier)Record.readInt()); D->FromASTFile = true; D->setModulePrivate(Record.readInt()); D->Hidden = D->isModulePrivate(); // Determine whether this declaration is part of a (sub)module. If so, it // may not yet be visible. if (unsigned SubmoduleID = readSubmoduleID()) { // Store the owning submodule ID in the declaration.
D->setOwningModuleID(SubmoduleID); if (D->Hidden) { // Module-private declarations are never visible, so there is no work to do. } else if (Reader.getContext().getLangOpts().ModulesLocalVisibility) { // If local visibility is being tracked, this declaration will become // hidden and visible as the owning module does. Inform Sema that this // declaration might not be visible. D->Hidden = true; } else if (Module *Owner = Reader.getSubmodule(SubmoduleID)) { if (Owner->NameVisibility != Module::AllVisible) { // The owning module is not visible. Mark this declaration as hidden. D->Hidden = true; // Note that this declaration was hidden because its owning module is // not yet visible. Reader.HiddenNamesMap[Owner].push_back(D); } } } } void ASTDeclReader::VisitPragmaCommentDecl(PragmaCommentDecl *D) { VisitDecl(D); D->setLocation(ReadSourceLocation()); D->CommentKind = (PragmaMSCommentKind)Record.readInt(); std::string Arg = ReadString(); memcpy(D->getTrailingObjects(), Arg.data(), Arg.size()); D->getTrailingObjects()[Arg.size()] = '\0'; } void ASTDeclReader::VisitPragmaDetectMismatchDecl(PragmaDetectMismatchDecl *D) { VisitDecl(D); D->setLocation(ReadSourceLocation()); std::string Name = ReadString(); memcpy(D->getTrailingObjects(), Name.data(), Name.size()); D->getTrailingObjects()[Name.size()] = '\0'; D->ValueStart = Name.size() + 1; std::string Value = ReadString(); memcpy(D->getTrailingObjects() + D->ValueStart, Value.data(), Value.size()); D->getTrailingObjects()[D->ValueStart + Value.size()] = '\0'; } void ASTDeclReader::VisitTranslationUnitDecl(TranslationUnitDecl *TU) { llvm_unreachable("Translation units are not serialized"); } void ASTDeclReader::VisitNamedDecl(NamedDecl *ND) { VisitDecl(ND); ND->setDeclName(Record.readDeclarationName()); AnonymousDeclNumber = Record.readInt(); } void ASTDeclReader::VisitTypeDecl(TypeDecl *TD) { VisitNamedDecl(TD); TD->setLocStart(ReadSourceLocation()); // Delay type reading until after we have fully initialized the decl. TypeIDForTypeDecl = Record.getGlobalTypeID(Record.readInt()); } ASTDeclReader::RedeclarableResult ASTDeclReader::VisitTypedefNameDecl(TypedefNameDecl *TD) { RedeclarableResult Redecl = VisitRedeclarable(TD); VisitTypeDecl(TD); TypeSourceInfo *TInfo = GetTypeSourceInfo(); if (Record.readInt()) { // isModed QualType modedT = Record.readType(); TD->setModedTypeSourceInfo(TInfo, modedT); } else TD->setTypeSourceInfo(TInfo); return Redecl; } void ASTDeclReader::VisitTypedefDecl(TypedefDecl *TD) { RedeclarableResult Redecl = VisitTypedefNameDecl(TD); mergeRedeclarable(TD, Redecl); } void ASTDeclReader::VisitTypeAliasDecl(TypeAliasDecl *TD) { RedeclarableResult Redecl = VisitTypedefNameDecl(TD); if (auto *Template = ReadDeclAs()) // Merged when we merge the template. 
TD->setDescribedAliasTemplate(Template); else mergeRedeclarable(TD, Redecl); } ASTDeclReader::RedeclarableResult ASTDeclReader::VisitTagDecl(TagDecl *TD) { RedeclarableResult Redecl = VisitRedeclarable(TD); VisitTypeDecl(TD); TD->IdentifierNamespace = Record.readInt(); TD->setTagKind((TagDecl::TagKind)Record.readInt()); if (!isa(TD)) TD->setCompleteDefinition(Record.readInt()); TD->setEmbeddedInDeclarator(Record.readInt()); TD->setFreeStanding(Record.readInt()); TD->setCompleteDefinitionRequired(Record.readInt()); TD->setBraceRange(ReadSourceRange()); switch (Record.readInt()) { case 0: break; case 1: { // ExtInfo TagDecl::ExtInfo *Info = new (Reader.getContext()) TagDecl::ExtInfo(); ReadQualifierInfo(*Info); TD->TypedefNameDeclOrQualifier = Info; break; } case 2: // TypedefNameForAnonDecl NamedDeclForTagDecl = ReadDeclID(); TypedefNameForLinkage = Record.getIdentifierInfo(); break; default: llvm_unreachable("unexpected tag info kind"); } if (!isa(TD)) mergeRedeclarable(TD, Redecl); return Redecl; } void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) { VisitTagDecl(ED); if (TypeSourceInfo *TI = GetTypeSourceInfo()) ED->setIntegerTypeSourceInfo(TI); else ED->setIntegerType(Record.readType()); ED->setPromotionType(Record.readType()); ED->setNumPositiveBits(Record.readInt()); ED->setNumNegativeBits(Record.readInt()); ED->IsScoped = Record.readInt(); ED->IsScopedUsingClassTag = Record.readInt(); ED->IsFixed = Record.readInt(); // If this is a definition subject to the ODR, and we already have a // definition, merge this one into it. if (ED->IsCompleteDefinition && Reader.getContext().getLangOpts().Modules && Reader.getContext().getLangOpts().CPlusPlus) { EnumDecl *&OldDef = Reader.EnumDefinitions[ED->getCanonicalDecl()]; if (!OldDef) { // This is the first time we've seen an imported definition. Look for a // local definition before deciding that we are the first definition. 
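The enum-merging logic above keeps the first definition seen for each canonical declaration; the loop that follows it first scans already-merged redeclarations for a local definition before conceding that this import is the definition of record. A sketch of the bookkeeping once the winner is chosen, with stand-in types for EnumDecl and the reader's tables:

    #include <iostream>
    #include <map>
    #include <utility>

    struct EnumDef { int Canonical; bool IsCompleteDefinition = true; };

    std::map<int, EnumDef *> EnumDefinitions;          // canonical -> kept def
    std::map<EnumDef *, EnumDef *> MergedDeclContexts; // demoted -> kept def

    void mergeDefinition(EnumDef *ED) {
      EnumDef *&OldDef = EnumDefinitions[ED->Canonical];
      if (OldDef) {
        // A definition already exists: demote this one and remember the merge.
        MergedDeclContexts.emplace(ED, OldDef);
        ED->IsCompleteDefinition = false;
      } else {
        OldDef = ED; // first definition seen becomes the definition of record
      }
    }

    int main() {
      EnumDef A{42}, B{42}; // two imported definitions of the same enum
      mergeDefinition(&A);
      mergeDefinition(&B);
      std::cout << A.IsCompleteDefinition << ' '
                << B.IsCompleteDefinition << '\n'; // 1 0
    }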
for (auto *D : merged_redecls(ED->getCanonicalDecl())) { if (!D->isFromASTFile() && D->isCompleteDefinition()) { OldDef = D; break; } } } if (OldDef) { Reader.MergedDeclContexts.insert(std::make_pair(ED, OldDef)); ED->IsCompleteDefinition = false; Reader.mergeDefinitionVisibility(OldDef, ED); } else { OldDef = ED; } } if (EnumDecl *InstED = ReadDeclAs()) { TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record.readInt(); SourceLocation POI = ReadSourceLocation(); ED->setInstantiationOfMemberEnum(Reader.getContext(), InstED, TSK); ED->getMemberSpecializationInfo()->setPointOfInstantiation(POI); } } ASTDeclReader::RedeclarableResult ASTDeclReader::VisitRecordDeclImpl(RecordDecl *RD) { RedeclarableResult Redecl = VisitTagDecl(RD); RD->setHasFlexibleArrayMember(Record.readInt()); RD->setAnonymousStructOrUnion(Record.readInt()); RD->setHasObjectMember(Record.readInt()); RD->setHasVolatileMember(Record.readInt()); return Redecl; } void ASTDeclReader::VisitValueDecl(ValueDecl *VD) { VisitNamedDecl(VD); VD->setType(Record.readType()); } void ASTDeclReader::VisitEnumConstantDecl(EnumConstantDecl *ECD) { VisitValueDecl(ECD); if (Record.readInt()) ECD->setInitExpr(Record.readExpr()); ECD->setInitVal(Record.readAPSInt()); mergeMergeable(ECD); } void ASTDeclReader::VisitDeclaratorDecl(DeclaratorDecl *DD) { VisitValueDecl(DD); DD->setInnerLocStart(ReadSourceLocation()); if (Record.readInt()) { // hasExtInfo DeclaratorDecl::ExtInfo *Info = new (Reader.getContext()) DeclaratorDecl::ExtInfo(); ReadQualifierInfo(*Info); DD->DeclInfo = Info; } } void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) { RedeclarableResult Redecl = VisitRedeclarable(FD); VisitDeclaratorDecl(FD); ReadDeclarationNameLoc(FD->DNLoc, FD->getDeclName()); FD->IdentifierNamespace = Record.readInt(); // FunctionDecl's body is handled last at ASTDeclReader::Visit, // after everything else is read. FD->SClass = (StorageClass)Record.readInt(); FD->IsInline = Record.readInt(); FD->IsInlineSpecified = Record.readInt(); FD->IsVirtualAsWritten = Record.readInt(); FD->IsPure = Record.readInt(); FD->HasInheritedPrototype = Record.readInt(); FD->HasWrittenPrototype = Record.readInt(); FD->IsDeleted = Record.readInt(); FD->IsTrivial = Record.readInt(); FD->IsDefaulted = Record.readInt(); FD->IsExplicitlyDefaulted = Record.readInt(); FD->HasImplicitReturnZero = Record.readInt(); FD->IsConstexpr = Record.readInt(); FD->UsesSEHTry = Record.readInt(); FD->HasSkippedBody = Record.readInt(); FD->IsLateTemplateParsed = Record.readInt(); FD->setCachedLinkage(Linkage(Record.readInt())); FD->EndRangeLoc = ReadSourceLocation(); switch ((FunctionDecl::TemplatedKind)Record.readInt()) { case FunctionDecl::TK_NonTemplate: mergeRedeclarable(FD, Redecl); break; case FunctionDecl::TK_FunctionTemplate: // Merged when we merge the template. FD->setDescribedFunctionTemplate(ReadDeclAs()); break; case FunctionDecl::TK_MemberSpecialization: { FunctionDecl *InstFD = ReadDeclAs(); TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record.readInt(); SourceLocation POI = ReadSourceLocation(); FD->setInstantiationOfMemberFunction(Reader.getContext(), InstFD, TSK); FD->getMemberSpecializationInfo()->setPointOfInstantiation(POI); mergeRedeclarable(FD, Redecl); break; } case FunctionDecl::TK_FunctionTemplateSpecialization: { FunctionTemplateDecl *Template = ReadDeclAs(); TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record.readInt(); // Template arguments. 
SmallVector TemplArgs; Record.readTemplateArgumentList(TemplArgs, /*Canonicalize*/ true); // Template args as written. SmallVector TemplArgLocs; SourceLocation LAngleLoc, RAngleLoc; bool HasTemplateArgumentsAsWritten = Record.readInt(); if (HasTemplateArgumentsAsWritten) { unsigned NumTemplateArgLocs = Record.readInt(); TemplArgLocs.reserve(NumTemplateArgLocs); for (unsigned i=0; i != NumTemplateArgLocs; ++i) TemplArgLocs.push_back(Record.readTemplateArgumentLoc()); LAngleLoc = ReadSourceLocation(); RAngleLoc = ReadSourceLocation(); } SourceLocation POI = ReadSourceLocation(); ASTContext &C = Reader.getContext(); TemplateArgumentList *TemplArgList = TemplateArgumentList::CreateCopy(C, TemplArgs); TemplateArgumentListInfo TemplArgsInfo(LAngleLoc, RAngleLoc); for (unsigned i=0, e = TemplArgLocs.size(); i != e; ++i) TemplArgsInfo.addArgument(TemplArgLocs[i]); FunctionTemplateSpecializationInfo *FTInfo = FunctionTemplateSpecializationInfo::Create(C, FD, Template, TSK, TemplArgList, HasTemplateArgumentsAsWritten ? &TemplArgsInfo : nullptr, POI); FD->TemplateOrSpecialization = FTInfo; if (FD->isCanonicalDecl()) { // if canonical add to template's set. // The template that contains the specializations set. It's not safe to // use getCanonicalDecl on Template since it may still be initializing. FunctionTemplateDecl *CanonTemplate = ReadDeclAs(); // Get the InsertPos by FindNodeOrInsertPos() instead of calling // InsertNode(FTInfo) directly to avoid the getASTContext() call in // FunctionTemplateSpecializationInfo's Profile(). // We avoid getASTContext because a decl in the parent hierarchy may // be initializing. llvm::FoldingSetNodeID ID; FunctionTemplateSpecializationInfo::Profile(ID, TemplArgs, C); void *InsertPos = nullptr; FunctionTemplateDecl::Common *CommonPtr = CanonTemplate->getCommonPtr(); FunctionTemplateSpecializationInfo *ExistingInfo = CommonPtr->Specializations.FindNodeOrInsertPos(ID, InsertPos); if (InsertPos) CommonPtr->Specializations.InsertNode(FTInfo, InsertPos); else { assert(Reader.getContext().getLangOpts().Modules && "already deserialized this template specialization"); mergeRedeclarable(FD, ExistingInfo->Function, Redecl); } } break; } case FunctionDecl::TK_DependentFunctionTemplateSpecialization: { // Templates. UnresolvedSet<8> TemplDecls; unsigned NumTemplates = Record.readInt(); while (NumTemplates--) TemplDecls.addDecl(ReadDeclAs()); // Templates args. TemplateArgumentListInfo TemplArgs; unsigned NumArgs = Record.readInt(); while (NumArgs--) TemplArgs.addArgument(Record.readTemplateArgumentLoc()); TemplArgs.setLAngleLoc(ReadSourceLocation()); TemplArgs.setRAngleLoc(ReadSourceLocation()); FD->setDependentTemplateSpecialization(Reader.getContext(), TemplDecls, TemplArgs); // These are not merged; we don't need to merge redeclarations of dependent // template friends. break; } } // Read in the parameters. unsigned NumParams = Record.readInt(); SmallVector Params; Params.reserve(NumParams); for (unsigned I = 0; I != NumParams; ++I) Params.push_back(ReadDeclAs()); FD->setParams(Reader.getContext(), Params); } void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) { VisitNamedDecl(MD); if (Record.readInt()) { // Load the body on-demand. Most clients won't care, because method // definitions rarely show up in headers. 
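VisitObjCMethodDecl above begins by checking whether a body was serialized at all; the statements that follow record the current cursor offset in PendingBodies instead of deserializing the body eagerly, since method definitions rarely show up in headers. A sketch of that lazy scheme; the decode callback and map key are illustrative assumptions:

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>

    struct LazyBodies {
      std::map<int, uint64_t> PendingBodies; // decl ID -> saved cursor offset
      std::function<std::string(uint64_t)> DecodeAt; // stand-in deserializer

      std::optional<std::string> getBody(int DeclID) {
        auto It = PendingBodies.find(DeclID);
        if (It == PendingBodies.end())
          return std::nullopt;   // no body was serialized for this decl
        std::string Body = DecodeAt(It->second);
        PendingBodies.erase(It); // decoded once; no longer pending
        return Body;
      }
    };

    int main() {
      LazyBodies L;
      L.DecodeAt = [](uint64_t Off) { return "body@" + std::to_string(Off); };
      L.PendingBodies[7] = 4096; // recorded while reading the decl
      if (auto B = L.getBody(7))
        std::cout << *B << '\n'; // body@4096
    }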
Reader.PendingBodies[MD] = GetCurrentCursorOffset(); HasPendingBody = true; MD->setSelfDecl(ReadDeclAs()); MD->setCmdDecl(ReadDeclAs()); } MD->setInstanceMethod(Record.readInt()); MD->setVariadic(Record.readInt()); MD->setPropertyAccessor(Record.readInt()); MD->setDefined(Record.readInt()); MD->IsOverriding = Record.readInt(); MD->HasSkippedBody = Record.readInt(); MD->IsRedeclaration = Record.readInt(); MD->HasRedeclaration = Record.readInt(); if (MD->HasRedeclaration) Reader.getContext().setObjCMethodRedeclaration(MD, ReadDeclAs()); MD->setDeclImplementation((ObjCMethodDecl::ImplementationControl)Record.readInt()); MD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record.readInt()); MD->SetRelatedResultType(Record.readInt()); MD->setReturnType(Record.readType()); MD->setReturnTypeSourceInfo(GetTypeSourceInfo()); MD->DeclEndLoc = ReadSourceLocation(); unsigned NumParams = Record.readInt(); SmallVector Params; Params.reserve(NumParams); for (unsigned I = 0; I != NumParams; ++I) Params.push_back(ReadDeclAs()); MD->SelLocsKind = Record.readInt(); unsigned NumStoredSelLocs = Record.readInt(); SmallVector SelLocs; SelLocs.reserve(NumStoredSelLocs); for (unsigned i = 0; i != NumStoredSelLocs; ++i) SelLocs.push_back(ReadSourceLocation()); MD->setParamsAndSelLocs(Reader.getContext(), Params, SelLocs); } void ASTDeclReader::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) { VisitTypedefNameDecl(D); D->Variance = Record.readInt(); D->Index = Record.readInt(); D->VarianceLoc = ReadSourceLocation(); D->ColonLoc = ReadSourceLocation(); } void ASTDeclReader::VisitObjCContainerDecl(ObjCContainerDecl *CD) { VisitNamedDecl(CD); CD->setAtStartLoc(ReadSourceLocation()); CD->setAtEndRange(ReadSourceRange()); } ObjCTypeParamList *ASTDeclReader::ReadObjCTypeParamList() { unsigned numParams = Record.readInt(); if (numParams == 0) return nullptr; SmallVector typeParams; typeParams.reserve(numParams); for (unsigned i = 0; i != numParams; ++i) { auto typeParam = ReadDeclAs(); if (!typeParam) return nullptr; typeParams.push_back(typeParam); } SourceLocation lAngleLoc = ReadSourceLocation(); SourceLocation rAngleLoc = ReadSourceLocation(); return ObjCTypeParamList::create(Reader.getContext(), lAngleLoc, typeParams, rAngleLoc); } void ASTDeclReader::ReadObjCDefinitionData( struct ObjCInterfaceDecl::DefinitionData &Data) { // Read the superclass. Data.SuperClassTInfo = GetTypeSourceInfo(); Data.EndLoc = ReadSourceLocation(); Data.HasDesignatedInitializers = Record.readInt(); // Read the directly referenced protocols and their SourceLocations. unsigned NumProtocols = Record.readInt(); SmallVector Protocols; Protocols.reserve(NumProtocols); for (unsigned I = 0; I != NumProtocols; ++I) Protocols.push_back(ReadDeclAs()); SmallVector ProtoLocs; ProtoLocs.reserve(NumProtocols); for (unsigned I = 0; I != NumProtocols; ++I) ProtoLocs.push_back(ReadSourceLocation()); Data.ReferencedProtocols.set(Protocols.data(), NumProtocols, ProtoLocs.data(), Reader.getContext()); // Read the transitive closure of protocols referenced by this class. NumProtocols = Record.readInt(); Protocols.clear(); Protocols.reserve(NumProtocols); for (unsigned I = 0; I != NumProtocols; ++I) Protocols.push_back(ReadDeclAs()); Data.AllReferencedProtocols.set(Protocols.data(), NumProtocols, Reader.getContext()); } void ASTDeclReader::MergeDefinitionData(ObjCInterfaceDecl *D, struct ObjCInterfaceDecl::DefinitionData &&NewDD) { // FIXME: odr checking? 
} void ASTDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) { RedeclarableResult Redecl = VisitRedeclarable(ID); VisitObjCContainerDecl(ID); TypeIDForTypeDecl = Record.getGlobalTypeID(Record.readInt()); mergeRedeclarable(ID, Redecl); ID->TypeParamList = ReadObjCTypeParamList(); if (Record.readInt()) { // Read the definition. ID->allocateDefinitionData(); ReadObjCDefinitionData(ID->data()); ObjCInterfaceDecl *Canon = ID->getCanonicalDecl(); if (Canon->Data.getPointer()) { // If we already have a definition, keep the definition invariant and // merge the data. MergeDefinitionData(Canon, std::move(ID->data())); ID->Data = Canon->Data; } else { // Set the definition data of the canonical declaration, so other // redeclarations will see it. ID->getCanonicalDecl()->Data = ID->Data; // We will rebuild this list lazily. ID->setIvarList(nullptr); } // Note that we have deserialized a definition. Reader.PendingDefinitions.insert(ID); // Note that we've loaded this Objective-C class. Reader.ObjCClassesLoaded.push_back(ID); } else { ID->Data = ID->getCanonicalDecl()->Data; } } void ASTDeclReader::VisitObjCIvarDecl(ObjCIvarDecl *IVD) { VisitFieldDecl(IVD); IVD->setAccessControl((ObjCIvarDecl::AccessControl)Record.readInt()); // This field will be built lazily. IVD->setNextIvar(nullptr); bool synth = Record.readInt(); IVD->setSynthesize(synth); } void ASTDeclReader::VisitObjCProtocolDecl(ObjCProtocolDecl *PD) { RedeclarableResult Redecl = VisitRedeclarable(PD); VisitObjCContainerDecl(PD); mergeRedeclarable(PD, Redecl); if (Record.readInt()) { // Read the definition. PD->allocateDefinitionData(); // Set the definition data of the canonical declaration, so other // redeclarations will see it. PD->getCanonicalDecl()->Data = PD->Data; unsigned NumProtoRefs = Record.readInt(); SmallVector ProtoRefs; ProtoRefs.reserve(NumProtoRefs); for (unsigned I = 0; I != NumProtoRefs; ++I) ProtoRefs.push_back(ReadDeclAs()); SmallVector ProtoLocs; ProtoLocs.reserve(NumProtoRefs); for (unsigned I = 0; I != NumProtoRefs; ++I) ProtoLocs.push_back(ReadSourceLocation()); PD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(), Reader.getContext()); // Note that we have deserialized a definition. Reader.PendingDefinitions.insert(PD); } else { PD->Data = PD->getCanonicalDecl()->Data; } } void ASTDeclReader::VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *FD) { VisitFieldDecl(FD); } void ASTDeclReader::VisitObjCCategoryDecl(ObjCCategoryDecl *CD) { VisitObjCContainerDecl(CD); CD->setCategoryNameLoc(ReadSourceLocation()); CD->setIvarLBraceLoc(ReadSourceLocation()); CD->setIvarRBraceLoc(ReadSourceLocation()); // Note that this category has been deserialized. We do this before // deserializing the interface declaration, so that it will consider this // category.
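Both ReadObjCDefinitionData and VisitObjCProtocolDecl above decode protocol lists stored as parallel runs: a count, then that many declaration references, then that many source locations, zipped back together afterwards. A sketch with the record modelled as a flat integer cursor (IDs standing in for decl references and locations):

    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    struct RecordCursor {
      std::vector<uint64_t> Data;
      size_t Idx = 0;
      uint64_t readInt() { return Data[Idx++]; }
    };

    std::vector<std::pair<uint64_t, uint64_t>> readProtocolList(RecordCursor &R) {
      size_t N = R.readInt();
      std::vector<uint64_t> IDs, Locs;
      for (size_t I = 0; I != N; ++I) IDs.push_back(R.readInt());  // first run
      for (size_t I = 0; I != N; ++I) Locs.push_back(R.readInt()); // second run
      std::vector<std::pair<uint64_t, uint64_t>> Zipped;
      for (size_t I = 0; I != N; ++I) Zipped.emplace_back(IDs[I], Locs[I]);
      return Zipped;
    }

    int main() {
      RecordCursor R{{2, 100, 101, 7, 9}}; // N=2, IDs 100/101, locations 7/9
      for (auto &P : readProtocolList(R))
        std::cout << P.first << '@' << P.second << '\n'; // 100@7, 101@9
    }

Keeping the two runs separate lets the writer emit each array with a simple loop while the reader reconstitutes the pairs, at the cost of the counts having to match exactly.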
Reader.CategoriesDeserialized.insert(CD); CD->ClassInterface = ReadDeclAs(); CD->TypeParamList = ReadObjCTypeParamList(); unsigned NumProtoRefs = Record.readInt(); SmallVector ProtoRefs; ProtoRefs.reserve(NumProtoRefs); for (unsigned I = 0; I != NumProtoRefs; ++I) ProtoRefs.push_back(ReadDeclAs()); SmallVector ProtoLocs; ProtoLocs.reserve(NumProtoRefs); for (unsigned I = 0; I != NumProtoRefs; ++I) ProtoLocs.push_back(ReadSourceLocation()); CD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(), Reader.getContext()); } void ASTDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) { VisitNamedDecl(CAD); CAD->setClassInterface(ReadDeclAs()); } void ASTDeclReader::VisitObjCPropertyDecl(ObjCPropertyDecl *D) { VisitNamedDecl(D); D->setAtLoc(ReadSourceLocation()); D->setLParenLoc(ReadSourceLocation()); QualType T = Record.readType(); TypeSourceInfo *TSI = GetTypeSourceInfo(); D->setType(T, TSI); D->setPropertyAttributes( (ObjCPropertyDecl::PropertyAttributeKind)Record.readInt()); D->setPropertyAttributesAsWritten( (ObjCPropertyDecl::PropertyAttributeKind)Record.readInt()); D->setPropertyImplementation( (ObjCPropertyDecl::PropertyControl)Record.readInt()); D->setGetterName(Record.readDeclarationName().getObjCSelector()); D->setSetterName(Record.readDeclarationName().getObjCSelector()); D->setGetterMethodDecl(ReadDeclAs()); D->setSetterMethodDecl(ReadDeclAs()); D->setPropertyIvarDecl(ReadDeclAs()); } void ASTDeclReader::VisitObjCImplDecl(ObjCImplDecl *D) { VisitObjCContainerDecl(D); D->setClassInterface(ReadDeclAs()); } void ASTDeclReader::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) { VisitObjCImplDecl(D); D->setIdentifier(Record.getIdentifierInfo()); D->CategoryNameLoc = ReadSourceLocation(); } void ASTDeclReader::VisitObjCImplementationDecl(ObjCImplementationDecl *D) { VisitObjCImplDecl(D); D->setSuperClass(ReadDeclAs()); D->SuperLoc = ReadSourceLocation(); D->setIvarLBraceLoc(ReadSourceLocation()); D->setIvarRBraceLoc(ReadSourceLocation()); D->setHasNonZeroConstructors(Record.readInt()); D->setHasDestructors(Record.readInt()); D->NumIvarInitializers = Record.readInt(); if (D->NumIvarInitializers) D->IvarInitializers = ReadGlobalOffset(); } void ASTDeclReader::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) { VisitDecl(D); D->setAtLoc(ReadSourceLocation()); D->setPropertyDecl(ReadDeclAs()); D->PropertyIvarDecl = ReadDeclAs(); D->IvarLoc = ReadSourceLocation(); D->setGetterCXXConstructor(Record.readExpr()); D->setSetterCXXAssignment(Record.readExpr()); } void ASTDeclReader::VisitFieldDecl(FieldDecl *FD) { VisitDeclaratorDecl(FD); FD->Mutable = Record.readInt(); if (int BitWidthOrInitializer = Record.readInt()) { FD->InitStorage.setInt( static_cast(BitWidthOrInitializer - 1)); if (FD->InitStorage.getInt() == FieldDecl::ISK_CapturedVLAType) { // Read captured variable length array. 
FD->InitStorage.setPointer(Record.readType().getAsOpaquePtr()); } else { FD->InitStorage.setPointer(Record.readExpr()); } } if (!FD->getDeclName()) { if (FieldDecl *Tmpl = ReadDeclAs()) Reader.getContext().setInstantiatedFromUnnamedFieldDecl(FD, Tmpl); } mergeMergeable(FD); } void ASTDeclReader::VisitMSPropertyDecl(MSPropertyDecl *PD) { VisitDeclaratorDecl(PD); PD->GetterId = Record.getIdentifierInfo(); PD->SetterId = Record.getIdentifierInfo(); } void ASTDeclReader::VisitIndirectFieldDecl(IndirectFieldDecl *FD) { VisitValueDecl(FD); FD->ChainingSize = Record.readInt(); assert(FD->ChainingSize >= 2 && "Anonymous chaining must be >= 2"); FD->Chaining = new (Reader.getContext())NamedDecl*[FD->ChainingSize]; for (unsigned I = 0; I != FD->ChainingSize; ++I) FD->Chaining[I] = ReadDeclAs(); mergeMergeable(FD); } ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) { RedeclarableResult Redecl = VisitRedeclarable(VD); VisitDeclaratorDecl(VD); VD->VarDeclBits.SClass = (StorageClass)Record.readInt(); VD->VarDeclBits.TSCSpec = Record.readInt(); VD->VarDeclBits.InitStyle = Record.readInt(); if (!isa(VD)) { VD->NonParmVarDeclBits.IsThisDeclarationADemotedDefinition = Record.readInt(); VD->NonParmVarDeclBits.ExceptionVar = Record.readInt(); VD->NonParmVarDeclBits.NRVOVariable = Record.readInt(); VD->NonParmVarDeclBits.CXXForRangeDecl = Record.readInt(); VD->NonParmVarDeclBits.ARCPseudoStrong = Record.readInt(); VD->NonParmVarDeclBits.IsInline = Record.readInt(); VD->NonParmVarDeclBits.IsInlineSpecified = Record.readInt(); VD->NonParmVarDeclBits.IsConstexpr = Record.readInt(); VD->NonParmVarDeclBits.IsInitCapture = Record.readInt(); VD->NonParmVarDeclBits.PreviousDeclInSameBlockScope = Record.readInt(); } Linkage VarLinkage = Linkage(Record.readInt()); VD->setCachedLinkage(VarLinkage); // Reconstruct the one piece of the IdentifierNamespace that we need. if (VD->getStorageClass() == SC_Extern && VarLinkage != NoLinkage && VD->getLexicalDeclContext()->isFunctionOrMethod()) VD->setLocalExternDecl(); if (uint64_t Val = Record.readInt()) { VD->setInit(Record.readExpr()); if (Val > 1) { // IsInitKnownICE = 1, IsInitNotICE = 2, IsInitICE = 3 EvaluatedStmt *Eval = VD->ensureEvaluatedStmt(); Eval->CheckedICE = true; Eval->IsICE = Val == 3; } } enum VarKind { VarNotTemplate = 0, VarTemplate, StaticDataMemberSpecialization }; switch ((VarKind)Record.readInt()) { case VarNotTemplate: // Only true variables (not parameters or implicit parameters) can be // merged; the other kinds are not really redeclarable at all. if (!isa(VD) && !isa(VD) && !isa(VD)) mergeRedeclarable(VD, Redecl); break; case VarTemplate: // Merged when we merge the template. VD->setDescribedVarTemplate(ReadDeclAs()); break; case StaticDataMemberSpecialization: { // HasMemberSpecializationInfo. 
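Within VisitVarDeclImpl above, a single record integer packs the presence of an initializer together with the cached integral-constant-expression verdict, as the inline comment there indicates (0 = no initializer, 1 = initializer with ICE not yet checked, 2 = known not ICE, 3 = known ICE). A standalone decoder sketch with illustrative type names:

    #include <iostream>
    #include <optional>

    struct InitInfo {
      bool HasInit = false;
      std::optional<bool> IsICE; // empty until the ICE check has run
    };

    InitInfo decodeInit(unsigned long long Val) {
      InitInfo I;
      if (Val) {
        I.HasInit = true;       // mirrors VD->setInit(...)
        if (Val > 1)
          I.IsICE = (Val == 3); // mirrors CheckedICE/IsICE above
      }
      return I;
    }

    int main() {
      for (unsigned long long V : {0ull, 1ull, 2ull, 3ull}) {
        InitInfo I = decodeInit(V);
        std::cout << V << ": init=" << I.HasInit
                  << " checked=" << I.IsICE.has_value()
                  << " ice=" << (I.IsICE && *I.IsICE) << '\n';
      }
    }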
VarDecl *Tmpl = ReadDeclAs(); TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record.readInt(); SourceLocation POI = ReadSourceLocation(); Reader.getContext().setInstantiatedFromStaticDataMember(VD, Tmpl, TSK,POI); mergeRedeclarable(VD, Redecl); break; } } return Redecl; } void ASTDeclReader::VisitImplicitParamDecl(ImplicitParamDecl *PD) { VisitVarDecl(PD); } void ASTDeclReader::VisitParmVarDecl(ParmVarDecl *PD) { VisitVarDecl(PD); unsigned isObjCMethodParam = Record.readInt(); unsigned scopeDepth = Record.readInt(); unsigned scopeIndex = Record.readInt(); unsigned declQualifier = Record.readInt(); if (isObjCMethodParam) { assert(scopeDepth == 0); PD->setObjCMethodScopeInfo(scopeIndex); PD->ParmVarDeclBits.ScopeDepthOrObjCQuals = declQualifier; } else { PD->setScopeInfo(scopeDepth, scopeIndex); } PD->ParmVarDeclBits.IsKNRPromoted = Record.readInt(); PD->ParmVarDeclBits.HasInheritedDefaultArg = Record.readInt(); if (Record.readInt()) // hasUninstantiatedDefaultArg. PD->setUninstantiatedDefaultArg(Record.readExpr()); // FIXME: If this is a redeclaration of a function from another module, handle // inheritance of default arguments. } void ASTDeclReader::VisitDecompositionDecl(DecompositionDecl *DD) { VisitVarDecl(DD); BindingDecl **BDs = DD->getTrailingObjects(); for (unsigned I = 0; I != DD->NumBindings; ++I) BDs[I] = ReadDeclAs(); } void ASTDeclReader::VisitBindingDecl(BindingDecl *BD) { VisitValueDecl(BD); BD->Binding = Record.readExpr(); } void ASTDeclReader::VisitFileScopeAsmDecl(FileScopeAsmDecl *AD) { VisitDecl(AD); AD->setAsmString(cast(Record.readExpr())); AD->setRParenLoc(ReadSourceLocation()); } void ASTDeclReader::VisitBlockDecl(BlockDecl *BD) { VisitDecl(BD); BD->setBody(cast_or_null(Record.readStmt())); BD->setSignatureAsWritten(GetTypeSourceInfo()); unsigned NumParams = Record.readInt(); SmallVector Params; Params.reserve(NumParams); for (unsigned I = 0; I != NumParams; ++I) Params.push_back(ReadDeclAs()); BD->setParams(Params); BD->setIsVariadic(Record.readInt()); BD->setBlockMissingReturnType(Record.readInt()); BD->setIsConversionFromLambda(Record.readInt()); bool capturesCXXThis = Record.readInt(); unsigned numCaptures = Record.readInt(); SmallVector captures; captures.reserve(numCaptures); for (unsigned i = 0; i != numCaptures; ++i) { VarDecl *decl = ReadDeclAs(); unsigned flags = Record.readInt(); bool byRef = (flags & 1); bool nested = (flags & 2); Expr *copyExpr = ((flags & 4) ? Record.readExpr() : nullptr); captures.push_back(BlockDecl::Capture(decl, byRef, nested, copyExpr)); } BD->setCaptures(Reader.getContext(), captures, capturesCXXThis); } void ASTDeclReader::VisitCapturedDecl(CapturedDecl *CD) { VisitDecl(CD); unsigned ContextParamPos = Record.readInt(); CD->setNothrow(Record.readInt() != 0); // Body is set by VisitCapturedStmt. 
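VisitBlockDecl above unpacks each block capture from one flags word: bit 0 marks a by-reference capture, bit 1 a nested one, and bit 2 announces that a copy expression follows next in the record. A standalone decoder of that packing:

    #include <iostream>

    struct Capture {
      bool ByRef, Nested, HasCopyExpr;
    };

    // Same bit assignments as the flags read in VisitBlockDecl.
    Capture decodeCaptureFlags(unsigned Flags) {
      return Capture{(Flags & 1) != 0, (Flags & 2) != 0, (Flags & 4) != 0};
    }

    int main() {
      for (unsigned Flags : {1u, 6u}) {
        Capture C = decodeCaptureFlags(Flags);
        std::cout << C.ByRef << C.Nested << C.HasCopyExpr << '\n';
      }
      // 100: by-ref only; 011: nested, with a copy expression to read next.
    }

Bit 2 doubles as a framing cue: the reader must consume the trailing expression exactly when that bit is set, or the record cursor falls out of sync.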
for (unsigned I = 0; I < CD->NumParams; ++I) { if (I != ContextParamPos) CD->setParam(I, ReadDeclAs()); else CD->setContextParam(I, ReadDeclAs()); } } void ASTDeclReader::VisitLinkageSpecDecl(LinkageSpecDecl *D) { VisitDecl(D); D->setLanguage((LinkageSpecDecl::LanguageIDs)Record.readInt()); D->setExternLoc(ReadSourceLocation()); D->setRBraceLoc(ReadSourceLocation()); } void ASTDeclReader::VisitExportDecl(ExportDecl *D) { VisitDecl(D); D->RBraceLoc = ReadSourceLocation(); } void ASTDeclReader::VisitLabelDecl(LabelDecl *D) { VisitNamedDecl(D); D->setLocStart(ReadSourceLocation()); } void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) { RedeclarableResult Redecl = VisitRedeclarable(D); VisitNamedDecl(D); D->setInline(Record.readInt()); D->LocStart = ReadSourceLocation(); D->RBraceLoc = ReadSourceLocation(); // Defer loading the anonymous namespace until we've finished merging // this namespace; loading it might load a later declaration of the // same namespace, and we have an invariant that older declarations // get merged before newer ones try to merge. GlobalDeclID AnonNamespace = 0; if (Redecl.getFirstID() == ThisDeclID) { AnonNamespace = ReadDeclID(); } else { // Link this namespace back to the first declaration, which has already // been deserialized. D->AnonOrFirstNamespaceAndInline.setPointer(D->getFirstDecl()); } mergeRedeclarable(D, Redecl); if (AnonNamespace) { // Each module has its own anonymous namespace, which is disjoint from // any other module's anonymous namespaces, so don't attach the anonymous // namespace at all. NamespaceDecl *Anon = cast(Reader.GetDecl(AnonNamespace)); if (!Record.isModule()) D->setAnonymousNamespace(Anon); } } void ASTDeclReader::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) { RedeclarableResult Redecl = VisitRedeclarable(D); VisitNamedDecl(D); D->NamespaceLoc = ReadSourceLocation(); D->IdentLoc = ReadSourceLocation(); D->QualifierLoc = Record.readNestedNameSpecifierLoc(); D->Namespace = ReadDeclAs(); mergeRedeclarable(D, Redecl); } void ASTDeclReader::VisitUsingDecl(UsingDecl *D) { VisitNamedDecl(D); D->setUsingLoc(ReadSourceLocation()); D->QualifierLoc = Record.readNestedNameSpecifierLoc(); ReadDeclarationNameLoc(D->DNLoc, D->getDeclName()); D->FirstUsingShadow.setPointer(ReadDeclAs()); D->setTypename(Record.readInt()); if (NamedDecl *Pattern = ReadDeclAs()) Reader.getContext().setInstantiatedFromUsingDecl(D, Pattern); mergeMergeable(D); } void ASTDeclReader::VisitUsingPackDecl(UsingPackDecl *D) { VisitNamedDecl(D); D->InstantiatedFrom = ReadDeclAs(); NamedDecl **Expansions = D->getTrailingObjects(); for (unsigned I = 0; I != D->NumExpansions; ++I) Expansions[I] = ReadDeclAs(); mergeMergeable(D); } void ASTDeclReader::VisitUsingShadowDecl(UsingShadowDecl *D) { RedeclarableResult Redecl = VisitRedeclarable(D); VisitNamedDecl(D); D->setTargetDecl(ReadDeclAs()); D->UsingOrNextShadow = ReadDeclAs(); UsingShadowDecl *Pattern = ReadDeclAs(); if (Pattern) Reader.getContext().setInstantiatedFromUsingShadowDecl(D, Pattern); mergeRedeclarable(D, Redecl); } void ASTDeclReader::VisitConstructorUsingShadowDecl( ConstructorUsingShadowDecl *D) { VisitUsingShadowDecl(D); D->NominatedBaseClassShadowDecl = ReadDeclAs(); D->ConstructedBaseClassShadowDecl = ReadDeclAs(); D->IsVirtual = Record.readInt(); } void ASTDeclReader::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) { VisitNamedDecl(D); D->UsingLoc = ReadSourceLocation(); D->NamespaceLoc = ReadSourceLocation(); D->QualifierLoc = Record.readNestedNameSpecifierLoc(); D->NominatedNamespace = ReadDeclAs(); 
D->CommonAncestor = ReadDeclAs(); } void ASTDeclReader::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) { VisitValueDecl(D); D->setUsingLoc(ReadSourceLocation()); D->QualifierLoc = Record.readNestedNameSpecifierLoc(); ReadDeclarationNameLoc(D->DNLoc, D->getDeclName()); D->EllipsisLoc = ReadSourceLocation(); mergeMergeable(D); } void ASTDeclReader::VisitUnresolvedUsingTypenameDecl( UnresolvedUsingTypenameDecl *D) { VisitTypeDecl(D); D->TypenameLocation = ReadSourceLocation(); D->QualifierLoc = Record.readNestedNameSpecifierLoc(); D->EllipsisLoc = ReadSourceLocation(); mergeMergeable(D); } void ASTDeclReader::ReadCXXDefinitionData( struct CXXRecordDecl::DefinitionData &Data) { // Note: the caller has deserialized the IsLambda bit already. Data.UserDeclaredConstructor = Record.readInt(); Data.UserDeclaredSpecialMembers = Record.readInt(); Data.Aggregate = Record.readInt(); Data.PlainOldData = Record.readInt(); Data.Empty = Record.readInt(); Data.Polymorphic = Record.readInt(); Data.Abstract = Record.readInt(); Data.IsStandardLayout = Record.readInt(); Data.HasNoNonEmptyBases = Record.readInt(); Data.HasPrivateFields = Record.readInt(); Data.HasProtectedFields = Record.readInt(); Data.HasPublicFields = Record.readInt(); Data.HasMutableFields = Record.readInt(); Data.HasVariantMembers = Record.readInt(); Data.HasOnlyCMembers = Record.readInt(); Data.HasInClassInitializer = Record.readInt(); Data.HasUninitializedReferenceMember = Record.readInt(); Data.HasUninitializedFields = Record.readInt(); Data.HasInheritedConstructor = Record.readInt(); Data.HasInheritedAssignment = Record.readInt(); Data.NeedOverloadResolutionForMoveConstructor = Record.readInt(); Data.NeedOverloadResolutionForMoveAssignment = Record.readInt(); Data.NeedOverloadResolutionForDestructor = Record.readInt(); Data.DefaultedMoveConstructorIsDeleted = Record.readInt(); Data.DefaultedMoveAssignmentIsDeleted = Record.readInt(); Data.DefaultedDestructorIsDeleted = Record.readInt(); Data.HasTrivialSpecialMembers = Record.readInt(); Data.DeclaredNonTrivialSpecialMembers = Record.readInt(); Data.HasIrrelevantDestructor = Record.readInt(); Data.HasConstexprNonCopyMoveConstructor = Record.readInt(); Data.HasDefaultedDefaultConstructor = Record.readInt(); Data.DefaultedDefaultConstructorIsConstexpr = Record.readInt(); Data.HasConstexprDefaultConstructor = Record.readInt(); Data.HasNonLiteralTypeFieldsOrBases = Record.readInt(); Data.ComputedVisibleConversions = Record.readInt(); Data.UserProvidedDefaultConstructor = Record.readInt(); Data.DeclaredSpecialMembers = Record.readInt(); Data.ImplicitCopyConstructorHasConstParam = Record.readInt(); Data.ImplicitCopyAssignmentHasConstParam = Record.readInt(); Data.HasDeclaredCopyConstructorWithConstParam = Record.readInt(); Data.HasDeclaredCopyAssignmentWithConstParam = Record.readInt(); Data.NumBases = Record.readInt(); if (Data.NumBases) Data.Bases = ReadGlobalOffset(); Data.NumVBases = Record.readInt(); if (Data.NumVBases) Data.VBases = ReadGlobalOffset(); Record.readUnresolvedSet(Data.Conversions); Record.readUnresolvedSet(Data.VisibleConversions); assert(Data.Definition && "Data.Definition should be already set!"); Data.FirstFriend = ReadDeclID(); if (Data.IsLambda) { typedef LambdaCapture Capture; CXXRecordDecl::LambdaDefinitionData &Lambda = static_cast(Data); Lambda.Dependent = Record.readInt(); Lambda.IsGenericLambda = Record.readInt(); Lambda.CaptureDefault = Record.readInt(); Lambda.NumCaptures = Record.readInt(); Lambda.NumExplicitCaptures = Record.readInt(); 
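    // The mangling number and context declaration give the closure type a
    // stable identity, so equivalent lambdas deserialized from different
    // modules mangle consistently.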
Lambda.ManglingNumber = Record.readInt(); Lambda.ContextDecl = ReadDeclID(); Lambda.Captures = (Capture*)Reader.Context.Allocate(sizeof(Capture)*Lambda.NumCaptures); Capture *ToCapture = Lambda.Captures; Lambda.MethodTyInfo = GetTypeSourceInfo(); for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) { SourceLocation Loc = ReadSourceLocation(); bool IsImplicit = Record.readInt(); LambdaCaptureKind Kind = static_cast(Record.readInt()); switch (Kind) { case LCK_StarThis: case LCK_This: case LCK_VLAType: *ToCapture++ = Capture(Loc, IsImplicit, Kind, nullptr,SourceLocation()); break; case LCK_ByCopy: case LCK_ByRef: VarDecl *Var = ReadDeclAs(); SourceLocation EllipsisLoc = ReadSourceLocation(); *ToCapture++ = Capture(Loc, IsImplicit, Kind, Var, EllipsisLoc); break; } } } } void ASTDeclReader::MergeDefinitionData( CXXRecordDecl *D, struct CXXRecordDecl::DefinitionData &&MergeDD) { assert(D->DefinitionData && "merging class definition into non-definition"); auto &DD = *D->DefinitionData; if (DD.Definition != MergeDD.Definition) { // Track that we merged the definitions. Reader.MergedDeclContexts.insert(std::make_pair(MergeDD.Definition, DD.Definition)); Reader.PendingDefinitions.erase(MergeDD.Definition); MergeDD.Definition->IsCompleteDefinition = false; Reader.mergeDefinitionVisibility(DD.Definition, MergeDD.Definition); assert(Reader.Lookups.find(MergeDD.Definition) == Reader.Lookups.end() && "already loaded pending lookups for merged definition"); } auto PFDI = Reader.PendingFakeDefinitionData.find(&DD); if (PFDI != Reader.PendingFakeDefinitionData.end() && PFDI->second == ASTReader::PendingFakeDefinitionKind::Fake) { // We faked up this definition data because we found a class for which we'd // not yet loaded the definition. Replace it with the real thing now. assert(!DD.IsLambda && !MergeDD.IsLambda && "faked up lambda definition?"); PFDI->second = ASTReader::PendingFakeDefinitionKind::FakeLoaded; // Don't change which declaration is the definition; that is required // to be invariant once we select it. auto *Def = DD.Definition; DD = std::move(MergeDD); DD.Definition = Def; return; } // FIXME: Move this out into a .def file? 
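  // MATCH_FIELD records an ODR violation if the two definitions disagree on
  // a bit and then ORs the values together; OR_FIELD just ORs, for bits that
  // may legitimately differ between modules.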
bool DetectedOdrViolation = false; #define OR_FIELD(Field) DD.Field |= MergeDD.Field; #define MATCH_FIELD(Field) \ DetectedOdrViolation |= DD.Field != MergeDD.Field; \ OR_FIELD(Field) MATCH_FIELD(UserDeclaredConstructor) MATCH_FIELD(UserDeclaredSpecialMembers) MATCH_FIELD(Aggregate) MATCH_FIELD(PlainOldData) MATCH_FIELD(Empty) MATCH_FIELD(Polymorphic) MATCH_FIELD(Abstract) MATCH_FIELD(IsStandardLayout) MATCH_FIELD(HasNoNonEmptyBases) MATCH_FIELD(HasPrivateFields) MATCH_FIELD(HasProtectedFields) MATCH_FIELD(HasPublicFields) MATCH_FIELD(HasMutableFields) MATCH_FIELD(HasVariantMembers) MATCH_FIELD(HasOnlyCMembers) MATCH_FIELD(HasInClassInitializer) MATCH_FIELD(HasUninitializedReferenceMember) MATCH_FIELD(HasUninitializedFields) MATCH_FIELD(HasInheritedConstructor) MATCH_FIELD(HasInheritedAssignment) MATCH_FIELD(NeedOverloadResolutionForMoveConstructor) MATCH_FIELD(NeedOverloadResolutionForMoveAssignment) MATCH_FIELD(NeedOverloadResolutionForDestructor) MATCH_FIELD(DefaultedMoveConstructorIsDeleted) MATCH_FIELD(DefaultedMoveAssignmentIsDeleted) MATCH_FIELD(DefaultedDestructorIsDeleted) OR_FIELD(HasTrivialSpecialMembers) OR_FIELD(DeclaredNonTrivialSpecialMembers) MATCH_FIELD(HasIrrelevantDestructor) OR_FIELD(HasConstexprNonCopyMoveConstructor) OR_FIELD(HasDefaultedDefaultConstructor) MATCH_FIELD(DefaultedDefaultConstructorIsConstexpr) OR_FIELD(HasConstexprDefaultConstructor) MATCH_FIELD(HasNonLiteralTypeFieldsOrBases) // ComputedVisibleConversions is handled below. MATCH_FIELD(UserProvidedDefaultConstructor) OR_FIELD(DeclaredSpecialMembers) MATCH_FIELD(ImplicitCopyConstructorHasConstParam) MATCH_FIELD(ImplicitCopyAssignmentHasConstParam) OR_FIELD(HasDeclaredCopyConstructorWithConstParam) OR_FIELD(HasDeclaredCopyAssignmentWithConstParam) MATCH_FIELD(IsLambda) #undef OR_FIELD #undef MATCH_FIELD if (DD.NumBases != MergeDD.NumBases || DD.NumVBases != MergeDD.NumVBases) DetectedOdrViolation = true; // FIXME: Issue a diagnostic if the base classes don't match when we come // to lazily load them. // FIXME: Issue a diagnostic if the list of conversion functions doesn't // match when we come to lazily load them. if (MergeDD.ComputedVisibleConversions && !DD.ComputedVisibleConversions) { DD.VisibleConversions = std::move(MergeDD.VisibleConversions); DD.ComputedVisibleConversions = true; } // FIXME: Issue a diagnostic if FirstFriend doesn't match when we come to // lazily load it. if (DD.IsLambda) { // FIXME: ODR-checking for merging lambdas (this happens, for instance, // when they occur within the body of a function template specialization). } if (DetectedOdrViolation) Reader.PendingOdrMergeFailures[DD.Definition].push_back(MergeDD.Definition); } void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update) { struct CXXRecordDecl::DefinitionData *DD; ASTContext &C = Reader.getContext(); // Determine whether this is a lambda closure type, so that we can // allocate the appropriate DefinitionData structure. bool IsLambda = Record.readInt(); if (IsLambda) DD = new (C) CXXRecordDecl::LambdaDefinitionData(D, nullptr, false, false, LCD_None); else DD = new (C) struct CXXRecordDecl::DefinitionData(D); ReadCXXDefinitionData(*DD); // We might already have a definition for this record. This can happen either // because we're reading an update record, or because we've already done some // merging. Either way, just merge into it. 
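  // Only the canonical declaration owns a DefinitionData structure; every
  // other redeclaration shares its pointer.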
CXXRecordDecl *Canon = D->getCanonicalDecl(); if (Canon->DefinitionData) { MergeDefinitionData(Canon, std::move(*DD)); D->DefinitionData = Canon->DefinitionData; return; } // Mark this declaration as being a definition. D->IsCompleteDefinition = true; D->DefinitionData = DD; // If this is not the first declaration or is an update record, we can have // other redeclarations already. Make a note that we need to propagate the // DefinitionData pointer onto them. if (Update || Canon != D) { Canon->DefinitionData = D->DefinitionData; Reader.PendingDefinitions.insert(D); } } ASTDeclReader::RedeclarableResult ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) { RedeclarableResult Redecl = VisitRecordDeclImpl(D); ASTContext &C = Reader.getContext(); enum CXXRecKind { CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization }; switch ((CXXRecKind)Record.readInt()) { case CXXRecNotTemplate: // Merged when we merge the folding set entry in the primary template. if (!isa(D)) mergeRedeclarable(D, Redecl); break; case CXXRecTemplate: { // Merged when we merge the template. ClassTemplateDecl *Template = ReadDeclAs(); D->TemplateOrInstantiation = Template; if (!Template->getTemplatedDecl()) { // We've not actually loaded the ClassTemplateDecl yet, because we're // currently being loaded as its pattern. Rely on it to set up our // TypeForDecl (see VisitClassTemplateDecl). // // Beware: we do not yet know our canonical declaration, and may still // get merged once the surrounding class template has got off the ground. TypeIDForTypeDecl = 0; } break; } case CXXRecMemberSpecialization: { CXXRecordDecl *RD = ReadDeclAs(); TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record.readInt(); SourceLocation POI = ReadSourceLocation(); MemberSpecializationInfo *MSI = new (C) MemberSpecializationInfo(RD, TSK); MSI->setPointOfInstantiation(POI); D->TemplateOrInstantiation = MSI; mergeRedeclarable(D, Redecl); break; } } bool WasDefinition = Record.readInt(); if (WasDefinition) ReadCXXRecordDefinition(D, /*Update*/false); else // Propagate DefinitionData pointer from the canonical declaration. D->DefinitionData = D->getCanonicalDecl()->DefinitionData; // Lazily load the key function to avoid deserializing every method so we can // compute it. if (WasDefinition) { DeclID KeyFn = ReadDeclID(); if (KeyFn && D->IsCompleteDefinition) // FIXME: This is wrong for the ARM ABI, where some other module may have // made this function no longer be a key function. We need an update // record or similar for that case. C.KeyFunctions[D] = KeyFn; } return Redecl; } void ASTDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) { VisitFunctionDecl(D); unsigned NumOverridenMethods = Record.readInt(); if (D->isCanonicalDecl()) { while (NumOverridenMethods--) { // Avoid invariant checking of CXXMethodDecl::addOverriddenMethod, // MD may be initializing. if (CXXMethodDecl *MD = ReadDeclAs()) Reader.getContext().addOverriddenMethod(D, MD->getCanonicalDecl()); } } else { // We don't care about which declarations this used to override; we get // the relevant information from the canonical declaration. Record.skipInts(NumOverridenMethods); } } void ASTDeclReader::VisitCXXConstructorDecl(CXXConstructorDecl *D) { // We need the inherited constructor information to merge the declaration, // so we have to read it before we call VisitCXXMethodDecl. 
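  // The inherited constructor is stored as a trailing object; storage for it
  // was only allocated if CreateDeserialized was told this is an inheriting
  // constructor (see DECL_CXX_INHERITED_CONSTRUCTOR below).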
if (D->isInheritingConstructor()) { auto *Shadow = ReadDeclAs(); auto *Ctor = ReadDeclAs(); *D->getTrailingObjects() = InheritedConstructor(Shadow, Ctor); } VisitCXXMethodDecl(D); D->IsExplicitSpecified = Record.readInt(); } void ASTDeclReader::VisitCXXDestructorDecl(CXXDestructorDecl *D) { VisitCXXMethodDecl(D); if (auto *OperatorDelete = ReadDeclAs()) { auto *Canon = cast(D->getCanonicalDecl()); // FIXME: Check consistency if we have an old and new operator delete. if (!Canon->OperatorDelete) Canon->OperatorDelete = OperatorDelete; } } void ASTDeclReader::VisitCXXConversionDecl(CXXConversionDecl *D) { VisitCXXMethodDecl(D); D->IsExplicitSpecified = Record.readInt(); } void ASTDeclReader::VisitImportDecl(ImportDecl *D) { VisitDecl(D); D->ImportedAndComplete.setPointer(readModule()); D->ImportedAndComplete.setInt(Record.readInt()); SourceLocation *StoredLocs = D->getTrailingObjects(); for (unsigned I = 0, N = Record.back(); I != N; ++I) StoredLocs[I] = ReadSourceLocation(); (void)Record.readInt(); // The number of stored source locations. } void ASTDeclReader::VisitAccessSpecDecl(AccessSpecDecl *D) { VisitDecl(D); D->setColonLoc(ReadSourceLocation()); } void ASTDeclReader::VisitFriendDecl(FriendDecl *D) { VisitDecl(D); if (Record.readInt()) // hasFriendDecl D->Friend = ReadDeclAs(); else D->Friend = GetTypeSourceInfo(); for (unsigned i = 0; i != D->NumTPLists; ++i) D->getTrailingObjects()[i] = Record.readTemplateParameterList(); D->NextFriend = ReadDeclID(); D->UnsupportedFriend = (Record.readInt() != 0); D->FriendLoc = ReadSourceLocation(); } void ASTDeclReader::VisitFriendTemplateDecl(FriendTemplateDecl *D) { VisitDecl(D); unsigned NumParams = Record.readInt(); D->NumParams = NumParams; D->Params = new TemplateParameterList*[NumParams]; for (unsigned i = 0; i != NumParams; ++i) D->Params[i] = Record.readTemplateParameterList(); if (Record.readInt()) // HasFriendDecl D->Friend = ReadDeclAs(); else D->Friend = GetTypeSourceInfo(); D->FriendLoc = ReadSourceLocation(); } DeclID ASTDeclReader::VisitTemplateDecl(TemplateDecl *D) { VisitNamedDecl(D); DeclID PatternID = ReadDeclID(); NamedDecl *TemplatedDecl = cast_or_null(Reader.GetDecl(PatternID)); TemplateParameterList *TemplateParams = Record.readTemplateParameterList(); D->init(TemplatedDecl, TemplateParams); return PatternID; } ASTDeclReader::RedeclarableResult ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) { RedeclarableResult Redecl = VisitRedeclarable(D); // Make sure we've allocated the Common pointer first. We do this before // VisitTemplateDecl so that getCommonPtr() can be used during initialization. RedeclarableTemplateDecl *CanonD = D->getCanonicalDecl(); if (!CanonD->Common) { CanonD->Common = CanonD->newCommon(Reader.getContext()); Reader.PendingDefinitions.insert(CanonD); } D->Common = CanonD->Common; // If this is the first declaration of the template, fill in the information // for the 'common' pointer. if (ThisDeclID == Redecl.getFirstID()) { if (RedeclarableTemplateDecl *RTD = ReadDeclAs()) { assert(RTD->getKind() == D->getKind() && "InstantiatedFromMemberTemplate kind mismatch"); D->setInstantiatedFromMemberTemplate(RTD); if (Record.readInt()) D->setMemberSpecialization(); } } DeclID PatternID = VisitTemplateDecl(D); D->IdentifierNamespace = Record.readInt(); mergeRedeclarable(D, Redecl, PatternID); // If we merged the template with a prior declaration chain, merge the common // pointer. // FIXME: Actually merge here, don't just overwrite. 
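  // Every redeclaration is expected to share the canonical declaration's
  // Common structure; overwriting our pointer here keeps a freshly merged
  // chain consistent.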
D->Common = D->getCanonicalDecl()->Common; return Redecl; } static DeclID *newDeclIDList(ASTContext &Context, DeclID *Old, SmallVectorImpl &IDs) { assert(!IDs.empty() && "no IDs to add to list"); if (Old) { IDs.insert(IDs.end(), Old + 1, Old + 1 + Old[0]); std::sort(IDs.begin(), IDs.end()); IDs.erase(std::unique(IDs.begin(), IDs.end()), IDs.end()); } auto *Result = new (Context) DeclID[1 + IDs.size()]; *Result = IDs.size(); std::copy(IDs.begin(), IDs.end(), Result + 1); return Result; } void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) { RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D); if (ThisDeclID == Redecl.getFirstID()) { // This ClassTemplateDecl owns a CommonPtr; read it to keep track of all of // the specializations. SmallVector SpecIDs; ReadDeclIDList(SpecIDs); if (!SpecIDs.empty()) { auto *CommonPtr = D->getCommonPtr(); CommonPtr->LazySpecializations = newDeclIDList( Reader.getContext(), CommonPtr->LazySpecializations, SpecIDs); } } if (D->getTemplatedDecl()->TemplateOrInstantiation) { // We were loaded before our templated declaration was. We've not set up // its corresponding type yet (see VisitCXXRecordDeclImpl), so reconstruct // it now. Reader.Context.getInjectedClassNameType( D->getTemplatedDecl(), D->getInjectedClassNameSpecialization()); } } void ASTDeclReader::VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D) { llvm_unreachable("BuiltinTemplates are not serialized"); } /// TODO: Unify with ClassTemplateDecl version? /// May require unifying ClassTemplateDecl and /// VarTemplateDecl beyond TemplateDecl... void ASTDeclReader::VisitVarTemplateDecl(VarTemplateDecl *D) { RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D); if (ThisDeclID == Redecl.getFirstID()) { // This VarTemplateDecl owns a CommonPtr; read it to keep track of all of // the specializations. SmallVector SpecIDs; ReadDeclIDList(SpecIDs); if (!SpecIDs.empty()) { auto *CommonPtr = D->getCommonPtr(); CommonPtr->LazySpecializations = newDeclIDList( Reader.getContext(), CommonPtr->LazySpecializations, SpecIDs); } } } ASTDeclReader::RedeclarableResult ASTDeclReader::VisitClassTemplateSpecializationDeclImpl( ClassTemplateSpecializationDecl *D) { RedeclarableResult Redecl = VisitCXXRecordDeclImpl(D); ASTContext &C = Reader.getContext(); if (Decl *InstD = ReadDecl()) { if (ClassTemplateDecl *CTD = dyn_cast(InstD)) { D->SpecializedTemplate = CTD; } else { SmallVector TemplArgs; Record.readTemplateArgumentList(TemplArgs); TemplateArgumentList *ArgList = TemplateArgumentList::CreateCopy(C, TemplArgs); ClassTemplateSpecializationDecl::SpecializedPartialSpecialization *PS = new (C) ClassTemplateSpecializationDecl:: SpecializedPartialSpecialization(); PS->PartialSpecialization = cast(InstD); PS->TemplateArgs = ArgList; D->SpecializedTemplate = PS; } } SmallVector TemplArgs; Record.readTemplateArgumentList(TemplArgs, /*Canonicalize*/ true); D->TemplateArgs = TemplateArgumentList::CreateCopy(C, TemplArgs); D->PointOfInstantiation = ReadSourceLocation(); D->SpecializationKind = (TemplateSpecializationKind)Record.readInt(); bool writtenAsCanonicalDecl = Record.readInt(); if (writtenAsCanonicalDecl) { ClassTemplateDecl *CanonPattern = ReadDeclAs(); if (D->isCanonicalDecl()) { // It's kept in the folding set. 
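      // Specializations are uniqued on their template arguments, so the
      // folding-set insertion below either registers this declaration or
      // finds an equivalent, previously loaded one to merge with.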
// Set this as, or find, the canonical declaration for this specialization ClassTemplateSpecializationDecl *CanonSpec; if (ClassTemplatePartialSpecializationDecl *Partial = dyn_cast(D)) { CanonSpec = CanonPattern->getCommonPtr()->PartialSpecializations .GetOrInsertNode(Partial); } else { CanonSpec = CanonPattern->getCommonPtr()->Specializations.GetOrInsertNode(D); } // If there was already a canonical specialization, merge into it. if (CanonSpec != D) { mergeRedeclarable(D, CanonSpec, Redecl); // This declaration might be a definition. Merge with any existing // definition. if (auto *DDD = D->DefinitionData) { if (CanonSpec->DefinitionData) MergeDefinitionData(CanonSpec, std::move(*DDD)); else CanonSpec->DefinitionData = D->DefinitionData; } D->DefinitionData = CanonSpec->DefinitionData; } } } // Explicit info. if (TypeSourceInfo *TyInfo = GetTypeSourceInfo()) { ClassTemplateSpecializationDecl::ExplicitSpecializationInfo *ExplicitInfo = new (C) ClassTemplateSpecializationDecl::ExplicitSpecializationInfo; ExplicitInfo->TypeAsWritten = TyInfo; ExplicitInfo->ExternLoc = ReadSourceLocation(); ExplicitInfo->TemplateKeywordLoc = ReadSourceLocation(); D->ExplicitInfo = ExplicitInfo; } return Redecl; } void ASTDeclReader::VisitClassTemplatePartialSpecializationDecl( ClassTemplatePartialSpecializationDecl *D) { RedeclarableResult Redecl = VisitClassTemplateSpecializationDeclImpl(D); D->TemplateParams = Record.readTemplateParameterList(); D->ArgsAsWritten = Record.readASTTemplateArgumentListInfo(); // These are read/set from/to the first declaration. if (ThisDeclID == Redecl.getFirstID()) { D->InstantiatedFromMember.setPointer( ReadDeclAs()); D->InstantiatedFromMember.setInt(Record.readInt()); } } void ASTDeclReader::VisitClassScopeFunctionSpecializationDecl( ClassScopeFunctionSpecializationDecl *D) { VisitDecl(D); D->Specialization = ReadDeclAs(); } void ASTDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) { RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D); if (ThisDeclID == Redecl.getFirstID()) { // This FunctionTemplateDecl owns a CommonPtr; read it. SmallVector SpecIDs; ReadDeclIDList(SpecIDs); if (!SpecIDs.empty()) { auto *CommonPtr = D->getCommonPtr(); CommonPtr->LazySpecializations = newDeclIDList( Reader.getContext(), CommonPtr->LazySpecializations, SpecIDs); } } } /// TODO: Unify with ClassTemplateSpecializationDecl version? /// May require unifying ClassTemplate(Partial)SpecializationDecl and /// VarTemplate(Partial)SpecializationDecl with a new data /// structure Template(Partial)SpecializationDecl, and /// using Template(Partial)SpecializationDecl as input type. ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarTemplateSpecializationDeclImpl( VarTemplateSpecializationDecl *D) { RedeclarableResult Redecl = VisitVarDeclImpl(D); ASTContext &C = Reader.getContext(); if (Decl *InstD = ReadDecl()) { if (VarTemplateDecl *VTD = dyn_cast(InstD)) { D->SpecializedTemplate = VTD; } else { SmallVector TemplArgs; Record.readTemplateArgumentList(TemplArgs); TemplateArgumentList *ArgList = TemplateArgumentList::CreateCopy( C, TemplArgs); VarTemplateSpecializationDecl::SpecializedPartialSpecialization *PS = new (C) VarTemplateSpecializationDecl::SpecializedPartialSpecialization(); PS->PartialSpecialization = cast(InstD); PS->TemplateArgs = ArgList; D->SpecializedTemplate = PS; } } // Explicit info. 
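  // A written type is only serialized for explicit instantiations and
  // explicit specializations; its presence tells us to reconstruct the
  // ExplicitSpecializationInfo, including the extern and template keyword
  // locations.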
if (TypeSourceInfo *TyInfo = GetTypeSourceInfo()) { VarTemplateSpecializationDecl::ExplicitSpecializationInfo *ExplicitInfo = new (C) VarTemplateSpecializationDecl::ExplicitSpecializationInfo; ExplicitInfo->TypeAsWritten = TyInfo; ExplicitInfo->ExternLoc = ReadSourceLocation(); ExplicitInfo->TemplateKeywordLoc = ReadSourceLocation(); D->ExplicitInfo = ExplicitInfo; } SmallVector TemplArgs; Record.readTemplateArgumentList(TemplArgs, /*Canonicalize*/ true); D->TemplateArgs = TemplateArgumentList::CreateCopy(C, TemplArgs); D->PointOfInstantiation = ReadSourceLocation(); D->SpecializationKind = (TemplateSpecializationKind)Record.readInt(); bool writtenAsCanonicalDecl = Record.readInt(); if (writtenAsCanonicalDecl) { VarTemplateDecl *CanonPattern = ReadDeclAs(); if (D->isCanonicalDecl()) { // It's kept in the folding set. // FIXME: If it's already present, merge it. if (VarTemplatePartialSpecializationDecl *Partial = dyn_cast(D)) { CanonPattern->getCommonPtr()->PartialSpecializations .GetOrInsertNode(Partial); } else { CanonPattern->getCommonPtr()->Specializations.GetOrInsertNode(D); } } } return Redecl; } /// TODO: Unify with ClassTemplatePartialSpecializationDecl version? /// May require unifying ClassTemplate(Partial)SpecializationDecl and /// VarTemplate(Partial)SpecializationDecl with a new data /// structure Template(Partial)SpecializationDecl, and /// using Template(Partial)SpecializationDecl as input type. void ASTDeclReader::VisitVarTemplatePartialSpecializationDecl( VarTemplatePartialSpecializationDecl *D) { RedeclarableResult Redecl = VisitVarTemplateSpecializationDeclImpl(D); D->TemplateParams = Record.readTemplateParameterList(); D->ArgsAsWritten = Record.readASTTemplateArgumentListInfo(); // These are read/set from/to the first declaration. if (ThisDeclID == Redecl.getFirstID()) { D->InstantiatedFromMember.setPointer( ReadDeclAs()); D->InstantiatedFromMember.setInt(Record.readInt()); } } void ASTDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) { VisitTypeDecl(D); D->setDeclaredWithTypename(Record.readInt()); if (Record.readInt()) D->setDefaultArgument(GetTypeSourceInfo()); } void ASTDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) { VisitDeclaratorDecl(D); // TemplateParmPosition. D->setDepth(Record.readInt()); D->setPosition(Record.readInt()); if (D->isExpandedParameterPack()) { auto TypesAndInfos = D->getTrailingObjects>(); for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) { new (&TypesAndInfos[I].first) QualType(Record.readType()); TypesAndInfos[I].second = GetTypeSourceInfo(); } } else { // Rest of NonTypeTemplateParmDecl. D->ParameterPack = Record.readInt(); if (Record.readInt()) D->setDefaultArgument(Record.readExpr()); } } void ASTDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) { VisitTemplateDecl(D); // TemplateParmPosition. D->setDepth(Record.readInt()); D->setPosition(Record.readInt()); if (D->isExpandedParameterPack()) { TemplateParameterList **Data = D->getTrailingObjects(); for (unsigned I = 0, N = D->getNumExpansionTemplateParameters(); I != N; ++I) Data[I] = Record.readTemplateParameterList(); } else { // Rest of TemplateTemplateParmDecl. 
D->ParameterPack = Record.readInt(); if (Record.readInt()) D->setDefaultArgument(Reader.getContext(), Record.readTemplateArgumentLoc()); } } void ASTDeclReader::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) { VisitRedeclarableTemplateDecl(D); } void ASTDeclReader::VisitStaticAssertDecl(StaticAssertDecl *D) { VisitDecl(D); D->AssertExprAndFailed.setPointer(Record.readExpr()); D->AssertExprAndFailed.setInt(Record.readInt()); D->Message = cast_or_null(Record.readExpr()); D->RParenLoc = ReadSourceLocation(); } void ASTDeclReader::VisitEmptyDecl(EmptyDecl *D) { VisitDecl(D); } std::pair ASTDeclReader::VisitDeclContext(DeclContext *DC) { uint64_t LexicalOffset = ReadLocalOffset(); uint64_t VisibleOffset = ReadLocalOffset(); return std::make_pair(LexicalOffset, VisibleOffset); } template ASTDeclReader::RedeclarableResult ASTDeclReader::VisitRedeclarable(Redeclarable *D) { DeclID FirstDeclID = ReadDeclID(); Decl *MergeWith = nullptr; bool IsKeyDecl = ThisDeclID == FirstDeclID; bool IsFirstLocalDecl = false; uint64_t RedeclOffset = 0; // 0 indicates that this declaration was the only declaration of its entity, // and is used for space optimization. if (FirstDeclID == 0) { FirstDeclID = ThisDeclID; IsKeyDecl = true; IsFirstLocalDecl = true; } else if (unsigned N = Record.readInt()) { // This declaration was the first local declaration, but may have imported // other declarations. IsKeyDecl = N == 1; IsFirstLocalDecl = true; // We have some declarations that must be before us in our redeclaration // chain. Read them now, and remember that we ought to merge with one of // them. // FIXME: Provide a known merge target to the second and subsequent such // declaration. for (unsigned I = 0; I != N - 1; ++I) MergeWith = ReadDecl(); RedeclOffset = ReadLocalOffset(); } else { // This declaration was not the first local declaration. Read the first // local declaration now, to trigger the import of other redeclarations. (void)ReadDecl(); } T *FirstDecl = cast_or_null(Reader.GetDecl(FirstDeclID)); if (FirstDecl != D) { // We delay loading of the redeclaration chain to avoid deeply nested calls. // We temporarily set the first (canonical) declaration as the previous one // which is the one that matters and mark the real previous DeclID to be // loaded & attached later on. D->RedeclLink = Redeclarable::PreviousDeclLink(FirstDecl); D->First = FirstDecl->getCanonicalDecl(); } T *DAsT = static_cast(D); // Note that we need to load local redeclarations of this decl and build a // decl chain for them. This must happen *after* we perform the preloading // above; this ensures that the redeclaration chain is built in the correct // order. if (IsFirstLocalDecl) Reader.PendingDeclChains.push_back(std::make_pair(DAsT, RedeclOffset)); return RedeclarableResult(MergeWith, FirstDeclID, IsKeyDecl); } /// \brief Attempts to merge the given declaration (D) with another declaration /// of the same entity. template void ASTDeclReader::mergeRedeclarable(Redeclarable *DBase, RedeclarableResult &Redecl, DeclID TemplatePatternID) { // If modules are not available, there is no reason to perform this merge. if (!Reader.getContext().getLangOpts().Modules) return; // If we're not the canonical declaration, we don't need to merge. if (!DBase->isFirstDecl()) return; T *D = static_cast(DBase); if (auto *Existing = Redecl.getKnownMergeTarget()) // We already know of an existing declaration we should merge with. 
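    // (The merge target was recorded by VisitRedeclarable while preloading
    // earlier declarations of this entity.)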
mergeRedeclarable(D, cast(Existing), Redecl, TemplatePatternID); else if (FindExistingResult ExistingRes = findExisting(D)) if (T *Existing = ExistingRes) mergeRedeclarable(D, Existing, Redecl, TemplatePatternID); } /// \brief "Cast" to type T, asserting if we don't have an implicit conversion. /// We use this to put code in a template that will only be valid for certain /// instantiations. template static T assert_cast(T t) { return t; } template static T assert_cast(...) { llvm_unreachable("bad assert_cast"); } /// \brief Merge together the pattern declarations from two template /// declarations. void ASTDeclReader::mergeTemplatePattern(RedeclarableTemplateDecl *D, RedeclarableTemplateDecl *Existing, DeclID DsID, bool IsKeyDecl) { auto *DPattern = D->getTemplatedDecl(); auto *ExistingPattern = Existing->getTemplatedDecl(); RedeclarableResult Result(/*MergeWith*/ ExistingPattern, DPattern->getCanonicalDecl()->getGlobalID(), IsKeyDecl); if (auto *DClass = dyn_cast(DPattern)) { // Merge with any existing definition. // FIXME: This is duplicated in several places. Refactor. auto *ExistingClass = cast(ExistingPattern)->getCanonicalDecl(); if (auto *DDD = DClass->DefinitionData) { if (ExistingClass->DefinitionData) { MergeDefinitionData(ExistingClass, std::move(*DDD)); } else { ExistingClass->DefinitionData = DClass->DefinitionData; // We may have skipped this before because we thought that DClass // was the canonical declaration. Reader.PendingDefinitions.insert(DClass); } } DClass->DefinitionData = ExistingClass->DefinitionData; return mergeRedeclarable(DClass, cast(ExistingPattern), Result); } if (auto *DFunction = dyn_cast(DPattern)) return mergeRedeclarable(DFunction, cast(ExistingPattern), Result); if (auto *DVar = dyn_cast(DPattern)) return mergeRedeclarable(DVar, cast(ExistingPattern), Result); if (auto *DAlias = dyn_cast(DPattern)) return mergeRedeclarable(DAlias, cast(ExistingPattern), Result); llvm_unreachable("merged an unknown kind of redeclarable template"); } /// \brief Attempts to merge the given declaration (D) with another declaration /// of the same entity. template void ASTDeclReader::mergeRedeclarable(Redeclarable *DBase, T *Existing, RedeclarableResult &Redecl, DeclID TemplatePatternID) { T *D = static_cast(DBase); T *ExistingCanon = Existing->getCanonicalDecl(); T *DCanon = D->getCanonicalDecl(); if (ExistingCanon != DCanon) { assert(DCanon->getGlobalID() == Redecl.getFirstID() && "already merged this declaration"); // Have our redeclaration link point back at the canonical declaration // of the existing declaration, so that this declaration has the // appropriate canonical declaration. D->RedeclLink = Redeclarable::PreviousDeclLink(ExistingCanon); D->First = ExistingCanon; ExistingCanon->Used |= D->Used; D->Used = false; // When we merge a namespace, update its pointer to the first namespace. // We cannot have loaded any redeclarations of this declaration yet, so // there's nothing else that needs to be updated. if (auto *Namespace = dyn_cast(D)) Namespace->AnonOrFirstNamespaceAndInline.setPointer( assert_cast(ExistingCanon)); // When we merge a template, merge its pattern. if (auto *DTemplate = dyn_cast(D)) mergeTemplatePattern( DTemplate, assert_cast(ExistingCanon), TemplatePatternID, Redecl.isKeyDecl()); // If this declaration is a key declaration, make a note of that. 
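  // Key declarations are the roots of each module's local redeclaration
  // chain; collecting them on the canonical declaration lets the reader
  // attach every imported chain to the merged entity later.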
if (Redecl.isKeyDecl()) Reader.KeyDecls[ExistingCanon].push_back(Redecl.getFirstID()); } } /// \brief Attempts to merge the given declaration (D) with another declaration /// of the same entity, for the case where the entity is not actually /// redeclarable. This happens, for instance, when merging the fields of /// identical class definitions from two different modules. template void ASTDeclReader::mergeMergeable(Mergeable *D) { // If modules are not available, there is no reason to perform this merge. if (!Reader.getContext().getLangOpts().Modules) return; // ODR-based merging is only performed in C++. In C, identically-named things // in different translation units are not redeclarations (but may still have // compatible types). if (!Reader.getContext().getLangOpts().CPlusPlus) return; if (FindExistingResult ExistingRes = findExisting(static_cast(D))) if (T *Existing = ExistingRes) Reader.Context.setPrimaryMergedDecl(static_cast(D), Existing->getCanonicalDecl()); } void ASTDeclReader::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) { VisitDecl(D); unsigned NumVars = D->varlist_size(); SmallVector Vars; Vars.reserve(NumVars); for (unsigned i = 0; i != NumVars; ++i) { Vars.push_back(Record.readExpr()); } D->setVars(Vars); } void ASTDeclReader::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) { VisitValueDecl(D); D->setLocation(ReadSourceLocation()); D->setCombiner(Record.readExpr()); D->setInitializer(Record.readExpr()); D->PrevDeclInScope = ReadDeclID(); } void ASTDeclReader::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) { VisitVarDecl(D); } //===----------------------------------------------------------------------===// // Attribute Reading //===----------------------------------------------------------------------===// /// \brief Reads attributes from the current stream position. void ASTReader::ReadAttributes(ModuleFile &F, AttrVec &Attrs, const RecordData &Record, unsigned &Idx) { for (unsigned i = 0, e = Record[Idx++]; i != e; ++i) { Attr *New = nullptr; attr::Kind Kind = (attr::Kind)Record[Idx++]; SourceRange Range = ReadSourceRange(F, Record, Idx); #include "clang/Serialization/AttrPCHRead.inc" assert(New && "Unable to decode attribute?"); Attrs.push_back(New); } } //===----------------------------------------------------------------------===// // ASTReader Implementation //===----------------------------------------------------------------------===// /// \brief Note that we have loaded the declaration with the given /// Index. /// /// This routine notes that this declaration has already been loaded, /// so that future GetDecl calls will return this declaration rather /// than trying to load a new declaration. inline void ASTReader::LoadedDecl(unsigned Index, Decl *D) { assert(!DeclsLoaded[Index] && "Decl loaded twice?"); DeclsLoaded[Index] = D; } /// \brief Determine whether the consumer will be interested in seeing /// this declaration (via HandleTopLevelDecl). /// /// This routine should return true for anything that might affect /// code generation, e.g., inline function definitions, Objective-C /// declarations with metadata, etc. static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) { // An ObjCMethodDecl is never considered as "interesting" because its // implementation container always is. // An ImportDecl or VarDecl imported from a module will get emitted when // we import the relevant module. 
-  if ((isa<ImportDecl>(D) || isa<VarDecl>(D)) && Ctx.DeclMustBeEmitted(D) &&
-      D->getImportedOwningModule())
+  if ((isa<ImportDecl>(D) || isa<VarDecl>(D)) && D->getImportedOwningModule() &&
+      Ctx.DeclMustBeEmitted(D))
     return false;

  if (isa<FileScopeAsmDecl>(D) ||
      isa<ObjCProtocolDecl>(D) ||
      isa<ObjCImplDecl>(D) ||
      isa<ImportDecl>(D) ||
      isa<PragmaCommentDecl>(D) ||
      isa<PragmaDetectMismatchDecl>(D))
    return true;
  if (isa<OMPThreadPrivateDecl>(D) || isa<OMPDeclareReductionDecl>(D))
    return !D->getDeclContext()->isFunctionOrMethod();
  if (VarDecl *Var = dyn_cast<VarDecl>(D))
    return Var->isFileVarDecl() &&
           Var->isThisDeclarationADefinition() == VarDecl::Definition;
  if (FunctionDecl *Func = dyn_cast<FunctionDecl>(D))
    return Func->doesThisDeclarationHaveABody() || HasBody;

  return false;
}

/// \brief Get the correct cursor and offset for loading a declaration.
ASTReader::RecordLocation
ASTReader::DeclCursorForID(DeclID ID, SourceLocation &Loc) {
  GlobalDeclMapType::iterator I = GlobalDeclMap.find(ID);
  assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
  ModuleFile *M = I->second;
  const DeclOffset &DOffs =
      M->DeclOffsets[ID - M->BaseDeclID - NUM_PREDEF_DECL_IDS];
  Loc = TranslateSourceLocation(*M, DOffs.getLocation());
  return RecordLocation(M, DOffs.BitOffset);
}

ASTReader::RecordLocation ASTReader::getLocalBitOffset(uint64_t GlobalOffset) {
  ContinuousRangeMap<uint64_t, ModuleFile*, 4>::iterator I
    = GlobalBitOffsetsMap.find(GlobalOffset);
  assert(I != GlobalBitOffsetsMap.end() && "Corrupted global bit offsets map");
  return RecordLocation(I->second, GlobalOffset - I->second->GlobalBitOffset);
}

uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint32_t LocalOffset) {
  return LocalOffset + M.GlobalBitOffset;
}

static bool isSameTemplateParameterList(const TemplateParameterList *X,
                                        const TemplateParameterList *Y);

/// \brief Determine whether two template parameters are similar enough
/// that they may be used in declarations of the same template.
static bool isSameTemplateParameter(const NamedDecl *X,
                                    const NamedDecl *Y) {
  if (X->getKind() != Y->getKind())
    return false;

  if (const TemplateTypeParmDecl *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
    const TemplateTypeParmDecl *TY = cast<TemplateTypeParmDecl>(Y);
    return TX->isParameterPack() == TY->isParameterPack();
  }

  if (const NonTypeTemplateParmDecl *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
    const NonTypeTemplateParmDecl *TY = cast<NonTypeTemplateParmDecl>(Y);
    return TX->isParameterPack() == TY->isParameterPack() &&
           TX->getASTContext().hasSameType(TX->getType(), TY->getType());
  }

  const TemplateTemplateParmDecl *TX = cast<TemplateTemplateParmDecl>(X);
  const TemplateTemplateParmDecl *TY = cast<TemplateTemplateParmDecl>(Y);
  return TX->isParameterPack() == TY->isParameterPack() &&
         isSameTemplateParameterList(TX->getTemplateParameters(),
                                     TY->getTemplateParameters());
}

static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) {
  if (auto *NS = X->getAsNamespace())
    return NS;
  if (auto *NAS = X->getAsNamespaceAlias())
    return NAS->getNamespace();
  return nullptr;
}

static bool isSameQualifier(const NestedNameSpecifier *X,
                            const NestedNameSpecifier *Y) {
  if (auto *NSX = getNamespace(X)) {
    auto *NSY = getNamespace(Y);
    if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
      return false;
  } else if (X->getKind() != Y->getKind())
    return false;

  // FIXME: For namespaces and types, we're permitted to check that the entity
  // is named via the same tokens. We should probably do so.
  switch (X->getKind()) {
  case NestedNameSpecifier::Identifier:
    if (X->getAsIdentifier() != Y->getAsIdentifier())
      return false;
    break;
  case NestedNameSpecifier::Namespace:
  case NestedNameSpecifier::NamespaceAlias:
    // We've already checked that we named the same namespace.
break; case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: if (X->getAsType()->getCanonicalTypeInternal() != Y->getAsType()->getCanonicalTypeInternal()) return false; break; case NestedNameSpecifier::Global: case NestedNameSpecifier::Super: return true; } // Recurse into earlier portion of NNS, if any. auto *PX = X->getPrefix(); auto *PY = Y->getPrefix(); if (PX && PY) return isSameQualifier(PX, PY); return !PX && !PY; } /// \brief Determine whether two template parameter lists are similar enough /// that they may be used in declarations of the same template. static bool isSameTemplateParameterList(const TemplateParameterList *X, const TemplateParameterList *Y) { if (X->size() != Y->size()) return false; for (unsigned I = 0, N = X->size(); I != N; ++I) if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I))) return false; return true; } /// \brief Determine whether the two declarations refer to the same entity. static bool isSameEntity(NamedDecl *X, NamedDecl *Y) { assert(X->getDeclName() == Y->getDeclName() && "Declaration name mismatch!"); if (X == Y) return true; // Must be in the same context. if (!X->getDeclContext()->getRedeclContext()->Equals( Y->getDeclContext()->getRedeclContext())) return false; // Two typedefs refer to the same entity if they have the same underlying // type. if (TypedefNameDecl *TypedefX = dyn_cast(X)) if (TypedefNameDecl *TypedefY = dyn_cast(Y)) return X->getASTContext().hasSameType(TypedefX->getUnderlyingType(), TypedefY->getUnderlyingType()); // Must have the same kind. if (X->getKind() != Y->getKind()) return false; // Objective-C classes and protocols with the same name always match. if (isa(X) || isa(X)) return true; if (isa(X)) { // No need to handle these here: we merge them when adding them to the // template. return false; } // Compatible tags match. if (TagDecl *TagX = dyn_cast(X)) { TagDecl *TagY = cast(Y); return (TagX->getTagKind() == TagY->getTagKind()) || ((TagX->getTagKind() == TTK_Struct || TagX->getTagKind() == TTK_Class || TagX->getTagKind() == TTK_Interface) && (TagY->getTagKind() == TTK_Struct || TagY->getTagKind() == TTK_Class || TagY->getTagKind() == TTK_Interface)); } // Functions with the same type and linkage match. // FIXME: This needs to cope with merging of prototyped/non-prototyped // functions, etc. if (FunctionDecl *FuncX = dyn_cast(X)) { FunctionDecl *FuncY = cast(Y); if (CXXConstructorDecl *CtorX = dyn_cast(X)) { CXXConstructorDecl *CtorY = cast(Y); if (CtorX->getInheritedConstructor() && !isSameEntity(CtorX->getInheritedConstructor().getConstructor(), CtorY->getInheritedConstructor().getConstructor())) return false; } return (FuncX->getLinkageInternal() == FuncY->getLinkageInternal()) && FuncX->getASTContext().hasSameType(FuncX->getType(), FuncY->getType()); } // Variables with the same type and linkage match. if (VarDecl *VarX = dyn_cast(X)) { VarDecl *VarY = cast(Y); if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) { ASTContext &C = VarX->getASTContext(); if (C.hasSameType(VarX->getType(), VarY->getType())) return true; // We can get decls with different types on the redecl chain. Eg. // template struct S { static T Var[]; }; // #1 // template T S::Var[sizeof(T)]; // #2 // Only? happens when completing an incomplete array type. In this case // when comparing #1 and #2 we should go through their element type. 
const ArrayType *VarXTy = C.getAsArrayType(VarX->getType()); const ArrayType *VarYTy = C.getAsArrayType(VarY->getType()); if (!VarXTy || !VarYTy) return false; if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType()) return C.hasSameType(VarXTy->getElementType(), VarYTy->getElementType()); } return false; } // Namespaces with the same name and inlinedness match. if (NamespaceDecl *NamespaceX = dyn_cast(X)) { NamespaceDecl *NamespaceY = cast(Y); return NamespaceX->isInline() == NamespaceY->isInline(); } // Identical template names and kinds match if their template parameter lists // and patterns match. if (TemplateDecl *TemplateX = dyn_cast(X)) { TemplateDecl *TemplateY = cast(Y); return isSameEntity(TemplateX->getTemplatedDecl(), TemplateY->getTemplatedDecl()) && isSameTemplateParameterList(TemplateX->getTemplateParameters(), TemplateY->getTemplateParameters()); } // Fields with the same name and the same type match. if (FieldDecl *FDX = dyn_cast(X)) { FieldDecl *FDY = cast(Y); // FIXME: Also check the bitwidth is odr-equivalent, if any. return X->getASTContext().hasSameType(FDX->getType(), FDY->getType()); } // Indirect fields with the same target field match. if (auto *IFDX = dyn_cast(X)) { auto *IFDY = cast(Y); return IFDX->getAnonField()->getCanonicalDecl() == IFDY->getAnonField()->getCanonicalDecl(); } // Enumerators with the same name match. if (isa(X)) // FIXME: Also check the value is odr-equivalent. return true; // Using shadow declarations with the same target match. if (UsingShadowDecl *USX = dyn_cast(X)) { UsingShadowDecl *USY = cast(Y); return USX->getTargetDecl() == USY->getTargetDecl(); } // Using declarations with the same qualifier match. (We already know that // the name matches.) if (auto *UX = dyn_cast(X)) { auto *UY = cast(Y); return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && UX->hasTypename() == UY->hasTypename() && UX->isAccessDeclaration() == UY->isAccessDeclaration(); } if (auto *UX = dyn_cast(X)) { auto *UY = cast(Y); return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && UX->isAccessDeclaration() == UY->isAccessDeclaration(); } if (auto *UX = dyn_cast(X)) return isSameQualifier( UX->getQualifier(), cast(Y)->getQualifier()); // Namespace alias definitions with the same target match. if (auto *NAX = dyn_cast(X)) { auto *NAY = cast(Y); return NAX->getNamespace()->Equals(NAY->getNamespace()); } return false; } /// Find the context in which we should search for previous declarations when /// looking for declarations to merge. DeclContext *ASTDeclReader::getPrimaryContextForMerging(ASTReader &Reader, DeclContext *DC) { if (NamespaceDecl *ND = dyn_cast(DC)) return ND->getOriginalNamespace(); if (CXXRecordDecl *RD = dyn_cast(DC)) { // Try to dig out the definition. auto *DD = RD->DefinitionData; if (!DD) DD = RD->getCanonicalDecl()->DefinitionData; // If there's no definition yet, then DC's definition is added by an update // record, but we've not yet loaded that update record. In this case, we // commit to DC being the canonical definition now, and will fix this when // we load the update record. if (!DD) { DD = new (Reader.Context) struct CXXRecordDecl::DefinitionData(RD); RD->IsCompleteDefinition = true; RD->DefinitionData = DD; RD->getCanonicalDecl()->DefinitionData = DD; // Track that we did this horrible thing so that we can fix it later. 
Reader.PendingFakeDefinitionData.insert( std::make_pair(DD, ASTReader::PendingFakeDefinitionKind::Fake)); } return DD->Definition; } if (EnumDecl *ED = dyn_cast(DC)) return ED->getASTContext().getLangOpts().CPlusPlus? ED->getDefinition() : nullptr; // We can see the TU here only if we have no Sema object. In that case, // there's no TU scope to look in, so using the DC alone is sufficient. if (auto *TU = dyn_cast(DC)) return TU; return nullptr; } ASTDeclReader::FindExistingResult::~FindExistingResult() { // Record that we had a typedef name for linkage whether or not we merge // with that declaration. if (TypedefNameForLinkage) { DeclContext *DC = New->getDeclContext()->getRedeclContext(); Reader.ImportedTypedefNamesForLinkage.insert( std::make_pair(std::make_pair(DC, TypedefNameForLinkage), New)); return; } if (!AddResult || Existing) return; DeclarationName Name = New->getDeclName(); DeclContext *DC = New->getDeclContext()->getRedeclContext(); if (needsAnonymousDeclarationNumber(New)) { setAnonymousDeclForMerging(Reader, New->getLexicalDeclContext(), AnonymousDeclNumber, New); } else if (DC->isTranslationUnit() && !Reader.getContext().getLangOpts().CPlusPlus) { if (Reader.getIdResolver().tryAddTopLevelDecl(New, Name)) Reader.PendingFakeLookupResults[Name.getAsIdentifierInfo()] .push_back(New); } else if (DeclContext *MergeDC = getPrimaryContextForMerging(Reader, DC)) { // Add the declaration to its redeclaration context so later merging // lookups will find it. MergeDC->makeDeclVisibleInContextImpl(New, /*Internal*/true); } } /// Find the declaration that should be merged into, given the declaration found /// by name lookup. If we're merging an anonymous declaration within a typedef, /// we need a matching typedef, and we merge with the type inside it. static NamedDecl *getDeclForMerging(NamedDecl *Found, bool IsTypedefNameForLinkage) { if (!IsTypedefNameForLinkage) return Found; // If we found a typedef declaration that gives a name to some other // declaration, then we want that inner declaration. Declarations from // AST files are handled via ImportedTypedefNamesForLinkage. if (Found->isFromASTFile()) return nullptr; if (auto *TND = dyn_cast(Found)) return TND->getAnonDeclWithTypedefName(/*AnyRedecl*/true); return nullptr; } NamedDecl *ASTDeclReader::getAnonymousDeclForMerging(ASTReader &Reader, DeclContext *DC, unsigned Index) { // If the lexical context has been merged, look into the now-canonical // definition. if (auto *Merged = Reader.MergedDeclContexts.lookup(DC)) DC = Merged; // If we've seen this before, return the canonical declaration. auto &Previous = Reader.AnonymousDeclarationsForMerging[DC]; if (Index < Previous.size() && Previous[Index]) return Previous[Index]; // If this is the first time, but we have parsed a declaration of the context, // build the anonymous declaration list from the parsed declaration. if (!cast(DC)->isFromASTFile()) { numberAnonymousDeclsWithin(DC, [&](NamedDecl *ND, unsigned Number) { if (Previous.size() == Number) Previous.push_back(cast(ND->getCanonicalDecl())); else Previous[Number] = cast(ND->getCanonicalDecl()); }); } return Index < Previous.size() ? 
Previous[Index] : nullptr; } void ASTDeclReader::setAnonymousDeclForMerging(ASTReader &Reader, DeclContext *DC, unsigned Index, NamedDecl *D) { if (auto *Merged = Reader.MergedDeclContexts.lookup(DC)) DC = Merged; auto &Previous = Reader.AnonymousDeclarationsForMerging[DC]; if (Index >= Previous.size()) Previous.resize(Index + 1); if (!Previous[Index]) Previous[Index] = D; } ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) { DeclarationName Name = TypedefNameForLinkage ? TypedefNameForLinkage : D->getDeclName(); if (!Name && !needsAnonymousDeclarationNumber(D)) { // Don't bother trying to find unnamed declarations that are in // unmergeable contexts. FindExistingResult Result(Reader, D, /*Existing=*/nullptr, AnonymousDeclNumber, TypedefNameForLinkage); Result.suppress(); return Result; } DeclContext *DC = D->getDeclContext()->getRedeclContext(); if (TypedefNameForLinkage) { auto It = Reader.ImportedTypedefNamesForLinkage.find( std::make_pair(DC, TypedefNameForLinkage)); if (It != Reader.ImportedTypedefNamesForLinkage.end()) if (isSameEntity(It->second, D)) return FindExistingResult(Reader, D, It->second, AnonymousDeclNumber, TypedefNameForLinkage); // Go on to check in other places in case an existing typedef name // was not imported. } if (needsAnonymousDeclarationNumber(D)) { // This is an anonymous declaration that we may need to merge. Look it up // in its context by number. if (auto *Existing = getAnonymousDeclForMerging( Reader, D->getLexicalDeclContext(), AnonymousDeclNumber)) if (isSameEntity(Existing, D)) return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber, TypedefNameForLinkage); } else if (DC->isTranslationUnit() && !Reader.getContext().getLangOpts().CPlusPlus) { IdentifierResolver &IdResolver = Reader.getIdResolver(); // Temporarily consider the identifier to be up-to-date. We don't want to // cause additional lookups here. class UpToDateIdentifierRAII { IdentifierInfo *II; bool WasOutToDate; public: explicit UpToDateIdentifierRAII(IdentifierInfo *II) : II(II), WasOutToDate(false) { if (II) { WasOutToDate = II->isOutOfDate(); if (WasOutToDate) II->setOutOfDate(false); } } ~UpToDateIdentifierRAII() { if (WasOutToDate) II->setOutOfDate(true); } } UpToDate(Name.getAsIdentifierInfo()); for (IdentifierResolver::iterator I = IdResolver.begin(Name), IEnd = IdResolver.end(); I != IEnd; ++I) { if (NamedDecl *Existing = getDeclForMerging(*I, TypedefNameForLinkage)) if (isSameEntity(Existing, D)) return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber, TypedefNameForLinkage); } } else if (DeclContext *MergeDC = getPrimaryContextForMerging(Reader, DC)) { DeclContext::lookup_result R = MergeDC->noload_lookup(Name); for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E; ++I) { if (NamedDecl *Existing = getDeclForMerging(*I, TypedefNameForLinkage)) if (isSameEntity(Existing, D)) return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber, TypedefNameForLinkage); } } else { // Not in a mergeable context. return FindExistingResult(Reader); } // If this declaration is from a merged context, make a note that we need to // check that the canonical definition of that context contains the decl. // // FIXME: We should do something similar if we merge two definitions of the // same template specialization into the same CXXRecordDecl. 
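  // (The check is deferred because the canonical definition may not be fully
  // loaded yet; a missing counterpart there is later diagnosed as an ODR
  // violation.)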
auto MergedDCIt = Reader.MergedDeclContexts.find(D->getLexicalDeclContext()); if (MergedDCIt != Reader.MergedDeclContexts.end() && MergedDCIt->second == D->getDeclContext()) Reader.PendingOdrMergeChecks.push_back(D); return FindExistingResult(Reader, D, /*Existing=*/nullptr, AnonymousDeclNumber, TypedefNameForLinkage); } template Decl *ASTDeclReader::getMostRecentDeclImpl(Redeclarable *D) { return D->RedeclLink.getLatestNotUpdated(); } Decl *ASTDeclReader::getMostRecentDeclImpl(...) { llvm_unreachable("getMostRecentDecl on non-redeclarable declaration"); } Decl *ASTDeclReader::getMostRecentDecl(Decl *D) { assert(D); switch (D->getKind()) { #define ABSTRACT_DECL(TYPE) #define DECL(TYPE, BASE) \ case Decl::TYPE: \ return getMostRecentDeclImpl(cast(D)); #include "clang/AST/DeclNodes.inc" } llvm_unreachable("unknown decl kind"); } Decl *ASTReader::getMostRecentExistingDecl(Decl *D) { return ASTDeclReader::getMostRecentDecl(D->getCanonicalDecl()); } template void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader, Redeclarable *D, Decl *Previous, Decl *Canon) { D->RedeclLink.setPrevious(cast(Previous)); D->First = cast(Previous)->First; } namespace clang { template<> void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader, Redeclarable *D, Decl *Previous, Decl *Canon) { VarDecl *VD = static_cast(D); VarDecl *PrevVD = cast(Previous); D->RedeclLink.setPrevious(PrevVD); D->First = PrevVD->First; // We should keep at most one definition on the chain. // FIXME: Cache the definition once we've found it. Building a chain with // N definitions currently takes O(N^2) time here. if (VD->isThisDeclarationADefinition() == VarDecl::Definition) { for (VarDecl *CurD = PrevVD; CurD; CurD = CurD->getPreviousDecl()) { if (CurD->isThisDeclarationADefinition() == VarDecl::Definition) { Reader.mergeDefinitionVisibility(CurD, VD); VD->demoteThisDefinitionToDeclaration(); break; } } } } template<> void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader, Redeclarable *D, Decl *Previous, Decl *Canon) { FunctionDecl *FD = static_cast(D); FunctionDecl *PrevFD = cast(Previous); FD->RedeclLink.setPrevious(PrevFD); FD->First = PrevFD->First; // If the previous declaration is an inline function declaration, then this // declaration is too. if (PrevFD->IsInline != FD->IsInline) { // FIXME: [dcl.fct.spec]p4: // If a function with external linkage is declared inline in one // translation unit, it shall be declared inline in all translation // units in which it appears. // // Be careful of this case: // // module A: // template struct X { void f(); }; // template inline void X::f() {} // // module B instantiates the declaration of X::f // module C instantiates the definition of X::f // // If module B and C are merged, we do not have a violation of this rule. FD->IsInline = true; } // If we need to propagate an exception specification along the redecl // chain, make a note of that so that we can do so later. auto *FPT = FD->getType()->getAs(); auto *PrevFPT = PrevFD->getType()->getAs(); if (FPT && PrevFPT) { bool IsUnresolved = isUnresolvedExceptionSpec(FPT->getExceptionSpecType()); bool WasUnresolved = isUnresolvedExceptionSpec(PrevFPT->getExceptionSpecType()); if (IsUnresolved != WasUnresolved) Reader.PendingExceptionSpecUpdates.insert( std::make_pair(Canon, IsUnresolved ? PrevFD : FD)); } } } // end namespace clang void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader, ...) 
{ llvm_unreachable("attachPreviousDecl on non-redeclarable declaration"); } /// Inherit the default template argument from \p From to \p To. Returns /// \c false if there is no default template for \p From. template static bool inheritDefaultTemplateArgument(ASTContext &Context, ParmDecl *From, Decl *ToD) { auto *To = cast(ToD); if (!From->hasDefaultArgument()) return false; To->setInheritedDefaultArgument(Context, From); return true; } static void inheritDefaultTemplateArguments(ASTContext &Context, TemplateDecl *From, TemplateDecl *To) { auto *FromTP = From->getTemplateParameters(); auto *ToTP = To->getTemplateParameters(); assert(FromTP->size() == ToTP->size() && "merged mismatched templates?"); for (unsigned I = 0, N = FromTP->size(); I != N; ++I) { NamedDecl *FromParam = FromTP->getParam(N - I - 1); if (FromParam->isParameterPack()) continue; NamedDecl *ToParam = ToTP->getParam(N - I - 1); if (auto *FTTP = dyn_cast(FromParam)) { if (!inheritDefaultTemplateArgument(Context, FTTP, ToParam)) break; } else if (auto *FNTTP = dyn_cast(FromParam)) { if (!inheritDefaultTemplateArgument(Context, FNTTP, ToParam)) break; } else { if (!inheritDefaultTemplateArgument( Context, cast(FromParam), ToParam)) break; } } } void ASTDeclReader::attachPreviousDecl(ASTReader &Reader, Decl *D, Decl *Previous, Decl *Canon) { assert(D && Previous); switch (D->getKind()) { #define ABSTRACT_DECL(TYPE) #define DECL(TYPE, BASE) \ case Decl::TYPE: \ attachPreviousDeclImpl(Reader, cast(D), Previous, Canon); \ break; #include "clang/AST/DeclNodes.inc" } // If the declaration was visible in one module, a redeclaration of it in // another module remains visible even if it wouldn't be visible by itself. // // FIXME: In this case, the declaration should only be visible if a module // that makes it visible has been imported. D->IdentifierNamespace |= Previous->IdentifierNamespace & (Decl::IDNS_Ordinary | Decl::IDNS_Tag | Decl::IDNS_Type); // If the declaration declares a template, it may inherit default arguments // from the previous declaration. if (TemplateDecl *TD = dyn_cast(D)) inheritDefaultTemplateArguments(Reader.getContext(), cast(Previous), TD); } template void ASTDeclReader::attachLatestDeclImpl(Redeclarable *D, Decl *Latest) { D->RedeclLink.setLatest(cast(Latest)); } void ASTDeclReader::attachLatestDeclImpl(...) { llvm_unreachable("attachLatestDecl on non-redeclarable declaration"); } void ASTDeclReader::attachLatestDecl(Decl *D, Decl *Latest) { assert(D && Latest); switch (D->getKind()) { #define ABSTRACT_DECL(TYPE) #define DECL(TYPE, BASE) \ case Decl::TYPE: \ attachLatestDeclImpl(cast(D), Latest); \ break; #include "clang/AST/DeclNodes.inc" } } template void ASTDeclReader::markIncompleteDeclChainImpl(Redeclarable *D) { D->RedeclLink.markIncomplete(); } void ASTDeclReader::markIncompleteDeclChainImpl(...) { llvm_unreachable("markIncompleteDeclChain on non-redeclarable declaration"); } void ASTReader::markIncompleteDeclChain(Decl *D) { switch (D->getKind()) { #define ABSTRACT_DECL(TYPE) #define DECL(TYPE, BASE) \ case Decl::TYPE: \ ASTDeclReader::markIncompleteDeclChainImpl(cast(D)); \ break; #include "clang/AST/DeclNodes.inc" } } /// \brief Read the declaration at the given offset from the AST file. 
/// \brief Read the declaration at the given offset from the AST file.
Decl *ASTReader::ReadDeclRecord(DeclID ID) {
  unsigned Index = ID - NUM_PREDEF_DECL_IDS;
  SourceLocation DeclLoc;
  RecordLocation Loc = DeclCursorForID(ID, DeclLoc);
  llvm::BitstreamCursor &DeclsCursor = Loc.F->DeclsCursor;
  // Keep track of where we are in the stream, then jump back there
  // after reading this declaration.
  SavedStreamPosition SavedPosition(DeclsCursor);

  ReadingKindTracker ReadingKind(Read_Decl, *this);

  // Note that we are loading a declaration record.
  Deserializing ADecl(this);

  DeclsCursor.JumpToBit(Loc.Offset);
  ASTRecordReader Record(*this, *Loc.F);
  ASTDeclReader Reader(*this, Record, Loc, ID, DeclLoc);
  unsigned Code = DeclsCursor.ReadCode();

  Decl *D = nullptr;
  switch ((DeclCode)Record.readRecord(DeclsCursor, Code)) {
  case DECL_CONTEXT_LEXICAL:
  case DECL_CONTEXT_VISIBLE:
    llvm_unreachable("Record cannot be de-serialized with ReadDeclRecord");
  case DECL_TYPEDEF:
    D = TypedefDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_TYPEALIAS:
    D = TypeAliasDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_ENUM:
    D = EnumDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_RECORD:
    D = RecordDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_ENUM_CONSTANT:
    D = EnumConstantDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_FUNCTION:
    D = FunctionDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_LINKAGE_SPEC:
    D = LinkageSpecDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_EXPORT:
    D = ExportDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_LABEL:
    D = LabelDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_NAMESPACE:
    D = NamespaceDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_NAMESPACE_ALIAS:
    D = NamespaceAliasDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_USING:
    D = UsingDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_USING_PACK:
    D = UsingPackDecl::CreateDeserialized(Context, ID, Record.readInt());
    break;
  case DECL_USING_SHADOW:
    D = UsingShadowDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_CONSTRUCTOR_USING_SHADOW:
    D = ConstructorUsingShadowDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_USING_DIRECTIVE:
    D = UsingDirectiveDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_UNRESOLVED_USING_VALUE:
    D = UnresolvedUsingValueDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_UNRESOLVED_USING_TYPENAME:
    D = UnresolvedUsingTypenameDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_CXX_RECORD:
    D = CXXRecordDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_CXX_METHOD:
    D = CXXMethodDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_CXX_CONSTRUCTOR:
    D = CXXConstructorDecl::CreateDeserialized(Context, ID, false);
    break;
  case DECL_CXX_INHERITED_CONSTRUCTOR:
    D = CXXConstructorDecl::CreateDeserialized(Context, ID, true);
    break;
  case DECL_CXX_DESTRUCTOR:
    D = CXXDestructorDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_CXX_CONVERSION:
    D = CXXConversionDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_ACCESS_SPEC:
    D = AccessSpecDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_FRIEND:
    D = FriendDecl::CreateDeserialized(Context, ID, Record.readInt());
    break;
  case DECL_FRIEND_TEMPLATE:
    D = FriendTemplateDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_CLASS_TEMPLATE:
    D = ClassTemplateDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_CLASS_TEMPLATE_SPECIALIZATION:
    D = ClassTemplateSpecializationDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION:
    D = ClassTemplatePartialSpecializationDecl::CreateDeserialized(Context,
                                                                   ID);
    break;
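  // Every case in this switch has the same shape: map the on-disk DeclCode to
  // the matching Decl subclass's CreateDeserialized(), which allocates an
  // empty declaration of the right size for this ID; the fields are filled in
  // afterwards by Reader.Visit(D). Variable-size records (DECL_USING_PACK,
  // DECL_FRIEND, and similar) first read a trailing integer from the record
  // to size the allocation.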
  case DECL_VAR_TEMPLATE:
    D = VarTemplateDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_VAR_TEMPLATE_SPECIALIZATION:
    D = VarTemplateSpecializationDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_VAR_TEMPLATE_PARTIAL_SPECIALIZATION:
    D = VarTemplatePartialSpecializationDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION:
    D = ClassScopeFunctionSpecializationDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_FUNCTION_TEMPLATE:
    D = FunctionTemplateDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_TEMPLATE_TYPE_PARM:
    D = TemplateTypeParmDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_NON_TYPE_TEMPLATE_PARM:
    D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK:
    D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID,
                                                    Record.readInt());
    break;
  case DECL_TEMPLATE_TEMPLATE_PARM:
    D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK:
    D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID,
                                                     Record.readInt());
    break;
  case DECL_TYPE_ALIAS_TEMPLATE:
    D = TypeAliasTemplateDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_STATIC_ASSERT:
    D = StaticAssertDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OBJC_METHOD:
    D = ObjCMethodDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OBJC_INTERFACE:
    D = ObjCInterfaceDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OBJC_IVAR:
    D = ObjCIvarDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OBJC_PROTOCOL:
    D = ObjCProtocolDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OBJC_AT_DEFS_FIELD:
    D = ObjCAtDefsFieldDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OBJC_CATEGORY:
    D = ObjCCategoryDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OBJC_CATEGORY_IMPL:
    D = ObjCCategoryImplDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OBJC_IMPLEMENTATION:
    D = ObjCImplementationDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OBJC_COMPATIBLE_ALIAS:
    D = ObjCCompatibleAliasDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OBJC_PROPERTY:
    D = ObjCPropertyDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OBJC_PROPERTY_IMPL:
    D = ObjCPropertyImplDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_FIELD:
    D = FieldDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_INDIRECTFIELD:
    D = IndirectFieldDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_VAR:
    D = VarDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_IMPLICIT_PARAM:
    D = ImplicitParamDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_PARM_VAR:
    D = ParmVarDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_DECOMPOSITION:
    D = DecompositionDecl::CreateDeserialized(Context, ID, Record.readInt());
    break;
  case DECL_BINDING:
    D = BindingDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_FILE_SCOPE_ASM:
    D = FileScopeAsmDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_BLOCK:
    D = BlockDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_MS_PROPERTY:
    D = MSPropertyDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_CAPTURED:
    D = CapturedDecl::CreateDeserialized(Context, ID, Record.readInt());
    break;
  case DECL_CXX_BASE_SPECIFIERS:
    Error("attempt to read a C++ base-specifier record as a declaration");
    return nullptr;
  case DECL_CXX_CTOR_INITIALIZERS:
    Error("attempt to read a C++ ctor initializer record as a declaration");
    return nullptr;
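  // Unlike the cases above, DECL_CXX_BASE_SPECIFIERS and
  // DECL_CXX_CTOR_INITIALIZERS are records that live in the decls bitstream
  // but are not themselves declarations; reaching one here means the AST file
  // is malformed, so we report an error and return nullptr rather than
  // asserting.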
  case DECL_IMPORT:
    // Note: last entry of the ImportDecl record is the number of stored source
    // locations.
    D = ImportDecl::CreateDeserialized(Context, ID, Record.back());
    break;
  case DECL_OMP_THREADPRIVATE:
    D = OMPThreadPrivateDecl::CreateDeserialized(Context, ID,
                                                 Record.readInt());
    break;
  case DECL_OMP_DECLARE_REDUCTION:
    D = OMPDeclareReductionDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OMP_CAPTUREDEXPR:
    D = OMPCapturedExprDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_PRAGMA_COMMENT:
    D = PragmaCommentDecl::CreateDeserialized(Context, ID, Record.readInt());
    break;
  case DECL_PRAGMA_DETECT_MISMATCH:
    D = PragmaDetectMismatchDecl::CreateDeserialized(Context, ID,
                                                     Record.readInt());
    break;
  case DECL_EMPTY:
    D = EmptyDecl::CreateDeserialized(Context, ID);
    break;
  case DECL_OBJC_TYPE_PARAM:
    D = ObjCTypeParamDecl::CreateDeserialized(Context, ID);
    break;
  }

  assert(D && "Unknown declaration reading AST file");
  LoadedDecl(Index, D);
  // Set the DeclContext before doing any deserialization, to make sure
  // internal calls to Decl::getASTContext() by Decl's methods will find the
  // TranslationUnitDecl without crashing.
  D->setDeclContext(Context.getTranslationUnitDecl());
  Reader.Visit(D);

  // If this declaration is also a declaration context, get the
  // offsets for its tables of lexical and visible declarations.
  if (DeclContext *DC = dyn_cast<DeclContext>(D)) {
    std::pair<uint64_t, uint64_t> Offsets = Reader.VisitDeclContext(DC);
    if (Offsets.first &&
        ReadLexicalDeclContextStorage(*Loc.F, DeclsCursor, Offsets.first, DC))
      return nullptr;
    if (Offsets.second &&
        ReadVisibleDeclContextStorage(*Loc.F, DeclsCursor, Offsets.second, ID))
      return nullptr;
  }
  assert(Record.getIdx() == Record.size());

  // Load any relevant update records.
  PendingUpdateRecords.push_back(std::make_pair(ID, D));

  // Load the categories after recursive loading is finished.
  if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(D))
    // If we already have a definition when deserializing the
    // ObjCInterfaceDecl, we put the Decl in PendingDefinitions so we can pull
    // the categories here.
    if (Class->isThisDeclarationADefinition() ||
        PendingDefinitions.count(Class))
      loadObjCCategories(ID, Class);

  // If we have deserialized a declaration that has a definition the
  // AST consumer might need to know about, queue it.
  // We don't pass it to the consumer immediately because we may be in
  // recursive loading, and some declarations may still be initializing.
  if (isConsumerInterestedIn(Context, D, Reader.hasPendingBody()))
    InterestingDecls.push_back(D);

  return D;
}
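// SavedStreamPosition, used at the top of ReadDeclRecord and in the readers
// below, is a small RAII guard: it records the bitstream offset on
// construction and jumps back to it on destruction, so recursive decl reads
// cannot disturb the caller's cursor. The idea, sketched with hypothetical
// names (illustrative only):
//
//   class SavedPos {
//     llvm::BitstreamCursor &Cursor;
//     uint64_t Offset;
//   public:
//     explicit SavedPos(llvm::BitstreamCursor &C)
//         : Cursor(C), Offset(C.GetCurrentBitNo()) {}
//     ~SavedPos() { Cursor.JumpToBit(Offset); }
//   };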
void ASTReader::loadDeclUpdateRecords(serialization::DeclID ID, Decl *D) {
  // The declaration may have been modified by files later in the chain.
  // If this is the case, read the record containing the updates from each file
  // and pass it to ASTDeclReader to make the modifications.
  ProcessingUpdatesRAIIObj ProcessingUpdates(*this);
  DeclUpdateOffsetsMap::iterator UpdI = DeclUpdateOffsets.find(ID);
  if (UpdI != DeclUpdateOffsets.end()) {
    auto UpdateOffsets = std::move(UpdI->second);
    DeclUpdateOffsets.erase(UpdI);

    bool WasInteresting = isConsumerInterestedIn(Context, D, false);
    for (auto &FileAndOffset : UpdateOffsets) {
      ModuleFile *F = FileAndOffset.first;
      uint64_t Offset = FileAndOffset.second;
      llvm::BitstreamCursor &Cursor = F->DeclsCursor;
      SavedStreamPosition SavedPosition(Cursor);
      Cursor.JumpToBit(Offset);
      unsigned Code = Cursor.ReadCode();
      ASTRecordReader Record(*this, *F);
      unsigned RecCode = Record.readRecord(Cursor, Code);
      (void)RecCode;
      assert(RecCode == DECL_UPDATES && "Expected DECL_UPDATES record!");

      ASTDeclReader Reader(*this, Record, RecordLocation(F, Offset), ID,
                           SourceLocation());
      Reader.UpdateDecl(D);

      // We might have made this declaration interesting. If so, remember that
      // we need to hand it off to the consumer.
      if (!WasInteresting &&
          isConsumerInterestedIn(Context, D, Reader.hasPendingBody())) {
        InterestingDecls.push_back(D);
        WasInteresting = true;
      }
    }
  }

  // Load the pending visible updates for this decl context, if it has any.
  auto I = PendingVisibleUpdates.find(ID);
  if (I != PendingVisibleUpdates.end()) {
    auto VisibleUpdates = std::move(I->second);
    PendingVisibleUpdates.erase(I);

    auto *DC = cast<DeclContext>(D)->getPrimaryContext();
    for (const PendingVisibleUpdate &Update : VisibleUpdates)
      Lookups[DC].Table.add(
          Update.Mod, Update.Data,
          reader::ASTDeclContextNameLookupTrait(*this, *Update.Mod));
    DC->setHasExternalVisibleStorage(true);
  }
}

void ASTReader::loadPendingDeclChain(Decl *FirstLocal, uint64_t LocalOffset) {
  // Attach FirstLocal to the end of the decl chain.
  Decl *CanonDecl = FirstLocal->getCanonicalDecl();
  if (FirstLocal != CanonDecl) {
    Decl *PrevMostRecent = ASTDeclReader::getMostRecentDecl(CanonDecl);
    ASTDeclReader::attachPreviousDecl(
        *this, FirstLocal, PrevMostRecent ? PrevMostRecent : CanonDecl,
        CanonDecl);
  }

  if (!LocalOffset) {
    ASTDeclReader::attachLatestDecl(CanonDecl, FirstLocal);
    return;
  }

  // Load the list of other redeclarations from this module file.
  ModuleFile *M = getOwningModuleFile(FirstLocal);
  assert(M && "imported decl from no module file");

  llvm::BitstreamCursor &Cursor = M->DeclsCursor;
  SavedStreamPosition SavedPosition(Cursor);
  Cursor.JumpToBit(LocalOffset);

  RecordData Record;
  unsigned Code = Cursor.ReadCode();
  unsigned RecCode = Cursor.readRecord(Code, Record);
  (void)RecCode;
  assert(RecCode == LOCAL_REDECLARATIONS &&
         "expected LOCAL_REDECLARATIONS record!");

  // FIXME: We have several different dispatches on decl kind here; maybe
  // we should instead generate one loop per kind and dispatch up-front?
  Decl *MostRecent = FirstLocal;
  for (unsigned I = 0, N = Record.size(); I != N; ++I) {
    auto *D = GetLocalDecl(*M, Record[N - I - 1]);
    ASTDeclReader::attachPreviousDecl(*this, D, MostRecent, CanonDecl);
    MostRecent = D;
  }
  ASTDeclReader::attachLatestDecl(CanonDecl, MostRecent);
}
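// A worked example of the loop above: given a LOCAL_REDECLARATIONS record
// {R0, R1, R2} and first local decl FL, the loop walks the record from the
// back, so the previous-links come out as
//
//   R0 -> R1 -> R2 -> FL -> (previously attached decls)
//
// and R0, the final MostRecent, is attached as the chain's latest decl.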
namespace {
  /// \brief Given an ObjC interface, goes through the modules and links to the
  /// interface all the categories for it.
  class ObjCCategoriesVisitor {
    ASTReader &Reader;
    ObjCInterfaceDecl *Interface;
    llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized;
    ObjCCategoryDecl *Tail;
    llvm::DenseMap<DeclarationName, ObjCCategoryDecl *> NameCategoryMap;
    serialization::GlobalDeclID InterfaceID;
    unsigned PreviousGeneration;

    void add(ObjCCategoryDecl *Cat) {
      // Only process each category once.
      if (!Deserialized.erase(Cat))
        return;

      // Check for duplicate categories.
      if (Cat->getDeclName()) {
        ObjCCategoryDecl *&Existing = NameCategoryMap[Cat->getDeclName()];
        if (Existing &&
            Reader.getOwningModuleFile(Existing) !=
                Reader.getOwningModuleFile(Cat)) {
          // FIXME: We should not warn for duplicates in diamond:
          //
          //   MT     //
          //  /  \    //
          // ML   MR  //
          //  \  /    //
          //   MB     //
          //
          // If there are duplicates in ML/MR, there will be a warning when
          // creating MB *and* when importing MB. We should not warn when
          // importing.
          Reader.Diag(Cat->getLocation(), diag::warn_dup_category_def)
            << Interface->getDeclName() << Cat->getDeclName();
          Reader.Diag(Existing->getLocation(),
                      diag::note_previous_definition);
        } else if (!Existing) {
          // Record this category.
          Existing = Cat;
        }
      }

      // Add this category to the end of the chain.
      if (Tail)
        ASTDeclReader::setNextObjCCategory(Tail, Cat);
      else
        Interface->setCategoryListRaw(Cat);
      Tail = Cat;
    }

  public:
    ObjCCategoriesVisitor(
        ASTReader &Reader, ObjCInterfaceDecl *Interface,
        llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized,
        serialization::GlobalDeclID InterfaceID, unsigned PreviousGeneration)
      : Reader(Reader), Interface(Interface), Deserialized(Deserialized),
        Tail(nullptr), InterfaceID(InterfaceID),
        PreviousGeneration(PreviousGeneration) {
      // Populate the name -> category map with the set of known categories.
      for (auto *Cat : Interface->known_categories()) {
        if (Cat->getDeclName())
          NameCategoryMap[Cat->getDeclName()] = Cat;

        // Keep track of the tail of the category list.
        Tail = Cat;
      }
    }

    bool operator()(ModuleFile &M) {
      // If we've loaded all of the category information we care about from
      // this module file, we're done.
      if (M.Generation <= PreviousGeneration)
        return true;

      // Map global ID of the definition down to the local ID used in this
      // module file. If there is no such mapping, we'll find nothing here
      // (or in any module it imports).
      DeclID LocalID = Reader.mapGlobalIDToModuleFileGlobalID(M, InterfaceID);
      if (!LocalID)
        return true;

      // Perform a binary search to find the local redeclarations for this
      // declaration (if any).
      const ObjCCategoriesInfo Compare = { LocalID, 0 };
      const ObjCCategoriesInfo *Result =
          std::lower_bound(M.ObjCCategoriesMap,
                           M.ObjCCategoriesMap + M.LocalNumObjCCategoriesInMap,
                           Compare);
      if (Result == M.ObjCCategoriesMap + M.LocalNumObjCCategoriesInMap ||
          Result->DefinitionID != LocalID) {
        // We didn't find anything. If the class definition is in this module
        // file, then the module files it depends on cannot have any
        // categories, so suppress further lookup.
        return Reader.isDeclIDFromModule(InterfaceID, M);
      }

      // We found something. Dig out all of the categories.
      unsigned Offset = Result->Offset;
      unsigned N = M.ObjCCategories[Offset];
      M.ObjCCategories[Offset++] = 0; // Don't try to deserialize again
      for (unsigned I = 0; I != N; ++I)
        add(cast_or_null<ObjCCategoryDecl>(
              Reader.GetLocalDecl(M, M.ObjCCategories[Offset++])));
      return true;
    }
  };
} // end anonymous namespace

void ASTReader::loadObjCCategories(serialization::GlobalDeclID ID,
                                   ObjCInterfaceDecl *D,
                                   unsigned PreviousGeneration) {
  ObjCCategoriesVisitor Visitor(*this, D, CategoriesDeserialized, ID,
                                PreviousGeneration);
  ModuleMgr.visit(Visitor);
}
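// The binary search in operator() above relies on M.ObjCCategoriesMap being
// sorted by DefinitionID. In isolation the lookup has this shape (a sketch
// with hypothetical names; ObjCCategoriesInfo supplies a suitable operator<
// in the real headers):
//
//   const ObjCCategoriesInfo Key = {LocalID, 0};
//   const ObjCCategoriesInfo *It = std::lower_bound(Map, Map + N, Key);
//   bool Found = It != Map + N && It->DefinitionID == LocalID;
//
// On a miss, lookup into the files this module depends on is suppressed only
// when the interface's definition lives in this module file.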
template<typename DeclT, typename Fn>
static void forAllLaterRedecls(DeclT *D, Fn F) {
  F(D);

  // Check whether we've already merged D into its redeclaration chain.
  // MostRecent may or may not be nullptr if D has not been merged. If
  // not, walk the merged redecl chain and see if it's there.
  auto *MostRecent = D->getMostRecentDecl();
  bool Found = false;
  for (auto *Redecl = MostRecent; Redecl && !Found;
       Redecl = Redecl->getPreviousDecl())
    Found = (Redecl == D);

  // If this declaration is merged, apply the functor to all later decls.
  if (Found) {
    for (auto *Redecl = MostRecent; Redecl != D;
         Redecl = Redecl->getPreviousDecl())
      F(Redecl);
  }
}
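// UpdateDecl below is the mutation side of deserialization: a DECL_UPDATES
// record is a flat sequence of (DeclUpdateKind, payload) pairs, consumed in a
// loop until the record is exhausted, roughly
//
//   [kind0][payload0...][kind1][payload1...]...
//
// (an illustrative layout, not a format specification). Payloads that must be
// read lazily, such as a stored function body, are required to come last,
// which is why UPD_CXX_ADDED_FUNCTION_DEFINITION may return instead of break.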
void ASTDeclReader::UpdateDecl(Decl *D) {
  while (Record.getIdx() < Record.size()) {
    switch ((DeclUpdateKind)Record.readInt()) {
    case UPD_CXX_ADDED_IMPLICIT_MEMBER: {
      auto *RD = cast<CXXRecordDecl>(D);
      // FIXME: If we also have an update record for instantiating the
      // definition of D, we need that to happen before we get here.
      Decl *MD = Record.readDecl();
      assert(MD && "couldn't read decl from update record");
      // FIXME: We should call addHiddenDecl instead, to add the member
      // to its DeclContext.
      RD->addedMember(MD);
      break;
    }

    case UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION:
      // It will be added to the template's specializations set when loaded.
      (void)Record.readDecl();
      break;

    case UPD_CXX_ADDED_ANONYMOUS_NAMESPACE: {
      NamespaceDecl *Anon = ReadDeclAs<NamespaceDecl>();

      // Each module has its own anonymous namespace, which is disjoint from
      // any other module's anonymous namespaces, so don't attach the anonymous
      // namespace at all.
      if (!Record.isModule()) {
        if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(D))
          TU->setAnonymousNamespace(Anon);
        else
          cast<NamespaceDecl>(D)->setAnonymousNamespace(Anon);
      }
      break;
    }

    case UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER:
      cast<VarDecl>(D)->getMemberSpecializationInfo()->setPointOfInstantiation(
          ReadSourceLocation());
      break;

    case UPD_CXX_INSTANTIATED_DEFAULT_ARGUMENT: {
      auto Param = cast<ParmVarDecl>(D);

      // We have to read the default argument regardless of whether we use it
      // so that hypothetical further update records aren't messed up.
      // TODO: Add a function to skip over the next expr record.
      auto DefaultArg = Record.readExpr();

      // Only apply the update if the parameter still has an uninstantiated
      // default argument.
      if (Param->hasUninstantiatedDefaultArg())
        Param->setDefaultArg(DefaultArg);
      break;
    }

    case UPD_CXX_INSTANTIATED_DEFAULT_MEMBER_INITIALIZER: {
      auto FD = cast<FieldDecl>(D);
      auto DefaultInit = Record.readExpr();

      // Only apply the update if the field still has an uninstantiated
      // default member initializer.
      if (FD->hasInClassInitializer() && !FD->getInClassInitializer()) {
        if (DefaultInit)
          FD->setInClassInitializer(DefaultInit);
        else
          // Instantiation failed. We can get here if we serialized an AST for
          // an invalid program.
          FD->removeInClassInitializer();
      }
      break;
    }

    case UPD_CXX_ADDED_FUNCTION_DEFINITION: {
      FunctionDecl *FD = cast<FunctionDecl>(D);
      if (Reader.PendingBodies[FD]) {
        // FIXME: Maybe check for ODR violations.
        // It's safe to stop now because this update record is always last.
        return;
      }

      if (Record.readInt()) {
        // Maintain AST consistency: any later redeclarations of this function
        // are inline if this one is. (We might have merged another declaration
        // into this one.)
        forAllLaterRedecls(FD, [](FunctionDecl *FD) {
          FD->setImplicitlyInline();
        });
      }
      FD->setInnerLocStart(ReadSourceLocation());
      if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) {
        CD->NumCtorInitializers = Record.readInt();
        if (CD->NumCtorInitializers)
          CD->CtorInitializers = ReadGlobalOffset();
      }
      // Store the offset of the body so we can lazily load it later.
      Reader.PendingBodies[FD] = GetCurrentCursorOffset();
      HasPendingBody = true;
      assert(Record.getIdx() == Record.size() && "lazy body must be last");
      break;
    }

    case UPD_CXX_INSTANTIATED_CLASS_DEFINITION: {
      auto *RD = cast<CXXRecordDecl>(D);
      auto *OldDD = RD->getCanonicalDecl()->DefinitionData;
      bool HadRealDefinition =
          OldDD && (OldDD->Definition != RD ||
                    !Reader.PendingFakeDefinitionData.count(OldDD));
      ReadCXXRecordDefinition(RD, /*Update*/true);

      // Visible update is handled separately.
      uint64_t LexicalOffset = ReadLocalOffset();
      if (!HadRealDefinition && LexicalOffset) {
        Record.readLexicalDeclContextStorage(LexicalOffset, RD);
        Reader.PendingFakeDefinitionData.erase(OldDD);
      }

      auto TSK = (TemplateSpecializationKind)Record.readInt();
      SourceLocation POI = ReadSourceLocation();
      if (MemberSpecializationInfo *MSInfo =
              RD->getMemberSpecializationInfo()) {
        MSInfo->setTemplateSpecializationKind(TSK);
        MSInfo->setPointOfInstantiation(POI);
      } else {
        ClassTemplateSpecializationDecl *Spec =
            cast<ClassTemplateSpecializationDecl>(RD);
        Spec->setTemplateSpecializationKind(TSK);
        Spec->setPointOfInstantiation(POI);

        if (Record.readInt()) {
          auto PartialSpec =
              ReadDeclAs<ClassTemplatePartialSpecializationDecl>();
          SmallVector<TemplateArgument, 8> TemplArgs;
          Record.readTemplateArgumentList(TemplArgs);
          auto *TemplArgList = TemplateArgumentList::CreateCopy(
              Reader.getContext(), TemplArgs);

          // FIXME: If we already have a partial specialization set,
          // check that it matches.
          if (!Spec->getSpecializedTemplateOrPartial()
                   .is<ClassTemplatePartialSpecializationDecl *>())
            Spec->setInstantiationOf(PartialSpec, TemplArgList);
        }
      }

      RD->setTagKind((TagTypeKind)Record.readInt());
      RD->setLocation(ReadSourceLocation());
      RD->setLocStart(ReadSourceLocation());
      RD->setBraceRange(ReadSourceRange());

      if (Record.readInt()) {
        AttrVec Attrs;
        Record.readAttributes(Attrs);
        // If the declaration already has attributes, we assume that some other
        // AST file already loaded them.
        if (!D->hasAttrs())
          D->setAttrsImpl(Attrs, Reader.getContext());
      }
      break;
    }

    case UPD_CXX_RESOLVED_DTOR_DELETE: {
      // Set the 'operator delete' directly to avoid emitting another update
      // record.
      auto *Del = ReadDeclAs<FunctionDecl>();
      auto *First = cast<CXXDestructorDecl>(D->getCanonicalDecl());
      // FIXME: Check consistency if we have an old and new operator delete.
      if (!First->OperatorDelete)
        First->OperatorDelete = Del;
      break;
    }

    case UPD_CXX_RESOLVED_EXCEPTION_SPEC: {
      FunctionProtoType::ExceptionSpecInfo ESI;
      SmallVector<QualType, 8> ExceptionStorage;
      Record.readExceptionSpec(ExceptionStorage, ESI);

      // Update this declaration's exception specification, if needed.
      auto *FD = cast<FunctionDecl>(D);
      auto *FPT = FD->getType()->castAs<FunctionProtoType>();
      // FIXME: If the exception specification is already present, check that
      // it matches.
      if (isUnresolvedExceptionSpec(FPT->getExceptionSpecType())) {
        FD->setType(Reader.Context.getFunctionType(
            FPT->getReturnType(), FPT->getParamTypes(),
            FPT->getExtProtoInfo().withExceptionSpec(ESI)));

        // When we get to the end of deserializing, see if there are other
        // decls that we need to propagate this exception specification onto.
        Reader.PendingExceptionSpecUpdates.insert(
            std::make_pair(FD->getCanonicalDecl(), FD));
      }
      break;
    }

    case UPD_CXX_DEDUCED_RETURN_TYPE: {
      // FIXME: Also do this when merging redecls.
      QualType DeducedResultType = Record.readType();
      for (auto *Redecl : merged_redecls(D)) {
        // FIXME: If the return type is already deduced, check that it matches.
        FunctionDecl *FD = cast<FunctionDecl>(Redecl);
        Reader.Context.adjustDeducedFunctionResultType(FD, DeducedResultType);
      }
      break;
    }
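    // The remaining update kinds are simple property mutations: marking the
    // declaration used, recording mangling or static-local numbers, attaching
    // attributes, or exporting a definition to a module, each mirroring the
    // Sema-side change that emitted the update record.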
    case UPD_DECL_MARKED_USED: {
      // Maintain AST consistency: any later redeclarations are used too.
      D->markUsed(Reader.Context);
      break;
    }

    case UPD_MANGLING_NUMBER:
      Reader.Context.setManglingNumber(cast<NamedDecl>(D), Record.readInt());
      break;

    case UPD_STATIC_LOCAL_NUMBER:
      Reader.Context.setStaticLocalNumber(cast<VarDecl>(D), Record.readInt());
      break;

    case UPD_DECL_MARKED_OPENMP_THREADPRIVATE:
      D->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
          Reader.Context, ReadSourceRange()));
      break;

    case UPD_DECL_EXPORTED: {
      unsigned SubmoduleID = readSubmoduleID();
      auto *Exported = cast<NamedDecl>(D);
      if (auto *TD = dyn_cast<TagDecl>(Exported))
        Exported = TD->getDefinition();
      Module *Owner = SubmoduleID ? Reader.getSubmodule(SubmoduleID) : nullptr;
      if (Reader.getContext().getLangOpts().ModulesLocalVisibility) {
        Reader.getContext().mergeDefinitionIntoModule(cast<NamedDecl>(Exported),
                                                      Owner);
        Reader.PendingMergedDefinitionsToDeduplicate.insert(
            cast<NamedDecl>(Exported));
      } else if (Owner && Owner->NameVisibility != Module::AllVisible) {
        // If Owner is made visible at some later point, make this declaration
        // visible too.
        Reader.HiddenNamesMap[Owner].push_back(Exported);
      } else {
        // The declaration is now visible.
        Exported->Hidden = false;
      }
      break;
    }

    case UPD_DECL_MARKED_OPENMP_DECLARETARGET:
    case UPD_ADDED_ATTR_TO_RECORD:
      AttrVec Attrs;
      Record.readAttributes(Attrs);
      assert(Attrs.size() == 1);
      D->addAttr(Attrs[0]);
      break;
    }
  }
}
Index: head/contrib/llvm/tools/clang
===================================================================
--- head/contrib/llvm/tools/clang	(revision 315015)
+++ head/contrib/llvm/tools/clang	(revision 315016)

Property changes on: head/contrib/llvm/tools/clang
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /vendor/clang/dist:r314418-314984
Index: head/contrib/llvm/tools/lld
===================================================================
--- head/contrib/llvm/tools/lld	(revision 315015)
+++ head/contrib/llvm/tools/lld	(revision 315016)

Property changes on: head/contrib/llvm/tools/lld
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /vendor/lld/dist:r314418-314984
Index: head/contrib/llvm/tools/lldb
===================================================================
--- head/contrib/llvm/tools/lldb	(revision 315015)
+++ head/contrib/llvm/tools/lldb	(revision 315016)

Property changes on: head/contrib/llvm/tools/lldb
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /vendor/lldb/dist:r314418-314984
Index: head/contrib/llvm
===================================================================
--- head/contrib/llvm	(revision 315015)
+++ head/contrib/llvm	(revision 315016)

Property changes on: head/contrib/llvm
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /vendor/llvm/dist:r314418-314984
Index: head/lib/clang/include/clang/Basic/Version.inc
===================================================================
--- head/lib/clang/include/clang/Basic/Version.inc	(revision 315015)
+++ head/lib/clang/include/clang/Basic/Version.inc	(revision 315016)
@@ -1,11 +1,11 @@
 /* $FreeBSD$ */
 
 #define CLANG_VERSION 4.0.0
 #define CLANG_VERSION_STRING "4.0.0"
 #define CLANG_VERSION_MAJOR 4
 #define CLANG_VERSION_MINOR 0
 #define CLANG_VERSION_PATCHLEVEL 0
 
 #define CLANG_VENDOR "FreeBSD "
 
-#define SVN_REVISION "296509"
+#define SVN_REVISION "297347"
Index: head/lib/clang/include/lld/Config/Version.inc
===================================================================
--- head/lib/clang/include/lld/Config/Version.inc	(revision 315015)
+++ head/lib/clang/include/lld/Config/Version.inc	(revision 315016)
@@ -1,8 +1,8 @@
 // $FreeBSD$
 
 #define LLD_VERSION 4.0.0
 #define LLD_VERSION_STRING "4.0.0"
 #define LLD_VERSION_MAJOR 4
 #define LLD_VERSION_MINOR 0
-#define LLD_REVISION_STRING "296509"
+#define LLD_REVISION_STRING "297347"
 #define LLD_REPOSITORY_STRING "FreeBSD"