diff --git a/clang/lib/Tooling/Syntax/Tokens.cpp b/clang/lib/Tooling/Syntax/Tokens.cpp index e2014f965c90..1fa73c667b7f 100644 --- a/clang/lib/Tooling/Syntax/Tokens.cpp +++ b/clang/lib/Tooling/Syntax/Tokens.cpp @@ -1,931 +1,1025 @@ //===- Tokens.cpp - collect tokens from preprocessing ---------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "clang/Tooling/Syntax/Tokens.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TokenKinds.h" #include "clang/Lex/PPCallbacks.h" #include "clang/Lex/Preprocessor.h" #include "clang/Lex/Token.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/None.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FormatVariadic.h" #include "llvm/Support/raw_ostream.h" #include #include #include #include #include #include using namespace clang; using namespace clang::syntax; namespace { // Finds the smallest consecutive subsuquence of Toks that covers R. llvm::ArrayRef getTokensCovering(llvm::ArrayRef Toks, SourceRange R, const SourceManager &SM) { if (R.isInvalid()) return {}; const syntax::Token *Begin = llvm::partition_point(Toks, [&](const syntax::Token &T) { return SM.isBeforeInTranslationUnit(T.location(), R.getBegin()); }); const syntax::Token *End = llvm::partition_point(Toks, [&](const syntax::Token &T) { return !SM.isBeforeInTranslationUnit(R.getEnd(), T.location()); }); if (Begin > End) return {}; return {Begin, End}; } -// Finds the smallest expansion range that contains expanded tokens First and -// Last, e.g.: +// Finds the range within FID corresponding to expanded tokens [First, Last]. +// Prev precedes First and Next follows Last, these must *not* be included. +// If no range satisfies the criteria, returns an invalid range. +// // #define ID(x) x // ID(ID(ID(a1) a2)) // ~~ -> a1 // ~~ -> a2 // ~~~~~~~~~ -> a1 a2 -SourceRange findCommonRangeForMacroArgs(const syntax::Token &First, - const syntax::Token &Last, - const SourceManager &SM) { - SourceRange Res; - auto FirstLoc = First.location(), LastLoc = Last.location(); - // Keep traversing up the spelling chain as longs as tokens are part of the - // same expansion. - while (!FirstLoc.isFileID() && !LastLoc.isFileID()) { - auto ExpInfoFirst = SM.getSLocEntry(SM.getFileID(FirstLoc)).getExpansion(); - auto ExpInfoLast = SM.getSLocEntry(SM.getFileID(LastLoc)).getExpansion(); - // Stop if expansions have diverged. - if (ExpInfoFirst.getExpansionLocStart() != - ExpInfoLast.getExpansionLocStart()) +SourceRange spelledForExpandedSlow(SourceLocation First, SourceLocation Last, + SourceLocation Prev, SourceLocation Next, + FileID TargetFile, + const SourceManager &SM) { + // There are two main parts to this algorithm: + // - identifying which spelled range covers the expanded tokens + // - validating that this range doesn't cover any extra tokens (First/Last) + // + // We do these in order. However as we transform the expanded range into the + // spelled one, we adjust First/Last so the validation remains simple. 
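// Illustrative sketch (not part of the patch) of the binary-search idea used by
// getTokensCovering() above: std::partition_point finds the first token at or after
// the range begin and the first token strictly after the range end, which together
// bound the smallest covering half-open token subsequence. Tokens are modeled here
// as plain begin/end offsets; the real code compares SourceLocations with
// isBeforeInTranslationUnit instead.
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

struct ToyToken { unsigned Begin, End; };

// Returns [First, Last) covering the offset range [RBegin, REnd].
static std::pair<const ToyToken *, const ToyToken *>
toyTokensCovering(const std::vector<ToyToken> &Toks, unsigned RBegin,
                  unsigned REnd) {
  const ToyToken *First = std::partition_point(
      Toks.data(), Toks.data() + Toks.size(),
      [&](const ToyToken &T) { return T.Begin < RBegin; });
  const ToyToken *Last = std::partition_point(
      Toks.data(), Toks.data() + Toks.size(),
      [&](const ToyToken &T) { return T.Begin <= REnd; });
  if (First > Last)
    return {nullptr, nullptr};
  return {First, Last};
}

int main() {
  std::vector<ToyToken> Toks = {{0, 3}, {4, 5}, {6, 9}, {10, 11}};
  auto [F, L] = toyTokensCovering(Toks, 4, 7); // covers tokens [4,5) and [6,9)
  std::printf("%zu tokens covered\n", static_cast<size_t>(L - F));
}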
+ + assert(SM.getSLocEntry(TargetFile).isFile()); + // In most cases, to select First and Last we must return their expansion + // range, i.e. the whole of any macros they are included in. + // + // When First and Last are part of the *same macro arg* of a macro written + // in TargetFile, we that slice of the arg, i.e. their spelling range. + // + // Unwrap such macro calls. If the target file has A(B(C)), the + // SourceLocation stack of a token inside C shows us the expansion of A first, + // then B, then any macros inside C's body, then C itself. + // (This is the reverse of the order the PP applies the expansions in). + while (First.isMacroID() && Last.isMacroID()) { + auto DecFirst = SM.getDecomposedLoc(First); + auto DecLast = SM.getDecomposedLoc(Last); + auto &ExpFirst = SM.getSLocEntry(DecFirst.first).getExpansion(); + auto &ExpLast = SM.getSLocEntry(DecLast.first).getExpansion(); + + if (!ExpFirst.isMacroArgExpansion() || !ExpLast.isMacroArgExpansion()) + break; + // Locations are in the same macro arg if they expand to the same place. + // (They may still have different FileIDs - an arg can have >1 chunks!) + if (ExpFirst.getExpansionLocStart() != ExpLast.getExpansionLocStart()) break; - // Do not continue into macro bodies. - if (!ExpInfoFirst.isMacroArgExpansion() || - !ExpInfoLast.isMacroArgExpansion()) + // Careful, given: + // #define HIDE ID(ID(a)) + // ID(ID(HIDE)) + // The token `a` is wrapped in 4 arg-expansions, we only want to unwrap 2. + // We distinguish them by whether the macro expands into the target file. + // Fortunately, the target file ones will always appear first. + auto &ExpMacro = + SM.getSLocEntry(SM.getFileID(ExpFirst.getExpansionLocStart())) + .getExpansion(); + if (ExpMacro.getExpansionLocStart().isMacroID()) break; - FirstLoc = SM.getImmediateSpellingLoc(FirstLoc); - LastLoc = SM.getImmediateSpellingLoc(LastLoc); - // Update the result afterwards, as we want the tokens that triggered the - // expansion. - Res = {FirstLoc, LastLoc}; + // Replace each endpoint with its spelling inside the macro arg. + // (This is getImmediateSpellingLoc without repeating lookups). + First = ExpFirst.getSpellingLoc().getLocWithOffset(DecFirst.second); + Last = ExpLast.getSpellingLoc().getLocWithOffset(DecLast.second); + + // Now: how do we adjust the previous/next bounds? Three cases: + // A) If they are also part of the same macro arg, we translate them too. + // This will ensure that we don't select any macros nested within the + // macro arg that cover extra tokens. Critical case: + // #define ID(X) X + // ID(prev target) // selecting 'target' succeeds + // #define LARGE ID(prev target) + // LARGE // selecting 'target' fails. + // B) They are not in the macro at all, then their expansion range is a + // sibling to it, and we can safely substitute that. + // #define PREV prev + // #define ID(X) X + // PREV ID(target) // selecting 'target' succeeds. + // #define LARGE PREV ID(target) + // LARGE // selecting 'target' fails. + // C) They are in a different arg of this macro, or the macro body. + // Now selecting the whole macro arg is fine, but the whole macro is not. + // Model this by setting using the edge of the macro call as the bound. + // #define ID2(X, Y) X Y + // ID2(prev, target) // selecting 'target' succeeds + // #define LARGE ID2(prev, target) + // LARGE // selecting 'target' fails + auto AdjustBound = [&](SourceLocation &Bound) { + if (Bound.isInvalid() || !Bound.isMacroID()) // Non-macro must be case B. 
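// The nesting described in the comments above can be reproduced with a tiny input
// run through `clang -E`; this is simply the example from the comments made into a
// compilable file (the `int a/b` declarations are added for illustration). The token
// `a` inside HIDE ends up behind four layers of macro-arg expansion, but only the two
// ID(...) calls spelled in this file should be unwrapped when mapping it back.
#define ID(X) X
#define HIDE ID(ID(a))
int a = 1;
int b = ID(ID(HIDE)); // after all arg-expansions this is just: int b = a;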
+ return; + auto DecBound = SM.getDecomposedLoc(Bound); + auto &ExpBound = SM.getSLocEntry(DecBound.first).getExpansion(); + if (ExpBound.isMacroArgExpansion() && + ExpBound.getExpansionLocStart() == ExpFirst.getExpansionLocStart()) { + // Case A: translate to (spelling) loc within the macro arg. + Bound = ExpBound.getSpellingLoc().getLocWithOffset(DecBound.second); + return; + } + while (Bound.isMacroID()) { + SourceRange Exp = SM.getImmediateExpansionRange(Bound).getAsRange(); + if (Exp.getBegin() == ExpMacro.getExpansionLocStart()) { + // Case B: bounds become the macro call itself. + Bound = (&Bound == &Prev) ? Exp.getBegin() : Exp.getEnd(); + return; + } + // Either case C, or expansion location will later find case B. + // We choose the upper bound for Prev and the lower one for Next: + // ID(prev) target ID(next) + // ^ ^ + // new-prev new-next + Bound = (&Bound == &Prev) ? Exp.getEnd() : Exp.getBegin(); + } + }; + AdjustBound(Prev); + AdjustBound(Next); } - // Normally mapping back to expansion location here only changes FileID, as - // we've already found some tokens expanded from the same macro argument, and - // they should map to a consecutive subset of spelled tokens. Unfortunately - // SourceManager::isBeforeInTranslationUnit discriminates sourcelocations - // based on their FileID in addition to offsets. So even though we are - // referring to same tokens, SourceManager might tell us that one is before - // the other if they've got different FileIDs. - return SM.getExpansionRange(CharSourceRange(Res, true)).getAsRange(); + + // In all remaining cases we need the full containing macros. + // If this overlaps Prev or Next, then no range is possible. + SourceRange Candidate = + SM.getExpansionRange(SourceRange(First, Last)).getAsRange(); + auto DecFirst = SM.getDecomposedExpansionLoc(Candidate.getBegin()); + auto DecLast = SM.getDecomposedLoc(Candidate.getEnd()); + // Can end up in the wrong file due to bad input or token-pasting shenanigans. + if (Candidate.isInvalid() || DecFirst.first != TargetFile || DecLast.first != TargetFile) + return SourceRange(); + // Check bounds, which may still be inside macros. + if (Prev.isValid()) { + auto Dec = SM.getDecomposedLoc(SM.getExpansionRange(Prev).getBegin()); + if (Dec.first != DecFirst.first || Dec.second >= DecFirst.second) + return SourceRange(); + } + if (Next.isValid()) { + auto Dec = SM.getDecomposedLoc(SM.getExpansionRange(Next).getEnd()); + if (Dec.first != DecLast.first || Dec.second <= DecLast.second) + return SourceRange(); + } + // Now we know that Candidate is a file range that covers [First, Last] + // without encroaching on {Prev, Next}. Ship it! 
+ return Candidate; } } // namespace syntax::Token::Token(SourceLocation Location, unsigned Length, tok::TokenKind Kind) : Location(Location), Length(Length), Kind(Kind) { assert(Location.isValid()); } syntax::Token::Token(const clang::Token &T) : Token(T.getLocation(), T.getLength(), T.getKind()) { assert(!T.isAnnotation()); } llvm::StringRef syntax::Token::text(const SourceManager &SM) const { bool Invalid = false; const char *Start = SM.getCharacterData(location(), &Invalid); assert(!Invalid); return llvm::StringRef(Start, length()); } FileRange syntax::Token::range(const SourceManager &SM) const { assert(location().isFileID() && "must be a spelled token"); FileID File; unsigned StartOffset; std::tie(File, StartOffset) = SM.getDecomposedLoc(location()); return FileRange(File, StartOffset, StartOffset + length()); } FileRange syntax::Token::range(const SourceManager &SM, const syntax::Token &First, const syntax::Token &Last) { auto F = First.range(SM); auto L = Last.range(SM); assert(F.file() == L.file() && "tokens from different files"); assert((F == L || F.endOffset() <= L.beginOffset()) && "wrong order of tokens"); return FileRange(F.file(), F.beginOffset(), L.endOffset()); } llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, const Token &T) { return OS << T.str(); } FileRange::FileRange(FileID File, unsigned BeginOffset, unsigned EndOffset) : File(File), Begin(BeginOffset), End(EndOffset) { assert(File.isValid()); assert(BeginOffset <= EndOffset); } FileRange::FileRange(const SourceManager &SM, SourceLocation BeginLoc, unsigned Length) { assert(BeginLoc.isValid()); assert(BeginLoc.isFileID()); std::tie(File, Begin) = SM.getDecomposedLoc(BeginLoc); End = Begin + Length; } FileRange::FileRange(const SourceManager &SM, SourceLocation BeginLoc, SourceLocation EndLoc) { assert(BeginLoc.isValid()); assert(BeginLoc.isFileID()); assert(EndLoc.isValid()); assert(EndLoc.isFileID()); assert(SM.getFileID(BeginLoc) == SM.getFileID(EndLoc)); assert(SM.getFileOffset(BeginLoc) <= SM.getFileOffset(EndLoc)); std::tie(File, Begin) = SM.getDecomposedLoc(BeginLoc); End = SM.getFileOffset(EndLoc); } llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, const FileRange &R) { return OS << llvm::formatv("FileRange(file = {0}, offsets = {1}-{2})", R.file().getHashValue(), R.beginOffset(), R.endOffset()); } llvm::StringRef FileRange::text(const SourceManager &SM) const { bool Invalid = false; StringRef Text = SM.getBufferData(File, &Invalid); if (Invalid) return ""; assert(Begin <= Text.size()); assert(End <= Text.size()); return Text.substr(Begin, length()); } void TokenBuffer::indexExpandedTokens() { // No-op if the index is already created. if (!ExpandedTokIndex.empty()) return; ExpandedTokIndex.reserve(ExpandedTokens.size()); // Index ExpandedTokens for faster lookups by SourceLocation. for (size_t I = 0, E = ExpandedTokens.size(); I != E; ++I) { SourceLocation Loc = ExpandedTokens[I].location(); if (Loc.isValid()) ExpandedTokIndex[Loc] = I; } } llvm::ArrayRef TokenBuffer::expandedTokens(SourceRange R) const { if (R.isInvalid()) return {}; if (!ExpandedTokIndex.empty()) { // Quick lookup if `R` is a token range. // This is a huge win since majority of the users use ranges provided by an // AST. Ranges in AST are token ranges from expanded token stream. 
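// Sketch (under simplified assumptions) of the ExpandedTokIndex fast path built just
// above: a one-time hash index from token location to its position in the expanded
// stream turns the common "range straight from the AST" lookup into two hash probes
// instead of a binary search. Locations are modeled as plain unsigned offsets; the
// real index is a DenseMap keyed by SourceLocation.
#include <cstdio>
#include <unordered_map>
#include <vector>

int main() {
  std::vector<unsigned> TokenLocs = {0, 4, 6, 10, 15}; // expanded token stream
  std::unordered_map<unsigned, size_t> Index;
  for (size_t I = 0; I < TokenLocs.size(); ++I)
    Index[TokenLocs[I]] = I;

  unsigned RBegin = 4, REnd = 10; // a token range, e.g. from an AST node
  auto B = Index.find(RBegin), E = Index.find(REnd);
  if (B != Index.end() && E != Index.end() && B->second <= E->second)
    // +1 turns the inclusive token range into a half-open index range.
    std::printf("tokens [%zu, %zu)\n", B->second, E->second + 1);
}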
const auto B = ExpandedTokIndex.find(R.getBegin()); const auto E = ExpandedTokIndex.find(R.getEnd()); if (B != ExpandedTokIndex.end() && E != ExpandedTokIndex.end()) { const Token *L = ExpandedTokens.data() + B->getSecond(); // Add 1 to End to make a half-open range. const Token *R = ExpandedTokens.data() + E->getSecond() + 1; if (L > R) return {}; return {L, R}; } } // Slow case. Use `isBeforeInTranslationUnit` to binary search for the // required range. return getTokensCovering(expandedTokens(), R, *SourceMgr); } CharSourceRange FileRange::toCharRange(const SourceManager &SM) const { return CharSourceRange( SourceRange(SM.getComposedLoc(File, Begin), SM.getComposedLoc(File, End)), /*IsTokenRange=*/false); } std::pair TokenBuffer::spelledForExpandedToken(const syntax::Token *Expanded) const { assert(Expanded); assert(ExpandedTokens.data() <= Expanded && Expanded < ExpandedTokens.data() + ExpandedTokens.size()); auto FileIt = Files.find( SourceMgr->getFileID(SourceMgr->getExpansionLoc(Expanded->location()))); assert(FileIt != Files.end() && "no file for an expanded token"); const MarkedFile &File = FileIt->second; unsigned ExpandedIndex = Expanded - ExpandedTokens.data(); // Find the first mapping that produced tokens after \p Expanded. auto It = llvm::partition_point(File.Mappings, [&](const Mapping &M) { return M.BeginExpanded <= ExpandedIndex; }); // Our token could only be produced by the previous mapping. if (It == File.Mappings.begin()) { // No previous mapping, no need to modify offsets. return {&File.SpelledTokens[ExpandedIndex - File.BeginExpanded], /*Mapping=*/nullptr}; } --It; // 'It' now points to last mapping that started before our token. // Check if the token is part of the mapping. if (ExpandedIndex < It->EndExpanded) return {&File.SpelledTokens[It->BeginSpelled], /*Mapping=*/&*It}; // Not part of the mapping, use the index from previous mapping to compute the // corresponding spelled token. return { &File.SpelledTokens[It->EndSpelled + (ExpandedIndex - It->EndExpanded)], /*Mapping=*/nullptr}; } const TokenBuffer::Mapping * TokenBuffer::mappingStartingBeforeSpelled(const MarkedFile &F, const syntax::Token *Spelled) { assert(F.SpelledTokens.data() <= Spelled); unsigned SpelledI = Spelled - F.SpelledTokens.data(); assert(SpelledI < F.SpelledTokens.size()); auto It = llvm::partition_point(F.Mappings, [SpelledI](const Mapping &M) { return M.BeginSpelled <= SpelledI; }); if (It == F.Mappings.begin()) return nullptr; --It; return &*It; } llvm::SmallVector, 1> TokenBuffer::expandedForSpelled(llvm::ArrayRef Spelled) const { if (Spelled.empty()) return {}; const auto &File = fileForSpelled(Spelled); auto *FrontMapping = mappingStartingBeforeSpelled(File, &Spelled.front()); unsigned SpelledFrontI = &Spelled.front() - File.SpelledTokens.data(); assert(SpelledFrontI < File.SpelledTokens.size()); unsigned ExpandedBegin; if (!FrontMapping) { // No mapping that starts before the first token of Spelled, we don't have // to modify offsets. ExpandedBegin = File.BeginExpanded + SpelledFrontI; } else if (SpelledFrontI < FrontMapping->EndSpelled) { // This mapping applies to Spelled tokens. if (SpelledFrontI != FrontMapping->BeginSpelled) { // Spelled tokens don't cover the entire mapping, returning empty result. return {}; // FIXME: support macro arguments. } // Spelled tokens start at the beginning of this mapping. 
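// Toy model (assumptions: a single file whose expanded range starts at index 0) of the
// Mapping arithmetic used by spelledForExpandedToken() above: find the last mapping
// that starts at or before the expanded index; if the index falls inside it, the
// answer is the mapping's first spelled token, otherwise shift by the distance past
// the mapping's end. Field names mirror TokenBuffer::Mapping; everything else is
// invented for the sketch.
#include <algorithm>
#include <cstdio>
#include <vector>

struct ToyMapping {
  unsigned BeginSpelled, EndSpelled, BeginExpanded, EndExpanded;
};

static unsigned toySpelledForExpanded(const std::vector<ToyMapping> &Mappings,
                                      unsigned ExpandedIndex) {
  auto It = std::partition_point(
      Mappings.begin(), Mappings.end(),
      [&](const ToyMapping &M) { return M.BeginExpanded <= ExpandedIndex; });
  if (It == Mappings.begin())
    return ExpandedIndex; // before any mapping: spelled and expanded offsets coincide
  --It;
  if (ExpandedIndex < It->EndExpanded)
    return It->BeginSpelled; // produced by this expansion
  return It->EndSpelled + (ExpandedIndex - It->EndExpanded); // in the gap after it
}

int main() {
  // One macro call: spelled tokens [2,5) expand to expanded tokens [2,3).
  std::vector<ToyMapping> Mappings = {{2, 5, 2, 3}};
  std::printf("%u %u %u\n",
              toySpelledForExpanded(Mappings, 1),  // 1: plain file token
              toySpelledForExpanded(Mappings, 2),  // 2: start of the macro call
              toySpelledForExpanded(Mappings, 3)); // 5: first token after the call
}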
ExpandedBegin = FrontMapping->BeginExpanded; } else { // Spelled tokens start after the mapping ends (they start in the hole // between 2 mappings, or between a mapping and end of the file). ExpandedBegin = FrontMapping->EndExpanded + (SpelledFrontI - FrontMapping->EndSpelled); } auto *BackMapping = mappingStartingBeforeSpelled(File, &Spelled.back()); unsigned SpelledBackI = &Spelled.back() - File.SpelledTokens.data(); unsigned ExpandedEnd; if (!BackMapping) { // No mapping that starts before the last token of Spelled, we don't have to // modify offsets. ExpandedEnd = File.BeginExpanded + SpelledBackI + 1; } else if (SpelledBackI < BackMapping->EndSpelled) { // This mapping applies to Spelled tokens. if (SpelledBackI + 1 != BackMapping->EndSpelled) { // Spelled tokens don't cover the entire mapping, returning empty result. return {}; // FIXME: support macro arguments. } ExpandedEnd = BackMapping->EndExpanded; } else { // Spelled tokens end after the mapping ends. ExpandedEnd = BackMapping->EndExpanded + (SpelledBackI - BackMapping->EndSpelled) + 1; } assert(ExpandedBegin < ExpandedTokens.size()); assert(ExpandedEnd < ExpandedTokens.size()); // Avoid returning empty ranges. if (ExpandedBegin == ExpandedEnd) return {}; return {llvm::makeArrayRef(ExpandedTokens.data() + ExpandedBegin, ExpandedTokens.data() + ExpandedEnd)}; } llvm::ArrayRef TokenBuffer::spelledTokens(FileID FID) const { auto It = Files.find(FID); assert(It != Files.end()); return It->second.SpelledTokens; } const syntax::Token *TokenBuffer::spelledTokenAt(SourceLocation Loc) const { assert(Loc.isFileID()); const auto *Tok = llvm::partition_point( spelledTokens(SourceMgr->getFileID(Loc)), [&](const syntax::Token &Tok) { return Tok.location() < Loc; }); if (!Tok || Tok->location() != Loc) return nullptr; return Tok; } std::string TokenBuffer::Mapping::str() const { return std::string( llvm::formatv("spelled tokens: [{0},{1}), expanded tokens: [{2},{3})", BeginSpelled, EndSpelled, BeginExpanded, EndExpanded)); } llvm::Optional> TokenBuffer::spelledForExpanded(llvm::ArrayRef Expanded) const { // Mapping an empty range is ambiguous in case of empty mappings at either end // of the range, bail out in that case. if (Expanded.empty()) return llvm::None; - - const syntax::Token *BeginSpelled; - const Mapping *BeginMapping; - std::tie(BeginSpelled, BeginMapping) = - spelledForExpandedToken(&Expanded.front()); - - const syntax::Token *LastSpelled; - const Mapping *LastMapping; - std::tie(LastSpelled, LastMapping) = - spelledForExpandedToken(&Expanded.back()); - - FileID FID = SourceMgr->getFileID(BeginSpelled->location()); + const syntax::Token *First = &Expanded.front(); + const syntax::Token *Last = &Expanded.back(); + const syntax::Token *FirstSpelled, *LastSpelled; + const TokenBuffer::Mapping *FirstMapping, *LastMapping; + std::tie(FirstSpelled, FirstMapping) = spelledForExpandedToken(First); + std::tie(LastSpelled, LastMapping) = spelledForExpandedToken(Last); + + FileID FID = SourceMgr->getFileID(FirstSpelled->location()); // FIXME: Handle multi-file changes by trying to map onto a common root. if (FID != SourceMgr->getFileID(LastSpelled->location())) return llvm::None; const MarkedFile &File = Files.find(FID)->second; - // If both tokens are coming from a macro argument expansion, try and map to - // smallest part of the macro argument. BeginMapping && LastMapping check is - // only for performance, they are a prerequisite for Expanded.front() and - // Expanded.back() being part of a macro arg expansion. 
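// Hedged usage sketch (not part of the patch): given an already-populated
// syntax::TokenBuffer, map the expanded tokens of an AST range back to the spelled
// tokens a tool could edit. Only the public TokenBuffer API from Tokens.h is used;
// the SourceRange is assumed to come from an AST node.
#include "clang/Tooling/Syntax/Tokens.h"
#include "llvm/Support/raw_ostream.h"

static void printSpelledForRange(const clang::syntax::TokenBuffer &Tokens,
                                 clang::SourceRange R) {
  llvm::ArrayRef<clang::syntax::Token> Expanded = Tokens.expandedTokens(R);
  if (auto Spelled = Tokens.spelledForExpanded(Expanded)) {
    for (const clang::syntax::Token &T : *Spelled)
      llvm::outs() << T.text(Tokens.sourceManager()) << " ";
    llvm::outs() << "\n";
  } else {
    llvm::outs() << "range is not editable as spelled (crosses macro boundaries)\n";
  }
}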
- if (BeginMapping && LastMapping && - SourceMgr->isMacroArgExpansion(Expanded.front().location()) && - SourceMgr->isMacroArgExpansion(Expanded.back().location())) { - auto CommonRange = findCommonRangeForMacroArgs(Expanded.front(), - Expanded.back(), *SourceMgr); - // It might be the case that tokens are arguments of different macro calls, - // in that case we should continue with the logic below instead of returning - // an empty range. - if (CommonRange.isValid()) - return getTokensCovering(File.SpelledTokens, CommonRange, *SourceMgr); + // If the range is within one macro argument, the result may be only part of a + // Mapping. We must use the general (SourceManager-based) algorithm. + if (FirstMapping && FirstMapping == LastMapping && + SourceMgr->isMacroArgExpansion(First->location()) && + SourceMgr->isMacroArgExpansion(Last->location())) { + // We use excluded Prev/Next token for bounds checking. + SourceLocation Prev = (First == &ExpandedTokens.front()) + ? SourceLocation() + : (First - 1)->location(); + SourceLocation Next = (Last == &ExpandedTokens.back()) + ? SourceLocation() + : (Last + 1)->location(); + SourceRange Range = spelledForExpandedSlow( + First->location(), Last->location(), Prev, Next, FID, *SourceMgr); + if (Range.isInvalid()) + return llvm::None; + return getTokensCovering(File.SpelledTokens, Range, *SourceMgr); } + // Otherwise, use the fast version based on Mappings. // Do not allow changes that doesn't cover full expansion. - unsigned BeginExpanded = Expanded.begin() - ExpandedTokens.data(); - unsigned EndExpanded = Expanded.end() - ExpandedTokens.data(); - if (BeginMapping && BeginExpanded != BeginMapping->BeginExpanded) + unsigned FirstExpanded = Expanded.begin() - ExpandedTokens.data(); + unsigned LastExpanded = Expanded.end() - ExpandedTokens.data(); + if (FirstMapping && FirstExpanded != FirstMapping->BeginExpanded) return llvm::None; - if (LastMapping && LastMapping->EndExpanded != EndExpanded) + if (LastMapping && LastMapping->EndExpanded != LastExpanded) return llvm::None; - // All is good, return the result. return llvm::makeArrayRef( - BeginMapping ? File.SpelledTokens.data() + BeginMapping->BeginSpelled - : BeginSpelled, + FirstMapping ? File.SpelledTokens.data() + FirstMapping->BeginSpelled + : FirstSpelled, LastMapping ? 
File.SpelledTokens.data() + LastMapping->EndSpelled : LastSpelled + 1); } TokenBuffer::Expansion TokenBuffer::makeExpansion(const MarkedFile &F, const Mapping &M) const { Expansion E; E.Spelled = llvm::makeArrayRef(F.SpelledTokens.data() + M.BeginSpelled, F.SpelledTokens.data() + M.EndSpelled); E.Expanded = llvm::makeArrayRef(ExpandedTokens.data() + M.BeginExpanded, ExpandedTokens.data() + M.EndExpanded); return E; } const TokenBuffer::MarkedFile & TokenBuffer::fileForSpelled(llvm::ArrayRef Spelled) const { assert(!Spelled.empty()); assert(Spelled.front().location().isFileID() && "not a spelled token"); auto FileIt = Files.find(SourceMgr->getFileID(Spelled.front().location())); assert(FileIt != Files.end() && "file not tracked by token buffer"); const auto &File = FileIt->second; assert(File.SpelledTokens.data() <= Spelled.data() && Spelled.end() <= (File.SpelledTokens.data() + File.SpelledTokens.size()) && "Tokens not in spelled range"); #ifndef NDEBUG auto T1 = Spelled.back().location(); auto T2 = File.SpelledTokens.back().location(); assert(T1 == T2 || sourceManager().isBeforeInTranslationUnit(T1, T2)); #endif return File; } llvm::Optional TokenBuffer::expansionStartingAt(const syntax::Token *Spelled) const { assert(Spelled); const auto &File = fileForSpelled(*Spelled); unsigned SpelledIndex = Spelled - File.SpelledTokens.data(); auto M = llvm::partition_point(File.Mappings, [&](const Mapping &M) { return M.BeginSpelled < SpelledIndex; }); if (M == File.Mappings.end() || M->BeginSpelled != SpelledIndex) return llvm::None; return makeExpansion(File, *M); } std::vector TokenBuffer::expansionsOverlapping( llvm::ArrayRef Spelled) const { if (Spelled.empty()) return {}; const auto &File = fileForSpelled(Spelled); // Find the first overlapping range, and then copy until we stop overlapping. unsigned SpelledBeginIndex = Spelled.begin() - File.SpelledTokens.data(); unsigned SpelledEndIndex = Spelled.end() - File.SpelledTokens.data(); auto M = llvm::partition_point(File.Mappings, [&](const Mapping &M) { return M.EndSpelled <= SpelledBeginIndex; }); std::vector Expansions; for (; M != File.Mappings.end() && M->BeginSpelled < SpelledEndIndex; ++M) Expansions.push_back(makeExpansion(File, *M)); return Expansions; } llvm::ArrayRef syntax::spelledTokensTouching(SourceLocation Loc, llvm::ArrayRef Tokens) { assert(Loc.isFileID()); auto *Right = llvm::partition_point( Tokens, [&](const syntax::Token &Tok) { return Tok.location() < Loc; }); bool AcceptRight = Right != Tokens.end() && Right->location() <= Loc; bool AcceptLeft = Right != Tokens.begin() && (Right - 1)->endLocation() >= Loc; return llvm::makeArrayRef(Right - (AcceptLeft ? 1 : 0), Right + (AcceptRight ? 
1 : 0)); } llvm::ArrayRef syntax::spelledTokensTouching(SourceLocation Loc, const syntax::TokenBuffer &Tokens) { return spelledTokensTouching( Loc, Tokens.spelledTokens(Tokens.sourceManager().getFileID(Loc))); } const syntax::Token * syntax::spelledIdentifierTouching(SourceLocation Loc, llvm::ArrayRef Tokens) { for (const syntax::Token &Tok : spelledTokensTouching(Loc, Tokens)) { if (Tok.kind() == tok::identifier) return &Tok; } return nullptr; } const syntax::Token * syntax::spelledIdentifierTouching(SourceLocation Loc, const syntax::TokenBuffer &Tokens) { return spelledIdentifierTouching( Loc, Tokens.spelledTokens(Tokens.sourceManager().getFileID(Loc))); } std::vector TokenBuffer::macroExpansions(FileID FID) const { auto FileIt = Files.find(FID); assert(FileIt != Files.end() && "file not tracked by token buffer"); auto &File = FileIt->second; std::vector Expansions; auto &Spelled = File.SpelledTokens; for (auto Mapping : File.Mappings) { const syntax::Token *Token = &Spelled[Mapping.BeginSpelled]; if (Token->kind() == tok::TokenKind::identifier) Expansions.push_back(Token); } return Expansions; } std::vector syntax::tokenize(const FileRange &FR, const SourceManager &SM, const LangOptions &LO) { std::vector Tokens; IdentifierTable Identifiers(LO); auto AddToken = [&](clang::Token T) { // Fill the proper token kind for keywords, etc. if (T.getKind() == tok::raw_identifier && !T.needsCleaning() && !T.hasUCN()) { // FIXME: support needsCleaning and hasUCN cases. clang::IdentifierInfo &II = Identifiers.get(T.getRawIdentifier()); T.setIdentifierInfo(&II); T.setKind(II.getTokenID()); } Tokens.push_back(syntax::Token(T)); }; auto SrcBuffer = SM.getBufferData(FR.file()); Lexer L(SM.getLocForStartOfFile(FR.file()), LO, SrcBuffer.data(), SrcBuffer.data() + FR.beginOffset(), // We can't make BufEnd point to FR.endOffset, as Lexer requires a // null terminated buffer. SrcBuffer.data() + SrcBuffer.size()); clang::Token T; while (!L.LexFromRawLexer(T) && L.getCurrentBufferOffset() < FR.endOffset()) AddToken(T); // LexFromRawLexer returns true when it parses the last token of the file, add // it iff it starts within the range we are interested in. if (SM.getFileOffset(T.getLocation()) < FR.endOffset()) AddToken(T); return Tokens; } std::vector syntax::tokenize(FileID FID, const SourceManager &SM, const LangOptions &LO) { return tokenize(syntax::FileRange(FID, 0, SM.getFileIDSize(FID)), SM, LO); } /// Records information reqired to construct mappings for the token buffer that /// we are collecting. class TokenCollector::CollectPPExpansions : public PPCallbacks { public: CollectPPExpansions(TokenCollector &C) : Collector(&C) {} /// Disabled instance will stop reporting anything to TokenCollector. /// This ensures that uses of the preprocessor after TokenCollector::consume() /// is called do not access the (possibly invalid) collector instance. void disable() { Collector = nullptr; } void MacroExpands(const clang::Token &MacroNameTok, const MacroDefinition &MD, SourceRange Range, const MacroArgs *Args) override { if (!Collector) return; const auto &SM = Collector->PP.getSourceManager(); // Only record top-level expansions that directly produce expanded tokens. // This excludes those where: // - the macro use is inside a macro body, // - the macro appears in an argument to another macro. // However macro expansion isn't really a tree, it's token rewrite rules, // so there are other cases, e.g. 
// #define B(X) X // #define A 1 + B // A(2) // Both A and B produce expanded tokens, though the macro name 'B' comes // from an expansion. The best we can do is merge the mappings for both. // The *last* token of any top-level macro expansion must be in a file. // (In the example above, see the closing paren of the expansion of B). if (!Range.getEnd().isFileID()) return; // If there's a current expansion that encloses this one, this one can't be // top-level. if (LastExpansionEnd.isValid() && !SM.isBeforeInTranslationUnit(LastExpansionEnd, Range.getEnd())) return; // If the macro invocation (B) starts in a macro (A) but ends in a file, // we'll create a merged mapping for A + B by overwriting the endpoint for // A's startpoint. if (!Range.getBegin().isFileID()) { Range.setBegin(SM.getExpansionLoc(Range.getBegin())); assert(Collector->Expansions.count(Range.getBegin()) && "Overlapping macros should have same expansion location"); } Collector->Expansions[Range.getBegin()] = Range.getEnd(); LastExpansionEnd = Range.getEnd(); } // FIXME: handle directives like #pragma, #include, etc. private: TokenCollector *Collector; /// Used to detect recursive macro expansions. SourceLocation LastExpansionEnd; }; /// Fills in the TokenBuffer by tracing the run of a preprocessor. The /// implementation tracks the tokens, macro expansions and directives coming /// from the preprocessor and: /// - for each token, figures out if it is a part of an expanded token stream, /// spelled token stream or both. Stores the tokens appropriately. /// - records mappings from the spelled to expanded token ranges, e.g. for macro /// expansions. /// FIXME: also properly record: /// - #include directives, /// - #pragma, #line and other PP directives, /// - skipped pp regions, /// - ... TokenCollector::TokenCollector(Preprocessor &PP) : PP(PP) { // Collect the expanded token stream during preprocessing. PP.setTokenWatcher([this](const clang::Token &T) { if (T.isAnnotation()) return; DEBUG_WITH_TYPE("collect-tokens", llvm::dbgs() << "Token: " << syntax::Token(T).dumpForTests( this->PP.getSourceManager()) << "\n" ); Expanded.push_back(syntax::Token(T)); }); // And locations of macro calls, to properly recover boundaries of those in // case of empty expansions. auto CB = std::make_unique(*this); this->Collector = CB.get(); PP.addPPCallbacks(std::move(CB)); } /// Builds mappings and spelled tokens in the TokenBuffer based on the expanded /// token stream. class TokenCollector::Builder { public: Builder(std::vector Expanded, PPExpansions CollectedExpansions, const SourceManager &SM, const LangOptions &LangOpts) : Result(SM), CollectedExpansions(std::move(CollectedExpansions)), SM(SM), LangOpts(LangOpts) { Result.ExpandedTokens = std::move(Expanded); } TokenBuffer build() && { assert(!Result.ExpandedTokens.empty()); assert(Result.ExpandedTokens.back().kind() == tok::eof); // Tokenize every file that contributed tokens to the expanded stream. buildSpelledTokens(); // The expanded token stream consists of runs of tokens that came from // the same source (a macro expansion, part of a file etc). // Between these runs are the logical positions of spelled tokens that // didn't expand to anything. while (NextExpanded < Result.ExpandedTokens.size() - 1 /* eof */) { // Create empty mappings for spelled tokens that expanded to nothing here. // May advance NextSpelled, but NextExpanded is unchanged. discard(); // Create mapping for a contiguous run of expanded tokens. // Advances NextExpanded past the run, and NextSpelled accordingly. 
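// Hedged sketch of the PPCallbacks pattern CollectPPExpansions uses above: record the
// spelled SourceRange of every macro invocation whose end is in a file, so a builder
// can later pair spelled and expanded token runs. The class and map are invented for
// illustration and deliberately simplified (the real callback also merges overlapping
// expansions and filters nested ones); the MacroExpands signature is the real
// PPCallbacks one.
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/DenseMap.h"

class RecordExpansions : public clang::PPCallbacks {
public:
  RecordExpansions(
      llvm::DenseMap<clang::SourceLocation, clang::SourceLocation> &Out)
      : Out(Out) {}

  void MacroExpands(const clang::Token &, const clang::MacroDefinition &,
                    clang::SourceRange Range,
                    const clang::MacroArgs *) override {
    // The last token of a top-level expansion is always spelled in a file;
    // uses nested inside other expansions are covered by the enclosing call.
    if (Range.getEnd().isFileID())
      Out[Range.getBegin()] = Range.getEnd();
  }

private:
  llvm::DenseMap<clang::SourceLocation, clang::SourceLocation> &Out;
};
// Registered via PP.addPPCallbacks(std::make_unique<RecordExpansions>(Map));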
unsigned OldPosition = NextExpanded; advance(); if (NextExpanded == OldPosition) diagnoseAdvanceFailure(); } // If any tokens remain in any of the files, they didn't expand to anything. // Create empty mappings up until the end of the file. for (const auto &File : Result.Files) discard(File.first); #ifndef NDEBUG for (auto &pair : Result.Files) { auto &mappings = pair.second.Mappings; assert(llvm::is_sorted(mappings, [](const TokenBuffer::Mapping &M1, const TokenBuffer::Mapping &M2) { return M1.BeginSpelled < M2.BeginSpelled && M1.EndSpelled < M2.EndSpelled && M1.BeginExpanded < M2.BeginExpanded && M1.EndExpanded < M2.EndExpanded; })); } #endif return std::move(Result); } private: // Consume a sequence of spelled tokens that didn't expand to anything. // In the simplest case, skips spelled tokens until finding one that produced // the NextExpanded token, and creates an empty mapping for them. // If Drain is provided, skips remaining tokens from that file instead. void discard(llvm::Optional Drain = llvm::None) { SourceLocation Target = Drain ? SM.getLocForEndOfFile(*Drain) : SM.getExpansionLoc( Result.ExpandedTokens[NextExpanded].location()); FileID File = SM.getFileID(Target); const auto &SpelledTokens = Result.Files[File].SpelledTokens; auto &NextSpelled = this->NextSpelled[File]; TokenBuffer::Mapping Mapping; Mapping.BeginSpelled = NextSpelled; // When dropping trailing tokens from a file, the empty mapping should // be positioned within the file's expanded-token range (at the end). Mapping.BeginExpanded = Mapping.EndExpanded = Drain ? Result.Files[*Drain].EndExpanded : NextExpanded; // We may want to split into several adjacent empty mappings. // FlushMapping() emits the current mapping and starts a new one. auto FlushMapping = [&, this] { Mapping.EndSpelled = NextSpelled; if (Mapping.BeginSpelled != Mapping.EndSpelled) Result.Files[File].Mappings.push_back(Mapping); Mapping.BeginSpelled = NextSpelled; }; while (NextSpelled < SpelledTokens.size() && SpelledTokens[NextSpelled].location() < Target) { // If we know mapping bounds at [NextSpelled, KnownEnd] (macro expansion) // then we want to partition our (empty) mapping. // [Start, NextSpelled) [NextSpelled, KnownEnd] (KnownEnd, Target) SourceLocation KnownEnd = CollectedExpansions.lookup(SpelledTokens[NextSpelled].location()); if (KnownEnd.isValid()) { FlushMapping(); // Emits [Start, NextSpelled) while (NextSpelled < SpelledTokens.size() && SpelledTokens[NextSpelled].location() <= KnownEnd) ++NextSpelled; FlushMapping(); // Emits [NextSpelled, KnownEnd] // Now the loop contitues and will emit (KnownEnd, Target). } else { ++NextSpelled; } } FlushMapping(); } // Consumes the NextExpanded token and others that are part of the same run. // Increases NextExpanded and NextSpelled by at least one, and adds a mapping // (unless this is a run of file tokens, which we represent with no mapping). void advance() { const syntax::Token &Tok = Result.ExpandedTokens[NextExpanded]; SourceLocation Expansion = SM.getExpansionLoc(Tok.location()); FileID File = SM.getFileID(Expansion); const auto &SpelledTokens = Result.Files[File].SpelledTokens; auto &NextSpelled = this->NextSpelled[File]; if (Tok.location().isFileID()) { // A run of file tokens continues while the expanded/spelled tokens match. 
while (NextSpelled < SpelledTokens.size() && NextExpanded < Result.ExpandedTokens.size() && SpelledTokens[NextSpelled].location() == Result.ExpandedTokens[NextExpanded].location()) { ++NextSpelled; ++NextExpanded; } // We need no mapping for file tokens copied to the expanded stream. } else { // We found a new macro expansion. We should have its spelling bounds. auto End = CollectedExpansions.lookup(Expansion); assert(End.isValid() && "Macro expansion wasn't captured?"); // Mapping starts here... TokenBuffer::Mapping Mapping; Mapping.BeginExpanded = NextExpanded; Mapping.BeginSpelled = NextSpelled; // ... consumes spelled tokens within bounds we captured ... while (NextSpelled < SpelledTokens.size() && SpelledTokens[NextSpelled].location() <= End) ++NextSpelled; // ... consumes expanded tokens rooted at the same expansion ... while (NextExpanded < Result.ExpandedTokens.size() && SM.getExpansionLoc( Result.ExpandedTokens[NextExpanded].location()) == Expansion) ++NextExpanded; // ... and ends here. Mapping.EndExpanded = NextExpanded; Mapping.EndSpelled = NextSpelled; Result.Files[File].Mappings.push_back(Mapping); } } // advance() is supposed to consume at least one token - if not, we crash. void diagnoseAdvanceFailure() { #ifndef NDEBUG // Show the failed-to-map token in context. for (unsigned I = (NextExpanded < 10) ? 0 : NextExpanded - 10; I < NextExpanded + 5 && I < Result.ExpandedTokens.size(); ++I) { const char *L = (I == NextExpanded) ? "!! " : (I < NextExpanded) ? "ok " : " "; llvm::errs() << L << Result.ExpandedTokens[I].dumpForTests(SM) << "\n"; } #endif llvm_unreachable("Couldn't map expanded token to spelled tokens!"); } /// Initializes TokenBuffer::Files and fills spelled tokens and expanded /// ranges for each of the files. void buildSpelledTokens() { for (unsigned I = 0; I < Result.ExpandedTokens.size(); ++I) { const auto &Tok = Result.ExpandedTokens[I]; auto FID = SM.getFileID(SM.getExpansionLoc(Tok.location())); auto It = Result.Files.try_emplace(FID); TokenBuffer::MarkedFile &File = It.first->second; // The eof token should not be considered part of the main-file's range. File.EndExpanded = Tok.kind() == tok::eof ? I : I + 1; if (!It.second) continue; // we have seen this file before. // This is the first time we see this file. 
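// Hedged end-to-end usage sketch: a TokenCollector must be installed before the
// preprocessor runs and consumed afterwards; consume() builds the TokenBuffer via the
// Builder above. The surrounding compiler wiring is abbreviated behind a callback;
// only the collector calls are the real API.
#include "clang/Lex/Preprocessor.h"
#include "clang/Tooling/Syntax/Tokens.h"
#include "llvm/ADT/STLExtras.h"
#include <utility>

clang::syntax::TokenBuffer collectTokens(clang::Preprocessor &PP,
                                         llvm::function_ref<void()> ParseTU) {
  clang::syntax::TokenCollector Collector(PP); // installs token watcher + PP callbacks
  ParseTU();                                   // e.g. run ParseAST over the main file
  return std::move(Collector).consume();       // spelled/expanded streams + mappings
}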
File.BeginExpanded = I; File.SpelledTokens = tokenize(FID, SM, LangOpts); } } TokenBuffer Result; unsigned NextExpanded = 0; // cursor in ExpandedTokens llvm::DenseMap NextSpelled; // cursor in SpelledTokens PPExpansions CollectedExpansions; const SourceManager &SM; const LangOptions &LangOpts; }; TokenBuffer TokenCollector::consume() && { PP.setTokenWatcher(nullptr); Collector->disable(); return Builder(std::move(Expanded), std::move(Expansions), PP.getSourceManager(), PP.getLangOpts()) .build(); } std::string syntax::Token::str() const { return std::string(llvm::formatv("Token({0}, length = {1})", tok::getTokenName(kind()), length())); } std::string syntax::Token::dumpForTests(const SourceManager &SM) const { return std::string(llvm::formatv("Token(`{0}`, {1}, length = {2})", text(SM), tok::getTokenName(kind()), length())); } std::string TokenBuffer::dumpForTests() const { auto PrintToken = [this](const syntax::Token &T) -> std::string { if (T.kind() == tok::eof) return ""; return std::string(T.text(*SourceMgr)); }; auto DumpTokens = [this, &PrintToken](llvm::raw_ostream &OS, llvm::ArrayRef Tokens) { if (Tokens.empty()) { OS << ""; return; } OS << Tokens[0].text(*SourceMgr); for (unsigned I = 1; I < Tokens.size(); ++I) { if (Tokens[I].kind() == tok::eof) continue; OS << " " << PrintToken(Tokens[I]); } }; std::string Dump; llvm::raw_string_ostream OS(Dump); OS << "expanded tokens:\n" << " "; // (!) we do not show ''. DumpTokens(OS, llvm::makeArrayRef(ExpandedTokens).drop_back()); OS << "\n"; std::vector Keys; for (auto F : Files) Keys.push_back(F.first); llvm::sort(Keys); for (FileID ID : Keys) { const MarkedFile &File = Files.find(ID)->second; auto *Entry = SourceMgr->getFileEntryForID(ID); if (!Entry) continue; // Skip builtin files. OS << llvm::formatv("file '{0}'\n", Entry->getName()) << " spelled tokens:\n" << " "; DumpTokens(OS, File.SpelledTokens); OS << "\n"; if (File.Mappings.empty()) { OS << " no mappings.\n"; continue; } OS << " mappings:\n"; for (auto &M : File.Mappings) { OS << llvm::formatv( " ['{0}'_{1}, '{2}'_{3}) => ['{4}'_{5}, '{6}'_{7})\n", PrintToken(File.SpelledTokens[M.BeginSpelled]), M.BeginSpelled, M.EndSpelled == File.SpelledTokens.size() ? "" : PrintToken(File.SpelledTokens[M.EndSpelled]), M.EndSpelled, PrintToken(ExpandedTokens[M.BeginExpanded]), M.BeginExpanded, PrintToken(ExpandedTokens[M.EndExpanded]), M.EndExpanded); } } return Dump; } diff --git a/libcxx/include/__config b/libcxx/include/__config index 01377a9617ea..589b5c3b2241 100644 --- a/libcxx/include/__config +++ b/libcxx/include/__config @@ -1,1231 +1,1231 @@ // -*- C++ -*- //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #ifndef _LIBCPP___CONFIG #define _LIBCPP___CONFIG #include <__config_site> #if defined(_MSC_VER) && !defined(__clang__) # if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # define _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER # endif #endif #ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER # pragma GCC system_header #endif #if defined(__apple_build_version__) # define _LIBCPP_COMPILER_CLANG_BASED # define _LIBCPP_APPLE_CLANG_VER (__apple_build_version__ / 10000) #elif defined(__clang__) # define _LIBCPP_COMPILER_CLANG_BASED # define _LIBCPP_CLANG_VER (__clang_major__ * 100 + __clang_minor__) #elif defined(__GNUC__) # define _LIBCPP_COMPILER_GCC #elif defined(_MSC_VER) # define _LIBCPP_COMPILER_MSVC #endif #ifdef __cplusplus -# define _LIBCPP_VERSION 15002 +# define _LIBCPP_VERSION 15003 # define _LIBCPP_CONCAT_IMPL(_X, _Y) _X##_Y # define _LIBCPP_CONCAT(_X, _Y) _LIBCPP_CONCAT_IMPL(_X, _Y) // Valid C++ identifier that revs with every libc++ version. This can be used to // generate identifiers that must be unique for every released libc++ version. # define _LIBCPP_VERSIONED_IDENTIFIER _LIBCPP_CONCAT(v, _LIBCPP_VERSION) # if __STDC_HOSTED__ == 0 # define _LIBCPP_FREESTANDING # endif # ifndef _LIBCPP_STD_VER # if __cplusplus <= 201103L # define _LIBCPP_STD_VER 11 # elif __cplusplus <= 201402L # define _LIBCPP_STD_VER 14 # elif __cplusplus <= 201703L # define _LIBCPP_STD_VER 17 # elif __cplusplus <= 202002L # define _LIBCPP_STD_VER 20 # else # define _LIBCPP_STD_VER 22 // current year, or date of c++2b ratification # endif # endif // _LIBCPP_STD_VER # if defined(__ELF__) # define _LIBCPP_OBJECT_FORMAT_ELF 1 # elif defined(__MACH__) # define _LIBCPP_OBJECT_FORMAT_MACHO 1 # elif defined(_WIN32) # define _LIBCPP_OBJECT_FORMAT_COFF 1 # elif defined(__wasm__) # define _LIBCPP_OBJECT_FORMAT_WASM 1 # elif defined(_AIX) # define _LIBCPP_OBJECT_FORMAT_XCOFF 1 # else // ... add new file formats here ... # endif # if _LIBCPP_ABI_VERSION >= 2 // Change short string representation so that string data starts at offset 0, // improving its alignment in some cases. # define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT // Fix deque iterator type in order to support incomplete types. # define _LIBCPP_ABI_INCOMPLETE_TYPES_IN_DEQUE // Fix undefined behavior in how std::list stores its linked nodes. # define _LIBCPP_ABI_LIST_REMOVE_NODE_POINTER_UB // Fix undefined behavior in how __tree stores its end and parent nodes. # define _LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB // Fix undefined behavior in how __hash_table stores its pointer types. # define _LIBCPP_ABI_FIX_UNORDERED_NODE_POINTER_UB # define _LIBCPP_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB # define _LIBCPP_ABI_FIX_UNORDERED_CONTAINER_SIZE_TYPE // Define a key function for `bad_function_call` in the library, to centralize // its vtable and typeinfo to libc++ rather than having all other libraries // using that class define their own copies. # define _LIBCPP_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION // Override the default return value of exception::what() for // bad_function_call::what() with a string that is specific to // bad_function_call (see http://wg21.link/LWG2233). This is an ABI break // because it changes the vtable layout of bad_function_call. # define _LIBCPP_ABI_BAD_FUNCTION_CALL_GOOD_WHAT_MESSAGE // Enable optimized version of __do_get_(un)signed which avoids redundant copies. 
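// Small probe (not part of the header): when compiled with -stdlib=libc++, it prints
// the constants configured above. _LIBCPP_VERSION and _LIBCPP_STD_VER are defined by
// every libc++ header via <__config>; the exact values depend on the installed libc++
// release and on the -std= level.
#include <cstddef>
#include <cstdio>

int main() {
#if defined(_LIBCPP_VERSION)
  std::printf("libc++ %d, _LIBCPP_STD_VER %d\n", _LIBCPP_VERSION, _LIBCPP_STD_VER);
#else
  std::printf("not building against libc++\n");
#endif
}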
# define _LIBCPP_ABI_OPTIMIZED_LOCALE_NUM_GET // Give reverse_iterator one data member of type T, not two. // Also, in C++17 and later, don't derive iterator types from std::iterator. # define _LIBCPP_ABI_NO_ITERATOR_BASES // Use the smallest possible integer type to represent the index of the variant. // Previously libc++ used "unsigned int" exclusively. # define _LIBCPP_ABI_VARIANT_INDEX_TYPE_OPTIMIZATION // Unstable attempt to provide a more optimized std::function # define _LIBCPP_ABI_OPTIMIZED_FUNCTION // All the regex constants must be distinct and nonzero. # define _LIBCPP_ABI_REGEX_CONSTANTS_NONZERO // Re-worked external template instantiations for std::string with a focus on // performance and fast-path inlining. # define _LIBCPP_ABI_STRING_OPTIMIZED_EXTERNAL_INSTANTIATION // Enable clang::trivial_abi on std::unique_ptr. # define _LIBCPP_ABI_ENABLE_UNIQUE_PTR_TRIVIAL_ABI // Enable clang::trivial_abi on std::shared_ptr and std::weak_ptr # define _LIBCPP_ABI_ENABLE_SHARED_PTR_TRIVIAL_ABI // std::random_device holds some state when it uses an implementation that gets // entropy from a file (see _LIBCPP_USING_DEV_RANDOM). When switching from this // implementation to another one on a platform that has already shipped // std::random_device, one needs to retain the same object layout to remain ABI // compatible. This switch removes these workarounds for platforms that don't care // about ABI compatibility. # define _LIBCPP_ABI_NO_RANDOM_DEVICE_COMPATIBILITY_LAYOUT // Don't export the legacy __basic_string_common class and its methods from the built library. # define _LIBCPP_ABI_DO_NOT_EXPORT_BASIC_STRING_COMMON // Don't export the legacy __vector_base_common class and its methods from the built library. # define _LIBCPP_ABI_DO_NOT_EXPORT_VECTOR_BASE_COMMON // According to the Standard, `bitset::operator[] const` returns bool # define _LIBCPP_ABI_BITSET_VECTOR_BOOL_CONST_SUBSCRIPT_RETURN_BOOL // Remove the base 10 implementation of std::to_chars from the dylib. // The implementation moved to the header, but we still export the symbols from // the dylib for backwards compatibility. # define _LIBCPP_ABI_DO_NOT_EXPORT_TO_CHARS_BASE_10 # elif _LIBCPP_ABI_VERSION == 1 # if !(defined(_LIBCPP_OBJECT_FORMAT_COFF) || defined(_LIBCPP_OBJECT_FORMAT_XCOFF)) // Enable compiling copies of now inline methods into the dylib to support // applications compiled against older libraries. This is unnecessary with // COFF dllexport semantics, since dllexport forces a non-inline definition // of inline functions to be emitted anyway. Our own non-inline copy would // conflict with the dllexport-emitted copy, so we disable it. For XCOFF, // the linker will take issue with the symbols in the shared object if the // weak inline methods get visibility (such as from -fvisibility-inlines-hidden), // so disable it. # define _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS # endif // Feature macros for disabling pre ABI v1 features. All of these options // are deprecated. # if defined(__FreeBSD__) # define _LIBCPP_DEPRECATED_ABI_DISABLE_PAIR_TRIVIAL_COPY_CTOR # endif # endif # if defined(_LIBCPP_BUILDING_LIBRARY) || _LIBCPP_ABI_VERSION >= 2 // Enable additional explicit instantiations of iostreams components. This // reduces the number of weak definitions generated in programs that use // iostreams by providing a single strong definition in the shared library. 
# define _LIBCPP_ABI_ENABLE_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 // Define a key function for `bad_function_call` in the library, to centralize // its vtable and typeinfo to libc++ rather than having all other libraries // using that class define their own copies. # define _LIBCPP_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION # endif # define _LIBCPP_TOSTRING2(x) # x # define _LIBCPP_TOSTRING(x) _LIBCPP_TOSTRING2(x) # if __cplusplus < 201103L # define _LIBCPP_CXX03_LANG # endif # ifndef __has_attribute # define __has_attribute(__x) 0 # endif # ifndef __has_builtin # define __has_builtin(__x) 0 # endif # ifndef __has_extension # define __has_extension(__x) 0 # endif # ifndef __has_feature # define __has_feature(__x) 0 # endif # ifndef __has_cpp_attribute # define __has_cpp_attribute(__x) 0 # endif // '__is_identifier' returns '0' if '__x' is a reserved identifier provided by // the compiler and '1' otherwise. # ifndef __is_identifier # define __is_identifier(__x) 1 # endif # ifndef __has_declspec_attribute # define __has_declspec_attribute(__x) 0 # endif # define __has_keyword(__x) !(__is_identifier(__x)) # ifndef __has_include # define __has_include(...) 0 # endif # if !defined(_LIBCPP_COMPILER_CLANG_BASED) && __cplusplus < 201103L # error "libc++ only supports C++03 with Clang-based compilers. Please enable C++11" # endif # ifdef _LIBCPP_COMPILER_MSVC # error If you successfully use libc++ with MSVC please tell the libc++ developers and consider upstreaming your \ changes. We are not aware of anybody using this configuration and know that at least some code is currently broken. \ If there are users of this configuration we are happy to provide support. # endif // FIXME: ABI detection should be done via compiler builtin macros. This // is just a placeholder until Clang implements such macros. For now assume // that Windows compilers pretending to be MSVC++ target the Microsoft ABI, // and allow the user to explicitly specify the ABI to handle cases where this // heuristic falls short. # if defined(_LIBCPP_ABI_FORCE_ITANIUM) && defined(_LIBCPP_ABI_FORCE_MICROSOFT) # error "Only one of _LIBCPP_ABI_FORCE_ITANIUM and _LIBCPP_ABI_FORCE_MICROSOFT can be defined" # elif defined(_LIBCPP_ABI_FORCE_ITANIUM) # define _LIBCPP_ABI_ITANIUM # elif defined(_LIBCPP_ABI_FORCE_MICROSOFT) # define _LIBCPP_ABI_MICROSOFT # else # if defined(_WIN32) && defined(_MSC_VER) # define _LIBCPP_ABI_MICROSOFT # else # define _LIBCPP_ABI_ITANIUM # endif # endif # if defined(_LIBCPP_ABI_MICROSOFT) && !defined(_LIBCPP_NO_VCRUNTIME) # define _LIBCPP_ABI_VCRUNTIME # endif # if __has_feature(experimental_library) # ifndef _LIBCPP_ENABLE_EXPERIMENTAL # define _LIBCPP_ENABLE_EXPERIMENTAL # endif # endif // Incomplete features get their own specific disabling flags. This makes it // easier to grep for target specific flags once the feature is complete. # if !defined(_LIBCPP_ENABLE_EXPERIMENTAL) && !defined(_LIBCPP_BUILDING_LIBRARY) # define _LIBCPP_HAS_NO_INCOMPLETE_FORMAT # define _LIBCPP_HAS_NO_INCOMPLETE_RANGES # endif // Need to detect which libc we're using if we're on Linux. 
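// The __has_* fallback pattern defined above, shown in isolation: defining the probe
// macro to 0 when the compiler lacks it lets the same #if work on GCC, Clang and MSVC
// alike. Standalone illustration, not libc++ code; MY_LIKELY is an invented macro.
#ifndef __has_builtin
#  define __has_builtin(x) 0
#endif

#if __has_builtin(__builtin_expect)
#  define MY_LIKELY(x) __builtin_expect(!!(x), 1)
#else
#  define MY_LIKELY(x) (x)
#endif

int branchy(int v) { return MY_LIKELY(v > 0) ? v : -v; }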
# if defined(__linux__) # include # if defined(__GLIBC_PREREQ) # define _LIBCPP_GLIBC_PREREQ(a, b) __GLIBC_PREREQ(a, b) # else # define _LIBCPP_GLIBC_PREREQ(a, b) 0 # endif // defined(__GLIBC_PREREQ) # endif // defined(__linux__) # if defined(__MVS__) # include // for __NATIVE_ASCII_F # endif # ifdef __LITTLE_ENDIAN__ # if __LITTLE_ENDIAN__ # define _LIBCPP_LITTLE_ENDIAN # endif // __LITTLE_ENDIAN__ # endif // __LITTLE_ENDIAN__ # ifdef __BIG_ENDIAN__ # if __BIG_ENDIAN__ # define _LIBCPP_BIG_ENDIAN # endif // __BIG_ENDIAN__ # endif // __BIG_ENDIAN__ # ifdef __BYTE_ORDER__ # if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ # define _LIBCPP_LITTLE_ENDIAN # elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ # define _LIBCPP_BIG_ENDIAN # endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ # endif // __BYTE_ORDER__ # ifdef __FreeBSD__ # include # include # if _BYTE_ORDER == _LITTLE_ENDIAN # define _LIBCPP_LITTLE_ENDIAN # else // _BYTE_ORDER == _LITTLE_ENDIAN # define _LIBCPP_BIG_ENDIAN # endif // _BYTE_ORDER == _LITTLE_ENDIAN # endif // __FreeBSD__ # if defined(__NetBSD__) || defined(__OpenBSD__) # include # if _BYTE_ORDER == _LITTLE_ENDIAN # define _LIBCPP_LITTLE_ENDIAN # else // _BYTE_ORDER == _LITTLE_ENDIAN # define _LIBCPP_BIG_ENDIAN # endif // _BYTE_ORDER == _LITTLE_ENDIAN # endif // defined(__NetBSD__) || defined(__OpenBSD__) # if defined(_WIN32) # define _LIBCPP_WIN32API # define _LIBCPP_LITTLE_ENDIAN # define _LIBCPP_SHORT_WCHAR 1 // Both MinGW and native MSVC provide a "MSVC"-like environment # define _LIBCPP_MSVCRT_LIKE // If mingw not explicitly detected, assume using MS C runtime only if // a MS compatibility version is specified. # if defined(_MSC_VER) && !defined(__MINGW32__) # define _LIBCPP_MSVCRT // Using Microsoft's C Runtime library # endif # if (defined(_M_AMD64) || defined(__x86_64__)) || (defined(_M_ARM) || defined(__arm__)) # define _LIBCPP_HAS_BITSCAN64 # endif # define _LIBCPP_HAS_OPEN_WITH_WCHAR # endif // defined(_WIN32) # ifdef __sun__ # include # ifdef _LITTLE_ENDIAN # define _LIBCPP_LITTLE_ENDIAN # else # define _LIBCPP_BIG_ENDIAN # endif # endif // __sun__ # if defined(_AIX) && !defined(__64BIT__) // The size of wchar is 2 byte on 32-bit mode on AIX. # define _LIBCPP_SHORT_WCHAR 1 # endif // Libc++ supports various implementations of std::random_device. // // _LIBCPP_USING_DEV_RANDOM // Read entropy from the given file, by default `/dev/urandom`. // If a token is provided, it is assumed to be the path to a file // to read entropy from. This is the default behavior if nothing // else is specified. This implementation requires storing state // inside `std::random_device`. // // _LIBCPP_USING_ARC4_RANDOM // Use arc4random(). This allows obtaining random data even when // using sandboxing mechanisms. On some platforms like Apple, this // is the recommended source of entropy for user-space programs. // When this option is used, the token passed to `std::random_device`'s // constructor *must* be "/dev/urandom" -- anything else is an error. // // _LIBCPP_USING_GETENTROPY // Use getentropy(). // When this option is used, the token passed to `std::random_device`'s // constructor *must* be "/dev/urandom" -- anything else is an error. // // _LIBCPP_USING_FUCHSIA_CPRNG // Use Fuchsia's zx_cprng_draw() system call, which is specified to // deliver high-quality entropy and cannot fail. // When this option is used, the token passed to `std::random_device`'s // constructor *must* be "/dev/urandom" -- anything else is an error. 
// // _LIBCPP_USING_NACL_RANDOM // NaCl's sandbox (which PNaCl also runs in) doesn't allow filesystem access, // including accesses to the special files under `/dev`. This implementation // uses the NaCL syscall `nacl_secure_random_init()` to get entropy. // When this option is used, the token passed to `std::random_device`'s // constructor *must* be "/dev/urandom" -- anything else is an error. // // _LIBCPP_USING_WIN32_RANDOM // Use rand_s(), for use on Windows. // When this option is used, the token passed to `std::random_device`'s // constructor *must* be "/dev/urandom" -- anything else is an error. # if defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \ defined(__DragonFly__) || defined(__sun__) # define _LIBCPP_USING_ARC4_RANDOM # elif defined(__wasi__) || defined(__EMSCRIPTEN__) # define _LIBCPP_USING_GETENTROPY # elif defined(__Fuchsia__) # define _LIBCPP_USING_FUCHSIA_CPRNG # elif defined(__native_client__) # define _LIBCPP_USING_NACL_RANDOM # elif defined(_LIBCPP_WIN32API) # define _LIBCPP_USING_WIN32_RANDOM # else # define _LIBCPP_USING_DEV_RANDOM # endif # if !defined(_LIBCPP_LITTLE_ENDIAN) && !defined(_LIBCPP_BIG_ENDIAN) # include # if __BYTE_ORDER == __LITTLE_ENDIAN # define _LIBCPP_LITTLE_ENDIAN # elif __BYTE_ORDER == __BIG_ENDIAN # define _LIBCPP_BIG_ENDIAN # else // __BYTE_ORDER == __BIG_ENDIAN # error unable to determine endian # endif # endif // !defined(_LIBCPP_LITTLE_ENDIAN) && !defined(_LIBCPP_BIG_ENDIAN) # if __has_attribute(__no_sanitize__) && !defined(_LIBCPP_COMPILER_GCC) # define _LIBCPP_NO_CFI __attribute__((__no_sanitize__("cfi"))) # else # define _LIBCPP_NO_CFI # endif # ifndef _LIBCPP_CXX03_LANG # define _LIBCPP_ALIGNOF(_Tp) alignof(_Tp) # define _ALIGNAS_TYPE(x) alignas(x) # define _ALIGNAS(x) alignas(x) # define _LIBCPP_NORETURN [[noreturn]] # define _NOEXCEPT noexcept # define _NOEXCEPT_(x) noexcept(x) # else # define _LIBCPP_ALIGNOF(_Tp) _Alignof(_Tp) # define _ALIGNAS_TYPE(x) __attribute__((__aligned__(_LIBCPP_ALIGNOF(x)))) # define _ALIGNAS(x) __attribute__((__aligned__(x))) # define _LIBCPP_NORETURN __attribute__((noreturn)) # define _LIBCPP_HAS_NO_NOEXCEPT # define nullptr __nullptr # define _NOEXCEPT throw() # define _NOEXCEPT_(x) typedef __char16_t char16_t; typedef __char32_t char32_t; # endif # if !defined(__cpp_exceptions) || __cpp_exceptions < 199711L # define _LIBCPP_NO_EXCEPTIONS # endif # define _LIBCPP_PREFERRED_ALIGNOF(_Tp) __alignof(_Tp) # if defined(_LIBCPP_COMPILER_CLANG_BASED) # if defined(__APPLE__) && !defined(__i386__) && !defined(__x86_64__) && (!defined(__arm__) || __ARM_ARCH_7K__ >= 2) # define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT # endif // Objective-C++ features (opt-in) # if __has_feature(objc_arc) # define _LIBCPP_HAS_OBJC_ARC # endif # if __has_feature(objc_arc_weak) # define _LIBCPP_HAS_OBJC_ARC_WEAK # endif # if __has_extension(blocks) # define _LIBCPP_HAS_EXTENSION_BLOCKS # endif # if defined(_LIBCPP_HAS_EXTENSION_BLOCKS) && defined(__APPLE__) # define _LIBCPP_HAS_BLOCKS_RUNTIME # endif # if !__has_feature(address_sanitizer) # define _LIBCPP_HAS_NO_ASAN # endif // Allow for build-time disabling of unsigned integer sanitization # if __has_attribute(no_sanitize) # define _LIBCPP_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK __attribute__((__no_sanitize__("unsigned-integer-overflow"))) # endif # define _LIBCPP_ALWAYS_INLINE __attribute__((__always_inline__)) # define _LIBCPP_DISABLE_EXTENSION_WARNING __extension__ # elif defined(_LIBCPP_COMPILER_GCC) # if !defined(__SANITIZE_ADDRESS__) # 
define _LIBCPP_HAS_NO_ASAN # endif # define _LIBCPP_ALWAYS_INLINE __attribute__((__always_inline__)) # define _LIBCPP_DISABLE_EXTENSION_WARNING __extension__ # elif defined(_LIBCPP_COMPILER_MSVC) # define _LIBCPP_WARNING(x) __pragma(message(__FILE__ "(" _LIBCPP_TOSTRING(__LINE__) ") : warning note: " x)) # if _MSC_VER < 1900 # error "MSVC versions prior to Visual Studio 2015 are not supported" # endif # define _LIBCPP_NORETURN __declspec(noreturn) # define _LIBCPP_WEAK # define _LIBCPP_HAS_NO_ASAN # define _LIBCPP_ALWAYS_INLINE __forceinline # define _LIBCPP_HAS_NO_VECTOR_EXTENSION # define _LIBCPP_DISABLE_EXTENSION_WARNING # endif // _LIBCPP_COMPILER_[CLANG|GCC|MSVC] # if defined(_LIBCPP_OBJECT_FORMAT_COFF) # ifdef _DLL # define _LIBCPP_CRT_FUNC __declspec(dllimport) # else # define _LIBCPP_CRT_FUNC # endif # if defined(_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS) || (defined(__MINGW32__) && !defined(_LIBCPP_BUILDING_LIBRARY)) # define _LIBCPP_DLL_VIS # define _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS # define _LIBCPP_CLASS_TEMPLATE_INSTANTIATION_VIS # define _LIBCPP_OVERRIDABLE_FUNC_VIS # define _LIBCPP_EXPORTED_FROM_ABI # elif defined(_LIBCPP_BUILDING_LIBRARY) # define _LIBCPP_DLL_VIS __declspec(dllexport) # if defined(__MINGW32__) # define _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS _LIBCPP_DLL_VIS # define _LIBCPP_CLASS_TEMPLATE_INSTANTIATION_VIS # else # define _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS # define _LIBCPP_CLASS_TEMPLATE_INSTANTIATION_VIS _LIBCPP_DLL_VIS # endif # define _LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_DLL_VIS # define _LIBCPP_EXPORTED_FROM_ABI __declspec(dllexport) # else # define _LIBCPP_DLL_VIS __declspec(dllimport) # define _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS _LIBCPP_DLL_VIS # define _LIBCPP_CLASS_TEMPLATE_INSTANTIATION_VIS # define _LIBCPP_OVERRIDABLE_FUNC_VIS # define _LIBCPP_EXPORTED_FROM_ABI __declspec(dllimport) # endif # define _LIBCPP_TYPE_VIS _LIBCPP_DLL_VIS # define _LIBCPP_FUNC_VIS _LIBCPP_DLL_VIS # define _LIBCPP_EXCEPTION_ABI _LIBCPP_DLL_VIS # define _LIBCPP_HIDDEN # define _LIBCPP_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS # define _LIBCPP_TEMPLATE_VIS # define _LIBCPP_TEMPLATE_DATA_VIS # define _LIBCPP_ENUM_VIS # else # if !defined(_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS) # define _LIBCPP_VISIBILITY(vis) __attribute__((__visibility__(vis))) # else # define _LIBCPP_VISIBILITY(vis) # endif # define _LIBCPP_HIDDEN _LIBCPP_VISIBILITY("hidden") # define _LIBCPP_FUNC_VIS _LIBCPP_VISIBILITY("default") # define _LIBCPP_TYPE_VIS _LIBCPP_VISIBILITY("default") # define _LIBCPP_TEMPLATE_DATA_VIS _LIBCPP_VISIBILITY("default") # define _LIBCPP_EXPORTED_FROM_ABI _LIBCPP_VISIBILITY("default") # define _LIBCPP_EXCEPTION_ABI _LIBCPP_VISIBILITY("default") # define _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS _LIBCPP_VISIBILITY("default") # define _LIBCPP_CLASS_TEMPLATE_INSTANTIATION_VIS // TODO: Make this a proper customization point or remove the option to override it. 
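// For illustration only (these are not declarations from this header), the
// visibility macros defined above are meant to be applied like so:
//
//   class _LIBCPP_TYPE_VIS __some_public_type;        // part of the dylib ABI
//   _LIBCPP_FUNC_VIS void __some_public_function();   // exported from the dylib
//   _LIBCPP_HIDDEN void __some_internal_helper();     // never exported
//
// __some_public_type, __some_public_function and __some_internal_helper are
// hypothetical names used only for this sketch.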
# ifndef _LIBCPP_OVERRIDABLE_FUNC_VIS # define _LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_VISIBILITY("default") # endif # if !defined(_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS) // The inline should be removed once PR32114 is resolved # define _LIBCPP_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS inline _LIBCPP_HIDDEN # else # define _LIBCPP_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS # endif # if !defined(_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS) # if __has_attribute(__type_visibility__) # define _LIBCPP_TEMPLATE_VIS __attribute__((__type_visibility__("default"))) # else # define _LIBCPP_TEMPLATE_VIS __attribute__((__visibility__("default"))) # endif # else # define _LIBCPP_TEMPLATE_VIS # endif # if !defined(_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS) && __has_attribute(__type_visibility__) # define _LIBCPP_ENUM_VIS __attribute__((__type_visibility__("default"))) # else # define _LIBCPP_ENUM_VIS # endif # endif // defined(_LIBCPP_OBJECT_FORMAT_COFF) # if __has_attribute(exclude_from_explicit_instantiation) # define _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION __attribute__((__exclude_from_explicit_instantiation__)) # else // Try to approximate the effect of exclude_from_explicit_instantiation // (which is that entities are not assumed to be provided by explicit // template instantiations in the dylib) by always inlining those entities. # define _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION _LIBCPP_ALWAYS_INLINE # endif // This macro marks a symbol as being hidden from libc++'s ABI. This is achieved // on two levels: // 1. The symbol is given hidden visibility, which ensures that users won't start exporting // symbols from their dynamic library by means of using the libc++ headers. This ensures // that those symbols stay private to the dynamic library in which it is defined. // // 2. The symbol is given an ABI tag that changes with each version of libc++. This ensures // that no ODR violation can arise from mixing two TUs compiled with different versions // of libc++ where we would have changed the definition of a symbol. If the symbols shared // the same name, the ODR would require that their definitions be token-by-token equivalent, // which basically prevents us from being able to make any change to any function in our // headers. Using this ABI tag ensures that the symbol name is "bumped" artificially at // each release, which lets us change the definition of these symbols at our leisure. // Note that historically, this has been achieved in various ways, including force-inlining // all functions or giving internal linkage to all functions. Both these (previous) solutions // suffer from drawbacks that lead notably to code bloat. // // Note that we use _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION to ensure that we don't depend // on _LIBCPP_HIDE_FROM_ABI methods of classes explicitly instantiated in the dynamic library. // // TODO: We provide a escape hatch with _LIBCPP_NO_ABI_TAG for folks who want to avoid increasing // the length of symbols with an ABI tag. In practice, we should remove the escape hatch and // use compression mangling instead, see https://github.com/itanium-cxx-abi/cxx-abi/issues/70. 
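// As a concrete illustration (not actual header content): in a typical
// ELF/Mach-O configuration, and assuming a hypothetical versioned identifier
// "v15000", the macro defined below roughly expands to
//
//   __attribute__((__visibility__("hidden")))
//   __attribute__((__exclude_from_explicit_instantiation__))
//   __attribute__((__abi_tag__("v15000")))
//
// so a function declared _LIBCPP_HIDE_FROM_ABI mangles differently in each
// release, and two TUs built against different libc++ versions can never end
// up sharing (and therefore conflicting on) such a symbol.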
# ifndef _LIBCPP_NO_ABI_TAG # define _LIBCPP_HIDE_FROM_ABI \ _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION \ __attribute__((__abi_tag__(_LIBCPP_TOSTRING(_LIBCPP_VERSIONED_IDENTIFIER)))) # else # define _LIBCPP_HIDE_FROM_ABI _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION # endif # ifdef _LIBCPP_BUILDING_LIBRARY # if _LIBCPP_ABI_VERSION > 1 # define _LIBCPP_HIDE_FROM_ABI_AFTER_V1 _LIBCPP_HIDE_FROM_ABI # else # define _LIBCPP_HIDE_FROM_ABI_AFTER_V1 # endif # else # define _LIBCPP_HIDE_FROM_ABI_AFTER_V1 _LIBCPP_HIDE_FROM_ABI # endif // Just so we can migrate to the new macros gradually. # define _LIBCPP_INLINE_VISIBILITY _LIBCPP_HIDE_FROM_ABI // Inline namespaces are available in Clang/GCC/MSVC regardless of C++ dialect. // clang-format off # define _LIBCPP_BEGIN_NAMESPACE_STD namespace std { inline namespace _LIBCPP_ABI_NAMESPACE { # define _LIBCPP_END_NAMESPACE_STD }} # define _VSTD std _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_END_NAMESPACE_STD # if _LIBCPP_STD_VER > 14 # define _LIBCPP_BEGIN_NAMESPACE_FILESYSTEM \ _LIBCPP_BEGIN_NAMESPACE_STD inline namespace __fs { namespace filesystem { # else # define _LIBCPP_BEGIN_NAMESPACE_FILESYSTEM \ _LIBCPP_BEGIN_NAMESPACE_STD namespace __fs { namespace filesystem { # endif # define _LIBCPP_END_NAMESPACE_FILESYSTEM _LIBCPP_END_NAMESPACE_STD }} // clang-format on # define _VSTD_FS std::__fs::filesystem # if __has_attribute(__enable_if__) # define _LIBCPP_PREFERRED_OVERLOAD __attribute__((__enable_if__(true, ""))) # endif # ifndef __SIZEOF_INT128__ # define _LIBCPP_HAS_NO_INT128 # endif # ifdef _LIBCPP_CXX03_LANG # define static_assert(...) _Static_assert(__VA_ARGS__) # define decltype(...) __decltype(__VA_ARGS__) # endif // _LIBCPP_CXX03_LANG # ifdef _LIBCPP_CXX03_LANG # define _LIBCPP_CONSTEXPR # else # define _LIBCPP_CONSTEXPR constexpr # endif # ifndef __cpp_consteval # define _LIBCPP_CONSTEVAL _LIBCPP_CONSTEXPR # else # define _LIBCPP_CONSTEVAL consteval # endif # ifdef __GNUC__ # define _LIBCPP_NOALIAS __attribute__((__malloc__)) # else # define _LIBCPP_NOALIAS # endif # if __has_attribute(using_if_exists) # define _LIBCPP_USING_IF_EXISTS __attribute__((using_if_exists)) # else # define _LIBCPP_USING_IF_EXISTS # endif # ifdef _LIBCPP_CXX03_LANG # define _LIBCPP_DECLARE_STRONG_ENUM(x) \ struct _LIBCPP_TYPE_VIS x { \ enum __lx // clang-format off # define _LIBCPP_DECLARE_STRONG_ENUM_EPILOG(x) \ __lx __v_; \ _LIBCPP_INLINE_VISIBILITY x(__lx __v) : __v_(__v) {} \ _LIBCPP_INLINE_VISIBILITY explicit x(int __v) : __v_(static_cast<__lx>(__v)) {} \ _LIBCPP_INLINE_VISIBILITY operator int() const { return __v_; } \ }; // clang-format on # else // _LIBCPP_CXX03_LANG # define _LIBCPP_DECLARE_STRONG_ENUM(x) enum class _LIBCPP_ENUM_VIS x # define _LIBCPP_DECLARE_STRONG_ENUM_EPILOG(x) # endif // _LIBCPP_CXX03_LANG # if defined(__APPLE__) || defined(__FreeBSD__) || defined(_LIBCPP_MSVCRT_LIKE) || defined(__sun__) || \ defined(__NetBSD__) # define _LIBCPP_LOCALE__L_EXTENSIONS 1 # endif # ifdef __FreeBSD__ # define _DECLARE_C99_LDBL_MATH 1 # endif // If we are getting operator new from the MSVC CRT, then allocation overloads // for align_val_t were added in 19.12, aka VS 2017 version 15.3. # if defined(_LIBCPP_MSVCRT) && defined(_MSC_VER) && _MSC_VER < 1912 # define _LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION # elif defined(_LIBCPP_ABI_VCRUNTIME) && !defined(__cpp_aligned_new) // We're deferring to Microsoft's STL to provide aligned new et al. We don't // have it unless the language feature test macro is defined. 
# define _LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION
# elif defined(__MVS__)
# define _LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION
# endif

# if defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION) || (!defined(__cpp_aligned_new) || __cpp_aligned_new < 201606)
# define _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
# endif

# if defined(__APPLE__) || defined(__FreeBSD__)
# define _LIBCPP_HAS_DEFAULTRUNELOCALE
# endif

# if defined(__APPLE__) || defined(__FreeBSD__) || defined(__sun__)
# define _LIBCPP_WCTYPE_IS_MASK
# endif

# if _LIBCPP_STD_VER <= 17 || !defined(__cpp_char8_t)
# define _LIBCPP_HAS_NO_CHAR8_T
# endif

// Deprecation macros.
//
// Deprecation warnings are always enabled, except when users explicitly opt-out
// by defining _LIBCPP_DISABLE_DEPRECATION_WARNINGS.
# if !defined(_LIBCPP_DISABLE_DEPRECATION_WARNINGS)
# if __has_attribute(deprecated)
# define _LIBCPP_DEPRECATED __attribute__((deprecated))
# define _LIBCPP_DEPRECATED_(m) __attribute__((deprecated(m)))
# elif _LIBCPP_STD_VER > 11
# define _LIBCPP_DEPRECATED [[deprecated]]
# define _LIBCPP_DEPRECATED_(m) [[deprecated(m)]]
# else
# define _LIBCPP_DEPRECATED
# define _LIBCPP_DEPRECATED_(m)
# endif
# else
# define _LIBCPP_DEPRECATED
# define _LIBCPP_DEPRECATED_(m)
# endif

# if !defined(_LIBCPP_CXX03_LANG)
# define _LIBCPP_DEPRECATED_IN_CXX11 _LIBCPP_DEPRECATED
# else
# define _LIBCPP_DEPRECATED_IN_CXX11
# endif

# if _LIBCPP_STD_VER > 11
# define _LIBCPP_DEPRECATED_IN_CXX14 _LIBCPP_DEPRECATED
# else
# define _LIBCPP_DEPRECATED_IN_CXX14
# endif

# if _LIBCPP_STD_VER > 14
# define _LIBCPP_DEPRECATED_IN_CXX17 _LIBCPP_DEPRECATED
# else
# define _LIBCPP_DEPRECATED_IN_CXX17
# endif

# if _LIBCPP_STD_VER > 17
# define _LIBCPP_DEPRECATED_IN_CXX20 _LIBCPP_DEPRECATED
# else
# define _LIBCPP_DEPRECATED_IN_CXX20
# endif

# if !defined(_LIBCPP_HAS_NO_CHAR8_T)
# define _LIBCPP_DEPRECATED_WITH_CHAR8_T _LIBCPP_DEPRECATED
# else
# define _LIBCPP_DEPRECATED_WITH_CHAR8_T
# endif

// Macros to enter and leave a state where deprecation warnings are suppressed.
# if defined(_LIBCPP_COMPILER_CLANG_BASED) || defined(_LIBCPP_COMPILER_GCC)
# define _LIBCPP_SUPPRESS_DEPRECATED_PUSH \
    _Pragma("GCC diagnostic push") _Pragma("GCC diagnostic ignored \"-Wdeprecated\"") \
    _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
# define _LIBCPP_SUPPRESS_DEPRECATED_POP _Pragma("GCC diagnostic pop")
# else
# define _LIBCPP_SUPPRESS_DEPRECATED_PUSH
# define _LIBCPP_SUPPRESS_DEPRECATED_POP
# endif

# if _LIBCPP_STD_VER <= 11
# define _LIBCPP_EXPLICIT_AFTER_CXX11
# else
# define _LIBCPP_EXPLICIT_AFTER_CXX11 explicit
# endif

# if _LIBCPP_STD_VER > 11
# define _LIBCPP_CONSTEXPR_AFTER_CXX11 constexpr
# else
# define _LIBCPP_CONSTEXPR_AFTER_CXX11
# endif

# if _LIBCPP_STD_VER > 14
# define _LIBCPP_CONSTEXPR_AFTER_CXX14 constexpr
# else
# define _LIBCPP_CONSTEXPR_AFTER_CXX14
# endif

# if _LIBCPP_STD_VER > 17
# define _LIBCPP_CONSTEXPR_AFTER_CXX17 constexpr
# else
# define _LIBCPP_CONSTEXPR_AFTER_CXX17
# endif

# if __has_cpp_attribute(nodiscard) || defined(_LIBCPP_COMPILER_MSVC)
# define _LIBCPP_NODISCARD [[nodiscard]]
# elif defined(_LIBCPP_COMPILER_CLANG_BASED) && !defined(_LIBCPP_CXX03_LANG)
# define _LIBCPP_NODISCARD [[clang::warn_unused_result]]
# else
// We can't use GCC's [[gnu::warn_unused_result]] and
// __attribute__((warn_unused_result)), because GCC does not silence them via
// (void) cast.
# define _LIBCPP_NODISCARD
# endif

// _LIBCPP_NODISCARD_EXT may be used to apply [[nodiscard]] to entities not
// specified as such by the standard, as an extension.
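// For illustration (user-side code, not part of this header): if a library
// function is annotated with _LIBCPP_NODISCARD_EXT and the user builds with
// _LIBCPP_ENABLE_NODISCARD, silently dropping its result is diagnosed:
//
//   __annotated_function();        // warning: ignoring nodiscard return value
//   (void)__annotated_function();  // explicit discard, no warning
//
// __annotated_function is a hypothetical name used only for this sketch.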
# if defined(_LIBCPP_ENABLE_NODISCARD) && !defined(_LIBCPP_DISABLE_NODISCARD_EXT) # define _LIBCPP_NODISCARD_EXT _LIBCPP_NODISCARD # else # define _LIBCPP_NODISCARD_EXT # endif # if !defined(_LIBCPP_DISABLE_NODISCARD_AFTER_CXX17) && (_LIBCPP_STD_VER > 17 || defined(_LIBCPP_ENABLE_NODISCARD)) # define _LIBCPP_NODISCARD_AFTER_CXX17 _LIBCPP_NODISCARD # else # define _LIBCPP_NODISCARD_AFTER_CXX17 # endif # if __has_attribute(no_destroy) # define _LIBCPP_NO_DESTROY __attribute__((__no_destroy__)) # else # define _LIBCPP_NO_DESTROY # endif # ifndef _LIBCPP_HAS_NO_ASAN extern "C" _LIBCPP_FUNC_VIS void __sanitizer_annotate_contiguous_container(const void*, const void*, const void*, const void*); # endif // Try to find out if RTTI is disabled. # if !defined(__cpp_rtti) || __cpp_rtti < 199711L # define _LIBCPP_NO_RTTI # endif # ifndef _LIBCPP_WEAK # define _LIBCPP_WEAK __attribute__((__weak__)) # endif // Thread API // clang-format off # if !defined(_LIBCPP_HAS_NO_THREADS) && \ !defined(_LIBCPP_HAS_THREAD_API_PTHREAD) && \ !defined(_LIBCPP_HAS_THREAD_API_WIN32) && \ !defined(_LIBCPP_HAS_THREAD_API_EXTERNAL) # if defined(__FreeBSD__) || \ defined(__wasi__) || \ defined(__NetBSD__) || \ defined(__OpenBSD__) || \ defined(__NuttX__) || \ defined(__linux__) || \ defined(__GNU__) || \ defined(__APPLE__) || \ defined(__sun__) || \ defined(__MVS__) || \ defined(_AIX) || \ defined(__EMSCRIPTEN__) // clang-format on # define _LIBCPP_HAS_THREAD_API_PTHREAD # elif defined(__Fuchsia__) // TODO(44575): Switch to C11 thread API when possible. # define _LIBCPP_HAS_THREAD_API_PTHREAD # elif defined(_LIBCPP_WIN32API) # define _LIBCPP_HAS_THREAD_API_WIN32 # else # error "No thread API" # endif // _LIBCPP_HAS_THREAD_API # endif // _LIBCPP_HAS_NO_THREADS # if defined(_LIBCPP_HAS_THREAD_API_PTHREAD) # if defined(__ANDROID__) && __ANDROID_API__ >= 30 # define _LIBCPP_HAS_COND_CLOCKWAIT # elif defined(_LIBCPP_GLIBC_PREREQ) # if _LIBCPP_GLIBC_PREREQ(2, 30) # define _LIBCPP_HAS_COND_CLOCKWAIT # endif # endif # endif # if defined(_LIBCPP_HAS_NO_THREADS) && defined(_LIBCPP_HAS_THREAD_API_PTHREAD) # error _LIBCPP_HAS_THREAD_API_PTHREAD may only be defined when \ _LIBCPP_HAS_NO_THREADS is not defined. # endif # if defined(_LIBCPP_HAS_NO_THREADS) && defined(_LIBCPP_HAS_THREAD_API_EXTERNAL) # error _LIBCPP_HAS_THREAD_API_EXTERNAL may not be defined when \ _LIBCPP_HAS_NO_THREADS is defined. # endif # if defined(_LIBCPP_HAS_NO_MONOTONIC_CLOCK) && !defined(_LIBCPP_HAS_NO_THREADS) # error _LIBCPP_HAS_NO_MONOTONIC_CLOCK may only be defined when \ _LIBCPP_HAS_NO_THREADS is defined. # endif # if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(__STDCPP_THREADS__) # define __STDCPP_THREADS__ 1 # endif // The glibc and Bionic implementation of pthreads implements // pthread_mutex_destroy as nop for regular mutexes. Additionally, Win32 // mutexes have no destroy mechanism. // // This optimization can't be performed on Apple platforms, where // pthread_mutex_destroy can allow the kernel to release resources. // See https://llvm.org/D64298 for details. // // TODO(EricWF): Enable this optimization on Bionic after speaking to their // respective stakeholders. // clang-format off # if (defined(_LIBCPP_HAS_THREAD_API_PTHREAD) && defined(__GLIBC__)) || \ (defined(_LIBCPP_HAS_THREAD_API_C11) && defined(__Fuchsia__)) || \ defined(_LIBCPP_HAS_THREAD_API_WIN32) // clang-format on # define _LIBCPP_HAS_TRIVIAL_MUTEX_DESTRUCTION # endif // Destroying a condvar is a nop on Windows. 
// // This optimization can't be performed on Apple platforms, where // pthread_cond_destroy can allow the kernel to release resources. // See https://llvm.org/D64298 for details. // // TODO(EricWF): This is potentially true for some pthread implementations // as well. # if (defined(_LIBCPP_HAS_THREAD_API_C11) && defined(__Fuchsia__)) || defined(_LIBCPP_HAS_THREAD_API_WIN32) # define _LIBCPP_HAS_TRIVIAL_CONDVAR_DESTRUCTION # endif // Some systems do not provide gets() in their C library, for security reasons. # if defined(_LIBCPP_MSVCRT) || (defined(__FreeBSD_version) && __FreeBSD_version >= 1300043) || defined(__OpenBSD__) # define _LIBCPP_C_HAS_NO_GETS # endif # if defined(__BIONIC__) || defined(__NuttX__) || defined(__Fuchsia__) || defined(__wasi__) || \ defined(_LIBCPP_HAS_MUSL_LIBC) || defined(__OpenBSD__) # define _LIBCPP_PROVIDES_DEFAULT_RUNE_TABLE # endif # if __has_feature(cxx_atomic) || __has_extension(c_atomic) || __has_keyword(_Atomic) # define _LIBCPP_HAS_C_ATOMIC_IMP # elif defined(_LIBCPP_COMPILER_GCC) # define _LIBCPP_HAS_GCC_ATOMIC_IMP # endif # if !defined(_LIBCPP_HAS_C_ATOMIC_IMP) && !defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) && \ !defined(_LIBCPP_HAS_EXTERNAL_ATOMIC_IMP) # define _LIBCPP_HAS_NO_ATOMIC_HEADER # else # ifndef _LIBCPP_ATOMIC_FLAG_TYPE # define _LIBCPP_ATOMIC_FLAG_TYPE bool # endif # ifdef _LIBCPP_FREESTANDING # define _LIBCPP_ATOMIC_ONLY_USE_BUILTINS # endif # endif # ifndef _LIBCPP_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK # define _LIBCPP_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK # endif # if defined(_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS) # if defined(__clang__) && __has_attribute(acquire_capability) // Work around the attribute handling in clang. When both __declspec and // __attribute__ are present, the processing goes awry preventing the definition // of the types. In MinGW mode, __declspec evaluates to __attribute__, and thus // combining the two does work. # if !defined(_MSC_VER) # define _LIBCPP_HAS_THREAD_SAFETY_ANNOTATIONS # endif # endif # endif # ifdef _LIBCPP_HAS_THREAD_SAFETY_ANNOTATIONS # define _LIBCPP_THREAD_SAFETY_ANNOTATION(x) __attribute__((x)) # else # define _LIBCPP_THREAD_SAFETY_ANNOTATION(x) # endif # if _LIBCPP_STD_VER > 17 # define _LIBCPP_CONSTINIT constinit # elif __has_attribute(require_constant_initialization) # define _LIBCPP_CONSTINIT __attribute__((__require_constant_initialization__)) # else # define _LIBCPP_CONSTINIT # endif # if __has_attribute(diagnose_if) && !defined(_LIBCPP_DISABLE_ADDITIONAL_DIAGNOSTICS) # define _LIBCPP_DIAGNOSE_WARNING(...) __attribute__((diagnose_if(__VA_ARGS__, "warning"))) # define _LIBCPP_DIAGNOSE_ERROR(...) __attribute__((diagnose_if(__VA_ARGS__, "error"))) # else # define _LIBCPP_DIAGNOSE_WARNING(...) # define _LIBCPP_DIAGNOSE_ERROR(...) 
# endif // Use a function like macro to imply that it must be followed by a semicolon # if __has_cpp_attribute(fallthrough) # define _LIBCPP_FALLTHROUGH() [[fallthrough]] # elif __has_attribute(__fallthrough__) # define _LIBCPP_FALLTHROUGH() __attribute__((__fallthrough__)) # else # define _LIBCPP_FALLTHROUGH() ((void)0) # endif # if __has_attribute(__nodebug__) # define _LIBCPP_NODEBUG __attribute__((__nodebug__)) # else # define _LIBCPP_NODEBUG # endif # if __has_attribute(__standalone_debug__) # define _LIBCPP_STANDALONE_DEBUG __attribute__((__standalone_debug__)) # else # define _LIBCPP_STANDALONE_DEBUG # endif # if __has_attribute(__preferred_name__) # define _LIBCPP_PREFERRED_NAME(x) __attribute__((__preferred_name__(x))) # else # define _LIBCPP_PREFERRED_NAME(x) # endif // We often repeat things just for handling wide characters in the library. // When wide characters are disabled, it can be useful to have a quick way of // disabling it without having to resort to #if-#endif, which has a larger // impact on readability. # if defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) # define _LIBCPP_IF_WIDE_CHARACTERS(...) # else # define _LIBCPP_IF_WIDE_CHARACTERS(...) __VA_ARGS__ # endif # if defined(_LIBCPP_ABI_MICROSOFT) && (defined(_LIBCPP_COMPILER_MSVC) || __has_declspec_attribute(empty_bases)) # define _LIBCPP_DECLSPEC_EMPTY_BASES __declspec(empty_bases) # else # define _LIBCPP_DECLSPEC_EMPTY_BASES # endif # if defined(_LIBCPP_ENABLE_CXX17_REMOVED_FEATURES) # define _LIBCPP_ENABLE_CXX17_REMOVED_AUTO_PTR # define _LIBCPP_ENABLE_CXX17_REMOVED_BINDERS # define _LIBCPP_ENABLE_CXX17_REMOVED_RANDOM_SHUFFLE # define _LIBCPP_ENABLE_CXX17_REMOVED_UNEXPECTED_FUNCTIONS # define _LIBCPP_ENABLE_CXX17_REMOVED_UNARY_BINARY_FUNCTION # endif // _LIBCPP_ENABLE_CXX17_REMOVED_FEATURES // Leave the deprecation notices in by default, but don't remove unary_function and // binary_function entirely just yet. That way, folks will have one release to act // on the deprecation warnings. # ifndef _LIBCPP_ENABLE_CXX17_REMOVED_UNARY_BINARY_FUNCTION # define _LIBCPP_ENABLE_CXX17_REMOVED_UNARY_BINARY_FUNCTION # endif # if defined(_LIBCPP_ENABLE_CXX20_REMOVED_FEATURES) # define _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS # define _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_VOID_SPECIALIZATION # define _LIBCPP_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS # define _LIBCPP_ENABLE_CXX20_REMOVED_NEGATORS # define _LIBCPP_ENABLE_CXX20_REMOVED_RAW_STORAGE_ITERATOR # define _LIBCPP_ENABLE_CXX20_REMOVED_TYPE_TRAITS # endif // _LIBCPP_ENABLE_CXX20_REMOVED_FEATURES # if !defined(__cpp_impl_coroutine) || __cpp_impl_coroutine < 201902L # define _LIBCPP_HAS_NO_CXX20_COROUTINES # endif # define _LIBCPP_PUSH_MACROS _Pragma("push_macro(\"min\")") _Pragma("push_macro(\"max\")") # define _LIBCPP_POP_MACROS _Pragma("pop_macro(\"min\")") _Pragma("pop_macro(\"max\")") # ifndef _LIBCPP_NO_AUTO_LINK # if defined(_LIBCPP_ABI_MICROSOFT) && !defined(_LIBCPP_BUILDING_LIBRARY) # if !defined(_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS) # pragma comment(lib, "c++.lib") # else # pragma comment(lib, "libc++.lib") # endif # endif // defined(_LIBCPP_ABI_MICROSOFT) && !defined(_LIBCPP_BUILDING_LIBRARY) # endif // _LIBCPP_NO_AUTO_LINK // Configures the fopen close-on-exec mode character, if any. This string will // be appended to any mode string used by fstream for fopen/fdopen. // // Not all platforms support this, but it helps avoid fd-leaks on platforms that // do. 
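// For illustration (not actual fstream code): the macro is intended to be
// concatenated onto the fopen/fdopen mode string, e.g.
//
//   FILE* __fp = fopen(__name, "rb" _LIBCPP_FOPEN_CLOEXEC_MODE);
//
// which yields the mode "rbe" on platforms that define the character below
// and plain "rb" elsewhere; __fp and __name are hypothetical names used only
// for this sketch.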
# if defined(__BIONIC__) # define _LIBCPP_FOPEN_CLOEXEC_MODE "e" # else # define _LIBCPP_FOPEN_CLOEXEC_MODE # endif // Support for _FILE_OFFSET_BITS=64 landed gradually in Android, so the full set // of functions used in cstdio may not be available for low API levels when // using 64-bit file offsets on LP32. # if defined(__BIONIC__) && defined(__USE_FILE_OFFSET64) && __ANDROID_API__ < 24 # define _LIBCPP_HAS_NO_FGETPOS_FSETPOS # endif # if __has_attribute(init_priority) // TODO: Remove this once we drop support for building libc++ with old Clangs # if (defined(_LIBCPP_CLANG_VER) && _LIBCPP_CLANG_VER < 1200) || \ (defined(__apple_build_version__) && __apple_build_version__ < 13000000) # define _LIBCPP_INIT_PRIORITY_MAX __attribute__((init_priority(101))) # else # define _LIBCPP_INIT_PRIORITY_MAX __attribute__((init_priority(100))) # endif # else # define _LIBCPP_INIT_PRIORITY_MAX # endif # if defined(__GNUC__) || defined(__clang__) // The attribute uses 1-based indices for ordinary and static member functions. // The attribute uses 2-based indices for non-static member functions. # define _LIBCPP_ATTRIBUTE_FORMAT(archetype, format_string_index, first_format_arg_index) \ __attribute__((__format__(archetype, format_string_index, first_format_arg_index))) # else # define _LIBCPP_ATTRIBUTE_FORMAT(archetype, format_string_index, first_format_arg_index) /* nothing */ # endif # if __has_cpp_attribute(msvc::no_unique_address) // MSVC implements [[no_unique_address]] as a silent no-op currently. // (If/when MSVC breaks its C++ ABI, it will be changed to work as intended.) // However, MSVC implements [[msvc::no_unique_address]] which does what // [[no_unique_address]] is supposed to do, in general. // Clang-cl does not yet (14.0) implement either [[no_unique_address]] or // [[msvc::no_unique_address]] though. If/when it does implement // [[msvc::no_unique_address]], this should be preferred though. # define _LIBCPP_NO_UNIQUE_ADDRESS [[msvc::no_unique_address]] # elif __has_cpp_attribute(no_unique_address) # define _LIBCPP_NO_UNIQUE_ADDRESS [[no_unique_address]] # else # define _LIBCPP_NO_UNIQUE_ADDRESS /* nothing */ // Note that this can be replaced by #error as soon as clang-cl // implements msvc::no_unique_address, since there should be no C++20 // compiler that doesn't support one of the two attributes at that point. // We generally don't want to use this macro outside of C++20-only code, // because using it conditionally in one language version only would make // the ABI inconsistent. 
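// For illustration (not a declaration from this header), the intended use is
// on potentially-empty data members, e.g.
//
//   template <class _Alloc>
//   struct __sketch_container {
//     _LIBCPP_NO_UNIQUE_ADDRESS _Alloc __alloc_;  // takes no space if _Alloc is empty
//     int* __data_;
//   };
//
// __sketch_container and its members are hypothetical names used only for
// this example.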
# endif # ifdef _LIBCPP_COMPILER_CLANG_BASED # define _LIBCPP_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") # define _LIBCPP_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") # define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) _Pragma(_LIBCPP_TOSTRING(clang diagnostic ignored str)) # define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) # elif defined(_LIBCPP_COMPILER_GCC) # define _LIBCPP_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") # define _LIBCPP_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") # define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) # define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) _Pragma(_LIBCPP_TOSTRING(GCC diagnostic ignored str)) # else # define _LIBCPP_DIAGNOSTIC_PUSH # define _LIBCPP_DIAGNOSTIC_POP # define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) # define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) # endif # if defined(_AIX) && !defined(_LIBCPP_COMPILER_GCC) # define _LIBCPP_PACKED_BYTE_FOR_AIX _Pragma("pack(1)") # define _LIBCPP_PACKED_BYTE_FOR_AIX_END _Pragma("pack(pop)") # else # define _LIBCPP_PACKED_BYTE_FOR_AIX /* empty */ # define _LIBCPP_PACKED_BYTE_FOR_AIX_END /* empty */ # endif # if __has_attribute(__packed__) # define _LIBCPP_PACKED __attribute__((__packed__)) # else # define _LIBCPP_PACKED # endif #endif // __cplusplus #endif // _LIBCPP___CONFIG diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp index 2958a5054afc..c784c27d36b4 100644 --- a/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/llvm/lib/Analysis/ScalarEvolution.cpp @@ -1,14778 +1,14783 @@ //===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file contains the implementation of the scalar evolution analysis // engine, which is used primarily to analyze expressions involving induction // variables in loops. // // There are several aspects to this library. First is the representation of // scalar expressions, which are represented as subclasses of the SCEV class. // These classes are used to represent certain types of subexpressions that we // can handle. We only create one SCEV of a particular shape, so // pointer-comparisons for equality are legal. // // One important aspect of the SCEV objects is that they are never cyclic, even // if there is a cycle in the dataflow for an expression (ie, a PHI node). If // the PHI node is one of the idioms that we can represent (e.g., a polynomial // recurrence) then we represent it directly as a recurrence node, otherwise we // represent it as a SCEVUnknown node. // // In addition to being able to represent expressions of various types, we also // have folders that are used to build the *canonical* representation for a // particular expression. These folders are capable of using a variety of // rewrite rules to simplify the expressions. // // Once the folders are defined, we can implement the more interesting // higher-level code, such as the code that recognizes PHI nodes of various // types, computes the execution count of a loop, etc. // // TODO: We should use these routines and value representations to implement // dependence analysis! // //===----------------------------------------------------------------------===// // // There are several good references for the techniques used in this analysis. 
// // Chains of recurrences -- a method to expedite the evaluation // of closed-form functions // Olaf Bachmann, Paul S. Wang, Eugene V. Zima // // On computational properties of chains of recurrences // Eugene V. Zima // // Symbolic Evaluation of Chains of Recurrences for Loop Optimization // Robert A. van Engelen // // Efficient Symbolic Analysis for Optimizing Compilers // Robert A. van Engelen // // Using the chains of recurrences algebra for data dependence testing and // induction variable substitution // MS Thesis, Johnie Birch // //===----------------------------------------------------------------------===// #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/EquivalenceClasses.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/None.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/ScopeExit.h" #include "llvm/ADT/Sequence.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringRef.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/Config/llvm-config.h" #include "llvm/IR/Argument.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constant.h" #include "llvm/IR/ConstantRange.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalAlias.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Operator.h" #include "llvm/IR/PatternMatch.h" #include "llvm/IR/Type.h" #include "llvm/IR/Use.h" #include "llvm/IR/User.h" #include "llvm/IR/Value.h" #include "llvm/IR/Verifier.h" #include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include "llvm/Support/Casting.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/KnownBits.h" #include "llvm/Support/SaveAndRestore.h" #include "llvm/Support/raw_ostream.h" #include #include #include #include #include #include #include #include #include #include using namespace llvm; using namespace PatternMatch; #define DEBUG_TYPE "scalar-evolution" STATISTIC(NumTripCountsComputed, "Number of loops with predictable loop counts"); STATISTIC(NumTripCountsNotComputed, "Number of loops without predictable loop counts"); STATISTIC(NumBruteForceTripCountsComputed, "Number of loops with trip counts computed by force"); #ifdef EXPENSIVE_CHECKS bool llvm::VerifySCEV = true; #else bool llvm::VerifySCEV = false; #endif static cl::opt MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100)); static cl::opt VerifySCEVOpt( "verify-scev", cl::Hidden, 
cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); static cl::opt VerifySCEVStrict( "verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed")); static cl::opt VerifySCEVMap("verify-scev-maps", cl::Hidden, cl::desc("Verify no dangling value in ScalarEvolution's " "ExprValueMap (slow)")); static cl::opt VerifyIR( "scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false)); static cl::opt MulOpsInlineThreshold( "scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32)); static cl::opt AddOpsInlineThreshold( "scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500)); static cl::opt MaxSCEVCompareDepth( "scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32)); static cl::opt MaxSCEVOperationsImplicationDepth( "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2)); static cl::opt MaxValueCompareDepth( "scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2)); static cl::opt MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32)); static cl::opt MaxConstantEvolvingDepth( "scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32)); static cl::opt MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8)); static cl::opt MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8)); static cl::opt HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096)); static cl::opt ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction")); static cl::opt UseExpensiveRangeSharpening( "scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time")); static cl::opt MaxPhiSCCAnalysisSize( "scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8)); static cl::opt EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true)); //===----------------------------------------------------------------------===// // SCEV class definitions //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // Implementation of the SCEV class. 
// #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) LLVM_DUMP_METHOD void SCEV::dump() const { print(dbgs()); dbgs() << '\n'; } #endif void SCEV::print(raw_ostream &OS) const { switch (getSCEVType()) { case scConstant: cast(this)->getValue()->printAsOperand(OS, false); return; case scPtrToInt: { const SCEVPtrToIntExpr *PtrToInt = cast(this); const SCEV *Op = PtrToInt->getOperand(); OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to " << *PtrToInt->getType() << ")"; return; } case scTruncate: { const SCEVTruncateExpr *Trunc = cast(this); const SCEV *Op = Trunc->getOperand(); OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Trunc->getType() << ")"; return; } case scZeroExtend: { const SCEVZeroExtendExpr *ZExt = cast(this); const SCEV *Op = ZExt->getOperand(); OS << "(zext " << *Op->getType() << " " << *Op << " to " << *ZExt->getType() << ")"; return; } case scSignExtend: { const SCEVSignExtendExpr *SExt = cast(this); const SCEV *Op = SExt->getOperand(); OS << "(sext " << *Op->getType() << " " << *Op << " to " << *SExt->getType() << ")"; return; } case scAddRecExpr: { const SCEVAddRecExpr *AR = cast(this); OS << "{" << *AR->getOperand(0); for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i) OS << ",+," << *AR->getOperand(i); OS << "}<"; if (AR->hasNoUnsignedWrap()) OS << "nuw><"; if (AR->hasNoSignedWrap()) OS << "nsw><"; if (AR->hasNoSelfWrap() && !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW))) OS << "nw><"; AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false); OS << ">"; return; } case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: case scUMinExpr: case scSMinExpr: case scSequentialUMinExpr: { const SCEVNAryExpr *NAry = cast(this); const char *OpStr = nullptr; switch (NAry->getSCEVType()) { case scAddExpr: OpStr = " + "; break; case scMulExpr: OpStr = " * "; break; case scUMaxExpr: OpStr = " umax "; break; case scSMaxExpr: OpStr = " smax "; break; case scUMinExpr: OpStr = " umin "; break; case scSMinExpr: OpStr = " smin "; break; case scSequentialUMinExpr: OpStr = " umin_seq "; break; default: llvm_unreachable("There are no other nary expression types."); } OS << "("; ListSeparator LS(OpStr); for (const SCEV *Op : NAry->operands()) OS << LS << *Op; OS << ")"; switch (NAry->getSCEVType()) { case scAddExpr: case scMulExpr: if (NAry->hasNoUnsignedWrap()) OS << ""; if (NAry->hasNoSignedWrap()) OS << ""; break; default: // Nothing to print for other nary expressions. break; } return; } case scUDivExpr: { const SCEVUDivExpr *UDiv = cast(this); OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")"; return; } case scUnknown: { const SCEVUnknown *U = cast(this); Type *AllocTy; if (U->isSizeOf(AllocTy)) { OS << "sizeof(" << *AllocTy << ")"; return; } if (U->isAlignOf(AllocTy)) { OS << "alignof(" << *AllocTy << ")"; return; } Type *CTy; Constant *FieldNo; if (U->isOffsetOf(CTy, FieldNo)) { OS << "offsetof(" << *CTy << ", "; FieldNo->printAsOperand(OS, false); OS << ")"; return; } // Otherwise just print it normally. 
U->getValue()->printAsOperand(OS, false); return; } case scCouldNotCompute: OS << "***COULDNOTCOMPUTE***"; return; } llvm_unreachable("Unknown SCEV kind!"); } Type *SCEV::getType() const { switch (getSCEVType()) { case scConstant: return cast(this)->getType(); case scPtrToInt: case scTruncate: case scZeroExtend: case scSignExtend: return cast(this)->getType(); case scAddRecExpr: return cast(this)->getType(); case scMulExpr: return cast(this)->getType(); case scUMaxExpr: case scSMaxExpr: case scUMinExpr: case scSMinExpr: return cast(this)->getType(); case scSequentialUMinExpr: return cast(this)->getType(); case scAddExpr: return cast(this)->getType(); case scUDivExpr: return cast(this)->getType(); case scUnknown: return cast(this)->getType(); case scCouldNotCompute: llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); } llvm_unreachable("Unknown SCEV kind!"); } bool SCEV::isZero() const { if (const SCEVConstant *SC = dyn_cast(this)) return SC->getValue()->isZero(); return false; } bool SCEV::isOne() const { if (const SCEVConstant *SC = dyn_cast(this)) return SC->getValue()->isOne(); return false; } bool SCEV::isAllOnesValue() const { if (const SCEVConstant *SC = dyn_cast(this)) return SC->getValue()->isMinusOne(); return false; } bool SCEV::isNonConstantNegative() const { const SCEVMulExpr *Mul = dyn_cast(this); if (!Mul) return false; // If there is a constant factor, it will be first. const SCEVConstant *SC = dyn_cast(Mul->getOperand(0)); if (!SC) return false; // Return true if the value is negative, this matches things like (-42 * V). return SC->getAPInt().isNegative(); } SCEVCouldNotCompute::SCEVCouldNotCompute() : SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {} bool SCEVCouldNotCompute::classof(const SCEV *S) { return S->getSCEVType() == scCouldNotCompute; } const SCEV *ScalarEvolution::getConstant(ConstantInt *V) { FoldingSetNodeID ID; ID.AddInteger(scConstant); ID.AddPointer(V); void *IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V); UniqueSCEVs.InsertNode(S, IP); return S; } const SCEV *ScalarEvolution::getConstant(const APInt &Val) { return getConstant(ConstantInt::get(getContext(), Val)); } const SCEV * ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) { IntegerType *ITy = cast(getEffectiveSCEVType(Ty)); return getConstant(ConstantInt::get(ITy, V, isSigned)); } SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty) : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) { Operands[0] = op; } SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op, Type *ITy) : SCEVCastExpr(ID, scPtrToInt, Op, ITy) { assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() && "Must be a non-bit-width-changing pointer-to-integer cast!"); } SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty) : SCEVCastExpr(ID, SCEVTy, op, ty) {} SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty) : SCEVIntegralCastExpr(ID, scTruncate, op, ty) { assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() && "Cannot truncate non-integer value!"); } SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty) : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) { assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() && "Cannot zero extend 
non-integer value!"); } SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty) : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) { assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() && "Cannot sign extend non-integer value!"); } void SCEVUnknown::deleted() { // Clear this SCEVUnknown from various maps. SE->forgetMemoizedResults(this); // Remove this SCEVUnknown from the uniquing map. SE->UniqueSCEVs.RemoveNode(this); // Release the value. setValPtr(nullptr); } void SCEVUnknown::allUsesReplacedWith(Value *New) { // Clear this SCEVUnknown from various maps. SE->forgetMemoizedResults(this); // Remove this SCEVUnknown from the uniquing map. SE->UniqueSCEVs.RemoveNode(this); // Replace the value pointer in case someone is still using this SCEVUnknown. setValPtr(New); } bool SCEVUnknown::isSizeOf(Type *&AllocTy) const { if (ConstantExpr *VCE = dyn_cast(getValue())) if (VCE->getOpcode() == Instruction::PtrToInt) if (ConstantExpr *CE = dyn_cast(VCE->getOperand(0))) if (CE->getOpcode() == Instruction::GetElementPtr && CE->getOperand(0)->isNullValue() && CE->getNumOperands() == 2) if (ConstantInt *CI = dyn_cast(CE->getOperand(1))) if (CI->isOne()) { AllocTy = cast(CE)->getSourceElementType(); return true; } return false; } bool SCEVUnknown::isAlignOf(Type *&AllocTy) const { if (ConstantExpr *VCE = dyn_cast(getValue())) if (VCE->getOpcode() == Instruction::PtrToInt) if (ConstantExpr *CE = dyn_cast(VCE->getOperand(0))) if (CE->getOpcode() == Instruction::GetElementPtr && CE->getOperand(0)->isNullValue()) { Type *Ty = cast(CE)->getSourceElementType(); if (StructType *STy = dyn_cast(Ty)) if (!STy->isPacked() && CE->getNumOperands() == 3 && CE->getOperand(1)->isNullValue()) { if (ConstantInt *CI = dyn_cast(CE->getOperand(2))) if (CI->isOne() && STy->getNumElements() == 2 && STy->getElementType(0)->isIntegerTy(1)) { AllocTy = STy->getElementType(1); return true; } } } return false; } bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const { if (ConstantExpr *VCE = dyn_cast(getValue())) if (VCE->getOpcode() == Instruction::PtrToInt) if (ConstantExpr *CE = dyn_cast(VCE->getOperand(0))) if (CE->getOpcode() == Instruction::GetElementPtr && CE->getNumOperands() == 3 && CE->getOperand(0)->isNullValue() && CE->getOperand(1)->isNullValue()) { Type *Ty = cast(CE)->getSourceElementType(); // Ignore vector types here so that ScalarEvolutionExpander doesn't // emit getelementptrs that index into vectors. if (Ty->isStructTy() || Ty->isArrayTy()) { CTy = Ty; FieldNo = CE->getOperand(2); return true; } } return false; } //===----------------------------------------------------------------------===// // SCEV Utilities //===----------------------------------------------------------------------===// /// Compare the two values \p LV and \p RV in terms of their "complexity" where /// "complexity" is a partial (and somewhat ad-hoc) relation used to order /// operands in SCEV expressions. \p EqCache is a set of pairs of values that /// have been previously deemed to be "equally complex" by this routine. It is /// intended to avoid exponential time complexity in cases like: /// /// %a = f(%x, %y) /// %b = f(%a, %a) /// %c = f(%b, %b) /// /// %d = f(%x, %y) /// %e = f(%d, %d) /// %f = f(%e, %e) /// /// CompareValueComplexity(%f, %c) /// /// Since we do not continue running this routine on expression trees once we /// have seen unequal values, there is no need to track them in the cache. 
static int CompareValueComplexity(EquivalenceClasses &EqCacheValue, const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth) { if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV)) return 0; // Order pointer values after integer values. This helps SCEVExpander form // GEPs. bool LIsPointer = LV->getType()->isPointerTy(), RIsPointer = RV->getType()->isPointerTy(); if (LIsPointer != RIsPointer) return (int)LIsPointer - (int)RIsPointer; // Compare getValueID values. unsigned LID = LV->getValueID(), RID = RV->getValueID(); if (LID != RID) return (int)LID - (int)RID; // Sort arguments by their position. if (const auto *LA = dyn_cast(LV)) { const auto *RA = cast(RV); unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo(); return (int)LArgNo - (int)RArgNo; } if (const auto *LGV = dyn_cast(LV)) { const auto *RGV = cast(RV); const auto IsGVNameSemantic = [&](const GlobalValue *GV) { auto LT = GV->getLinkage(); return !(GlobalValue::isPrivateLinkage(LT) || GlobalValue::isInternalLinkage(LT)); }; // Use the names to distinguish the two values, but only if the // names are semantically important. if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV)) return LGV->getName().compare(RGV->getName()); } // For instructions, compare their loop depth, and their operand count. This // is pretty loose. if (const auto *LInst = dyn_cast(LV)) { const auto *RInst = cast(RV); // Compare loop depths. const BasicBlock *LParent = LInst->getParent(), *RParent = RInst->getParent(); if (LParent != RParent) { unsigned LDepth = LI->getLoopDepth(LParent), RDepth = LI->getLoopDepth(RParent); if (LDepth != RDepth) return (int)LDepth - (int)RDepth; } // Compare the number of operands. unsigned LNumOps = LInst->getNumOperands(), RNumOps = RInst->getNumOperands(); if (LNumOps != RNumOps) return (int)LNumOps - (int)RNumOps; for (unsigned Idx : seq(0u, LNumOps)) { int Result = CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx), RInst->getOperand(Idx), Depth + 1); if (Result != 0) return Result; } } EqCacheValue.unionSets(LV, RV); return 0; } // Return negative, zero, or positive, if LHS is less than, equal to, or greater // than RHS, respectively. A three-way result allows recursive comparisons to be // more efficient. // If the max analysis depth was reached, return None, assuming we do not know // if they are equivalent for sure. static Optional CompareSCEVComplexity(EquivalenceClasses &EqCacheSCEV, EquivalenceClasses &EqCacheValue, const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) { // Fast-path: SCEVs are uniqued so we can do a quick equality check. if (LHS == RHS) return 0; // Primarily, sort the SCEVs by their getSCEVType(). SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType(); if (LType != RType) return (int)LType - (int)RType; if (EqCacheSCEV.isEquivalent(LHS, RHS)) return 0; if (Depth > MaxSCEVCompareDepth) return None; // Aside from the getSCEVType() ordering, the particular ordering // isn't very important except that it's beneficial to be consistent, // so that (a + b) and (b + a) don't end up as different expressions. switch (LType) { case scUnknown: { const SCEVUnknown *LU = cast(LHS); const SCEVUnknown *RU = cast(RHS); int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(), RU->getValue(), Depth + 1); if (X == 0) EqCacheSCEV.unionSets(LHS, RHS); return X; } case scConstant: { const SCEVConstant *LC = cast(LHS); const SCEVConstant *RC = cast(RHS); // Compare constant values. 
const APInt &LA = LC->getAPInt(); const APInt &RA = RC->getAPInt(); unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth(); if (LBitWidth != RBitWidth) return (int)LBitWidth - (int)RBitWidth; return LA.ult(RA) ? -1 : 1; } case scAddRecExpr: { const SCEVAddRecExpr *LA = cast(LHS); const SCEVAddRecExpr *RA = cast(RHS); // There is always a dominance between two recs that are used by one SCEV, // so we can safely sort recs by loop header dominance. We require such // order in getAddExpr. const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop(); if (LLoop != RLoop) { const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader(); assert(LHead != RHead && "Two loops share the same header?"); if (DT.dominates(LHead, RHead)) return 1; else assert(DT.dominates(RHead, LHead) && "No dominance between recurrences used by one SCEV?"); return -1; } // Addrec complexity grows with operand count. unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands(); if (LNumOps != RNumOps) return (int)LNumOps - (int)RNumOps; // Lexicographically compare. for (unsigned i = 0; i != LNumOps; ++i) { auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LA->getOperand(i), RA->getOperand(i), DT, Depth + 1); if (X != 0) return X; } EqCacheSCEV.unionSets(LHS, RHS); return 0; } case scAddExpr: case scMulExpr: case scSMaxExpr: case scUMaxExpr: case scSMinExpr: case scUMinExpr: case scSequentialUMinExpr: { const SCEVNAryExpr *LC = cast(LHS); const SCEVNAryExpr *RC = cast(RHS); // Lexicographically compare n-ary expressions. unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands(); if (LNumOps != RNumOps) return (int)LNumOps - (int)RNumOps; for (unsigned i = 0; i != LNumOps; ++i) { auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(i), RC->getOperand(i), DT, Depth + 1); if (X != 0) return X; } EqCacheSCEV.unionSets(LHS, RHS); return 0; } case scUDivExpr: { const SCEVUDivExpr *LC = cast(LHS); const SCEVUDivExpr *RC = cast(RHS); // Lexicographically compare udiv expressions. auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(), RC->getLHS(), DT, Depth + 1); if (X != 0) return X; X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(), RC->getRHS(), DT, Depth + 1); if (X == 0) EqCacheSCEV.unionSets(LHS, RHS); return X; } case scPtrToInt: case scTruncate: case scZeroExtend: case scSignExtend: { const SCEVCastExpr *LC = cast(LHS); const SCEVCastExpr *RC = cast(RHS); // Compare cast expressions by operand. auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(), RC->getOperand(), DT, Depth + 1); if (X == 0) EqCacheSCEV.unionSets(LHS, RHS); return X; } case scCouldNotCompute: llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); } llvm_unreachable("Unknown SCEV kind!"); } /// Given a list of SCEV objects, order them by their complexity, and group /// objects of the same complexity together by value. When this routine is /// finished, we know that any duplicates in the vector are consecutive and that /// complexity is monotonically increasing. /// /// Note that we go take special precautions to ensure that we get deterministic /// results from this routine. In other words, we don't want the results of /// this to depend on where the addresses of various SCEV objects happened to /// land in memory. 
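/// For illustration (a hypothetical operand list, not code from this file):
/// given the add operands {%x, 2, %y, %x}, this routine produces an order such
/// as {2, %x, %x, %y} - the constant sorts ahead of the SCEVUnknowns because
/// scConstant compares as less complex, and the duplicate %x operands become
/// adjacent so that later folding can combine them.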
static void GroupByComplexity(SmallVectorImpl &Ops, LoopInfo *LI, DominatorTree &DT) { if (Ops.size() < 2) return; // Noop EquivalenceClasses EqCacheSCEV; EquivalenceClasses EqCacheValue; // Whether LHS has provably less complexity than RHS. auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) { auto Complexity = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT); return Complexity && *Complexity < 0; }; if (Ops.size() == 2) { // This is the common case, which also happens to be trivially simple. // Special case it. const SCEV *&LHS = Ops[0], *&RHS = Ops[1]; if (IsLessComplex(RHS, LHS)) std::swap(LHS, RHS); return; } // Do the rough sort by complexity. llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) { return IsLessComplex(LHS, RHS); }); // Now that we are sorted by complexity, group elements of the same // complexity. Note that this is, at worst, N^2, but the vector is likely to // be extremely short in practice. Note that we take this approach because we // do not want to depend on the addresses of the objects we are grouping. for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) { const SCEV *S = Ops[i]; unsigned Complexity = S->getSCEVType(); // If there are any objects of the same complexity and same value as this // one, group them. for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) { if (Ops[j] == S) { // Found a duplicate. // Move it to immediately after i'th element. std::swap(Ops[i+1], Ops[j]); ++i; // no need to rescan it. if (i == e-2) return; // Done! } } } } /// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at /// least HugeExprThreshold nodes). static bool hasHugeExpression(ArrayRef Ops) { return any_of(Ops, [](const SCEV *S) { return S->getExpressionSize() >= HugeExprThreshold; }); } //===----------------------------------------------------------------------===// // Simple SCEV method implementations //===----------------------------------------------------------------------===// /// Compute BC(It, K). The result has width W. Assume, K > 0. static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy) { // Handle the simplest case efficiently. if (K == 1) return SE.getTruncateOrZeroExtend(It, ResultTy); // We are using the following formula for BC(It, K): // // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K! // // Suppose, W is the bitwidth of the return value. We must be prepared for // overflow. Hence, we must assure that the result of our computation is // equal to the accurate one modulo 2^W. Unfortunately, division isn't // safe in modular arithmetic. // // However, this code doesn't use exactly that formula; the formula it uses // is something like the following, where T is the number of factors of 2 in // K! (i.e. trailing zeros in the binary representation of K!), and ^ is // exponentiation: // // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T) // // This formula is trivially equivalent to the previous formula. However, // this formula can be implemented much more efficiently. The trick is that // K! / 2^T is odd, and exact division by an odd number *is* safe in modular // arithmetic. To do exact division in modular arithmetic, all we have // to do is multiply by the inverse. Therefore, this step can be done at // width W. // // The next issue is how to safely do the division by 2^T. The way this // is done is by doing the multiplication step at a width of at least W + T // bits. 
This way, the bottom W+T bits of the product are accurate. Then, // when we perform the division by 2^T (which is equivalent to a right shift // by T), the bottom W bits are accurate. Extra bits are okay; they'll get // truncated out after the division by 2^T. // // In comparison to just directly using the first formula, this technique // is much more efficient; using the first formula requires W * K bits, // but this formula less than W + K bits. Also, the first formula requires // a division step, whereas this formula only requires multiplies and shifts. // // It doesn't matter whether the subtraction step is done in the calculation // width or the input iteration count's width; if the subtraction overflows, // the result must be zero anyway. We prefer here to do it in the width of // the induction variable because it helps a lot for certain cases; CodeGen // isn't smart enough to ignore the overflow, which leads to much less // efficient code if the width of the subtraction is wider than the native // register width. // // (It's possible to not widen at all by pulling out factors of 2 before // the multiplication; for example, K=2 can be calculated as // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires // extra arithmetic, so it's not an obvious win, and it gets // much more complicated for K > 3.) // Protection from insane SCEVs; this bound is conservative, // but it probably doesn't matter. if (K > 1000) return SE.getCouldNotCompute(); unsigned W = SE.getTypeSizeInBits(ResultTy); // Calculate K! / 2^T and T; we divide out the factors of two before // multiplying for calculating K! / 2^T to avoid overflow. // Other overflow doesn't matter because we only care about the bottom // W bits of the result. APInt OddFactorial(W, 1); unsigned T = 1; for (unsigned i = 3; i <= K; ++i) { APInt Mult(W, i); unsigned TwoFactors = Mult.countTrailingZeros(); T += TwoFactors; Mult.lshrInPlace(TwoFactors); OddFactorial *= Mult; } // We need at least W + T bits for the multiplication step unsigned CalculationBits = W + T; // Calculate 2^T, at width T+W. APInt DivFactor = APInt::getOneBitSet(CalculationBits, T); // Calculate the multiplicative inverse of K! / 2^T; // this multiplication factor will perform the exact division by // K! / 2^T. APInt Mod = APInt::getSignedMinValue(W+1); APInt MultiplyFactor = OddFactorial.zext(W+1); MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod); MultiplyFactor = MultiplyFactor.trunc(W); // Calculate the product, at width T+W IntegerType *CalculationTy = IntegerType::get(SE.getContext(), CalculationBits); const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy); for (unsigned i = 1; i != K; ++i) { const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i)); Dividend = SE.getMulExpr(Dividend, SE.getTruncateOrZeroExtend(S, CalculationTy)); } // Divide by 2^T const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor)); // Truncate the result, and divide by K! / 2^T. return SE.getMulExpr(SE.getConstant(MultiplyFactor), SE.getTruncateOrZeroExtend(DivResult, ResultTy)); } /// Return the value of this chain of recurrences at the specified iteration /// number. We can evaluate this recurrence by multiplying each element in the /// chain by the binomial coefficient corresponding to it. In other words, we /// can evaluate {A,+,B,+,C,+,D} as: /// /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3) /// /// where BC(It, k) stands for binomial coefficient. 
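/// For example (illustrative only), the chain of recurrences {0,+,1,+,1}
/// evaluates to
///
///   0*BC(It,0) + 1*BC(It,1) + 1*BC(It,2) = It + It*(It-1)/2
///
/// which for It = 4 gives 4 + 6 = 10, matching the value obtained by running
/// the recurrence forward four steps (0, 1, 3, 6, 10).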
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                 SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // It isn't legal for optimizations to construct new ptrtoint expressions
  // for non-integral pointers.
  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
    return getCouldNotCompute();

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only trivially model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  // We could theoretically teach SCEV to truncate wider pointers, but
  // that isn't implemented for now.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.
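  // For instance (illustrative example): if Op is ((4 * %n) + %ptr), the
  // rewriter defined below leaves the integer part untouched and only wraps
  // the pointer-typed SCEVUnknown, yielding ((4 * %n) + (ptrtoint %ptr)), so
  // the final expression is integer-typed throughout.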
/// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression, /// which computes a pointer-typed value, and rewrites the whole expression /// tree so that *all* the computations are done on integers, and the only /// pointer-typed operands in the expression are SCEVUnknown. class SCEVPtrToIntSinkingRewriter : public SCEVRewriteVisitor { using Base = SCEVRewriteVisitor; public: SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {} static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) { SCEVPtrToIntSinkingRewriter Rewriter(SE); return Rewriter.visit(Scev); } const SCEV *visit(const SCEV *S) { Type *STy = S->getType(); // If the expression is not pointer-typed, just keep it as-is. if (!STy->isPointerTy()) return S; // Else, recursively sink the cast down into it. return Base::visit(S); } const SCEV *visitAddExpr(const SCEVAddExpr *Expr) { SmallVector Operands; bool Changed = false; for (const auto *Op : Expr->operands()) { Operands.push_back(visit(Op)); Changed |= Op != Operands.back(); } return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags()); } const SCEV *visitMulExpr(const SCEVMulExpr *Expr) { SmallVector Operands; bool Changed = false; for (const auto *Op : Expr->operands()) { Operands.push_back(visit(Op)); Changed |= Op != Operands.back(); } return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags()); } const SCEV *visitUnknown(const SCEVUnknown *Expr) { assert(Expr->getType()->isPointerTy() && "Should only reach pointer-typed SCEVUnknown's."); return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1); } }; // And actually perform the cast sinking. const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this); assert(IntOp->getType()->isIntegerTy() && "We must have succeeded in sinking the cast, " "and ending up with an integer-typed expression!"); return IntOp; } const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) { assert(Ty->isIntegerTy() && "Target type must be an integer type!"); const SCEV *IntOp = getLosslessPtrToIntExpr(Op); if (isa(IntOp)) return IntOp; return getTruncateOrZeroExtend(IntOp, Ty); } const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth) { assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) && "This is not a truncating conversion!"); assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!"); assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!"); Ty = getEffectiveSCEVType(Ty); FoldingSetNodeID ID; ID.AddInteger(scTruncate); ID.AddPointer(Op); ID.AddPointer(Ty); void *IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; // Fold if the operand is constant. if (const SCEVConstant *SC = dyn_cast(Op)) return getConstant( cast(ConstantExpr::getTrunc(SC->getValue(), Ty))); // trunc(trunc(x)) --> trunc(x) if (const SCEVTruncateExpr *ST = dyn_cast(Op)) return getTruncateExpr(ST->getOperand(), Ty, Depth + 1); // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing if (const SCEVSignExtendExpr *SS = dyn_cast(Op)) return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1); // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing if (const SCEVZeroExtendExpr *SZ = dyn_cast(Op)) return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1); if (Depth > MaxCastDepth) { SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty); UniqueSCEVs.InsertNode(S, IP); registerUser(S, Op); return S; } // trunc(x1 + ... + xN) --> trunc(x1) + ... 
+ trunc(xN) and // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN), // if after transforming we have at most one truncate, not counting truncates // that replace other casts. if (isa(Op) || isa(Op)) { auto *CommOp = cast(Op); SmallVector Operands; unsigned numTruncs = 0; for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2; ++i) { const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1); if (!isa(CommOp->getOperand(i)) && isa(S)) numTruncs++; Operands.push_back(S); } if (numTruncs < 2) { if (isa(Op)) return getAddExpr(Operands); else if (isa(Op)) return getMulExpr(Operands); else llvm_unreachable("Unexpected SCEV type for Op."); } // Although we checked in the beginning that ID is not in the cache, it is // possible that during recursion and different modification ID was inserted // into the cache. So if we find it, just return it. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; } // If the input value is a chrec scev, truncate the chrec's operands. if (const SCEVAddRecExpr *AddRec = dyn_cast(Op)) { SmallVector Operands; for (const SCEV *Op : AddRec->operands()) Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1)); return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap); } // Return zero if truncating to known zeros. uint32_t MinTrailingZeros = GetMinTrailingZeros(Op); if (MinTrailingZeros >= getTypeSizeInBits(Ty)) return getZero(Ty); // The cast wasn't folded; create an explicit cast node. We can reuse // the existing insert position since if we get here, we won't have // made any changes which would invalidate it. SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty); UniqueSCEVs.InsertNode(S, IP); registerUser(S, Op); return S; } // Get the limit of a recurrence such that incrementing by Step cannot cause // signed overflow as long as the value of the recurrence within the // loop does not exceed this limit before incrementing. static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE) { unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); if (SE->isKnownPositive(Step)) { *Pred = ICmpInst::ICMP_SLT; return SE->getConstant(APInt::getSignedMinValue(BitWidth) - SE->getSignedRangeMax(Step)); } if (SE->isKnownNegative(Step)) { *Pred = ICmpInst::ICMP_SGT; return SE->getConstant(APInt::getSignedMaxValue(BitWidth) - SE->getSignedRangeMin(Step)); } return nullptr; } // Get the limit of a recurrence such that incrementing by Step cannot cause // unsigned overflow as long as the value of the recurrence within the loop does // not exceed this limit before incrementing. static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE) { unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); *Pred = ICmpInst::ICMP_ULT; return SE->getConstant(APInt::getMinValue(BitWidth) - SE->getUnsignedRangeMax(Step)); } namespace { struct ExtendOpTraitsBase { typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *, unsigned); }; // Used to make code generic over signed and unsigned overflow. 
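// Illustrative sketch, not part of the upstream code: for an i8 step of 10,
// getUnsignedOverflowLimitForStep reports that the recurrence cannot wrap
// unsigned as long as its value stays ULT 0 - 10 == 246 before the increment.
static bool exampleUnsignedOverflowLimit(ScalarEvolution &SE) {
  Type *I8 = Type::getInt8Ty(SE.getContext());
  ICmpInst::Predicate Pred;
  const SCEV *Limit =
      getUnsignedOverflowLimitForStep(SE.getConstant(I8, 10), &Pred, &SE);
  return Pred == ICmpInst::ICMP_ULT &&
         cast<SCEVConstant>(Limit)->getAPInt() == 246;
}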
template struct ExtendOpTraits { // Members present: // // static const SCEV::NoWrapFlags WrapType; // // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr; // // static const SCEV *getOverflowLimitForStep(const SCEV *Step, // ICmpInst::Predicate *Pred, // ScalarEvolution *SE); }; template <> struct ExtendOpTraits : public ExtendOpTraitsBase { static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW; static const GetExtendExprTy GetExtendExpr; static const SCEV *getOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE) { return getSignedOverflowLimitForStep(Step, Pred, SE); } }; const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr; template <> struct ExtendOpTraits : public ExtendOpTraitsBase { static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW; static const GetExtendExprTy GetExtendExpr; static const SCEV *getOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE) { return getUnsignedOverflowLimitForStep(Step, Pred, SE); } }; const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr; } // end anonymous namespace // The recurrence AR has been shown to have no signed/unsigned wrap or something // close to it. Typically, if we can prove NSW/NUW for AR, then we can just as // easily prove NSW/NUW for its preincrement or postincrement sibling. This // allows normalizing a sign/zero extended AddRec as such: {sext/zext(Step + // Start),+,Step} => {(Step + sext/zext(Start),+,Step} As a result, the // expression "Step + sext/zext(PreIncAR)" is congruent with // "sext/zext(PostIncAR)" template static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth) { auto WrapType = ExtendOpTraits::WrapType; auto GetExtendExpr = ExtendOpTraits::GetExtendExpr; const Loop *L = AR->getLoop(); const SCEV *Start = AR->getStart(); const SCEV *Step = AR->getStepRecurrence(*SE); // Check for a simple looking step prior to loop entry. const SCEVAddExpr *SA = dyn_cast(Start); if (!SA) return nullptr; // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV // subtraction is expensive. For this purpose, perform a quick and dirty // difference, by checking for Step in the operand list. SmallVector DiffOps; for (const SCEV *Op : SA->operands()) if (Op != Step) DiffOps.push_back(Op); if (DiffOps.size() == SA->getNumOperands()) return nullptr; // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` + // `Step`: // 1. NSW/NUW flags on the step increment. auto PreStartFlags = ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW); const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags); const SCEVAddRecExpr *PreAR = dyn_cast( SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap)); // "{S,+,X} is /" and "the backedge is taken at least once" implies // "S+X does not sign/unsign-overflow". // const SCEV *BECount = SE->getBackedgeTakenCount(L); if (PreAR && PreAR->getNoWrapFlags(WrapType) && !isa(BECount) && SE->isKnownPositive(BECount)) return PreStart; // 2. Direct overflow check on the step operation's expression. 
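  // For instance (illustrative example, sign-extend instantiation): with an i8
  // PreStart of 100 and a Step of 20, sign-extending Start == 120 to i16 gives
  // 120, which equals sext(100) + sext(20), so the comparison below proves
  // that PreStart + Step does not overflow.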
unsigned BitWidth = SE->getTypeSizeInBits(AR->getType()); Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2); const SCEV *OperandExtendedStart = SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth), (SE->*GetExtendExpr)(Step, WideTy, Depth)); if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) { if (PreAR && AR->getNoWrapFlags(WrapType)) { // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact. SE->setNoWrapFlags(const_cast(PreAR), WrapType); } return PreStart; } // 3. Loop precondition. ICmpInst::Predicate Pred; const SCEV *OverflowLimit = ExtendOpTraits::getOverflowLimitForStep(Step, &Pred, SE); if (OverflowLimit && SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) return PreStart; return nullptr; } // Get the normalized zero or sign extended expression for this AddRec's Start. template static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth) { auto GetExtendExpr = ExtendOpTraits::GetExtendExpr; const SCEV *PreStart = getPreStartForExtend(AR, Ty, SE, Depth); if (!PreStart) return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth); return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty, Depth), (SE->*GetExtendExpr)(PreStart, Ty, Depth)); } // Try to prove away overflow by looking at "nearby" add recurrences. A // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`. // // Formally: // // {S,+,X} == {S-T,+,X} + T // => Ext({S,+,X}) == Ext({S-T,+,X} + T) // // If ({S-T,+,X} + T) does not overflow ... (1) // // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T) // // If {S-T,+,X} does not overflow ... (2) // // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T) // == {Ext(S-T)+Ext(T),+,Ext(X)} // // If (S-T)+T does not overflow ... (3) // // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)} // == {Ext(S),+,Ext(X)} == LHS // // Thus, if (1), (2) and (3) are true for some T, then // Ext({S,+,X}) == {Ext(S),+,Ext(X)} // // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T) // does not overflow" restricted to the 0th iteration. Therefore we only need // to check for (1) and (2). // // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T // is `Delta` (defined below). template bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start, const SCEV *Step, const Loop *L) { auto WrapType = ExtendOpTraits::WrapType; // We restrict `Start` to a constant to prevent SCEV from spending too much // time here. It is correct (but more expensive) to continue with a // non-constant `Start` and do a general SCEV subtraction to compute // `PreStart` below. const SCEVConstant *StartC = dyn_cast(Start); if (!StartC) return false; APInt StartAI = StartC->getAPInt(); for (unsigned Delta : {-2, -1, 1, 2}) { const SCEV *PreStart = getConstant(StartAI - Delta); FoldingSetNodeID ID; ID.AddInteger(scAddRecExpr); ID.AddPointer(PreStart); ID.AddPointer(Step); ID.AddPointer(L); void *IP = nullptr; const auto *PreAR = static_cast(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); // Give up if we don't already have the add recurrence we need because // actually constructing an add recurrence is relatively expensive. 
if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2) const SCEV *DeltaS = getConstant(StartC->getType(), Delta); ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; const SCEV *Limit = ExtendOpTraits::getOverflowLimitForStep( DeltaS, &Pred, this); if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1) return true; } } return false; } // Finds an integer D for an expression (C + x + y + ...) such that the top // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and // the (C + x + y + ...) expression is \p WholeAddExpr. static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr) { const APInt &C = ConstantTerm->getAPInt(); const unsigned BitWidth = C.getBitWidth(); // Find number of trailing zeros of (x + y + ...) w/o the C first: uint32_t TZ = BitWidth; for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I) TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I))); if (TZ) { // Set D to be as many least significant bits of C as possible while still // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap: return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C; } return APInt(BitWidth, 0); } // Finds an integer D for an affine AddRec expression {C,+,x} such that the top // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const APInt &ConstantStart, const SCEV *Step) { const unsigned BitWidth = ConstantStart.getBitWidth(); const uint32_t TZ = SE.GetMinTrailingZeros(Step); if (TZ) return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) : ConstantStart; return APInt(BitWidth, 0); } const SCEV * ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!"); assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); Ty = getEffectiveSCEVType(Ty); // Fold if the operand is constant. if (const SCEVConstant *SC = dyn_cast(Op)) return getConstant( cast(ConstantExpr::getZExt(SC->getValue(), Ty))); // zext(zext(x)) --> zext(x) if (const SCEVZeroExtendExpr *SZ = dyn_cast(Op)) return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); // Before doing any expensive analysis, check to see if we've already // computed a SCEV for this Op and Ty. FoldingSetNodeID ID; ID.AddInteger(scZeroExtend); ID.AddPointer(Op); ID.AddPointer(Ty); void *IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; if (Depth > MaxCastDepth) { SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), Op, Ty); UniqueSCEVs.InsertNode(S, IP); registerUser(S, Op); return S; } // zext(trunc(x)) --> zext(x) or x or trunc(x) if (const SCEVTruncateExpr *ST = dyn_cast(Op)) { // It's possible the bits taken off by the truncate were all zero bits. If // so, we should be able to simplify this further. 
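    // For instance (illustrative example): if Op is
    // zext(trunc(zext i8 %v to i16) to i8) to i32, the unsigned range of the
    // truncated value already fits in 8 bits, so the range check below lets
    // the whole expression fold to zext i8 %v to i32.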
const SCEV *X = ST->getOperand(); ConstantRange CR = getUnsignedRange(X); unsigned TruncBits = getTypeSizeInBits(ST->getType()); unsigned NewBits = getTypeSizeInBits(Ty); if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( CR.zextOrTrunc(NewBits))) return getTruncateOrZeroExtend(X, Ty, Depth); } // If the input value is a chrec scev, and we can prove that the value // did not overflow the old, smaller, value, we can zero extend all of the // operands (often constants). This allows analysis of something like // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } if (const SCEVAddRecExpr *AR = dyn_cast(Op)) if (AR->isAffine()) { const SCEV *Start = AR->getStart(); const SCEV *Step = AR->getStepRecurrence(*this); unsigned BitWidth = getTypeSizeInBits(AR->getType()); const Loop *L = AR->getLoop(); if (!AR->hasNoUnsignedWrap()) { auto NewFlags = proveNoWrapViaConstantRanges(AR); setNoWrapFlags(const_cast(AR), NewFlags); } // If we have special knowledge that this addrec won't overflow, // we don't need to do any further analysis. if (AR->hasNoUnsignedWrap()) { Start = getExtendAddRecStart(AR, Ty, this, Depth + 1); Step = getZeroExtendExpr(Step, Ty, Depth + 1); return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); } // Check whether the backedge-taken count is SCEVCouldNotCompute. // Note that this serves two purposes: It filters out loops that are // simply not analyzable, and it covers the case where this code is // being called from within backedge-taken count analysis, such that // attempting to ask for the backedge-taken count would likely result // in infinite recursion. In the later case, the analysis code will // cope with a conservative value, and it will take care to purge // that value once it has finished. const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); if (!isa(MaxBECount)) { // Manually compute the final value for AR, checking for overflow. // Check whether the backedge-taken count can be losslessly casted to // the addrec's type. The count is always unsigned. const SCEV *CastedMaxBECount = getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( CastedMaxBECount, MaxBECount->getType(), Depth); if (MaxBECount == RecastedMaxBECount) { Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); // Check whether Start+Step*MaxBECount has no unsigned overflow. const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step, SCEV::FlagAnyWrap, Depth + 1); const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul, SCEV::FlagAnyWrap, Depth + 1), WideTy, Depth + 1); const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1); const SCEV *WideMaxBECount = getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); const SCEV *OperandExtendedAdd = getAddExpr(WideStart, getMulExpr(WideMaxBECount, getZeroExtendExpr(Step, WideTy, Depth + 1), SCEV::FlagAnyWrap, Depth + 1), SCEV::FlagAnyWrap, Depth + 1); if (ZAdd == OperandExtendedAdd) { // Cache knowledge of AR NUW, which is propagated to this AddRec. setNoWrapFlags(const_cast(AR), SCEV::FlagNUW); // Return the expression with the addrec on the outside. Start = getExtendAddRecStart(AR, Ty, this, Depth + 1); Step = getZeroExtendExpr(Step, Ty, Depth + 1); return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); } // Similar to above, only this time treat the step value as signed. // This covers loops that count down. 
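        // For instance (illustrative example): for {10,+,-1} over i8 with a
        // constant max backedge-taken count of 10, the widened sum
        // 10 + 10 * (-1) == 0 matches zext(10 - 10) == 0, so even though the
        // step is negative the recurrence provably does not self-wrap
        // (FlagNW below).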
OperandExtendedAdd = getAddExpr(WideStart, getMulExpr(WideMaxBECount, getSignExtendExpr(Step, WideTy, Depth + 1), SCEV::FlagAnyWrap, Depth + 1), SCEV::FlagAnyWrap, Depth + 1); if (ZAdd == OperandExtendedAdd) { // Cache knowledge of AR NW, which is propagated to this AddRec. // Negative step causes unsigned wrap, but it still can't self-wrap. setNoWrapFlags(const_cast(AR), SCEV::FlagNW); // Return the expression with the addrec on the outside. Start = getExtendAddRecStart(AR, Ty, this, Depth + 1); Step = getSignExtendExpr(Step, Ty, Depth + 1); return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); } } } // Normally, in the cases we can prove no-overflow via a // backedge guarding condition, we can also compute a backedge // taken count for the loop. The exceptions are assumptions and // guards present in the loop -- SCEV is not great at exploiting // these to compute max backedge taken counts, but can still use // these to prove lack of overflow. Use this fact to avoid // doing extra work that may not pay off. if (!isa(MaxBECount) || HasGuards || !AC.assumptions().empty()) { auto NewFlags = proveNoUnsignedWrapViaInduction(AR); setNoWrapFlags(const_cast(AR), NewFlags); if (AR->hasNoUnsignedWrap()) { // Same as nuw case above - duplicated here to avoid a compile time // issue. It's not clear that the order of checks does matter, but // it's one of two issue possible causes for a change which was // reverted. Be conservative for the moment. Start = getExtendAddRecStart(AR, Ty, this, Depth + 1); Step = getZeroExtendExpr(Step, Ty, Depth + 1); return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); } // For a negative step, we can extend the operands iff doing so only // traverses values in the range zext([0,UINT_MAX]). if (isKnownNegative(Step)) { const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - getSignedRangeMin(Step)); if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { // Cache knowledge of AR NW, which is propagated to this // AddRec. Negative step causes unsigned wrap, but it // still can't self-wrap. setNoWrapFlags(const_cast(AR), SCEV::FlagNW); // Return the expression with the addrec on the outside. 
Start = getExtendAddRecStart(AR, Ty, this, Depth + 1); Step = getSignExtendExpr(Step, Ty, Depth + 1); return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); } } } // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step})) // if D + (C - D + Step * n) could be proven to not unsigned wrap // where D maximizes the number of trailing zeros of (C - D + Step * n) if (const auto *SC = dyn_cast(Start)) { const APInt &C = SC->getAPInt(); const APInt &D = extractConstantWithoutWrapping(*this, C, Step); if (D != 0) { const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); const SCEV *SResidual = getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); return getAddExpr(SZExtD, SZExtR, (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), Depth + 1); } } if (proveNoWrapByVaryingStart(Start, Step, L)) { setNoWrapFlags(const_cast(AR), SCEV::FlagNUW); Start = getExtendAddRecStart(AR, Ty, this, Depth + 1); Step = getZeroExtendExpr(Step, Ty, Depth + 1); return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); } } // zext(A % B) --> zext(A) % zext(B) { const SCEV *LHS; const SCEV *RHS; if (matchURem(Op, LHS, RHS)) return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1), getZeroExtendExpr(RHS, Ty, Depth + 1)); } // zext(A / B) --> zext(A) / zext(B). if (auto *Div = dyn_cast(Op)) return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1), getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1)); if (auto *SA = dyn_cast(Op)) { // zext((A + B + ...)) --> (zext(A) + zext(B) + ...) if (SA->hasNoUnsignedWrap()) { // If the addition does not unsign overflow then we can, by definition, // commute the zero extension with the addition operation. SmallVector Ops; for (const auto *Op : SA->operands()) Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); } // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...)) // if D + (C - D + x + y + ...) could be proven to not unsigned wrap // where D maximizes the number of trailing zeros of (C - D + x + y + ...) // // Often address arithmetics contain expressions like // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))). // This transformation is useful while proving that such expressions are // equal or differ by a small constant amount, see LoadStoreVectorizer pass. if (const auto *SC = dyn_cast(SA->getOperand(0))) { const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); if (D != 0) { const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); const SCEV *SResidual = getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); return getAddExpr(SZExtD, SZExtR, (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), Depth + 1); } } } if (auto *SM = dyn_cast(Op)) { // zext((A * B * ...)) --> (zext(A) * zext(B) * ...) if (SM->hasNoUnsignedWrap()) { // If the multiply does not unsign overflow then we can, by definition, // commute the zero extension with the multiply operation. 
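      // For instance (illustrative example): zext i8 (4 * %x)<nuw> to i32
      // becomes (4 * zext i8 %x to i32)<nuw>, since the narrow product never
      // wrapped and therefore equals the wide product of the extended
      // operands.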
SmallVector Ops; for (const auto *Op : SM->operands()) Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); } // zext(2^K * (trunc X to iN)) to iM -> // 2^K * (zext(trunc X to i{N-K}) to iM) // // Proof: // // zext(2^K * (trunc X to iN)) to iM // = zext((trunc X to iN) << K) to iM // = zext((trunc X to i{N-K}) << K) to iM // (because shl removes the top K bits) // = zext((2^K * (trunc X to i{N-K}))) to iM // = (2^K * (zext(trunc X to i{N-K}) to iM)). // if (SM->getNumOperands() == 2) if (auto *MulLHS = dyn_cast(SM->getOperand(0))) if (MulLHS->getAPInt().isPowerOf2()) if (auto *TruncRHS = dyn_cast(SM->getOperand(1))) { int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - MulLHS->getAPInt().logBase2(); Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); return getMulExpr( getZeroExtendExpr(MulLHS, Ty), getZeroExtendExpr( getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), SCEV::FlagNUW, Depth + 1); } } // The cast wasn't folded; create an explicit cast node. // Recompute the insert position, as it may have been invalidated. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), Op, Ty); UniqueSCEVs.InsertNode(S, IP); registerUser(S, Op); return S; } const SCEV * ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!"); assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); Ty = getEffectiveSCEVType(Ty); // Fold if the operand is constant. if (const SCEVConstant *SC = dyn_cast(Op)) return getConstant( cast(ConstantExpr::getSExt(SC->getValue(), Ty))); // sext(sext(x)) --> sext(x) if (const SCEVSignExtendExpr *SS = dyn_cast(Op)) return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); // sext(zext(x)) --> zext(x) if (const SCEVZeroExtendExpr *SZ = dyn_cast(Op)) return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); // Before doing any expensive analysis, check to see if we've already // computed a SCEV for this Op and Ty. FoldingSetNodeID ID; ID.AddInteger(scSignExtend); ID.AddPointer(Op); ID.AddPointer(Ty); void *IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; // Limit recursion depth. if (Depth > MaxCastDepth) { SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), Op, Ty); UniqueSCEVs.InsertNode(S, IP); registerUser(S, Op); return S; } // sext(trunc(x)) --> sext(x) or x or trunc(x) if (const SCEVTruncateExpr *ST = dyn_cast(Op)) { // It's possible the bits taken off by the truncate were all sign bits. If // so, we should be able to simplify this further. const SCEV *X = ST->getOperand(); ConstantRange CR = getSignedRange(X); unsigned TruncBits = getTypeSizeInBits(ST->getType()); unsigned NewBits = getTypeSizeInBits(Ty); if (CR.truncate(TruncBits).signExtend(NewBits).contains( CR.sextOrTrunc(NewBits))) return getTruncateOrSignExtend(X, Ty, Depth); } if (auto *SA = dyn_cast(Op)) { // sext((A + B + ...)) --> (sext(A) + sext(B) + ...) if (SA->hasNoSignedWrap()) { // If the addition does not sign overflow then we can, by definition, // commute the sign extension with the addition operation. 
SmallVector Ops; for (const auto *Op : SA->operands()) Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); } // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) // if D + (C - D + x + y + ...) could be proven to not signed wrap // where D maximizes the number of trailing zeros of (C - D + x + y + ...) // // For instance, this will bring two seemingly different expressions: // 1 + sext(5 + 20 * %x + 24 * %y) and // sext(6 + 20 * %x + 24 * %y) // to the same form: // 2 + sext(4 + 20 * %x + 24 * %y) if (const auto *SC = dyn_cast(SA->getOperand(0))) { const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); if (D != 0) { const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); const SCEV *SResidual = getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); return getAddExpr(SSExtD, SSExtR, (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), Depth + 1); } } } // If the input value is a chrec scev, and we can prove that the value // did not overflow the old, smaller, value, we can sign extend all of the // operands (often constants). This allows analysis of something like // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } if (const SCEVAddRecExpr *AR = dyn_cast(Op)) if (AR->isAffine()) { const SCEV *Start = AR->getStart(); const SCEV *Step = AR->getStepRecurrence(*this); unsigned BitWidth = getTypeSizeInBits(AR->getType()); const Loop *L = AR->getLoop(); if (!AR->hasNoSignedWrap()) { auto NewFlags = proveNoWrapViaConstantRanges(AR); setNoWrapFlags(const_cast(AR), NewFlags); } // If we have special knowledge that this addrec won't overflow, // we don't need to do any further analysis. if (AR->hasNoSignedWrap()) { Start = getExtendAddRecStart(AR, Ty, this, Depth + 1); Step = getSignExtendExpr(Step, Ty, Depth + 1); return getAddRecExpr(Start, Step, L, SCEV::FlagNSW); } // Check whether the backedge-taken count is SCEVCouldNotCompute. // Note that this serves two purposes: It filters out loops that are // simply not analyzable, and it covers the case where this code is // being called from within backedge-taken count analysis, such that // attempting to ask for the backedge-taken count would likely result // in infinite recursion. In the later case, the analysis code will // cope with a conservative value, and it will take care to purge // that value once it has finished. const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); if (!isa(MaxBECount)) { // Manually compute the final value for AR, checking for // overflow. // Check whether the backedge-taken count can be losslessly casted to // the addrec's type. The count is always unsigned. const SCEV *CastedMaxBECount = getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( CastedMaxBECount, MaxBECount->getType(), Depth); if (MaxBECount == RecastedMaxBECount) { Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); // Check whether Start+Step*MaxBECount has no signed overflow. 
const SCEV *SMul = getMulExpr(CastedMaxBECount, Step, SCEV::FlagAnyWrap, Depth + 1); const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul, SCEV::FlagAnyWrap, Depth + 1), WideTy, Depth + 1); const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1); const SCEV *WideMaxBECount = getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); const SCEV *OperandExtendedAdd = getAddExpr(WideStart, getMulExpr(WideMaxBECount, getSignExtendExpr(Step, WideTy, Depth + 1), SCEV::FlagAnyWrap, Depth + 1), SCEV::FlagAnyWrap, Depth + 1); if (SAdd == OperandExtendedAdd) { // Cache knowledge of AR NSW, which is propagated to this AddRec. setNoWrapFlags(const_cast(AR), SCEV::FlagNSW); // Return the expression with the addrec on the outside. Start = getExtendAddRecStart(AR, Ty, this, Depth + 1); Step = getSignExtendExpr(Step, Ty, Depth + 1); return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); } // Similar to above, only this time treat the step value as unsigned. // This covers loops that count up with an unsigned step. OperandExtendedAdd = getAddExpr(WideStart, getMulExpr(WideMaxBECount, getZeroExtendExpr(Step, WideTy, Depth + 1), SCEV::FlagAnyWrap, Depth + 1), SCEV::FlagAnyWrap, Depth + 1); if (SAdd == OperandExtendedAdd) { // If AR wraps around then // // abs(Step) * MaxBECount > unsigned-max(AR->getType()) // => SAdd != OperandExtendedAdd // // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> // (SAdd == OperandExtendedAdd => AR is NW) setNoWrapFlags(const_cast(AR), SCEV::FlagNW); // Return the expression with the addrec on the outside. Start = getExtendAddRecStart(AR, Ty, this, Depth + 1); Step = getZeroExtendExpr(Step, Ty, Depth + 1); return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); } } } auto NewFlags = proveNoSignedWrapViaInduction(AR); setNoWrapFlags(const_cast(AR), NewFlags); if (AR->hasNoSignedWrap()) { // Same as nsw case above - duplicated here to avoid a compile time // issue. It's not clear that the order of checks does matter, but // it's one of two issue possible causes for a change which was // reverted. Be conservative for the moment. Start = getExtendAddRecStart(AR, Ty, this, Depth + 1); Step = getSignExtendExpr(Step, Ty, Depth + 1); return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); } // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step})) // if D + (C - D + Step * n) could be proven to not signed wrap // where D maximizes the number of trailing zeros of (C - D + Step * n) if (const auto *SC = dyn_cast(Start)) { const APInt &C = SC->getAPInt(); const APInt &D = extractConstantWithoutWrapping(*this, C, Step); if (D != 0) { const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); const SCEV *SResidual = getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); return getAddExpr(SSExtD, SSExtR, (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), Depth + 1); } } if (proveNoWrapByVaryingStart(Start, Step, L)) { setNoWrapFlags(const_cast(AR), SCEV::FlagNSW); Start = getExtendAddRecStart(AR, Ty, this, Depth + 1); Step = getSignExtendExpr(Step, Ty, Depth + 1); return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); } } // If the input value is provably positive and we could not simplify // away the sext build a zext instead. if (isKnownNonNegative(Op)) return getZeroExtendExpr(Op, Ty, Depth + 1); // The cast wasn't folded; create an explicit cast node. // Recompute the insert position, as it may have been invalidated. 
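  // (The recursive getSignExtendExpr/getZeroExtendExpr/getAddExpr calls above
  //  may have inserted new nodes into UniqueSCEVs, which invalidates the
  //  insert position captured by the earlier FindNodeOrInsertPos call.)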
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), Op, Ty); UniqueSCEVs.InsertNode(S, IP); registerUser(S, { Op }); return S; } const SCEV *ScalarEvolution::getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty) { switch (Kind) { case scTruncate: return getTruncateExpr(Op, Ty); case scZeroExtend: return getZeroExtendExpr(Op, Ty); case scSignExtend: return getSignExtendExpr(Op, Ty); case scPtrToInt: return getPtrToIntExpr(Op, Ty); default: llvm_unreachable("Not a SCEV cast expression!"); } } /// getAnyExtendExpr - Return a SCEV for the given operand extended with /// unspecified bits out to the given type. const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, Type *Ty) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!"); Ty = getEffectiveSCEVType(Ty); // Sign-extend negative constants. if (const SCEVConstant *SC = dyn_cast(Op)) if (SC->getAPInt().isNegative()) return getSignExtendExpr(Op, Ty); // Peel off a truncate cast. if (const SCEVTruncateExpr *T = dyn_cast(Op)) { const SCEV *NewOp = T->getOperand(); if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) return getAnyExtendExpr(NewOp, Ty); return getTruncateOrNoop(NewOp, Ty); } // Next try a zext cast. If the cast is folded, use it. const SCEV *ZExt = getZeroExtendExpr(Op, Ty); if (!isa(ZExt)) return ZExt; // Next try a sext cast. If the cast is folded, use it. const SCEV *SExt = getSignExtendExpr(Op, Ty); if (!isa(SExt)) return SExt; // Force the cast to be folded into the operands of an addrec. if (const SCEVAddRecExpr *AR = dyn_cast(Op)) { SmallVector Ops; for (const SCEV *Op : AR->operands()) Ops.push_back(getAnyExtendExpr(Op, Ty)); return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); } // If the expression is obviously signed, use the sext cast value. if (isa(Op)) return SExt; // Absent any other information, use the zext cast value. return ZExt; } /// Process the given Ops list, which is a list of operands to be added under /// the given scale, update the given map. This is a helper function for /// getAddRecExpr. As an example of what it does, given a sequence of operands /// that would form an add expression like this: /// /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) /// /// where A and B are constants, update the map with these values: /// /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) /// /// and add 13 + A*B*29 to AccumulatedConstant. /// This will allow getAddRecExpr to produce this: /// /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) /// /// This form often exposes folding opportunities that are hidden in /// the original operand list. /// /// Return true iff it appears that any interesting folding opportunities /// may be exposed. This helps getAddRecExpr short-circuit extra work in /// the common case where no interesting opportunities are present, and /// is also used as a check to avoid infinite recursion. static bool CollectAddOperandsWithScales(DenseMap &M, SmallVectorImpl &NewOps, APInt &AccumulatedConstant, const SCEV *const *Ops, size_t NumOperands, const APInt &Scale, ScalarEvolution &SE) { bool Interesting = false; // Iterate over the add operands. They are sorted, with constants first. unsigned i = 0; while (const SCEVConstant *C = dyn_cast(Ops[i])) { ++i; // Pull a buried constant out to the outside. 
if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) Interesting = true; AccumulatedConstant += Scale * C->getAPInt(); } // Next comes everything else. We're especially interested in multiplies // here, but they're in the middle, so just visit the rest with one loop. for (; i != NumOperands; ++i) { const SCEVMulExpr *Mul = dyn_cast(Ops[i]); if (Mul && isa(Mul->getOperand(0))) { APInt NewScale = Scale * cast(Mul->getOperand(0))->getAPInt(); if (Mul->getNumOperands() == 2 && isa(Mul->getOperand(1))) { // A multiplication of a constant with another add; recurse. const SCEVAddExpr *Add = cast(Mul->getOperand(1)); Interesting |= CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, Add->op_begin(), Add->getNumOperands(), NewScale, SE); } else { // A multiplication of a constant with some other value. Update // the map. SmallVector MulOps(drop_begin(Mul->operands())); const SCEV *Key = SE.getMulExpr(MulOps); auto Pair = M.insert({Key, NewScale}); if (Pair.second) { NewOps.push_back(Pair.first->first); } else { Pair.first->second += NewScale; // The map already had an entry for this value, which may indicate // a folding opportunity. Interesting = true; } } } else { // An ordinary operand. Update the map. std::pair::iterator, bool> Pair = M.insert({Ops[i], Scale}); if (Pair.second) { NewOps.push_back(Pair.first->first); } else { Pair.first->second += Scale; // The map already had an entry for this value, which may indicate // a folding opportunity. Interesting = true; } } } return Interesting; } bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS) { const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *, SCEV::NoWrapFlags, unsigned); switch (BinOp) { default: llvm_unreachable("Unsupported binary op"); case Instruction::Add: Operation = &ScalarEvolution::getAddExpr; break; case Instruction::Sub: Operation = &ScalarEvolution::getMinusSCEV; break; case Instruction::Mul: Operation = &ScalarEvolution::getMulExpr; break; } const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) = Signed ? &ScalarEvolution::getSignExtendExpr : &ScalarEvolution::getZeroExtendExpr; // Check ext(LHS op RHS) == ext(LHS) op ext(RHS) auto *NarrowTy = cast(LHS->getType()); auto *WideTy = IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2); const SCEV *A = (this->*Extension)( (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0); const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0); const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0); const SCEV *B = (this->*Operation)(LHSB, RHSB, SCEV::FlagAnyWrap, 0); return A == B; } Optional ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp( const OverflowingBinaryOperator *OBO) { // It cannot be done any better. 
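  // If the IR operation already carries both nuw and nsw, there are no flags
  // left to strengthen, so bail out early.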
if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap()) return None; SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap; if (OBO->hasNoUnsignedWrap()) Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); if (OBO->hasNoSignedWrap()) Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); bool Deduced = false; if (OBO->getOpcode() != Instruction::Add && OBO->getOpcode() != Instruction::Sub && OBO->getOpcode() != Instruction::Mul) return None; const SCEV *LHS = getSCEV(OBO->getOperand(0)); const SCEV *RHS = getSCEV(OBO->getOperand(1)); if (!OBO->hasNoUnsignedWrap() && willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), /* Signed */ false, LHS, RHS)) { Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); Deduced = true; } if (!OBO->hasNoSignedWrap() && willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), /* Signed */ true, LHS, RHS)) { Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); Deduced = true; } if (Deduced) return Flags; return None; } // We're trying to construct a SCEV of type `Type' with `Ops' as operands and // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of // can't-overflow flags for the operation if possible. static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, const ArrayRef Ops, SCEV::NoWrapFlags Flags) { using namespace std::placeholders; using OBO = OverflowingBinaryOperator; bool CanAnalyze = Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; (void)CanAnalyze; assert(CanAnalyze && "don't call from other places!"); int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; SCEV::NoWrapFlags SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. auto IsKnownNonNegative = [&](const SCEV *S) { return SE->isKnownNonNegative(S); }; if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) Flags = ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); if (SignOrUnsignWrap != SignOrUnsignMask && (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && isa(Ops[0])) { auto Opcode = [&] { switch (Type) { case scAddExpr: return Instruction::Add; case scMulExpr: return Instruction::Mul; default: llvm_unreachable("Unexpected SCEV op."); } }(); const APInt &C = cast(Ops[0])->getAPInt(); // (A C) --> (A C) if the op doesn't sign overflow. if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( Opcode, C, OBO::NoSignedWrap); if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); } // (A C) --> (A C) if the op doesn't unsign overflow. 
if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( Opcode, C, OBO::NoUnsignedWrap); if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); } } // <0,+,nonnegative> is also nuw // TODO: Add corresponding nsw case if (Type == scAddRecExpr && ScalarEvolution::hasFlags(Flags, SCEV::FlagNW) && !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 && Ops[0]->isZero() && IsKnownNonNegative(Ops[1])) Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); // both (udiv X, Y) * Y and Y * (udiv X, Y) are always NUW if (Type == scMulExpr && !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2) { if (auto *UDiv = dyn_cast(Ops[0])) if (UDiv->getOperand(1) == Ops[1]) Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); if (auto *UDiv = dyn_cast(Ops[1])) if (UDiv->getOperand(1) == Ops[0]) Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); } return Flags; } bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); } /// Get a canonical add expression, or something simpler if possible. const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl &Ops, SCEV::NoWrapFlags OrigFlags, unsigned Depth) { assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && "only nuw or nsw allowed"); assert(!Ops.empty() && "Cannot get empty add!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); for (unsigned i = 1, e = Ops.size(); i != e; ++i) assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && "SCEVAddExpr operand types don't match!"); unsigned NumPtrs = count_if( Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); }); assert(NumPtrs <= 1 && "add has at most one pointer operand"); #endif // Sort by complexity, this groups all similar expression types together. GroupByComplexity(Ops, &LI, DT); // If there are any constants, fold them together. unsigned Idx = 0; if (const SCEVConstant *LHSC = dyn_cast(Ops[0])) { ++Idx; assert(Idx < Ops.size()); while (const SCEVConstant *RHSC = dyn_cast(Ops[Idx])) { // We found two constants, fold them together! Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); if (Ops.size() == 2) return Ops[0]; Ops.erase(Ops.begin()+1); // Erase the folded element LHSC = cast(Ops[0]); } // If we are left with a constant zero being added, strip it off. if (LHSC->getValue()->isZero()) { Ops.erase(Ops.begin()); --Idx; } if (Ops.size() == 1) return Ops[0]; } // Delay expensive flag strengthening until necessary. auto ComputeFlags = [this, OrigFlags](const ArrayRef Ops) { return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags); }; // Limit recursion calls depth. if (Depth > MaxArithDepth || hasHugeExpression(Ops)) return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); if (SCEV *S = findExistingSCEVInCache(scAddExpr, Ops)) { // Don't strengthen flags if we have no new information. SCEVAddExpr *Add = static_cast(S); if (Add->getNoWrapFlags(OrigFlags) != OrigFlags) Add->setNoWrapFlags(ComputeFlags(Ops)); return S; } // Okay, check to see if the same value occurs in the operand list more than // once. If so, merge them together into an multiply expression. Since we // sorted the list, these values are required to be adjacent. 
Type *Ty = Ops[0]->getType(); bool FoundMatch = false; for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 // Scan ahead to count how many equal operands there are. unsigned Count = 2; while (i+Count != e && Ops[i+Count] == Ops[i]) ++Count; // Merge the values into a multiply. const SCEV *Scale = getConstant(Ty, Count); const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1); if (Ops.size() == Count) return Mul; Ops[i] = Mul; Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); --i; e -= Count - 1; FoundMatch = true; } if (FoundMatch) return getAddExpr(Ops, OrigFlags, Depth + 1); // Check for truncates. If all the operands are truncated from the same // type, see if factoring out the truncate would permit the result to be // folded. eg., n*trunc(x) + m*trunc(y) --> trunc(trunc(m)*x + trunc(n)*y) // if the contents of the resulting outer trunc fold to something simple. auto FindTruncSrcType = [&]() -> Type * { // We're ultimately looking to fold an addrec of truncs and muls of only // constants and truncs, so if we find any other types of SCEV // as operands of the addrec then we bail and return nullptr here. // Otherwise, we return the type of the operand of a trunc that we find. if (auto *T = dyn_cast(Ops[Idx])) return T->getOperand()->getType(); if (const auto *Mul = dyn_cast(Ops[Idx])) { const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); if (const auto *T = dyn_cast(LastOp)) return T->getOperand()->getType(); } return nullptr; }; if (auto *SrcType = FindTruncSrcType()) { SmallVector LargeOps; bool Ok = true; // Check all the operands to see if they can be represented in the // source type of the truncate. for (unsigned i = 0, e = Ops.size(); i != e; ++i) { if (const SCEVTruncateExpr *T = dyn_cast(Ops[i])) { if (T->getOperand()->getType() != SrcType) { Ok = false; break; } LargeOps.push_back(T->getOperand()); } else if (const SCEVConstant *C = dyn_cast(Ops[i])) { LargeOps.push_back(getAnyExtendExpr(C, SrcType)); } else if (const SCEVMulExpr *M = dyn_cast(Ops[i])) { SmallVector LargeMulOps; for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { if (const SCEVTruncateExpr *T = dyn_cast(M->getOperand(j))) { if (T->getOperand()->getType() != SrcType) { Ok = false; break; } LargeMulOps.push_back(T->getOperand()); } else if (const auto *C = dyn_cast(M->getOperand(j))) { LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); } else { Ok = false; break; } } if (Ok) LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); } else { Ok = false; break; } } if (Ok) { // Evaluate the expression in the larger type. const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1); // If it folds to something simple, use it. Otherwise, don't. if (isa(Fold) || isa(Fold)) return getTruncateExpr(Fold, Ty); } } if (Ops.size() == 2) { // Check if we have an expression of the form ((X + C1) - C2), where C1 and // C2 can be folded in a way that allows retaining wrapping flags of (X + // C1). const SCEV *A = Ops[0]; const SCEV *B = Ops[1]; auto *AddExpr = dyn_cast(B); auto *C = dyn_cast(A); if (AddExpr && C && isa(AddExpr->getOperand(0))) { auto C1 = cast(AddExpr->getOperand(0))->getAPInt(); auto C2 = C->getAPInt(); SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap; APInt ConstAdd = C1 + C2; auto AddFlags = AddExpr->getNoWrapFlags(); // Adding a smaller constant is NUW if the original AddExpr was NUW. 
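      // For instance (illustrative example): folding ((%x + 10)<nuw> + (-3))
      // gives ConstAdd == 7 <= 10, so the result (%x + 7) can keep the nuw
      // flag; if ConstAdd were unsigned-greater than C1, this fold could not
      // preserve it.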
if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNUW) && ConstAdd.ule(C1)) { PreservedFlags = ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW); } // Adding a constant with the same sign and small magnitude is NSW, if the // original AddExpr was NSW. if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNSW) && C1.isSignBitSet() == ConstAdd.isSignBitSet() && ConstAdd.abs().ule(C1.abs())) { PreservedFlags = ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW); } if (PreservedFlags != SCEV::FlagAnyWrap) { SmallVector NewOps(AddExpr->operands()); NewOps[0] = getConstant(ConstAdd); return getAddExpr(NewOps, PreservedFlags); } } } // Canonicalize (-1 * urem X, Y) + X --> (Y * X/Y) if (Ops.size() == 2) { const SCEVMulExpr *Mul = dyn_cast(Ops[0]); if (Mul && Mul->getNumOperands() == 2 && Mul->getOperand(0)->isAllOnesValue()) { const SCEV *X; const SCEV *Y; if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) { return getMulExpr(Y, getUDivExpr(X, Y)); } } } // Skip past any other cast SCEVs. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) ++Idx; // If there are add operands they would be next. if (Idx < Ops.size()) { bool DeletedAdd = false; // If the original flags and all inlined SCEVAddExprs are NUW, use the // common NUW flag for expression after inlining. Other flags cannot be // preserved, because they may depend on the original order of operations. SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW); while (const SCEVAddExpr *Add = dyn_cast(Ops[Idx])) { if (Ops.size() > AddOpsInlineThreshold || Add->getNumOperands() > AddOpsInlineThreshold) break; // If we have an add, expand the add operands onto the end of the operands // list. Ops.erase(Ops.begin()+Idx); Ops.append(Add->op_begin(), Add->op_end()); DeletedAdd = true; CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags()); } // If we deleted at least one add, we added operands to the end of the list, // and they are not necessarily sorted. Recurse to resort and resimplify // any operands we just acquired. if (DeletedAdd) return getAddExpr(Ops, CommonFlags, Depth + 1); } // Skip over the add expression until we get to a multiply. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) ++Idx; // Check to see if there are any folding opportunities present with // operands multiplied by constant values. if (Idx < Ops.size() && isa(Ops[Idx])) { uint64_t BitWidth = getTypeSizeInBits(Ty); DenseMap M; SmallVector NewOps; APInt AccumulatedConstant(BitWidth, 0); if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, Ops.data(), Ops.size(), APInt(BitWidth, 1), *this)) { struct APIntCompare { bool operator()(const APInt &LHS, const APInt &RHS) const { return LHS.ult(RHS); } }; // Some interesting folding opportunity is present, so its worthwhile to // re-generate the operands list. Group the operands by constant scale, // to avoid multiplying by the same constant scale multiple times. std::map, APIntCompare> MulOpLists; for (const SCEV *NewOp : NewOps) MulOpLists[M.find(NewOp)->second].push_back(NewOp); // Re-generate the operands list. 
Ops.clear(); if (AccumulatedConstant != 0) Ops.push_back(getConstant(AccumulatedConstant)); for (auto &MulOp : MulOpLists) { if (MulOp.first == 1) { Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)); } else if (MulOp.first != 0) { Ops.push_back(getMulExpr( getConstant(MulOp.first), getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), SCEV::FlagAnyWrap, Depth + 1)); } } if (Ops.empty()) return getZero(Ty); if (Ops.size() == 1) return Ops[0]; return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); } } // If we are adding something to a multiply expression, make sure the // something is not already an operand of the multiply. If so, merge it into // the multiply. for (; Idx < Ops.size() && isa(Ops[Idx]); ++Idx) { const SCEVMulExpr *Mul = cast(Ops[Idx]); for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { const SCEV *MulOpSCEV = Mul->getOperand(MulOp); if (isa(MulOpSCEV)) continue; for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) if (MulOpSCEV == Ops[AddOp]) { // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) const SCEV *InnerMul = Mul->getOperand(MulOp == 0); if (Mul->getNumOperands() != 2) { // If the multiply has more than two operands, we must get the // Y*Z term. SmallVector MulOps(Mul->op_begin(), Mul->op_begin()+MulOp); MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); } SmallVector TwoOps = {getOne(Ty), InnerMul}; const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, SCEV::FlagAnyWrap, Depth + 1); if (Ops.size() == 2) return OuterMul; if (AddOp < Idx) { Ops.erase(Ops.begin()+AddOp); Ops.erase(Ops.begin()+Idx-1); } else { Ops.erase(Ops.begin()+Idx); Ops.erase(Ops.begin()+AddOp-1); } Ops.push_back(OuterMul); return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); } // Check this multiply against other multiplies being added together. for (unsigned OtherMulIdx = Idx+1; OtherMulIdx < Ops.size() && isa(Ops[OtherMulIdx]); ++OtherMulIdx) { const SCEVMulExpr *OtherMul = cast(Ops[OtherMulIdx]); // If MulOp occurs in OtherMul, we can fold the two multiplies // together. for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); OMulOp != e; ++OMulOp) if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); if (Mul->getNumOperands() != 2) { SmallVector MulOps(Mul->op_begin(), Mul->op_begin()+MulOp); MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); } const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); if (OtherMul->getNumOperands() != 2) { SmallVector MulOps(OtherMul->op_begin(), OtherMul->op_begin()+OMulOp); MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); } SmallVector TwoOps = {InnerMul1, InnerMul2}; const SCEV *InnerMulSum = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, SCEV::FlagAnyWrap, Depth + 1); if (Ops.size() == 2) return OuterMul; Ops.erase(Ops.begin()+Idx); Ops.erase(Ops.begin()+OtherMulIdx-1); Ops.push_back(OuterMul); return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); } } } } // If there are any add recurrences in the operands list, see if any other // added values are loop invariant. If so, we can fold them into the // recurrence. 
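  // For example, if A is invariant in loop L, then A + {B,+,C}<L> can be
  // folded to {A+B,+,C}<L>; the scan below performs exactly that rewrite.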
while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) ++Idx; // Scan over all recurrences, trying to fold loop invariants into them. for (; Idx < Ops.size() && isa(Ops[Idx]); ++Idx) { // Scan all of the other operands to this add and add them to the vector if // they are loop invariant w.r.t. the recurrence. SmallVector LIOps; const SCEVAddRecExpr *AddRec = cast(Ops[Idx]); const Loop *AddRecLoop = AddRec->getLoop(); for (unsigned i = 0, e = Ops.size(); i != e; ++i) if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { LIOps.push_back(Ops[i]); Ops.erase(Ops.begin()+i); --i; --e; } // If we found some loop invariants, fold them into the recurrence. if (!LIOps.empty()) { // Compute nowrap flags for the addition of the loop-invariant ops and // the addrec. Temporarily push it as an operand for that purpose. These // flags are valid in the scope of the addrec only. LIOps.push_back(AddRec); SCEV::NoWrapFlags Flags = ComputeFlags(LIOps); LIOps.pop_back(); // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} LIOps.push_back(AddRec->getStart()); SmallVector AddRecOps(AddRec->operands()); // It is not in general safe to propagate flags valid on an add within // the addrec scope to one outside it. We must prove that the inner // scope is guaranteed to execute if the outer one does to be able to // safely propagate. We know the program is undefined if poison is // produced on the inner scoped addrec. We also know that *for this use* // the outer scoped add can't overflow (because of the flags we just // computed for the inner scoped add) without the program being undefined. // Proving that entry to the outer scope neccesitates entry to the inner // scope, thus proves the program undefined if the flags would be violated // in the outer scope. SCEV::NoWrapFlags AddFlags = Flags; if (AddFlags != SCEV::FlagAnyWrap) { auto *DefI = getDefiningScopeBound(LIOps); auto *ReachI = &*AddRecLoop->getHeader()->begin(); if (!isGuaranteedToTransferExecutionTo(DefI, ReachI)) AddFlags = SCEV::FlagAnyWrap; } AddRecOps[0] = getAddExpr(LIOps, AddFlags, Depth + 1); // Build the new addrec. Propagate the NUW and NSW flags if both the // outer add and the inner addrec are guaranteed to have no overflow. // Always propagate NW. Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); // If all of the other operands were loop invariant, we are done. if (Ops.size() == 1) return NewRec; // Otherwise, add the folded AddRec by the non-invariant parts. for (unsigned i = 0;; ++i) if (Ops[i] == AddRec) { Ops[i] = NewRec; break; } return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); } // Okay, if there weren't any loop invariants to be folded, check to see if // there are multiple AddRec's with the same loop induction variable being // added together. If so, we can fold them. for (unsigned OtherIdx = Idx+1; OtherIdx < Ops.size() && isa(Ops[OtherIdx]); ++OtherIdx) { // We expect the AddRecExpr's to be sorted in reverse dominance order, // so that the 1st found AddRecExpr is dominated by all others. 
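    // For example, two recurrences over the same loop add element-wise:
    // {1,+,2}<L> + {10,+,20}<L> folds to {11,+,22}<L> in the loop below.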
assert(DT.dominates( cast(Ops[OtherIdx])->getLoop()->getHeader(), AddRec->getLoop()->getHeader()) && "AddRecExprs are not sorted in reverse dominance order?"); if (AddRecLoop == cast(Ops[OtherIdx])->getLoop()) { // Other + {A,+,B} + {C,+,D} --> Other + {A+C,+,B+D} SmallVector AddRecOps(AddRec->operands()); for (; OtherIdx != Ops.size() && isa(Ops[OtherIdx]); ++OtherIdx) { const auto *OtherAddRec = cast(Ops[OtherIdx]); if (OtherAddRec->getLoop() == AddRecLoop) { for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) { if (i >= AddRecOps.size()) { AddRecOps.append(OtherAddRec->op_begin()+i, OtherAddRec->op_end()); break; } SmallVector TwoOps = { AddRecOps[i], OtherAddRec->getOperand(i)}; AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); } Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; } } // Step size has changed, so we cannot guarantee no self-wraparound. Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); } } // Otherwise couldn't fold anything into this recurrence. Move onto the // next one. } // Okay, it looks like we really DO need an add expr. Check to see if we // already have one, otherwise create a new one. return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); } const SCEV * ScalarEvolution::getOrCreateAddExpr(ArrayRef Ops, SCEV::NoWrapFlags Flags) { FoldingSetNodeID ID; ID.AddInteger(scAddExpr); for (const SCEV *Op : Ops) ID.AddPointer(Op); void *IP = nullptr; SCEVAddExpr *S = static_cast(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); if (!S) { const SCEV **O = SCEVAllocator.Allocate(Ops.size()); std::uninitialized_copy(Ops.begin(), Ops.end(), O); S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); UniqueSCEVs.InsertNode(S, IP); registerUser(S, Ops); } S->setNoWrapFlags(Flags); return S; } const SCEV * ScalarEvolution::getOrCreateAddRecExpr(ArrayRef Ops, const Loop *L, SCEV::NoWrapFlags Flags) { FoldingSetNodeID ID; ID.AddInteger(scAddRecExpr); for (const SCEV *Op : Ops) ID.AddPointer(Op); ID.AddPointer(L); void *IP = nullptr; SCEVAddRecExpr *S = static_cast(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); if (!S) { const SCEV **O = SCEVAllocator.Allocate(Ops.size()); std::uninitialized_copy(Ops.begin(), Ops.end(), O); S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L); UniqueSCEVs.InsertNode(S, IP); LoopUsers[L].push_back(S); registerUser(S, Ops); } setNoWrapFlags(S, Flags); return S; } const SCEV * ScalarEvolution::getOrCreateMulExpr(ArrayRef Ops, SCEV::NoWrapFlags Flags) { FoldingSetNodeID ID; ID.AddInteger(scMulExpr); for (const SCEV *Op : Ops) ID.AddPointer(Op); void *IP = nullptr; SCEVMulExpr *S = static_cast(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); if (!S) { const SCEV **O = SCEVAllocator.Allocate(Ops.size()); std::uninitialized_copy(Ops.begin(), Ops.end(), O); S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), O, Ops.size()); UniqueSCEVs.InsertNode(S, IP); registerUser(S, Ops); } S->setNoWrapFlags(Flags); return S; } static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { uint64_t k = i*j; if (j > 1 && k / j != i) Overflow = true; return k; } /// Compute the result of "n choose k", the binomial coefficient. If an /// intermediate computation overflows, Overflow will be set and the return will /// be garbage. Overflow is not cleared on absence of overflow. static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { // We use the multiplicative formula: // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . 
// At each iteration, we take the n-th term of the numerator and divide by the
// (k-n)th term of the denominator. This division will always produce an
// integral result, and helps reduce the chance of overflow in the
// intermediate computations. However, we can still overflow even when the
// final result would fit.
static uint64_t ChooseImplNote_unused(); // (no content change intended here)
  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}

/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}

/// Get a canonical multiply expression, or something simpler if possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags OrigFlags,
                                        unsigned Depth) {
  assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = Ops[0]->getType();
  assert(!ETy->isPointerTy());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(Ops[i]->getType() == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we have a multiply of zero, it will always be zero.
    if (LHSC->getValue()->isZero())
      return LHSC;

    // If we are left with a constant one being multiplied, strip it off.
    if (LHSC->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Delay expensive flag strengthening until necessary.
  auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
    return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
  };

  // Limit recursion calls depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateMulExpr(Ops, ComputeFlags(Ops));

  if (SCEV *S = findExistingSCEVInCache(scMulExpr, Ops)) {
    // Don't strengthen flags if we have no new information.
    SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
    if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
      Mul->setNoWrapFlags(ComputeFlags(Ops));
    return S;
  }

  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    if (Ops.size() == 2) {
      // C1*(C2+V) -> C1*C2 + C1*V
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        // If any of Add's ops are Adds or Muls with a constant, apply this
        // transformation as well.
        //
        // TODO: There are some cases where this transformation is not
        // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
        // this transformation should be narrowed down.
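        // For example, 2 * (3 + X) is rewritten as 6 + 2*X here, exposing the
        // constant 6 to the add-expression folds above.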
if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) { const SCEV *LHS = getMulExpr(LHSC, Add->getOperand(0), SCEV::FlagAnyWrap, Depth + 1); const SCEV *RHS = getMulExpr(LHSC, Add->getOperand(1), SCEV::FlagAnyWrap, Depth + 1); return getAddExpr(LHS, RHS, SCEV::FlagAnyWrap, Depth + 1); } if (Ops[0]->isAllOnesValue()) { // If we have a mul by -1 of an add, try distributing the -1 among the // add operands. if (const SCEVAddExpr *Add = dyn_cast(Ops[1])) { SmallVector NewOps; bool AnyFolded = false; for (const SCEV *AddOp : Add->operands()) { const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, Depth + 1); if (!isa(Mul)) AnyFolded = true; NewOps.push_back(Mul); } if (AnyFolded) return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); } else if (const auto *AddRec = dyn_cast(Ops[1])) { // Negation preserves a recurrence's no self-wrap property. SmallVector Operands; for (const SCEV *AddRecOp : AddRec->operands()) Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, Depth + 1)); return getAddRecExpr(Operands, AddRec->getLoop(), AddRec->getNoWrapFlags(SCEV::FlagNW)); } } } } // Skip over the add expression until we get to a multiply. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) ++Idx; // If there are mul operands inline them all into this expression. if (Idx < Ops.size()) { bool DeletedMul = false; while (const SCEVMulExpr *Mul = dyn_cast(Ops[Idx])) { if (Ops.size() > MulOpsInlineThreshold) break; // If we have an mul, expand the mul operands onto the end of the // operands list. Ops.erase(Ops.begin()+Idx); Ops.append(Mul->op_begin(), Mul->op_end()); DeletedMul = true; } // If we deleted at least one mul, we added operands to the end of the // list, and they are not necessarily sorted. Recurse to resort and // resimplify any operands we just acquired. if (DeletedMul) return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); } // If there are any add recurrences in the operands list, see if any other // added values are loop invariant. If so, we can fold them into the // recurrence. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) ++Idx; // Scan over all recurrences, trying to fold loop invariants into them. for (; Idx < Ops.size() && isa(Ops[Idx]); ++Idx) { // Scan all of the other operands to this mul and add them to the vector // if they are loop invariant w.r.t. the recurrence. SmallVector LIOps; const SCEVAddRecExpr *AddRec = cast(Ops[Idx]); const Loop *AddRecLoop = AddRec->getLoop(); for (unsigned i = 0, e = Ops.size(); i != e; ++i) if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { LIOps.push_back(Ops[i]); Ops.erase(Ops.begin()+i); --i; --e; } // If we found some loop invariants, fold them into the recurrence. if (!LIOps.empty()) { // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} SmallVector NewOps; NewOps.reserve(AddRec->getNumOperands()); const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), SCEV::FlagAnyWrap, Depth + 1)); // Build the new addrec. Propagate the NUW and NSW flags if both the // outer mul and the inner addrec are guaranteed to have no overflow. // // No self-wrap cannot be guaranteed after changing the step size, but // will be inferred if either NUW or NSW is true. 
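      // For example, 3 * {2,+,4}<L> becomes {6,+,12}<L>; whether NUW/NSW
      // survive depends on the flags computed for the scaled expression below.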
SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec}); const SCEV *NewRec = getAddRecExpr( NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags)); // If all of the other operands were loop invariant, we are done. if (Ops.size() == 1) return NewRec; // Otherwise, multiply the folded AddRec by the non-invariant parts. for (unsigned i = 0;; ++i) if (Ops[i] == AddRec) { Ops[i] = NewRec; break; } return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); } // Okay, if there weren't any loop invariants to be folded, check to see // if there are multiple AddRec's with the same loop induction variable // being multiplied together. If so, we can fold them. // {A1,+,A2,+,...,+,An} * {B1,+,B2,+,...,+,Bn} // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z // ]]],+,...up to x=2n}. // Note that the arguments to choose() are always integers with values // known at compile time, never SCEV objects. // // The implementation avoids pointless extra computations when the two // addrec's are of different length (mathematically, it's equivalent to // an infinite stream of zeros on the right). bool OpsModified = false; for (unsigned OtherIdx = Idx+1; OtherIdx != Ops.size() && isa(Ops[OtherIdx]); ++OtherIdx) { const SCEVAddRecExpr *OtherAddRec = dyn_cast(Ops[OtherIdx]); if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) continue; // Limit max number of arguments to avoid creation of unreasonably big // SCEVAddRecs with very complex operands. if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 > MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec})) continue; bool Overflow = false; Type *Ty = AddRec->getType(); bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; SmallVector AddRecOps; for (int x = 0, xe = AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { SmallVector SumOps; for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); z < ze && !Overflow; ++z) { uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); uint64_t Coeff; if (LargerThan64Bits) Coeff = umul_ov(Coeff1, Coeff2, Overflow); else Coeff = Coeff1*Coeff2; const SCEV *CoeffTerm = getConstant(Ty, Coeff); const SCEV *Term1 = AddRec->getOperand(y-z); const SCEV *Term2 = OtherAddRec->getOperand(z); SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2, SCEV::FlagAnyWrap, Depth + 1)); } } if (SumOps.empty()) SumOps.push_back(getZero(Ty)); AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1)); } if (!Overflow) { const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); if (Ops.size() == 2) return NewAddRec; Ops[Idx] = NewAddRec; Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; OpsModified = true; AddRec = dyn_cast(NewAddRec); if (!AddRec) break; } } if (OpsModified) return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); // Otherwise couldn't fold anything into this recurrence. Move onto the // next one. } // Okay, it looks like we really DO need an mul expr. Check to see if we // already have one, otherwise create a new one. return getOrCreateMulExpr(Ops, ComputeFlags(Ops)); } /// Represents an unsigned remainder expression based on unsigned division. 
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS, const SCEV *RHS) { assert(getEffectiveSCEVType(LHS->getType()) == getEffectiveSCEVType(RHS->getType()) && "SCEVURemExpr operand types don't match!"); // Short-circuit easy cases if (const SCEVConstant *RHSC = dyn_cast(RHS)) { // If constant is one, the result is trivial if (RHSC->getValue()->isOne()) return getZero(LHS->getType()); // X urem 1 --> 0 // If constant is a power of two, fold into a zext(trunc(LHS)). if (RHSC->getAPInt().isPowerOf2()) { Type *FullTy = LHS->getType(); Type *TruncTy = IntegerType::get(getContext(), RHSC->getAPInt().logBase2()); return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy); } } // Fallback to %a == %x urem %y == %x - ((%x udiv %y) * %y) const SCEV *UDiv = getUDivExpr(LHS, RHS); const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW); return getMinusSCEV(LHS, Mult, SCEV::FlagNUW); } /// Get a canonical unsigned division expression, or something simpler if /// possible. const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, const SCEV *RHS) { assert(!LHS->getType()->isPointerTy() && "SCEVUDivExpr operand can't be pointer!"); assert(LHS->getType() == RHS->getType() && "SCEVUDivExpr operand types don't match!"); FoldingSetNodeID ID; ID.AddInteger(scUDivExpr); ID.AddPointer(LHS); ID.AddPointer(RHS); void *IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; // 0 udiv Y == 0 if (const SCEVConstant *LHSC = dyn_cast(LHS)) if (LHSC->getValue()->isZero()) return LHS; if (const SCEVConstant *RHSC = dyn_cast(RHS)) { if (RHSC->getValue()->isOne()) return LHS; // X udiv 1 --> x // If the denominator is zero, the result of the udiv is undefined. Don't // try to analyze it, because the resolution chosen here may differ from // the resolution chosen in other parts of the compiler. if (!RHSC->getValue()->isZero()) { // Determine if the division can be folded into the operands of // its operands. // TODO: Generalize this to non-constants by using known-bits information. Type *Ty = LHS->getType(); unsigned LZ = RHSC->getAPInt().countLeadingZeros(); unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; // For non-power-of-two values, effectively round the value up to the // nearest power of two. if (!RHSC->getAPInt().isPowerOf2()) ++MaxShiftAmt; IntegerType *ExtTy = IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); if (const SCEVAddRecExpr *AR = dyn_cast(LHS)) if (const SCEVConstant *Step = dyn_cast(AR->getStepRecurrence(*this))) { // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. const APInt &StepInt = Step->getAPInt(); const APInt &DivInt = RHSC->getAPInt(); if (!StepInt.urem(DivInt) && getZeroExtendExpr(AR, ExtTy) == getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), getZeroExtendExpr(Step, ExtTy), AR->getLoop(), SCEV::FlagAnyWrap)) { SmallVector Operands; for (const SCEV *Op : AR->operands()) Operands.push_back(getUDivExpr(Op, RHS)); return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); } /// Get a canonical UDivExpr for a recurrence. /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0. // We can currently only fold X%N if X is constant. 
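            // For example (assuming the zero-extension check below proves the
            // recurrence does not wrap), {7,+,2}/4 is canonicalized to
            // {6,+,2}/4: the start is rounded down to a multiple of the step.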
const SCEVConstant *StartC = dyn_cast(AR->getStart()); if (StartC && !DivInt.urem(StepInt) && getZeroExtendExpr(AR, ExtTy) == getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), getZeroExtendExpr(Step, ExtTy), AR->getLoop(), SCEV::FlagAnyWrap)) { const APInt &StartInt = StartC->getAPInt(); const APInt &StartRem = StartInt.urem(StepInt); if (StartRem != 0) { const SCEV *NewLHS = getAddRecExpr(getConstant(StartInt - StartRem), Step, AR->getLoop(), SCEV::FlagNW); if (LHS != NewLHS) { LHS = NewLHS; // Reset the ID to include the new LHS, and check if it is // already cached. ID.clear(); ID.AddInteger(scUDivExpr); ID.AddPointer(LHS); ID.AddPointer(RHS); IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; } } } } // (A*B)/C --> A*(B/C) if safe and B/C can be folded. if (const SCEVMulExpr *M = dyn_cast(LHS)) { SmallVector Operands; for (const SCEV *Op : M->operands()) Operands.push_back(getZeroExtendExpr(Op, ExtTy)); if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) // Find an operand that's safely divisible. for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { const SCEV *Op = M->getOperand(i); const SCEV *Div = getUDivExpr(Op, RHSC); if (!isa(Div) && getMulExpr(Div, RHSC) == Op) { Operands = SmallVector(M->operands()); Operands[i] = Div; return getMulExpr(Operands); } } } // (A/B)/C --> A/(B*C) if safe and B*C can be folded. if (const SCEVUDivExpr *OtherDiv = dyn_cast(LHS)) { if (auto *DivisorConstant = dyn_cast(OtherDiv->getRHS())) { bool Overflow = false; APInt NewRHS = DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); if (Overflow) { return getConstant(RHSC->getType(), 0, false); } return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); } } // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. if (const SCEVAddExpr *A = dyn_cast(LHS)) { SmallVector Operands; for (const SCEV *Op : A->operands()) Operands.push_back(getZeroExtendExpr(Op, ExtTy)); if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { Operands.clear(); for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); if (isa(Op) || getMulExpr(Op, RHS) != A->getOperand(i)) break; Operands.push_back(Op); } if (Operands.size() == A->getNumOperands()) return getAddExpr(Operands); } } // Fold if both operands are constant. if (const SCEVConstant *LHSC = dyn_cast(LHS)) return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt())); } } // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs // changes). Make sure we get a new one. IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), LHS, RHS); UniqueSCEVs.InsertNode(S, IP); registerUser(S, {LHS, RHS}); return S; } APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { APInt A = C1->getAPInt().abs(); APInt B = C2->getAPInt().abs(); uint32_t ABW = A.getBitWidth(); uint32_t BBW = B.getBitWidth(); if (ABW > BBW) B = B.zext(ABW); else if (ABW < BBW) A = A.zext(BBW); return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); } /// Get a canonical unsigned division expression, or something simpler if /// possible. There is no representation for an exact udiv in SCEV IR, but we /// can attempt to remove factors from the LHS and RHS. We can't do this when /// it's not exact because the udiv may be clearing bits. 
const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, const SCEV *RHS) { // TODO: we could try to find factors in all sorts of things, but for now we // just deal with u/exact (multiply, constant). See SCEVDivision towards the // end of this file for inspiration. const SCEVMulExpr *Mul = dyn_cast(LHS); if (!Mul || !Mul->hasNoUnsignedWrap()) return getUDivExpr(LHS, RHS); if (const SCEVConstant *RHSCst = dyn_cast(RHS)) { // If the mulexpr multiplies by a constant, then that constant must be the // first element of the mulexpr. if (const auto *LHSCst = dyn_cast(Mul->getOperand(0))) { if (LHSCst == RHSCst) { SmallVector Operands(drop_begin(Mul->operands())); return getMulExpr(Operands); } // We can't just assume that LHSCst divides RHSCst cleanly, it could be // that there's a factor provided by one of the other terms. We need to // check. APInt Factor = gcd(LHSCst, RHSCst); if (!Factor.isIntN(1)) { LHSCst = cast(getConstant(LHSCst->getAPInt().udiv(Factor))); RHSCst = cast(getConstant(RHSCst->getAPInt().udiv(Factor))); SmallVector Operands; Operands.push_back(LHSCst); Operands.append(Mul->op_begin() + 1, Mul->op_end()); LHS = getMulExpr(Operands); RHS = RHSCst; Mul = dyn_cast(LHS); if (!Mul) return getUDivExactExpr(LHS, RHS); } } } for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { if (Mul->getOperand(i) == RHS) { SmallVector Operands; Operands.append(Mul->op_begin(), Mul->op_begin() + i); Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); return getMulExpr(Operands); } } return getUDivExpr(LHS, RHS); } /// Get an add recurrence expression for the specified loop. Simplify the /// expression as much as possible. const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags) { SmallVector Operands; Operands.push_back(Start); if (const SCEVAddRecExpr *StepChrec = dyn_cast(Step)) if (StepChrec->getLoop() == L) { Operands.append(StepChrec->op_begin(), StepChrec->op_end()); return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); } Operands.push_back(Step); return getAddRecExpr(Operands, L, Flags); } /// Get an add recurrence expression for the specified loop. Simplify the /// expression as much as possible. const SCEV * ScalarEvolution::getAddRecExpr(SmallVectorImpl &Operands, const Loop *L, SCEV::NoWrapFlags Flags) { if (Operands.size() == 1) return Operands[0]; #ifndef NDEBUG Type *ETy = getEffectiveSCEVType(Operands[0]->getType()); for (unsigned i = 1, e = Operands.size(); i != e; ++i) { assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy && "SCEVAddRecExpr operand types don't match!"); assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer"); } for (unsigned i = 0, e = Operands.size(); i != e; ++i) assert(isLoopInvariant(Operands[i], L) && "SCEVAddRecExpr operand is not loop-invariant!"); #endif if (Operands.back()->isZero()) { Operands.pop_back(); return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X } // It's tempting to want to call getConstantMaxBackedgeTakenCount count here and // use that information to infer NUW and NSW flags. However, computing a // BE count requires calling getAddRecExpr, so we may not yet have a // meaningful BE count at this point (and if we don't, we'd be stuck // with a SCEVCouldNotCompute as the cached BE count). Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); // Canonicalize nested AddRecs in by nesting them in order of loop depth. 
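  // For example (when the loop-invariance checks below succeed),
  // {{S,+,T}<Inner>,+,U}<Outer> is rewritten as {{S,+,U}<Outer>,+,T}<Inner>,
  // so the recurrence over the deeper loop ends up outermost.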
if (const SCEVAddRecExpr *NestedAR = dyn_cast(Operands[0])) { const Loop *NestedLoop = NestedAR->getLoop(); if (L->contains(NestedLoop) ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) : (!NestedLoop->contains(L) && DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { SmallVector NestedOperands(NestedAR->operands()); Operands[0] = NestedAR->getStart(); // AddRecs require their operands be loop-invariant with respect to their // loops. Don't perform this transformation if it would break this // requirement. bool AllInvariant = all_of( Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); }); if (AllInvariant) { // Create a recurrence for the outer loop with the same step size. // // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the // inner recurrence has the same property. SCEV::NoWrapFlags OuterFlags = maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) { return isLoopInvariant(Op, NestedLoop); }); if (AllInvariant) { // Ok, both add recurrences are valid after the transformation. // // The inner recurrence keeps its NW flag but only keeps NUW/NSW if // the outer recurrence has the same property. SCEV::NoWrapFlags InnerFlags = maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); } } // Reset Operands to its original state. Operands[0] = NestedAR; } } // Okay, it looks like we really DO need an addrec expr. Check to see if we // already have one, otherwise create a new one. return getOrCreateAddRecExpr(Operands, L, Flags); } const SCEV * ScalarEvolution::getGEPExpr(GEPOperator *GEP, const SmallVectorImpl &IndexExprs) { const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); // getSCEV(Base)->getType() has the same address space as Base->getType() // because SCEV::getType() preserves the address space. Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType()); const bool AssumeInBoundsFlags = [&]() { if (!GEP->isInBounds()) return false; // We'd like to propagate flags from the IR to the corresponding SCEV nodes, // but to do that, we have to ensure that said flag is valid in the entire // defined scope of the SCEV. auto *GEPI = dyn_cast(GEP); // TODO: non-instructions have global scope. We might be able to prove // some global scope cases return GEPI && isSCEVExprNeverPoison(GEPI); }(); SCEV::NoWrapFlags OffsetWrap = AssumeInBoundsFlags ? SCEV::FlagNSW : SCEV::FlagAnyWrap; Type *CurTy = GEP->getType(); bool FirstIter = true; SmallVector Offsets; for (const SCEV *IndexExpr : IndexExprs) { // Compute the (potentially symbolic) offset in bytes for this index. if (StructType *STy = dyn_cast(CurTy)) { // For a struct, add the member offset. ConstantInt *Index = cast(IndexExpr)->getValue(); unsigned FieldNo = Index->getZExtValue(); const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); Offsets.push_back(FieldOffset); // Update CurTy to the type of the field at Index. CurTy = STy->getTypeAtIndex(Index); } else { // Update CurTy to its element type. if (FirstIter) { assert(isa(CurTy) && "The first index of a GEP indexes a pointer"); CurTy = GEP->getSourceElementType(); FirstIter = false; } else { CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); } // For an array, add the element offset, explicitly scaled. const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); // Getelementptr indices are signed. 
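      // For example, an index of -1 into an array of i32 must contribute an
      // offset of -4 bytes, hence the sign extension of the index below.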
IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); // Multiply the index by the element size to compute the element offset. const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); Offsets.push_back(LocalOffset); } } // Handle degenerate case of GEP without offsets. if (Offsets.empty()) return BaseExpr; // Add the offsets together, assuming nsw if inbounds. const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); // Add the base address and the offset. We cannot use the nsw flag, as the // base address is unsigned. However, if we know that the offset is // non-negative, we can use nuw. SCEV::NoWrapFlags BaseWrap = AssumeInBoundsFlags && isKnownNonNegative(Offset) ? SCEV::FlagNUW : SCEV::FlagAnyWrap; auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap); assert(BaseExpr->getType() == GEPExpr->getType() && "GEP should not change type mid-flight."); return GEPExpr; } SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, ArrayRef Ops) { FoldingSetNodeID ID; ID.AddInteger(SCEVType); for (const SCEV *Op : Ops) ID.AddPointer(Op); void *IP = nullptr; return UniqueSCEVs.FindNodeOrInsertPos(ID, IP); } const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap; return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); } const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl &Ops) { assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!"); assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); for (unsigned i = 1, e = Ops.size(); i != e; ++i) { assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && "Operand types don't match!"); assert(Ops[0]->getType()->isPointerTy() == Ops[i]->getType()->isPointerTy() && "min/max should be consistently pointerish"); } #endif bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; // Sort by complexity, this groups all similar expression types together. GroupByComplexity(Ops, &LI, DT); // Check if we have created the same expression before. if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) { return S; } // If there are any constants, fold them together. unsigned Idx = 0; if (const SCEVConstant *LHSC = dyn_cast(Ops[0])) { ++Idx; assert(Idx < Ops.size()); auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { if (Kind == scSMaxExpr) return APIntOps::smax(LHS, RHS); else if (Kind == scSMinExpr) return APIntOps::smin(LHS, RHS); else if (Kind == scUMaxExpr) return APIntOps::umax(LHS, RHS); else if (Kind == scUMinExpr) return APIntOps::umin(LHS, RHS); llvm_unreachable("Unknown SCEV min/max opcode"); }; while (const SCEVConstant *RHSC = dyn_cast(Ops[Idx])) { // We found two constants, fold them together! ConstantInt *Fold = ConstantInt::get( getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); Ops[0] = getConstant(Fold); Ops.erase(Ops.begin()+1); // Erase the folded element if (Ops.size() == 1) return Ops[0]; LHSC = cast(Ops[0]); } bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); if (IsMax ? IsMinV : IsMaxV) { // If we are left with a constant minimum(/maximum)-int, strip it off. Ops.erase(Ops.begin()); --Idx; } else if (IsMax ? IsMaxV : IsMinV) { // If we have a max(/min) with a constant maximum(/minimum)-int, // it will always be the extremum. 
return LHSC; } if (Ops.size() == 1) return Ops[0]; } // Find the first operation of the same kind while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) ++Idx; // Check to see if one of the operands is of the same kind. If so, expand its // operands onto our operand list, and recurse to simplify. if (Idx < Ops.size()) { bool DeletedAny = false; while (Ops[Idx]->getSCEVType() == Kind) { const SCEVMinMaxExpr *SMME = cast(Ops[Idx]); Ops.erase(Ops.begin()+Idx); Ops.append(SMME->op_begin(), SMME->op_end()); DeletedAny = true; } if (DeletedAny) return getMinMaxExpr(Kind, Ops); } // Okay, check to see if the same value occurs in the operand list twice. If // so, delete one. Since we sorted the list, these values are required to // be adjacent. llvm::CmpInst::Predicate GEPred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; llvm::CmpInst::Predicate LEPred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { if (Ops[i] == Ops[i + 1] || isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { // X op Y op Y --> X op Y // X op Y --> X, if we know X, Y are ordered appropriately Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); --i; --e; } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], Ops[i + 1])) { // X op Y --> Y, if we know X, Y are ordered appropriately Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); --i; --e; } } if (Ops.size() == 1) return Ops[0]; assert(!Ops.empty() && "Reduced smax down to nothing!"); // Okay, it looks like we really DO need an expr. Check to see if we // already have one, otherwise create a new one. FoldingSetNodeID ID; ID.AddInteger(Kind); for (unsigned i = 0, e = Ops.size(); i != e; ++i) ID.AddPointer(Ops[i]); void *IP = nullptr; const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP); if (ExistingSCEV) return ExistingSCEV; const SCEV **O = SCEVAllocator.Allocate(Ops.size()); std::uninitialized_copy(Ops.begin(), Ops.end(), O); SCEV *S = new (SCEVAllocator) SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size()); UniqueSCEVs.InsertNode(S, IP); registerUser(S, Ops); return S; } namespace { class SCEVSequentialMinMaxDeduplicatingVisitor final : public SCEVVisitor> { using RetVal = Optional; using Base = SCEVVisitor; ScalarEvolution &SE; const SCEVTypes RootKind; // Must be a sequential min/max expression. const SCEVTypes NonSequentialRootKind; // Non-sequential variant of RootKind. SmallPtrSet SeenOps; bool canRecurseInto(SCEVTypes Kind) const { // We can only recurse into the SCEV expression of the same effective type // as the type of our root SCEV expression. return RootKind == Kind || NonSequentialRootKind == Kind; }; RetVal visitAnyMinMaxExpr(const SCEV *S) { assert((isa(S) || isa(S)) && "Only for min/max expressions."); SCEVTypes Kind = S->getSCEVType(); if (!canRecurseInto(Kind)) return S; auto *NAry = cast(S); SmallVector NewOps; bool Changed = visit(Kind, makeArrayRef(NAry->op_begin(), NAry->op_end()), NewOps); if (!Changed) return S; if (NewOps.empty()) return None; return isa(S) ? SE.getSequentialMinMaxExpr(Kind, NewOps) : SE.getMinMaxExpr(Kind, NewOps); } RetVal visit(const SCEV *S) { // Has the whole operand been seen already? 
if (!SeenOps.insert(S).second) return None; return Base::visit(S); } public: SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE, SCEVTypes RootKind) : SE(SE), RootKind(RootKind), NonSequentialRootKind( SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType( RootKind)) {} bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef OrigOps, SmallVectorImpl &NewOps) { bool Changed = false; SmallVector Ops; Ops.reserve(OrigOps.size()); for (const SCEV *Op : OrigOps) { RetVal NewOp = visit(Op); if (NewOp != Op) Changed = true; if (NewOp) Ops.emplace_back(*NewOp); } if (Changed) NewOps = std::move(Ops); return Changed; } RetVal visitConstant(const SCEVConstant *Constant) { return Constant; } RetVal visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) { return Expr; } RetVal visitTruncateExpr(const SCEVTruncateExpr *Expr) { return Expr; } RetVal visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { return Expr; } RetVal visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { return Expr; } RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; } RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; } RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; } RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; } RetVal visitSMaxExpr(const SCEVSMaxExpr *Expr) { return visitAnyMinMaxExpr(Expr); } RetVal visitUMaxExpr(const SCEVUMaxExpr *Expr) { return visitAnyMinMaxExpr(Expr); } RetVal visitSMinExpr(const SCEVSMinExpr *Expr) { return visitAnyMinMaxExpr(Expr); } RetVal visitUMinExpr(const SCEVUMinExpr *Expr) { return visitAnyMinMaxExpr(Expr); } RetVal visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) { return visitAnyMinMaxExpr(Expr); } RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; } RetVal visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return Expr; } }; } // namespace /// Return true if V is poison given that AssumedPoison is already poison. static bool impliesPoison(const SCEV *AssumedPoison, const SCEV *S) { // The only way poison may be introduced in a SCEV expression is from a // poison SCEVUnknown (ConstantExprs are also represented as SCEVUnknown, // not SCEVConstant). Notably, nowrap flags in SCEV nodes can *not* // introduce poison -- they encode guaranteed, non-speculated knowledge. // // Additionally, all SCEV nodes propagate poison from inputs to outputs, // with the notable exception of umin_seq, where only poison from the first // operand is (unconditionally) propagated. struct SCEVPoisonCollector { bool LookThroughSeq; SmallPtrSet MaybePoison; SCEVPoisonCollector(bool LookThroughSeq) : LookThroughSeq(LookThroughSeq) {} bool follow(const SCEV *S) { // TODO: We can always follow the first operand, but the SCEVTraversal // API doesn't support this. if (!LookThroughSeq && isa(S)) return false; if (auto *SU = dyn_cast(S)) { if (!isGuaranteedNotToBePoison(SU->getValue())) MaybePoison.insert(S); } return true; } bool isDone() const { return false; } }; // First collect all SCEVs that might result in AssumedPoison to be poison. // We need to look through umin_seq here, because we want to find all SCEVs // that *might* result in poison, not only those that are *required* to. SCEVPoisonCollector PC1(/* LookThroughSeq */ true); visitAll(AssumedPoison, PC1); // AssumedPoison is never poison. As the assumption is false, the implication // is true. Don't bother walking the other SCEV in this case. if (PC1.MaybePoison.empty()) return true; // Collect all SCEVs in S that, if poison, *will* result in S being poison // as well. 
We cannot look through umin_seq here, as its argument only *may* // make the result poison. SCEVPoisonCollector PC2(/* LookThroughSeq */ false); visitAll(S, PC2); // Make sure that no matter which SCEV in PC1.MaybePoison is actually poison, // it will also make S poison by being part of PC2.MaybePoison. return all_of(PC1.MaybePoison, [&](const SCEV *S) { return PC2.MaybePoison.contains(S); }); } const SCEV * ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl &Ops) { assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) && "Not a SCEVSequentialMinMaxExpr!"); assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); for (unsigned i = 1, e = Ops.size(); i != e; ++i) { assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && "Operand types don't match!"); assert(Ops[0]->getType()->isPointerTy() == Ops[i]->getType()->isPointerTy() && "min/max should be consistently pointerish"); } #endif // Note that SCEVSequentialMinMaxExpr is *NOT* commutative, // so we can *NOT* do any kind of sorting of the expressions! // Check if we have created the same expression before. if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) return S; // FIXME: there are *some* simplifications that we can do here. // Keep only the first instance of an operand. { SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind); bool Changed = Deduplicator.visit(Kind, Ops, Ops); if (Changed) return getSequentialMinMaxExpr(Kind, Ops); } // Check to see if one of the operands is of the same kind. If so, expand its // operands onto our operand list, and recurse to simplify. { unsigned Idx = 0; bool DeletedAny = false; while (Idx < Ops.size()) { if (Ops[Idx]->getSCEVType() != Kind) { ++Idx; continue; } const auto *SMME = cast(Ops[Idx]); Ops.erase(Ops.begin() + Idx); Ops.insert(Ops.begin() + Idx, SMME->op_begin(), SMME->op_end()); DeletedAny = true; } if (DeletedAny) return getSequentialMinMaxExpr(Kind, Ops); } const SCEV *SaturationPoint; ICmpInst::Predicate Pred; switch (Kind) { case scSequentialUMinExpr: SaturationPoint = getZero(Ops[0]->getType()); Pred = ICmpInst::ICMP_ULE; break; default: llvm_unreachable("Not a sequential min/max type."); } for (unsigned i = 1, e = Ops.size(); i != e; ++i) { // We can replace %x umin_seq %y with %x umin %y if either: // * %y being poison implies %x is also poison. // * %x cannot be the saturating value (e.g. zero for umin). if (::impliesPoison(Ops[i], Ops[i - 1]) || isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, Ops[i - 1], SaturationPoint)) { SmallVector SeqOps = {Ops[i - 1], Ops[i]}; Ops[i - 1] = getMinMaxExpr( SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(Kind), SeqOps); Ops.erase(Ops.begin() + i); return getSequentialMinMaxExpr(Kind, Ops); } // Fold %x umin_seq %y to %x if %x ule %y. // TODO: We might be able to prove the predicate for a later operand. if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) { Ops.erase(Ops.begin() + i); return getSequentialMinMaxExpr(Kind, Ops); } } // Okay, it looks like we really DO need an expr. Check to see if we // already have one, otherwise create a new one. 
FoldingSetNodeID ID; ID.AddInteger(Kind); for (unsigned i = 0, e = Ops.size(); i != e; ++i) ID.AddPointer(Ops[i]); void *IP = nullptr; const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP); if (ExistingSCEV) return ExistingSCEV; const SCEV **O = SCEVAllocator.Allocate(Ops.size()); std::uninitialized_copy(Ops.begin(), Ops.end(), O); SCEV *S = new (SCEVAllocator) SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size()); UniqueSCEVs.InsertNode(S, IP); registerUser(S, Ops); return S; } const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) { SmallVector Ops = {LHS, RHS}; return getSMaxExpr(Ops); } const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl &Ops) { return getMinMaxExpr(scSMaxExpr, Ops); } const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) { SmallVector Ops = {LHS, RHS}; return getUMaxExpr(Ops); } const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl &Ops) { return getMinMaxExpr(scUMaxExpr, Ops); } const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, const SCEV *RHS) { SmallVector Ops = { LHS, RHS }; return getSMinExpr(Ops); } const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl &Ops) { return getMinMaxExpr(scSMinExpr, Ops); } const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential) { SmallVector Ops = { LHS, RHS }; return getUMinExpr(Ops, Sequential); } const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl &Ops, bool Sequential) { return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops) : getMinMaxExpr(scUMinExpr, Ops); } const SCEV * ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy, ScalableVectorType *ScalableTy) { Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo()); Constant *One = ConstantInt::get(IntTy, 1); Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One); // Note that the expression we created is the final expression, we don't // want to simplify it any further Also, if we call a normal getSCEV(), // we'll end up in an endless recursion. So just create an SCEVUnknown. return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy)); } const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { if (auto *ScalableAllocTy = dyn_cast(AllocTy)) return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy); // We can bypass creating a target-independent constant expression and then // folding it back into a ConstantInt. This is just a compile-time // optimization. return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); } const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) { if (auto *ScalableStoreTy = dyn_cast(StoreTy)) return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy); // We can bypass creating a target-independent constant expression and then // folding it back into a ConstantInt. This is just a compile-time // optimization. return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy)); } const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo) { // We can bypass creating a target-independent constant expression and then // folding it back into a ConstantInt. This is just a compile-time // optimization. return getConstant( IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); } const SCEV *ScalarEvolution::getUnknown(Value *V) { // Don't attempt to do anything other than create a SCEVUnknown object // here. 
createSCEV only calls getUnknown after checking for all other // interesting possibilities, and any other code that calls getUnknown // is doing so in order to hide a value from SCEV canonicalization. FoldingSetNodeID ID; ID.AddInteger(scUnknown); ID.AddPointer(V); void *IP = nullptr; if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { assert(cast(S)->getValue() == V && "Stale SCEVUnknown in uniquing map!"); return S; } SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, FirstUnknown); FirstUnknown = cast(S); UniqueSCEVs.InsertNode(S, IP); return S; } //===----------------------------------------------------------------------===// // Basic SCEV Analysis and PHI Idiom Recognition Code // /// Test if values of the given type are analyzable within the SCEV /// framework. This primarily includes integer types, and it can optionally /// include pointer types if the ScalarEvolution class has access to /// target-specific information. bool ScalarEvolution::isSCEVable(Type *Ty) const { // Integers and pointers are always SCEVable. return Ty->isIntOrPtrTy(); } /// Return the size in bits of the specified type, for which isSCEVable must /// return true. uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { assert(isSCEVable(Ty) && "Type is not SCEVable!"); if (Ty->isPointerTy()) return getDataLayout().getIndexTypeSizeInBits(Ty); return getDataLayout().getTypeSizeInBits(Ty); } /// Return a type with the same bitwidth as the given type and which represents /// how SCEV will treat the given type, for which isSCEVable must return /// true. For pointer types, this is the pointer index sized integer type. Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { assert(isSCEVable(Ty) && "Type is not SCEVable!"); if (Ty->isIntegerTy()) return Ty; // The only other support type is pointer. assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); return getDataLayout().getIndexType(Ty); } Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const { return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2; } bool ScalarEvolution::instructionCouldExistWitthOperands(const SCEV *A, const SCEV *B) { /// For a valid use point to exist, the defining scope of one operand /// must dominate the other. bool PreciseA, PreciseB; auto *ScopeA = getDefiningScopeBound({A}, PreciseA); auto *ScopeB = getDefiningScopeBound({B}, PreciseB); if (!PreciseA || !PreciseB) // Can't tell. return false; return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) || DT.dominates(ScopeB, ScopeA); } const SCEV *ScalarEvolution::getCouldNotCompute() { return CouldNotCompute.get(); } bool ScalarEvolution::checkValidity(const SCEV *S) const { bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { auto *SU = dyn_cast(S); return SU && SU->getValue() == nullptr; }); return !ContainsNulls; } bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { HasRecMapType::iterator I = HasRecMap.find(S); if (I != HasRecMap.end()) return I->second; bool FoundAddRec = SCEVExprContains(S, [](const SCEV *S) { return isa(S); }); HasRecMap.insert({S, FoundAddRec}); return FoundAddRec; } /// Return the ValueOffsetPair set for \p S. \p S can be represented /// by the value and offset from any ValueOffsetPair in the set. ArrayRef ScalarEvolution::getSCEVValues(const SCEV *S) { ExprValueMapType::iterator SI = ExprValueMap.find_as(S); if (SI == ExprValueMap.end()) return None; #ifndef NDEBUG if (VerifySCEVMap) { // Check there is no dangling Value in the set returned. 
for (Value *V : SI->second) assert(ValueExprMap.count(V)); } #endif return SI->second.getArrayRef(); } /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) /// cannot be used separately. eraseValueFromMap should be used to remove /// V from ValueExprMap and ExprValueMap at the same time. void ScalarEvolution::eraseValueFromMap(Value *V) { ValueExprMapType::iterator I = ValueExprMap.find_as(V); if (I != ValueExprMap.end()) { auto EVIt = ExprValueMap.find(I->second); bool Removed = EVIt->second.remove(V); (void) Removed; assert(Removed && "Value not in ExprValueMap?"); ValueExprMap.erase(I); } } void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) { // A recursive query may have already computed the SCEV. It should be // equivalent, but may not necessarily be exactly the same, e.g. due to lazily // inferred nowrap flags. auto It = ValueExprMap.find_as(V); if (It == ValueExprMap.end()) { ValueExprMap.insert({SCEVCallbackVH(V, this), S}); ExprValueMap[S].insert(V); } } /// Return an existing SCEV if it exists, otherwise analyze the expression and /// create a new one. const SCEV *ScalarEvolution::getSCEV(Value *V) { assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); if (const SCEV *S = getExistingSCEV(V)) return S; return createSCEVIter(V); } const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); ValueExprMapType::iterator I = ValueExprMap.find_as(V); if (I != ValueExprMap.end()) { const SCEV *S = I->second; assert(checkValidity(S) && "existing SCEV has not been properly invalidated"); return S; } return nullptr; } /// Return a SCEV corresponding to -V = -1*V const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags) { if (const SCEVConstant *VC = dyn_cast(V)) return getConstant( cast(ConstantExpr::getNeg(VC->getValue()))); Type *Ty = V->getType(); Ty = getEffectiveSCEVType(Ty); return getMulExpr(V, getMinusOne(Ty), Flags); } /// If Expr computes ~A, return A else return nullptr static const SCEV *MatchNotExpr(const SCEV *Expr) { const SCEVAddExpr *Add = dyn_cast(Expr); if (!Add || Add->getNumOperands() != 2 || !Add->getOperand(0)->isAllOnesValue()) return nullptr; const SCEVMulExpr *AddRHS = dyn_cast(Add->getOperand(1)); if (!AddRHS || AddRHS->getNumOperands() != 2 || !AddRHS->getOperand(0)->isAllOnesValue()) return nullptr; return AddRHS->getOperand(1); } /// Return a SCEV corresponding to ~V = -1-V const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { assert(!V->getType()->isPointerTy() && "Can't negate pointer"); if (const SCEVConstant *VC = dyn_cast(V)) return getConstant( cast(ConstantExpr::getNot(VC->getValue()))); // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) if (const SCEVMinMaxExpr *MME = dyn_cast(V)) { auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { SmallVector MatchedOperands; for (const SCEV *Operand : MME->operands()) { const SCEV *Matched = MatchNotExpr(Operand); if (!Matched) return (const SCEV *)nullptr; MatchedOperands.push_back(Matched); } return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), MatchedOperands); }; if (const SCEV *Replaced = MatchMinMaxNegation(MME)) return Replaced; } Type *Ty = V->getType(); Ty = getEffectiveSCEVType(Ty); return getMinusSCEV(getMinusOne(Ty), V); } const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) { assert(P->getType()->isPointerTy()); if (auto *AddRec = dyn_cast(P)) { // The base of an AddRec is the first operand. 
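    // For example, removePointerBase({%p,+,4}<L>) yields {0,+,4}<L>: the
    // pointer start is stripped recursively down to its non-pointer offset.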
SmallVector Ops{AddRec->operands()}; Ops[0] = removePointerBase(Ops[0]); // Don't try to transfer nowrap flags for now. We could in some cases // (for example, if pointer operand of the AddRec is a SCEVUnknown). return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap); } if (auto *Add = dyn_cast(P)) { // The base of an Add is the pointer operand. SmallVector Ops{Add->operands()}; const SCEV **PtrOp = nullptr; for (const SCEV *&AddOp : Ops) { if (AddOp->getType()->isPointerTy()) { assert(!PtrOp && "Cannot have multiple pointer ops"); PtrOp = &AddOp; } } *PtrOp = removePointerBase(*PtrOp); // Don't try to transfer nowrap flags for now. We could in some cases // (for example, if the pointer operand of the Add is a SCEVUnknown). return getAddExpr(Ops); } // Any other expression must be a pointer base. return getZero(P->getType()); } const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags, unsigned Depth) { // Fast path: X - X --> 0. if (LHS == RHS) return getZero(LHS->getType()); // If we subtract two pointers with different pointer bases, bail. // Eventually, we're going to add an assertion to getMulExpr that we // can't multiply by a pointer. if (RHS->getType()->isPointerTy()) { if (!LHS->getType()->isPointerTy() || getPointerBase(LHS) != getPointerBase(RHS)) return getCouldNotCompute(); LHS = removePointerBase(LHS); RHS = removePointerBase(RHS); } // We represent LHS - RHS as LHS + (-1)*RHS. This transformation // makes it so that we cannot make much use of NUW. auto AddFlags = SCEV::FlagAnyWrap; const bool RHSIsNotMinSigned = !getSignedRangeMin(RHS).isMinSignedValue(); if (hasFlags(Flags, SCEV::FlagNSW)) { // Let M be the minimum representable signed value. Then (-1)*RHS // signed-wraps if and only if RHS is M. That can happen even for // a NSW subtraction because e.g. (-1)*M signed-wraps even though // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + // (-1)*RHS, we need to prove that RHS != M. // // If LHS is non-negative and we know that LHS - RHS does not // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap // either by proving that RHS > M or that LHS >= 0. if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { AddFlags = SCEV::FlagNSW; } } // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - // RHS is NSW and LHS >= 0. // // The difficulty here is that the NSW flag may have been proven // relative to a loop that is to be found in a recurrence in LHS and // not in RHS. Applying NSW to (-1)*M may then let the NSW have a // larger scope than intended. auto NegFlags = RHSIsNotMinSigned ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap; return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth); } const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth) { Type *SrcTy = V->getType(); assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && "Cannot truncate or zero extend with non-integer arguments!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) return getTruncateExpr(V, Ty, Depth); return getZeroExtendExpr(V, Ty, Depth); } const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth) { Type *SrcTy = V->getType(); assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && "Cannot truncate or zero extend with non-integer arguments!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) return getTruncateExpr(V, Ty, Depth); return getSignExtendExpr(V, Ty, Depth); } const SCEV * ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) { Type *SrcTy = V->getType(); assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && "Cannot noop or zero extend with non-integer arguments!"); assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && "getNoopOrZeroExtend cannot truncate!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion return getZeroExtendExpr(V, Ty); } const SCEV * ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) { Type *SrcTy = V->getType(); assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && "Cannot noop or sign extend with non-integer arguments!"); assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && "getNoopOrSignExtend cannot truncate!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion return getSignExtendExpr(V, Ty); } const SCEV * ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { Type *SrcTy = V->getType(); assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && "Cannot noop or any extend with non-integer arguments!"); assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && "getNoopOrAnyExtend cannot truncate!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion return getAnyExtendExpr(V, Ty); } const SCEV * ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { Type *SrcTy = V->getType(); assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && "Cannot truncate or noop with non-integer arguments!"); assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && "getTruncateOrNoop cannot extend!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion return getTruncateExpr(V, Ty); } const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS) { const SCEV *PromotedLHS = LHS; const SCEV *PromotedRHS = RHS; if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); else PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); return getUMaxExpr(PromotedLHS, PromotedRHS); } const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential) { SmallVector Ops = { LHS, RHS }; return getUMinFromMismatchedTypes(Ops, Sequential); } const SCEV * ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl &Ops, bool Sequential) { assert(!Ops.empty() && "At least one operand must be!"); // Trivial case. 
if (Ops.size() == 1) return Ops[0]; // Find the max type first. Type *MaxType = nullptr; for (const auto *S : Ops) if (MaxType) MaxType = getWiderType(MaxType, S->getType()); else MaxType = S->getType(); assert(MaxType && "Failed to find maximum type!"); // Extend all ops to max type. SmallVector PromotedOps; for (const auto *S : Ops) PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); // Generate umin. return getUMinExpr(PromotedOps, Sequential); } const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { // A pointer operand may evaluate to a nonpointer expression, such as null. if (!V->getType()->isPointerTy()) return V; while (true) { if (auto *AddRec = dyn_cast(V)) { V = AddRec->getStart(); } else if (auto *Add = dyn_cast(V)) { const SCEV *PtrOp = nullptr; for (const SCEV *AddOp : Add->operands()) { if (AddOp->getType()->isPointerTy()) { assert(!PtrOp && "Cannot have multiple pointer ops"); PtrOp = AddOp; } } assert(PtrOp && "Must have pointer op"); V = PtrOp; } else // Not something we can look further into. return V; } } /// Push users of the given Instruction onto the given Worklist. static void PushDefUseChildren(Instruction *I, SmallVectorImpl &Worklist, SmallPtrSetImpl &Visited) { // Push the def-use children onto the Worklist stack. for (User *U : I->users()) { auto *UserInsn = cast(U); if (Visited.insert(UserInsn).second) Worklist.push_back(UserInsn); } } namespace { /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start /// expression in case its Loop is L. If it is not L then /// if IgnoreOtherLoops is true then use AddRec itself /// otherwise rewrite cannot be done. /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. class SCEVInitRewriter : public SCEVRewriteVisitor { public: static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, bool IgnoreOtherLoops = true) { SCEVInitRewriter Rewriter(L, SE); const SCEV *Result = Rewriter.visit(S); if (Rewriter.hasSeenLoopVariantSCEVUnknown()) return SE.getCouldNotCompute(); return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops ? SE.getCouldNotCompute() : Result; } const SCEV *visitUnknown(const SCEVUnknown *Expr) { if (!SE.isLoopInvariant(Expr, L)) SeenLoopVariantSCEVUnknown = true; return Expr; } const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { // Only re-write AddRecExprs for this loop. if (Expr->getLoop() == L) return Expr->getStart(); SeenOtherLoops = true; return Expr; } bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } bool hasSeenOtherLoops() { return SeenOtherLoops; } private: explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) : SCEVRewriteVisitor(SE), L(L) {} const Loop *L; bool SeenLoopVariantSCEVUnknown = false; bool SeenOtherLoops = false; }; /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post /// increment expression in case its Loop is L. If it is not L then /// use AddRec itself. /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. class SCEVPostIncRewriter : public SCEVRewriteVisitor { public: static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { SCEVPostIncRewriter Rewriter(L, SE); const SCEV *Result = Rewriter.visit(S); return Rewriter.hasSeenLoopVariantSCEVUnknown() ? 
SE.getCouldNotCompute() : Result; } const SCEV *visitUnknown(const SCEVUnknown *Expr) { if (!SE.isLoopInvariant(Expr, L)) SeenLoopVariantSCEVUnknown = true; return Expr; } const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { // Only re-write AddRecExprs for this loop. if (Expr->getLoop() == L) return Expr->getPostIncExpr(SE); SeenOtherLoops = true; return Expr; } bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } bool hasSeenOtherLoops() { return SeenOtherLoops; } private: explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) : SCEVRewriteVisitor(SE), L(L) {} const Loop *L; bool SeenLoopVariantSCEVUnknown = false; bool SeenOtherLoops = false; }; /// This class evaluates the compare condition by matching it against the /// condition of loop latch. If there is a match we assume a true value /// for the condition while building SCEV nodes. class SCEVBackedgeConditionFolder : public SCEVRewriteVisitor { public: static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { bool IsPosBECond = false; Value *BECond = nullptr; if (BasicBlock *Latch = L->getLoopLatch()) { BranchInst *BI = dyn_cast(Latch->getTerminator()); if (BI && BI->isConditional()) { assert(BI->getSuccessor(0) != BI->getSuccessor(1) && "Both outgoing branches should not target same header!"); BECond = BI->getCondition(); IsPosBECond = BI->getSuccessor(0) == L->getHeader(); } else { return S; } } SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); return Rewriter.visit(S); } const SCEV *visitUnknown(const SCEVUnknown *Expr) { const SCEV *Result = Expr; bool InvariantF = SE.isLoopInvariant(Expr, L); if (!InvariantF) { Instruction *I = cast(Expr->getValue()); switch (I->getOpcode()) { case Instruction::Select: { SelectInst *SI = cast(I); Optional Res = compareWithBackedgeCondition(SI->getCondition()); if (Res) { bool IsOne = cast(Res.value())->getValue()->isOne(); Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); } break; } default: { Optional Res = compareWithBackedgeCondition(I); if (Res) Result = Res.value(); break; } } } return Result; } private: explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, bool IsPosBECond, ScalarEvolution &SE) : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), IsPositiveBECond(IsPosBECond) {} Optional compareWithBackedgeCondition(Value *IC); const Loop *L; /// Loop back condition. Value *BackedgeCond = nullptr; /// Set to true if loop back is on positive branch condition. bool IsPositiveBECond; }; Optional SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { // If value matches the backedge condition for loop latch, // then return a constant evolution node based on loopback // branch taken. if (BackedgeCond == IC) return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) : SE.getZero(Type::getInt1Ty(SE.getContext())); return None; } class SCEVShiftRewriter : public SCEVRewriteVisitor { public: static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { SCEVShiftRewriter Rewriter(L, SE); const SCEV *Result = Rewriter.visit(S); return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); } const SCEV *visitUnknown(const SCEVUnknown *Expr) { // Only allow AddRecExprs for this loop. 
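    // (Illustrative clarification, not from the original source: loop-invariant
    //  unknowns can be kept as-is, but a value that varies in L without being
    //  an AddRec for L cannot be shifted back by one iteration, so the rewrite
    //  is marked invalid.)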
if (!SE.isLoopInvariant(Expr, L)) Valid = false; return Expr; } const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { if (Expr->getLoop() == L && Expr->isAffine()) return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); Valid = false; return Expr; } bool isValid() { return Valid; } private: explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) : SCEVRewriteVisitor(SE), L(L) {} const Loop *L; bool Valid = true; }; } // end anonymous namespace SCEV::NoWrapFlags ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { if (!AR->isAffine()) return SCEV::FlagAnyWrap; using OBO = OverflowingBinaryOperator; SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; if (!AR->hasNoSignedWrap()) { ConstantRange AddRecRange = getSignedRange(AR); ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( Instruction::Add, IncRange, OBO::NoSignedWrap); if (NSWRegion.contains(AddRecRange)) Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); } if (!AR->hasNoUnsignedWrap()) { ConstantRange AddRecRange = getUnsignedRange(AR); ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( Instruction::Add, IncRange, OBO::NoUnsignedWrap); if (NUWRegion.contains(AddRecRange)) Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); } return Result; } SCEV::NoWrapFlags ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) { SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); if (AR->hasNoSignedWrap()) return Result; if (!AR->isAffine()) return Result; const SCEV *Step = AR->getStepRecurrence(*this); const Loop *L = AR->getLoop(); // Check whether the backedge-taken count is SCEVCouldNotCompute. // Note that this serves two purposes: It filters out loops that are // simply not analyzable, and it covers the case where this code is // being called from within backedge-taken count analysis, such that // attempting to ask for the backedge-taken count would likely result // in infinite recursion. In the later case, the analysis code will // cope with a conservative value, and it will take care to purge // that value once it has finished. const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); // Normally, in the cases we can prove no-overflow via a // backedge guarding condition, we can also compute a backedge // taken count for the loop. The exceptions are assumptions and // guards present in the loop -- SCEV is not great at exploiting // these to compute max backedge taken counts, but can still use // these to prove lack of overflow. Use this fact to avoid // doing extra work that may not pay off. if (isa(MaxBECount) && !HasGuards && AC.assumptions().empty()) return Result; // If the backedge is guarded by a comparison with the pre-inc value the // addrec is safe. Also, if the entry is guarded by a comparison with the // start value and the backedge is guarded by a comparison with the post-inc // value, the addrec is safe. 
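  // Illustrative example (not from the original source): for {%i,+,1}<%L>,
  // getSignedOverflowLimitForStep below returns a limit such that proving
  // "AR slt limit" on every backedge-taken iteration guarantees the +1 step
  // can never move past SINT_MAX, which is enough to set the NSW flag.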
ICmpInst::Predicate Pred; const SCEV *OverflowLimit = getSignedOverflowLimitForStep(Step, &Pred, this); if (OverflowLimit && (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { Result = setFlags(Result, SCEV::FlagNSW); } return Result; } SCEV::NoWrapFlags ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) { SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); if (AR->hasNoUnsignedWrap()) return Result; if (!AR->isAffine()) return Result; const SCEV *Step = AR->getStepRecurrence(*this); unsigned BitWidth = getTypeSizeInBits(AR->getType()); const Loop *L = AR->getLoop(); // Check whether the backedge-taken count is SCEVCouldNotCompute. // Note that this serves two purposes: It filters out loops that are // simply not analyzable, and it covers the case where this code is // being called from within backedge-taken count analysis, such that // attempting to ask for the backedge-taken count would likely result // in infinite recursion. In the later case, the analysis code will // cope with a conservative value, and it will take care to purge // that value once it has finished. const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); // Normally, in the cases we can prove no-overflow via a // backedge guarding condition, we can also compute a backedge // taken count for the loop. The exceptions are assumptions and // guards present in the loop -- SCEV is not great at exploiting // these to compute max backedge taken counts, but can still use // these to prove lack of overflow. Use this fact to avoid // doing extra work that may not pay off. if (isa(MaxBECount) && !HasGuards && AC.assumptions().empty()) return Result; // If the backedge is guarded by a comparison with the pre-inc value the // addrec is safe. Also, if the entry is guarded by a comparison with the // start value and the backedge is guarded by a comparison with the post-inc // value, the addrec is safe. if (isKnownPositive(Step)) { const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - getUnsignedRangeMax(Step)); if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { Result = setFlags(Result, SCEV::FlagNUW); } } return Result; } namespace { /// Represents an abstract binary operation. This may exist as a /// normal instruction or constant expression, or may have been /// derived from an expression tree. struct BinaryOp { unsigned Opcode; Value *LHS; Value *RHS; bool IsNSW = false; bool IsNUW = false; /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or /// constant expression. Operator *Op = nullptr; explicit BinaryOp(Operator *Op) : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), Op(Op) { if (auto *OBO = dyn_cast(Op)) { IsNSW = OBO->hasNoSignedWrap(); IsNUW = OBO->hasNoUnsignedWrap(); } } explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, bool IsNUW = false) : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {} }; } // end anonymous namespace /// Try to map \p V into a BinaryOp, and return \c None on failure. static Optional MatchBinaryOp(Value *V, DominatorTree &DT) { auto *Op = dyn_cast(V); if (!Op) return None; // Implementation detail: all the cleverness here should happen without // creating new SCEV expressions -- our caller knowns tricks to avoid creating // SCEV expressions when possible, and we should not break that. 
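  // A few illustrative mappings this produces (examples, not from the original
  // source), assuming i32 operands:
  //   xor %x, 0x80000000   --> add %x, 0x80000000   (sign-mask xor is an add)
  //   lshr %x, 3           --> udiv %x, 8           (shift becomes a divide)
  //   extractvalue (uadd.with.overflow %a, %b), 0
  //                        --> add nuw %a, %b       (when the overflow bit
  //                                                   guards all uses)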
switch (Op->getOpcode()) { case Instruction::Add: case Instruction::Sub: case Instruction::Mul: case Instruction::UDiv: case Instruction::URem: case Instruction::And: case Instruction::Or: case Instruction::AShr: case Instruction::Shl: return BinaryOp(Op); case Instruction::Xor: if (auto *RHSC = dyn_cast(Op->getOperand(1))) // If the RHS of the xor is a signmask, then this is just an add. // Instcombine turns add of signmask into xor as a strength reduction step. if (RHSC->getValue().isSignMask()) return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); // Binary `xor` is a bit-wise `add`. if (V->getType()->isIntegerTy(1)) return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); return BinaryOp(Op); case Instruction::LShr: // Turn logical shift right of a constant into a unsigned divide. if (ConstantInt *SA = dyn_cast(Op->getOperand(1))) { uint32_t BitWidth = cast(Op->getType())->getBitWidth(); // If the shift count is not less than the bitwidth, the result of // the shift is undefined. Don't try to analyze it, because the // resolution chosen here may differ from the resolution chosen in // other parts of the compiler. if (SA->getValue().ult(BitWidth)) { Constant *X = ConstantInt::get(SA->getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue())); return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); } } return BinaryOp(Op); case Instruction::ExtractValue: { auto *EVI = cast(Op); if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) break; auto *WO = dyn_cast(EVI->getAggregateOperand()); if (!WO) break; Instruction::BinaryOps BinOp = WO->getBinaryOp(); bool Signed = WO->isSigned(); // TODO: Should add nuw/nsw flags for mul as well. if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT)) return BinaryOp(BinOp, WO->getLHS(), WO->getRHS()); // Now that we know that all uses of the arithmetic-result component of // CI are guarded by the overflow check, we can go ahead and pretend // that the arithmetic is non-overflowing. return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(), /* IsNSW = */ Signed, /* IsNUW = */ !Signed); } default: break; } // Recognise intrinsic loop.decrement.reg, and as this has exactly the same // semantics as a Sub, return a binary sub expression. if (auto *II = dyn_cast(V)) if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg) return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1)); return None; } /// Helper function to createAddRecFromPHIWithCasts. We have a phi /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the /// way. This function checks if \p Op, an operand of this SCEVAddExpr, /// follows one of the following patterns: /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) /// If the SCEV expression of \p Op conforms with one of the expected patterns /// we return the type of the truncation operation, and indicate whether the /// truncated type should be treated as signed/unsigned by setting /// \p Signed to true/false, respectively. static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE) { // The case where Op == SymbolicPHI (that is, with no type conversions on // the way) is handled by the regular add recurrence creating logic and // would have already been triggered in createAddRecForPHI. 
Reaching it here // means that createAddRecFromPHI had failed for this PHI before (e.g., // because one of the other operands of the SCEVAddExpr updating this PHI is // not invariant). // // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in // this case predicates that allow us to prove that Op == SymbolicPHI will // be added. if (Op == SymbolicPHI) return nullptr; unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); if (SourceBits != NewBits) return nullptr; const SCEVSignExtendExpr *SExt = dyn_cast(Op); const SCEVZeroExtendExpr *ZExt = dyn_cast(Op); if (!SExt && !ZExt) return nullptr; const SCEVTruncateExpr *Trunc = SExt ? dyn_cast(SExt->getOperand()) : dyn_cast(ZExt->getOperand()); if (!Trunc) return nullptr; const SCEV *X = Trunc->getOperand(); if (X != SymbolicPHI) return nullptr; Signed = SExt != nullptr; return Trunc->getType(); } static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { if (!PN->getType()->isIntegerTy()) return nullptr; const Loop *L = LI.getLoopFor(PN->getParent()); if (!L || L->getHeader() != PN->getParent()) return nullptr; return L; } // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the // computation that updates the phi follows the following pattern: // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum // which correspond to a phi->trunc->sext/zext->add->phi update chain. // If so, try to see if it can be rewritten as an AddRecExpr under some // Predicates. If successful, return them as a pair. Also cache the results // of the analysis. // // Example usage scenario: // Say the Rewriter is called for the following SCEV: // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) // where: // %X = phi i64 (%Start, %BEValue) // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), // and call this function with %SymbolicPHI = %X. // // The analysis will find that the value coming around the backedge has // the following SCEV: // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) // Upon concluding that this matches the desired pattern, the function // will return the pair {NewAddRec, SmallPredsVec} where: // NewAddRec = {%Start,+,%Step} // SmallPredsVec = {P1, P2, P3} as follows: // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)} Flags: // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) // The returned pair means that SymbolicPHI can be rewritten into NewAddRec // under the predicates {P1,P2,P3}. // This predicated rewrite will be cached in PredicatedSCEVRewrites: // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)} // // TODO's: // // 1) Extend the Induction descriptor to also support inductions that involve // casts: When needed (namely, when we are called in the context of the // vectorizer induction analysis), a Set of cast instructions will be // populated by this method, and provided back to isInductionPHI. This is // needed to allow the vectorizer to properly record them to be ignored by // the cost model and to avoid vectorizing them (otherwise these casts, // which are redundant under the runtime overflow checks, will be // vectorized, which can be costly). 
// // 2) Support additional induction/PHISCEV patterns: We also want to support // inductions where the sext-trunc / zext-trunc operations (partly) occur // after the induction update operation (the induction increment): // // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) // which correspond to a phi->add->trunc->sext/zext->phi update chain. // // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) // which correspond to a phi->trunc->add->sext/zext->phi update chain. // // 3) Outline common code with createAddRecFromPHI to avoid duplication. Optional>> ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { SmallVector Predicates; // *** Part1: Analyze if we have a phi-with-cast pattern for which we can // return an AddRec expression under some predicate. auto *PN = cast(SymbolicPHI->getValue()); const Loop *L = isIntegerLoopHeaderPHI(PN, LI); assert(L && "Expecting an integer loop header phi"); // The loop may have multiple entrances or multiple exits; we can analyze // this phi as an addrec if it has a unique entry value and a unique // backedge value. Value *BEValueV = nullptr, *StartValueV = nullptr; for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { Value *V = PN->getIncomingValue(i); if (L->contains(PN->getIncomingBlock(i))) { if (!BEValueV) { BEValueV = V; } else if (BEValueV != V) { BEValueV = nullptr; break; } } else if (!StartValueV) { StartValueV = V; } else if (StartValueV != V) { StartValueV = nullptr; break; } } if (!BEValueV || !StartValueV) return None; const SCEV *BEValue = getSCEV(BEValueV); // If the value coming around the backedge is an add with the symbolic // value we just inserted, possibly with casts that we can ignore under // an appropriate runtime guard, then we found a simple induction variable! const auto *Add = dyn_cast(BEValue); if (!Add) return None; // If there is a single occurrence of the symbolic value, possibly // casted, replace it with a recurrence. unsigned FoundIndex = Add->getNumOperands(); Type *TruncTy = nullptr; bool Signed; for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) if ((TruncTy = isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) if (FoundIndex == e) { FoundIndex = i; break; } if (FoundIndex == Add->getNumOperands()) return None; // Create an add with everything but the specified operand. SmallVector Ops; for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) if (i != FoundIndex) Ops.push_back(Add->getOperand(i)); const SCEV *Accum = getAddExpr(Ops); // The runtime checks will not be valid if the step amount is // varying inside the loop. if (!isLoopInvariant(Accum, L)) return None; // *** Part2: Create the predicates // Analysis was successful: we have a phi-with-cast pattern for which we // can return an AddRec expression under the following predicates: // // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) // fits within the truncated type (does not overflow) for i = 0 to n-1. 
// P2: An Equal predicate that guarantees that // Start = (Ext ix (Trunc iy (Start) to ix) to iy) // P3: An Equal predicate that guarantees that // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) // // As we next prove, the above predicates guarantee that: // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) // // // More formally, we want to prove that: // Expr(i+1) = Start + (i+1) * Accum // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum // // Given that: // 1) Expr(0) = Start // 2) Expr(1) = Start + Accum // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 // 3) Induction hypothesis (step i): // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum // // Proof: // Expr(i+1) = // = Start + (i+1)*Accum // = (Start + i*Accum) + Accum // = Expr(i) + Accum // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum // :: from step i // // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum // // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) // + (Ext ix (Trunc iy (Accum) to ix) to iy) // + Accum :: from P3 // // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) // // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum // // By induction, the same applies to all iterations 1<=i(PHISCEV)) { SCEVWrapPredicate::IncrementWrapFlags AddedFlags = Signed ? SCEVWrapPredicate::IncrementNSSW : SCEVWrapPredicate::IncrementNUSW; const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); Predicates.push_back(AddRecPred); } // Create the Equal Predicates P2,P3: // It is possible that the predicates P2 and/or P3 are computable at // compile time due to StartVal and/or Accum being constants. // If either one is, then we can check that now and escape if either P2 // or P3 is false. // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) // for each of StartVal and Accum auto getExtendedExpr = [&](const SCEV *Expr, bool CreateSignExtend) -> const SCEV * { assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); const SCEV *ExtendedExpr = CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType()) : getZeroExtendExpr(TruncatedExpr, Expr->getType()); return ExtendedExpr; }; // Given: // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy // = getExtendedExpr(Expr) // Determine whether the predicate P: Expr == ExtendedExpr // is known to be false at compile time auto PredIsKnownFalse = [&](const SCEV *Expr, const SCEV *ExtendedExpr) -> bool { return Expr != ExtendedExpr && isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr); }; const SCEV *StartExtended = getExtendedExpr(StartVal, Signed); if (PredIsKnownFalse(StartVal, StartExtended)) { LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";); return None; } // The Step is always Signed (because the overflow checks are either // NSSW or NUSW) const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true); if (PredIsKnownFalse(Accum, AccumExtended)) { LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";); return None; } auto AppendPredicate = [&](const SCEV *Expr, const SCEV *ExtendedExpr) -> void { if (Expr != ExtendedExpr && !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) { const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr); LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred); Predicates.push_back(Pred); } }; AppendPredicate(StartVal, StartExtended); AppendPredicate(Accum, AccumExtended); // *** Part3: Predicates are ready. Now go ahead and create the new addrec in // which the casts had been folded away. The caller can rewrite SymbolicPHI // into NewAR if it will also add the runtime overflow checks specified in // Predicates. auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap); std::pair> PredRewrite = std::make_pair(NewAR, Predicates); // Remember the result of the analysis for this SCEV at this locayyytion. PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite; return PredRewrite; } Optional>> ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) { auto *PN = cast(SymbolicPHI->getValue()); const Loop *L = isIntegerLoopHeaderPHI(PN, LI); if (!L) return None; // Check to see if we already analyzed this PHI. auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); if (I != PredicatedSCEVRewrites.end()) { std::pair> Rewrite = I->second; // Analysis was done before and failed to create an AddRec: if (Rewrite.first == SymbolicPHI) return None; // Analysis was done before and succeeded to create an AddRec under // a predicate: assert(isa(Rewrite.first) && "Expected an AddRec"); assert(!(Rewrite.second).empty() && "Expected to find Predicates"); return Rewrite; } Optional>> Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); // Record in the cache that the analysis failed if (!Rewrite) { SmallVector Predicates; PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; return None; } return Rewrite; } // FIXME: This utility is currently required because the Rewriter currently // does not rewrite this expression: // {0, +, (sext ix (trunc iy to ix) to iy)} // into {0, +, %step}, // even when the following Equal predicate exists: // "%step == (sext ix (trunc iy to ix) to iy)". 
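// Illustrative example (not part of the original source): with the predicate
// "%step == (sext i8 (trunc i64 %step to i8) to i64)" available in Preds, the
// recurrence {0,+,(sext i8 (trunc i64 %step to i8) to i64)} and the plain
// {0,+,%step} compare equal below, even though the Rewriter never folded the
// casts away.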
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds( const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const { if (AR1 == AR2) return true; auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool { if (Expr1 != Expr2 && !Preds->implies(SE.getEqualPredicate(Expr1, Expr2)) && !Preds->implies(SE.getEqualPredicate(Expr2, Expr1))) return false; return true; }; if (!areExprsEqual(AR1->getStart(), AR2->getStart()) || !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE))) return false; return true; } /// A helper function for createAddRecFromPHI to handle simple cases. /// /// This function tries to find an AddRec expression for the simplest (yet most /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). /// If it fails, createAddRecFromPHI will use a more general, but slow, /// technique for finding the AddRec expression. const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, Value *BEValueV, Value *StartValueV) { const Loop *L = LI.getLoopFor(PN->getParent()); assert(L && L->getHeader() == PN->getParent()); assert(BEValueV && StartValueV); auto BO = MatchBinaryOp(BEValueV, DT); if (!BO) return nullptr; if (BO->Opcode != Instruction::Add) return nullptr; const SCEV *Accum = nullptr; if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) Accum = getSCEV(BO->RHS); else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) Accum = getSCEV(BO->LHS); if (!Accum) return nullptr; SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; if (BO->IsNUW) Flags = setFlags(Flags, SCEV::FlagNUW); if (BO->IsNSW) Flags = setFlags(Flags, SCEV::FlagNSW); const SCEV *StartVal = getSCEV(StartValueV); const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); insertValueToMap(PN, PHISCEV); // We can add Flags to the post-inc expression only if we // know that it is *undefined behavior* for BEValueV to // overflow. if (auto *BEInst = dyn_cast(BEValueV)) { assert(isLoopInvariant(Accum, L) && "Accum is defined outside L, but is not invariant?"); if (isAddRecNeverPoison(BEInst, L)) (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); } return PHISCEV; } const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) { const Loop *L = LI.getLoopFor(PN->getParent()); if (!L || L->getHeader() != PN->getParent()) return nullptr; // The loop may have multiple entrances or multiple exits; we can analyze // this phi as an addrec if it has a unique entry value and a unique // backedge value. Value *BEValueV = nullptr, *StartValueV = nullptr; for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { Value *V = PN->getIncomingValue(i); if (L->contains(PN->getIncomingBlock(i))) { if (!BEValueV) { BEValueV = V; } else if (BEValueV != V) { BEValueV = nullptr; break; } } else if (!StartValueV) { StartValueV = V; } else if (StartValueV != V) { StartValueV = nullptr; break; } } if (!BEValueV || !StartValueV) return nullptr; assert(ValueExprMap.find_as(PN) == ValueExprMap.end() && "PHI node already processed?"); // First, try to find AddRec expression without creating a fictituos symbolic // value for PN. if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV)) return S; // Handle PHI node value symbolically. const SCEV *SymbolicName = getUnknown(PN); insertValueToMap(PN, SymbolicName); // Using this symbolic name for the PHI, analyze the value coming around // the back-edge. const SCEV *BEValue = getSCEV(BEValueV); // NOTE: If BEValue is loop invariant, we know that the PHI node just // has a special value for the first iteration of the loop. 
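  // Illustrative example (not from the original source):
  //   %iv      = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
  //   %iv.next = add i64 %iv, %step
  // Here BEValue is (SymbolicName + %step), which the code below rewrites
  // into the recurrence {0,+,%step}<%loop>.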
// If the value coming around the backedge is an add with the symbolic // value we just inserted, then we found a simple induction variable! if (const SCEVAddExpr *Add = dyn_cast(BEValue)) { // If there is a single occurrence of the symbolic value, replace it // with a recurrence. unsigned FoundIndex = Add->getNumOperands(); for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) if (Add->getOperand(i) == SymbolicName) if (FoundIndex == e) { FoundIndex = i; break; } if (FoundIndex != Add->getNumOperands()) { // Create an add with everything but the specified operand. SmallVector Ops; for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) if (i != FoundIndex) Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i), L, *this)); const SCEV *Accum = getAddExpr(Ops); // This is not a valid addrec if the step amount is varying each // loop iteration, but is not itself an addrec in this loop. if (isLoopInvariant(Accum, L) || (isa(Accum) && cast(Accum)->getLoop() == L)) { SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; if (auto BO = MatchBinaryOp(BEValueV, DT)) { if (BO->Opcode == Instruction::Add && BO->LHS == PN) { if (BO->IsNUW) Flags = setFlags(Flags, SCEV::FlagNUW); if (BO->IsNSW) Flags = setFlags(Flags, SCEV::FlagNSW); } } else if (GEPOperator *GEP = dyn_cast(BEValueV)) { // If the increment is an inbounds GEP, then we know the address // space cannot be wrapped around. We cannot make any guarantee // about signed or unsigned overflow because pointers are // unsigned but we may have a negative index from the base // pointer. We can guarantee that no unsigned wrap occurs if the // indices form a positive value. if (GEP->isInBounds() && GEP->getOperand(0) == PN) { Flags = setFlags(Flags, SCEV::FlagNW); const SCEV *Ptr = getSCEV(GEP->getPointerOperand()); if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr))) Flags = setFlags(Flags, SCEV::FlagNUW); } // We cannot transfer nuw and nsw flags from subtraction // operations -- sub nuw X, Y is not the same as add nuw X, -Y // for instance. } const SCEV *StartVal = getSCEV(StartValueV); const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); // Okay, for the entire analysis of this edge we assumed the PHI // to be symbolic. We now need to go back and purge all of the // entries for the scalars that use the symbolic expression. forgetMemoizedResults(SymbolicName); insertValueToMap(PN, PHISCEV); // We can add Flags to the post-inc expression only if we // know that it is *undefined behavior* for BEValueV to // overflow. if (auto *BEInst = dyn_cast(BEValueV)) if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); return PHISCEV; } } } else { // Otherwise, this could be a loop like this: // i = 0; for (j = 1; ..; ++j) { .... i = j; } // In this case, j = {1,+,1} and BEValue is j. // Because the other in-value of i (0) fits the evolution of BEValue // i really is an addrec evolution. // // We can generalize this saying that i is the shifted value of BEValue // by one iteration: // PHI(f(0), f({1,+,1})) --> f({0,+,1}) const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this); const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false); if (Shifted != getCouldNotCompute() && Start != getCouldNotCompute()) { const SCEV *StartVal = getSCEV(StartValueV); if (Start == StartVal) { // Okay, for the entire analysis of this edge we assumed the PHI // to be symbolic. 
We now need to go back and purge all of the // entries for the scalars that use the symbolic expression. forgetMemoizedResults(SymbolicName); insertValueToMap(PN, Shifted); return Shifted; } } } // Remove the temporary PHI node SCEV that has been inserted while intending // to create an AddRecExpr for this PHI node. We can not keep this temporary // as it will prevent later (possibly simpler) SCEV expressions to be added // to the ValueExprMap. eraseValueFromMap(PN); return nullptr; } // Checks if the SCEV S is available at BB. S is considered available at BB // if S can be materialized at BB without introducing a fault. static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S, BasicBlock *BB) { struct CheckAvailable { bool TraversalDone = false; bool Available = true; const Loop *L = nullptr; // The loop BB is in (can be nullptr) BasicBlock *BB = nullptr; DominatorTree &DT; CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT) : L(L), BB(BB), DT(DT) {} bool setUnavailable() { TraversalDone = true; Available = false; return false; } bool follow(const SCEV *S) { switch (S->getSCEVType()) { case scConstant: case scPtrToInt: case scTruncate: case scZeroExtend: case scSignExtend: case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: case scUMinExpr: case scSMinExpr: case scSequentialUMinExpr: // These expressions are available if their operand(s) is/are. return true; case scAddRecExpr: { // We allow add recurrences that are on the loop BB is in, or some // outer loop. This guarantees availability because the value of the // add recurrence at BB is simply the "current" value of the induction // variable. We can relax this in the future; for instance an add // recurrence on a sibling dominating loop is also available at BB. const auto *ARLoop = cast(S)->getLoop(); if (L && (ARLoop == L || ARLoop->contains(L))) return true; return setUnavailable(); } case scUnknown: { // For SCEVUnknown, we check for simple dominance. const auto *SU = cast(S); Value *V = SU->getValue(); if (isa(V)) return false; if (isa(V) && DT.dominates(cast(V), BB)) return false; return setUnavailable(); } case scUDivExpr: case scCouldNotCompute: // We do not try to smart about these at all. return setUnavailable(); } llvm_unreachable("Unknown SCEV kind!"); } bool isDone() { return TraversalDone; } }; CheckAvailable CA(L, BB, DT); SCEVTraversal ST(CA); ST.visitAll(S); return CA.Available; } // Try to match a control flow sequence that branches out at BI and merges back // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful // match. 
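// Illustrative note (not from the original source): the phi's incoming values
// may be listed in either order relative to the branch successors, e.g.
//   %m = phi [ %y, %right ], [ %x, %left ]
// still matches, because both pairings of (edge, use) are tried via dominance
// checks below.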
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS) { C = BI->getCondition(); BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); if (!LeftEdge.isSingleEdge()) return false; assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); Use &LeftUse = Merge->getOperandUse(0); Use &RightUse = Merge->getOperandUse(1); if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { LHS = LeftUse; RHS = RightUse; return true; } if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { LHS = RightUse; RHS = LeftUse; return true; } return false; } const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { auto IsReachable = [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { const Loop *L = LI.getLoopFor(PN->getParent()); // We don't want to break LCSSA, even in a SCEV expression tree. for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) return nullptr; // Try to match // // br %cond, label %left, label %right // left: // br label %merge // right: // br label %merge // merge: // V = phi [ %x, %left ], [ %y, %right ] // // as "select %cond, %x, %y" BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); assert(IDom && "At least the entry block should dominate PN"); auto *BI = dyn_cast(IDom->getTerminator()); Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; if (BI && BI->isConditional() && BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); } return nullptr; } const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { if (const SCEV *S = createAddRecFromPHI(PN)) return S; if (const SCEV *S = createNodeFromSelectLikePHI(PN)) return S; + // If the PHI has a single incoming value, follow that value, unless the + // PHI's incoming blocks are in a different loop, in which case doing so + // risks breaking LCSSA form. Instcombine would normally zap these, but + // it doesn't have DominatorTree information, so it may miss cases. if (Value *V = simplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) - return getSCEV(V); + if (LI.replacementPreservesLCSSAForm(PN, V)) + return getSCEV(V); // If it's not a loop phi, we can't handle it yet. return getUnknown(PN); } bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind) { struct FindClosure { const SCEV *OperandToFind; const SCEVTypes RootKind; // Must be a sequential min/max expression. const SCEVTypes NonSequentialRootKind; // Non-seq variant of RootKind. bool Found = false; bool canRecurseInto(SCEVTypes Kind) const { // We can only recurse into the SCEV expression of the same effective type // as the type of our root SCEV expression, and into zero-extensions. 
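      // E.g. (illustrative, not from the original source) when searching for
      // %x under a umin_seq root, we may look through umin_seq, plain umin,
      // and zext nodes, but not through an add or smax, since those change the
      // value rather than merely select it.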
return RootKind == Kind || NonSequentialRootKind == Kind || scZeroExtend == Kind; }; FindClosure(const SCEV *OperandToFind, SCEVTypes RootKind) : OperandToFind(OperandToFind), RootKind(RootKind), NonSequentialRootKind( SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType( RootKind)) {} bool follow(const SCEV *S) { Found = S == OperandToFind; return !isDone() && canRecurseInto(S->getSCEVType()); } bool isDone() const { return Found; } }; FindClosure FC(OperandToFind, RootKind); visitAll(Root, FC); return FC.Found; } const SCEV *ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond( Instruction *I, ICmpInst *Cond, Value *TrueVal, Value *FalseVal) { // Try to match some simple smax or umax patterns. auto *ICI = Cond; Value *LHS = ICI->getOperand(0); Value *RHS = ICI->getOperand(1); switch (ICI->getPredicate()) { case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_SGE: case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_UGE: // a > b ? a+x : b+x -> max(a, b)+x // a > b ? b+x : a+x -> min(a, b)+x if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { bool Signed = ICI->isSigned(); const SCEV *LA = getSCEV(TrueVal); const SCEV *RA = getSCEV(FalseVal); const SCEV *LS = getSCEV(LHS); const SCEV *RS = getSCEV(RHS); if (LA->getType()->isPointerTy()) { // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA. // Need to make sure we can't produce weird expressions involving // negated pointers. if (LA == LS && RA == RS) return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS); if (LA == RS && RA == LS) return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS); } auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * { if (Op->getType()->isPointerTy()) { Op = getLosslessPtrToIntExpr(Op); if (isa(Op)) return Op; } if (Signed) Op = getNoopOrSignExtend(Op, I->getType()); else Op = getNoopOrZeroExtend(Op, I->getType()); return Op; }; LS = CoerceOperand(LS); RS = CoerceOperand(RS); if (isa(LS) || isa(RS)) break; const SCEV *LDiff = getMinusSCEV(LA, LS); const SCEV *RDiff = getMinusSCEV(RA, RS); if (LDiff == RDiff) return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS), LDiff); LDiff = getMinusSCEV(LA, RS); RDiff = getMinusSCEV(RA, LS); if (LDiff == RDiff) return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS), LDiff); } break; case ICmpInst::ICMP_NE: // x != 0 ? x+y : C+y -> x == 0 ? C+y : x+y std::swap(TrueVal, FalseVal); LLVM_FALLTHROUGH; case ICmpInst::ICMP_EQ: // x == 0 ? C+y : x+y -> umax(x, C)+y iff C u<= 1 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && isa(RHS) && cast(RHS)->isZero()) { const SCEV *X = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); const SCEV *TrueValExpr = getSCEV(TrueVal); // C+y const SCEV *FalseValExpr = getSCEV(FalseVal); // x+y const SCEV *Y = getMinusSCEV(FalseValExpr, X); // y = (x+y)-x const SCEV *C = getMinusSCEV(TrueValExpr, Y); // C = (C+y)-y if (isa(C) && cast(C)->getAPInt().ule(1)) return getAddExpr(getUMaxExpr(X, C), Y); } // x == 0 ? 0 : umin (..., x, ...) -> umin_seq(x, umin (...)) // x == 0 ? 0 : umin_seq(..., x, ...) -> umin_seq(x, umin_seq(...)) // x == 0 ? 0 : umin (..., umin_seq(..., x, ...), ...) 
// -> umin_seq(x, umin (..., umin_seq(...), ...)) if (isa(RHS) && cast(RHS)->isZero() && isa(TrueVal) && cast(TrueVal)->isZero()) { const SCEV *X = getSCEV(LHS); while (auto *ZExt = dyn_cast(X)) X = ZExt->getOperand(); if (getTypeSizeInBits(X->getType()) <= getTypeSizeInBits(I->getType())) { const SCEV *FalseValExpr = getSCEV(FalseVal); if (SCEVMinMaxExprContains(FalseValExpr, X, scSequentialUMinExpr)) return getUMinExpr(getNoopOrZeroExtend(X, I->getType()), FalseValExpr, /*Sequential=*/true); } } break; default: break; } return getUnknown(I); } static Optional createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr) { assert(CondExpr->getType()->isIntegerTy(1) && TrueExpr->getType() == FalseExpr->getType() && TrueExpr->getType()->isIntegerTy(1) && "Unexpected operands of a select."); // i1 cond ? i1 x : i1 C --> C + (i1 cond ? (i1 x - i1 C) : i1 0) // --> C + (umin_seq cond, x - C) // // i1 cond ? i1 C : i1 x --> C + (i1 cond ? i1 0 : (i1 x - i1 C)) // --> C + (i1 ~cond ? (i1 x - i1 C) : i1 0) // --> C + (umin_seq ~cond, x - C) // FIXME: while we can't legally model the case where both of the hands // are fully variable, we only require that the *difference* is constant. if (!isa(TrueExpr) && !isa(FalseExpr)) return None; const SCEV *X, *C; if (isa(TrueExpr)) { CondExpr = SE->getNotSCEV(CondExpr); X = FalseExpr; C = TrueExpr; } else { X = TrueExpr; C = FalseExpr; } return SE->getAddExpr(C, SE->getUMinExpr(CondExpr, SE->getMinusSCEV(X, C), /*Sequential=*/true)); } static Optional createNodeForSelectViaUMinSeq(ScalarEvolution *SE, Value *Cond, Value *TrueVal, Value *FalseVal) { if (!isa(TrueVal) && !isa(FalseVal)) return None; const auto *SECond = SE->getSCEV(Cond); const auto *SETrue = SE->getSCEV(TrueVal); const auto *SEFalse = SE->getSCEV(FalseVal); return createNodeForSelectViaUMinSeq(SE, SECond, SETrue, SEFalse); } const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq( Value *V, Value *Cond, Value *TrueVal, Value *FalseVal) { assert(Cond->getType()->isIntegerTy(1) && "Select condition is not an i1?"); assert(TrueVal->getType() == FalseVal->getType() && V->getType() == TrueVal->getType() && "Types of select hands and of the result must match."); // For now, only deal with i1-typed `select`s. if (!V->getType()->isIntegerTy(1)) return getUnknown(V); if (Optional S = createNodeForSelectViaUMinSeq(this, Cond, TrueVal, FalseVal)) return *S; return getUnknown(V); } const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Value *V, Value *Cond, Value *TrueVal, Value *FalseVal) { // Handle "constant" branch or select. This can occur for instance when a // loop pass transforms an inner loop and moves on to process the outer loop. if (auto *CI = dyn_cast(Cond)) return getSCEV(CI->isOne() ? TrueVal : FalseVal); if (auto *I = dyn_cast(V)) { if (auto *ICI = dyn_cast(Cond)) { const SCEV *S = createNodeForSelectOrPHIInstWithICmpInstCond( I, ICI, TrueVal, FalseVal); if (!isa(S)) return S; } } return createNodeForSelectOrPHIViaUMinSeq(V, Cond, TrueVal, FalseVal); } /// Expand GEP instructions into add and multiply operations. This allows them /// to be analyzed by regular SCEV code. 
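/// For illustration (not part of the original source):
///   %addr = getelementptr i32, ptr %base, i64 %i
/// becomes the SCEV expression (%base + 4 * %i), where the element size 4 is
/// taken from the DataLayout by getGEPExpr.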
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { assert(GEP->getSourceElementType()->isSized() && "GEP source element type must be sized"); SmallVector IndexExprs; for (Value *Index : GEP->indices()) IndexExprs.push_back(getSCEV(Index)); return getGEPExpr(GEP, IndexExprs); } uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { if (const SCEVConstant *C = dyn_cast(S)) return C->getAPInt().countTrailingZeros(); if (const SCEVPtrToIntExpr *I = dyn_cast(S)) return GetMinTrailingZeros(I->getOperand()); if (const SCEVTruncateExpr *T = dyn_cast(S)) return std::min(GetMinTrailingZeros(T->getOperand()), (uint32_t)getTypeSizeInBits(T->getType())); if (const SCEVZeroExtendExpr *E = dyn_cast(S)) { uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? getTypeSizeInBits(E->getType()) : OpRes; } if (const SCEVSignExtendExpr *E = dyn_cast(S)) { uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? getTypeSizeInBits(E->getType()) : OpRes; } if (const SCEVAddExpr *A = dyn_cast(S)) { // The result is the min of all operands results. uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); return MinOpRes; } if (const SCEVMulExpr *M = dyn_cast(S)) { // The result is the sum of all operands results. uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); uint32_t BitWidth = getTypeSizeInBits(M->getType()); for (unsigned i = 1, e = M->getNumOperands(); SumOpRes != BitWidth && i != e; ++i) SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); return SumOpRes; } if (const SCEVAddRecExpr *A = dyn_cast(S)) { // The result is the min of all operands results. uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); return MinOpRes; } if (const SCEVSMaxExpr *M = dyn_cast(S)) { // The result is the min of all operands results. uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); return MinOpRes; } if (const SCEVUMaxExpr *M = dyn_cast(S)) { // The result is the min of all operands results. uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); return MinOpRes; } if (const SCEVUnknown *U = dyn_cast(S)) { // For a SCEVUnknown, ask ValueTracking. KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); return Known.countMinTrailingZeros(); } // SCEVUDivExpr return 0; } uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { auto I = MinTrailingZerosCache.find(S); if (I != MinTrailingZerosCache.end()) return I->second; uint32_t Result = GetMinTrailingZerosImpl(S); auto InsertPair = MinTrailingZerosCache.insert({S, Result}); assert(InsertPair.second && "Should insert a new key"); return InsertPair.first->second; } /// Helper method to assign a range to V from metadata present in the IR. 
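/// For example (illustrative, not from the original source):
///   %v = load i32, ptr %p, !range !{i32 0, i32 256}
/// yields the ConstantRange [0, 256), which getRangeRef can then intersect
/// into the range it computes for %v.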
static Optional GetRangeFromMetadata(Value *V) { if (Instruction *I = dyn_cast(V)) if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) return getConstantRangeFromMetadata(*MD); return None; } void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags) { if (AddRec->getNoWrapFlags(Flags) != Flags) { AddRec->setNoWrapFlags(Flags); UnsignedRanges.erase(AddRec); SignedRanges.erase(AddRec); } } ConstantRange ScalarEvolution:: getRangeForUnknownRecurrence(const SCEVUnknown *U) { const DataLayout &DL = getDataLayout(); unsigned BitWidth = getTypeSizeInBits(U->getType()); const ConstantRange FullSet(BitWidth, /*isFullSet=*/true); // Match a simple recurrence of the form: , and then // use information about the trip count to improve our available range. Note // that the trip count independent cases are already handled by known bits. // WARNING: The definition of recurrence used here is subtly different than // the one used by AddRec (and thus most of this file). Step is allowed to // be arbitrarily loop varying here, where AddRec allows only loop invariant // and other addrecs in the same loop (for non-affine addrecs). The code // below intentionally handles the case where step is not loop invariant. auto *P = dyn_cast(U->getValue()); if (!P) return FullSet; // Make sure that no Phi input comes from an unreachable block. Otherwise, // even the values that are not available in these blocks may come from them, // and this leads to false-positive recurrence test. for (auto *Pred : predecessors(P->getParent())) if (!DT.isReachableFromEntry(Pred)) return FullSet; BinaryOperator *BO; Value *Start, *Step; if (!matchSimpleRecurrence(P, BO, Start, Step)) return FullSet; // If we found a recurrence in reachable code, we must be in a loop. Note // that BO might be in some subloop of L, and that's completely okay. auto *L = LI.getLoopFor(P->getParent()); assert(L && L->getHeader() == P->getParent()); if (!L->contains(BO->getParent())) // NOTE: This bailout should be an assert instead. However, asserting // the condition here exposes a case where LoopFusion is querying SCEV // with malformed loop information during the midst of the transform. // There doesn't appear to be an obvious fix, so for the moment bailout // until the caller issue can be fixed. PR49566 tracks the bug. return FullSet; // TODO: Extend to other opcodes such as mul, and div switch (BO->getOpcode()) { default: return FullSet; case Instruction::AShr: case Instruction::LShr: case Instruction::Shl: break; }; if (BO->getOperand(0) != P) // TODO: Handle the power function forms some day. return FullSet; unsigned TC = getSmallConstantMaxTripCount(L); if (!TC || TC >= BitWidth) return FullSet; auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT); auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT); assert(KnownStart.getBitWidth() == BitWidth && KnownStep.getBitWidth() == BitWidth); // Compute total shift amount, being careful of overflow and bitwidths. auto MaxShiftAmt = KnownStep.getMaxValue(); APInt TCAP(BitWidth, TC-1); bool Overflow = false; auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow); if (Overflow) return FullSet; switch (BO->getOpcode()) { default: llvm_unreachable("filtered out above"); case Instruction::AShr: { // For each ashr, three cases: // shift = 0 => unchanged value // saturation => 0 or -1 // other => a value closer to zero (of the same sign) // Thus, the end value is closer to zero than the start. 
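      // Illustrative example (not exhaustive): an i8 value starting at -100
      // and arithmetic-shifted right by a total of at most 3 can only take
      // the values -100, -50, -25 and -13, all of which lie between the start
      // value and zero.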
auto KnownEnd = KnownBits::ashr(KnownStart, KnownBits::makeConstant(TotalShift)); if (KnownStart.isNonNegative()) // Analogous to lshr (simply not yet canonicalized) return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), KnownStart.getMaxValue() + 1); if (KnownStart.isNegative()) // End >=u Start && End <=s Start return ConstantRange::getNonEmpty(KnownStart.getMinValue(), KnownEnd.getMaxValue() + 1); break; } case Instruction::LShr: { // For each lshr, three cases: // shift = 0 => unchanged value // saturation => 0 // other => a smaller positive number // Thus, the low end of the unsigned range is the last value produced. auto KnownEnd = KnownBits::lshr(KnownStart, KnownBits::makeConstant(TotalShift)); return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), KnownStart.getMaxValue() + 1); } case Instruction::Shl: { // Iff no bits are shifted out, value increases on every shift. auto KnownEnd = KnownBits::shl(KnownStart, KnownBits::makeConstant(TotalShift)); if (TotalShift.ult(KnownStart.countMinLeadingZeros())) return ConstantRange(KnownStart.getMinValue(), KnownEnd.getMaxValue() + 1); break; } }; return FullSet; } /// Determine the range for a particular SCEV. If SignHint is /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges /// with a "cleaner" unsigned (resp. signed) representation. const ConstantRange & ScalarEvolution::getRangeRef(const SCEV *S, ScalarEvolution::RangeSignHint SignHint) { DenseMap &Cache = SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges; ConstantRange::PreferredRangeType RangeType = SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? ConstantRange::Unsigned : ConstantRange::Signed; // See if we've computed this range already. DenseMap::iterator I = Cache.find(S); if (I != Cache.end()) return I->second; if (const SCEVConstant *C = dyn_cast(S)) return setRange(C, SignHint, ConstantRange(C->getAPInt())); unsigned BitWidth = getTypeSizeInBits(S->getType()); ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); using OBO = OverflowingBinaryOperator; // If the value has known zeros, the maximum value will have those known zeros // as well. 
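  // Illustrative example: for an i8 expression with TZ == 2, the largest
  // attainable unsigned value is 0b11111100 == 252, so the unsigned
  // conservative range computed below becomes [0, 253).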
uint32_t TZ = GetMinTrailingZeros(S); if (TZ != 0) { if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) ConservativeResult = ConstantRange(APInt::getMinValue(BitWidth), APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); else ConservativeResult = ConstantRange( APInt::getSignedMinValue(BitWidth), APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); } if (const SCEVAddExpr *Add = dyn_cast(S)) { ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); unsigned WrapType = OBO::AnyWrap; if (Add->hasNoSignedWrap()) WrapType |= OBO::NoSignedWrap; if (Add->hasNoUnsignedWrap()) WrapType |= OBO::NoUnsignedWrap; for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), WrapType, RangeType); return setRange(Add, SignHint, ConservativeResult.intersectWith(X, RangeType)); } if (const SCEVMulExpr *Mul = dyn_cast(S)) { ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); return setRange(Mul, SignHint, ConservativeResult.intersectWith(X, RangeType)); } if (isa(S) || isa(S)) { Intrinsic::ID ID; switch (S->getSCEVType()) { case scUMaxExpr: ID = Intrinsic::umax; break; case scSMaxExpr: ID = Intrinsic::smax; break; case scUMinExpr: case scSequentialUMinExpr: ID = Intrinsic::umin; break; case scSMinExpr: ID = Intrinsic::smin; break; default: llvm_unreachable("Unknown SCEVMinMaxExpr/SCEVSequentialMinMaxExpr."); } const auto *NAry = cast(S); ConstantRange X = getRangeRef(NAry->getOperand(0), SignHint); for (unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i) X = X.intrinsic(ID, {X, getRangeRef(NAry->getOperand(i), SignHint)}); return setRange(S, SignHint, ConservativeResult.intersectWith(X, RangeType)); } if (const SCEVUDivExpr *UDiv = dyn_cast(S)) { ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); return setRange(UDiv, SignHint, ConservativeResult.intersectWith(X.udiv(Y), RangeType)); } if (const SCEVZeroExtendExpr *ZExt = dyn_cast(S)) { ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); return setRange(ZExt, SignHint, ConservativeResult.intersectWith(X.zeroExtend(BitWidth), RangeType)); } if (const SCEVSignExtendExpr *SExt = dyn_cast(S)) { ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); return setRange(SExt, SignHint, ConservativeResult.intersectWith(X.signExtend(BitWidth), RangeType)); } if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast(S)) { ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint); return setRange(PtrToInt, SignHint, X); } if (const SCEVTruncateExpr *Trunc = dyn_cast(S)) { ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); return setRange(Trunc, SignHint, ConservativeResult.intersectWith(X.truncate(BitWidth), RangeType)); } if (const SCEVAddRecExpr *AddRec = dyn_cast(S)) { // If there's no unsigned wrap, the value will never be less than its // initial value. if (AddRec->hasNoUnsignedWrap()) { APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart()); if (!UnsignedMinValue.isZero()) ConservativeResult = ConservativeResult.intersectWith( ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType); } // If there's no signed wrap, and all the operands except initial value have // the same sign or zero, the value won't ever be: // 1: smaller than initial value if operands are non negative, // 2: bigger than initial value if operands are non positive. 
// For both cases, value can not cross signed min/max boundary. if (AddRec->hasNoSignedWrap()) { bool AllNonNeg = true; bool AllNonPos = true; for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) { if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; } if (AllNonNeg) ConservativeResult = ConservativeResult.intersectWith( ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()), APInt::getSignedMinValue(BitWidth)), RangeType); else if (AllNonPos) ConservativeResult = ConservativeResult.intersectWith( ConstantRange::getNonEmpty( APInt::getSignedMinValue(BitWidth), getSignedRangeMax(AddRec->getStart()) + 1), RangeType); } // TODO: non-affine addrec if (AddRec->isAffine()) { const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop()); if (!isa(MaxBECount) && getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { auto RangeFromAffine = getRangeForAffineAR( AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, BitWidth); ConservativeResult = ConservativeResult.intersectWith(RangeFromAffine, RangeType); auto RangeFromFactoring = getRangeViaFactoring( AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, BitWidth); ConservativeResult = ConservativeResult.intersectWith(RangeFromFactoring, RangeType); } // Now try symbolic BE count and more powerful methods. if (UseExpensiveRangeSharpening) { const SCEV *SymbolicMaxBECount = getSymbolicMaxBackedgeTakenCount(AddRec->getLoop()); if (!isa(SymbolicMaxBECount) && getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && AddRec->hasNoSelfWrap()) { auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR( AddRec, SymbolicMaxBECount, BitWidth, SignHint); ConservativeResult = ConservativeResult.intersectWith(RangeFromAffineNew, RangeType); } } } return setRange(AddRec, SignHint, std::move(ConservativeResult)); } if (const SCEVUnknown *U = dyn_cast(S)) { // Check if the IR explicitly contains !range metadata. Optional MDRange = GetRangeFromMetadata(U->getValue()); if (MDRange) ConservativeResult = ConservativeResult.intersectWith(MDRange.value(), RangeType); // Use facts about recurrences in the underlying IR. Note that add // recurrences are AddRecExprs and thus don't hit this path. This // primarily handles shift recurrences. auto CR = getRangeForUnknownRecurrence(U); ConservativeResult = ConservativeResult.intersectWith(CR); // See if ValueTracking can give us a useful range. const DataLayout &DL = getDataLayout(); KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); if (Known.getBitWidth() != BitWidth) Known = Known.zextOrTrunc(BitWidth); // ValueTracking may be able to compute a tighter result for the number of // sign bits than for the value of those sign bits. unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); if (U->getType()->isPointerTy()) { // If the pointer size is larger than the index size type, this can cause // NS to be larger than BitWidth. So compensate for this. unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType()); int ptrIdxDiff = ptrSize - BitWidth; if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff) NS -= ptrIdxDiff; } if (NS > 1) { // If we know any of the sign bits, we know all of the sign bits. 
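      // E.g. with NS == 3 on an i8 value, bits 7..5 are all copies of the
      // sign bit, so learning that any one of them is zero (or one) fixes all
      // three of them below.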
if (!Known.Zero.getHiBits(NS).isZero()) Known.Zero.setHighBits(NS); if (!Known.One.getHiBits(NS).isZero()) Known.One.setHighBits(NS); } if (Known.getMinValue() != Known.getMaxValue() + 1) ConservativeResult = ConservativeResult.intersectWith( ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1), RangeType); if (NS > 1) ConservativeResult = ConservativeResult.intersectWith( ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1), RangeType); // A range of Phi is a subset of union of all ranges of its input. if (const PHINode *Phi = dyn_cast(U->getValue())) { // Make sure that we do not run over cycled Phis. if (PendingPhiRanges.insert(Phi).second) { ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false); for (const auto &Op : Phi->operands()) { auto OpRange = getRangeRef(getSCEV(Op), SignHint); RangeFromOps = RangeFromOps.unionWith(OpRange); // No point to continue if we already have a full set. if (RangeFromOps.isFullSet()) break; } ConservativeResult = ConservativeResult.intersectWith(RangeFromOps, RangeType); bool Erased = PendingPhiRanges.erase(Phi); assert(Erased && "Failed to erase Phi properly?"); (void) Erased; } } // vscale can't be equal to zero if (const auto *II = dyn_cast(U->getValue())) if (II->getIntrinsicID() == Intrinsic::vscale) { ConstantRange Disallowed = APInt::getZero(BitWidth); ConservativeResult = ConservativeResult.difference(Disallowed); } return setRange(U, SignHint, std::move(ConservativeResult)); } return setRange(S, SignHint, std::move(ConservativeResult)); } // Given a StartRange, Step and MaxBECount for an expression compute a range of // values that the expression can take. Initially, the expression has a value // from StartRange and then is changed by Step up to MaxBECount times. Signed // argument defines if we treat Step as signed or unsigned. static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, unsigned BitWidth, bool Signed) { // If either Step or MaxBECount is 0, then the expression won't change, and we // just need to return the initial range. if (Step == 0 || MaxBECount == 0) return StartRange; // If we don't know anything about the initial value (i.e. StartRange is // FullRange), then we don't know anything about the final range either. // Return FullRange. if (StartRange.isFullSet()) return ConstantRange::getFull(BitWidth); // If Step is signed and negative, then we use its absolute value, but we also // note that we're moving in the opposite direction. bool Descending = Signed && Step.isNegative(); if (Signed) // This is correct even for INT_SMIN. Let's look at i8 to illustrate this: // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128. // This equations hold true due to the well-defined wrap-around behavior of // APInt. Step = Step.abs(); // Check if Offset is more than full span of BitWidth. If it is, the // expression is guaranteed to overflow. if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount)) return ConstantRange::getFull(BitWidth); // Offset is by how much the expression can change. Checks above guarantee no // overflow here. APInt Offset = Step * MaxBECount; // Minimum value of the final range will match the minimal value of StartRange // if the expression is increasing and will be decreased by Offset otherwise. // Maximum value of the final range will match the maximal value of StartRange // if the expression is decreasing and will be increased by Offset otherwise. 
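// Illustrative example: StartRange = [10, 21), Step = 3 and MaxBECount = 5
// give Offset = 15. The expression is ascending and the moved boundary
// 20 + 15 = 35 does not wrap back into [10, 21), so the result is [10, 36).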
APInt StartLower = StartRange.getLower(); APInt StartUpper = StartRange.getUpper() - 1; APInt MovedBoundary = Descending ? (StartLower - std::move(Offset)) : (StartUpper + std::move(Offset)); // It's possible that the new minimum/maximum value will fall into the initial // range (due to wrap around). This means that the expression can take any // value in this bitwidth, and we have to return full range. if (StartRange.contains(MovedBoundary)) return ConstantRange::getFull(BitWidth); APInt NewLower = Descending ? std::move(MovedBoundary) : std::move(StartLower); APInt NewUpper = Descending ? std::move(StartUpper) : std::move(MovedBoundary); NewUpper += 1; // No overflow detected, return [StartLower, StartUpper + Offset + 1) range. return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper)); } ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, const SCEV *Step, const SCEV *MaxBECount, unsigned BitWidth) { assert(!isa(MaxBECount) && getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && "Precondition!"); MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount); // First, consider step signed. ConstantRange StartSRange = getSignedRange(Start); ConstantRange StepSRange = getSignedRange(Step); // If Step can be both positive and negative, we need to find ranges for the // maximum absolute step values in both directions and union them. ConstantRange SR = getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, MaxBECountValue, BitWidth, /* Signed = */ true); SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), StartSRange, MaxBECountValue, BitWidth, /* Signed = */ true)); // Next, consider step unsigned. ConstantRange UR = getRangeForAffineARHelper( getUnsignedRangeMax(Step), getUnsignedRange(Start), MaxBECountValue, BitWidth, /* Signed = */ false); // Finally, intersect signed and unsigned ranges. return SR.intersectWith(UR, ConstantRange::Smallest); } ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR( const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth, ScalarEvolution::RangeSignHint SignHint) { assert(AddRec->isAffine() && "Non-affine AddRecs are not suppored!\n"); assert(AddRec->hasNoSelfWrap() && "This only works for non-self-wrapping AddRecs!"); const bool IsSigned = SignHint == HINT_RANGE_SIGNED; const SCEV *Step = AddRec->getStepRecurrence(*this); // Only deal with constant step to save compile time. if (!isa(Step)) return ConstantRange::getFull(BitWidth); // Let's make sure that we can prove that we do not self-wrap during // MaxBECount iterations. We need this because MaxBECount is a maximum // iteration count estimate, and we might infer nw from some exit for which we // do not know max exit count (or any other side reasoning). // TODO: Turn into assert at some point. if (getTypeSizeInBits(MaxBECount->getType()) > getTypeSizeInBits(AddRec->getType())) return ConstantRange::getFull(BitWidth); MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType()); const SCEV *RangeWidth = getMinusOne(AddRec->getType()); const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step)); const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs); if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount, MaxItersWithoutWrap)) return ConstantRange::getFull(BitWidth); ICmpInst::Predicate LEPred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; ICmpInst::Predicate GEPred = IsSigned ? 
ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this); // We know that there is no self-wrap. Let's take Start and End values and // look at all intermediate values V1, V2, ..., Vn that IndVar takes during // the iteration. They either lie inside the range [Min(Start, End), // Max(Start, End)] or outside it: // // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax; // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax; // // No self wrap flag guarantees that the intermediate values cannot be BOTH // outside and inside the range [Min(Start, End), Max(Start, End)]. Using that // knowledge, let's try to prove that we are dealing with Case 1. It is so if // Start <= End and step is positive, or Start >= End and step is negative. const SCEV *Start = AddRec->getStart(); ConstantRange StartRange = getRangeRef(Start, SignHint); ConstantRange EndRange = getRangeRef(End, SignHint); ConstantRange RangeBetween = StartRange.unionWith(EndRange); // If they already cover full iteration space, we will know nothing useful // even if we prove what we want to prove. if (RangeBetween.isFullSet()) return RangeBetween; // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax). bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet() : RangeBetween.isWrappedSet(); if (IsWrappedSet) return ConstantRange::getFull(BitWidth); if (isKnownPositive(Step) && isKnownPredicateViaConstantRanges(LEPred, Start, End)) return RangeBetween; else if (isKnownNegative(Step) && isKnownPredicateViaConstantRanges(GEPred, Start, End)) return RangeBetween; return ConstantRange::getFull(BitWidth); } ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, const SCEV *Step, const SCEV *MaxBECount, unsigned BitWidth) { // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) struct SelectPattern { Value *Condition = nullptr; APInt TrueValue; APInt FalseValue; explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, const SCEV *S) { Optional CastOp; APInt Offset(BitWidth, 0); assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && "Should be!"); // Peel off a constant offset: if (auto *SA = dyn_cast(S)) { // In the future we could consider being smarter here and handle // {Start+Step,+,Step} too. 
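      // E.g. a hypothetical S = (7 + %sel) is split into Offset = 7 and
      // S = %sel before the select itself is matched below.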
if (SA->getNumOperands() != 2 || !isa(SA->getOperand(0))) return; Offset = cast(SA->getOperand(0))->getAPInt(); S = SA->getOperand(1); } // Peel off a cast operation if (auto *SCast = dyn_cast(S)) { CastOp = SCast->getSCEVType(); S = SCast->getOperand(); } using namespace llvm::PatternMatch; auto *SU = dyn_cast(S); const APInt *TrueVal, *FalseVal; if (!SU || !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), m_APInt(FalseVal)))) { Condition = nullptr; return; } TrueValue = *TrueVal; FalseValue = *FalseVal; // Re-apply the cast we peeled off earlier if (CastOp) switch (*CastOp) { default: llvm_unreachable("Unknown SCEV cast type!"); case scTruncate: TrueValue = TrueValue.trunc(BitWidth); FalseValue = FalseValue.trunc(BitWidth); break; case scZeroExtend: TrueValue = TrueValue.zext(BitWidth); FalseValue = FalseValue.zext(BitWidth); break; case scSignExtend: TrueValue = TrueValue.sext(BitWidth); FalseValue = FalseValue.sext(BitWidth); break; } // Re-apply the constant offset we peeled off earlier TrueValue += Offset; FalseValue += Offset; } bool isRecognized() { return Condition != nullptr; } }; SelectPattern StartPattern(*this, BitWidth, Start); if (!StartPattern.isRecognized()) return ConstantRange::getFull(BitWidth); SelectPattern StepPattern(*this, BitWidth, Step); if (!StepPattern.isRecognized()) return ConstantRange::getFull(BitWidth); if (StartPattern.Condition != StepPattern.Condition) { // We don't handle this case today; but we could, by considering four // possibilities below instead of two. I'm not sure if there are cases where // that will help over what getRange already does, though. return ConstantRange::getFull(BitWidth); } // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to // construct arbitrary general SCEV expressions here. This function is called // from deep in the call stack, and calling getSCEV (on a sext instruction, // say) can end up caching a suboptimal value. // FIXME: without the explicit `this` receiver below, MSVC errors out with // C2352 and C2512 (otherwise it isn't needed). const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); ConstantRange TrueRange = this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); ConstantRange FalseRange = this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); return TrueRange.unionWith(FalseRange); } SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { if (isa(V)) return SCEV::FlagAnyWrap; const BinaryOperator *BinOp = cast(V); // Return early if there are no flags to propagate to the SCEV. SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; if (BinOp->hasNoUnsignedWrap()) Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); if (BinOp->hasNoSignedWrap()) Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); if (Flags == SCEV::FlagAnyWrap) return SCEV::FlagAnyWrap; return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; } const Instruction * ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) { if (auto *AddRec = dyn_cast(S)) return &*AddRec->getLoop()->getHeader()->begin(); if (auto *U = dyn_cast(S)) if (auto *I = dyn_cast(U->getValue())) return I; return nullptr; } /// Fills \p Ops with unique operands of \p S, if it has operands. If not, /// \p Ops remains unmodified. 
static void collectUniqueOps(const SCEV *S, SmallVectorImpl &Ops) { SmallPtrSet Unique; auto InsertUnique = [&](const SCEV *S) { if (Unique.insert(S).second) Ops.push_back(S); }; if (auto *S2 = dyn_cast(S)) for (const auto *Op : S2->operands()) InsertUnique(Op); else if (auto *S2 = dyn_cast(S)) for (const auto *Op : S2->operands()) InsertUnique(Op); else if (auto *S2 = dyn_cast(S)) for (const auto *Op : S2->operands()) InsertUnique(Op); } const Instruction * ScalarEvolution::getDefiningScopeBound(ArrayRef Ops, bool &Precise) { Precise = true; // Do a bounded search of the def relation of the requested SCEVs. SmallSet Visited; SmallVector Worklist; auto pushOp = [&](const SCEV *S) { if (!Visited.insert(S).second) return; // Threshold of 30 here is arbitrary. if (Visited.size() > 30) { Precise = false; return; } Worklist.push_back(S); }; for (const auto *S : Ops) pushOp(S); const Instruction *Bound = nullptr; while (!Worklist.empty()) { auto *S = Worklist.pop_back_val(); if (auto *DefI = getNonTrivialDefiningScopeBound(S)) { if (!Bound || DT.dominates(Bound, DefI)) Bound = DefI; } else { SmallVector Ops; collectUniqueOps(S, Ops); for (const auto *Op : Ops) pushOp(Op); } } return Bound ? Bound : &*F.getEntryBlock().begin(); } const Instruction * ScalarEvolution::getDefiningScopeBound(ArrayRef Ops) { bool Discard; return getDefiningScopeBound(Ops, Discard); } bool ScalarEvolution::isGuaranteedToTransferExecutionTo(const Instruction *A, const Instruction *B) { if (A->getParent() == B->getParent() && isGuaranteedToTransferExecutionToSuccessor(A->getIterator(), B->getIterator())) return true; auto *BLoop = LI.getLoopFor(B->getParent()); if (BLoop && BLoop->getHeader() == B->getParent() && BLoop->getLoopPreheader() == A->getParent() && isGuaranteedToTransferExecutionToSuccessor(A->getIterator(), A->getParent()->end()) && isGuaranteedToTransferExecutionToSuccessor(B->getParent()->begin(), B->getIterator())) return true; return false; } bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { // Only proceed if we can prove that I does not yield poison. if (!programUndefinedIfPoison(I)) return false; // At this point we know that if I is executed, then it does not wrap // according to at least one of NSW or NUW. If I is not executed, then we do // not know if the calculation that I represents would wrap. Multiple // instructions can map to the same SCEV. If we apply NSW or NUW from I to // the SCEV, we must guarantee no wrapping for that SCEV also when it is // derived from other instructions that map to the same SCEV. We cannot make // that guarantee for cases where I is not executed. So we need to find a // upper bound on the defining scope for the SCEV, and prove that I is // executed every time we enter that scope. When the bounding scope is a // loop (the common case), this is equivalent to proving I executes on every // iteration of that loop. SmallVector SCEVOps; for (const Use &Op : I->operands()) { // I could be an extractvalue from a call to an overflow intrinsic. // TODO: We can do better here in some cases. if (isSCEVable(Op->getType())) SCEVOps.push_back(getSCEV(Op)); } auto *DefI = getDefiningScopeBound(SCEVOps); return isGuaranteedToTransferExecutionTo(DefI, I); } bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { // If we know that \c I can never be poison period, then that's enough. 
if (isSCEVExprNeverPoison(I)) return true; // For an add recurrence specifically, we assume that infinite loops without // side effects are undefined behavior, and then reason as follows: // // If the add recurrence is poison in any iteration, it is poison on all // future iterations (since incrementing poison yields poison). If the result // of the add recurrence is fed into the loop latch condition and the loop // does not contain any throws or exiting blocks other than the latch, we now // have the ability to "choose" whether the backedge is taken or not (by // choosing a sufficiently evil value for the poison feeding into the branch) // for every iteration including and after the one in which \p I first became // poison. There are two possibilities (let's call the iteration in which \p // I first became poison as K): // // 1. In the set of iterations including and after K, the loop body executes // no side effects. In this case executing the backege an infinte number // of times will yield undefined behavior. // // 2. In the set of iterations including and after K, the loop body executes // at least one side effect. In this case, that specific instance of side // effect is control dependent on poison, which also yields undefined // behavior. auto *ExitingBB = L->getExitingBlock(); auto *LatchBB = L->getLoopLatch(); if (!ExitingBB || !LatchBB || ExitingBB != LatchBB) return false; SmallPtrSet Pushed; SmallVector PoisonStack; // We start by assuming \c I, the post-inc add recurrence, is poison. Only // things that are known to be poison under that assumption go on the // PoisonStack. Pushed.insert(I); PoisonStack.push_back(I); bool LatchControlDependentOnPoison = false; while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { const Instruction *Poison = PoisonStack.pop_back_val(); for (const auto *PoisonUser : Poison->users()) { if (propagatesPoison(cast(PoisonUser))) { if (Pushed.insert(cast(PoisonUser)).second) PoisonStack.push_back(cast(PoisonUser)); } else if (auto *BI = dyn_cast(PoisonUser)) { assert(BI->isConditional() && "Only possibility!"); if (BI->getParent() == LatchBB) { LatchControlDependentOnPoison = true; break; } } } } return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); } ScalarEvolution::LoopProperties ScalarEvolution::getLoopProperties(const Loop *L) { using LoopProperties = ScalarEvolution::LoopProperties; auto Itr = LoopPropertiesCache.find(L); if (Itr == LoopPropertiesCache.end()) { auto HasSideEffects = [](Instruction *I) { if (auto *SI = dyn_cast(I)) return !SI->isSimple(); return I->mayThrow() || I->mayWriteToMemory(); }; LoopProperties LP = {/* HasNoAbnormalExits */ true, /*HasNoSideEffects*/ true}; for (auto *BB : L->getBlocks()) for (auto &I : *BB) { if (!isGuaranteedToTransferExecutionToSuccessor(&I)) LP.HasNoAbnormalExits = false; if (HasSideEffects(&I)) LP.HasNoSideEffects = false; if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) break; // We're already as pessimistic as we can get. } auto InsertPair = LoopPropertiesCache.insert({L, LP}); assert(InsertPair.second && "We just checked!"); Itr = InsertPair.first; } return Itr->second; } bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) { // A mustprogress loop without side effects must be finite. // TODO: The check used here is very conservative. It's only *specific* // side effects which are well defined in infinite loops. 
return isFinite(L) || (isMustProgress(L) && loopHasNoSideEffects(L)); } const SCEV *ScalarEvolution::createSCEVIter(Value *V) { // Worklist item with a Value and a bool indicating whether all operands have // been visited already. using PointerTy = PointerIntPair; SmallVector Stack; Stack.emplace_back(V, true); Stack.emplace_back(V, false); while (!Stack.empty()) { auto E = Stack.pop_back_val(); Value *CurV = E.getPointer(); if (getExistingSCEV(CurV)) continue; SmallVector Ops; const SCEV *CreatedSCEV = nullptr; // If all operands have been visited already, create the SCEV. if (E.getInt()) { CreatedSCEV = createSCEV(CurV); } else { // Otherwise get the operands we need to create SCEV's for before creating // the SCEV for CurV. If the SCEV for CurV can be constructed trivially, // just use it. CreatedSCEV = getOperandsToCreate(CurV, Ops); } if (CreatedSCEV) { insertValueToMap(CurV, CreatedSCEV); } else { // Queue CurV for SCEV creation, followed by its's operands which need to // be constructed first. Stack.emplace_back(CurV, true); for (Value *Op : Ops) Stack.emplace_back(Op, false); } } return getExistingSCEV(V); } const SCEV * ScalarEvolution::getOperandsToCreate(Value *V, SmallVectorImpl &Ops) { if (!isSCEVable(V->getType())) return getUnknown(V); if (Instruction *I = dyn_cast(V)) { // Don't attempt to analyze instructions in blocks that aren't // reachable. Such instructions don't matter, and they aren't required // to obey basic rules for definitions dominating uses which this // analysis depends on. if (!DT.isReachableFromEntry(I->getParent())) return getUnknown(PoisonValue::get(V->getType())); } else if (ConstantInt *CI = dyn_cast(V)) return getConstant(CI); else if (GlobalAlias *GA = dyn_cast(V)) { if (!GA->isInterposable()) { Ops.push_back(GA->getAliasee()); return nullptr; } return getUnknown(V); } else if (!isa(V)) return getUnknown(V); Operator *U = cast(V); if (auto BO = MatchBinaryOp(U, DT)) { bool IsConstArg = isa(BO->RHS); switch (BO->Opcode) { case Instruction::Add: { // For additions and multiplications, traverse add/mul chains for which we // can potentially create a single SCEV, to reduce the number of // get{Add,Mul}Expr calls. 
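      // E.g. for ((a + b) + c) + d, the canonical left-leaning form, the loop
      // below records d, c, b and a in Ops so that a single getAddExpr call
      // can be issued later instead of three nested ones.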
do { if (BO->Op) { if (BO->Op != V && getExistingSCEV(BO->Op)) { Ops.push_back(BO->Op); break; } } Ops.push_back(BO->RHS); auto NewBO = MatchBinaryOp(BO->LHS, DT); if (!NewBO || (NewBO->Opcode != Instruction::Add && NewBO->Opcode != Instruction::Sub)) { Ops.push_back(BO->LHS); break; } BO = NewBO; } while (true); return nullptr; } case Instruction::Mul: { do { if (BO->Op) { if (BO->Op != V && getExistingSCEV(BO->Op)) { Ops.push_back(BO->Op); break; } } Ops.push_back(BO->RHS); auto NewBO = MatchBinaryOp(BO->LHS, DT); if (!NewBO || NewBO->Opcode != Instruction::Mul) { Ops.push_back(BO->LHS); break; } BO = NewBO; } while (true); return nullptr; } case Instruction::Sub: case Instruction::UDiv: case Instruction::URem: break; case Instruction::AShr: case Instruction::Shl: case Instruction::Xor: if (!IsConstArg) return nullptr; break; case Instruction::And: case Instruction::Or: if (!IsConstArg && BO->LHS->getType()->isIntegerTy(1)) return nullptr; break; case Instruction::LShr: return getUnknown(V); default: llvm_unreachable("Unhandled binop"); break; } Ops.push_back(BO->LHS); Ops.push_back(BO->RHS); return nullptr; } switch (U->getOpcode()) { case Instruction::Trunc: case Instruction::ZExt: case Instruction::SExt: case Instruction::PtrToInt: Ops.push_back(U->getOperand(0)); return nullptr; case Instruction::BitCast: if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) { Ops.push_back(U->getOperand(0)); return nullptr; } return getUnknown(V); case Instruction::SDiv: case Instruction::SRem: Ops.push_back(U->getOperand(0)); Ops.push_back(U->getOperand(1)); return nullptr; case Instruction::GetElementPtr: assert(cast(U)->getSourceElementType()->isSized() && "GEP source element type must be sized"); for (Value *Index : U->operands()) Ops.push_back(Index); return nullptr; case Instruction::IntToPtr: return getUnknown(V); case Instruction::PHI: // Keep constructing SCEVs' for phis recursively for now. return nullptr; case Instruction::Select: { // Check if U is a select that can be simplified to a SCEVUnknown. 
auto CanSimplifyToUnknown = [this, U]() { if (U->getType()->isIntegerTy(1) || isa(U->getOperand(0))) return false; auto *ICI = dyn_cast(U->getOperand(0)); if (!ICI) return false; Value *LHS = ICI->getOperand(0); Value *RHS = ICI->getOperand(1); if (ICI->getPredicate() == CmpInst::ICMP_EQ || ICI->getPredicate() == CmpInst::ICMP_NE) { if (!(isa(RHS) && cast(RHS)->isZero())) return true; } else if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(U->getType())) return true; return false; }; if (CanSimplifyToUnknown()) return getUnknown(U); for (Value *Inc : U->operands()) Ops.push_back(Inc); return nullptr; break; } case Instruction::Call: case Instruction::Invoke: if (Value *RV = cast(U)->getReturnedArgOperand()) { Ops.push_back(RV); return nullptr; } if (auto *II = dyn_cast(U)) { switch (II->getIntrinsicID()) { case Intrinsic::abs: Ops.push_back(II->getArgOperand(0)); return nullptr; case Intrinsic::umax: case Intrinsic::umin: case Intrinsic::smax: case Intrinsic::smin: case Intrinsic::usub_sat: case Intrinsic::uadd_sat: Ops.push_back(II->getArgOperand(0)); Ops.push_back(II->getArgOperand(1)); return nullptr; case Intrinsic::start_loop_iterations: case Intrinsic::annotation: case Intrinsic::ptr_annotation: Ops.push_back(II->getArgOperand(0)); return nullptr; default: break; } } break; } return nullptr; } const SCEV *ScalarEvolution::createSCEV(Value *V) { if (!isSCEVable(V->getType())) return getUnknown(V); if (Instruction *I = dyn_cast(V)) { // Don't attempt to analyze instructions in blocks that aren't // reachable. Such instructions don't matter, and they aren't required // to obey basic rules for definitions dominating uses which this // analysis depends on. if (!DT.isReachableFromEntry(I->getParent())) return getUnknown(PoisonValue::get(V->getType())); } else if (ConstantInt *CI = dyn_cast(V)) return getConstant(CI); else if (GlobalAlias *GA = dyn_cast(V)) return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); else if (!isa(V)) return getUnknown(V); const SCEV *LHS; const SCEV *RHS; Operator *U = cast(V); if (auto BO = MatchBinaryOp(U, DT)) { switch (BO->Opcode) { case Instruction::Add: { // The simple thing to do would be to just call getSCEV on both operands // and call getAddExpr with the result. However if we're looking at a // bunch of things all added together, this can be quite inefficient, // because it leads to N-1 getAddExpr calls for N ultimate operands. // Instead, gather up all the operands and make a single getAddExpr call. // LLVM IR canonical form means we need only traverse the left operands. SmallVector AddOps; do { if (BO->Op) { if (auto *OpSCEV = getExistingSCEV(BO->Op)) { AddOps.push_back(OpSCEV); break; } // If a NUW or NSW flag can be applied to the SCEV for this // addition, then compute the SCEV for this addition by itself // with a separate call to getAddExpr. We need to do that // instead of pushing the operands of the addition onto AddOps, // since the flags are only known to apply to this particular // addition - they may not apply to other additions that can be // formed with operands from AddOps. 
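          // Illustrative case: in ((a +nsw b) + c) the nsw flag covers only
          // a + b, so a + b becomes its own getAddExpr(a, b, nsw) operand
          // here instead of being flattened into an unflagged {a, b, c} add.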
const SCEV *RHS = getSCEV(BO->RHS); SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); if (Flags != SCEV::FlagAnyWrap) { const SCEV *LHS = getSCEV(BO->LHS); if (BO->Opcode == Instruction::Sub) AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); else AddOps.push_back(getAddExpr(LHS, RHS, Flags)); break; } } if (BO->Opcode == Instruction::Sub) AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); else AddOps.push_back(getSCEV(BO->RHS)); auto NewBO = MatchBinaryOp(BO->LHS, DT); if (!NewBO || (NewBO->Opcode != Instruction::Add && NewBO->Opcode != Instruction::Sub)) { AddOps.push_back(getSCEV(BO->LHS)); break; } BO = NewBO; } while (true); return getAddExpr(AddOps); } case Instruction::Mul: { SmallVector MulOps; do { if (BO->Op) { if (auto *OpSCEV = getExistingSCEV(BO->Op)) { MulOps.push_back(OpSCEV); break; } SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); if (Flags != SCEV::FlagAnyWrap) { LHS = getSCEV(BO->LHS); RHS = getSCEV(BO->RHS); MulOps.push_back(getMulExpr(LHS, RHS, Flags)); break; } } MulOps.push_back(getSCEV(BO->RHS)); auto NewBO = MatchBinaryOp(BO->LHS, DT); if (!NewBO || NewBO->Opcode != Instruction::Mul) { MulOps.push_back(getSCEV(BO->LHS)); break; } BO = NewBO; } while (true); return getMulExpr(MulOps); } case Instruction::UDiv: LHS = getSCEV(BO->LHS); RHS = getSCEV(BO->RHS); return getUDivExpr(LHS, RHS); case Instruction::URem: LHS = getSCEV(BO->LHS); RHS = getSCEV(BO->RHS); return getURemExpr(LHS, RHS); case Instruction::Sub: { SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; if (BO->Op) Flags = getNoWrapFlagsFromUB(BO->Op); LHS = getSCEV(BO->LHS); RHS = getSCEV(BO->RHS); return getMinusSCEV(LHS, RHS, Flags); } case Instruction::And: // For an expression like x&255 that merely masks off the high bits, // use zext(trunc(x)) as the SCEV expression. if (ConstantInt *CI = dyn_cast(BO->RHS)) { if (CI->isZero()) return getSCEV(BO->RHS); if (CI->isMinusOne()) return getSCEV(BO->LHS); const APInt &A = CI->getValue(); // Instcombine's ShrinkDemandedConstant may strip bits out of // constants, obscuring what would otherwise be a low-bits mask. // Use computeKnownBits to compute what ShrinkDemandedConstant // knew about to reconstruct a low-bits mask value. unsigned LZ = A.countLeadingZeros(); unsigned TZ = A.countTrailingZeros(); unsigned BitWidth = A.getBitWidth(); KnownBits Known(BitWidth); computeKnownBits(BO->LHS, Known, getDataLayout(), 0, &AC, nullptr, &DT); APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); const SCEV *LHS = getSCEV(BO->LHS); const SCEV *ShiftedLHS = nullptr; if (auto *LHSMul = dyn_cast(LHS)) { if (auto *OpC = dyn_cast(LHSMul->getOperand(0))) { // For an expression like (x * 8) & 8, simplify the multiply. unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); unsigned GCD = std::min(MulZeros, TZ); APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); SmallVector MulOps; MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); } } if (!ShiftedLHS) ShiftedLHS = getUDivExpr(LHS, MulCount); return getMulExpr( getZeroExtendExpr( getTruncateExpr(ShiftedLHS, IntegerType::get(getContext(), BitWidth - LZ - TZ)), BO->LHS->getType()), MulCount); } } // Binary `and` is a bit-wise `umin`. 
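      // (On i1 this identity is exact: x & y is 1 only when both are 1,
      // which is precisely umin(x, y).)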
if (BO->LHS->getType()->isIntegerTy(1)) { LHS = getSCEV(BO->LHS); RHS = getSCEV(BO->RHS); return getUMinExpr(LHS, RHS); } break; case Instruction::Or: // If the RHS of the Or is a constant, we may have something like: // X*4+1 which got turned into X*4|1. Handle this as an Add so loop // optimizations will transparently handle this case. // // In order for this transformation to be safe, the LHS must be of the // form X*(2^n) and the Or constant must be less than 2^n. if (ConstantInt *CI = dyn_cast(BO->RHS)) { const SCEV *LHS = getSCEV(BO->LHS); const APInt &CIVal = CI->getValue(); if (GetMinTrailingZeros(LHS) >= (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { // Build a plain add SCEV. return getAddExpr(LHS, getSCEV(CI), (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); } } // Binary `or` is a bit-wise `umax`. if (BO->LHS->getType()->isIntegerTy(1)) { LHS = getSCEV(BO->LHS); RHS = getSCEV(BO->RHS); return getUMaxExpr(LHS, RHS); } break; case Instruction::Xor: if (ConstantInt *CI = dyn_cast(BO->RHS)) { // If the RHS of xor is -1, then this is a not operation. if (CI->isMinusOne()) return getNotSCEV(getSCEV(BO->LHS)); // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. // This is a variant of the check for xor with -1, and it handles // the case where instcombine has trimmed non-demanded bits out // of an xor with -1. if (auto *LBO = dyn_cast(BO->LHS)) if (ConstantInt *LCI = dyn_cast(LBO->getOperand(1))) if (LBO->getOpcode() == Instruction::And && LCI->getValue() == CI->getValue()) if (const SCEVZeroExtendExpr *Z = dyn_cast(getSCEV(BO->LHS))) { Type *UTy = BO->LHS->getType(); const SCEV *Z0 = Z->getOperand(); Type *Z0Ty = Z0->getType(); unsigned Z0TySize = getTypeSizeInBits(Z0Ty); // If C is a low-bits mask, the zero extend is serving to // mask off the high bits. Complement the operand and // re-apply the zext. if (CI->getValue().isMask(Z0TySize)) return getZeroExtendExpr(getNotSCEV(Z0), UTy); // If C is a single bit, it may be in the sign-bit position // before the zero-extend. In this case, represent the xor // using an add, which is equivalent, and re-apply the zext. APInt Trunc = CI->getValue().trunc(Z0TySize); if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && Trunc.isSignMask()) return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), UTy); } } break; case Instruction::Shl: // Turn shift left of a constant amount into a multiply. if (ConstantInt *SA = dyn_cast(BO->RHS)) { uint32_t BitWidth = cast(SA->getType())->getBitWidth(); // If the shift count is not less than the bitwidth, the result of // the shift is undefined. Don't try to analyze it, because the // resolution chosen here may differ from the resolution chosen in // other parts of the compiler. if (SA->getValue().uge(BitWidth)) break; // We can safely preserve the nuw flag in all cases. It's also safe to // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation // requires special handling. It can be preserved as long as we're not // left shifting by bitwidth - 1. 
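      // Illustrative i8 case: -1 <<nsw 7 == -128 is fine as a shift, but the
      // multiplier constant 1 << 7 is -128 in i8 and -1 * -128 == 128
      // signed-overflows, so nsw cannot be carried over for a shift by
      // bitwidth - 1 unless nuw also holds.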
auto Flags = SCEV::FlagAnyWrap; if (BO->Op) { auto MulFlags = getNoWrapFlagsFromUB(BO->Op); if ((MulFlags & SCEV::FlagNSW) && ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1))) Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW); if (MulFlags & SCEV::FlagNUW) Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW); } ConstantInt *X = ConstantInt::get( getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue())); return getMulExpr(getSCEV(BO->LHS), getConstant(X), Flags); } break; case Instruction::AShr: { // AShr X, C, where C is a constant. ConstantInt *CI = dyn_cast(BO->RHS); if (!CI) break; Type *OuterTy = BO->LHS->getType(); uint64_t BitWidth = getTypeSizeInBits(OuterTy); // If the shift count is not less than the bitwidth, the result of // the shift is undefined. Don't try to analyze it, because the // resolution chosen here may differ from the resolution chosen in // other parts of the compiler. if (CI->getValue().uge(BitWidth)) break; if (CI->isZero()) return getSCEV(BO->LHS); // shift by zero --> noop uint64_t AShrAmt = CI->getZExtValue(); Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); Operator *L = dyn_cast(BO->LHS); if (L && L->getOpcode() == Instruction::Shl) { // X = Shl A, n // Y = AShr X, m // Both n and m are constant. const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); if (L->getOperand(1) == BO->RHS) // For a two-shift sext-inreg, i.e. n = m, // use sext(trunc(x)) as the SCEV expression. return getSignExtendExpr( getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy); ConstantInt *ShlAmtCI = dyn_cast(L->getOperand(1)); if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) { uint64_t ShlAmt = ShlAmtCI->getZExtValue(); if (ShlAmt > AShrAmt) { // When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV // expression. We already checked that ShlAmt < BitWidth, so // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as // ShlAmt - AShrAmt < Amt. APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt, ShlAmt - AShrAmt); return getSignExtendExpr( getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy), getConstant(Mul)), OuterTy); } } } break; } } } switch (U->getOpcode()) { case Instruction::Trunc: return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); case Instruction::ZExt: return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); case Instruction::SExt: if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) { // The NSW flag of a subtract does not always survive the conversion to // A + (-1)*B. By pushing sign extension onto its operands we are much // more likely to preserve NSW and allow later AddRec optimisations. // // NOTE: This is effectively duplicating this logic from getSignExtend: // sext((A + B + ...)) --> (sext(A) + sext(B) + ...) // but by that point the NSW information has potentially been lost. if (BO->Opcode == Instruction::Sub && BO->IsNSW) { Type *Ty = U->getType(); auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty); auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty); return getMinusSCEV(V1, V2, SCEV::FlagNSW); } } return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); case Instruction::BitCast: // BitCasts are no-op casts so we just eliminate the cast. if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) return getSCEV(U->getOperand(0)); break; case Instruction::PtrToInt: { // Pointer to integer cast is straight-forward, so do model it. 
const SCEV *Op = getSCEV(U->getOperand(0)); Type *DstIntTy = U->getType(); // But only if effective SCEV (integer) type is wide enough to represent // all possible pointer values. const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy); if (isa(IntOp)) return getUnknown(V); return IntOp; } case Instruction::IntToPtr: // Just don't deal with inttoptr casts. return getUnknown(V); case Instruction::SDiv: // If both operands are non-negative, this is just an udiv. if (isKnownNonNegative(getSCEV(U->getOperand(0))) && isKnownNonNegative(getSCEV(U->getOperand(1)))) return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); break; case Instruction::SRem: // If both operands are non-negative, this is just an urem. if (isKnownNonNegative(getSCEV(U->getOperand(0))) && isKnownNonNegative(getSCEV(U->getOperand(1)))) return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); break; case Instruction::GetElementPtr: return createNodeForGEP(cast(U)); case Instruction::PHI: return createNodeForPHI(cast(U)); case Instruction::Select: return createNodeForSelectOrPHI(U, U->getOperand(0), U->getOperand(1), U->getOperand(2)); case Instruction::Call: case Instruction::Invoke: if (Value *RV = cast(U)->getReturnedArgOperand()) return getSCEV(RV); if (auto *II = dyn_cast(U)) { switch (II->getIntrinsicID()) { case Intrinsic::abs: return getAbsExpr( getSCEV(II->getArgOperand(0)), /*IsNSW=*/cast(II->getArgOperand(1))->isOne()); case Intrinsic::umax: LHS = getSCEV(II->getArgOperand(0)); RHS = getSCEV(II->getArgOperand(1)); return getUMaxExpr(LHS, RHS); case Intrinsic::umin: LHS = getSCEV(II->getArgOperand(0)); RHS = getSCEV(II->getArgOperand(1)); return getUMinExpr(LHS, RHS); case Intrinsic::smax: LHS = getSCEV(II->getArgOperand(0)); RHS = getSCEV(II->getArgOperand(1)); return getSMaxExpr(LHS, RHS); case Intrinsic::smin: LHS = getSCEV(II->getArgOperand(0)); RHS = getSCEV(II->getArgOperand(1)); return getSMinExpr(LHS, RHS); case Intrinsic::usub_sat: { const SCEV *X = getSCEV(II->getArgOperand(0)); const SCEV *Y = getSCEV(II->getArgOperand(1)); const SCEV *ClampedY = getUMinExpr(X, Y); return getMinusSCEV(X, ClampedY, SCEV::FlagNUW); } case Intrinsic::uadd_sat: { const SCEV *X = getSCEV(II->getArgOperand(0)); const SCEV *Y = getSCEV(II->getArgOperand(1)); const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y)); return getAddExpr(ClampedX, Y, SCEV::FlagNUW); } case Intrinsic::start_loop_iterations: case Intrinsic::annotation: case Intrinsic::ptr_annotation: // A start_loop_iterations or llvm.annotation or llvm.prt.annotation is // just eqivalent to the first operand for SCEV purposes. return getSCEV(II->getArgOperand(0)); default: break; } } break; } return getUnknown(V); } //===----------------------------------------------------------------------===// // Iteration Count Computation Code // const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount, bool Extend) { if (isa(ExitCount)) return getCouldNotCompute(); auto *ExitCountType = ExitCount->getType(); assert(ExitCountType->isIntegerTy()); if (!Extend) return getAddExpr(ExitCount, getOne(ExitCountType)); auto *WiderType = Type::getIntNTy(ExitCountType->getContext(), 1 + ExitCountType->getScalarSizeInBits()); return getAddExpr(getNoopOrZeroExtend(ExitCount, WiderType), getOne(WiderType)); } static unsigned getConstantTripCount(const SCEVConstant *ExitCount) { if (!ExitCount) return 0; ConstantInt *ExitConst = ExitCount->getValue(); // Guard against huge trip counts. 
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
  return getConstantTripCount(ExitCount);
}

unsigned
ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                           const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

const SCEV *ScalarEvolution::getConstantMaxTripCountFromArray(const Loop *L) {
  // We can't infer a bound from array accesses in an irregular loop.
  // FIXME: It's hard to infer a loop bound from arrays accessed in nested
  // loops.
  if (!L->isLoopSimplifyForm() || !L->isInnermost())
    return getCouldNotCompute();

  // FIXME: To keep the common case simple, we only analyze loops whose single
  // exiting block is the latch. This makes it easier to capture loops whose
  // memory accesses are executed on each iteration.
  const BasicBlock *LoopLatch = L->getLoopLatch();
  assert(LoopLatch && "See definition of simplify form loop.");
  if (L->getExitingBlock() != LoopLatch)
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();
  SmallVector<const SCEV *> InferCountColl;
  for (auto *BB : L->getBlocks()) {
    // At this point we know the loop is in single-exiting, simplified form.
    // Make sure any memory operation we infer from in these BBs is executed
    // on every iteration. First step: the max execution count of MemAccessBB
    // in the loop must represent the latch's max execution count.
    // If MemAccessBB does not dominate the latch, skip it.
    //            Entry
    //              │
    //        ┌─────▼─────┐
    //        │Loop Header◄─────┐
    //        └──┬──────┬─┘     │
    //           │      │       │
    //  ┌────────▼──┐ ┌─▼─────┐ │
    //  │MemAccessBB│ │OtherBB│ │
    //  └────────┬──┘ └─┬─────┘ │
    //           │      │       │
    //         ┌─▼──────▼─┐     │
    //         │Loop Latch├─────┘
    //         └────┬─────┘
    //              ▼
    //             Exit
    if (!DT.dominates(BB, LoopLatch))
      continue;

    for (Instruction &Inst : *BB) {
      // Find Memory Operation Instruction.
      auto *GEP = getLoadStorePointerOperand(&Inst);
      if (!GEP)
        continue;

      auto *ElemSize = dyn_cast<SCEVConstant>(getElementSize(&Inst));
      // Do not infer from scalar type, eg."ElemSize = sizeof()".
      if (!ElemSize)
        continue;

      // Use an existing polynomial recurrence on the trip count.
      auto *AddRec = dyn_cast<SCEVAddRecExpr>(getSCEV(GEP));
      if (!AddRec)
        continue;
      auto *ArrBase = dyn_cast<SCEVUnknown>(getPointerBase(AddRec));
      auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*this));
      if (!ArrBase || !Step)
        continue;
      assert(isLoopInvariant(ArrBase, L) && "See addrec definition");

      // Only handle { %array + step },
      // FIXME: {(SCEVAddRecExpr) + step } could not be analysed here.
      if (AddRec->getStart() != ArrBase)
        continue;

      // Bail out on memory access patterns that have gaps, on repeated
      // memory operations, and when the index of the GEP wraps around.
      if (Step->getAPInt().getActiveBits() > 32 ||
          Step->getAPInt().getZExtValue() !=
              ElemSize->getAPInt().getZExtValue() ||
          Step->isZero() || Step->getAPInt().isNegative())
        continue;

      // Only infer from stack arrays whose size is known.
      // Make sure the alloca instruction is not executed in the loop.
AllocaInst *AllocateInst = dyn_cast(ArrBase->getValue()); if (!AllocateInst || L->contains(AllocateInst->getParent())) continue; // Make sure only handle normal array. auto *Ty = dyn_cast(AllocateInst->getAllocatedType()); auto *ArrSize = dyn_cast(AllocateInst->getArraySize()); if (!Ty || !ArrSize || !ArrSize->isOne()) continue; // FIXME: Since gep indices are silently zext to the indexing type, // we will have a narrow gep index which wraps around rather than // increasing strictly, we shoule ensure that step is increasing // strictly by the loop iteration. // Now we can infer a max execution time by MemLength/StepLength. const SCEV *MemSize = getConstant(Step->getType(), DL.getTypeAllocSize(Ty)); auto *MaxExeCount = dyn_cast(getUDivCeilSCEV(MemSize, Step)); if (!MaxExeCount || MaxExeCount->getAPInt().getActiveBits() > 32) continue; // If the loop reaches the maximum number of executions, we can not // access bytes starting outside the statically allocated size without // being immediate UB. But it is allowed to enter loop header one more // time. auto *InferCount = dyn_cast( getAddExpr(MaxExeCount, getOne(MaxExeCount->getType()))); // Discard the maximum number of execution times under 32bits. if (!InferCount || InferCount->getAPInt().getActiveBits() > 32) continue; InferCountColl.push_back(InferCount); } } if (InferCountColl.size() == 0) return getCouldNotCompute(); return getUMinFromMismatchedTypes(InferCountColl); } unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) { SmallVector ExitingBlocks; L->getExitingBlocks(ExitingBlocks); Optional Res = None; for (auto *ExitingBB : ExitingBlocks) { unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB); if (!Res) Res = Multiple; Res = (unsigned)GreatestCommonDivisor64(*Res, Multiple); } return Res.value_or(1); } unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount) { if (ExitCount == getCouldNotCompute()) return 1; // Get the trip count const SCEV *TCExpr = getTripCountFromExitCount(ExitCount); const SCEVConstant *TC = dyn_cast(TCExpr); if (!TC) // Attempt to factor more general cases. Returns the greatest power of // two divisor. If overflow happens, the trip count expression is still // divisible by the greatest power of 2 divisor returned. return 1U << std::min((uint32_t)31, GetMinTrailingZeros(applyLoopGuards(TCExpr, L))); ConstantInt *Result = TC->getValue(); // Guard against huge trip counts (this requires checking // for zero to handle the case where the trip count == -1 and the // addition wraps). if (!Result || Result->getValue().getActiveBits() > 32 || Result->getValue().getActiveBits() == 0) return 1; return (unsigned)Result->getZExtValue(); } /// Returns the largest constant divisor of the trip count of this loop as a /// normal unsigned value, if possible. This means that the actual trip count is /// always a multiple of the returned value (don't forget the trip count could /// very well be zero as well!). /// /// Returns 1 if the trip count is unknown or not guaranteed to be the /// multiple of a constant (which is also the case if the trip count is simply /// constant, use getSmallConstantTripCount for that case), Will also return 1 /// if the trip count is very large (>= 2^32). /// /// As explained in the comments for getSmallConstantTripCount, this assumes /// that control exits the loop via ExitingBlock. 
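/// For example (illustrative): with a trip count of the form 8 * %n and
/// nothing further known about %n, the returned multiple is 8.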
unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, const BasicBlock *ExitingBlock) { assert(ExitingBlock && "Must pass a non-null exiting block!"); assert(L->isLoopExiting(ExitingBlock) && "Exiting block must actually branch out of the loop!"); const SCEV *ExitCount = getExitCount(L, ExitingBlock); return getSmallConstantTripMultiple(L, ExitCount); } const SCEV *ScalarEvolution::getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind) { switch (Kind) { case Exact: case SymbolicMaximum: return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); case ConstantMaximum: return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this); }; llvm_unreachable("Invalid ExitCountKind!"); } const SCEV * ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, SmallVector &Preds) { return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds); } const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L, ExitCountKind Kind) { switch (Kind) { case Exact: return getBackedgeTakenInfo(L).getExact(L, this); case ConstantMaximum: return getBackedgeTakenInfo(L).getConstantMax(this); case SymbolicMaximum: return getBackedgeTakenInfo(L).getSymbolicMax(L, this); }; llvm_unreachable("Invalid ExitCountKind!"); } bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { return getBackedgeTakenInfo(L).isConstantMaxOrZero(this); } /// Push PHI nodes in the header of the given loop onto the given Worklist. static void PushLoopPHIs(const Loop *L, SmallVectorImpl &Worklist, SmallPtrSetImpl &Visited) { BasicBlock *Header = L->getHeader(); // Push all Loop-header PHIs onto the Worklist stack. for (PHINode &PN : Header->phis()) if (Visited.insert(&PN).second) Worklist.push_back(&PN); } const ScalarEvolution::BackedgeTakenInfo & ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { auto &BTI = getBackedgeTakenInfo(L); if (BTI.hasFullInfo()) return BTI; auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); if (!Pair.second) return Pair.first->second; BackedgeTakenInfo Result = computeBackedgeTakenCount(L, /*AllowPredicates=*/true); return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); } ScalarEvolution::BackedgeTakenInfo & ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { // Initially insert an invalid entry for this loop. If the insertion // succeeds, proceed to actually compute a backedge-taken count and // update the value. The temporary CouldNotCompute value tells SCEV // code elsewhere that it shouldn't attempt to request a new // backedge-taken count, which could result in infinite recursion. std::pair::iterator, bool> Pair = BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); if (!Pair.second) return Pair.first->second; // computeBackedgeTakenCount may allocate memory for its result. Inserting it // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result // must be cleared in this scope. BackedgeTakenInfo Result = computeBackedgeTakenCount(L); // In product build, there are no usage of statistic. 
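  // The (void) casts below only serve to silence unused-variable warnings in
  // builds where the statistics code below is compiled out.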
(void)NumTripCountsComputed; (void)NumTripCountsNotComputed; #if LLVM_ENABLE_STATS || !defined(NDEBUG) const SCEV *BEExact = Result.getExact(L, this); if (BEExact != getCouldNotCompute()) { assert(isLoopInvariant(BEExact, L) && isLoopInvariant(Result.getConstantMax(this), L) && "Computed backedge-taken count isn't loop invariant for loop!"); ++NumTripCountsComputed; } else if (Result.getConstantMax(this) == getCouldNotCompute() && isa(L->getHeader()->begin())) { // Only count loops that have phi nodes as not being computable. ++NumTripCountsNotComputed; } #endif // LLVM_ENABLE_STATS || !defined(NDEBUG) // Now that we know more about the trip count for this loop, forget any // existing SCEV values for PHI nodes in this loop since they are only // conservative estimates made without the benefit of trip count // information. This invalidation is not necessary for correctness, and is // only done to produce more precise results. if (Result.hasAnyInfo()) { // Invalidate any expression using an addrec in this loop. SmallVector ToForget; auto LoopUsersIt = LoopUsers.find(L); if (LoopUsersIt != LoopUsers.end()) append_range(ToForget, LoopUsersIt->second); forgetMemoizedResults(ToForget); // Invalidate constant-evolved loop header phis. for (PHINode &PN : L->getHeader()->phis()) ConstantEvolutionLoopExitValue.erase(&PN); } // Re-lookup the insert position, since the call to // computeBackedgeTakenCount above could result in a // recusive call to getBackedgeTakenInfo (on a different // loop), which would invalidate the iterator computed // earlier. return BackedgeTakenCounts.find(L)->second = std::move(Result); } void ScalarEvolution::forgetAllLoops() { // This method is intended to forget all info about loops. It should // invalidate caches as if the following happened: // - The trip counts of all loops have changed arbitrarily // - Every llvm::Value has been updated in place to produce a different // result. BackedgeTakenCounts.clear(); PredicatedBackedgeTakenCounts.clear(); BECountUsers.clear(); LoopPropertiesCache.clear(); ConstantEvolutionLoopExitValue.clear(); ValueExprMap.clear(); ValuesAtScopes.clear(); ValuesAtScopesUsers.clear(); LoopDispositions.clear(); BlockDispositions.clear(); UnsignedRanges.clear(); SignedRanges.clear(); ExprValueMap.clear(); HasRecMap.clear(); MinTrailingZerosCache.clear(); PredicatedSCEVRewrites.clear(); } void ScalarEvolution::forgetLoop(const Loop *L) { SmallVector LoopWorklist(1, L); SmallVector Worklist; SmallPtrSet Visited; SmallVector ToForget; // Iterate over all the loops and sub-loops to drop SCEV information. while (!LoopWorklist.empty()) { auto *CurrL = LoopWorklist.pop_back_val(); // Drop any stored trip count value. forgetBackedgeTakenCounts(CurrL, /* Predicated */ false); forgetBackedgeTakenCounts(CurrL, /* Predicated */ true); // Drop information about predicated SCEV rewrites for this loop. for (auto I = PredicatedSCEVRewrites.begin(); I != PredicatedSCEVRewrites.end();) { std::pair Entry = I->first; if (Entry.second == CurrL) PredicatedSCEVRewrites.erase(I++); else ++I; } auto LoopUsersItr = LoopUsers.find(CurrL); if (LoopUsersItr != LoopUsers.end()) { ToForget.insert(ToForget.end(), LoopUsersItr->second.begin(), LoopUsersItr->second.end()); } // Drop information about expressions based on loop-header PHIs. 
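    // Walk the def-use chains rooted at the loop-header PHIs so that every
    // cached SCEV value that (transitively) depends on them is invalidated
    // as well.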
PushLoopPHIs(CurrL, Worklist, Visited); while (!Worklist.empty()) { Instruction *I = Worklist.pop_back_val(); ValueExprMapType::iterator It = ValueExprMap.find_as(static_cast(I)); if (It != ValueExprMap.end()) { eraseValueFromMap(It->first); ToForget.push_back(It->second); if (PHINode *PN = dyn_cast(I)) ConstantEvolutionLoopExitValue.erase(PN); } PushDefUseChildren(I, Worklist, Visited); } LoopPropertiesCache.erase(CurrL); // Forget all contained loops too, to avoid dangling entries in the // ValuesAtScopes map. LoopWorklist.append(CurrL->begin(), CurrL->end()); } forgetMemoizedResults(ToForget); } void ScalarEvolution::forgetTopmostLoop(const Loop *L) { forgetLoop(L->getOutermostLoop()); } void ScalarEvolution::forgetValue(Value *V) { Instruction *I = dyn_cast(V); if (!I) return; // Drop information about expressions based on loop-header PHIs. SmallVector Worklist; SmallPtrSet Visited; SmallVector ToForget; Worklist.push_back(I); Visited.insert(I); while (!Worklist.empty()) { I = Worklist.pop_back_val(); ValueExprMapType::iterator It = ValueExprMap.find_as(static_cast(I)); if (It != ValueExprMap.end()) { eraseValueFromMap(It->first); ToForget.push_back(It->second); if (PHINode *PN = dyn_cast(I)) ConstantEvolutionLoopExitValue.erase(PN); } PushDefUseChildren(I, Worklist, Visited); } forgetMemoizedResults(ToForget); } void ScalarEvolution::forgetLoopDispositions(const Loop *L) { LoopDispositions.clear(); } /// Get the exact loop backedge taken count considering all loop exits. A /// computable result can only be returned for loops with all exiting blocks /// dominating the latch. howFarToZero assumes that the limit of each loop test /// is never skipped. This is a valid assumption as long as the loop exits via /// that test. For precise results, it is the caller's responsibility to specify /// the relevant loop exiting block using getExact(ExitingBlock, SE). const SCEV * ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, SmallVector *Preds) const { // If any exits were not computable, the loop is not computable. if (!isComplete() || ExitNotTaken.empty()) return SE->getCouldNotCompute(); const BasicBlock *Latch = L->getLoopLatch(); // All exiting blocks we have collected must dominate the only backedge. if (!Latch) return SE->getCouldNotCompute(); // All exiting blocks we have gathered dominate loop's latch, so exact trip // count is simply a minimum out of all these calculated exit counts. SmallVector Ops; for (const auto &ENT : ExitNotTaken) { const SCEV *BECount = ENT.ExactNotTaken; assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && "We should only have known counts for exiting blocks that dominate " "latch!"); Ops.push_back(BECount); if (Preds) for (const auto *P : ENT.Predicates) Preds->push_back(P); assert((Preds || ENT.hasAlwaysTruePredicate()) && "Predicate should be always true!"); } // If an earlier exit exits on the first iteration (exit count zero), then // a later poison exit count should not propagate into the result. This are // exactly the semantics provided by umin_seq. return SE->getUMinFromMismatchedTypes(Ops, /* Sequential */ true); } /// Get the exact not taken count for this loop exit. 
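/// Returns CouldNotCompute if no exit count is known for ExitingBlock, or if
/// the known count is guarded by predicates that are not known to always
/// hold.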
const SCEV * ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock, ScalarEvolution *SE) const { for (const auto &ENT : ExitNotTaken) if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) return ENT.ExactNotTaken; return SE->getCouldNotCompute(); } const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax( const BasicBlock *ExitingBlock, ScalarEvolution *SE) const { for (const auto &ENT : ExitNotTaken) if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) return ENT.MaxNotTaken; return SE->getCouldNotCompute(); } /// getConstantMax - Get the constant max backedge taken count for the loop. const SCEV * ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const { auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { return !ENT.hasAlwaysTruePredicate(); }; if (!getConstantMax() || any_of(ExitNotTaken, PredicateNotAlwaysTrue)) return SE->getCouldNotCompute(); assert((isa(getConstantMax()) || isa(getConstantMax())) && "No point in having a non-constant max backedge taken count!"); return getConstantMax(); } const SCEV * ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L, ScalarEvolution *SE) { if (!SymbolicMax) SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L); return SymbolicMax; } bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero( ScalarEvolution *SE) const { auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { return !ENT.hasAlwaysTruePredicate(); }; return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); } ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) : ExitLimit(E, E, false, None) { } ScalarEvolution::ExitLimit::ExitLimit( const SCEV *E, const SCEV *M, bool MaxOrZero, ArrayRef *> PredSetList) : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { // If we prove the max count is zero, so is the symbolic bound. This happens // in practice due to differences in a) how context sensitive we've chosen // to be and b) how we reason about bounds impied by UB. if (MaxNotTaken->isZero()) ExactNotTaken = MaxNotTaken; assert((isa(ExactNotTaken) || !isa(MaxNotTaken)) && "Exact is not allowed to be less precise than Max"); assert((isa(MaxNotTaken) || isa(MaxNotTaken)) && "No point in having a non-constant max backedge taken count!"); for (const auto *PredSet : PredSetList) for (const auto *P : *PredSet) addPredicate(P); assert((isa(E) || !E->getType()->isPointerTy()) && "Backedge count should be int"); assert((isa(M) || !M->getType()->isPointerTy()) && "Max backedge count should be int"); } ScalarEvolution::ExitLimit::ExitLimit( const SCEV *E, const SCEV *M, bool MaxOrZero, const SmallPtrSetImpl &PredSet) : ExitLimit(E, M, MaxOrZero, {&PredSet}) { } ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, bool MaxOrZero) : ExitLimit(E, M, MaxOrZero, None) { } /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each /// computable exit into a persistent ExitNotTakenInfo array. 
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( ArrayRef ExitCounts, bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero) : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) { using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; ExitNotTaken.reserve(ExitCounts.size()); std::transform( ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), [&](const EdgeExitInfo &EEI) { BasicBlock *ExitBB = EEI.first; const ExitLimit &EL = EEI.second; return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken, EL.Predicates); }); assert((isa(ConstantMax) || isa(ConstantMax)) && "No point in having a non-constant max backedge taken count!"); } /// Compute the number of times the backedge of the specified loop will execute. ScalarEvolution::BackedgeTakenInfo ScalarEvolution::computeBackedgeTakenCount(const Loop *L, bool AllowPredicates) { SmallVector ExitingBlocks; L->getExitingBlocks(ExitingBlocks); using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; SmallVector ExitCounts; bool CouldComputeBECount = true; BasicBlock *Latch = L->getLoopLatch(); // may be NULL. const SCEV *MustExitMaxBECount = nullptr; const SCEV *MayExitMaxBECount = nullptr; bool MustExitMaxOrZero = false; // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts // and compute maxBECount. // Do a union of all the predicates here. for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { BasicBlock *ExitBB = ExitingBlocks[i]; // We canonicalize untaken exits to br (constant), ignore them so that // proving an exit untaken doesn't negatively impact our ability to reason // about the loop as whole. if (auto *BI = dyn_cast(ExitBB->getTerminator())) if (auto *CI = dyn_cast(BI->getCondition())) { bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); if (ExitIfTrue == CI->isZero()) continue; } ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); assert((AllowPredicates || EL.Predicates.empty()) && "Predicated exit limit when predicates are not allowed!"); // 1. For each exit that can be computed, add an entry to ExitCounts. // CouldComputeBECount is true only if all exits can be computed. if (EL.ExactNotTaken == getCouldNotCompute()) // We couldn't compute an exact value for this exit, so // we won't be able to compute an exact value for the loop. CouldComputeBECount = false; else ExitCounts.emplace_back(ExitBB, EL); // 2. Derive the loop's MaxBECount from each exit's max number of // non-exiting iterations. Partition the loop exits into two kinds: // LoopMustExits and LoopMayExits. // // If the exit dominates the loop latch, it is a LoopMustExit otherwise it // is a LoopMayExit. If any computable LoopMustExit is found, then // MaxBECount is the minimum EL.MaxNotTaken of computable // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum // EL.MaxNotTaken, where CouldNotCompute is considered greater than any // computable EL.MaxNotTaken. if (EL.MaxNotTaken != getCouldNotCompute() && Latch && DT.dominates(ExitBB, Latch)) { if (!MustExitMaxBECount) { MustExitMaxBECount = EL.MaxNotTaken; MustExitMaxOrZero = EL.MaxOrZero; } else { MustExitMaxBECount = getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); } } else if (MayExitMaxBECount != getCouldNotCompute()) { if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) MayExitMaxBECount = EL.MaxNotTaken; else { MayExitMaxBECount = getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); } } } const SCEV *MaxBECount = MustExitMaxBECount ? 
MustExitMaxBECount : (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); // The loop backedge will be taken the maximum or zero times if there's // a single exit that must be taken the maximum or zero times. bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); // Remember which SCEVs are used in exit limits for invalidation purposes. // We only care about non-constant SCEVs here, so we can ignore EL.MaxNotTaken // and MaxBECount, which must be SCEVConstant. for (const auto &Pair : ExitCounts) if (!isa(Pair.second.ExactNotTaken)) BECountUsers[Pair.second.ExactNotTaken].insert({L, AllowPredicates}); return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, MaxBECount, MaxOrZero); } ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, bool AllowPredicates) { assert(L->contains(ExitingBlock) && "Exit count for non-loop block?"); // If our exiting block does not dominate the latch, then its connection with // loop's exit limit may be far from trivial. const BasicBlock *Latch = L->getLoopLatch(); if (!Latch || !DT.dominates(ExitingBlock, Latch)) return getCouldNotCompute(); bool IsOnlyExit = (L->getExitingBlock() != nullptr); Instruction *Term = ExitingBlock->getTerminator(); if (BranchInst *BI = dyn_cast(Term)) { assert(BI->isConditional() && "If unconditional, it can't be in loop!"); bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) && "It should have one successor in loop and one exit block!"); // Proceed to the next level to examine the exit condition expression. return computeExitLimitFromCond( L, BI->getCondition(), ExitIfTrue, /*ControlsExit=*/IsOnlyExit, AllowPredicates); } if (SwitchInst *SI = dyn_cast(Term)) { // For switch, make sure that there is a single exit from the loop. BasicBlock *Exit = nullptr; for (auto *SBB : successors(ExitingBlock)) if (!L->contains(SBB)) { if (Exit) // Multiple exit successors. 
return getCouldNotCompute(); Exit = SBB; } assert(Exit && "Exiting block must have at least one exit"); return computeExitLimitFromSingleExitSwitch(L, SI, Exit, /*ControlsExit=*/IsOnlyExit); } return getCouldNotCompute(); } ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsExit, bool AllowPredicates) { ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates); } Optional ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsExit, bool AllowPredicates) { (void)this->L; (void)this->ExitIfTrue; (void)this->AllowPredicates; assert(this->L == L && this->ExitIfTrue == ExitIfTrue && this->AllowPredicates == AllowPredicates && "Variance in assumed invariant key components!"); auto Itr = TripCountMap.find({ExitCond, ControlsExit}); if (Itr == TripCountMap.end()) return None; return Itr->second; } void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsExit, bool AllowPredicates, const ExitLimit &EL) { assert(this->L == L && this->ExitIfTrue == ExitIfTrue && this->AllowPredicates == AllowPredicates && "Variance in assumed invariant key components!"); auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); assert(InsertResult.second && "Expected successful insertion!"); (void)InsertResult; (void)ExitIfTrue; } ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsExit, bool AllowPredicates) { if (auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) return *MaybeEL; ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates); Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); return EL; } ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsExit, bool AllowPredicates) { // Handle BinOp conditions (And, Or). if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp( Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) return *LimitFromBinOp; // With an icmp, it may be feasible to compute an exact backedge-taken count. // Proceed to the next level to examine the icmp. if (ICmpInst *ExitCondICmp = dyn_cast(ExitCond)) { ExitLimit EL = computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); if (EL.hasFullInfo() || !AllowPredicates) return EL; // Try again, but use SCEV predicates this time. return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, /*AllowPredicates=*/true); } // Check for a constant condition. These are normally stripped out by // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to // preserve the CFG and is temporarily leaving constant conditions // in place. if (ConstantInt *CI = dyn_cast(ExitCond)) { if (ExitIfTrue == !CI->getZExtValue()) // The backedge is always taken. return getCouldNotCompute(); else // The backedge is never taken. return getZero(CI->getType()); } // If we're exiting based on the overflow flag of an x.with.overflow intrinsic // with a constant step, we can form an equivalent icmp predicate and figure // out how many iterations will be taken before we exit. 
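  // Illustrative example: given
  //   %s  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %iv, i32 1)
  //   %ov = extractvalue { i32, i1 } %s, 1
  //   br i1 %ov, label %exit, label %loop
  // the signed no-wrap region of "add 1" says the exit is taken exactly when
  // %iv == INT_MAX, and the icmp-based logic below can often solve that.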
const WithOverflowInst *WO; const APInt *C; if (match(ExitCond, m_ExtractValue<1>(m_WithOverflowInst(WO))) && match(WO->getRHS(), m_APInt(C))) { ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(WO->getBinaryOp(), *C, WO->getNoWrapKind()); CmpInst::Predicate Pred; APInt NewRHSC, Offset; NWR.getEquivalentICmp(Pred, NewRHSC, Offset); if (!ExitIfTrue) Pred = ICmpInst::getInversePredicate(Pred); auto *LHS = getSCEV(WO->getLHS()); if (Offset != 0) LHS = getAddExpr(LHS, getConstant(Offset)); auto EL = computeExitLimitFromICmp(L, Pred, LHS, getConstant(NewRHSC), ControlsExit, AllowPredicates); if (EL.hasAnyInfo()) return EL; } // If it's not an integer or pointer comparison then compute it the hard way. return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); } Optional ScalarEvolution::computeExitLimitFromCondFromBinOp( ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsExit, bool AllowPredicates) { // Check if the controlling expression for this loop is an And or Or. Value *Op0, *Op1; bool IsAnd = false; if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) IsAnd = true; else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) IsAnd = false; else return None; // EitherMayExit is true in these two cases: // br (and Op0 Op1), loop, exit // br (or Op0 Op1), exit, loop bool EitherMayExit = IsAnd ^ ExitIfTrue; ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue, ControlsExit && !EitherMayExit, AllowPredicates); ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue, ControlsExit && !EitherMayExit, AllowPredicates); // Be robust against unsimplified IR for the form "op i1 X, NeutralElement" const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd); if (isa(Op1)) return Op1 == NeutralElement ? EL0 : EL1; if (isa(Op0)) return Op0 == NeutralElement ? EL1 : EL0; const SCEV *BECount = getCouldNotCompute(); const SCEV *MaxBECount = getCouldNotCompute(); if (EitherMayExit) { // Both conditions must be same for the loop to continue executing. // Choose the less conservative count. if (EL0.ExactNotTaken != getCouldNotCompute() && EL1.ExactNotTaken != getCouldNotCompute()) { BECount = getUMinFromMismatchedTypes( EL0.ExactNotTaken, EL1.ExactNotTaken, /*Sequential=*/!isa(ExitCond)); } if (EL0.MaxNotTaken == getCouldNotCompute()) MaxBECount = EL1.MaxNotTaken; else if (EL1.MaxNotTaken == getCouldNotCompute()) MaxBECount = EL0.MaxNotTaken; else MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); } else { // Both conditions must be same at the same time for the loop to exit. // For now, be conservative. if (EL0.ExactNotTaken == EL1.ExactNotTaken) BECount = EL0.ExactNotTaken; } // There are cases (e.g. PR26207) where computeExitLimitFromCond is able // to be more aggressive when computing BECount than when computing // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken // to not. 
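  // When that happens, tighten the max count using the unsigned range of the
  // exact count, so the reported maximum is never looser than what BECount
  // already implies.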
if (isa(MaxBECount) && !isa(BECount)) MaxBECount = getConstant(getUnsignedRangeMax(BECount)); return ExitLimit(BECount, MaxBECount, false, { &EL0.Predicates, &EL1.Predicates }); } ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(const Loop *L, ICmpInst *ExitCond, bool ExitIfTrue, bool ControlsExit, bool AllowPredicates) { // If the condition was exit on true, convert the condition to exit on false ICmpInst::Predicate Pred; if (!ExitIfTrue) Pred = ExitCond->getPredicate(); else Pred = ExitCond->getInversePredicate(); const ICmpInst::Predicate OriginalPred = Pred; const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsExit, AllowPredicates); if (EL.hasAnyInfo()) return EL; auto *ExhaustiveCount = computeExitCountExhaustively(L, ExitCond, ExitIfTrue); if (!isa(ExhaustiveCount)) return ExhaustiveCount; return computeShiftCompareExitLimit(ExitCond->getOperand(0), ExitCond->getOperand(1), L, OriginalPred); } ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(const Loop *L, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, bool ControlsExit, bool AllowPredicates) { // Try to evaluate any dependencies out of the loop. LHS = getSCEVAtScope(LHS, L); RHS = getSCEVAtScope(RHS, L); // At this point, we would like to compute how many iterations of the // loop the predicate will return true for these inputs. if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { // If there is a loop-invariant, force it into the RHS. std::swap(LHS, RHS); Pred = ICmpInst::getSwappedPredicate(Pred); } bool ControllingFiniteLoop = ControlsExit && loopHasNoAbnormalExits(L) && loopIsFiniteByAssumption(L); // Simplify the operands before analyzing them. (void)SimplifyICmpOperands(Pred, LHS, RHS, /*Depth=*/0, (EnableFiniteLoopControl ? ControllingFiniteLoop : false)); // If we have a comparison of a chrec against a constant, try to use value // ranges to answer this query. if (const SCEVConstant *RHSC = dyn_cast(RHS)) if (const SCEVAddRecExpr *AddRec = dyn_cast(LHS)) if (AddRec->getLoop() == L) { // Form the constant range. ConstantRange CompRange = ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); if (!isa(Ret)) return Ret; } // If this loop must exit based on this condition (or execute undefined // behaviour), and we can prove the test sequence produced must repeat // the same values on self-wrap of the IV, then we can infer that IV // doesn't self wrap because if it did, we'd have an infinite (undefined) // loop. if (ControllingFiniteLoop && isLoopInvariant(RHS, L)) { // TODO: We can peel off any functions which are invertible *in L*. Loop // invariant terms are effectively constants for our purposes here. 
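    // Illustrative example: in a finite loop whose only exit tests
    //   (zext i8 %iv to i32) != %n
    // a self-wrapping %iv would revisit exactly the same sequence of test
    // values, so the loop could never terminate; hence %iv cannot self-wrap
    // and the addrec below can be marked nw.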
auto *InnerLHS = LHS; if (auto *ZExt = dyn_cast(LHS)) InnerLHS = ZExt->getOperand(); if (const SCEVAddRecExpr *AR = dyn_cast(InnerLHS)) { auto *StrideC = dyn_cast(AR->getStepRecurrence(*this)); if (!AR->hasNoSelfWrap() && AR->getLoop() == L && AR->isAffine() && StrideC && StrideC->getAPInt().isPowerOf2()) { auto Flags = AR->getNoWrapFlags(); Flags = setFlags(Flags, SCEV::FlagNW); SmallVector Operands{AR->operands()}; Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); setNoWrapFlags(const_cast(AR), Flags); } } } switch (Pred) { case ICmpInst::ICMP_NE: { // while (X != Y) // Convert to: while (X-Y != 0) if (LHS->getType()->isPointerTy()) { LHS = getLosslessPtrToIntExpr(LHS); if (isa(LHS)) return LHS; } if (RHS->getType()->isPointerTy()) { RHS = getLosslessPtrToIntExpr(RHS); if (isa(RHS)) return RHS; } ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, AllowPredicates); if (EL.hasAnyInfo()) return EL; break; } case ICmpInst::ICMP_EQ: { // while (X == Y) // Convert to: while (X-Y == 0) if (LHS->getType()->isPointerTy()) { LHS = getLosslessPtrToIntExpr(LHS); if (isa(LHS)) return LHS; } if (RHS->getType()->isPointerTy()) { RHS = getLosslessPtrToIntExpr(RHS); if (isa(RHS)) return RHS; } ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); if (EL.hasAnyInfo()) return EL; break; } case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_ULT: { // while (X < Y) bool IsSigned = Pred == ICmpInst::ICMP_SLT; ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, AllowPredicates); if (EL.hasAnyInfo()) return EL; break; } case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_UGT: { // while (X > Y) bool IsSigned = Pred == ICmpInst::ICMP_SGT; ExitLimit EL = howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, AllowPredicates); if (EL.hasAnyInfo()) return EL; break; } default: break; } return getCouldNotCompute(); } ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, SwitchInst *Switch, BasicBlock *ExitingBlock, bool ControlsExit) { assert(!L->contains(ExitingBlock) && "Not an exiting block!"); // Give up if the exit is the default dest of a switch. if (Switch->getDefaultDest() == ExitingBlock) return getCouldNotCompute(); assert(L->contains(Switch->getDefaultDest()) && "Default case must not exit the loop!"); const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); // while (X != Y) --> while (X-Y != 0) ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); if (EL.hasAnyInfo()) return EL; return getCouldNotCompute(); } static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE) { const SCEV *InVal = SE.getConstant(C); const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); assert(isa(Val) && "Evaluation of SCEV at constant didn't fold correctly?"); return cast(Val)->getValue(); } ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit( Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) { ConstantInt *RHS = dyn_cast(RHSV); if (!RHS) return getCouldNotCompute(); const BasicBlock *Latch = L->getLoopLatch(); if (!Latch) return getCouldNotCompute(); const BasicBlock *Predecessor = L->getLoopPredecessor(); if (!Predecessor) return getCouldNotCompute(); // Return true if V is of the form "LHS `shift_op` ". // Return LHS in OutLHS and shift_opt in OutOpCode. 
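  // e.g. "lshr i32 %x, 3" matches with OutLHS = %x and OutOpCode = LShr; a
  // shift by zero or by a non-constant amount does not match.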
auto MatchPositiveShift = [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) { using namespace PatternMatch; ConstantInt *ShiftAmt; if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) OutOpCode = Instruction::LShr; else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) OutOpCode = Instruction::AShr; else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) OutOpCode = Instruction::Shl; else return false; return ShiftAmt->getValue().isStrictlyPositive(); }; // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in // // loop: // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ] // %iv.shifted = lshr i32 %iv, // // Return true on a successful match. Return the corresponding PHI node (%iv // above) in PNOut and the opcode of the shift operation in OpCodeOut. auto MatchShiftRecurrence = [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) { Optional PostShiftOpCode; { Instruction::BinaryOps OpC; Value *V; // If we encounter a shift instruction, "peel off" the shift operation, // and remember that we did so. Later when we inspect %iv's backedge // value, we will make sure that the backedge value uses the same // operation. // // Note: the peeled shift operation does not have to be the same // instruction as the one feeding into the PHI's backedge value. We only // really care about it being the same *kind* of shift instruction -- // that's all that is required for our later inferences to hold. if (MatchPositiveShift(LHS, V, OpC)) { PostShiftOpCode = OpC; LHS = V; } } PNOut = dyn_cast(LHS); if (!PNOut || PNOut->getParent() != L->getHeader()) return false; Value *BEValue = PNOut->getIncomingValueForBlock(Latch); Value *OpLHS; return // The backedge value for the PHI node must be a shift by a positive // amount MatchPositiveShift(BEValue, OpLHS, OpCodeOut) && // of the PHI node itself OpLHS == PNOut && // and the kind of shift should be match the kind of shift we peeled // off, if any. (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut); }; PHINode *PN; Instruction::BinaryOps OpCode; if (!MatchShiftRecurrence(LHS, PN, OpCode)) return getCouldNotCompute(); const DataLayout &DL = getDataLayout(); // The key rationale for this optimization is that for some kinds of shift // recurrences, the value of the recurrence "stabilizes" to either 0 or -1 // within a finite number of iterations. If the condition guarding the // backedge (in the sense that the backedge is taken if the condition is true) // is false for the value the shift recurrence stabilizes to, then we know // that the backedge is taken only a finite number of times. ConstantInt *StableValue = nullptr; switch (OpCode) { default: llvm_unreachable("Impossible case!"); case Instruction::AShr: { // {K,ashr,} stabilizes to signum(K) in at most // bitwidth(K) iterations. Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC, Predecessor->getTerminator(), &DT); auto *Ty = cast(RHS->getType()); if (Known.isNonNegative()) StableValue = ConstantInt::get(Ty, 0); else if (Known.isNegative()) StableValue = ConstantInt::get(Ty, -1, true); else return getCouldNotCompute(); break; } case Instruction::LShr: case Instruction::Shl: // Both {K,lshr,} and {K,shl,} // stabilize to 0 in at most bitwidth(K) iterations. 
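    // For example, {7,lshr,1} produces 7, 3, 1, 0, ... and {1,shl,1} produces
    // 1, 2, 4, ..., 2^(bitwidth-1), 0; both reach the stable value 0.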
StableValue = ConstantInt::get(cast(RHS->getType()), 0); break; } auto *Result = ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); assert(Result->getType()->isIntegerTy(1) && "Otherwise cannot be an operand to a branch instruction"); if (Result->isZeroValue()) { unsigned BitWidth = getTypeSizeInBits(RHS->getType()); const SCEV *UpperBound = getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); return ExitLimit(getCouldNotCompute(), UpperBound, false); } return getCouldNotCompute(); } /// Return true if we can constant fold an instruction of the specified type, /// assuming that all operands were constants. static bool CanConstantFold(const Instruction *I) { if (isa(I) || isa(I) || isa(I) || isa(I) || isa(I) || isa(I) || isa(I)) return true; if (const CallInst *CI = dyn_cast(I)) if (const Function *F = CI->getCalledFunction()) return canConstantFoldCallTo(CI, F); return false; } /// Determine whether this instruction can constant evolve within this loop /// assuming its operands can all constant evolve. static bool canConstantEvolve(Instruction *I, const Loop *L) { // An instruction outside of the loop can't be derived from a loop PHI. if (!L->contains(I)) return false; if (isa(I)) { // We don't currently keep track of the control flow needed to evaluate // PHIs, so we cannot handle PHIs inside of loops. return L->getHeader() == I->getParent(); } // If we won't be able to constant fold this expression even if the operands // are constants, bail early. return CanConstantFold(I); } /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by /// recursing through each instruction operand until reaching a loop header phi. static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap &PHIMap, unsigned Depth) { if (Depth > MaxConstantEvolvingDepth) return nullptr; // Otherwise, we can evaluate this instruction if all of its operands are // constant or derived from a PHI node themselves. PHINode *PHI = nullptr; for (Value *Op : UseInst->operands()) { if (isa(Op)) continue; Instruction *OpInst = dyn_cast(Op); if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; PHINode *P = dyn_cast(OpInst); if (!P) // If this operand is already visited, reuse the prior result. // We may have P != PHI if this is the deepest point at which the // inconsistent paths meet. P = PHIMap.lookup(OpInst); if (!P) { // Recurse and memoize the results, whether a phi is found or not. // This recursive call invalidates pointers into PHIMap. P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1); PHIMap[OpInst] = P; } if (!P) return nullptr; // Not evolving from PHI if (PHI && PHI != P) return nullptr; // Evolving from multiple different PHIs. PHI = P; } // This is a expression evolving from a constant PHI! return PHI; } /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node /// in the loop that V is derived from. We allow arbitrary operations along the /// way, but the operands of an operation must either be constants or a value /// derived from a constant PHI. If this expression does not fit with these /// constraints, return null. static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { Instruction *I = dyn_cast(V); if (!I || !canConstantEvolve(I, L)) return nullptr; if (PHINode *PN = dyn_cast(I)) return PN; // Record non-constant instructions contained by the loop. 
DenseMap PHIMap; return getConstantEvolvingPHIOperands(I, L, PHIMap, 0); } /// EvaluateExpression - Given an expression that passes the /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node /// in the loop has the value PHIVal. If we can't fold this expression for some /// reason, return null. static Constant *EvaluateExpression(Value *V, const Loop *L, DenseMap &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI) { // Convenient constant check, but redundant for recursive calls. if (Constant *C = dyn_cast(V)) return C; Instruction *I = dyn_cast(V); if (!I) return nullptr; if (Constant *C = Vals.lookup(I)) return C; // An instruction inside the loop depends on a value outside the loop that we // weren't given a mapping for, or a value such as a call inside the loop. if (!canConstantEvolve(I, L)) return nullptr; // An unmapped PHI can be due to a branch or another loop inside this loop, // or due to this not being the initial iteration through a loop where we // couldn't compute the evolution of this particular PHI last time. if (isa(I)) return nullptr; std::vector Operands(I->getNumOperands()); for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { Instruction *Operand = dyn_cast(I->getOperand(i)); if (!Operand) { Operands[i] = dyn_cast(I->getOperand(i)); if (!Operands[i]) return nullptr; continue; } Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); Vals[Operand] = C; if (!C) return nullptr; Operands[i] = C; } return ConstantFoldInstOperands(I, Operands, DL, TLI); } // If every incoming value to PN except the one for BB is a specific Constant, // return that, else return nullptr. static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { Constant *IncomingVal = nullptr; for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { if (PN->getIncomingBlock(i) == BB) continue; auto *CurrentVal = dyn_cast(PN->getIncomingValue(i)); if (!CurrentVal) return nullptr; if (IncomingVal != CurrentVal) { if (IncomingVal) return nullptr; IncomingVal = CurrentVal; } } return IncomingVal; } /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is /// in the header of its containing loop, we know the loop executes a /// constant number of times, and the PHI node is just a recurrence /// involving constants, fold it. Constant * ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, const APInt &BEs, const Loop *L) { auto I = ConstantEvolutionLoopExitValue.find(PN); if (I != ConstantEvolutionLoopExitValue.end()) return I->second; if (BEs.ugt(MaxBruteForceIterations)) return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; DenseMap CurrentIterVals; BasicBlock *Header = L->getHeader(); assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); BasicBlock *Latch = L->getLoopLatch(); if (!Latch) return nullptr; for (PHINode &PHI : Header->phis()) { if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) CurrentIterVals[&PHI] = StartCST; } if (!CurrentIterVals.count(PN)) return RetVal = nullptr; Value *BEValue = PN->getIncomingValueForBlock(Latch); // Execute the loop symbolically to determine the exit value. 
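  // Illustrative example: for
  //   %i = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
  //   %i.next = add i32 %i, 2
  // and a backedge-taken count of 3, the successive values are 0, 2, 4, 6 and
  // this returns 6.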
assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); unsigned NumIterations = BEs.getZExtValue(); // must be in range unsigned IterationNum = 0; const DataLayout &DL = getDataLayout(); for (; ; ++IterationNum) { if (IterationNum == NumIterations) return RetVal = CurrentIterVals[PN]; // Got exit value! // Compute the value of the PHIs for the next iteration. // EvaluateExpression adds non-phi values to the CurrentIterVals map. DenseMap NextIterVals; Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); if (!NextPHI) return nullptr; // Couldn't evaluate! NextIterVals[PN] = NextPHI; bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; // Also evaluate the other PHI nodes. However, we don't get to stop if we // cease to be able to evaluate one of them or if they stop evolving, // because that doesn't necessarily prevent us from computing PN. SmallVector, 8> PHIsToCompute; for (const auto &I : CurrentIterVals) { PHINode *PHI = dyn_cast(I.first); if (!PHI || PHI == PN || PHI->getParent() != Header) continue; PHIsToCompute.emplace_back(PHI, I.second); } // We use two distinct loops because EvaluateExpression may invalidate any // iterators into CurrentIterVals. for (const auto &I : PHIsToCompute) { PHINode *PHI = I.first; Constant *&NextPHI = NextIterVals[PHI]; if (!NextPHI) { // Not already computed. Value *BEValue = PHI->getIncomingValueForBlock(Latch); NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); } if (NextPHI != I.second) StoppedEvolving = false; } // If all entries in CurrentIterVals == NextIterVals then we can stop // iterating, the loop can't continue to change. if (StoppedEvolving) return RetVal = CurrentIterVals[PN]; CurrentIterVals.swap(NextIterVals); } } const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L, Value *Cond, bool ExitWhen) { PHINode *PN = getConstantEvolvingPHI(Cond, L); if (!PN) return getCouldNotCompute(); // If the loop is canonicalized, the PHI will have exactly two entries. // That's the only form we support here. if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); DenseMap CurrentIterVals; BasicBlock *Header = L->getHeader(); assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); BasicBlock *Latch = L->getLoopLatch(); assert(Latch && "Should follow from NumIncomingValues == 2!"); for (PHINode &PHI : Header->phis()) { if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) CurrentIterVals[&PHI] = StartCST; } if (!CurrentIterVals.count(PN)) return getCouldNotCompute(); // Okay, we find a PHI node that defines the trip count of this loop. Execute // the loop symbolically to determine when the condition gets a value of // "ExitWhen". unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. const DataLayout &DL = getDataLayout(); for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){ auto *CondVal = dyn_cast_or_null( EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI)); // Couldn't symbolically evaluate. if (!CondVal) return getCouldNotCompute(); if (CondVal->getValue() == uint64_t(ExitWhen)) { ++NumBruteForceTripCountsComputed; return getConstant(Type::getInt32Ty(getContext()), IterationNum); } // Update all the PHI nodes for the next iteration. DenseMap NextIterVals; // Create a list of which PHIs we need to compute. We want to do this before // calling EvaluateExpression on them because that may invalidate iterators // into CurrentIterVals. 
SmallVector PHIsToCompute; for (const auto &I : CurrentIterVals) { PHINode *PHI = dyn_cast(I.first); if (!PHI || PHI->getParent() != Header) continue; PHIsToCompute.push_back(PHI); } for (PHINode *PHI : PHIsToCompute) { Constant *&NextPHI = NextIterVals[PHI]; if (NextPHI) continue; // Already computed! Value *BEValue = PHI->getIncomingValueForBlock(Latch); NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); } CurrentIterVals.swap(NextIterVals); } // Too many iterations were needed to evaluate. return getCouldNotCompute(); } const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { SmallVector, 2> &Values = ValuesAtScopes[V]; // Check to see if we've folded this expression at this loop before. for (auto &LS : Values) if (LS.first == L) return LS.second ? LS.second : V; Values.emplace_back(L, nullptr); // Otherwise compute it. const SCEV *C = computeSCEVAtScope(V, L); for (auto &LS : reverse(ValuesAtScopes[V])) if (LS.first == L) { LS.second = C; if (!isa(C)) ValuesAtScopesUsers[C].push_back({L, V}); break; } return C; } /// This builds up a Constant using the ConstantExpr interface. That way, we /// will return Constants for objects which aren't represented by a /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. /// Returns NULL if the SCEV isn't representable as a Constant. static Constant *BuildConstantFromSCEV(const SCEV *V) { switch (V->getSCEVType()) { case scCouldNotCompute: case scAddRecExpr: return nullptr; case scConstant: return cast(V)->getValue(); case scUnknown: return dyn_cast(cast(V)->getValue()); case scSignExtend: { const SCEVSignExtendExpr *SS = cast(V); if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) return ConstantExpr::getSExt(CastOp, SS->getType()); return nullptr; } case scZeroExtend: { const SCEVZeroExtendExpr *SZ = cast(V); if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) return ConstantExpr::getZExt(CastOp, SZ->getType()); return nullptr; } case scPtrToInt: { const SCEVPtrToIntExpr *P2I = cast(V); if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand())) return ConstantExpr::getPtrToInt(CastOp, P2I->getType()); return nullptr; } case scTruncate: { const SCEVTruncateExpr *ST = cast(V); if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) return ConstantExpr::getTrunc(CastOp, ST->getType()); return nullptr; } case scAddExpr: { const SCEVAddExpr *SA = cast(V); Constant *C = nullptr; for (const SCEV *Op : SA->operands()) { Constant *OpC = BuildConstantFromSCEV(Op); if (!OpC) return nullptr; if (!C) { C = OpC; continue; } assert(!C->getType()->isPointerTy() && "Can only have one pointer, and it must be last"); if (auto *PT = dyn_cast(OpC->getType())) { // The offsets have been converted to bytes. We can add bytes to an // i8* by GEP with the byte count in the first index. Type *DestPtrTy = Type::getInt8PtrTy(PT->getContext(), PT->getAddressSpace()); OpC = ConstantExpr::getBitCast(OpC, DestPtrTy); C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()), OpC, C); } else { C = ConstantExpr::getAdd(C, OpC); } } return C; } case scMulExpr: { const SCEVMulExpr *SM = cast(V); Constant *C = nullptr; for (const SCEV *Op : SM->operands()) { assert(!Op->getType()->isPointerTy() && "Can't multiply pointers"); Constant *OpC = BuildConstantFromSCEV(Op); if (!OpC) return nullptr; C = C ? 
ConstantExpr::getMul(C, OpC) : OpC; } return C; } case scUDivExpr: case scSMaxExpr: case scUMaxExpr: case scSMinExpr: case scUMinExpr: case scSequentialUMinExpr: return nullptr; // TODO: smax, umax, smin, umax, umin_seq. } llvm_unreachable("Unknown SCEV kind!"); } const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { if (isa(V)) return V; // If this instruction is evolved from a constant-evolving PHI, compute the // exit value from the loop without using SCEVs. if (const SCEVUnknown *SU = dyn_cast(V)) { if (Instruction *I = dyn_cast(SU->getValue())) { if (PHINode *PN = dyn_cast(I)) { const Loop *CurrLoop = this->LI[I->getParent()]; // Looking for loop exit value. if (CurrLoop && CurrLoop->getParentLoop() == L && PN->getParent() == CurrLoop->getHeader()) { // Okay, there is no closed form solution for the PHI node. Check // to see if the loop that contains it has a known backedge-taken // count. If so, we may be able to force computation of the exit // value. const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop); // This trivial case can show up in some degenerate cases where // the incoming IR has not yet been fully simplified. if (BackedgeTakenCount->isZero()) { Value *InitValue = nullptr; bool MultipleInitValues = false; for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { if (!CurrLoop->contains(PN->getIncomingBlock(i))) { if (!InitValue) InitValue = PN->getIncomingValue(i); else if (InitValue != PN->getIncomingValue(i)) { MultipleInitValues = true; break; } } } if (!MultipleInitValues && InitValue) return getSCEV(InitValue); } // Do we have a loop invariant value flowing around the backedge // for a loop which must execute the backedge? if (!isa(BackedgeTakenCount) && isKnownPositive(BackedgeTakenCount) && PN->getNumIncomingValues() == 2) { unsigned InLoopPred = CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1; Value *BackedgeVal = PN->getIncomingValue(InLoopPred); if (CurrLoop->isLoopInvariant(BackedgeVal)) return getSCEV(BackedgeVal); } if (auto *BTCC = dyn_cast(BackedgeTakenCount)) { // Okay, we know how many times the containing loop executes. If // this is a constant evolving PHI node, get the final value at // the specified iteration number. Constant *RV = getConstantEvolutionLoopExitValue( PN, BTCC->getAPInt(), CurrLoop); if (RV) return getSCEV(RV); } } // If there is a single-input Phi, evaluate it at our scope. If we can // prove that this replacement does not break LCSSA form, use new value. if (PN->getNumOperands() == 1) { const SCEV *Input = getSCEV(PN->getOperand(0)); const SCEV *InputAtScope = getSCEVAtScope(Input, L); // TODO: We can generalize it using LI.replacementPreservesLCSSAForm, // for the simplest case just support constants. if (isa(InputAtScope)) return InputAtScope; } } // Okay, this is an expression that we cannot symbolically evaluate // into a SCEV. Check to see if it's possible to symbolically evaluate // the arguments into constants, and if so, try to constant propagate the // result. This is particularly useful for computing loop exit values. if (CanConstantFold(I)) { SmallVector Operands; bool MadeImprovement = false; for (Value *Op : I->operands()) { if (Constant *C = dyn_cast(Op)) { Operands.push_back(C); continue; } // If any of the operands is non-constant and if they are // non-integer and non-pointer, don't even try to analyze them // with scev techniques. 
if (!isSCEVable(Op->getType())) return V; const SCEV *OrigV = getSCEV(Op); const SCEV *OpV = getSCEVAtScope(OrigV, L); MadeImprovement |= OrigV != OpV; Constant *C = BuildConstantFromSCEV(OpV); if (!C) return V; if (C->getType() != Op->getType()) C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, Op->getType(), false), C, Op->getType()); Operands.push_back(C); } // Check to see if getSCEVAtScope actually made an improvement. if (MadeImprovement) { Constant *C = nullptr; const DataLayout &DL = getDataLayout(); C = ConstantFoldInstOperands(I, Operands, DL, &TLI); if (!C) return V; return getSCEV(C); } } } // This is some other type of SCEVUnknown, just return it. return V; } if (isa(V) || isa(V)) { const auto *Comm = cast(V); // Avoid performing the look-up in the common case where the specified // expression has no loop-variant portions. for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); if (OpAtScope != Comm->getOperand(i)) { // Okay, at least one of these operands is loop variant but might be // foldable. Build a new instance of the folded commutative expression. SmallVector NewOps(Comm->op_begin(), Comm->op_begin()+i); NewOps.push_back(OpAtScope); for (++i; i != e; ++i) { OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); NewOps.push_back(OpAtScope); } if (isa(Comm)) return getAddExpr(NewOps, Comm->getNoWrapFlags()); if (isa(Comm)) return getMulExpr(NewOps, Comm->getNoWrapFlags()); if (isa(Comm)) return getMinMaxExpr(Comm->getSCEVType(), NewOps); if (isa(Comm)) return getSequentialMinMaxExpr(Comm->getSCEVType(), NewOps); llvm_unreachable("Unknown commutative / sequential min/max SCEV type!"); } } // If we got here, all operands are loop invariant. return Comm; } if (const SCEVUDivExpr *Div = dyn_cast(V)) { const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); if (LHS == Div->getLHS() && RHS == Div->getRHS()) return Div; // must be loop invariant return getUDivExpr(LHS, RHS); } // If this is a loop recurrence for a loop that does not contain L, then we // are dealing with the final value computed by the loop. if (const SCEVAddRecExpr *AddRec = dyn_cast(V)) { // First, attempt to evaluate each operand. // Avoid performing the look-up in the common case where the specified // expression has no loop-variant portions. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); if (OpAtScope == AddRec->getOperand(i)) continue; // Okay, at least one of these operands is loop variant but might be // foldable. Build a new instance of the folded commutative expression. SmallVector NewOps(AddRec->op_begin(), AddRec->op_begin()+i); NewOps.push_back(OpAtScope); for (++i; i != e; ++i) NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); const SCEV *FoldedRec = getAddRecExpr(NewOps, AddRec->getLoop(), AddRec->getNoWrapFlags(SCEV::FlagNW)); AddRec = dyn_cast(FoldedRec); // The addrec may be folded to a nonrecurrence, for example, if the // induction variable is multiplied by zero after constant folding. Go // ahead and return the folded value. if (!AddRec) return FoldedRec; break; } // If the scope is outside the addrec's loop, evaluate it by using the // loop exit value of the addrec. if (!AddRec->getLoop()->contains(L)) { // To evaluate this recurrence, we need to know how many times the AddRec // loop iterates. Compute this now. 
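    // For example, evaluating {0,+,2}<%inner> at a scope outside %inner when
    // %inner's backedge-taken count is known to be 9 folds the recurrence to
    // the constant 18.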
const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; // Then, evaluate the AddRec. return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); } return AddRec; } if (const SCEVCastExpr *Cast = dyn_cast(V)) { const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant return getCastExpr(Cast->getSCEVType(), Op, Cast->getType()); } llvm_unreachable("Unknown SCEV type!"); } const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { return getSCEVAtScope(getSCEV(V), L); } const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { if (const SCEVZeroExtendExpr *ZExt = dyn_cast(S)) return stripInjectiveFunctions(ZExt->getOperand()); if (const SCEVSignExtendExpr *SExt = dyn_cast(S)) return stripInjectiveFunctions(SExt->getOperand()); return S; } /// Finds the minimum unsigned root of the following equation: /// /// A * X = B (mod N) /// /// where N = 2^BW and BW is the common bit width of A and B. The signedness of /// A and B isn't important. /// /// If the equation does not have a solution, SCEVCouldNotCompute is returned. static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, ScalarEvolution &SE) { uint32_t BW = A.getBitWidth(); assert(BW == SE.getTypeSizeInBits(B->getType())); assert(A != 0 && "A must be non-zero."); // 1. D = gcd(A, N) // // The gcd of A and N may have only one prime factor: 2. The number of // trailing zeros in A is its multiplicity uint32_t Mult2 = A.countTrailingZeros(); // D = 2^Mult2 // 2. Check if B is divisible by D. // // B is divisible by D if and only if the multiplicity of prime factor 2 for B // is not less than multiplicity of this prime factor for D. if (SE.GetMinTrailingZeros(B) < Mult2) return SE.getCouldNotCompute(); // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic // modulo (N / D). // // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent // (N / D) in general. The inverse itself always fits into BW bits, though, // so we immediately truncate it. APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D APInt Mod(BW + 1, 0); Mod.setBit(BW - Mult2); // Mod = N / D APInt I = AD.multiplicativeInverse(Mod).trunc(BW); // 4. Compute the minimum unsigned root of the equation: // I * (B / D) mod (N / D) // To simplify the computation, we factor out the divide by D: // (I * B mod N) / D const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); } /// For a given quadratic addrec, generate coefficients of the corresponding /// quadratic equation, multiplied by a common value to ensure that they are /// integers. /// The returned value is a tuple { A, B, C, M, BitWidth }, where /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C /// were multiplied by, and BitWidth is the bit width of the original addrec /// coefficients. /// This function returns None if the addrec coefficients are not compile- /// time constants. 
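///
/// For example (illustrative), the addrec {1,+,2,+,2} yields A = 2, B = 2,
/// C = 2 (i.e. 2n^2 + 2n + 2) with the common multiplier 2.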
static Optional> GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); const SCEVConstant *LC = dyn_cast(AddRec->getOperand(0)); const SCEVConstant *MC = dyn_cast(AddRec->getOperand(1)); const SCEVConstant *NC = dyn_cast(AddRec->getOperand(2)); LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " << *AddRec << '\n'); // We currently can only solve this if the coefficients are constants. if (!LC || !MC || !NC) { LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); return None; } APInt L = LC->getAPInt(); APInt M = MC->getAPInt(); APInt N = NC->getAPInt(); assert(!N.isZero() && "This is not a quadratic addrec"); unsigned BitWidth = LC->getAPInt().getBitWidth(); unsigned NewWidth = BitWidth + 1; LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " << BitWidth << '\n'); // The sign-extension (as opposed to a zero-extension) here matches the // extension used in SolveQuadraticEquationWrap (with the same motivation). N = N.sext(NewWidth); M = M.sext(NewWidth); L = L.sext(NewWidth); // The increments are M, M+N, M+2N, ..., so the accumulated values are // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, // L+M, L+2M+N, L+3M+3N, ... // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. // // The equation Acc = 0 is then // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. // In a quadratic form it becomes: // N n^2 + (2M-N) n + 2L = 0. APInt A = N; APInt B = 2 * M - A; APInt C = 2 * L; APInt T = APInt(NewWidth, 2); LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B << "x + " << C << ", coeff bw: " << NewWidth << ", multiplied by " << T << '\n'); return std::make_tuple(A, B, C, T, BitWidth); } /// Helper function to compare optional APInts: /// (a) if X and Y both exist, return min(X, Y), /// (b) if neither X nor Y exist, return None, /// (c) if exactly one of X and Y exists, return that value. static Optional MinOptional(Optional X, Optional Y) { if (X && Y) { unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); APInt XW = X->sext(W); APInt YW = Y->sext(W); return XW.slt(YW) ? *X : *Y; } if (!X && !Y) return None; return X ? *X : *Y; } /// Helper function to truncate an optional APInt to a given BitWidth. /// When solving addrec-related equations, it is preferable to return a value /// that has the same bit width as the original addrec's coefficients. If the /// solution fits in the original bit width, truncate it (except for i1). /// Returning a value of a different bit width may inhibit some optimizations. /// /// In general, a solution to a quadratic equation generated from an addrec /// may require BW+1 bits, where BW is the bit width of the addrec's /// coefficients. The reason is that the coefficients of the quadratic /// equation are BW+1 bits wide (to avoid truncation when converting from /// the addrec to the equation). static Optional TruncIfPossible(Optional X, unsigned BitWidth) { if (!X) return None; unsigned W = X->getBitWidth(); if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) return X->trunc(BitWidth); return X; } /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n /// iterations. The values L, M, N are assumed to be signed, and they /// should all have the same bit widths. /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, /// where BW is the bit width of the addrec's coefficients. 
/// If the calculated value is a BW-bit integer (for BW > 1), it will be /// returned as such, otherwise the bit width of the returned value may /// be greater than BW. /// /// This function returns None if /// (a) the addrec coefficients are not constant, or /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases /// like x^2 = 5, no integer solutions exist, in other cases an integer /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. static Optional SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { APInt A, B, C, M; unsigned BitWidth; auto T = GetQuadraticEquation(AddRec); if (!T) return None; std::tie(A, B, C, M, BitWidth) = *T; LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); Optional X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); if (!X) return None; ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); if (!V->isZero()) return None; return TruncIfPossible(X, BitWidth); } /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n /// iterations. The values M, N are assumed to be signed, and they /// should all have the same bit widths. /// Find the least n such that c(n) does not belong to the given range, /// while c(n-1) does. /// /// This function returns None if /// (a) the addrec coefficients are not constant, or /// (b) SolveQuadraticEquationWrap was unable to find a solution for the /// bounds of the range. static Optional SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE) { assert(AddRec->getOperand(0)->isZero() && "Starting value of addrec should be 0"); LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " << Range << ", addrec " << *AddRec << '\n'); // This case is handled in getNumIterationsInRange. Here we can assume that // we start in the range. assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && "Addrec's initial value should be in range"); APInt A, B, C, M; unsigned BitWidth; auto T = GetQuadraticEquation(AddRec); if (!T) return None; // Be careful about the return value: there can be two reasons for not // returning an actual number. First, if no solutions to the equations // were found, and second, if the solutions don't leave the given range. // The first case means that the actual solution is "unknown", the second // means that it's known, but not valid. If the solution is unknown, we // cannot make any conclusions. // Return a pair: the optional solution and a flag indicating if the // solution was found. auto SolveForBoundary = [&](APInt Bound) -> std::pair,bool> { // Solve for signed overflow and unsigned overflow, pick the lower // solution. LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " << Bound << " (before multiplying by " << M << ")\n"); Bound *= M; // The quadratic equation multiplier. 
    Optional<APInt> SO = None;
    if (BitWidth > 1) {
      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                           "signed overflow\n");
      SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
    }
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                         "unsigned overflow\n");
    Optional<APInt> UO =
        APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth + 1);

    auto LeavesRange = [&](const APInt &X) {
      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
      if (Range.contains(V0->getValue()))
        return false;
      // X should be at least 1, so X-1 is non-negative.
      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X - 1);
      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
      if (Range.contains(V1->getValue()))
        return true;
      return false;
    };

    // If SolveQuadraticEquationWrap returns None, it means that there can
    // be a solution, but the function failed to find it. We cannot treat it
    // as "no solution".
    if (!SO || !UO)
      return {None, false};

    // Check the smaller value first to see if it leaves the range.
    // At this point, both SO and UO must have values.
    Optional<APInt> Min = MinOptional(SO, UO);
    if (LeavesRange(*Min))
      return {Min, true};
    Optional<APInt> Max = Min == SO ? UO : SO;
    if (LeavesRange(*Max))
      return {Max, true};

    // Solutions were found, but were eliminated, hence the "true".
    return {None, true};
  };

  std::tie(A, B, C, M, BitWidth) = *T;
  // Lower bound is inclusive, subtract 1 to represent the exiting value.
  APInt Lower = Range.getLower().sext(A.getBitWidth()) - 1;
  APInt Upper = Range.getUpper().sext(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If any of the solutions was unknown, no meaningful conclusions can
  // be made.
  if (!SL.second || !SU.second)
    return None;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens. Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not. The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows. This is only possible when the two
  // overflows cross k*2^W for the same k. In such a case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // the range before or that we started outside of it. Both of these cases
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns None, the correct
  // solution is not some value between the Max for this boundary and the
  // Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B. If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B.
It cannot correspond to B, // since Min_B is the first occurrence of such an overflow. If it // corresponded to A, it would have to be either a signed or an unsigned // overflow that is larger than both eliminated overflows for A. But // between the eliminated overflows and this overflow, the values would // cover the entire value space, thus crossing the other boundary, which // is a contradiction. return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth); } ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit, bool AllowPredicates) { // This is only used for loops with a "x != y" exit test. The exit condition // is now expressed as a single expression, V = x-y. So the exit test is // effectively V != 0. We know and take advantage of the fact that this // expression only being used in a comparison by zero context. SmallPtrSet Predicates; // If the value is a constant if (const SCEVConstant *C = dyn_cast(V)) { // If the value is already zero, the branch will execute zero times. if (C->getValue()->isZero()) return C; return getCouldNotCompute(); // Otherwise it will loop infinitely. } const SCEVAddRecExpr *AddRec = dyn_cast(stripInjectiveFunctions(V)); if (!AddRec && AllowPredicates) // Try to make this an AddRec using runtime tests, in the first X // iterations of this loop, where X is the SCEV expression found by the // algorithm below. AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates); if (!AddRec || AddRec->getLoop() != L) return getCouldNotCompute(); // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of // the quadratic equation to solve it. if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { // We can only use this value if the chrec ends up with an exact zero // value at this index. When solving for "X*X != 5", for example, we // should not accept a root of 2. if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) { const auto *R = cast(getConstant(*S)); return ExitLimit(R, R, false, Predicates); } return getCouldNotCompute(); } // Otherwise we can only handle this if it is affine. if (!AddRec->isAffine()) return getCouldNotCompute(); // If this is an affine expression, the execution count of this branch is // the minimum unsigned root of the following equation: // // Start + Step*N = 0 (mod 2^BW) // // equivalent to: // // Step*N = -Start (mod 2^BW) // // where BW is the common bit width of Start and Step. // Get the initial value for the loop. const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); // For now we handle only constant steps. // // TODO: Handle a nonconstant Step given AddRec. If the // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. // We have not yet seen any such cases. const SCEVConstant *StepC = dyn_cast(Step); if (!StepC || StepC->getValue()->isZero()) return getCouldNotCompute(); // For positive steps (counting up until unsigned overflow): // N = -Start/Step (as unsigned) // For negative steps (counting down to zero): // N = Start/-Step // First compute the unsigned distance from zero in the direction of Step. bool CountDown = StepC->getAPInt().isNegative(); const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); // Handle unitary steps, which cannot wraparound. 
// 1*N = -Start; -1*N = Start (mod 2^BW), so: // N = Distance (as unsigned) if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L)); MaxBECount = APIntOps::umin(MaxBECount, getUnsignedRangeMax(Distance)); // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, // we end up with a loop whose backedge-taken count is n - 1. Detect this // case, and see if we can improve the bound. // // Explicitly handling this here is necessary because getUnsignedRange // isn't context-sensitive; it doesn't know that we only care about the // range inside the loop. const SCEV *Zero = getZero(Distance->getType()); const SCEV *One = getOne(Distance->getType()); const SCEV *DistancePlusOne = getAddExpr(Distance, One); if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { // If Distance + 1 doesn't overflow, we can compute the maximum distance // as "unsigned_max(Distance + 1) - 1". ConstantRange CR = getUnsignedRange(DistancePlusOne); MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); } return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); } // If the condition controls loop exit (the loop exits only if the expression // is true) and the addition is no-wrap we can use unsigned divide to // compute the backedge count. In this case, the step may not divide the // distance, but we don't care because if the condition is "missed" the loop // will have undefined behavior due to wrapping. if (ControlsExit && AddRec->hasNoSelfWrap() && loopHasNoAbnormalExits(AddRec->getLoop())) { const SCEV *Exact = getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); const SCEV *Max = getCouldNotCompute(); if (Exact != getCouldNotCompute()) { APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L)); Max = getConstant(APIntOps::umin(MaxInt, getUnsignedRangeMax(Exact))); } return ExitLimit(Exact, Max, false, Predicates); } // Solve the general equation. const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), getNegativeSCEV(Start), *this); const SCEV *M = E; if (E != getCouldNotCompute()) { APInt MaxWithGuards = getUnsignedRangeMax(applyLoopGuards(E, L)); M = getConstant(APIntOps::umin(MaxWithGuards, getUnsignedRangeMax(E))); } return ExitLimit(E, M, false, Predicates); } ScalarEvolution::ExitLimit ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { // Loops that look like: while (X == 0) are very strange indeed. We don't // handle them yet except for the trivial case. This could be expanded in the // future as needed. // If the value is a constant, check to see if it is known to be non-zero // already. If so, the backedge will execute zero times. if (const SCEVConstant *C = dyn_cast(V)) { if (!C->getValue()->isZero()) return getZero(C->getType()); return getCouldNotCompute(); // Otherwise it will loop infinitely. } // We could implement others, but I really doubt anyone writes loops like // this, and if they did, they would already be constant folded. return getCouldNotCompute(); } std::pair ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) const { // If the block has a unique predecessor, then there is no path from the // predecessor to the block that does not go through the direct edge // from the predecessor to the block. if (const BasicBlock *Pred = BB->getSinglePredecessor()) return {Pred, BB}; // A loop's header is defined to be a block that dominates the loop. 
// If the header has a unique predecessor outside the loop, it must be // a block that has exactly one successor that can reach the loop. if (const Loop *L = LI.getLoopFor(BB)) return {L->getLoopPredecessor(), L->getHeader()}; return {nullptr, nullptr}; } /// SCEV structural equivalence is usually sufficient for testing whether two /// expressions are equal, however for the purposes of looking for a condition /// guarding a loop, it can be useful to be a little more general, since a /// front-end may have replicated the controlling expression. static bool HasSameValue(const SCEV *A, const SCEV *B) { // Quick check to see if they are the same SCEV. if (A == B) return true; auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { // Not all instructions that are "identical" compute the same value. For // instance, two distinct alloca instructions allocating the same type are // identical and do not read memory; but compute distinct values. return A->isIdenticalTo(B) && (isa(A) || isa(A)); }; // Otherwise, if they're both SCEVUnknown, it's possible that they hold // two different instructions with the same value. Check for this case. if (const SCEVUnknown *AU = dyn_cast(A)) if (const SCEVUnknown *BU = dyn_cast(B)) if (const Instruction *AI = dyn_cast(AU->getValue())) if (const Instruction *BI = dyn_cast(BU->getValue())) if (ComputesEqualValues(AI, BI)) return true; // Otherwise assume they may have a different value. return false; } bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, const SCEV *&LHS, const SCEV *&RHS, unsigned Depth, bool ControllingFiniteLoop) { bool Changed = false; // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or // '0 != 0'. auto TrivialCase = [&](bool TriviallyTrue) { LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; return true; }; // If we hit the max recursion limit bail out. if (Depth >= 3) return false; // Canonicalize a constant to the right side. if (const SCEVConstant *LHSC = dyn_cast(LHS)) { // Check for both operands constant. if (const SCEVConstant *RHSC = dyn_cast(RHS)) { if (ConstantExpr::getICmp(Pred, LHSC->getValue(), RHSC->getValue())->isNullValue()) return TrivialCase(false); else return TrivialCase(true); } // Otherwise swap the operands to put the constant on the right. std::swap(LHS, RHS); Pred = ICmpInst::getSwappedPredicate(Pred); Changed = true; } // If we're comparing an addrec with a value which is loop-invariant in the // addrec's loop, put the addrec on the left. Also make a dominance check, // as both operands could be addrecs loop-invariant in each other's loop. if (const SCEVAddRecExpr *AR = dyn_cast(RHS)) { const Loop *L = AR->getLoop(); if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { std::swap(LHS, RHS); Pred = ICmpInst::getSwappedPredicate(Pred); Changed = true; } } // If there's a constant operand, canonicalize comparisons with boundary // cases, and canonicalize *-or-equal comparisons to regular comparisons. 
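  // For example, X u>= 5 with a constant RHS becomes X u> 4, and X s<= 7
  // becomes X s< 8; boundary cases such as X u>= 0 are instead folded to a
  // trivially true or false comparison by the exact-range check below.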
if (const SCEVConstant *RC = dyn_cast(RHS)) { const APInt &RA = RC->getAPInt(); bool SimplifiedByConstantRange = false; if (!ICmpInst::isEquality(Pred)) { ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); if (ExactCR.isFullSet()) return TrivialCase(true); else if (ExactCR.isEmptySet()) return TrivialCase(false); APInt NewRHS; CmpInst::Predicate NewPred; if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && ICmpInst::isEquality(NewPred)) { // We were able to convert an inequality to an equality. Pred = NewPred; RHS = getConstant(NewRHS); Changed = SimplifiedByConstantRange = true; } } if (!SimplifiedByConstantRange) { switch (Pred) { default: break; case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_NE: // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. if (!RA) if (const SCEVAddExpr *AE = dyn_cast(LHS)) if (const SCEVMulExpr *ME = dyn_cast(AE->getOperand(0))) if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) { RHS = AE->getOperand(1); LHS = ME->getOperand(1); Changed = true; } break; // The "Should have been caught earlier!" messages refer to the fact // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above // should have fired on the corresponding cases, and canonicalized the // check to trivial case. case ICmpInst::ICMP_UGE: assert(!RA.isMinValue() && "Should have been caught earlier!"); Pred = ICmpInst::ICMP_UGT; RHS = getConstant(RA - 1); Changed = true; break; case ICmpInst::ICMP_ULE: assert(!RA.isMaxValue() && "Should have been caught earlier!"); Pred = ICmpInst::ICMP_ULT; RHS = getConstant(RA + 1); Changed = true; break; case ICmpInst::ICMP_SGE: assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); Pred = ICmpInst::ICMP_SGT; RHS = getConstant(RA - 1); Changed = true; break; case ICmpInst::ICMP_SLE: assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); Pred = ICmpInst::ICMP_SLT; RHS = getConstant(RA + 1); Changed = true; break; } } } // Check for obvious equality. if (HasSameValue(LHS, RHS)) { if (ICmpInst::isTrueWhenEqual(Pred)) return TrivialCase(true); if (ICmpInst::isFalseWhenEqual(Pred)) return TrivialCase(false); } // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by // adding or subtracting 1 from one of the operands. This can be done for // one of two reasons: // 1) The range of the RHS does not include the (signed/unsigned) boundaries // 2) The loop is finite, with this comparison controlling the exit. Since the // loop is finite, the bound cannot include the corresponding boundary // (otherwise it would loop forever). 
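  // For example, if a finite loop's exit test is I s<= N, then N cannot be
  // SINT_MAX (the loop would never terminate), so the test can safely be
  // rewritten as I s< N+1.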
switch (Pred) { case ICmpInst::ICMP_SLE: if (ControllingFiniteLoop || !getSignedRangeMax(RHS).isMaxSignedValue()) { RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, SCEV::FlagNSW); Pred = ICmpInst::ICMP_SLT; Changed = true; } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, SCEV::FlagNSW); Pred = ICmpInst::ICMP_SLT; Changed = true; } break; case ICmpInst::ICMP_SGE: if (ControllingFiniteLoop || !getSignedRangeMin(RHS).isMinSignedValue()) { RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, SCEV::FlagNSW); Pred = ICmpInst::ICMP_SGT; Changed = true; } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, SCEV::FlagNSW); Pred = ICmpInst::ICMP_SGT; Changed = true; } break; case ICmpInst::ICMP_ULE: if (ControllingFiniteLoop || !getUnsignedRangeMax(RHS).isMaxValue()) { RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, SCEV::FlagNUW); Pred = ICmpInst::ICMP_ULT; Changed = true; } else if (!getUnsignedRangeMin(LHS).isMinValue()) { LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); Pred = ICmpInst::ICMP_ULT; Changed = true; } break; case ICmpInst::ICMP_UGE: if (ControllingFiniteLoop || !getUnsignedRangeMin(RHS).isMinValue()) { RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); Pred = ICmpInst::ICMP_UGT; Changed = true; } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, SCEV::FlagNUW); Pred = ICmpInst::ICMP_UGT; Changed = true; } break; default: break; } // TODO: More simplifications are possible here. // Recursively simplify until we either hit a recursion limit or nothing // changes. if (Changed) return SimplifyICmpOperands(Pred, LHS, RHS, Depth + 1, ControllingFiniteLoop); return Changed; } bool ScalarEvolution::isKnownNegative(const SCEV *S) { return getSignedRangeMax(S).isNegative(); } bool ScalarEvolution::isKnownPositive(const SCEV *S) { return getSignedRangeMin(S).isStrictlyPositive(); } bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { return !getSignedRangeMin(S).isNegative(); } bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { return !getSignedRangeMax(S).isStrictlyPositive(); } bool ScalarEvolution::isKnownNonZero(const SCEV *S) { return getUnsignedRangeMin(S) != 0; } std::pair ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { // Compute SCEV on entry of loop L. const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); if (Start == getCouldNotCompute()) return { Start, Start }; // Compute post increment SCEV for loop L. const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); return { Start, PostInc }; } bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { // First collect all loops. SmallPtrSet LoopsUsed; getUsedLoops(LHS, LoopsUsed); getUsedLoops(RHS, LoopsUsed); if (LoopsUsed.empty()) return false; // Domination relationship must be a linear order on collected loops. 
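  // For instance, if LHS uses an addrec of an outer loop and RHS an addrec of
  // a loop nested within it, the headers are linearly ordered by dominance and
  // the innermost loop is picked as MDL below.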
#ifndef NDEBUG for (const auto *L1 : LoopsUsed) for (const auto *L2 : LoopsUsed) assert((DT.dominates(L1->getHeader(), L2->getHeader()) || DT.dominates(L2->getHeader(), L1->getHeader())) && "Domination relationship is not a linear order"); #endif const Loop *MDL = *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), [&](const Loop *L1, const Loop *L2) { return DT.properlyDominates(L1->getHeader(), L2->getHeader()); }); // Get init and post increment value for LHS. auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); // if LHS contains unknown non-invariant SCEV then bail out. if (SplitLHS.first == getCouldNotCompute()) return false; assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC"); // Get init and post increment value for RHS. auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); // if RHS contains unknown non-invariant SCEV then bail out. if (SplitRHS.first == getCouldNotCompute()) return false; assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC"); // It is possible that init SCEV contains an invariant load but it does // not dominate MDL and is not available at MDL loop entry, so we should // check it here. if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) || !isAvailableAtLoopEntry(SplitRHS.first, MDL)) return false; // It seems backedge guard check is faster than entry one so in some cases // it can speed up whole estimation by short circuit return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second, SplitRHS.second) && isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first); } bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { // Canonicalize the inputs first. (void)SimplifyICmpOperands(Pred, LHS, RHS); if (isKnownViaInduction(Pred, LHS, RHS)) return true; if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) return true; // Otherwise see what can be done with some simple reasoning. return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS); } Optional ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { if (isKnownPredicate(Pred, LHS, RHS)) return true; else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS)) return false; return None; } bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI) { // TODO: Analyze guards and assumes from Context's block. 
return isKnownPredicate(Pred, LHS, RHS) || isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS); } Optional ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI) { Optional KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS); if (KnownWithoutContext) return KnownWithoutContext; if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS)) return true; else if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), ICmpInst::getInversePredicate(Pred), LHS, RHS)) return false; return None; } bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS) { const Loop *L = LHS->getLoop(); return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); } Optional ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred) { auto Result = getMonotonicPredicateTypeImpl(LHS, Pred); #ifndef NDEBUG // Verify an invariant: inverting the predicate should turn a monotonically // increasing change to a monotonically decreasing one, and vice versa. if (Result) { auto ResultSwapped = getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred)); assert(ResultSwapped && "should be able to analyze both!"); assert(ResultSwapped.value() != Result.value() && "monotonicity should flip as we flip the predicate"); } #endif return Result; } Optional ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred) { // A zero step value for LHS means the induction variable is essentially a // loop invariant value. We don't really depend on the predicate actually // flipping from false to true (for increasing predicates, and the other way // around for decreasing predicates), all we care about is that *if* the // predicate changes then it only changes from false to true. // // A zero step value in itself is not very useful, but there may be places // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be // as general as possible. // Only handle LE/LT/GE/GT predicates. if (!ICmpInst::isRelational(Pred)) return None; bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred); assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) && "Should be greater or less!"); // Check that AR does not wrap. if (ICmpInst::isUnsigned(Pred)) { if (!LHS->hasNoUnsignedWrap()) return None; return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; } else { assert(ICmpInst::isSigned(Pred) && "Relational predicate is either signed or unsigned!"); if (!LHS->hasNoSignedWrap()) return None; const SCEV *Step = LHS->getStepRecurrence(*this); if (isKnownNonNegative(Step)) return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; if (isKnownNonPositive(Step)) return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; return None; } } Optional ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L) { // If there is a loop-invariant, force it into the RHS, otherwise bail out. 
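  // For example, to analyze %n u> {0,+,1}<%L> with %n loop-invariant, the
  // operands are swapped to {0,+,1}<%L> u< %n so that the addrec ends up on
  // the left-hand side.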
if (!isLoopInvariant(RHS, L)) { if (!isLoopInvariant(LHS, L)) return None; std::swap(LHS, RHS); Pred = ICmpInst::getSwappedPredicate(Pred); } const SCEVAddRecExpr *ArLHS = dyn_cast(LHS); if (!ArLHS || ArLHS->getLoop() != L) return None; auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred); if (!MonotonicType) return None; // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to // true as the loop iterates, and the backedge is control dependent on // "ArLHS `Pred` RHS" == true then we can reason as follows: // // * if the predicate was false in the first iteration then the predicate // is never evaluated again, since the loop exits without taking the // backedge. // * if the predicate was true in the first iteration then it will // continue to be true for all future iterations since it is // monotonically increasing. // // For both the above possibilities, we can replace the loop varying // predicate with its value on the first iteration of the loop (which is // loop invariant). // // A similar reasoning applies for a monotonically decreasing predicate, by // replacing true with false and false with true in the above two bullets. bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing; auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) return None; return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS); } Optional ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations( ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter) { // Try to prove the following set of facts: // - The predicate is monotonic in the iteration space. // - If the check does not fail on the 1st iteration: // - No overflow will happen during first MaxIter iterations; // - It will not fail on the MaxIter'th iteration. // If the check does fail on the 1st iteration, we leave the loop and no // other checks matter. // If there is a loop-invariant, force it into the RHS, otherwise bail out. if (!isLoopInvariant(RHS, L)) { if (!isLoopInvariant(LHS, L)) return None; std::swap(LHS, RHS); Pred = ICmpInst::getSwappedPredicate(Pred); } auto *AR = dyn_cast(LHS); if (!AR || AR->getLoop() != L) return None; // The predicate must be relational (i.e. <, <=, >=, >). if (!ICmpInst::isRelational(Pred)) return None; // TODO: Support steps other than +/- 1. const SCEV *Step = AR->getStepRecurrence(*this); auto *One = getOne(Step->getType()); auto *MinusOne = getNegativeSCEV(One); if (Step != One && Step != MinusOne) return None; // Type mismatch here means that MaxIter is potentially larger than max // unsigned value in start type, which mean we cannot prove no wrap for the // indvar. if (AR->getType() != MaxIter->getType()) return None; // Value of IV on suggested last iteration. const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this); // Does it still meet the requirement? if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS)) return None; // Because step is +/- 1 and MaxIter has same type as Start (i.e. it does // not exceed max unsigned value of this type), this effectively proves // that there is no wrap during the iteration. To prove that there is no // signed/unsigned wrap, we need to check that // Start <= Last for step = 1 or Start >= Last for step = -1. ICmpInst::Predicate NoOverflowPred = CmpInst::isSigned(Pred) ? 
ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; if (Step == MinusOne) NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred); const SCEV *Start = AR->getStart(); if (!isKnownPredicateAt(NoOverflowPred, Start, Last, CtxI)) return None; // Everything is fine. return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS); } bool ScalarEvolution::isKnownPredicateViaConstantRanges( ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { if (HasSameValue(LHS, RHS)) return ICmpInst::isTrueWhenEqual(Pred); // This code is split out from isKnownPredicate because it is called from // within isLoopEntryGuardedByCond. auto CheckRanges = [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { return RangeLHS.icmp(Pred, RangeRHS); }; // The check at the top of the function catches the case where the values are // known to be equal. if (Pred == CmpInst::ICMP_EQ) return false; if (Pred == CmpInst::ICMP_NE) { auto SL = getSignedRange(LHS); auto SR = getSignedRange(RHS); if (CheckRanges(SL, SR)) return true; auto UL = getUnsignedRange(LHS); auto UR = getUnsignedRange(RHS); if (CheckRanges(UL, UR)) return true; auto *Diff = getMinusSCEV(LHS, RHS); return !isa(Diff) && isKnownNonZero(Diff); } if (CmpInst::isSigned(Pred)) { auto SL = getSignedRange(LHS); auto SR = getSignedRange(RHS); return CheckRanges(SL, SR); } auto UL = getUnsignedRange(LHS); auto UR = getUnsignedRange(RHS); return CheckRanges(UL, UR); } bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { // Match X to (A + C1) and Y to (A + C2), where // C1 and C2 are constant integers. If either X or Y are not add expressions, // consider them as X + 0 and Y + 0 respectively. C1 and C2 are returned via // OutC1 and OutC2. auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y, APInt &OutC1, APInt &OutC2, SCEV::NoWrapFlags ExpectedFlags) { const SCEV *XNonConstOp, *XConstOp; const SCEV *YNonConstOp, *YConstOp; SCEV::NoWrapFlags XFlagsPresent; SCEV::NoWrapFlags YFlagsPresent; if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) { XConstOp = getZero(X->getType()); XNonConstOp = X; XFlagsPresent = ExpectedFlags; } if (!isa(XConstOp) || (XFlagsPresent & ExpectedFlags) != ExpectedFlags) return false; if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) { YConstOp = getZero(Y->getType()); YNonConstOp = Y; YFlagsPresent = ExpectedFlags; } if (!isa(YConstOp) || (YFlagsPresent & ExpectedFlags) != ExpectedFlags) return false; if (YNonConstOp != XNonConstOp) return false; OutC1 = cast(XConstOp)->getAPInt(); OutC2 = cast(YConstOp)->getAPInt(); return true; }; APInt C1; APInt C2; switch (Pred) { default: break; case ICmpInst::ICMP_SGE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; case ICmpInst::ICMP_SLE: // (X + C1) s<= (X + C2) if C1 s<= C2. if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2)) return true; break; case ICmpInst::ICMP_SGT: std::swap(LHS, RHS); LLVM_FALLTHROUGH; case ICmpInst::ICMP_SLT: // (X + C1) s< (X + C2) if C1 s< C2. if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2)) return true; break; case ICmpInst::ICMP_UGE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; case ICmpInst::ICMP_ULE: // (X + C1) u<= (X + C2) for C1 u<= C2. if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2)) return true; break; case ICmpInst::ICMP_UGT: std::swap(LHS, RHS); LLVM_FALLTHROUGH; case ICmpInst::ICMP_ULT: // (X + C1) u< (X + C2) if C1 u< C2. 
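    // For instance, (%x +nuw 1) u< (%x +nuw 3) holds because neither addition
    // wraps and 1 u< 3.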
    if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2))
      return true;
    break;
  }

  return false;
}

bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing an arbitrary number of activations of
  // isKnownPredicateViaSplitting on the stack can result in exponential time
  // complexity.
  SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate. isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
  // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
  // use isKnownPredicate later if needed.
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](const Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                                  ICmpInst::Predicate Pred,
                                                  const SCEV *LHS,
                                                  const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding). Do not bother about
  // unreachable loops.
  if (!L || !DT.isReachableFromEntry(L->getHeader()))
    return true;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS, LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the
  // stack -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times. This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
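  // For example, a dominating call to llvm.assume(icmp ult %i, %n) lets the
  // fact %i u< %n be used below as a found condition when proving Pred.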
for (auto &AssumeVH : AC.assumptions()) { if (!AssumeVH) continue; auto *CI = cast(AssumeVH); if (!DT.dominates(CI, Latch->getTerminator())) continue; if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) return true; } if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) return true; for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; DTN != HeaderDTN; DTN = DTN->getIDom()) { assert(DTN && "should reach the loop header before reaching the root!"); BasicBlock *BB = DTN->getBlock(); if (isImpliedViaGuard(BB, Pred, LHS, RHS)) return true; BasicBlock *PBB = BB->getSinglePredecessor(); if (!PBB) continue; BranchInst *ContinuePredicate = dyn_cast(PBB->getTerminator()); if (!ContinuePredicate || !ContinuePredicate->isConditional()) continue; Value *Condition = ContinuePredicate->getCondition(); // If we have an edge `E` within the loop body that dominates the only // latch, the condition guarding `E` also guards the backedge. This // reasoning works only for loops with a single latch. BasicBlockEdge DominatingEdge(PBB, BB); if (DominatingEdge.isSingleEdge()) { // We're constructively (and conservatively) enumerating edges within the // loop body that dominate the latch. The dominator tree better agree // with us on this: assert(DT.dominates(DominatingEdge, Latch) && "should be!"); if (isImpliedCond(Pred, LHS, RHS, Condition, BB != ContinuePredicate->getSuccessor(0))) return true; } } return false; } bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { // Do not bother proving facts for unreachable code. if (!DT.isReachableFromEntry(BB)) return true; if (VerifyIR) assert(!verifyFunction(*BB->getParent(), &dbgs()) && "This cannot be done on broken IR!"); // If we cannot prove strict comparison (e.g. a > b), maybe we can prove // the facts (a >= b && a != b) separately. A typical situation is when the // non-strict comparison is known from ranges and non-equality is known from // dominating predicates. If we are proving strict comparison, we always try // to prove non-equality and non-strict comparison separately. auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); const bool ProvingStrictComparison = (Pred != NonStrictPredicate); bool ProvedNonStrictComparison = false; bool ProvedNonEquality = false; auto SplitAndProve = [&](std::function Fn) -> bool { if (!ProvedNonStrictComparison) ProvedNonStrictComparison = Fn(NonStrictPredicate); if (!ProvedNonEquality) ProvedNonEquality = Fn(ICmpInst::ICMP_NE); if (ProvedNonStrictComparison && ProvedNonEquality) return true; return false; }; if (ProvingStrictComparison) { auto ProofFn = [&](ICmpInst::Predicate P) { return isKnownViaNonRecursiveReasoning(P, LHS, RHS); }; if (SplitAndProve(ProofFn)) return true; } // Try to prove (Pred, LHS, RHS) using isImpliedCond. auto ProveViaCond = [&](const Value *Condition, bool Inverse) { const Instruction *CtxI = &BB->front(); if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, CtxI)) return true; if (ProvingStrictComparison) { auto ProofFn = [&](ICmpInst::Predicate P) { return isImpliedCond(P, LHS, RHS, Condition, Inverse, CtxI); }; if (SplitAndProve(ProofFn)) return true; } return false; }; // Starting at the block's predecessor, climb up the predecessor chain, as long // as there are predecessors that can be found that have unique successors // leading to the original block. 
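  // For example, if BB's single predecessor P ends in 'br %c, %BB, %other',
  // then %c is known to hold on entry to BB (or its negation, if BB is the
  // false successor), and the walk then continues upward from P.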
const Loop *ContainingLoop = LI.getLoopFor(BB); const BasicBlock *PredBB; if (ContainingLoop && ContainingLoop->getHeader() == BB) PredBB = ContainingLoop->getLoopPredecessor(); else PredBB = BB->getSinglePredecessor(); for (std::pair Pair(PredBB, BB); Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { const BranchInst *BlockEntryPredicate = dyn_cast(Pair.first->getTerminator()); if (!BlockEntryPredicate || BlockEntryPredicate->isUnconditional()) continue; if (ProveViaCond(BlockEntryPredicate->getCondition(), BlockEntryPredicate->getSuccessor(0) != Pair.second)) return true; } // Check conditions due to any @llvm.assume intrinsics. for (auto &AssumeVH : AC.assumptions()) { if (!AssumeVH) continue; auto *CI = cast(AssumeVH); if (!DT.dominates(CI, BB)) continue; if (ProveViaCond(CI->getArgOperand(0), false)) return true; } // Check conditions due to any @llvm.experimental.guard intrinsics. auto *GuardDecl = F.getParent()->getFunction( Intrinsic::getName(Intrinsic::experimental_guard)); if (GuardDecl) for (const auto *GU : GuardDecl->users()) if (const auto *Guard = dyn_cast(GU)) if (Guard->getFunction() == BB->getParent() && DT.dominates(Guard, BB)) if (ProveViaCond(Guard->getArgOperand(0), false)) return true; return false; } bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { // Interpret a null as meaning no loop, where there is obviously no guard // (interprocedural conditions notwithstanding). if (!L) return false; // Both LHS and RHS must be available at loop entry. assert(isAvailableAtLoopEntry(LHS, L) && "LHS is not available at Loop Entry"); assert(isAvailableAtLoopEntry(RHS, L) && "RHS is not available at Loop Entry"); if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) return true; return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS); } bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Value *FoundCondValue, bool Inverse, const Instruction *CtxI) { // False conditions implies anything. Do not bother analyzing it further. if (FoundCondValue == ConstantInt::getBool(FoundCondValue->getContext(), Inverse)) return true; if (!PendingLoopPredicates.insert(FoundCondValue).second) return false; auto ClearOnExit = make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); // Recursively handle And and Or conditions. const Value *Op0, *Op1; if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) { if (!Inverse) return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) || isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI); } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) { if (Inverse) return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) || isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI); } const ICmpInst *ICI = dyn_cast(FoundCondValue); if (!ICI) return false; // Now that we found a conditional branch that dominates the loop or controls // the loop latch. Check to see if it is the comparison we are looking for. 
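  // For example, if the guarding branch is 'br (icmp ult %i, %n), %a, %b' and
  // we got here along the edge into %a, Inverse is false and the found
  // condition is %i u< %n; along the %b edge the inverse condition
  // %i u>= %n is used instead.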
ICmpInst::Predicate FoundPred; if (Inverse) FoundPred = ICI->getInversePredicate(); else FoundPred = ICI->getPredicate(); const SCEV *FoundLHS = getSCEV(ICI->getOperand(0)); const SCEV *FoundRHS = getSCEV(ICI->getOperand(1)); return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI); } bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) { // Balance the types. if (getTypeSizeInBits(LHS->getType()) < getTypeSizeInBits(FoundLHS->getType())) { // For unsigned and equality predicates, try to prove that both found // operands fit into narrow unsigned range. If so, try to prove facts in // narrow types. if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy() && !FoundRHS->getType()->isPointerTy()) { auto *NarrowType = LHS->getType(); auto *WideType = FoundLHS->getType(); auto BitWidth = getTypeSizeInBits(NarrowType); const SCEV *MaxValue = getZeroExtendExpr( getConstant(APInt::getMaxValue(BitWidth)), WideType); if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) && isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) { const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType); const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType); if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS, TruncFoundRHS, CtxI)) return true; } } if (LHS->getType()->isPointerTy() || RHS->getType()->isPointerTy()) return false; if (CmpInst::isSigned(Pred)) { LHS = getSignExtendExpr(LHS, FoundLHS->getType()); RHS = getSignExtendExpr(RHS, FoundLHS->getType()); } else { LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); } } else if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(FoundLHS->getType())) { if (FoundLHS->getType()->isPointerTy() || FoundRHS->getType()->isPointerTy()) return false; if (CmpInst::isSigned(FoundPred)) { FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); } else { FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); } } return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI); } bool ScalarEvolution::isImpliedCondBalancedTypes( ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) { assert(getTypeSizeInBits(LHS->getType()) == getTypeSizeInBits(FoundLHS->getType()) && "Types should be balanced!"); // Canonicalize the query to match the way instcombine will have // canonicalized the comparison. if (SimplifyICmpOperands(Pred, LHS, RHS)) if (LHS == RHS) return CmpInst::isTrueWhenEqual(Pred); if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) if (FoundLHS == FoundRHS) return CmpInst::isFalseWhenEqual(FoundPred); // Check to see if we can make the LHS or RHS match. if (LHS == FoundRHS || RHS == FoundLHS) { if (isa(RHS)) { std::swap(FoundLHS, FoundRHS); FoundPred = ICmpInst::getSwappedPredicate(FoundPred); } else { std::swap(LHS, RHS); Pred = ICmpInst::getSwappedPredicate(Pred); } } // Check whether the found predicate is the same as the desired predicate. 
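  // For example, if we are proving LHS u< RHS and the found condition is
  // FoundRHS u> FoundLHS, the two predicates differ only by a swap, which is
  // handled below by swapping the operands of one of the two conditions.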
if (FoundPred == Pred) return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI); // Check whether swapping the found predicate makes it the same as the // desired predicate. if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { // We can write the implication // 0. LHS Pred RHS <- FoundLHS SwapPred FoundRHS // using one of the following ways: // 1. LHS Pred RHS <- FoundRHS Pred FoundLHS // 2. RHS SwapPred LHS <- FoundLHS SwapPred FoundRHS // 3. LHS Pred RHS <- ~FoundLHS Pred ~FoundRHS // 4. ~LHS SwapPred ~RHS <- FoundLHS SwapPred FoundRHS // Forms 1. and 2. require swapping the operands of one condition. Don't // do this if it would break canonical constant/addrec ordering. if (!isa(RHS) && !isa(LHS)) return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS, CtxI); if (!isa(FoundRHS) && !isa(FoundLHS)) return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, CtxI); // There's no clear preference between forms 3. and 4., try both. Avoid // forming getNotSCEV of pointer values as the resulting subtract is // not legal. if (!LHS->getType()->isPointerTy() && !RHS->getType()->isPointerTy() && isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS), FoundLHS, FoundRHS, CtxI)) return true; if (!FoundLHS->getType()->isPointerTy() && !FoundRHS->getType()->isPointerTy() && isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS), getNotSCEV(FoundRHS), CtxI)) return true; return false; } auto IsSignFlippedPredicate = [](CmpInst::Predicate P1, CmpInst::Predicate P2) { assert(P1 != P2 && "Handled earlier!"); return CmpInst::isRelational(P2) && P1 == CmpInst::getFlippedSignednessPredicate(P2); }; if (IsSignFlippedPredicate(Pred, FoundPred)) { // Unsigned comparison is the same as signed comparison when both the // operands are non-negative or negative. if ((isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) || (isKnownNegative(FoundLHS) && isKnownNegative(FoundRHS))) return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI); // Create local copies that we can freely swap and canonicalize our // conditions to "le/lt". ICmpInst::Predicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred; const SCEV *CanonicalLHS = LHS, *CanonicalRHS = RHS, *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS; if (ICmpInst::isGT(CanonicalPred) || ICmpInst::isGE(CanonicalPred)) { CanonicalPred = ICmpInst::getSwappedPredicate(CanonicalPred); CanonicalFoundPred = ICmpInst::getSwappedPredicate(CanonicalFoundPred); std::swap(CanonicalLHS, CanonicalRHS); std::swap(CanonicalFoundLHS, CanonicalFoundRHS); } assert((ICmpInst::isLT(CanonicalPred) || ICmpInst::isLE(CanonicalPred)) && "Must be!"); assert((ICmpInst::isLT(CanonicalFoundPred) || ICmpInst::isLE(CanonicalFoundPred)) && "Must be!"); if (ICmpInst::isSigned(CanonicalPred) && isKnownNonNegative(CanonicalRHS)) // Use implication: // x =s 0 --> x x (FoundLHS) || isa(FoundRHS))) { const SCEVConstant *C = nullptr; const SCEV *V = nullptr; if (isa(FoundLHS)) { C = cast(FoundLHS); V = FoundRHS; } else { C = cast(FoundRHS); V = FoundLHS; } // The guarding predicate tells us that C != V. If the known range // of V is [C, t), we can sharpen the range to [C + 1, t). The // range we consider has to correspond to same signedness as the // predicate we're interested in folding. APInt Min = ICmpInst::isSigned(Pred) ? getSignedRangeMin(V) : getUnsignedRangeMin(V); if (Min == C->getAPInt()) { // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 
// This is true even if (Min + 1) wraps around -- in case of // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). APInt SharperMin = Min + 1; switch (Pred) { case ICmpInst::ICMP_SGE: case ICmpInst::ICMP_UGE: // We know V `Pred` SharperMin. If this implies LHS `Pred` // RHS, we're done. if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin), CtxI)) return true; LLVM_FALLTHROUGH; case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_UGT: // We know from the range information that (V `Pred` Min || // V == Min). We know from the guarding condition that !(V // == Min). This gives us // // V `Pred` Min || V == Min && !(V == Min) // => V `Pred` Min // // If V `Pred` Min implies LHS `Pred` RHS, we're done. if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI)) return true; break; // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively. case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_ULE: if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, LHS, V, getConstant(SharperMin), CtxI)) return true; LLVM_FALLTHROUGH; case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_ULT: if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, LHS, V, getConstant(Min), CtxI)) return true; break; default: // No change break; } } } // Check whether the actual condition is beyond sufficient. if (FoundPred == ICmpInst::ICMP_EQ) if (ICmpInst::isTrueWhenEqual(Pred)) if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) return true; if (Pred == ICmpInst::ICMP_NE) if (!ICmpInst::isTrueWhenEqual(FoundPred)) if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) return true; // Otherwise assume the worst. return false; } bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, const SCEV *&L, const SCEV *&R, SCEV::NoWrapFlags &Flags) { const auto *AE = dyn_cast(Expr); if (!AE || AE->getNumOperands() != 2) return false; L = AE->getOperand(0); R = AE->getOperand(1); Flags = AE->getNoWrapFlags(); return true; } Optional ScalarEvolution::computeConstantDifference(const SCEV *More, const SCEV *Less) { // We avoid subtracting expressions here because this function is usually // fairly deep in the call stack (i.e. is called many times). // X - X = 0. if (More == Less) return APInt(getTypeSizeInBits(More->getType()), 0); if (isa(Less) && isa(More)) { const auto *LAR = cast(Less); const auto *MAR = cast(More); if (LAR->getLoop() != MAR->getLoop()) return None; // We look at affine expressions only; not for correctness but to keep // getStepRecurrence cheap. if (!LAR->isAffine() || !MAR->isAffine()) return None; if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) return None; Less = LAR->getStart(); More = MAR->getStart(); // fall through } if (isa(Less) && isa(More)) { const auto &M = cast(More)->getAPInt(); const auto &L = cast(Less)->getAPInt(); return M - L; } SCEV::NoWrapFlags Flags; const SCEV *LLess = nullptr, *RLess = nullptr; const SCEV *LMore = nullptr, *RMore = nullptr; const SCEVConstant *C1 = nullptr, *C2 = nullptr; // Compare (X + C1) vs X. if (splitBinaryAdd(Less, LLess, RLess, Flags)) if ((C1 = dyn_cast(LLess))) if (RLess == More) return -(C1->getAPInt()); // Compare X vs (X + C2). if (splitBinaryAdd(More, LMore, RMore, Flags)) if ((C2 = dyn_cast(LMore))) if (RMore == Less) return C2->getAPInt(); // Compare (X + C1) vs (X + C2). 
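  // For example, for More = (%x + 7) and Less = (%x + 2) the constant
  // difference More - Less is 7 - 2 = 5.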
if (C1 && C2 && RLess == RMore) return C2->getAPInt() - C1->getAPInt(); return None; } bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart( ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) { // Try to recognize the following pattern: // // FoundRHS = ... // ... // loop: // FoundLHS = {Start,+,W} // context_bb: // Basic block from the same loop // known(Pred, FoundLHS, FoundRHS) // // If some predicate is known in the context of a loop, it is also known on // each iteration of this loop, including the first iteration. Therefore, in // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to // prove the original pred using this fact. if (!CtxI) return false; const BasicBlock *ContextBB = CtxI->getParent(); // Make sure AR varies in the context block. if (auto *AR = dyn_cast(FoundLHS)) { const Loop *L = AR->getLoop(); // Make sure that context belongs to the loop and executes on 1st iteration // (if it ever executes at all). if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) return false; if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop())) return false; return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS); } if (auto *AR = dyn_cast(FoundRHS)) { const Loop *L = AR->getLoop(); // Make sure that context belongs to the loop and executes on 1st iteration // (if it ever executes at all). if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) return false; if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop())) return false; return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart()); } return false; } bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const SCEV *FoundLHS, const SCEV *FoundRHS) { if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) return false; const auto *AddRecLHS = dyn_cast(LHS); if (!AddRecLHS) return false; const auto *AddRecFoundLHS = dyn_cast(FoundLHS); if (!AddRecFoundLHS) return false; // We'd like to let SCEV reason about control dependencies, so we constrain // both the inequalities to be about add recurrences on the same loop. This // way we can use isLoopEntryGuardedByCond later. const Loop *L = AddRecFoundLHS->getLoop(); if (L != AddRecLHS->getLoop()) return false; // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) // // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) // ... (2) // // Informal proof for (2), assuming (1) [*]: // // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] // // Then // // FoundLHS s< FoundRHS s< INT_MIN - C // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] // <=> FoundLHS + C s< FoundRHS + C // // [*]: (1) can be proved by ruling out overflow. // // [**]: This can be proved by analyzing all the four possibilities: // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and // (A s>= 0, B s>= 0). // // Note: // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS // s< (INT_MIN - C). 
Lack of sign overflow / underflow in "FoundRHS + C" is // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + // C)". Optional LDiff = computeConstantDifference(LHS, FoundLHS); Optional RDiff = computeConstantDifference(RHS, FoundRHS); if (!LDiff || !RDiff || *LDiff != *RDiff) return false; if (LDiff->isMinValue()) return true; APInt FoundRHSLimit; if (Pred == CmpInst::ICMP_ULT) { FoundRHSLimit = -(*RDiff); } else { assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; } // Try to prove (1) or (2), as needed. return isAvailableAtLoopEntry(FoundRHS, L) && isLoopEntryGuardedByCond(L, Pred, FoundRHS, getConstant(FoundRHSLimit)); } bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const SCEV *FoundLHS, const SCEV *FoundRHS, unsigned Depth) { const PHINode *LPhi = nullptr, *RPhi = nullptr; auto ClearOnExit = make_scope_exit([&]() { if (LPhi) { bool Erased = PendingMerges.erase(LPhi); assert(Erased && "Failed to erase LPhi!"); (void)Erased; } if (RPhi) { bool Erased = PendingMerges.erase(RPhi); assert(Erased && "Failed to erase RPhi!"); (void)Erased; } }); // Find respective Phis and check that they are not being pending. if (const SCEVUnknown *LU = dyn_cast(LHS)) if (auto *Phi = dyn_cast(LU->getValue())) { if (!PendingMerges.insert(Phi).second) return false; LPhi = Phi; } if (const SCEVUnknown *RU = dyn_cast(RHS)) if (auto *Phi = dyn_cast(RU->getValue())) { // If we detect a loop of Phi nodes being processed by this method, for // example: // // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ] // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ] // // we don't want to deal with a case that complex, so return conservative // answer false. if (!PendingMerges.insert(Phi).second) return false; RPhi = Phi; } // If none of LHS, RHS is a Phi, nothing to do here. if (!LPhi && !RPhi) return false; // If there is a SCEVUnknown Phi we are interested in, make it left. if (!LPhi) { std::swap(LHS, RHS); std::swap(FoundLHS, FoundRHS); std::swap(LPhi, RPhi); Pred = ICmpInst::getSwappedPredicate(Pred); } assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!"); const BasicBlock *LBB = LPhi->getParent(); const SCEVAddRecExpr *RAR = dyn_cast(RHS); auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) { return isKnownViaNonRecursiveReasoning(Pred, S1, S2) || isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) || isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth); }; if (RPhi && RPhi->getParent() == LBB) { // Case one: RHS is also a SCEVUnknown Phi from the same basic block. // If we compare two Phis from the same block, and for each entry block // the predicate is true for incoming values from this block, then the // predicate is also true for the Phis. for (const BasicBlock *IncBB : predecessors(LBB)) { const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB)); if (!ProvedEasily(L, R)) return false; } } else if (RAR && RAR->getLoop()->getHeader() == LBB) { // Case two: RHS is also a Phi from the same basic block, and it is an // AddRec. It means that there is a loop which has both AddRec and Unknown // PHIs, for it we can compare incoming values of AddRec from above the loop // and latch with their respective incoming values of LPhi. // TODO: Generalize to handle loops with many inputs in a header. 
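    // Illustrative case: with %lphi = phi [ %a, %preheader ], [ %b, %latch ]
    // and RHS = {Start,+,Step}, we try to prove %a `Pred` Start and
    // %b `Pred` {Start+Step,+,Step} (the post-increment form of RHS).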
if (LPhi->getNumIncomingValues() != 2) return false; auto *RLoop = RAR->getLoop(); auto *Predecessor = RLoop->getLoopPredecessor(); assert(Predecessor && "Loop with AddRec with no predecessor?"); const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor)); if (!ProvedEasily(L1, RAR->getStart())) return false; auto *Latch = RLoop->getLoopLatch(); assert(Latch && "Loop with AddRec with no latch?"); const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch)); if (!ProvedEasily(L2, RAR->getPostIncExpr(*this))) return false; } else { // In all other cases go over inputs of LHS and compare each of them to RHS, // the predicate is true for (LHS, RHS) if it is true for all such pairs. // At this point RHS is either a non-Phi, or it is a Phi from some block // different from LBB. for (const BasicBlock *IncBB : predecessors(LBB)) { // Check that RHS is available in this block. if (!dominates(RHS, IncBB)) return false; const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); // Make sure L does not refer to a value from a potentially previous // iteration of a loop. if (!properlyDominates(L, LBB)) return false; if (!ProvedEasily(L, RHS)) return false; } } return true; } bool ScalarEvolution::isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const SCEV *FoundLHS, const SCEV *FoundRHS) { // We want to imply LHS < RHS from LHS < (RHS >> shiftvalue). First, make // sure that we are dealing with same LHS. if (RHS == FoundRHS) { std::swap(LHS, RHS); std::swap(FoundLHS, FoundRHS); Pred = ICmpInst::getSwappedPredicate(Pred); } if (LHS != FoundLHS) return false; auto *SUFoundRHS = dyn_cast(FoundRHS); if (!SUFoundRHS) return false; Value *Shiftee, *ShiftValue; using namespace PatternMatch; if (match(SUFoundRHS->getValue(), m_LShr(m_Value(Shiftee), m_Value(ShiftValue)))) { auto *ShifteeS = getSCEV(Shiftee); // Prove one of the following: // LHS > shiftvalue) && shiftee <=u RHS ---> LHS > shiftvalue) && shiftee <=u RHS ---> LHS <=u RHS // LHS > shiftvalue) && shiftee <=s RHS && shiftee >=s 0 // ---> LHS > shiftvalue) && shiftee <=s RHS && shiftee >=s 0 // ---> LHS <=s RHS if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) return isKnownPredicate(ICmpInst::ICMP_ULE, ShifteeS, RHS); if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) if (isKnownNonNegative(ShifteeS)) return isKnownPredicate(ICmpInst::ICMP_SLE, ShifteeS, RHS); } return false; } bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) { if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) return true; if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) return true; if (isImpliedCondOperandsViaShift(Pred, LHS, RHS, FoundLHS, FoundRHS)) return true; if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) return true; return isImpliedCondOperandsHelper(Pred, LHS, RHS, FoundLHS, FoundRHS); } /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values? 
template static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate) { const MinMaxExprType *MinMaxExpr = dyn_cast(MaybeMinMaxExpr); if (!MinMaxExpr) return false; return is_contained(MinMaxExpr->operands(), Candidate); } static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { // If both sides are affine addrecs for the same loop, with equal // steps, and we know the recurrences don't wrap, then we only // need to check the predicate on the starting values. if (!ICmpInst::isRelational(Pred)) return false; const SCEVAddRecExpr *LAR = dyn_cast(LHS); if (!LAR) return false; const SCEVAddRecExpr *RAR = dyn_cast(RHS); if (!RAR) return false; if (LAR->getLoop() != RAR->getLoop()) return false; if (!LAR->isAffine() || !RAR->isAffine()) return false; if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) return false; SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? SCEV::FlagNSW : SCEV::FlagNUW; if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW)) return false; return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart()); } /// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max /// expression? static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { switch (Pred) { default: return false; case ICmpInst::ICMP_SGE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; case ICmpInst::ICMP_SLE: return // min(A, ...) <= A IsMinMaxConsistingOf(LHS, RHS) || // A <= max(A, ...) IsMinMaxConsistingOf(RHS, LHS); case ICmpInst::ICMP_UGE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; case ICmpInst::ICMP_ULE: return // min(A, ...) <= A // FIXME: what about umin_seq? IsMinMaxConsistingOf(LHS, RHS) || // A <= max(A, ...) IsMinMaxConsistingOf(RHS, LHS); } llvm_unreachable("covered switch fell through?!"); } bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const SCEV *FoundLHS, const SCEV *FoundRHS, unsigned Depth) { assert(getTypeSizeInBits(LHS->getType()) == getTypeSizeInBits(RHS->getType()) && "LHS and RHS have different sizes?"); assert(getTypeSizeInBits(FoundLHS->getType()) == getTypeSizeInBits(FoundRHS->getType()) && "FoundLHS and FoundRHS have different sizes?"); // We want to avoid hurting the compile time with analysis of too big trees. if (Depth > MaxSCEVOperationsImplicationDepth) return false; // We only want to work with GT comparison so far. if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) { Pred = CmpInst::getSwappedPredicate(Pred); std::swap(LHS, RHS); std::swap(FoundLHS, FoundRHS); } // For unsigned, try to reduce it to corresponding signed comparison. if (Pred == ICmpInst::ICMP_UGT) // We can replace unsigned predicate with its signed counterpart if all // involved values are non-negative. // TODO: We could have better support for unsigned. if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) { // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us // use this fact to prove that LHS and RHS are non-negative. 
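      // Concretely, we ask whether the found condition implies LHS >s -1 and
      // RHS >s -1; if both hold, every involved value is non-negative and the
      // unsigned predicate can be replaced by its signed counterpart below.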
const SCEV *MinusOne = getMinusOne(LHS->getType()); if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS, FoundRHS) && isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS, FoundRHS)) Pred = ICmpInst::ICMP_SGT; } if (Pred != ICmpInst::ICMP_SGT) return false; auto GetOpFromSExt = [&](const SCEV *S) { if (auto *Ext = dyn_cast(S)) return Ext->getOperand(); // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off // the constant in some cases. return S; }; // Acquire values from extensions. auto *OrigLHS = LHS; auto *OrigFoundLHS = FoundLHS; LHS = GetOpFromSExt(LHS); FoundLHS = GetOpFromSExt(FoundLHS); // Is the SGT predicate can be proved trivially or using the found context. auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) { return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) || isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS, FoundRHS, Depth + 1); }; if (auto *LHSAddExpr = dyn_cast(LHS)) { // We want to avoid creation of any new non-constant SCEV. Since we are // going to compare the operands to RHS, we should be certain that we don't // need any size extensions for this. So let's decline all cases when the // sizes of types of LHS and RHS do not match. // TODO: Maybe try to get RHS from sext to catch more cases? if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType())) return false; // Should not overflow. if (!LHSAddExpr->hasNoSignedWrap()) return false; auto *LL = LHSAddExpr->getOperand(0); auto *LR = LHSAddExpr->getOperand(1); auto *MinusOne = getMinusOne(RHS->getType()); // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context. auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) { return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS); }; // Try to prove the following rule: // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS). // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS). if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL)) return true; } else if (auto *LHSUnknownExpr = dyn_cast(LHS)) { Value *LL, *LR; // FIXME: Once we have SDiv implemented, we can get rid of this matching. using namespace llvm::PatternMatch; if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) { // Rules for division. // We are going to perform some comparisons with Denominator and its // derivative expressions. In general case, creating a SCEV for it may // lead to a complex analysis of the entire graph, and in particular it // can request trip count recalculation for the same loop. This would // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid // this, we only want to create SCEVs that are constants in this section. // So we bail if Denominator is not a constant. if (!isa(LR)) return false; auto *Denominator = cast(getSCEV(LR)); // We want to make sure that LHS = FoundLHS / Denominator. If it is so, // then a SCEV for the numerator already exists and matches with FoundLHS. auto *Numerator = getExistingSCEV(LL); if (!Numerator || Numerator->getType() != FoundLHS->getType()) return false; // Make sure that the numerator matches with FoundLHS and the denominator // is positive. if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator)) return false; auto *DTy = Denominator->getType(); auto *FRHSTy = FoundRHS->getType(); if (DTy->isPointerTy() != FRHSTy->isPointerTy()) // One of types is a pointer and another one is not. 
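        // (e.g. an i64 denominator against an i8* FoundRHS.)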
We cannot extend // them properly to a wider type, so let us just reject this case. // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help // to avoid this check. return false; // Given that: // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0. auto *WTy = getWiderType(DTy, FRHSTy); auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy); auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy); // Try to prove the following rule: // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS). // For example, given that FoundLHS > 2. It means that FoundLHS is at // least 3. If we divide it by Denominator < 4, we will have at least 1. auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2)); if (isKnownNonPositive(RHS) && IsSGTViaContext(FoundRHSExt, DenomMinusTwo)) return true; // Try to prove the following rule: // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS). // For example, given that FoundLHS > -3. Then FoundLHS is at least -2. // If we divide it by Denominator > 2, then: // 1. If FoundLHS is negative, then the result is 0. // 2. If FoundLHS is non-negative, then the result is non-negative. // Anyways, the result is non-negative. auto *MinusOne = getMinusOne(WTy); auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt); if (isKnownNegative(RHS) && IsSGTViaContext(FoundRHSExt, NegDenomMinusOne)) return true; } } // If our expression contained SCEVUnknown Phis, and we split it down and now // need to prove something for them, try to prove the predicate for every // possible incoming values of those Phis. if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1)) return true; return false; } static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { // zext x u<= sext x, sext x s<= zext x switch (Pred) { case ICmpInst::ICMP_SGE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; case ICmpInst::ICMP_SLE: { // If operand >=s 0 then ZExt == SExt. If operand (LHS); const SCEVZeroExtendExpr *ZExt = dyn_cast(RHS); if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) return true; break; } case ICmpInst::ICMP_UGE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; case ICmpInst::ICMP_ULE: { // If operand >=s 0 then ZExt == SExt. 
If operand (LHS); const SCEVSignExtendExpr *SExt = dyn_cast(RHS); if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) return true; break; } default: break; }; return false; } bool ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { return isKnownPredicateExtendIdiom(Pred, LHS, RHS) || isKnownPredicateViaConstantRanges(Pred, LHS, RHS) || IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) || IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) || isKnownPredicateViaNoOverflow(Pred, LHS, RHS); } bool ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const SCEV *FoundLHS, const SCEV *FoundRHS) { switch (Pred) { default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_NE: if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) return true; break; case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) && isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS)) return true; break; case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_SGE: if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) && isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS)) return true; break; case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) && isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS)) return true; break; case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_UGE: if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) && isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS)) return true; break; } // Maybe it can be proved via operations? if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS)) return true; return false; } bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const SCEV *FoundLHS, const SCEV *FoundRHS) { if (!isa(RHS) || !isa(FoundRHS)) // The restriction on `FoundRHS` be lifted easily -- it exists only to // reduce the compile time impact of this optimization. return false; Optional Addend = computeConstantDifference(LHS, FoundLHS); if (!Addend) return false; const APInt &ConstFoundRHS = cast(FoundRHS)->getAPInt(); // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the // antecedent "`FoundLHS` `Pred` `FoundRHS`". ConstantRange FoundLHSRange = ConstantRange::makeExactICmpRegion(Pred, ConstFoundRHS); // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); // We can also compute the range of values for `LHS` that satisfy the // consequent, "`LHS` `Pred` `RHS`": const APInt &ConstRHS = cast(RHS)->getAPInt(); // The antecedent implies the consequent if every value of `LHS` that // satisfies the antecedent also satisfies the consequent. 
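  // Worked example (illustrative): Pred = ult, FoundRHS = 8, Addend = 2.
  // FoundLHSRange is [0, 8), so LHSRange is [2, 10); the implication holds
  // exactly when every value in [2, 10) is ult ConstRHS, i.e. ConstRHS >= 10.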
return LHSRange.icmp(Pred, ConstRHS); } bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, bool IsSigned) { assert(isKnownPositive(Stride) && "Positive stride expected!"); unsigned BitWidth = getTypeSizeInBits(RHS->getType()); const SCEV *One = getOne(Stride->getType()); if (IsSigned) { APInt MaxRHS = getSignedRangeMax(RHS); APInt MaxValue = APInt::getSignedMaxValue(BitWidth); APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); } APInt MaxRHS = getUnsignedRangeMax(RHS); APInt MaxValue = APInt::getMaxValue(BitWidth); APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); } bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned) { unsigned BitWidth = getTypeSizeInBits(RHS->getType()); const SCEV *One = getOne(Stride->getType()); if (IsSigned) { APInt MinRHS = getSignedRangeMin(RHS); APInt MinValue = APInt::getSignedMinValue(BitWidth); APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); } APInt MinRHS = getUnsignedRangeMin(RHS); APInt MinValue = APInt::getMinValue(BitWidth); APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); } const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) { // umin(N, 1) + floor((N - umin(N, 1)) / D) // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin // expression fixes the case of N=0. const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType())); const SCEV *NMinusOne = getMinusSCEV(N, MinNOne); return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D)); } const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, const SCEV *Stride, const SCEV *End, unsigned BitWidth, bool IsSigned) { // The logic in this function assumes we can represent a positive stride. // If we can't, the backedge-taken count must be zero. if (IsSigned && BitWidth == 1) return getZero(Stride->getType()); // This code has only been closely audited for negative strides in the // unsigned comparison case, it may be correct for signed comparison, but // that needs to be established. assert((!IsSigned || !isKnownNonPositive(Stride)) && "Stride is expected strictly positive for signed case!"); // Calculate the maximum backedge count based on the range of values // permitted by Start, End, and Stride. APInt MinStart = IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start); APInt MinStride = IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); // We assume either the stride is positive, or the backedge-taken count // is zero. So force StrideForMaxBECount to be at least one. APInt One(BitWidth, 1); APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride) : APIntOps::umax(One, MinStride); APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) : APInt::getMaxValue(BitWidth); APInt Limit = MaxValue - (StrideForMaxBECount - 1); // Although End can be a MAX expression we estimate MaxEnd considering only // the case End = RHS of the loop termination condition. 
This is safe because // in the other case (End - Start) is zero, leading to a zero maximum backedge // taken count. APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit) : APIntOps::umin(getUnsignedRangeMax(End), Limit); // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride) MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart) : APIntOps::umax(MaxEnd, MinStart); return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */, getConstant(StrideForMaxBECount) /* Step */); } ScalarEvolution::ExitLimit ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, const Loop *L, bool IsSigned, bool ControlsExit, bool AllowPredicates) { SmallPtrSet Predicates; const SCEVAddRecExpr *IV = dyn_cast(LHS); bool PredicatedIV = false; auto canAssumeNoSelfWrap = [&](const SCEVAddRecExpr *AR) { // Can we prove this loop *must* be UB if overflow of IV occurs? // Reasoning goes as follows: // * Suppose the IV did self wrap. // * If Stride evenly divides the iteration space, then once wrap // occurs, the loop must revisit the same values. // * We know that RHS is invariant, and that none of those values // caused this exit to be taken previously. Thus, this exit is // dynamically dead. // * If this is the sole exit, then a dead exit implies the loop // must be infinite if there are no abnormal exits. // * If the loop were infinite, then it must either not be mustprogress // or have side effects. Otherwise, it must be UB. // * It can't (by assumption), be UB so we have contradicted our // premise and can conclude the IV did not in fact self-wrap. if (!isLoopInvariant(RHS, L)) return false; auto *StrideC = dyn_cast(AR->getStepRecurrence(*this)); if (!StrideC || !StrideC->getAPInt().isPowerOf2()) return false; if (!ControlsExit || !loopHasNoAbnormalExits(L)) return false; return loopIsFiniteByAssumption(L); }; if (!IV) { if (auto *ZExt = dyn_cast(LHS)) { const SCEVAddRecExpr *AR = dyn_cast(ZExt->getOperand()); if (AR && AR->getLoop() == L && AR->isAffine()) { auto canProveNUW = [&]() { if (!isLoopInvariant(RHS, L)) return false; if (!isKnownNonZero(AR->getStepRecurrence(*this))) // We need the sequence defined by AR to strictly increase in the // unsigned integer domain for the logic below to hold. return false; const unsigned InnerBitWidth = getTypeSizeInBits(AR->getType()); const unsigned OuterBitWidth = getTypeSizeInBits(RHS->getType()); // If RHS <=u Limit, then there must exist a value V in the sequence // defined by AR (e.g. {Start,+,Step}) such that V >u RHS, and // V <=u UINT_MAX. Thus, we must exit the loop before unsigned // overflow occurs. This limit also implies that a signed comparison // (in the wide bitwidth) is equivalent to an unsigned comparison as // the high bits on both sides must be zero. APInt StrideMax = getUnsignedRangeMax(AR->getStepRecurrence(*this)); APInt Limit = APInt::getMaxValue(InnerBitWidth) - (StrideMax - 1); Limit = Limit.zext(OuterBitWidth); return getUnsignedRangeMax(applyLoopGuards(RHS, L)).ule(Limit); }; auto Flags = AR->getNoWrapFlags(); if (!hasFlags(Flags, SCEV::FlagNUW) && canProveNUW()) Flags = setFlags(Flags, SCEV::FlagNUW); setNoWrapFlags(const_cast(AR), Flags); if (AR->hasNoUnsignedWrap()) { // Emulate what getZeroExtendExpr would have done during construction // if we'd been able to infer the fact just above at that time. 
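          // That is, once NUW is known, zext({Start,+,Step}) can be rewritten
          // as {zext(Start),+,zext(Step)} in the wider type, and that AddRec
          // is used as the IV below.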
const SCEV *Step = AR->getStepRecurrence(*this); Type *Ty = ZExt->getType(); auto *S = getAddRecExpr( getExtendAddRecStart(AR, Ty, this, 0), getZeroExtendExpr(Step, Ty, 0), L, AR->getNoWrapFlags()); IV = dyn_cast(S); } } } } if (!IV && AllowPredicates) { // Try to make this an AddRec using runtime tests, in the first X // iterations of this loop, where X is the SCEV expression found by the // algorithm below. IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); PredicatedIV = true; } // Avoid weird loops if (!IV || IV->getLoop() != L || !IV->isAffine()) return getCouldNotCompute(); // A precondition of this method is that the condition being analyzed // reaches an exiting branch which dominates the latch. Given that, we can // assume that an increment which violates the nowrap specification and // produces poison must cause undefined behavior when the resulting poison // value is branched upon and thus we can conclude that the backedge is // taken no more often than would be required to produce that poison value. // Note that a well defined loop can exit on the iteration which violates // the nowrap specification if there is another exit (either explicit or // implicit/exceptional) which causes the loop to execute before the // exiting instruction we're analyzing would trigger UB. auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW; bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType); ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; const SCEV *Stride = IV->getStepRecurrence(*this); bool PositiveStride = isKnownPositive(Stride); // Avoid negative or zero stride values. if (!PositiveStride) { // We can compute the correct backedge taken count for loops with unknown // strides if we can prove that the loop is not an infinite loop with side // effects. Here's the loop structure we are trying to handle - // // i = start // do { // A[i] = i; // i += s; // } while (i < end); // // The backedge taken count for such loops is evaluated as - // (max(end, start + stride) - start - 1) /u stride // // The additional preconditions that we need to check to prove correctness // of the above formula is as follows - // // a) IV is either nuw or nsw depending upon signedness (indicated by the // NoWrap flag). // b) the loop is guaranteed to be finite (e.g. is mustprogress and has // no side effects within the loop) // c) loop has a single static exit (with no abnormal exits) // // Precondition a) implies that if the stride is negative, this is a single // trip loop. The backedge taken count formula reduces to zero in this case. // // Precondition b) and c) combine to imply that if rhs is invariant in L, // then a zero stride means the backedge can't be taken without executing // undefined behavior. // // The positive stride case is the same as isKnownPositive(Stride) returning // true (original behavior of the function). // if (PredicatedIV || !NoWrap || !loopIsFiniteByAssumption(L) || !loopHasNoAbnormalExits(L)) return getCouldNotCompute(); // This bailout is protecting the logic in computeMaxBECountForLT which // has not yet been sufficiently auditted or tested with negative strides. // We used to filter out all known-non-positive cases here, we're in the // process of being less restrictive bit by bit. if (IsSigned && isKnownNonPositive(Stride)) return getCouldNotCompute(); if (!isKnownNonZero(Stride)) { // If we have a step of zero, and RHS isn't invariant in L, we don't know // if it might eventually be greater than start and if so, on which // iteration. 
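      // (Picture a zero step: the IV stays at Start on every iteration while
      // a varying RHS may cross it on an unknown iteration.)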
We can't even produce a useful upper bound. if (!isLoopInvariant(RHS, L)) return getCouldNotCompute(); // We allow a potentially zero stride, but we need to divide by stride // below. Since the loop can't be infinite and this check must control // the sole exit, we can infer the exit must be taken on the first // iteration (e.g. backedge count = 0) if the stride is zero. Given that, // we know the numerator in the divides below must be zero, so we can // pick an arbitrary non-zero value for the denominator (e.g. stride) // and produce the right result. // FIXME: Handle the case where Stride is poison? auto wouldZeroStrideBeUB = [&]() { // Proof by contradiction. Suppose the stride were zero. If we can // prove that the backedge *is* taken on the first iteration, then since // we know this condition controls the sole exit, we must have an // infinite loop. We can't have a (well defined) infinite loop per // check just above. // Note: The (Start - Stride) term is used to get the start' term from // (start' + stride,+,stride). Remember that we only care about the // result of this expression when stride == 0 at runtime. auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride); return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS); }; if (!wouldZeroStrideBeUB()) { Stride = getUMaxExpr(Stride, getOne(Stride->getType())); } } } else if (!Stride->isOne() && !NoWrap) { auto isUBOnWrap = [&]() { // From no-self-wrap, we need to then prove no-(un)signed-wrap. This // follows trivially from the fact that every (un)signed-wrapped, but // not self-wrapped value must be LT than the last value before // (un)signed wrap. Since we know that last value didn't exit, nor // will any smaller one. return canAssumeNoSelfWrap(IV); }; // Avoid proven overflow cases: this will ensure that the backedge taken // count will not generate any unsigned overflow. Relaxed no-overflow // conditions exploit NoWrapFlags, allowing to optimize in presence of // undefined behaviors like the case of C language. if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap()) return getCouldNotCompute(); } // On all paths just preceeding, we established the following invariant: // IV can be assumed not to overflow up to and including the exiting // iteration. We proved this in one of two ways: // 1) We can show overflow doesn't occur before the exiting iteration // 1a) canIVOverflowOnLT, and b) step of one // 2) We can show that if overflow occurs, the loop must execute UB // before any possible exit. // Note that we have not yet proved RHS invariant (in general). const SCEV *Start = IV->getStart(); // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond. // If we convert to integers, isLoopEntryGuardedByCond will miss some cases. // Use integer-typed versions for actual computation; we can't subtract // pointers in general. const SCEV *OrigStart = Start; const SCEV *OrigRHS = RHS; if (Start->getType()->isPointerTy()) { Start = getLosslessPtrToIntExpr(Start); if (isa(Start)) return Start; } if (RHS->getType()->isPointerTy()) { RHS = getLosslessPtrToIntExpr(RHS); if (isa(RHS)) return RHS; } // When the RHS is not invariant, we do not know the end bound of the loop and // cannot calculate the ExactBECount needed by ExitLimit. However, we can // calculate the MaxBECount, given the start, stride and max value for the end // bound of the loop (RHS), and the fact that IV does not overflow (which is // checked above). 
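  // Illustrative i8 example: Start in [0, 4], Stride known >= 1, and an RHS
  // whose unsigned max is 100 give a constant max backedge-taken count of
  // ceil((100 - 0) / 1) = 100 even though the exact count is unknown.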
if (!isLoopInvariant(RHS, L)) { const SCEV *MaxBECount = computeMaxBECountForLT( Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, false /*MaxOrZero*/, Predicates); } // We use the expression (max(End,Start)-Start)/Stride to describe the // backedge count, as if the backedge is taken at least once max(End,Start) // is End and so the result is as above, and if not max(End,Start) is Start // so we get a backedge count of zero. const SCEV *BECount = nullptr; auto *OrigStartMinusStride = getMinusSCEV(OrigStart, Stride); assert(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!"); assert(isAvailableAtLoopEntry(OrigStart, L) && "Must be!"); assert(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!"); // Can we prove (max(RHS,Start) > Start - Stride? if (isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigStart) && isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigRHS)) { // In this case, we can use a refined formula for computing backedge taken // count. The general formula remains: // "End-Start /uceiling Stride" where "End = max(RHS,Start)" // We want to use the alternate formula: // "((End - 1) - (Start - Stride)) /u Stride" // Let's do a quick case analysis to show these are equivalent under // our precondition that max(RHS,Start) > Start - Stride. // * For RHS <= Start, the backedge-taken count must be zero. // "((End - 1) - (Start - Stride)) /u Stride" reduces to // "((Start - 1) - (Start - Stride)) /u Stride" which simplies to // "Stride - 1 /u Stride" which is indeed zero for all non-zero values // of Stride. For 0 stride, we've use umin(1,Stride) above, reducing // this to the stride of 1 case. // * For RHS >= Start, the backedge count must be "RHS-Start /uceil Stride". // "((End - 1) - (Start - Stride)) /u Stride" reduces to // "((RHS - 1) - (Start - Stride)) /u Stride" reassociates to // "((RHS - (Start - Stride) - 1) /u Stride". // Our preconditions trivially imply no overflow in that form. const SCEV *MinusOne = getMinusOne(Stride->getType()); const SCEV *Numerator = getMinusSCEV(getAddExpr(RHS, MinusOne), getMinusSCEV(Start, Stride)); BECount = getUDivExpr(Numerator, Stride); } const SCEV *BECountIfBackedgeTaken = nullptr; if (!BECount) { auto canProveRHSGreaterThanEqualStart = [&]() { auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart)) return true; // (RHS > Start - 1) implies RHS >= Start. // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if // "Start - 1" doesn't overflow. // * For signed comparison, if Start - 1 does overflow, it's equal // to INT_MAX, and "RHS >s INT_MAX" is trivially false. // * For unsigned comparison, if Start - 1 does overflow, it's equal // to UINT_MAX, and "RHS >u UINT_MAX" is trivially false. // // FIXME: Should isLoopEntryGuardedByCond do this for us? auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; auto *StartMinusOne = getAddExpr(OrigStart, getMinusOne(OrigStart->getType())); return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne); }; // If we know that RHS >= Start in the context of loop, then we know that // max(RHS, Start) = RHS at this point. const SCEV *End; if (canProveRHSGreaterThanEqualStart()) { End = RHS; } else { // If RHS < Start, the backedge will be taken zero times. So in // general, we can write the backedge-taken count as: // // RHS >= Start ? 
ceil(RHS - Start) / Stride : 0 // // We convert it to the following to make it more convenient for SCEV: // // ceil(max(RHS, Start) - Start) / Stride End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); // See what would happen if we assume the backedge is taken. This is // used to compute MaxBECount. BECountIfBackedgeTaken = getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride); } // At this point, we know: // // 1. If IsSigned, Start <=s End; otherwise, Start <=u End // 2. The index variable doesn't overflow. // // Therefore, we know N exists such that // (Start + Stride * N) >= End, and computing "(Start + Stride * N)" // doesn't overflow. // // Using this information, try to prove whether the addition in // "(Start - End) + (Stride - 1)" has unsigned overflow. const SCEV *One = getOne(Stride->getType()); bool MayAddOverflow = [&] { if (auto *StrideC = dyn_cast(Stride)) { if (StrideC->getAPInt().isPowerOf2()) { // Suppose Stride is a power of two, and Start/End are unsigned // integers. Let UMAX be the largest representable unsigned // integer. // // By the preconditions of this function, we know // "(Start + Stride * N) >= End", and this doesn't overflow. // As a formula: // // End <= (Start + Stride * N) <= UMAX // // Subtracting Start from all the terms: // // End - Start <= Stride * N <= UMAX - Start // // Since Start is unsigned, UMAX - Start <= UMAX. Therefore: // // End - Start <= Stride * N <= UMAX // // Stride * N is a multiple of Stride. Therefore, // // End - Start <= Stride * N <= UMAX - (UMAX mod Stride) // // Since Stride is a power of two, UMAX + 1 is divisible by Stride. // Therefore, UMAX mod Stride == Stride - 1. So we can write: // // End - Start <= Stride * N <= UMAX - Stride - 1 // // Dropping the middle term: // // End - Start <= UMAX - Stride - 1 // // Adding Stride - 1 to both sides: // // (End - Start) + (Stride - 1) <= UMAX // // In other words, the addition doesn't have unsigned overflow. // // A similar proof works if we treat Start/End as signed values. // Just rewrite steps before "End - Start <= Stride * N <= UMAX" to // use signed max instead of unsigned max. Note that we're trying // to prove a lack of unsigned overflow in either case. return false; } } if (Start == Stride || Start == getMinusSCEV(Stride, One)) { // If Start is equal to Stride, (End - Start) + (Stride - 1) == End - 1. // If !IsSigned, 0 (BECount)) { MaxBECount = BECount; } else if (BECountIfBackedgeTaken && isa(BECountIfBackedgeTaken)) { // If we know exactly how many times the backedge will be taken if it's // taken at least once, then the backedge count will either be that or // zero. MaxBECount = BECountIfBackedgeTaken; MaxOrZero = true; } else { MaxBECount = computeMaxBECountForLT( Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); } if (isa(MaxBECount) && !isa(BECount)) MaxBECount = getConstant(getUnsignedRangeMax(BECount)); return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); } ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, const Loop *L, bool IsSigned, bool ControlsExit, bool AllowPredicates) { SmallPtrSet Predicates; // We handle only IV > Invariant if (!isLoopInvariant(RHS, L)) return getCouldNotCompute(); const SCEVAddRecExpr *IV = dyn_cast(LHS); if (!IV && AllowPredicates) // Try to make this an AddRec using runtime tests, in the first X // iterations of this loop, where X is the SCEV expression found by the // algorithm below. 
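    // (For instance, a phi that only becomes an affine AddRec under an
    // assumed no-wrap condition can be used here, with that condition
    // recorded in Predicates.)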
IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); // Avoid weird loops if (!IV || IV->getLoop() != L || !IV->isAffine()) return getCouldNotCompute(); auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW; bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType); ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); // Avoid negative or zero stride values if (!isKnownPositive(Stride)) return getCouldNotCompute(); // Avoid proven overflow cases: this will ensure that the backedge taken count // will not generate any unsigned overflow. Relaxed no-overflow conditions // exploit NoWrapFlags, allowing to optimize in presence of undefined // behaviors like the case of C language. if (!Stride->isOne() && !NoWrap) if (canIVOverflowOnGT(RHS, Stride, IsSigned)) return getCouldNotCompute(); const SCEV *Start = IV->getStart(); const SCEV *End = RHS; if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { // If we know that Start >= RHS in the context of loop, then we know that // min(RHS, Start) = RHS at this point. if (isLoopEntryGuardedByCond( L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS)) End = RHS; else End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); } if (Start->getType()->isPointerTy()) { Start = getLosslessPtrToIntExpr(Start); if (isa(Start)) return Start; } if (End->getType()->isPointerTy()) { End = getLosslessPtrToIntExpr(End); if (isa(End)) return End; } // Compute ((Start - End) + (Stride - 1)) / Stride. // FIXME: This can overflow. Holding off on fixing this for now; // howManyGreaterThans will hopefully be gone soon. const SCEV *One = getOne(Stride->getType()); const SCEV *BECount = getUDivExpr( getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride); APInt MaxStart = IsSigned ? getSignedRangeMax(Start) : getUnsignedRangeMax(Start); APInt MinStride = IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); unsigned BitWidth = getTypeSizeInBits(LHS->getType()); APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) : APInt::getMinValue(BitWidth) + (MinStride - 1); // Although End can be a MIN expression we estimate MinEnd considering only // the case End = RHS. This is safe because in the other case (Start - End) // is zero, leading to a zero maximum backedge taken count. APInt MinEnd = IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); const SCEV *MaxBECount = isa(BECount) ? BECount : getUDivCeilSCEV(getConstant(MaxStart - MinEnd), getConstant(MinStride)); if (isa(MaxBECount)) MaxBECount = BECount; return ExitLimit(BECount, MaxBECount, false, Predicates); } const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const { if (Range.isFullSet()) // Infinite loop. return SE.getCouldNotCompute(); // If the start is a non-zero constant, shift the range to simplify things. if (const SCEVConstant *SC = dyn_cast(getStart())) if (!SC->getValue()->isZero()) { SmallVector Operands(operands()); Operands[0] = SE.getZero(SC->getType()); const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), getNoWrapFlags(FlagNW)); if (const auto *ShiftedAddRec = dyn_cast(Shifted)) return ShiftedAddRec->getNumIterationsInRange( Range.subtract(SC->getAPInt()), SE); // This is strange and shouldn't happen. 
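      // (The shift above, e.g. {3,+,1} in [5, 10) becoming {0,+,1} in [2, 7),
      // should always yield another AddRec; bail out defensively if not.)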
return SE.getCouldNotCompute(); } // The only time we can solve this is when we have all constant indices. // Otherwise, we cannot determine the overflow conditions. if (any_of(operands(), [](const SCEV *Op) { return !isa(Op); })) return SE.getCouldNotCompute(); // Okay at this point we know that all elements of the chrec are constants and // that the start element is zero. // First check to see if the range contains zero. If not, the first // iteration exits. unsigned BitWidth = SE.getTypeSizeInBits(getType()); if (!Range.contains(APInt(BitWidth, 0))) return SE.getZero(getType()); if (isAffine()) { // If this is an affine expression then we have this situation: // Solve {0,+,A} in Range === Ax in Range // We know that zero is in the range. If A is positive then we know that // the upper value of the range must be the first possible exit value. // If A is negative then the lower of the range is the last possible loop // value. Also note that we already checked for a full range. APInt A = cast(getOperand(1))->getAPInt(); APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); // The exit value should be (End+A)/A. APInt ExitVal = (End + A).udiv(A); ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); // Evaluate at the exit value. If we really did fall out of the valid // range, then we computed our trip count, otherwise wrap around or other // things must have happened. ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); if (Range.contains(Val->getValue())) return SE.getCouldNotCompute(); // Something strange happened // Ensure that the previous value is in the range. assert(Range.contains( EvaluateConstantChrecAtConstant(this, ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && "Linear scev computation is off in a bad way!"); return SE.getConstant(ExitValue); } if (isQuadratic()) { if (auto S = SolveQuadraticAddRecRange(this, Range, SE)) return SE.getConstant(*S); } return SE.getCouldNotCompute(); } const SCEVAddRecExpr * SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const { assert(getNumOperands() > 1 && "AddRec with zero step?"); // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)), // but in this case we cannot guarantee that the value returned will be an // AddRec because SCEV does not have a fixed point where it stops // simplification: it is legal to return ({rec1} + {rec2}). For example, it // may happen if we reach arithmetic depth limit while simplifying. So we // construct the returned value explicitly. SmallVector Ops; // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and // (this + Step) is {A+B,+,B+C,+...,+,N}. for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i) Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1))); // We know that the last operand is not a constant zero (otherwise it would // have been popped out earlier). This guarantees us that if the result has // the same last operand, then it will also not be popped out, meaning that // the returned value will be an AddRec. const SCEV *Last = getOperand(getNumOperands() - 1); assert(!Last->isZero() && "Recurrency with zero step?"); Ops.push_back(Last); return cast(SE.getAddRecExpr(Ops, getLoop(), SCEV::FlagAnyWrap)); } // Return true when S contains at least an undef value. 
bool ScalarEvolution::containsUndefs(const SCEV *S) const { return SCEVExprContains(S, [](const SCEV *S) { if (const auto *SU = dyn_cast(S)) return isa(SU->getValue()); return false; }); } // Return true when S contains a value that is a nullptr. bool ScalarEvolution::containsErasedValue(const SCEV *S) const { return SCEVExprContains(S, [](const SCEV *S) { if (const auto *SU = dyn_cast(S)) return SU->getValue() == nullptr; return false; }); } /// Return the size of an element read or written by Inst. const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { Type *Ty; if (StoreInst *Store = dyn_cast(Inst)) Ty = Store->getValueOperand()->getType(); else if (LoadInst *Load = dyn_cast(Inst)) Ty = Load->getType(); else return nullptr; Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); return getSizeOfExpr(ETy, Ty); } //===----------------------------------------------------------------------===// // SCEVCallbackVH Class Implementation //===----------------------------------------------------------------------===// void ScalarEvolution::SCEVCallbackVH::deleted() { assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); if (PHINode *PN = dyn_cast(getValPtr())) SE->ConstantEvolutionLoopExitValue.erase(PN); SE->eraseValueFromMap(getValPtr()); // this now dangles! } void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); // Forget all the expressions associated with users of the old value, // so that future queries will recompute the expressions using the new // value. Value *Old = getValPtr(); SmallVector Worklist(Old->users()); SmallPtrSet Visited; while (!Worklist.empty()) { User *U = Worklist.pop_back_val(); // Deleting the Old value will cause this to dangle. Postpone // that until everything else is done. if (U == Old) continue; if (!Visited.insert(U).second) continue; if (PHINode *PN = dyn_cast(U)) SE->ConstantEvolutionLoopExitValue.erase(PN); SE->eraseValueFromMap(U); llvm::append_range(Worklist, U->users()); } // Delete the Old value. if (PHINode *PN = dyn_cast(Old)) SE->ConstantEvolutionLoopExitValue.erase(PN); SE->eraseValueFromMap(Old); // this now dangles! } ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) : CallbackVH(V), SE(se) {} //===----------------------------------------------------------------------===// // ScalarEvolution Class Implementation //===----------------------------------------------------------------------===// ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI) : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64) { // To use guards for proving predicates, we need to scan every instruction in // relevant basic blocks, and not just terminators. Doing this is a waste of // time if the IR does not actually contain any calls to // @llvm.experimental.guard, so do a quick check and remember this beforehand. // // This pessimizes the case where a pass that preserves ScalarEvolution wants // to _add_ guards to the module when there weren't any before, and wants // ScalarEvolution to optimize based on those guards. For now we prefer to be // efficient in lieu of being smart in that rather obscure case. 
auto *GuardDecl = F.getParent()->getFunction( Intrinsic::getName(Intrinsic::experimental_guard)); HasGuards = GuardDecl && !GuardDecl->use_empty(); } ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), ValueExprMap(std::move(Arg.ValueExprMap)), PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), PendingPhiRanges(std::move(Arg.PendingPhiRanges)), PendingMerges(std::move(Arg.PendingMerges)), MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), PredicatedBackedgeTakenCounts( std::move(Arg.PredicatedBackedgeTakenCounts)), BECountUsers(std::move(Arg.BECountUsers)), ConstantEvolutionLoopExitValue( std::move(Arg.ConstantEvolutionLoopExitValue)), ValuesAtScopes(std::move(Arg.ValuesAtScopes)), ValuesAtScopesUsers(std::move(Arg.ValuesAtScopesUsers)), LoopDispositions(std::move(Arg.LoopDispositions)), LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), BlockDispositions(std::move(Arg.BlockDispositions)), SCEVUsers(std::move(Arg.SCEVUsers)), UnsignedRanges(std::move(Arg.UnsignedRanges)), SignedRanges(std::move(Arg.SignedRanges)), UniqueSCEVs(std::move(Arg.UniqueSCEVs)), UniquePreds(std::move(Arg.UniquePreds)), SCEVAllocator(std::move(Arg.SCEVAllocator)), LoopUsers(std::move(Arg.LoopUsers)), PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), FirstUnknown(Arg.FirstUnknown) { Arg.FirstUnknown = nullptr; } ScalarEvolution::~ScalarEvolution() { // Iterate through all the SCEVUnknown instances and call their // destructors, so that they release their references to their values. for (SCEVUnknown *U = FirstUnknown; U;) { SCEVUnknown *Tmp = U; U = U->Next; Tmp->~SCEVUnknown(); } FirstUnknown = nullptr; ExprValueMap.clear(); ValueExprMap.clear(); HasRecMap.clear(); BackedgeTakenCounts.clear(); PredicatedBackedgeTakenCounts.clear(); assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); assert(PendingPhiRanges.empty() && "getRangeRef garbage"); assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); } bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { return !isa(getBackedgeTakenCount(L)); } static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L) { // Print all inner loops first for (Loop *I : *L) PrintLoopInfo(OS, SE, I); OS << "Loop "; L->getHeader()->printAsOperand(OS, /*PrintType=*/false); OS << ": "; SmallVector ExitingBlocks; L->getExitingBlocks(ExitingBlocks); if (ExitingBlocks.size() != 1) OS << " "; if (SE->hasLoopInvariantBackedgeTakenCount(L)) OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; else OS << "Unpredictable backedge-taken count.\n"; if (ExitingBlocks.size() > 1) for (BasicBlock *ExitingBlock : ExitingBlocks) { OS << " exit count for " << ExitingBlock->getName() << ": " << *SE->getExitCount(L, ExitingBlock) << "\n"; } OS << "Loop "; L->getHeader()->printAsOperand(OS, /*PrintType=*/false); OS << ": "; if (!isa(SE->getConstantMaxBackedgeTakenCount(L))) { OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); if (SE->isBackedgeTakenCountMaxOrZero(L)) OS << ", actual taken count either this or zero."; } else { OS << "Unpredictable max backedge-taken count. 
"; } OS << "\n" "Loop "; L->getHeader()->printAsOperand(OS, /*PrintType=*/false); OS << ": "; SmallVector Preds; auto PBT = SE->getPredicatedBackedgeTakenCount(L, Preds); if (!isa(PBT)) { OS << "Predicated backedge-taken count is " << *PBT << "\n"; OS << " Predicates:\n"; for (const auto *P : Preds) P->print(OS, 4); } else { OS << "Unpredictable predicated backedge-taken count. "; } OS << "\n"; if (SE->hasLoopInvariantBackedgeTakenCount(L)) { OS << "Loop "; L->getHeader()->printAsOperand(OS, /*PrintType=*/false); OS << ": "; OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; } } static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { switch (LD) { case ScalarEvolution::LoopVariant: return "Variant"; case ScalarEvolution::LoopInvariant: return "Invariant"; case ScalarEvolution::LoopComputable: return "Computable"; } llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); } void ScalarEvolution::print(raw_ostream &OS) const { // ScalarEvolution's implementation of the print method is to print // out SCEV values of all instructions that are interesting. Doing // this potentially causes it to create new SCEV objects though, // which technically conflicts with the const qualifier. This isn't // observable from outside the class though, so casting away the // const isn't dangerous. ScalarEvolution &SE = *const_cast(this); if (ClassifyExpressions) { OS << "Classifying expressions for: "; F.printAsOperand(OS, /*PrintType=*/false); OS << "\n"; for (Instruction &I : instructions(F)) if (isSCEVable(I.getType()) && !isa(I)) { OS << I << '\n'; OS << " --> "; const SCEV *SV = SE.getSCEV(&I); SV->print(OS); if (!isa(SV)) { OS << " U: "; SE.getUnsignedRange(SV).print(OS); OS << " S: "; SE.getSignedRange(SV).print(OS); } const Loop *L = LI.getLoopFor(I.getParent()); const SCEV *AtUse = SE.getSCEVAtScope(SV, L); if (AtUse != SV) { OS << " --> "; AtUse->print(OS); if (!isa(AtUse)) { OS << " U: "; SE.getUnsignedRange(AtUse).print(OS); OS << " S: "; SE.getSignedRange(AtUse).print(OS); } } if (L) { OS << "\t\t" "Exits: "; const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); if (!SE.isLoopInvariant(ExitValue, L)) { OS << "<>"; } else { OS << *ExitValue; } bool First = true; for (const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { if (First) { OS << "\t\t" "LoopDispositions: { "; First = false; } else { OS << ", "; } Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); } for (const auto *InnerL : depth_first(L)) { if (InnerL == L) continue; if (First) { OS << "\t\t" "LoopDispositions: { "; First = false; } else { OS << ", "; } InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); } OS << " }"; } OS << "\n"; } } OS << "Determining loop execution counts for: "; F.printAsOperand(OS, /*PrintType=*/false); OS << "\n"; for (Loop *I : LI) PrintLoopInfo(OS, &SE, I); } ScalarEvolution::LoopDisposition ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { auto &Values = LoopDispositions[S]; for (auto &V : Values) { if (V.getPointer() == L) return V.getInt(); } Values.emplace_back(L, LoopVariant); LoopDisposition D = computeLoopDisposition(S, L); auto &Values2 = LoopDispositions[S]; for (auto &V : llvm::reverse(Values2)) { if (V.getPointer() == L) { V.setInt(D); break; } } return D; } ScalarEvolution::LoopDisposition ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop 
*L) { switch (S->getSCEVType()) { case scConstant: return LoopInvariant; case scPtrToInt: case scTruncate: case scZeroExtend: case scSignExtend: return getLoopDisposition(cast(S)->getOperand(), L); case scAddRecExpr: { const SCEVAddRecExpr *AR = cast(S); // If L is the addrec's loop, it's computable. if (AR->getLoop() == L) return LoopComputable; // Add recurrences are never invariant in the function-body (null loop). if (!L) return LoopVariant; // Everything that is not defined at loop entry is variant. if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) return LoopVariant; assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" " dominate the contained loop's header?"); // This recurrence is invariant w.r.t. L if AR's loop contains L. if (AR->getLoop()->contains(L)) return LoopInvariant; // This recurrence is variant w.r.t. L if any of its operands // are variant. for (const auto *Op : AR->operands()) if (!isLoopInvariant(Op, L)) return LoopVariant; // Otherwise it's loop-invariant. return LoopInvariant; } case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: case scUMinExpr: case scSMinExpr: case scSequentialUMinExpr: { bool HasVarying = false; for (const auto *Op : cast(S)->operands()) { LoopDisposition D = getLoopDisposition(Op, L); if (D == LoopVariant) return LoopVariant; if (D == LoopComputable) HasVarying = true; } return HasVarying ? LoopComputable : LoopInvariant; } case scUDivExpr: { const SCEVUDivExpr *UDiv = cast(S); LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); if (LD == LoopVariant) return LoopVariant; LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); if (RD == LoopVariant) return LoopVariant; return (LD == LoopInvariant && RD == LoopInvariant) ? LoopInvariant : LoopComputable; } case scUnknown: // All non-instruction values are loop invariant. All instructions are loop // invariant if they are not contained in the specified loop. // Instructions are never considered invariant in the function body // (null loop) because they are defined within the "loop". if (auto *I = dyn_cast(cast(S)->getValue())) return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; return LoopInvariant; case scCouldNotCompute: llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); } llvm_unreachable("Unknown SCEV kind!"); } bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { return getLoopDisposition(S, L) == LoopInvariant; } bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { return getLoopDisposition(S, L) == LoopComputable; } ScalarEvolution::BlockDisposition ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { auto &Values = BlockDispositions[S]; for (auto &V : Values) { if (V.getPointer() == BB) return V.getInt(); } Values.emplace_back(BB, DoesNotDominateBlock); BlockDisposition D = computeBlockDisposition(S, BB); auto &Values2 = BlockDispositions[S]; for (auto &V : llvm::reverse(Values2)) { if (V.getPointer() == BB) { V.setInt(D); break; } } return D; } ScalarEvolution::BlockDisposition ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { switch (S->getSCEVType()) { case scConstant: return ProperlyDominatesBlock; case scPtrToInt: case scTruncate: case scZeroExtend: case scSignExtend: return getBlockDisposition(cast(S)->getOperand(), BB); case scAddRecExpr: { // This uses a "dominates" query instead of "properly dominates" query // to test for proper dominance too, because the instruction which // produces the addrec's value is a PHI, and a PHI effectively properly // dominates its entire containing block. const SCEVAddRecExpr *AR = cast(S); if (!DT.dominates(AR->getLoop()->getHeader(), BB)) return DoesNotDominateBlock; // Fall through into SCEVNAryExpr handling. LLVM_FALLTHROUGH; } case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: case scUMinExpr: case scSMinExpr: case scSequentialUMinExpr: { const SCEVNAryExpr *NAry = cast(S); bool Proper = true; for (const SCEV *NAryOp : NAry->operands()) { BlockDisposition D = getBlockDisposition(NAryOp, BB); if (D == DoesNotDominateBlock) return DoesNotDominateBlock; if (D == DominatesBlock) Proper = false; } return Proper ? ProperlyDominatesBlock : DominatesBlock; } case scUDivExpr: { const SCEVUDivExpr *UDiv = cast(S); const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); BlockDisposition LD = getBlockDisposition(LHS, BB); if (LD == DoesNotDominateBlock) return DoesNotDominateBlock; BlockDisposition RD = getBlockDisposition(RHS, BB); if (RD == DoesNotDominateBlock) return DoesNotDominateBlock; return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? ProperlyDominatesBlock : DominatesBlock; } case scUnknown: if (Instruction *I = dyn_cast(cast(S)->getValue())) { if (I->getParent() == BB) return DominatesBlock; if (DT.properlyDominates(I->getParent(), BB)) return ProperlyDominatesBlock; return DoesNotDominateBlock; } return ProperlyDominatesBlock; case scCouldNotCompute: llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); } llvm_unreachable("Unknown SCEV kind!"); } bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { return getBlockDisposition(S, BB) >= DominatesBlock; } bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { return getBlockDisposition(S, BB) == ProperlyDominatesBlock; } bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); } void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L, bool Predicated) { auto &BECounts = Predicated ? 
PredicatedBackedgeTakenCounts : BackedgeTakenCounts; auto It = BECounts.find(L); if (It != BECounts.end()) { for (const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) { if (!isa(ENT.ExactNotTaken)) { auto UserIt = BECountUsers.find(ENT.ExactNotTaken); assert(UserIt != BECountUsers.end()); UserIt->second.erase({L, Predicated}); } } BECounts.erase(It); } } void ScalarEvolution::forgetMemoizedResults(ArrayRef SCEVs) { SmallPtrSet ToForget(SCEVs.begin(), SCEVs.end()); SmallVector Worklist(ToForget.begin(), ToForget.end()); while (!Worklist.empty()) { const SCEV *Curr = Worklist.pop_back_val(); auto Users = SCEVUsers.find(Curr); if (Users != SCEVUsers.end()) for (const auto *User : Users->second) if (ToForget.insert(User).second) Worklist.push_back(User); } for (const auto *S : ToForget) forgetMemoizedResultsImpl(S); for (auto I = PredicatedSCEVRewrites.begin(); I != PredicatedSCEVRewrites.end();) { std::pair Entry = I->first; if (ToForget.count(Entry.first)) PredicatedSCEVRewrites.erase(I++); else ++I; } } void ScalarEvolution::forgetMemoizedResultsImpl(const SCEV *S) { LoopDispositions.erase(S); BlockDispositions.erase(S); UnsignedRanges.erase(S); SignedRanges.erase(S); HasRecMap.erase(S); MinTrailingZerosCache.erase(S); auto ExprIt = ExprValueMap.find(S); if (ExprIt != ExprValueMap.end()) { for (Value *V : ExprIt->second) { auto ValueIt = ValueExprMap.find_as(V); if (ValueIt != ValueExprMap.end()) ValueExprMap.erase(ValueIt); } ExprValueMap.erase(ExprIt); } auto ScopeIt = ValuesAtScopes.find(S); if (ScopeIt != ValuesAtScopes.end()) { for (const auto &Pair : ScopeIt->second) if (!isa_and_nonnull(Pair.second)) erase_value(ValuesAtScopesUsers[Pair.second], std::make_pair(Pair.first, S)); ValuesAtScopes.erase(ScopeIt); } auto ScopeUserIt = ValuesAtScopesUsers.find(S); if (ScopeUserIt != ValuesAtScopesUsers.end()) { for (const auto &Pair : ScopeUserIt->second) erase_value(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S)); ValuesAtScopesUsers.erase(ScopeUserIt); } auto BEUsersIt = BECountUsers.find(S); if (BEUsersIt != BECountUsers.end()) { // Work on a copy, as forgetBackedgeTakenCounts() will modify the original. auto Copy = BEUsersIt->second; for (const auto &Pair : Copy) forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt()); BECountUsers.erase(BEUsersIt); } } void ScalarEvolution::getUsedLoops(const SCEV *S, SmallPtrSetImpl &LoopsUsed) { struct FindUsedLoops { FindUsedLoops(SmallPtrSetImpl &LoopsUsed) : LoopsUsed(LoopsUsed) {} SmallPtrSetImpl &LoopsUsed; bool follow(const SCEV *S) { if (auto *AR = dyn_cast(S)) LoopsUsed.insert(AR->getLoop()); return true; } bool isDone() const { return false; } }; FindUsedLoops F(LoopsUsed); SCEVTraversal(F).visitAll(S); } void ScalarEvolution::getReachableBlocks( SmallPtrSetImpl &Reachable, Function &F) { SmallVector Worklist; Worklist.push_back(&F.getEntryBlock()); while (!Worklist.empty()) { BasicBlock *BB = Worklist.pop_back_val(); if (!Reachable.insert(BB).second) continue; Value *Cond; BasicBlock *TrueBB, *FalseBB; if (match(BB->getTerminator(), m_Br(m_Value(Cond), m_BasicBlock(TrueBB), m_BasicBlock(FalseBB)))) { if (auto *C = dyn_cast(Cond)) { Worklist.push_back(C->isOne() ? 
TrueBB : FalseBB); continue; } if (auto *Cmp = dyn_cast(Cond)) { const SCEV *L = getSCEV(Cmp->getOperand(0)); const SCEV *R = getSCEV(Cmp->getOperand(1)); if (isKnownPredicateViaConstantRanges(Cmp->getPredicate(), L, R)) { Worklist.push_back(TrueBB); continue; } if (isKnownPredicateViaConstantRanges(Cmp->getInversePredicate(), L, R)) { Worklist.push_back(FalseBB); continue; } } } append_range(Worklist, successors(BB)); } } void ScalarEvolution::verify() const { ScalarEvolution &SE = *const_cast(this); ScalarEvolution SE2(F, TLI, AC, DT, LI); SmallVector LoopStack(LI.begin(), LI.end()); // Map's SCEV expressions from one ScalarEvolution "universe" to another. struct SCEVMapper : public SCEVRewriteVisitor { SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {} const SCEV *visitConstant(const SCEVConstant *Constant) { return SE.getConstant(Constant->getAPInt()); } const SCEV *visitUnknown(const SCEVUnknown *Expr) { return SE.getUnknown(Expr->getValue()); } const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return SE.getCouldNotCompute(); } }; SCEVMapper SCM(SE2); SmallPtrSet ReachableBlocks; SE2.getReachableBlocks(ReachableBlocks, F); auto GetDelta = [&](const SCEV *Old, const SCEV *New) -> const SCEV * { if (containsUndefs(Old) || containsUndefs(New)) { // SCEV treats "undef" as an unknown but consistent value (i.e. it does // not propagate undef aggressively). This means we can (and do) fail // verification in cases where a transform makes a value go from "undef" // to "undef+1" (say). The transform is fine, since in both cases the // result is "undef", but SCEV thinks the value increased by 1. return nullptr; } // Unless VerifySCEVStrict is set, we only compare constant deltas. const SCEV *Delta = SE2.getMinusSCEV(Old, New); if (!VerifySCEVStrict && !isa(Delta)) return nullptr; return Delta; }; while (!LoopStack.empty()) { auto *L = LoopStack.pop_back_val(); llvm::append_range(LoopStack, *L); // Only verify BECounts in reachable loops. For an unreachable loop, // any BECount is legal. if (!ReachableBlocks.contains(L->getHeader())) continue; // Only verify cached BECounts. Computing new BECounts may change the // results of subsequent SCEV uses. auto It = BackedgeTakenCounts.find(L); if (It == BackedgeTakenCounts.end()) continue; auto *CurBECount = SCM.visit(It->second.getExact(L, const_cast(this))); auto *NewBECount = SE2.getBackedgeTakenCount(L); if (CurBECount == SE2.getCouldNotCompute() || NewBECount == SE2.getCouldNotCompute()) { // NB! This situation is legal, but is very suspicious -- whatever pass // change the loop to make a trip count go from could not compute to // computable or vice-versa *should have* invalidated SCEV. However, we // choose not to assert here (for now) since we don't want false // positives. continue; } if (SE.getTypeSizeInBits(CurBECount->getType()) > SE.getTypeSizeInBits(NewBECount->getType())) NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType()); else if (SE.getTypeSizeInBits(CurBECount->getType()) < SE.getTypeSizeInBits(NewBECount->getType())) CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType()); const SCEV *Delta = GetDelta(CurBECount, NewBECount); if (Delta && !Delta->isZero()) { dbgs() << "Trip Count for " << *L << " Changed!\n"; dbgs() << "Old: " << *CurBECount << "\n"; dbgs() << "New: " << *NewBECount << "\n"; dbgs() << "Delta: " << *Delta << "\n"; std::abort(); } } // Collect all valid loops currently in LoopInfo. 
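  // Worked example (editor's note, not part of the original source): given
  //
  //   for (i ...)        // L1
  //     for (j ...)      //   L1.1
  //       for (k ...)    //     L1.1.1
  //     for (j2 ...)     //   L1.2
  //
  // the worklist below starts from the top-level loops in LI and appends each
  // loop's children, so ValidLoops ends up as {L1, L1.1, L1.1.1, L1.2}. Any
  // cached AddRec whose loop is *not* in this set references a deleted loop.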
SmallPtrSet ValidLoops; SmallVector Worklist(LI.begin(), LI.end()); while (!Worklist.empty()) { Loop *L = Worklist.pop_back_val(); if (ValidLoops.insert(L).second) Worklist.append(L->begin(), L->end()); } for (const auto &KV : ValueExprMap) { #ifndef NDEBUG // Check for SCEV expressions referencing invalid/deleted loops. if (auto *AR = dyn_cast(KV.second)) { assert(ValidLoops.contains(AR->getLoop()) && "AddRec references invalid loop"); } #endif // Check that the value is also part of the reverse map. auto It = ExprValueMap.find(KV.second); if (It == ExprValueMap.end() || !It->second.contains(KV.first)) { dbgs() << "Value " << *KV.first << " is in ValueExprMap but not in ExprValueMap\n"; std::abort(); } if (auto *I = dyn_cast(&*KV.first)) { if (!ReachableBlocks.contains(I->getParent())) continue; const SCEV *OldSCEV = SCM.visit(KV.second); const SCEV *NewSCEV = SE2.getSCEV(I); const SCEV *Delta = GetDelta(OldSCEV, NewSCEV); if (Delta && !Delta->isZero()) { dbgs() << "SCEV for value " << *I << " changed!\n" << "Old: " << *OldSCEV << "\n" << "New: " << *NewSCEV << "\n" << "Delta: " << *Delta << "\n"; std::abort(); } } } for (const auto &KV : ExprValueMap) { for (Value *V : KV.second) { auto It = ValueExprMap.find_as(V); if (It == ValueExprMap.end()) { dbgs() << "Value " << *V << " is in ExprValueMap but not in ValueExprMap\n"; std::abort(); } if (It->second != KV.first) { dbgs() << "Value " << *V << " mapped to " << *It->second << " rather than " << *KV.first << "\n"; std::abort(); } } } // Verify integrity of SCEV users. for (const auto &S : UniqueSCEVs) { SmallVector Ops; collectUniqueOps(&S, Ops); for (const auto *Op : Ops) { // We do not store dependencies of constants. if (isa(Op)) continue; auto It = SCEVUsers.find(Op); if (It != SCEVUsers.end() && It->second.count(&S)) continue; dbgs() << "Use of operand " << *Op << " by user " << S << " is not being tracked!\n"; std::abort(); } } // Verify integrity of ValuesAtScopes users. for (const auto &ValueAndVec : ValuesAtScopes) { const SCEV *Value = ValueAndVec.first; for (const auto &LoopAndValueAtScope : ValueAndVec.second) { const Loop *L = LoopAndValueAtScope.first; const SCEV *ValueAtScope = LoopAndValueAtScope.second; if (!isa(ValueAtScope)) { auto It = ValuesAtScopesUsers.find(ValueAtScope); if (It != ValuesAtScopesUsers.end() && is_contained(It->second, std::make_pair(L, Value))) continue; dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: " << *ValueAtScope << " missing in ValuesAtScopesUsers\n"; std::abort(); } } } for (const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) { const SCEV *ValueAtScope = ValueAtScopeAndVec.first; for (const auto &LoopAndValue : ValueAtScopeAndVec.second) { const Loop *L = LoopAndValue.first; const SCEV *Value = LoopAndValue.second; assert(!isa(Value)); auto It = ValuesAtScopes.find(Value); if (It != ValuesAtScopes.end() && is_contained(It->second, std::make_pair(L, ValueAtScope))) continue; dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: " << *ValueAtScope << " missing in ValuesAtScopes\n"; std::abort(); } } // Verify integrity of BECountUsers. auto VerifyBECountUsers = [&](bool Predicated) { auto &BECounts = Predicated ? 
PredicatedBackedgeTakenCounts : BackedgeTakenCounts; for (const auto &LoopAndBEInfo : BECounts) { for (const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) { if (!isa(ENT.ExactNotTaken)) { auto UserIt = BECountUsers.find(ENT.ExactNotTaken); if (UserIt != BECountUsers.end() && UserIt->second.contains({ LoopAndBEInfo.first, Predicated })) continue; dbgs() << "Value " << *ENT.ExactNotTaken << " for loop " << *LoopAndBEInfo.first << " missing from BECountUsers\n"; std::abort(); } } } }; VerifyBECountUsers(/* Predicated */ false); VerifyBECountUsers(/* Predicated */ true); } bool ScalarEvolution::invalidate( Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv) { // Invalidate the ScalarEvolution object whenever it isn't preserved or one // of its dependencies is invalidated. auto PAC = PA.getChecker(); return !(PAC.preserved() || PAC.preservedSet>()) || Inv.invalidate(F, PA) || Inv.invalidate(F, PA) || Inv.invalidate(F, PA); } AnalysisKey ScalarEvolutionAnalysis::Key; ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, FunctionAnalysisManager &AM) { return ScalarEvolution(F, AM.getResult(F), AM.getResult(F), AM.getResult(F), AM.getResult(F)); } PreservedAnalyses ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { AM.getResult(F).verify(); return PreservedAnalyses::all(); } PreservedAnalyses ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { // For compatibility with opt's -analyze feature under legacy pass manager // which was not ported to NPM. This keeps tests using // update_analyze_test_checks.py working. OS << "Printing analysis 'Scalar Evolution Analysis' for function '" << F.getName() << "':\n"; AM.getResult(F).print(OS); return PreservedAnalyses::all(); } INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", "Scalar Evolution Analysis", false, true) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", "Scalar Evolution Analysis", false, true) char ScalarEvolutionWrapperPass::ID = 0; ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); } bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { SE.reset(new ScalarEvolution( F, getAnalysis().getTLI(F), getAnalysis().getAssumptionCache(F), getAnalysis().getDomTree(), getAnalysis().getLoopInfo())); return false; } void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { SE->print(OS); } void ScalarEvolutionWrapperPass::verifyAnalysis() const { if (!VerifySCEV) return; SE->verify(); } void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); AU.addRequiredTransitive(); AU.addRequiredTransitive(); AU.addRequiredTransitive(); AU.addRequiredTransitive(); } const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, const SCEV *RHS) { return getComparePredicate(ICmpInst::ICMP_EQ, LHS, RHS); } const SCEVPredicate * ScalarEvolution::getComparePredicate(const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { FoldingSetNodeID ID; assert(LHS->getType() == RHS->getType() && "Type mismatch between LHS and RHS"); // Unique this node based on the arguments 
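  // (Editor's note) The (P_Compare, Pred, LHS, RHS) tuple built below is the
  // FoldingSet key, so structurally identical compare predicates share a
  // single node and pointer equality doubles as structural equality, exactly
  // as UniqueSCEVs does for SCEV expressions.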
ID.AddInteger(SCEVPredicate::P_Compare); ID.AddInteger(Pred); ID.AddPointer(LHS); ID.AddPointer(RHS); void *IP = nullptr; if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) return S; SCEVComparePredicate *Eq = new (SCEVAllocator) SCEVComparePredicate(ID.Intern(SCEVAllocator), Pred, LHS, RHS); UniquePreds.InsertNode(Eq, IP); return Eq; } const SCEVPredicate *ScalarEvolution::getWrapPredicate( const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { FoldingSetNodeID ID; // Unique this node based on the arguments ID.AddInteger(SCEVPredicate::P_Wrap); ID.AddPointer(AR); ID.AddInteger(AddedFlags); void *IP = nullptr; if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) return S; auto *OF = new (SCEVAllocator) SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); UniquePreds.InsertNode(OF, IP); return OF; } namespace { class SCEVPredicateRewriter : public SCEVRewriteVisitor { public: /// Rewrites \p S in the context of a loop L and the SCEV predication /// infrastructure. /// /// If \p Pred is non-null, the SCEV expression is rewritten to respect the /// equivalences present in \p Pred. /// /// If \p NewPreds is non-null, rewrite is free to add further predicates to /// \p NewPreds such that the result will be an AddRecExpr. static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, SmallPtrSetImpl *NewPreds, const SCEVPredicate *Pred) { SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); return Rewriter.visit(S); } const SCEV *visitUnknown(const SCEVUnknown *Expr) { if (Pred) { if (auto *U = dyn_cast(Pred)) { for (const auto *Pred : U->getPredicates()) if (const auto *IPred = dyn_cast(Pred)) if (IPred->getLHS() == Expr && IPred->getPredicate() == ICmpInst::ICMP_EQ) return IPred->getRHS(); } else if (const auto *IPred = dyn_cast(Pred)) { if (IPred->getLHS() == Expr && IPred->getPredicate() == ICmpInst::ICMP_EQ) return IPred->getRHS(); } } return convertToAddRecWithPreds(Expr); } const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { const SCEV *Operand = visit(Expr->getOperand()); const SCEVAddRecExpr *AR = dyn_cast(Operand); if (AR && AR->getLoop() == L && AR->isAffine()) { // This couldn't be folded because the operand didn't have the nuw // flag. Add the nusw flag as an assumption that we could make. const SCEV *Step = AR->getStepRecurrence(SE); Type *Ty = Expr->getType(); if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), SE.getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } return SE.getZeroExtendExpr(Operand, Expr->getType()); } const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { const SCEV *Operand = visit(Expr->getOperand()); const SCEVAddRecExpr *AR = dyn_cast(Operand); if (AR && AR->getLoop() == L && AR->isAffine()) { // This couldn't be folded because the operand didn't have the nsw // flag. Add the nssw flag as an assumption that we could make. 
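      // Worked example (editor's note): for %iv = {0,+,1}<%loop> of type i32,
      // (sext i32 %iv to i64) cannot be folded without <nsw>. Recording an
      // <nssw> SCEVWrapPredicate lets the rewrite below answer with the i64
      // recurrence {sext(0),+,sext(1)}<%loop> instead.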
const SCEV *Step = AR->getStepRecurrence(SE); Type *Ty = Expr->getType(); if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), SE.getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } return SE.getSignExtendExpr(Operand, Expr->getType()); } private: explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, SmallPtrSetImpl *NewPreds, const SCEVPredicate *Pred) : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} bool addOverflowAssumption(const SCEVPredicate *P) { if (!NewPreds) { // Check if we've already made this assumption. return Pred && Pred->implies(P); } NewPreds->insert(P); return true; } bool addOverflowAssumption(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { auto *A = SE.getWrapPredicate(AR, AddedFlags); return addOverflowAssumption(A); } // If \p Expr represents a PHINode, we try to see if it can be represented // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible // to add this predicate as a runtime overflow check, we return the AddRec. // If \p Expr does not meet these conditions (is not a PHI node, or we // couldn't create an AddRec for it, or couldn't add the predicate), we just // return \p Expr. const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { if (!isa(Expr->getValue())) return Expr; Optional>> PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); if (!PredicatedRewrite) return Expr; for (const auto *P : PredicatedRewrite->second){ // Wrap predicates from outer loops are not supported. if (auto *WP = dyn_cast(P)) { if (L != WP->getExpr()->getLoop()) return Expr; } if (!addOverflowAssumption(P)) return Expr; } return PredicatedRewrite->first; } SmallPtrSetImpl *NewPreds; const SCEVPredicate *Pred; const Loop *L; }; } // end anonymous namespace const SCEV * ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &Preds) { return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); } const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( const SCEV *S, const Loop *L, SmallPtrSetImpl &Preds) { SmallPtrSet TransformPreds; S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); auto *AddRec = dyn_cast(S); if (!AddRec) return nullptr; // Since the transformation was successful, we can now transfer the SCEV // predicates. 
for (const auto *P : TransformPreds) Preds.insert(P); return AddRec; } /// SCEV predicates SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, SCEVPredicateKind Kind) : FastID(ID), Kind(Kind) {} SCEVComparePredicate::SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) : SCEVPredicate(ID, P_Compare), Pred(Pred), LHS(LHS), RHS(RHS) { assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); assert(LHS != RHS && "LHS and RHS are the same SCEV"); } bool SCEVComparePredicate::implies(const SCEVPredicate *N) const { const auto *Op = dyn_cast(N); if (!Op) return false; if (Pred != ICmpInst::ICMP_EQ) return false; return Op->LHS == LHS && Op->RHS == RHS; } bool SCEVComparePredicate::isAlwaysTrue() const { return false; } void SCEVComparePredicate::print(raw_ostream &OS, unsigned Depth) const { if (Pred == ICmpInst::ICMP_EQ) OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; else OS.indent(Depth) << "Compare predicate: " << *LHS << " " << CmpInst::getPredicateName(Pred) << ") " << *RHS << "\n"; } SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags) : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} const SCEVAddRecExpr *SCEVWrapPredicate::getExpr() const { return AR; } bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { const auto *Op = dyn_cast(N); return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; } bool SCEVWrapPredicate::isAlwaysTrue() const { SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); IncrementWrapFlags IFlags = Flags; if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) IFlags = clearFlags(IFlags, IncrementNSSW); return IFlags == IncrementAnyWrap; } void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { OS.indent(Depth) << *getExpr() << " Added Flags: "; if (SCEVWrapPredicate::IncrementNUSW & getFlags()) OS << ""; if (SCEVWrapPredicate::IncrementNSSW & getFlags()) OS << ""; OS << "\n"; } SCEVWrapPredicate::IncrementWrapFlags SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE) { IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); // We can safely transfer the NSW flag as NSSW. if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) ImpliedFlags = IncrementNSSW; if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { // If the increment is positive, the SCEV NUW flag will also imply the // WrapPredicate NUSW flag. if (const auto *Step = dyn_cast(AR->getStepRecurrence(SE))) if (Step->getValue()->getValue().isNonNegative()) ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); } return ImpliedFlags; } /// Union predicates don't get cached so create a dummy set ID for it. 
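/// Usage sketch (editor's note; P1 and P2 are hypothetical predicates):
///   SmallVector<const SCEVPredicate *, 4> Ps = {P1, P2};
///   SCEVUnionPredicate U(Ps);
///   U.implies(P1);      // true iff some member implies P1
///   U.isAlwaysTrue();   // true only if *every* member is always true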
SCEVUnionPredicate::SCEVUnionPredicate(ArrayRef Preds) : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) { for (const auto *P : Preds) add(P); } bool SCEVUnionPredicate::isAlwaysTrue() const { return all_of(Preds, [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); } bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { if (const auto *Set = dyn_cast(N)) return all_of(Set->Preds, [this](const SCEVPredicate *I) { return this->implies(I); }); return any_of(Preds, [N](const SCEVPredicate *I) { return I->implies(N); }); } void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { for (const auto *Pred : Preds) Pred->print(OS, Depth); } void SCEVUnionPredicate::add(const SCEVPredicate *N) { if (const auto *Set = dyn_cast(N)) { for (const auto *Pred : Set->Preds) add(Pred); return; } Preds.push_back(N); } PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L) : SE(SE), L(L) { SmallVector Empty; Preds = std::make_unique(Empty); } void ScalarEvolution::registerUser(const SCEV *User, ArrayRef Ops) { for (const auto *Op : Ops) // We do not expect that forgetting cached data for SCEVConstants will ever // open any prospects for sharpening or introduce any correctness issues, // so we don't bother storing their dependencies. if (!isa(Op)) SCEVUsers[Op].insert(User); } const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { const SCEV *Expr = SE.getSCEV(V); RewriteEntry &Entry = RewriteMap[Expr]; // If we already have an entry and the version matches, return it. if (Entry.second && Generation == Entry.first) return Entry.second; // We found an entry but it's stale. Rewrite the stale entry // according to the current predicate. if (Entry.second) Expr = Entry.second; const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds); Entry = {Generation, NewSCEV}; return NewSCEV; } const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { if (!BackedgeCount) { SmallVector Preds; BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds); for (const auto *P : Preds) addPredicate(*P); } return BackedgeCount; } void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { if (Preds->implies(&Pred)) return; auto &OldPreds = Preds->getPredicates(); SmallVector NewPreds(OldPreds.begin(), OldPreds.end()); NewPreds.push_back(&Pred); Preds = std::make_unique(NewPreds); updateGeneration(); } const SCEVPredicate &PredicatedScalarEvolution::getPredicate() const { return *Preds; } void PredicatedScalarEvolution::updateGeneration() { // If the generation number wrapped recompute everything. if (++Generation == 0) { for (auto &II : RewriteMap) { const SCEV *Rewritten = II.second.second; II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, *Preds)}; } } } void PredicatedScalarEvolution::setNoOverflow( Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { const SCEV *Expr = getSCEV(V); const auto *AR = cast(Expr); auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); // Clear the statically implied flags. 
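  // E.g. (editor's note) if AR already carries <nsw>, getImpliedFlags() above
  // reports IncrementNSSW, so an NSSW request is dropped here and the wrap
  // predicate added below does not re-check what the IR already guarantees.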
Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); addPredicate(*SE.getWrapPredicate(AR, Flags)); auto II = FlagsMap.insert({V, Flags}); if (!II.second) II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); } bool PredicatedScalarEvolution::hasNoOverflow( Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { const SCEV *Expr = getSCEV(V); const auto *AR = cast(Expr); Flags = SCEVWrapPredicate::clearFlags( Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); auto II = FlagsMap.find(V); if (II != FlagsMap.end()) Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); return Flags == SCEVWrapPredicate::IncrementAnyWrap; } const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { const SCEV *Expr = this->getSCEV(V); SmallPtrSet NewPreds; auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); if (!New) return nullptr; for (const auto *P : NewPreds) addPredicate(*P); RewriteMap[SE.getSCEV(V)] = {Generation, New}; return New; } PredicatedScalarEvolution::PredicatedScalarEvolution( const PredicatedScalarEvolution &Init) : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(std::make_unique(Init.Preds->getPredicates())), Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { for (auto I : Init.FlagsMap) FlagsMap.insert(I); } void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { // For each block. for (auto *BB : L.getBlocks()) for (auto &I : *BB) { if (!SE.isSCEVable(I.getType())) continue; auto *Expr = SE.getSCEV(&I); auto II = RewriteMap.find(Expr); if (II == RewriteMap.end()) continue; // Don't print things that are not interesting. if (II->second.second == Expr) continue; OS.indent(Depth) << "[PSE]" << I << ":\n"; OS.indent(Depth + 2) << *Expr << "\n"; OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; } } // Match the mathematical pattern A - (A / B) * B, where A and B can be // arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used // for URem with constant power-of-2 second operands. // It's not always easy, as A and B can be folded (imagine A is X / 2, and B is // 4, A / B becomes X / 8). bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS, const SCEV *&RHS) { // Try to match 'zext (trunc A to iB) to iY', which is used // for URem with constant power-of-2 second operands. Make sure the size of // the operand A matches the size of the whole expressions. if (const auto *ZExt = dyn_cast(Expr)) if (const auto *Trunc = dyn_cast(ZExt->getOperand(0))) { LHS = Trunc->getOperand(); // Bail out if the type of the LHS is larger than the type of the // expression for now. if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(Expr->getType())) return false; if (LHS->getType() != Expr->getType()) LHS = getZeroExtendExpr(LHS, Expr->getType()); RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1) << getTypeSizeInBits(Trunc->getType())); return true; } const auto *Add = dyn_cast(Expr); if (Add == nullptr || Add->getNumOperands() != 2) return false; const SCEV *A = Add->getOperand(1); const auto *Mul = dyn_cast(Add->getOperand(0)); if (Mul == nullptr) return false; const auto MatchURemWithDivisor = [&](const SCEV *B) { // (SomeExpr + (-(SomeExpr / B) * B)). if (Expr == getURemExpr(A, B)) { LHS = A; RHS = B; return true; } return false; }; // (SomeExpr + (-1 * (SomeExpr / B) * B)). 
if (Mul->getNumOperands() == 3 && isa(Mul->getOperand(0))) return MatchURemWithDivisor(Mul->getOperand(1)) || MatchURemWithDivisor(Mul->getOperand(2)); // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)). if (Mul->getNumOperands() == 2) return MatchURemWithDivisor(Mul->getOperand(1)) || MatchURemWithDivisor(Mul->getOperand(0)) || MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) || MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0))); return false; } const SCEV * ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) { SmallVector ExitingBlocks; L->getExitingBlocks(ExitingBlocks); // Form an expression for the maximum exit count possible for this loop. We // merge the max and exact information to approximate a version of // getConstantMaxBackedgeTakenCount which isn't restricted to just constants. SmallVector ExitCounts; for (BasicBlock *ExitingBB : ExitingBlocks) { const SCEV *ExitCount = getExitCount(L, ExitingBB); if (isa(ExitCount)) ExitCount = getExitCount(L, ExitingBB, ScalarEvolution::ConstantMaximum); if (!isa(ExitCount)) { assert(DT.dominates(ExitingBB, L->getLoopLatch()) && "We should only have known counts for exiting blocks that " "dominate latch!"); ExitCounts.push_back(ExitCount); } } if (ExitCounts.empty()) return getCouldNotCompute(); return getUMinFromMismatchedTypes(ExitCounts); } /// A rewriter to replace SCEV expressions in Map with the corresponding entry /// in the map. It skips AddRecExpr because we cannot guarantee that the /// replacement is loop invariant in the loop of the AddRec. /// /// At the moment only rewriting SCEVUnknown and SCEVZeroExtendExpr is /// supported. class SCEVLoopGuardRewriter : public SCEVRewriteVisitor { const DenseMap ⤅ public: SCEVLoopGuardRewriter(ScalarEvolution &SE, DenseMap &M) : SCEVRewriteVisitor(SE), Map(M) {} const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; } const SCEV *visitUnknown(const SCEVUnknown *Expr) { auto I = Map.find(Expr); if (I == Map.end()) return Expr; return I->second; } const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { auto I = Map.find(Expr); if (I == Map.end()) return SCEVRewriteVisitor::visitZeroExtendExpr( Expr); return I->second; } }; const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) { SmallVector ExprsToRewrite; auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS, const SCEV *RHS, DenseMap &RewriteMap) { // WARNING: It is generally unsound to apply any wrap flags to the proposed // replacement SCEV which isn't directly implied by the structure of that // SCEV. In particular, using contextual facts to imply flags is *NOT* // legal. See the scoping rules for flags in the header to understand why. // If LHS is a constant, apply information to the other expression. if (isa(LHS)) { std::swap(LHS, RHS); Predicate = CmpInst::getSwappedPredicate(Predicate); } // Check for a condition of the form (-C1 + X < C2). InstCombine will // create this form when combining two checks of the form (X u< C2 + C1) and // (X >=u C1). 
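    // Worked example (editor's note): for the guard (%x + (-8)) u< 8 we have
    // C1 = -8 and C2 = 8; makeExactICmpRegion(ULT, 8) yields [0, 8), and
    // subtracting C1 shifts it to [8, 16), so %x is rewritten below to
    // (umax 8, (umin %x, 15)).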
auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap, &ExprsToRewrite]() { auto *AddExpr = dyn_cast(LHS); if (!AddExpr || AddExpr->getNumOperands() != 2) return false; auto *C1 = dyn_cast(AddExpr->getOperand(0)); auto *LHSUnknown = dyn_cast(AddExpr->getOperand(1)); auto *C2 = dyn_cast(RHS); if (!C1 || !C2 || !LHSUnknown) return false; auto ExactRegion = ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt()) .sub(C1->getAPInt()); // Bail out, unless we have a non-wrapping, monotonic range. if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet()) return false; auto I = RewriteMap.find(LHSUnknown); const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown; RewriteMap[LHSUnknown] = getUMaxExpr( getConstant(ExactRegion.getUnsignedMin()), getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax()))); ExprsToRewrite.push_back(LHSUnknown); return true; }; if (MatchRangeCheckIdiom()) return; // If we have LHS == 0, check if LHS is computing a property of some unknown // SCEV %v which we can rewrite %v to express explicitly. const SCEVConstant *RHSC = dyn_cast(RHS); if (Predicate == CmpInst::ICMP_EQ && RHSC && RHSC->getValue()->isNullValue()) { // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to // explicitly express that. const SCEV *URemLHS = nullptr; const SCEV *URemRHS = nullptr; if (matchURem(LHS, URemLHS, URemRHS)) { if (const SCEVUnknown *LHSUnknown = dyn_cast(URemLHS)) { auto Multiple = getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS); RewriteMap[LHSUnknown] = Multiple; ExprsToRewrite.push_back(LHSUnknown); return; } } } // Do not apply information for constants or if RHS contains an AddRec. if (isa(LHS) || containsAddRecurrence(RHS)) return; // If RHS is SCEVUnknown, make sure the information is applied to it. if (!isa(LHS) && isa(RHS)) { std::swap(LHS, RHS); Predicate = CmpInst::getSwappedPredicate(Predicate); } // Limit to expressions that can be rewritten. if (!isa(LHS) && !isa(LHS)) return; // Check whether LHS has already been rewritten. In that case we want to // chain further rewrites onto the already rewritten value. auto I = RewriteMap.find(LHS); const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS; const SCEV *RewrittenRHS = nullptr; switch (Predicate) { case CmpInst::ICMP_ULT: RewrittenRHS = getUMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType()))); break; case CmpInst::ICMP_SLT: RewrittenRHS = getSMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType()))); break; case CmpInst::ICMP_ULE: RewrittenRHS = getUMinExpr(RewrittenLHS, RHS); break; case CmpInst::ICMP_SLE: RewrittenRHS = getSMinExpr(RewrittenLHS, RHS); break; case CmpInst::ICMP_UGT: RewrittenRHS = getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType()))); break; case CmpInst::ICMP_SGT: RewrittenRHS = getSMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType()))); break; case CmpInst::ICMP_UGE: RewrittenRHS = getUMaxExpr(RewrittenLHS, RHS); break; case CmpInst::ICMP_SGE: RewrittenRHS = getSMaxExpr(RewrittenLHS, RHS); break; case CmpInst::ICMP_EQ: if (isa(RHS)) RewrittenRHS = RHS; break; case CmpInst::ICMP_NE: if (isa(RHS) && cast(RHS)->getValue()->isNullValue()) RewrittenRHS = getUMaxExpr(RewrittenLHS, getOne(RHS->getType())); break; default: break; } if (RewrittenRHS) { RewriteMap[LHS] = RewrittenRHS; if (LHS == RewrittenLHS) ExprsToRewrite.push_back(LHS); } }; SmallVector> Terms; // First, collect information from assumptions dominating the loop. 
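  // (Editor's note) Both sources feed the same Terms list as
  // (condition, entered-if-true) pairs: llvm.assume conditions always hold,
  // while a branch term records which successor leads into the loop. Walking
  // the predecessor chain records the nearest guards last, so the reverse
  // walk below applies the guards closest to function entry first.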
for (auto &AssumeVH : AC.assumptions()) { if (!AssumeVH) continue; auto *AssumeI = cast(AssumeVH); if (!DT.dominates(AssumeI, L->getHeader())) continue; Terms.emplace_back(AssumeI->getOperand(0), true); } // Second, collect conditions from dominating branches. Starting at the loop // predecessor, climb up the predecessor chain, as long as there are // predecessors that can be found that have unique successors leading to the // original header. // TODO: share this logic with isLoopEntryGuardedByCond. for (std::pair Pair( L->getLoopPredecessor(), L->getHeader()); Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { const BranchInst *LoopEntryPredicate = dyn_cast(Pair.first->getTerminator()); if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional()) continue; Terms.emplace_back(LoopEntryPredicate->getCondition(), LoopEntryPredicate->getSuccessor(0) == Pair.second); } // Now apply the information from the collected conditions to RewriteMap. // Conditions are processed in reverse order, so the earliest conditions is // processed first. This ensures the SCEVs with the shortest dependency chains // are constructed first. DenseMap RewriteMap; for (auto &E : reverse(Terms)) { bool EnterIfTrue = E.second; SmallVector Worklist; SmallPtrSet Visited; Worklist.push_back(E.first); while (!Worklist.empty()) { Value *Cond = Worklist.pop_back_val(); if (!Visited.insert(Cond).second) continue; if (auto *Cmp = dyn_cast(Cond)) { auto Predicate = EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate(); const auto *LHS = getSCEV(Cmp->getOperand(0)); const auto *RHS = getSCEV(Cmp->getOperand(1)); CollectCondition(Predicate, LHS, RHS, RewriteMap); continue; } Value *L, *R; if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))) : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) { Worklist.push_back(L); Worklist.push_back(R); } } } if (RewriteMap.empty()) return Expr; // Now that all rewrite information is collect, rewrite the collected // expressions with the information in the map. This applies information to // sub-expressions. if (ExprsToRewrite.size() > 1) { for (const SCEV *Expr : ExprsToRewrite) { const SCEV *RewriteTo = RewriteMap[Expr]; RewriteMap.erase(Expr); SCEVLoopGuardRewriter Rewriter(*this, RewriteMap); RewriteMap.insert({Expr, Rewriter.visit(RewriteTo)}); } } SCEVLoopGuardRewriter Rewriter(*this, RewriteMap); return Rewriter.visit(Expr); } diff --git a/llvm/lib/Support/X86TargetParser.cpp b/llvm/lib/Support/X86TargetParser.cpp index 2567f3ed8034..0daaa6d815bf 100644 --- a/llvm/lib/Support/X86TargetParser.cpp +++ b/llvm/lib/Support/X86TargetParser.cpp @@ -1,711 +1,711 @@ //===-- X86TargetParser - Parser for X86 features ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements a target parser to recognise X86 hardware features. // //===----------------------------------------------------------------------===// #include "llvm/Support/X86TargetParser.h" #include "llvm/ADT/StringSwitch.h" #include using namespace llvm; using namespace llvm::X86; namespace { /// Container class for CPU features. /// This is a constexpr reimplementation of a subset of std::bitset. It would be /// nice to use std::bitset directly, but it doesn't support constant /// initialization. 
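/// For instance (editor's note), std::bitset's mutators are not usable in
/// constant evaluation before C++23:
///   constexpr std::bitset<X86::CPU_FEATURE_MAX> B{}; // ok, but frozen
///   // B.set(...) cannot participate in a constexpr initializer here.
/// The hand-rolled class below instead supports
///   constexpr FeatureBitset F = {X86::FEATURE_SSE2, X86::FEATURE_AVX};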
class FeatureBitset {
  static constexpr unsigned NUM_FEATURE_WORDS =
      (X86::CPU_FEATURE_MAX + 31) / 32;

  // This cannot be a std::array, operator[] is not constexpr until C++17.
  uint32_t Bits[NUM_FEATURE_WORDS] = {};

public:
  constexpr FeatureBitset() = default;
  constexpr FeatureBitset(std::initializer_list<unsigned> Init) {
    for (auto I : Init)
      set(I);
  }

  bool any() const {
    return llvm::any_of(Bits, [](uint64_t V) { return V != 0; });
  }

  constexpr FeatureBitset &set(unsigned I) {
    // GCC <6.2 crashes if this is written in a single statement.
    uint32_t NewBits = Bits[I / 32] | (uint32_t(1) << (I % 32));
    Bits[I / 32] = NewBits;
    return *this;
  }

  constexpr bool operator[](unsigned I) const {
    uint32_t Mask = uint32_t(1) << (I % 32);
    return (Bits[I / 32] & Mask) != 0;
  }

  constexpr FeatureBitset &operator&=(const FeatureBitset &RHS) {
    for (unsigned I = 0, E = array_lengthof(Bits); I != E; ++I) {
      // GCC <6.2 crashes if this is written in a single statement.
      uint32_t NewBits = Bits[I] & RHS.Bits[I];
      Bits[I] = NewBits;
    }
    return *this;
  }

  constexpr FeatureBitset &operator|=(const FeatureBitset &RHS) {
    for (unsigned I = 0, E = array_lengthof(Bits); I != E; ++I) {
      // GCC <6.2 crashes if this is written in a single statement.
      uint32_t NewBits = Bits[I] | RHS.Bits[I];
      Bits[I] = NewBits;
    }
    return *this;
  }

  // gcc 5.3 miscompiles this if we try to write this using operator&=.
  constexpr FeatureBitset operator&(const FeatureBitset &RHS) const {
    FeatureBitset Result;
    for (unsigned I = 0, E = array_lengthof(Bits); I != E; ++I)
      Result.Bits[I] = Bits[I] & RHS.Bits[I];
    return Result;
  }

  // gcc 5.3 miscompiles this if we try to write this using operator&=.
  constexpr FeatureBitset operator|(const FeatureBitset &RHS) const {
    FeatureBitset Result;
    for (unsigned I = 0, E = array_lengthof(Bits); I != E; ++I)
      Result.Bits[I] = Bits[I] | RHS.Bits[I];
    return Result;
  }

  constexpr FeatureBitset operator~() const {
    FeatureBitset Result;
    for (unsigned I = 0, E = array_lengthof(Bits); I != E; ++I)
      Result.Bits[I] = ~Bits[I];
    return Result;
  }

  constexpr bool operator!=(const FeatureBitset &RHS) const {
    for (unsigned I = 0, E = array_lengthof(Bits); I != E; ++I)
      if (Bits[I] != RHS.Bits[I])
        return true;
    return false;
  }
};

struct ProcInfo {
  StringLiteral Name;
  X86::CPUKind Kind;
  unsigned KeyFeature;
  FeatureBitset Features;
};

struct FeatureInfo {
  StringLiteral Name;
  FeatureBitset ImpliedFeatures;
};

} // end anonymous namespace

#define X86_FEATURE(ENUM, STRING)                                             \
  constexpr FeatureBitset Feature##ENUM = {X86::FEATURE_##ENUM};
#include "llvm/Support/X86TargetParser.def"

// Pentium with MMX.
constexpr FeatureBitset FeaturesPentiumMMX =
    FeatureX87 | FeatureCMPXCHG8B | FeatureMMX;

// Pentium 2 and 3.
constexpr FeatureBitset FeaturesPentium2 =
    FeatureX87 | FeatureCMPXCHG8B | FeatureMMX | FeatureFXSR;
constexpr FeatureBitset FeaturesPentium3 = FeaturesPentium2 | FeatureSSE;

// Pentium 4 CPUs
constexpr FeatureBitset FeaturesPentium4 = FeaturesPentium3 | FeatureSSE2;
constexpr FeatureBitset FeaturesPrescott = FeaturesPentium4 | FeatureSSE3;
constexpr FeatureBitset FeaturesNocona =
    FeaturesPrescott | Feature64BIT | FeatureCMPXCHG16B;

// Basic 64-bit capable CPU.
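// (Editor's note) The FeaturesX86_64_V{2,3,4} sets below correspond to the
// x86-64 psABI micro-architecture levels: each level is the previous one plus
// a fixed bundle (v2 the SSE4.2/CMPXCHG16B era, v3 the AVX2 era, v4 an
// AVX-512 subset). The levels are cumulative by construction, which could be
// checked once the definitions are in scope, e.g.:
//   static_assert(!((FeaturesX86_64_V3 & FeaturesX86_64_V2) !=
//                   FeaturesX86_64_V2), "levels are cumulative");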
constexpr FeatureBitset FeaturesX86_64 = FeaturesPentium4 | Feature64BIT; constexpr FeatureBitset FeaturesX86_64_V2 = FeaturesX86_64 | FeatureSAHF | FeaturePOPCNT | FeatureCRC32 | FeatureSSE4_2 | FeatureCMPXCHG16B; constexpr FeatureBitset FeaturesX86_64_V3 = FeaturesX86_64_V2 | FeatureAVX2 | FeatureBMI | FeatureBMI2 | FeatureF16C | FeatureFMA | FeatureLZCNT | FeatureMOVBE | FeatureXSAVE; constexpr FeatureBitset FeaturesX86_64_V4 = FeaturesX86_64_V3 | FeatureAVX512BW | FeatureAVX512CD | FeatureAVX512DQ | FeatureAVX512VL; // Intel Core CPUs constexpr FeatureBitset FeaturesCore2 = FeaturesNocona | FeatureSAHF | FeatureSSSE3; constexpr FeatureBitset FeaturesPenryn = FeaturesCore2 | FeatureSSE4_1; constexpr FeatureBitset FeaturesNehalem = FeaturesPenryn | FeaturePOPCNT | FeatureCRC32 | FeatureSSE4_2; constexpr FeatureBitset FeaturesWestmere = FeaturesNehalem | FeaturePCLMUL; constexpr FeatureBitset FeaturesSandyBridge = FeaturesWestmere | FeatureAVX | FeatureXSAVE | FeatureXSAVEOPT; constexpr FeatureBitset FeaturesIvyBridge = FeaturesSandyBridge | FeatureF16C | FeatureFSGSBASE | FeatureRDRND; constexpr FeatureBitset FeaturesHaswell = FeaturesIvyBridge | FeatureAVX2 | FeatureBMI | FeatureBMI2 | FeatureFMA | FeatureINVPCID | FeatureLZCNT | FeatureMOVBE; constexpr FeatureBitset FeaturesBroadwell = FeaturesHaswell | FeatureADX | FeaturePRFCHW | FeatureRDSEED; // Intel Knights Landing and Knights Mill // Knights Landing has feature parity with Broadwell. constexpr FeatureBitset FeaturesKNL = FeaturesBroadwell | FeatureAES | FeatureAVX512F | FeatureAVX512CD | FeatureAVX512ER | FeatureAVX512PF | FeaturePREFETCHWT1; constexpr FeatureBitset FeaturesKNM = FeaturesKNL | FeatureAVX512VPOPCNTDQ; // Intel Skylake processors. constexpr FeatureBitset FeaturesSkylakeClient = FeaturesBroadwell | FeatureAES | FeatureCLFLUSHOPT | FeatureXSAVEC | FeatureXSAVES | FeatureSGX; // SkylakeServer inherits all SkylakeClient features except SGX. // FIXME: That doesn't match gcc. constexpr FeatureBitset FeaturesSkylakeServer = (FeaturesSkylakeClient & ~FeatureSGX) | FeatureAVX512F | FeatureAVX512CD | FeatureAVX512DQ | FeatureAVX512BW | FeatureAVX512VL | FeatureCLWB | FeaturePKU; constexpr FeatureBitset FeaturesCascadeLake = FeaturesSkylakeServer | FeatureAVX512VNNI; constexpr FeatureBitset FeaturesCooperLake = FeaturesCascadeLake | FeatureAVX512BF16; // Intel 10nm processors. 
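// (Editor's note) This table composes sets with the constexpr |, & and ~
// operators of FeatureBitset, and derivation is not purely additive:
// FeaturesSkylakeServer above strips SGX via (FeaturesSkylakeClient &
// ~FeatureSGX), and FeaturesRocketlake below does the same to the Icelake
// client set. Everything folds at compile time.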
constexpr FeatureBitset FeaturesCannonlake = FeaturesSkylakeClient | FeatureAVX512F | FeatureAVX512CD | FeatureAVX512DQ | FeatureAVX512BW | FeatureAVX512VL | FeatureAVX512IFMA | FeatureAVX512VBMI | FeaturePKU | FeatureSHA; constexpr FeatureBitset FeaturesICLClient = FeaturesCannonlake | FeatureAVX512BITALG | FeatureAVX512VBMI2 | FeatureAVX512VNNI | FeatureAVX512VPOPCNTDQ | FeatureGFNI | FeatureRDPID | FeatureVAES | FeatureVPCLMULQDQ; constexpr FeatureBitset FeaturesRocketlake = FeaturesICLClient & ~FeatureSGX; constexpr FeatureBitset FeaturesICLServer = FeaturesICLClient | FeatureCLWB | FeaturePCONFIG | FeatureWBNOINVD; constexpr FeatureBitset FeaturesTigerlake = FeaturesICLClient | FeatureAVX512VP2INTERSECT | FeatureMOVDIR64B | FeatureCLWB | FeatureMOVDIRI | FeatureSHSTK | FeatureKL | FeatureWIDEKL; constexpr FeatureBitset FeaturesSapphireRapids = FeaturesICLServer | FeatureAMX_BF16 | FeatureAMX_INT8 | FeatureAMX_TILE | - FeatureAVX512BF16 | FeatureAVX512FP16 | FeatureAVX512VP2INTERSECT | - FeatureAVXVNNI | FeatureCLDEMOTE | FeatureENQCMD | FeatureMOVDIR64B | - FeatureMOVDIRI | FeaturePTWRITE | FeatureSERIALIZE | FeatureSHSTK | - FeatureTSXLDTRK | FeatureUINTR | FeatureWAITPKG; + FeatureAVX512BF16 | FeatureAVX512FP16 | FeatureAVXVNNI | FeatureCLDEMOTE | + FeatureENQCMD | FeatureMOVDIR64B | FeatureMOVDIRI | FeaturePTWRITE | + FeatureSERIALIZE | FeatureSHSTK | FeatureTSXLDTRK | FeatureUINTR | + FeatureWAITPKG; // Intel Atom processors. // Bonnell has feature parity with Core2 and adds MOVBE. constexpr FeatureBitset FeaturesBonnell = FeaturesCore2 | FeatureMOVBE; // Silvermont has parity with Westmere and Bonnell plus PRFCHW and RDRND. constexpr FeatureBitset FeaturesSilvermont = FeaturesBonnell | FeaturesWestmere | FeaturePRFCHW | FeatureRDRND; constexpr FeatureBitset FeaturesGoldmont = FeaturesSilvermont | FeatureAES | FeatureCLFLUSHOPT | FeatureFSGSBASE | FeatureRDSEED | FeatureSHA | FeatureXSAVE | FeatureXSAVEC | FeatureXSAVEOPT | FeatureXSAVES; constexpr FeatureBitset FeaturesGoldmontPlus = FeaturesGoldmont | FeaturePTWRITE | FeatureRDPID | FeatureSGX; constexpr FeatureBitset FeaturesTremont = FeaturesGoldmontPlus | FeatureCLWB | FeatureGFNI; constexpr FeatureBitset FeaturesAlderlake = FeaturesTremont | FeatureADX | FeatureBMI | FeatureBMI2 | FeatureF16C | FeatureFMA | FeatureINVPCID | FeatureLZCNT | FeaturePCONFIG | FeaturePKU | FeatureSERIALIZE | FeatureSHSTK | FeatureVAES | FeatureVPCLMULQDQ | FeatureCLDEMOTE | FeatureMOVDIR64B | FeatureMOVDIRI | FeatureWAITPKG | FeatureAVXVNNI | FeatureHRESET | FeatureWIDEKL; // Geode Processor. constexpr FeatureBitset FeaturesGeode = FeatureX87 | FeatureCMPXCHG8B | FeatureMMX | Feature3DNOW | Feature3DNOWA; // K6 processor. constexpr FeatureBitset FeaturesK6 = FeatureX87 | FeatureCMPXCHG8B | FeatureMMX; // K7 and K8 architecture processors. constexpr FeatureBitset FeaturesAthlon = FeatureX87 | FeatureCMPXCHG8B | FeatureMMX | Feature3DNOW | Feature3DNOWA; constexpr FeatureBitset FeaturesAthlonXP = FeaturesAthlon | FeatureFXSR | FeatureSSE; constexpr FeatureBitset FeaturesK8 = FeaturesAthlonXP | FeatureSSE2 | Feature64BIT; constexpr FeatureBitset FeaturesK8SSE3 = FeaturesK8 | FeatureSSE3; constexpr FeatureBitset FeaturesAMDFAM10 = FeaturesK8SSE3 | FeatureCMPXCHG16B | FeatureLZCNT | FeaturePOPCNT | FeaturePRFCHW | FeatureSAHF | FeatureSSE4_A; // Bobcat architecture processors. 
constexpr FeatureBitset FeaturesBTVER1 = FeatureX87 | FeatureCMPXCHG8B | FeatureCMPXCHG16B | Feature64BIT | FeatureFXSR | FeatureLZCNT | FeatureMMX | FeaturePOPCNT | FeaturePRFCHW | FeatureSSE | FeatureSSE2 | FeatureSSE3 | FeatureSSSE3 | FeatureSSE4_A | FeatureSAHF; constexpr FeatureBitset FeaturesBTVER2 = FeaturesBTVER1 | FeatureAES | FeatureAVX | FeatureBMI | FeatureCRC32 | FeatureF16C | FeatureMOVBE | FeaturePCLMUL | FeatureXSAVE | FeatureXSAVEOPT; // AMD Bulldozer architecture processors. constexpr FeatureBitset FeaturesBDVER1 = FeatureX87 | FeatureAES | FeatureAVX | FeatureCMPXCHG8B | FeatureCMPXCHG16B | FeatureCRC32 | Feature64BIT | FeatureFMA4 | FeatureFXSR | FeatureLWP | FeatureLZCNT | FeatureMMX | FeaturePCLMUL | FeaturePOPCNT | FeaturePRFCHW | FeatureSAHF | FeatureSSE | FeatureSSE2 | FeatureSSE3 | FeatureSSSE3 | FeatureSSE4_1 | FeatureSSE4_2 | FeatureSSE4_A | FeatureXOP | FeatureXSAVE; constexpr FeatureBitset FeaturesBDVER2 = FeaturesBDVER1 | FeatureBMI | FeatureFMA | FeatureF16C | FeatureTBM; constexpr FeatureBitset FeaturesBDVER3 = FeaturesBDVER2 | FeatureFSGSBASE | FeatureXSAVEOPT; constexpr FeatureBitset FeaturesBDVER4 = FeaturesBDVER3 | FeatureAVX2 | FeatureBMI2 | FeatureMOVBE | FeatureMWAITX | FeatureRDRND; // AMD Zen architecture processors. constexpr FeatureBitset FeaturesZNVER1 = FeatureX87 | FeatureADX | FeatureAES | FeatureAVX | FeatureAVX2 | FeatureBMI | FeatureBMI2 | FeatureCLFLUSHOPT | FeatureCLZERO | FeatureCMPXCHG8B | FeatureCMPXCHG16B | FeatureCRC32 | Feature64BIT | FeatureF16C | FeatureFMA | FeatureFSGSBASE | FeatureFXSR | FeatureLZCNT | FeatureMMX | FeatureMOVBE | FeatureMWAITX | FeaturePCLMUL | FeaturePOPCNT | FeaturePRFCHW | FeatureRDRND | FeatureRDSEED | FeatureSAHF | FeatureSHA | FeatureSSE | FeatureSSE2 | FeatureSSE3 | FeatureSSSE3 | FeatureSSE4_1 | FeatureSSE4_2 | FeatureSSE4_A | FeatureXSAVE | FeatureXSAVEC | FeatureXSAVEOPT | FeatureXSAVES; constexpr FeatureBitset FeaturesZNVER2 = FeaturesZNVER1 | FeatureCLWB | FeatureRDPID | FeatureRDPRU | FeatureWBNOINVD; static constexpr FeatureBitset FeaturesZNVER3 = FeaturesZNVER2 | FeatureINVPCID | FeaturePKU | FeatureVAES | FeatureVPCLMULQDQ; constexpr ProcInfo Processors[] = { // Empty processor. Include X87 and CMPXCHG8 for backwards compatibility. { {""}, CK_None, ~0U, FeatureX87 | FeatureCMPXCHG8B }, // i386-generation processors. { {"i386"}, CK_i386, ~0U, FeatureX87 }, // i486-generation processors. { {"i486"}, CK_i486, ~0U, FeatureX87 }, { {"winchip-c6"}, CK_WinChipC6, ~0U, FeaturesPentiumMMX }, { {"winchip2"}, CK_WinChip2, ~0U, FeaturesPentiumMMX | Feature3DNOW }, { {"c3"}, CK_C3, ~0U, FeaturesPentiumMMX | Feature3DNOW }, // i586-generation processors, P5 microarchitecture based. { {"i586"}, CK_i586, ~0U, FeatureX87 | FeatureCMPXCHG8B }, { {"pentium"}, CK_Pentium, ~0U, FeatureX87 | FeatureCMPXCHG8B }, { {"pentium-mmx"}, CK_PentiumMMX, ~0U, FeaturesPentiumMMX }, // i686-generation processors, P6 / Pentium M microarchitecture based. { {"pentiumpro"}, CK_PentiumPro, ~0U, FeatureX87 | FeatureCMPXCHG8B }, { {"i686"}, CK_i686, ~0U, FeatureX87 | FeatureCMPXCHG8B }, { {"pentium2"}, CK_Pentium2, ~0U, FeaturesPentium2 }, { {"pentium3"}, CK_Pentium3, ~0U, FeaturesPentium3 }, { {"pentium3m"}, CK_Pentium3, ~0U, FeaturesPentium3 }, { {"pentium-m"}, CK_PentiumM, ~0U, FeaturesPentium4 }, { {"c3-2"}, CK_C3_2, ~0U, FeaturesPentium3 }, { {"yonah"}, CK_Yonah, ~0U, FeaturesPrescott }, // Netburst microarchitecture based processors. 
{ {"pentium4"}, CK_Pentium4, ~0U, FeaturesPentium4 }, { {"pentium4m"}, CK_Pentium4, ~0U, FeaturesPentium4 }, { {"prescott"}, CK_Prescott, ~0U, FeaturesPrescott }, { {"nocona"}, CK_Nocona, ~0U, FeaturesNocona }, // Core microarchitecture based processors. { {"core2"}, CK_Core2, ~0U, FeaturesCore2 }, { {"penryn"}, CK_Penryn, ~0U, FeaturesPenryn }, // Atom processors { {"bonnell"}, CK_Bonnell, FEATURE_SSSE3, FeaturesBonnell }, { {"atom"}, CK_Bonnell, FEATURE_SSSE3, FeaturesBonnell }, { {"silvermont"}, CK_Silvermont, FEATURE_SSE4_2, FeaturesSilvermont }, { {"slm"}, CK_Silvermont, FEATURE_SSE4_2, FeaturesSilvermont }, { {"goldmont"}, CK_Goldmont, FEATURE_SSE4_2, FeaturesGoldmont }, { {"goldmont-plus"}, CK_GoldmontPlus, FEATURE_SSE4_2, FeaturesGoldmontPlus }, { {"tremont"}, CK_Tremont, FEATURE_SSE4_2, FeaturesTremont }, // Nehalem microarchitecture based processors. { {"nehalem"}, CK_Nehalem, FEATURE_SSE4_2, FeaturesNehalem }, { {"corei7"}, CK_Nehalem, FEATURE_SSE4_2, FeaturesNehalem }, // Westmere microarchitecture based processors. { {"westmere"}, CK_Westmere, FEATURE_PCLMUL, FeaturesWestmere }, // Sandy Bridge microarchitecture based processors. { {"sandybridge"}, CK_SandyBridge, FEATURE_AVX, FeaturesSandyBridge }, { {"corei7-avx"}, CK_SandyBridge, FEATURE_AVX, FeaturesSandyBridge }, // Ivy Bridge microarchitecture based processors. { {"ivybridge"}, CK_IvyBridge, FEATURE_AVX, FeaturesIvyBridge }, { {"core-avx-i"}, CK_IvyBridge, FEATURE_AVX, FeaturesIvyBridge }, // Haswell microarchitecture based processors. { {"haswell"}, CK_Haswell, FEATURE_AVX2, FeaturesHaswell }, { {"core-avx2"}, CK_Haswell, FEATURE_AVX2, FeaturesHaswell }, // Broadwell microarchitecture based processors. { {"broadwell"}, CK_Broadwell, FEATURE_AVX2, FeaturesBroadwell }, // Skylake client microarchitecture based processors. { {"skylake"}, CK_SkylakeClient, FEATURE_AVX2, FeaturesSkylakeClient }, // Skylake server microarchitecture based processors. { {"skylake-avx512"}, CK_SkylakeServer, FEATURE_AVX512F, FeaturesSkylakeServer }, { {"skx"}, CK_SkylakeServer, FEATURE_AVX512F, FeaturesSkylakeServer }, // Cascadelake Server microarchitecture based processors. { {"cascadelake"}, CK_Cascadelake, FEATURE_AVX512VNNI, FeaturesCascadeLake }, // Cooperlake Server microarchitecture based processors. { {"cooperlake"}, CK_Cooperlake, FEATURE_AVX512BF16, FeaturesCooperLake }, // Cannonlake client microarchitecture based processors. { {"cannonlake"}, CK_Cannonlake, FEATURE_AVX512VBMI, FeaturesCannonlake }, // Icelake client microarchitecture based processors. { {"icelake-client"}, CK_IcelakeClient, FEATURE_AVX512VBMI2, FeaturesICLClient }, // Rocketlake microarchitecture based processors. { {"rocketlake"}, CK_Rocketlake, FEATURE_AVX512VBMI2, FeaturesRocketlake }, // Icelake server microarchitecture based processors. { {"icelake-server"}, CK_IcelakeServer, FEATURE_AVX512VBMI2, FeaturesICLServer }, // Tigerlake microarchitecture based processors. { {"tigerlake"}, CK_Tigerlake, FEATURE_AVX512VP2INTERSECT, FeaturesTigerlake }, // Sapphire Rapids microarchitecture based processors. - { {"sapphirerapids"}, CK_SapphireRapids, FEATURE_AVX512VP2INTERSECT, FeaturesSapphireRapids }, + { {"sapphirerapids"}, CK_SapphireRapids, FEATURE_AVX512BF16, FeaturesSapphireRapids }, // Alderlake microarchitecture based processors. { {"alderlake"}, CK_Alderlake, FEATURE_AVX2, FeaturesAlderlake }, // Knights Landing processor. { {"knl"}, CK_KNL, FEATURE_AVX512F, FeaturesKNL }, // Knights Mill processor. 
{ {"knm"}, CK_KNM, FEATURE_AVX5124FMAPS, FeaturesKNM }, // Lakemont microarchitecture based processors. { {"lakemont"}, CK_Lakemont, ~0U, FeatureCMPXCHG8B }, // K6 architecture processors. { {"k6"}, CK_K6, ~0U, FeaturesK6 }, { {"k6-2"}, CK_K6_2, ~0U, FeaturesK6 | Feature3DNOW }, { {"k6-3"}, CK_K6_3, ~0U, FeaturesK6 | Feature3DNOW }, // K7 architecture processors. { {"athlon"}, CK_Athlon, ~0U, FeaturesAthlon }, { {"athlon-tbird"}, CK_Athlon, ~0U, FeaturesAthlon }, { {"athlon-xp"}, CK_AthlonXP, ~0U, FeaturesAthlonXP }, { {"athlon-mp"}, CK_AthlonXP, ~0U, FeaturesAthlonXP }, { {"athlon-4"}, CK_AthlonXP, ~0U, FeaturesAthlonXP }, // K8 architecture processors. { {"k8"}, CK_K8, ~0U, FeaturesK8 }, { {"athlon64"}, CK_K8, ~0U, FeaturesK8 }, { {"athlon-fx"}, CK_K8, ~0U, FeaturesK8 }, { {"opteron"}, CK_K8, ~0U, FeaturesK8 }, { {"k8-sse3"}, CK_K8SSE3, ~0U, FeaturesK8SSE3 }, { {"athlon64-sse3"}, CK_K8SSE3, ~0U, FeaturesK8SSE3 }, { {"opteron-sse3"}, CK_K8SSE3, ~0U, FeaturesK8SSE3 }, { {"amdfam10"}, CK_AMDFAM10, FEATURE_SSE4_A, FeaturesAMDFAM10 }, { {"barcelona"}, CK_AMDFAM10, FEATURE_SSE4_A, FeaturesAMDFAM10 }, // Bobcat architecture processors. { {"btver1"}, CK_BTVER1, FEATURE_SSE4_A, FeaturesBTVER1 }, { {"btver2"}, CK_BTVER2, FEATURE_BMI, FeaturesBTVER2 }, // Bulldozer architecture processors. { {"bdver1"}, CK_BDVER1, FEATURE_XOP, FeaturesBDVER1 }, { {"bdver2"}, CK_BDVER2, FEATURE_FMA, FeaturesBDVER2 }, { {"bdver3"}, CK_BDVER3, FEATURE_FMA, FeaturesBDVER3 }, { {"bdver4"}, CK_BDVER4, FEATURE_AVX2, FeaturesBDVER4 }, // Zen architecture processors. { {"znver1"}, CK_ZNVER1, FEATURE_AVX2, FeaturesZNVER1 }, { {"znver2"}, CK_ZNVER2, FEATURE_AVX2, FeaturesZNVER2 }, { {"znver3"}, CK_ZNVER3, FEATURE_AVX2, FeaturesZNVER3 }, // Generic 64-bit processor. { {"x86-64"}, CK_x86_64, ~0U, FeaturesX86_64 }, { {"x86-64-v2"}, CK_x86_64_v2, ~0U, FeaturesX86_64_V2 }, { {"x86-64-v3"}, CK_x86_64_v3, ~0U, FeaturesX86_64_V3 }, { {"x86-64-v4"}, CK_x86_64_v4, ~0U, FeaturesX86_64_V4 }, // Geode processors. { {"geode"}, CK_Geode, ~0U, FeaturesGeode }, }; constexpr const char *NoTuneList[] = {"x86-64-v2", "x86-64-v3", "x86-64-v4"}; X86::CPUKind llvm::X86::parseArchX86(StringRef CPU, bool Only64Bit) { for (const auto &P : Processors) if (P.Name == CPU && (P.Features[FEATURE_64BIT] || !Only64Bit)) return P.Kind; return CK_None; } X86::CPUKind llvm::X86::parseTuneCPU(StringRef CPU, bool Only64Bit) { if (llvm::is_contained(NoTuneList, CPU)) return CK_None; return parseArchX86(CPU, Only64Bit); } void llvm::X86::fillValidCPUArchList(SmallVectorImpl &Values, bool Only64Bit) { for (const auto &P : Processors) if (!P.Name.empty() && (P.Features[FEATURE_64BIT] || !Only64Bit)) Values.emplace_back(P.Name); } void llvm::X86::fillValidTuneCPUList(SmallVectorImpl &Values, bool Only64Bit) { for (const ProcInfo &P : Processors) if (!P.Name.empty() && (P.Features[FEATURE_64BIT] || !Only64Bit) && !llvm::is_contained(NoTuneList, P.Name)) Values.emplace_back(P.Name); } ProcessorFeatures llvm::X86::getKeyFeature(X86::CPUKind Kind) { // FIXME: Can we avoid a linear search here? The table might be sorted by // CPUKind so we could binary search? for (const auto &P : Processors) { if (P.Kind == Kind) { assert(P.KeyFeature != ~0U && "Processor does not have a key feature."); return static_cast(P.KeyFeature); } } llvm_unreachable("Unable to find CPU kind!"); } // Features with no dependencies. 
constexpr FeatureBitset ImpliedFeatures64BIT = {}; constexpr FeatureBitset ImpliedFeaturesADX = {}; constexpr FeatureBitset ImpliedFeaturesBMI = {}; constexpr FeatureBitset ImpliedFeaturesBMI2 = {}; constexpr FeatureBitset ImpliedFeaturesCLDEMOTE = {}; constexpr FeatureBitset ImpliedFeaturesCLFLUSHOPT = {}; constexpr FeatureBitset ImpliedFeaturesCLWB = {}; constexpr FeatureBitset ImpliedFeaturesCLZERO = {}; constexpr FeatureBitset ImpliedFeaturesCMOV = {}; constexpr FeatureBitset ImpliedFeaturesCMPXCHG16B = {}; constexpr FeatureBitset ImpliedFeaturesCMPXCHG8B = {}; constexpr FeatureBitset ImpliedFeaturesCRC32 = {}; constexpr FeatureBitset ImpliedFeaturesENQCMD = {}; constexpr FeatureBitset ImpliedFeaturesFSGSBASE = {}; constexpr FeatureBitset ImpliedFeaturesFXSR = {}; constexpr FeatureBitset ImpliedFeaturesINVPCID = {}; constexpr FeatureBitset ImpliedFeaturesLWP = {}; constexpr FeatureBitset ImpliedFeaturesLZCNT = {}; constexpr FeatureBitset ImpliedFeaturesMWAITX = {}; constexpr FeatureBitset ImpliedFeaturesMOVBE = {}; constexpr FeatureBitset ImpliedFeaturesMOVDIR64B = {}; constexpr FeatureBitset ImpliedFeaturesMOVDIRI = {}; constexpr FeatureBitset ImpliedFeaturesPCONFIG = {}; constexpr FeatureBitset ImpliedFeaturesPOPCNT = {}; constexpr FeatureBitset ImpliedFeaturesPKU = {}; constexpr FeatureBitset ImpliedFeaturesPREFETCHWT1 = {}; constexpr FeatureBitset ImpliedFeaturesPRFCHW = {}; constexpr FeatureBitset ImpliedFeaturesPTWRITE = {}; constexpr FeatureBitset ImpliedFeaturesRDPID = {}; constexpr FeatureBitset ImpliedFeaturesRDPRU = {}; constexpr FeatureBitset ImpliedFeaturesRDRND = {}; constexpr FeatureBitset ImpliedFeaturesRDSEED = {}; constexpr FeatureBitset ImpliedFeaturesRTM = {}; constexpr FeatureBitset ImpliedFeaturesSAHF = {}; constexpr FeatureBitset ImpliedFeaturesSERIALIZE = {}; constexpr FeatureBitset ImpliedFeaturesSGX = {}; constexpr FeatureBitset ImpliedFeaturesSHSTK = {}; constexpr FeatureBitset ImpliedFeaturesTBM = {}; constexpr FeatureBitset ImpliedFeaturesTSXLDTRK = {}; constexpr FeatureBitset ImpliedFeaturesUINTR = {}; constexpr FeatureBitset ImpliedFeaturesWAITPKG = {}; constexpr FeatureBitset ImpliedFeaturesWBNOINVD = {}; constexpr FeatureBitset ImpliedFeaturesVZEROUPPER = {}; constexpr FeatureBitset ImpliedFeaturesX87 = {}; constexpr FeatureBitset ImpliedFeaturesXSAVE = {}; // Not really CPU features, but need to be in the table because clang uses // target features to communicate them to the backend. constexpr FeatureBitset ImpliedFeaturesRETPOLINE_EXTERNAL_THUNK = {}; constexpr FeatureBitset ImpliedFeaturesRETPOLINE_INDIRECT_BRANCHES = {}; constexpr FeatureBitset ImpliedFeaturesRETPOLINE_INDIRECT_CALLS = {}; constexpr FeatureBitset ImpliedFeaturesLVI_CFI = {}; constexpr FeatureBitset ImpliedFeaturesLVI_LOAD_HARDENING = {}; // XSAVE features are dependent on basic XSAVE. constexpr FeatureBitset ImpliedFeaturesXSAVEC = FeatureXSAVE; constexpr FeatureBitset ImpliedFeaturesXSAVEOPT = FeatureXSAVE; constexpr FeatureBitset ImpliedFeaturesXSAVES = FeatureXSAVE; // MMX->3DNOW->3DNOWA chain. constexpr FeatureBitset ImpliedFeaturesMMX = {}; constexpr FeatureBitset ImpliedFeatures3DNOW = FeatureMMX; constexpr FeatureBitset ImpliedFeatures3DNOWA = Feature3DNOW; // SSE/AVX/AVX512F chain. 
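// Each level implies the previous one; AVX512F additionally pulls in F16C and
// FMA.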
constexpr FeatureBitset ImpliedFeaturesSSE = {}; constexpr FeatureBitset ImpliedFeaturesSSE2 = FeatureSSE; constexpr FeatureBitset ImpliedFeaturesSSE3 = FeatureSSE2; constexpr FeatureBitset ImpliedFeaturesSSSE3 = FeatureSSE3; constexpr FeatureBitset ImpliedFeaturesSSE4_1 = FeatureSSSE3; constexpr FeatureBitset ImpliedFeaturesSSE4_2 = FeatureSSE4_1; constexpr FeatureBitset ImpliedFeaturesAVX = FeatureSSE4_2; constexpr FeatureBitset ImpliedFeaturesAVX2 = FeatureAVX; constexpr FeatureBitset ImpliedFeaturesAVX512F = FeatureAVX2 | FeatureF16C | FeatureFMA; // Vector extensions that build on SSE or AVX. constexpr FeatureBitset ImpliedFeaturesAES = FeatureSSE2; constexpr FeatureBitset ImpliedFeaturesF16C = FeatureAVX; constexpr FeatureBitset ImpliedFeaturesFMA = FeatureAVX; constexpr FeatureBitset ImpliedFeaturesGFNI = FeatureSSE2; constexpr FeatureBitset ImpliedFeaturesPCLMUL = FeatureSSE2; constexpr FeatureBitset ImpliedFeaturesSHA = FeatureSSE2; constexpr FeatureBitset ImpliedFeaturesVAES = FeatureAES | FeatureAVX; constexpr FeatureBitset ImpliedFeaturesVPCLMULQDQ = FeatureAVX | FeaturePCLMUL; // AVX512 features. constexpr FeatureBitset ImpliedFeaturesAVX512CD = FeatureAVX512F; constexpr FeatureBitset ImpliedFeaturesAVX512BW = FeatureAVX512F; constexpr FeatureBitset ImpliedFeaturesAVX512DQ = FeatureAVX512F; constexpr FeatureBitset ImpliedFeaturesAVX512ER = FeatureAVX512F; constexpr FeatureBitset ImpliedFeaturesAVX512PF = FeatureAVX512F; constexpr FeatureBitset ImpliedFeaturesAVX512VL = FeatureAVX512F; constexpr FeatureBitset ImpliedFeaturesAVX512BF16 = FeatureAVX512BW; constexpr FeatureBitset ImpliedFeaturesAVX512BITALG = FeatureAVX512BW; constexpr FeatureBitset ImpliedFeaturesAVX512IFMA = FeatureAVX512F; constexpr FeatureBitset ImpliedFeaturesAVX512VNNI = FeatureAVX512F; constexpr FeatureBitset ImpliedFeaturesAVX512VPOPCNTDQ = FeatureAVX512F; constexpr FeatureBitset ImpliedFeaturesAVX512VBMI = FeatureAVX512BW; constexpr FeatureBitset ImpliedFeaturesAVX512VBMI2 = FeatureAVX512BW; constexpr FeatureBitset ImpliedFeaturesAVX512VP2INTERSECT = FeatureAVX512F; // FIXME: These two aren't really implemented and just exist in the feature // list for __builtin_cpu_supports. So omit their dependencies. constexpr FeatureBitset ImpliedFeaturesAVX5124FMAPS = {}; constexpr FeatureBitset ImpliedFeaturesAVX5124VNNIW = {}; // SSE4_A->FMA4->XOP chain. 
constexpr FeatureBitset ImpliedFeaturesSSE4_A = FeatureSSE3;
constexpr FeatureBitset ImpliedFeaturesFMA4 = FeatureAVX | FeatureSSE4_A;
constexpr FeatureBitset ImpliedFeaturesXOP = FeatureFMA4;

// AMX Features
constexpr FeatureBitset ImpliedFeaturesAMX_TILE = {};
constexpr FeatureBitset ImpliedFeaturesAMX_BF16 = FeatureAMX_TILE;
constexpr FeatureBitset ImpliedFeaturesAMX_INT8 = FeatureAMX_TILE;
constexpr FeatureBitset ImpliedFeaturesHRESET = {};

static constexpr FeatureBitset ImpliedFeaturesAVX512FP16 =
    FeatureAVX512BW | FeatureAVX512DQ | FeatureAVX512VL;

// Key Locker Features
constexpr FeatureBitset ImpliedFeaturesKL = FeatureSSE2;
constexpr FeatureBitset ImpliedFeaturesWIDEKL = FeatureKL;

// AVXVNNI Features
constexpr FeatureBitset ImpliedFeaturesAVXVNNI = FeatureAVX2;

constexpr FeatureInfo FeatureInfos[X86::CPU_FEATURE_MAX] = {
#define X86_FEATURE(ENUM, STR) {{STR}, ImpliedFeatures##ENUM},
#include "llvm/Support/X86TargetParser.def"
};

void llvm::X86::getFeaturesForCPU(StringRef CPU,
                                  SmallVectorImpl<StringRef> &EnabledFeatures) {
  auto I = llvm::find_if(Processors,
                         [&](const ProcInfo &P) { return P.Name == CPU; });
  assert(I != std::end(Processors) && "Processor not found!");

  FeatureBitset Bits = I->Features;

  // Remove the 64-bit feature which we only use to validate if a CPU can
  // be used with 64-bit mode.
  Bits &= ~Feature64BIT;

  // Add the string version of all set bits.
  for (unsigned i = 0; i != CPU_FEATURE_MAX; ++i)
    if (Bits[i] && !FeatureInfos[i].Name.empty())
      EnabledFeatures.push_back(FeatureInfos[i].Name);
}

// For each feature that is (transitively) implied by this feature, set it.
static void getImpliedEnabledFeatures(FeatureBitset &Bits,
                                      const FeatureBitset &Implies) {
  // Fast path: Implies is often empty.
  if (!Implies.any())
    return;
  FeatureBitset Prev;
  Bits |= Implies;
  do {
    Prev = Bits;
    for (unsigned i = CPU_FEATURE_MAX; i;)
      if (Bits[--i])
        Bits |= FeatureInfos[i].ImpliedFeatures;
  } while (Prev != Bits);
}

/// Create bit vector of features that are implied disabled if the feature
/// passed in Value is disabled.
static void getImpliedDisabledFeatures(FeatureBitset &Bits, unsigned Value) {
  // Check all features looking for any dependent on this feature. If we find
  // one, mark it and recursively find any feature that depends on it.
  FeatureBitset Prev;
  Bits.set(Value);
  do {
    Prev = Bits;
    for (unsigned i = 0; i != CPU_FEATURE_MAX; ++i)
      if ((FeatureInfos[i].ImpliedFeatures & Bits).any())
        Bits.set(i);
  } while (Prev != Bits);
}

void llvm::X86::updateImpliedFeatures(
    StringRef Feature, bool Enabled, StringMap<bool> &Features) {
  auto I = llvm::find_if(
      FeatureInfos, [&](const FeatureInfo &FI) { return FI.Name == Feature; });
  if (I == std::end(FeatureInfos)) {
    // FIXME: This shouldn't happen, but may not have all features in the table
    // yet.
    return;
  }

  FeatureBitset ImpliedBits;
  if (Enabled)
    getImpliedEnabledFeatures(ImpliedBits, I->ImpliedFeatures);
  else
    getImpliedDisabledFeatures(ImpliedBits,
                               std::distance(std::begin(FeatureInfos), I));

  // Update the map entry for all implied features.
  for (unsigned i = 0; i != CPU_FEATURE_MAX; ++i)
    if (ImpliedBits[i] && !FeatureInfos[i].Name.empty())
      Features[FeatureInfos[i].Name] = Enabled;
}

uint64_t llvm::X86::getCpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
  // Processor features and mapping to processor feature value.
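  // Each recognized feature name maps to its FEATURE_* enumerator, which is
  // used as a bit index in the returned mask.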
  uint64_t FeaturesMask = 0;
  for (const StringRef &FeatureStr : FeatureStrs) {
    unsigned Feature = StringSwitch<unsigned>(FeatureStr)
#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) \
  .Case(STR, llvm::X86::FEATURE_##ENUM)
#include "llvm/Support/X86TargetParser.def"
        ;
    FeaturesMask |= (1ULL << Feature);
  }
  return FeaturesMask;
}

unsigned llvm::X86::getFeaturePriority(ProcessorFeatures Feat) {
#ifndef NDEBUG
  // Check that priorities are set properly in the .def file. We expect that
  // "compat" features are assigned non-duplicate consecutive priorities
  // starting from zero (0, 1, ..., num_features - 1).
#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) PRIORITY,
  unsigned Priorities[] = {
#include "llvm/Support/X86TargetParser.def"
      std::numeric_limits<unsigned>::max() // Need to consume last comma.
  };
  std::array<unsigned, std::size(Priorities) - 1> HelperList;
  std::iota(HelperList.begin(), HelperList.end(), 0);
  assert(std::is_permutation(HelperList.begin(), HelperList.end(),
                             std::begin(Priorities),
                             std::prev(std::end(Priorities))) &&
         "Priorities don't form consecutive range!");
#endif

  switch (Feat) {
#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) \
  case X86::FEATURE_##ENUM: \
    return PRIORITY;
#include "llvm/Support/X86TargetParser.def"
  default:
    llvm_unreachable("No Feature Priority for non-CPUSupports Features");
  }
}
diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td
index fa0a6bd415dc..f98916e81cee 100644
--- a/llvm/lib/Target/X86/X86.td
+++ b/llvm/lib/Target/X86/X86.td
@@ -1,1659 +1,1658 @@
//===-- X86.td - Target definition file for the Intel X86 --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a target description file for the Intel i386 architecture, referred
// to here as the "X86" architecture.
//
//===----------------------------------------------------------------------===//

// Get the target-independent interfaces which we are implementing...
// include "llvm/Target/Target.td" //===----------------------------------------------------------------------===// // X86 Subtarget state // // disregarding specific ABI / programming model def Is64Bit : SubtargetFeature<"64bit-mode", "Is64Bit", "true", "64-bit mode (x86_64)">; def Is32Bit : SubtargetFeature<"32bit-mode", "Is32Bit", "true", "32-bit mode (80386)">; def Is16Bit : SubtargetFeature<"16bit-mode", "Is16Bit", "true", "16-bit mode (i8086)">; //===----------------------------------------------------------------------===// // X86 Subtarget ISA features //===----------------------------------------------------------------------===// def FeatureX87 : SubtargetFeature<"x87","HasX87", "true", "Enable X87 float instructions">; def FeatureNOPL : SubtargetFeature<"nopl", "HasNOPL", "true", "Enable NOPL instruction (generally pentium pro+)">; def FeatureCMOV : SubtargetFeature<"cmov","HasCMOV", "true", "Enable conditional move instructions">; def FeatureCX8 : SubtargetFeature<"cx8", "HasCX8", "true", "Support CMPXCHG8B instructions">; def FeatureCRC32 : SubtargetFeature<"crc32", "HasCRC32", "true", "Enable SSE 4.2 CRC32 instruction (used when SSE4.2 is supported but function is GPR only)">; def FeaturePOPCNT : SubtargetFeature<"popcnt", "HasPOPCNT", "true", "Support POPCNT instruction">; def FeatureFXSR : SubtargetFeature<"fxsr", "HasFXSR", "true", "Support fxsave/fxrestore instructions">; def FeatureXSAVE : SubtargetFeature<"xsave", "HasXSAVE", "true", "Support xsave instructions">; def FeatureXSAVEOPT: SubtargetFeature<"xsaveopt", "HasXSAVEOPT", "true", "Support xsaveopt instructions", [FeatureXSAVE]>; def FeatureXSAVEC : SubtargetFeature<"xsavec", "HasXSAVEC", "true", "Support xsavec instructions", [FeatureXSAVE]>; def FeatureXSAVES : SubtargetFeature<"xsaves", "HasXSAVES", "true", "Support xsaves instructions", [FeatureXSAVE]>; def FeatureSSE1 : SubtargetFeature<"sse", "X86SSELevel", "SSE1", "Enable SSE instructions">; def FeatureSSE2 : SubtargetFeature<"sse2", "X86SSELevel", "SSE2", "Enable SSE2 instructions", [FeatureSSE1]>; def FeatureSSE3 : SubtargetFeature<"sse3", "X86SSELevel", "SSE3", "Enable SSE3 instructions", [FeatureSSE2]>; def FeatureSSSE3 : SubtargetFeature<"ssse3", "X86SSELevel", "SSSE3", "Enable SSSE3 instructions", [FeatureSSE3]>; def FeatureSSE41 : SubtargetFeature<"sse4.1", "X86SSELevel", "SSE41", "Enable SSE 4.1 instructions", [FeatureSSSE3]>; def FeatureSSE42 : SubtargetFeature<"sse4.2", "X86SSELevel", "SSE42", "Enable SSE 4.2 instructions", [FeatureSSE41]>; // The MMX subtarget feature is separate from the rest of the SSE features // because it's important (for odd compatibility reasons) to be able to // turn it off explicitly while allowing SSE+ to be on. def FeatureMMX : SubtargetFeature<"mmx","X863DNowLevel", "MMX", "Enable MMX instructions">; def Feature3DNow : SubtargetFeature<"3dnow", "X863DNowLevel", "ThreeDNow", "Enable 3DNow! instructions", [FeatureMMX]>; def Feature3DNowA : SubtargetFeature<"3dnowa", "X863DNowLevel", "ThreeDNowA", "Enable 3DNow! Athlon instructions", [Feature3DNow]>; // All x86-64 hardware has SSE2, but we don't mark SSE2 as an implied // feature, because SSE2 can be disabled (e.g. for compiling OS kernels) // without disabling 64-bit mode. Nothing should imply this feature bit. It // is used to enforce that only 64-bit capable CPUs are used in 64-bit mode. 
def FeatureX86_64 : SubtargetFeature<"64bit", "HasX86_64", "true", "Support 64-bit instructions">; def FeatureCX16 : SubtargetFeature<"cx16", "HasCX16", "true", "64-bit with cmpxchg16b (this is true for most x86-64 chips, but not the first AMD chips)", [FeatureCX8]>; def FeatureSSE4A : SubtargetFeature<"sse4a", "HasSSE4A", "true", "Support SSE 4a instructions", [FeatureSSE3]>; def FeatureAVX : SubtargetFeature<"avx", "X86SSELevel", "AVX", "Enable AVX instructions", [FeatureSSE42]>; def FeatureAVX2 : SubtargetFeature<"avx2", "X86SSELevel", "AVX2", "Enable AVX2 instructions", [FeatureAVX]>; def FeatureFMA : SubtargetFeature<"fma", "HasFMA", "true", "Enable three-operand fused multiple-add", [FeatureAVX]>; def FeatureF16C : SubtargetFeature<"f16c", "HasF16C", "true", "Support 16-bit floating point conversion instructions", [FeatureAVX]>; def FeatureAVX512 : SubtargetFeature<"avx512f", "X86SSELevel", "AVX512", "Enable AVX-512 instructions", [FeatureAVX2, FeatureFMA, FeatureF16C]>; def FeatureERI : SubtargetFeature<"avx512er", "HasERI", "true", "Enable AVX-512 Exponential and Reciprocal Instructions", [FeatureAVX512]>; def FeatureCDI : SubtargetFeature<"avx512cd", "HasCDI", "true", "Enable AVX-512 Conflict Detection Instructions", [FeatureAVX512]>; def FeatureVPOPCNTDQ : SubtargetFeature<"avx512vpopcntdq", "HasVPOPCNTDQ", "true", "Enable AVX-512 Population Count Instructions", [FeatureAVX512]>; def FeaturePFI : SubtargetFeature<"avx512pf", "HasPFI", "true", "Enable AVX-512 PreFetch Instructions", [FeatureAVX512]>; def FeaturePREFETCHWT1 : SubtargetFeature<"prefetchwt1", "HasPREFETCHWT1", "true", "Prefetch with Intent to Write and T1 Hint">; def FeatureDQI : SubtargetFeature<"avx512dq", "HasDQI", "true", "Enable AVX-512 Doubleword and Quadword Instructions", [FeatureAVX512]>; def FeatureBWI : SubtargetFeature<"avx512bw", "HasBWI", "true", "Enable AVX-512 Byte and Word Instructions", [FeatureAVX512]>; def FeatureVLX : SubtargetFeature<"avx512vl", "HasVLX", "true", "Enable AVX-512 Vector Length eXtensions", [FeatureAVX512]>; def FeatureVBMI : SubtargetFeature<"avx512vbmi", "HasVBMI", "true", "Enable AVX-512 Vector Byte Manipulation Instructions", [FeatureBWI]>; def FeatureVBMI2 : SubtargetFeature<"avx512vbmi2", "HasVBMI2", "true", "Enable AVX-512 further Vector Byte Manipulation Instructions", [FeatureBWI]>; def FeatureIFMA : SubtargetFeature<"avx512ifma", "HasIFMA", "true", "Enable AVX-512 Integer Fused Multiple-Add", [FeatureAVX512]>; def FeaturePKU : SubtargetFeature<"pku", "HasPKU", "true", "Enable protection keys">; def FeatureVNNI : SubtargetFeature<"avx512vnni", "HasVNNI", "true", "Enable AVX-512 Vector Neural Network Instructions", [FeatureAVX512]>; def FeatureAVXVNNI : SubtargetFeature<"avxvnni", "HasAVXVNNI", "true", "Support AVX_VNNI encoding", [FeatureAVX2]>; def FeatureBF16 : SubtargetFeature<"avx512bf16", "HasBF16", "true", "Support bfloat16 floating point", [FeatureBWI]>; def FeatureBITALG : SubtargetFeature<"avx512bitalg", "HasBITALG", "true", "Enable AVX-512 Bit Algorithms", [FeatureBWI]>; def FeatureVP2INTERSECT : SubtargetFeature<"avx512vp2intersect", "HasVP2INTERSECT", "true", "Enable AVX-512 vp2intersect", [FeatureAVX512]>; // FIXME: FP16 scalar intrinsics use the type v8f16, which is supposed to be // guarded under condition hasVLX. So we imply it in FeatureFP16 currently. // FIXME: FP16 conversion between f16 and i64 customize type v8i64, which is // supposed to be guarded under condition hasDQI. So we imply it in FeatureFP16 // currently. 
def FeatureFP16 : SubtargetFeature<"avx512fp16", "HasFP16", "true", "Support 16-bit floating point", [FeatureBWI, FeatureVLX, FeatureDQI]>; def FeaturePCLMUL : SubtargetFeature<"pclmul", "HasPCLMUL", "true", "Enable packed carry-less multiplication instructions", [FeatureSSE2]>; def FeatureGFNI : SubtargetFeature<"gfni", "HasGFNI", "true", "Enable Galois Field Arithmetic Instructions", [FeatureSSE2]>; def FeatureVPCLMULQDQ : SubtargetFeature<"vpclmulqdq", "HasVPCLMULQDQ", "true", "Enable vpclmulqdq instructions", [FeatureAVX, FeaturePCLMUL]>; def FeatureFMA4 : SubtargetFeature<"fma4", "HasFMA4", "true", "Enable four-operand fused multiple-add", [FeatureAVX, FeatureSSE4A]>; def FeatureXOP : SubtargetFeature<"xop", "HasXOP", "true", "Enable XOP instructions", [FeatureFMA4]>; def FeatureSSEUnalignedMem : SubtargetFeature<"sse-unaligned-mem", "HasSSEUnalignedMem", "true", "Allow unaligned memory operands with SSE instructions (this may require setting a configuration bit in the processor)">; def FeatureAES : SubtargetFeature<"aes", "HasAES", "true", "Enable AES instructions", [FeatureSSE2]>; def FeatureVAES : SubtargetFeature<"vaes", "HasVAES", "true", "Promote selected AES instructions to AVX512/AVX registers", [FeatureAVX, FeatureAES]>; def FeatureTBM : SubtargetFeature<"tbm", "HasTBM", "true", "Enable TBM instructions">; def FeatureLWP : SubtargetFeature<"lwp", "HasLWP", "true", "Enable LWP instructions">; def FeatureMOVBE : SubtargetFeature<"movbe", "HasMOVBE", "true", "Support MOVBE instruction">; def FeatureRDRAND : SubtargetFeature<"rdrnd", "HasRDRAND", "true", "Support RDRAND instruction">; def FeatureFSGSBase : SubtargetFeature<"fsgsbase", "HasFSGSBase", "true", "Support FS/GS Base instructions">; def FeatureLZCNT : SubtargetFeature<"lzcnt", "HasLZCNT", "true", "Support LZCNT instruction">; def FeatureBMI : SubtargetFeature<"bmi", "HasBMI", "true", "Support BMI instructions">; def FeatureBMI2 : SubtargetFeature<"bmi2", "HasBMI2", "true", "Support BMI2 instructions">; def FeatureRTM : SubtargetFeature<"rtm", "HasRTM", "true", "Support RTM instructions">; def FeatureADX : SubtargetFeature<"adx", "HasADX", "true", "Support ADX instructions">; def FeatureSHA : SubtargetFeature<"sha", "HasSHA", "true", "Enable SHA instructions", [FeatureSSE2]>; // Processor supports CET SHSTK - Control-Flow Enforcement Technology // using Shadow Stack def FeatureSHSTK : SubtargetFeature<"shstk", "HasSHSTK", "true", "Support CET Shadow-Stack instructions">; def FeaturePRFCHW : SubtargetFeature<"prfchw", "HasPRFCHW", "true", "Support PRFCHW instructions">; def FeatureRDSEED : SubtargetFeature<"rdseed", "HasRDSEED", "true", "Support RDSEED instruction">; def FeatureLAHFSAHF64 : SubtargetFeature<"sahf", "HasLAHFSAHF64", "true", "Support LAHF and SAHF instructions in 64-bit mode">; def FeatureMWAITX : SubtargetFeature<"mwaitx", "HasMWAITX", "true", "Enable MONITORX/MWAITX timer functionality">; def FeatureCLZERO : SubtargetFeature<"clzero", "HasCLZERO", "true", "Enable Cache Line Zero">; def FeatureCLDEMOTE : SubtargetFeature<"cldemote", "HasCLDEMOTE", "true", "Enable Cache Line Demote">; def FeaturePTWRITE : SubtargetFeature<"ptwrite", "HasPTWRITE", "true", "Support ptwrite instruction">; def FeatureAMXTILE : SubtargetFeature<"amx-tile", "HasAMXTILE", "true", "Support AMX-TILE instructions">; def FeatureAMXINT8 : SubtargetFeature<"amx-int8", "HasAMXINT8", "true", "Support AMX-INT8 instructions", [FeatureAMXTILE]>; def FeatureAMXBF16 : SubtargetFeature<"amx-bf16", "HasAMXBF16", "true", "Support AMX-BF16 
instructions", [FeatureAMXTILE]>; def FeatureINVPCID : SubtargetFeature<"invpcid", "HasINVPCID", "true", "Invalidate Process-Context Identifier">; def FeatureSGX : SubtargetFeature<"sgx", "HasSGX", "true", "Enable Software Guard Extensions">; def FeatureCLFLUSHOPT : SubtargetFeature<"clflushopt", "HasCLFLUSHOPT", "true", "Flush A Cache Line Optimized">; def FeatureCLWB : SubtargetFeature<"clwb", "HasCLWB", "true", "Cache Line Write Back">; def FeatureWBNOINVD : SubtargetFeature<"wbnoinvd", "HasWBNOINVD", "true", "Write Back No Invalidate">; def FeatureRDPID : SubtargetFeature<"rdpid", "HasRDPID", "true", "Support RDPID instructions">; def FeatureRDPRU : SubtargetFeature<"rdpru", "HasRDPRU", "true", "Support RDPRU instructions">; def FeatureWAITPKG : SubtargetFeature<"waitpkg", "HasWAITPKG", "true", "Wait and pause enhancements">; def FeatureENQCMD : SubtargetFeature<"enqcmd", "HasENQCMD", "true", "Has ENQCMD instructions">; def FeatureKL : SubtargetFeature<"kl", "HasKL", "true", "Support Key Locker kl Instructions", [FeatureSSE2]>; def FeatureWIDEKL : SubtargetFeature<"widekl", "HasWIDEKL", "true", "Support Key Locker wide Instructions", [FeatureKL]>; def FeatureHRESET : SubtargetFeature<"hreset", "HasHRESET", "true", "Has hreset instruction">; def FeatureSERIALIZE : SubtargetFeature<"serialize", "HasSERIALIZE", "true", "Has serialize instruction">; def FeatureTSXLDTRK : SubtargetFeature<"tsxldtrk", "HasTSXLDTRK", "true", "Support TSXLDTRK instructions">; def FeatureUINTR : SubtargetFeature<"uintr", "HasUINTR", "true", "Has UINTR Instructions">; def FeaturePCONFIG : SubtargetFeature<"pconfig", "HasPCONFIG", "true", "platform configuration instruction">; def FeatureMOVDIRI : SubtargetFeature<"movdiri", "HasMOVDIRI", "true", "Support movdiri instruction (direct store integer)">; def FeatureMOVDIR64B : SubtargetFeature<"movdir64b", "HasMOVDIR64B", "true", "Support movdir64b instruction (direct store 64 bytes)">; // Ivy Bridge and newer processors have enhanced REP MOVSB and STOSB (aka // "string operations"). See "REP String Enhancement" in the Intel Software // Development Manual. This feature essentially means that REP MOVSB will copy // using the largest available size instead of copying bytes one by one, making // it at least as fast as REPMOVS{W,D,Q}. def FeatureERMSB : SubtargetFeature< "ermsb", "HasERMSB", "true", "REP MOVS/STOS are fast">; // Icelake and newer processors have Fast Short REP MOV. def FeatureFSRM : SubtargetFeature< "fsrm", "HasFSRM", "true", "REP MOVSB of short lengths is faster">; def FeatureSoftFloat : SubtargetFeature<"soft-float", "UseSoftFloat", "true", "Use software floating point features">; //===----------------------------------------------------------------------===// // X86 Subtarget Security Mitigation features //===----------------------------------------------------------------------===// // Lower indirect calls using a special construct called a `retpoline` to // mitigate potential Spectre v2 attacks against them. def FeatureRetpolineIndirectCalls : SubtargetFeature< "retpoline-indirect-calls", "UseRetpolineIndirectCalls", "true", "Remove speculation of indirect calls from the generated code">; // Lower indirect branches and switches either using conditional branch trees // or using a special construct called a `retpoline` to mitigate potential // Spectre v2 attacks against them. 
def FeatureRetpolineIndirectBranches : SubtargetFeature< "retpoline-indirect-branches", "UseRetpolineIndirectBranches", "true", "Remove speculation of indirect branches from the generated code">; // Deprecated umbrella feature for enabling both `retpoline-indirect-calls` and // `retpoline-indirect-branches` above. def FeatureRetpoline : SubtargetFeature<"retpoline", "DeprecatedUseRetpoline", "true", "Remove speculation of indirect branches from the " "generated code, either by avoiding them entirely or " "lowering them with a speculation blocking construct", [FeatureRetpolineIndirectCalls, FeatureRetpolineIndirectBranches]>; // Rely on external thunks for the emitted retpoline calls. This allows users // to provide their own custom thunk definitions in highly specialized // environments such as a kernel that does boot-time hot patching. def FeatureRetpolineExternalThunk : SubtargetFeature< "retpoline-external-thunk", "UseRetpolineExternalThunk", "true", "When lowering an indirect call or branch using a `retpoline`, rely " "on the specified user provided thunk rather than emitting one " "ourselves. Only has effect when combined with some other retpoline " "feature", [FeatureRetpolineIndirectCalls]>; // Mitigate LVI attacks against indirect calls/branches and call returns def FeatureLVIControlFlowIntegrity : SubtargetFeature< "lvi-cfi", "UseLVIControlFlowIntegrity", "true", "Prevent indirect calls/branches from using a memory operand, and " "precede all indirect calls/branches from a register with an " "LFENCE instruction to serialize control flow. Also decompose RET " "instructions into a POP+LFENCE+JMP sequence.">; // Enable SESES to mitigate speculative execution attacks def FeatureSpeculativeExecutionSideEffectSuppression : SubtargetFeature< "seses", "UseSpeculativeExecutionSideEffectSuppression", "true", "Prevent speculative execution side channel timing attacks by " "inserting a speculation barrier before memory reads, memory writes, " "and conditional branches. Implies LVI Control Flow integrity.", [FeatureLVIControlFlowIntegrity]>; // Mitigate LVI attacks against data loads def FeatureLVILoadHardening : SubtargetFeature< "lvi-load-hardening", "UseLVILoadHardening", "true", "Insert LFENCE instructions to prevent data speculatively injected " "into loads from being used maliciously.">; def FeatureTaggedGlobals : SubtargetFeature< "tagged-globals", "AllowTaggedGlobals", "true", "Use an instruction sequence for taking the address of a global " "that allows a memory tag in the upper address bits.">; // Control codegen mitigation against Straight Line Speculation vulnerability. 
def FeatureHardenSlsRet : SubtargetFeature< "harden-sls-ret", "HardenSlsRet", "true", "Harden against straight line speculation across RET instructions.">; def FeatureHardenSlsIJmp : SubtargetFeature< "harden-sls-ijmp", "HardenSlsIJmp", "true", "Harden against straight line speculation across indirect JMP instructions.">; //===----------------------------------------------------------------------===// // X86 Subtarget Tuning features //===----------------------------------------------------------------------===// def TuningSlowSHLD : SubtargetFeature<"slow-shld", "IsSHLDSlow", "true", "SHLD instruction is slow">; def TuningSlowPMULLD : SubtargetFeature<"slow-pmulld", "IsPMULLDSlow", "true", "PMULLD instruction is slow (compared to PMULLW/PMULHW and PMULUDQ)">; def TuningSlowPMADDWD : SubtargetFeature<"slow-pmaddwd", "IsPMADDWDSlow", "true", "PMADDWD is slower than PMULLD">; // FIXME: This should not apply to CPUs that do not have SSE. def TuningSlowUAMem16 : SubtargetFeature<"slow-unaligned-mem-16", "IsUnalignedMem16Slow", "true", "Slow unaligned 16-byte memory access">; def TuningSlowUAMem32 : SubtargetFeature<"slow-unaligned-mem-32", "IsUnalignedMem32Slow", "true", "Slow unaligned 32-byte memory access">; def TuningLEAForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true", "Use LEA for adjusting the stack pointer (this is an optimization for Intel Atom processors)">; // True if 8-bit divisions are significantly faster than // 32-bit divisions and should be used when possible. def TuningSlowDivide32 : SubtargetFeature<"idivl-to-divb", "HasSlowDivide32", "true", "Use 8-bit divide for positive values less than 256">; // True if 32-bit divides are significantly faster than // 64-bit divisions and should be used when possible. def TuningSlowDivide64 : SubtargetFeature<"idivq-to-divl", "HasSlowDivide64", "true", "Use 32-bit divide for positive values less than 2^32">; def TuningPadShortFunctions : SubtargetFeature<"pad-short-functions", "PadShortFunctions", "true", "Pad short functions (to prevent a stall when returning too early)">; // On some processors, instructions that implicitly take two memory operands are // slow. In practice, this means that CALL, PUSH, and POP with memory operands // should be avoided in favor of a MOV + register CALL/PUSH/POP. def TuningSlowTwoMemOps : SubtargetFeature<"slow-two-mem-ops", "SlowTwoMemOps", "true", "Two memory operand instructions are slow">; // True if the LEA instruction inputs have to be ready at address generation // (AG) time. 
def TuningLEAUsesAG : SubtargetFeature<"lea-uses-ag", "LeaUsesAG", "true", "LEA instruction needs inputs at AG stage">; def TuningSlowLEA : SubtargetFeature<"slow-lea", "SlowLEA", "true", "LEA instruction with certain arguments is slow">; // True if the LEA instruction has all three source operands: base, index, // and offset or if the LEA instruction uses base and index registers where // the base is EBP, RBP,or R13 def TuningSlow3OpsLEA : SubtargetFeature<"slow-3ops-lea", "Slow3OpsLEA", "true", "LEA instruction with 3 ops or certain registers is slow">; // True if INC and DEC instructions are slow when writing to flags def TuningSlowIncDec : SubtargetFeature<"slow-incdec", "SlowIncDec", "true", "INC and DEC instructions are slower than ADD and SUB">; def TuningPOPCNTFalseDeps : SubtargetFeature<"false-deps-popcnt", "HasPOPCNTFalseDeps", "true", "POPCNT has a false dependency on dest register">; def TuningLZCNTFalseDeps : SubtargetFeature<"false-deps-lzcnt-tzcnt", "HasLZCNTFalseDeps", "true", "LZCNT/TZCNT have a false dependency on dest register">; def TuningMULCFalseDeps : SubtargetFeature<"false-deps-mulc", "HasMULCFalseDeps", "true", "VF[C]MULCPH/SH has a false dependency on dest register">; def TuningPERMFalseDeps : SubtargetFeature<"false-deps-perm", "HasPERMFalseDeps", "true", "VPERMD/Q/PS/PD has a false dependency on dest register">; def TuningRANGEFalseDeps : SubtargetFeature<"false-deps-range", "HasRANGEFalseDeps", "true", "VRANGEPD/PS/SD/SS has a false dependency on dest register">; def TuningGETMANTFalseDeps : SubtargetFeature<"false-deps-getmant", "HasGETMANTFalseDeps", "true", "VGETMANTSS/SD/SH and VGETMANDPS/PD(memory version) has a" " false dependency on dest register">; def TuningMULLQFalseDeps : SubtargetFeature<"false-deps-mullq", "HasMULLQFalseDeps", "true", "VPMULLQ has a false dependency on dest register">; def TuningSBBDepBreaking : SubtargetFeature<"sbb-dep-breaking", "HasSBBDepBreaking", "true", "SBB with same register has no source dependency">; // On recent X86 (port bound) processors, its preferable to combine to a single shuffle // using a variable mask over multiple fixed shuffles. def TuningFastVariableCrossLaneShuffle : SubtargetFeature<"fast-variable-crosslane-shuffle", "HasFastVariableCrossLaneShuffle", "true", "Cross-lane shuffles with variable masks are fast">; def TuningFastVariablePerLaneShuffle : SubtargetFeature<"fast-variable-perlane-shuffle", "HasFastVariablePerLaneShuffle", "true", "Per-lane shuffles with variable masks are fast">; // On some X86 processors, a vzeroupper instruction should be inserted after // using ymm/zmm registers before executing code that may use SSE instructions. def TuningInsertVZEROUPPER : SubtargetFeature<"vzeroupper", "InsertVZEROUPPER", "true", "Should insert vzeroupper instructions">; // TuningFastScalarFSQRT should be enabled if scalar FSQRT has shorter latency // than the corresponding NR code. TuningFastVectorFSQRT should be enabled if // vector FSQRT has higher throughput than the corresponding NR code. // The idea is that throughput bound code is likely to be vectorized, so for // vectorized code we should care about the throughput of SQRT operations. // But if the code is scalar that probably means that the code has some kind of // dependency and we should care more about reducing the latency. // True if hardware SQRTSS instruction is at least as fast (latency) as // RSQRTSS followed by a Newton-Raphson iteration. 
def TuningFastScalarFSQRT : SubtargetFeature<"fast-scalar-fsqrt", "HasFastScalarFSQRT", "true", "Scalar SQRT is fast (disable Newton-Raphson)">; // True if hardware SQRTPS/VSQRTPS instructions are at least as fast // (throughput) as RSQRTPS/VRSQRTPS followed by a Newton-Raphson iteration. def TuningFastVectorFSQRT : SubtargetFeature<"fast-vector-fsqrt", "HasFastVectorFSQRT", "true", "Vector SQRT is fast (disable Newton-Raphson)">; // If lzcnt has equivalent latency/throughput to most simple integer ops, it can // be used to replace test/set sequences. def TuningFastLZCNT : SubtargetFeature< "fast-lzcnt", "HasFastLZCNT", "true", "LZCNT instructions are as fast as most simple integer ops">; // If the target can efficiently decode NOPs upto 7-bytes in length. def TuningFast7ByteNOP : SubtargetFeature< "fast-7bytenop", "HasFast7ByteNOP", "true", "Target can quickly decode up to 7 byte NOPs">; // If the target can efficiently decode NOPs upto 11-bytes in length. def TuningFast11ByteNOP : SubtargetFeature< "fast-11bytenop", "HasFast11ByteNOP", "true", "Target can quickly decode up to 11 byte NOPs">; // If the target can efficiently decode NOPs upto 15-bytes in length. def TuningFast15ByteNOP : SubtargetFeature< "fast-15bytenop", "HasFast15ByteNOP", "true", "Target can quickly decode up to 15 byte NOPs">; // Sandy Bridge and newer processors can use SHLD with the same source on both // inputs to implement rotate to avoid the partial flag update of the normal // rotate instructions. def TuningFastSHLDRotate : SubtargetFeature< "fast-shld-rotate", "HasFastSHLDRotate", "true", "SHLD can be used as a faster rotate">; // Bulldozer and newer processors can merge CMP/TEST (but not other // instructions) with conditional branches. def TuningBranchFusion : SubtargetFeature<"branchfusion", "HasBranchFusion", "true", "CMP/TEST can be fused with conditional branches">; // Sandy Bridge and newer processors have many instructions that can be // fused with conditional branches and pass through the CPU as a single // operation. def TuningMacroFusion : SubtargetFeature<"macrofusion", "HasMacroFusion", "true", "Various instructions can be fused with conditional branches">; // Gather is available since Haswell (AVX2 set). So technically, we can // generate Gathers on all AVX2 processors. But the overhead on HSW is high. // Skylake Client processor has faster Gathers than HSW and performance is // similar to Skylake Server (AVX-512). def TuningFastGather : SubtargetFeature<"fast-gather", "HasFastGather", "true", "Indicates if gather is reasonably fast (this is true for Skylake client and all AVX-512 CPUs)">; def TuningPrefer128Bit : SubtargetFeature<"prefer-128-bit", "Prefer128Bit", "true", "Prefer 128-bit AVX instructions">; def TuningPrefer256Bit : SubtargetFeature<"prefer-256-bit", "Prefer256Bit", "true", "Prefer 256-bit AVX instructions">; def TuningPreferMaskRegisters : SubtargetFeature<"prefer-mask-registers", "PreferMaskRegisters", "true", "Prefer AVX512 mask registers over PTEST/MOVMSK">; def TuningFastBEXTR : SubtargetFeature<"fast-bextr", "HasFastBEXTR", "true", "Indicates that the BEXTR instruction is implemented as a single uop " "with good throughput">; // Combine vector math operations with shuffles into horizontal math // instructions if a CPU implements horizontal operations (introduced with // SSE3) with better latency/throughput than the alternative sequence. 
def TuningFastHorizontalOps : SubtargetFeature< "fast-hops", "HasFastHorizontalOps", "true", "Prefer horizontal vector math instructions (haddp, phsub, etc.) over " "normal vector instructions with shuffles">; def TuningFastScalarShiftMasks : SubtargetFeature< "fast-scalar-shift-masks", "HasFastScalarShiftMasks", "true", "Prefer a left/right scalar logical shift pair over a shift+and pair">; def TuningFastVectorShiftMasks : SubtargetFeature< "fast-vector-shift-masks", "HasFastVectorShiftMasks", "true", "Prefer a left/right vector logical shift pair over a shift+and pair">; def TuningFastMOVBE : SubtargetFeature<"fast-movbe", "HasFastMOVBE", "true", "Prefer a movbe over a single-use load + bswap / single-use bswap + store">; def TuningUseSLMArithCosts : SubtargetFeature<"use-slm-arith-costs", "UseSLMArithCosts", "true", "Use Silvermont specific arithmetic costs">; def TuningUseGLMDivSqrtCosts : SubtargetFeature<"use-glm-div-sqrt-costs", "UseGLMDivSqrtCosts", "true", "Use Goldmont specific floating point div/sqrt costs">; //===----------------------------------------------------------------------===// // X86 CPU Families // TODO: Remove these - use general tuning features to determine codegen. //===----------------------------------------------------------------------===// // Bonnell def ProcIntelAtom : SubtargetFeature<"", "IsAtom", "true", "Is Intel Atom processor">; //===----------------------------------------------------------------------===// // Register File Description //===----------------------------------------------------------------------===// include "X86RegisterInfo.td" include "X86RegisterBanks.td" //===----------------------------------------------------------------------===// // Instruction Descriptions //===----------------------------------------------------------------------===// include "X86Schedule.td" include "X86InstrInfo.td" include "X86SchedPredicates.td" def X86InstrInfo : InstrInfo; //===----------------------------------------------------------------------===// // X86 Scheduler Models //===----------------------------------------------------------------------===// include "X86ScheduleAtom.td" include "X86SchedSandyBridge.td" include "X86SchedHaswell.td" include "X86SchedBroadwell.td" include "X86ScheduleSLM.td" include "X86ScheduleZnver1.td" include "X86ScheduleZnver2.td" include "X86ScheduleZnver3.td" include "X86ScheduleBdVer2.td" include "X86ScheduleBtVer2.td" include "X86SchedSkylakeClient.td" include "X86SchedSkylakeServer.td" include "X86SchedIceLake.td" //===----------------------------------------------------------------------===// // X86 Processor Feature Lists //===----------------------------------------------------------------------===// def ProcessorFeatures { // x86-64 and x86-64-v[234] list X86_64V1Features = [ FeatureX87, FeatureCX8, FeatureCMOV, FeatureMMX, FeatureSSE2, FeatureFXSR, FeatureNOPL, FeatureX86_64, ]; list X86_64V2Features = !listconcat(X86_64V1Features, [ FeatureCX16, FeatureLAHFSAHF64, FeatureCRC32, FeaturePOPCNT, FeatureSSE42 ]); list X86_64V3Features = !listconcat(X86_64V2Features, [ FeatureAVX2, FeatureBMI, FeatureBMI2, FeatureF16C, FeatureFMA, FeatureLZCNT, FeatureMOVBE, FeatureXSAVE ]); list X86_64V4Features = !listconcat(X86_64V3Features, [ FeatureBWI, FeatureCDI, FeatureDQI, FeatureVLX, ]); // Nehalem list NHMFeatures = X86_64V2Features; list NHMTuning = [TuningMacroFusion, TuningInsertVZEROUPPER]; // Westmere list WSMAdditionalFeatures = [FeaturePCLMUL]; list WSMTuning = NHMTuning; list WSMFeatures = 
!listconcat(NHMFeatures, WSMAdditionalFeatures); // Sandybridge list SNBAdditionalFeatures = [FeatureAVX, FeatureXSAVE, FeatureXSAVEOPT]; list SNBTuning = [TuningMacroFusion, TuningSlow3OpsLEA, TuningSlowDivide64, TuningSlowUAMem32, TuningFastScalarFSQRT, TuningFastSHLDRotate, TuningFast15ByteNOP, TuningPOPCNTFalseDeps, TuningInsertVZEROUPPER]; list SNBFeatures = !listconcat(WSMFeatures, SNBAdditionalFeatures); // Ivybridge list IVBAdditionalFeatures = [FeatureRDRAND, FeatureF16C, FeatureFSGSBase]; list IVBTuning = SNBTuning; list IVBFeatures = !listconcat(SNBFeatures, IVBAdditionalFeatures); // Haswell list HSWAdditionalFeatures = [FeatureAVX2, FeatureBMI, FeatureBMI2, FeatureERMSB, FeatureFMA, FeatureINVPCID, FeatureLZCNT, FeatureMOVBE]; list HSWTuning = [TuningMacroFusion, TuningSlow3OpsLEA, TuningSlowDivide64, TuningFastScalarFSQRT, TuningFastSHLDRotate, TuningFast15ByteNOP, TuningFastVariableCrossLaneShuffle, TuningFastVariablePerLaneShuffle, TuningPOPCNTFalseDeps, TuningLZCNTFalseDeps, TuningInsertVZEROUPPER]; list HSWFeatures = !listconcat(IVBFeatures, HSWAdditionalFeatures); // Broadwell list BDWAdditionalFeatures = [FeatureADX, FeatureRDSEED, FeaturePRFCHW]; list BDWTuning = HSWTuning; list BDWFeatures = !listconcat(HSWFeatures, BDWAdditionalFeatures); // Skylake list SKLAdditionalFeatures = [FeatureAES, FeatureXSAVEC, FeatureXSAVES, FeatureCLFLUSHOPT]; list SKLTuning = [TuningFastGather, TuningMacroFusion, TuningSlow3OpsLEA, TuningSlowDivide64, TuningFastScalarFSQRT, TuningFastVectorFSQRT, TuningFastSHLDRotate, TuningFast15ByteNOP, TuningFastVariableCrossLaneShuffle, TuningFastVariablePerLaneShuffle, TuningPOPCNTFalseDeps, TuningInsertVZEROUPPER]; list SKLFeatures = !listconcat(BDWFeatures, SKLAdditionalFeatures); // Skylake-AVX512 list SKXAdditionalFeatures = [FeatureAES, FeatureXSAVEC, FeatureXSAVES, FeatureCLFLUSHOPT, FeatureAVX512, FeatureCDI, FeatureDQI, FeatureBWI, FeatureVLX, FeaturePKU, FeatureCLWB]; list SKXTuning = [TuningFastGather, TuningMacroFusion, TuningSlow3OpsLEA, TuningSlowDivide64, TuningFastScalarFSQRT, TuningFastVectorFSQRT, TuningFastSHLDRotate, TuningFast15ByteNOP, TuningFastVariableCrossLaneShuffle, TuningFastVariablePerLaneShuffle, TuningPrefer256Bit, TuningPOPCNTFalseDeps, TuningInsertVZEROUPPER]; list SKXFeatures = !listconcat(BDWFeatures, SKXAdditionalFeatures); // Cascadelake list CLXAdditionalFeatures = [FeatureVNNI]; list CLXTuning = SKXTuning; list CLXFeatures = !listconcat(SKXFeatures, CLXAdditionalFeatures); // Cooperlake list CPXAdditionalFeatures = [FeatureBF16]; list CPXTuning = SKXTuning; list CPXFeatures = !listconcat(CLXFeatures, CPXAdditionalFeatures); // Cannonlake list CNLAdditionalFeatures = [FeatureAVX512, FeatureCDI, FeatureDQI, FeatureBWI, FeatureVLX, FeaturePKU, FeatureVBMI, FeatureIFMA, FeatureSHA]; list CNLTuning = [TuningFastGather, TuningMacroFusion, TuningSlow3OpsLEA, TuningSlowDivide64, TuningFastScalarFSQRT, TuningFastVectorFSQRT, TuningFastSHLDRotate, TuningFast15ByteNOP, TuningFastVariableCrossLaneShuffle, TuningFastVariablePerLaneShuffle, TuningPrefer256Bit, TuningInsertVZEROUPPER]; list CNLFeatures = !listconcat(SKLFeatures, CNLAdditionalFeatures); // Icelake list ICLAdditionalFeatures = [FeatureBITALG, FeatureVAES, FeatureVBMI2, FeatureVNNI, FeatureVPCLMULQDQ, FeatureVPOPCNTDQ, FeatureGFNI, FeatureRDPID, FeatureFSRM]; list ICLTuning = [TuningFastGather, TuningMacroFusion, TuningSlow3OpsLEA, TuningSlowDivide64, TuningFastScalarFSQRT, TuningFastVectorFSQRT, TuningFastSHLDRotate, TuningFast15ByteNOP, 
TuningFastVariableCrossLaneShuffle, TuningFastVariablePerLaneShuffle, TuningPrefer256Bit, TuningInsertVZEROUPPER]; list ICLFeatures = !listconcat(CNLFeatures, ICLAdditionalFeatures); // Icelake Server list ICXAdditionalFeatures = [FeaturePCONFIG, FeatureCLWB, FeatureWBNOINVD]; list ICXTuning = ICLTuning; list ICXFeatures = !listconcat(ICLFeatures, ICXAdditionalFeatures); // Tigerlake list TGLAdditionalFeatures = [FeatureVP2INTERSECT, FeatureCLWB, FeatureMOVDIRI, FeatureMOVDIR64B, FeatureSHSTK]; list TGLTuning = ICLTuning; list TGLFeatures = !listconcat(ICLFeatures, TGLAdditionalFeatures ); // Sapphirerapids list SPRAdditionalFeatures = [FeatureAMXTILE, FeatureAMXINT8, FeatureAMXBF16, FeatureBF16, FeatureSERIALIZE, FeatureCLDEMOTE, FeatureWAITPKG, FeaturePTWRITE, FeatureFP16, FeatureAVXVNNI, FeatureTSXLDTRK, FeatureENQCMD, FeatureSHSTK, - FeatureVP2INTERSECT, FeatureMOVDIRI, FeatureMOVDIR64B, FeatureUINTR]; list SPRAdditionalTuning = [TuningMULCFalseDeps, TuningPERMFalseDeps, TuningRANGEFalseDeps, TuningGETMANTFalseDeps, TuningMULLQFalseDeps]; list SPRTuning = !listconcat(ICXTuning, SPRAdditionalTuning); list SPRFeatures = !listconcat(ICXFeatures, SPRAdditionalFeatures); // Atom list AtomFeatures = [FeatureX87, FeatureCX8, FeatureCMOV, FeatureMMX, FeatureSSSE3, FeatureFXSR, FeatureNOPL, FeatureX86_64, FeatureCX16, FeatureMOVBE, FeatureLAHFSAHF64]; list AtomTuning = [ProcIntelAtom, TuningSlowUAMem16, TuningLEAForSP, TuningSlowDivide32, TuningSlowDivide64, TuningSlowTwoMemOps, TuningLEAUsesAG, TuningPadShortFunctions, TuningInsertVZEROUPPER]; // Silvermont list SLMAdditionalFeatures = [FeatureSSE42, FeatureCRC32, FeaturePOPCNT, FeaturePCLMUL, FeaturePRFCHW, FeatureRDRAND]; list SLMTuning = [TuningUseSLMArithCosts, TuningSlowTwoMemOps, TuningSlowLEA, TuningSlowIncDec, TuningSlowDivide64, TuningSlowPMULLD, TuningFast7ByteNOP, TuningFastMOVBE, TuningPOPCNTFalseDeps, TuningInsertVZEROUPPER]; list SLMFeatures = !listconcat(AtomFeatures, SLMAdditionalFeatures); // Goldmont list GLMAdditionalFeatures = [FeatureAES, FeatureSHA, FeatureRDSEED, FeatureXSAVE, FeatureXSAVEOPT, FeatureXSAVEC, FeatureXSAVES, FeatureCLFLUSHOPT, FeatureFSGSBase]; list GLMTuning = [TuningUseGLMDivSqrtCosts, TuningSlowTwoMemOps, TuningSlowLEA, TuningSlowIncDec, TuningFastMOVBE, TuningPOPCNTFalseDeps, TuningInsertVZEROUPPER]; list GLMFeatures = !listconcat(SLMFeatures, GLMAdditionalFeatures); // Goldmont Plus list GLPAdditionalFeatures = [FeaturePTWRITE, FeatureRDPID]; list GLPTuning = [TuningUseGLMDivSqrtCosts, TuningSlowTwoMemOps, TuningSlowLEA, TuningSlowIncDec, TuningFastMOVBE, TuningInsertVZEROUPPER]; list GLPFeatures = !listconcat(GLMFeatures, GLPAdditionalFeatures); // Tremont list TRMAdditionalFeatures = [FeatureCLWB, FeatureGFNI]; list TRMTuning = GLPTuning; list TRMFeatures = !listconcat(GLPFeatures, TRMAdditionalFeatures); // Alderlake list ADLAdditionalFeatures = [FeatureSERIALIZE, FeaturePCONFIG, FeatureSHSTK, FeatureWIDEKL, FeatureINVPCID, FeatureADX, FeatureFMA, FeatureVAES, FeatureVPCLMULQDQ, FeatureF16C, FeatureBMI, FeatureBMI2, FeatureLZCNT, FeatureAVXVNNI, FeaturePKU, FeatureHRESET, FeatureCLDEMOTE, FeatureMOVDIRI, FeatureMOVDIR64B, FeatureWAITPKG]; list ADLAdditionalTuning = [TuningPERMFalseDeps]; list ADLTuning = !listconcat(SKLTuning, ADLAdditionalTuning); list ADLFeatures = !listconcat(TRMFeatures, ADLAdditionalFeatures); // Knights Landing list KNLFeatures = [FeatureX87, FeatureCX8, FeatureCMOV, FeatureMMX, FeatureFXSR, FeatureNOPL, FeatureX86_64, FeatureCX16, FeatureCRC32, FeaturePOPCNT, FeaturePCLMUL, 
FeatureXSAVE, FeatureXSAVEOPT, FeatureLAHFSAHF64, FeatureAES, FeatureRDRAND, FeatureF16C, FeatureFSGSBase, FeatureAVX512, FeatureERI, FeatureCDI, FeaturePFI, FeaturePREFETCHWT1, FeatureADX, FeatureRDSEED, FeatureMOVBE, FeatureLZCNT, FeatureBMI, FeatureBMI2, FeatureFMA, FeaturePRFCHW]; list KNLTuning = [TuningSlowDivide64, TuningSlow3OpsLEA, TuningSlowIncDec, TuningSlowTwoMemOps, TuningPreferMaskRegisters, TuningFastGather, TuningFastMOVBE, TuningSlowPMADDWD]; // TODO Add AVX5124FMAPS/AVX5124VNNIW features list KNMFeatures = !listconcat(KNLFeatures, [FeatureVPOPCNTDQ]); // Barcelona list BarcelonaFeatures = [FeatureX87, FeatureCX8, FeatureSSE4A, Feature3DNowA, FeatureFXSR, FeatureNOPL, FeatureCX16, FeaturePRFCHW, FeatureLZCNT, FeaturePOPCNT, FeatureLAHFSAHF64, FeatureCMOV, FeatureX86_64]; list BarcelonaTuning = [TuningFastScalarShiftMasks, TuningSlowSHLD, TuningSBBDepBreaking, TuningInsertVZEROUPPER]; // Bobcat list BtVer1Features = [FeatureX87, FeatureCX8, FeatureCMOV, FeatureMMX, FeatureSSSE3, FeatureSSE4A, FeatureFXSR, FeatureNOPL, FeatureX86_64, FeatureCX16, FeaturePRFCHW, FeatureLZCNT, FeaturePOPCNT, FeatureLAHFSAHF64]; list BtVer1Tuning = [TuningFast15ByteNOP, TuningFastScalarShiftMasks, TuningFastVectorShiftMasks, TuningSlowSHLD, TuningSBBDepBreaking, TuningInsertVZEROUPPER]; // Jaguar list BtVer2AdditionalFeatures = [FeatureAVX, FeatureAES, FeatureCRC32, FeaturePCLMUL, FeatureBMI, FeatureF16C, FeatureMOVBE, FeatureXSAVE, FeatureXSAVEOPT]; list BtVer2Tuning = [TuningFastLZCNT, TuningFastBEXTR, TuningFastHorizontalOps, TuningFast15ByteNOP, TuningFastScalarShiftMasks, TuningFastVectorShiftMasks, TuningFastMOVBE, TuningSBBDepBreaking, TuningSlowSHLD]; list BtVer2Features = !listconcat(BtVer1Features, BtVer2AdditionalFeatures); // Bulldozer list BdVer1Features = [FeatureX87, FeatureCX8, FeatureCMOV, FeatureXOP, FeatureX86_64, FeatureCX16, FeatureAES, FeatureCRC32, FeaturePRFCHW, FeaturePCLMUL, FeatureMMX, FeatureFXSR, FeatureNOPL, FeatureLZCNT, FeaturePOPCNT, FeatureXSAVE, FeatureLWP, FeatureLAHFSAHF64]; list BdVer1Tuning = [TuningSlowSHLD, TuningFast11ByteNOP, TuningFastScalarShiftMasks, TuningBranchFusion, TuningSBBDepBreaking, TuningInsertVZEROUPPER]; // PileDriver list BdVer2AdditionalFeatures = [FeatureF16C, FeatureBMI, FeatureTBM, FeatureFMA]; list BdVer2AdditionalTuning = [TuningFastBEXTR, TuningFastMOVBE]; list BdVer2Tuning = !listconcat(BdVer1Tuning, BdVer2AdditionalTuning); list BdVer2Features = !listconcat(BdVer1Features, BdVer2AdditionalFeatures); // Steamroller list BdVer3AdditionalFeatures = [FeatureXSAVEOPT, FeatureFSGSBase]; list BdVer3Tuning = BdVer2Tuning; list BdVer3Features = !listconcat(BdVer2Features, BdVer3AdditionalFeatures); // Excavator list BdVer4AdditionalFeatures = [FeatureAVX2, FeatureBMI2, FeatureMOVBE, FeatureRDRAND, FeatureMWAITX]; list BdVer4Tuning = BdVer3Tuning; list BdVer4Features = !listconcat(BdVer3Features, BdVer4AdditionalFeatures); // AMD Zen Processors common ISAs list ZNFeatures = [FeatureADX, FeatureAES, FeatureAVX2, FeatureBMI, FeatureBMI2, FeatureCLFLUSHOPT, FeatureCLZERO, FeatureCMOV, FeatureX86_64, FeatureCX16, FeatureCRC32, FeatureF16C, FeatureFMA, FeatureFSGSBase, FeatureFXSR, FeatureNOPL, FeatureLAHFSAHF64, FeatureLZCNT, FeatureMMX, FeatureMOVBE, FeatureMWAITX, FeaturePCLMUL, FeaturePOPCNT, FeaturePRFCHW, FeatureRDRAND, FeatureRDSEED, FeatureSHA, FeatureSSE4A, FeatureX87, FeatureXSAVE, FeatureXSAVEC, FeatureXSAVEOPT, FeatureXSAVES]; list ZNTuning = [TuningFastLZCNT, TuningFastBEXTR, TuningFast15ByteNOP, TuningBranchFusion, 
TuningFastScalarFSQRT, TuningFastVectorFSQRT, TuningFastScalarShiftMasks, TuningFastVariablePerLaneShuffle, TuningFastMOVBE, TuningSlowSHLD, TuningSBBDepBreaking, TuningInsertVZEROUPPER]; list ZN2AdditionalFeatures = [FeatureCLWB, FeatureRDPID, FeatureRDPRU, FeatureWBNOINVD]; list ZN2Tuning = ZNTuning; list ZN2Features = !listconcat(ZNFeatures, ZN2AdditionalFeatures); list ZN3AdditionalFeatures = [FeatureFSRM, FeatureINVPCID, FeaturePKU, FeatureVAES, FeatureVPCLMULQDQ]; list ZN3AdditionalTuning = [TuningMacroFusion]; list ZN3Tuning = !listconcat(ZN2Tuning, ZN3AdditionalTuning); list ZN3Features = !listconcat(ZN2Features, ZN3AdditionalFeatures); } //===----------------------------------------------------------------------===// // X86 processors supported. //===----------------------------------------------------------------------===// class Proc Features, list TuneFeatures> : ProcessorModel; class ProcModel Features, list TuneFeatures> : ProcessorModel; // NOTE: CMPXCHG8B is here for legacy compatibility so that it is only disabled // if i386/i486 is specifically requested. // NOTE: 64Bit is here as "generic" is the default llc CPU. The X86Subtarget // constructor checks that any CPU used in 64-bit mode has FeatureX86_64 // enabled. It has no effect on code generation. // NOTE: As a default tuning, "generic" aims to produce code optimized for the // most common X86 processors. The tunings might be changed over time. It is // recommended to use "tune-cpu"="x86-64" in function attribute for consistency. def : ProcModel<"generic", SandyBridgeModel, [FeatureX87, FeatureCX8, FeatureX86_64], [TuningSlow3OpsLEA, TuningSlowDivide64, TuningMacroFusion, TuningFastScalarFSQRT, TuningFast15ByteNOP, TuningInsertVZEROUPPER]>; def : Proc<"i386", [FeatureX87], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"i486", [FeatureX87], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"i586", [FeatureX87, FeatureCX8], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"pentium", [FeatureX87, FeatureCX8], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"pentium-mmx", [FeatureX87, FeatureCX8, FeatureMMX], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"i686", [FeatureX87, FeatureCX8, FeatureCMOV], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"pentiumpro", [FeatureX87, FeatureCX8, FeatureCMOV, FeatureNOPL], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"pentium2", [FeatureX87, FeatureCX8, FeatureMMX, FeatureCMOV, FeatureFXSR, FeatureNOPL], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; foreach P = ["pentium3", "pentium3m"] in { def : Proc; } // Enable the PostRAScheduler for SSE2 and SSE3 class cpus. // The intent is to enable it for pentium4 which is the current default // processor in a vanilla 32-bit clang compilation when no specific // architecture is specified. This generally gives a nice performance // increase on silvermont, with largely neutral behavior on other // contemporary large core processors. // pentium-m, pentium4m, prescott and nocona are included as a preventative // measure to avoid performance surprises, in case clang's default cpu // changes slightly. def : ProcModel<"pentium-m", GenericPostRAModel, [FeatureX87, FeatureCX8, FeatureMMX, FeatureSSE2, FeatureFXSR, FeatureNOPL, FeatureCMOV], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; foreach P = ["pentium4", "pentium4m"] in { def : ProcModel; } // Intel Quark. def : Proc<"lakemont", [FeatureCX8], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; // Intel Core Duo. 
def : ProcModel<"yonah", SandyBridgeModel, [FeatureX87, FeatureCX8, FeatureMMX, FeatureSSE3, FeatureFXSR, FeatureNOPL, FeatureCMOV], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; // NetBurst. def : ProcModel<"prescott", GenericPostRAModel, [FeatureX87, FeatureCX8, FeatureMMX, FeatureSSE3, FeatureFXSR, FeatureNOPL, FeatureCMOV], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : ProcModel<"nocona", GenericPostRAModel, [ FeatureX87, FeatureCX8, FeatureCMOV, FeatureMMX, FeatureSSE3, FeatureFXSR, FeatureNOPL, FeatureX86_64, FeatureCX16, ], [ TuningSlowUAMem16, TuningInsertVZEROUPPER ]>; // Intel Core 2 Solo/Duo. def : ProcModel<"core2", SandyBridgeModel, [ FeatureX87, FeatureCX8, FeatureCMOV, FeatureMMX, FeatureSSSE3, FeatureFXSR, FeatureNOPL, FeatureX86_64, FeatureCX16, FeatureLAHFSAHF64 ], [ TuningMacroFusion, TuningSlowUAMem16, TuningInsertVZEROUPPER ]>; def : ProcModel<"penryn", SandyBridgeModel, [ FeatureX87, FeatureCX8, FeatureCMOV, FeatureMMX, FeatureSSE41, FeatureFXSR, FeatureNOPL, FeatureX86_64, FeatureCX16, FeatureLAHFSAHF64 ], [ TuningMacroFusion, TuningSlowUAMem16, TuningInsertVZEROUPPER ]>; // Atom CPUs. foreach P = ["bonnell", "atom"] in { def : ProcModel; } foreach P = ["silvermont", "slm"] in { def : ProcModel; } def : ProcModel<"goldmont", SLMModel, ProcessorFeatures.GLMFeatures, ProcessorFeatures.GLMTuning>; def : ProcModel<"goldmont-plus", SLMModel, ProcessorFeatures.GLPFeatures, ProcessorFeatures.GLPTuning>; def : ProcModel<"tremont", SLMModel, ProcessorFeatures.TRMFeatures, ProcessorFeatures.TRMTuning>; // "Arrandale" along with corei3 and corei5 foreach P = ["nehalem", "corei7"] in { def : ProcModel; } // Westmere is the corei3/i5/i7 path from nehalem to sandybridge def : ProcModel<"westmere", SandyBridgeModel, ProcessorFeatures.WSMFeatures, ProcessorFeatures.WSMTuning>; foreach P = ["sandybridge", "corei7-avx"] in { def : ProcModel; } foreach P = ["ivybridge", "core-avx-i"] in { def : ProcModel; } foreach P = ["haswell", "core-avx2"] in { def : ProcModel; } def : ProcModel<"broadwell", BroadwellModel, ProcessorFeatures.BDWFeatures, ProcessorFeatures.BDWTuning>; def : ProcModel<"skylake", SkylakeClientModel, ProcessorFeatures.SKLFeatures, ProcessorFeatures.SKLTuning>; // FIXME: define KNL scheduler model def : ProcModel<"knl", HaswellModel, ProcessorFeatures.KNLFeatures, ProcessorFeatures.KNLTuning>; def : ProcModel<"knm", HaswellModel, ProcessorFeatures.KNMFeatures, ProcessorFeatures.KNLTuning>; foreach P = ["skylake-avx512", "skx"] in { def : ProcModel; } def : ProcModel<"cascadelake", SkylakeServerModel, ProcessorFeatures.CLXFeatures, ProcessorFeatures.CLXTuning>; def : ProcModel<"cooperlake", SkylakeServerModel, ProcessorFeatures.CPXFeatures, ProcessorFeatures.CPXTuning>; def : ProcModel<"cannonlake", SkylakeServerModel, ProcessorFeatures.CNLFeatures, ProcessorFeatures.CNLTuning>; def : ProcModel<"icelake-client", IceLakeModel, ProcessorFeatures.ICLFeatures, ProcessorFeatures.ICLTuning>; def : ProcModel<"rocketlake", IceLakeModel, ProcessorFeatures.ICLFeatures, ProcessorFeatures.ICLTuning>; def : ProcModel<"icelake-server", IceLakeModel, ProcessorFeatures.ICXFeatures, ProcessorFeatures.ICXTuning>; def : ProcModel<"tigerlake", IceLakeModel, ProcessorFeatures.TGLFeatures, ProcessorFeatures.TGLTuning>; def : ProcModel<"sapphirerapids", SkylakeServerModel, ProcessorFeatures.SPRFeatures, ProcessorFeatures.SPRTuning>; def : ProcModel<"alderlake", SkylakeClientModel, ProcessorFeatures.ADLFeatures, ProcessorFeatures.ADLTuning>; // AMD CPUs. 
def : Proc<"k6", [FeatureX87, FeatureCX8, FeatureMMX], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"k6-2", [FeatureX87, FeatureCX8, Feature3DNow], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"k6-3", [FeatureX87, FeatureCX8, Feature3DNow], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; foreach P = ["athlon", "athlon-tbird"] in { def : Proc; } foreach P = ["athlon-4", "athlon-xp", "athlon-mp"] in { def : Proc; } foreach P = ["k8", "opteron", "athlon64", "athlon-fx"] in { def : Proc; } foreach P = ["k8-sse3", "opteron-sse3", "athlon64-sse3"] in { def : Proc; } foreach P = ["amdfam10", "barcelona"] in { def : Proc; } // Bobcat def : Proc<"btver1", ProcessorFeatures.BtVer1Features, ProcessorFeatures.BtVer1Tuning>; // Jaguar def : ProcModel<"btver2", BtVer2Model, ProcessorFeatures.BtVer2Features, ProcessorFeatures.BtVer2Tuning>; // Bulldozer def : ProcModel<"bdver1", BdVer2Model, ProcessorFeatures.BdVer1Features, ProcessorFeatures.BdVer1Tuning>; // Piledriver def : ProcModel<"bdver2", BdVer2Model, ProcessorFeatures.BdVer2Features, ProcessorFeatures.BdVer2Tuning>; // Steamroller def : Proc<"bdver3", ProcessorFeatures.BdVer3Features, ProcessorFeatures.BdVer3Tuning>; // Excavator def : Proc<"bdver4", ProcessorFeatures.BdVer4Features, ProcessorFeatures.BdVer4Tuning>; def : ProcModel<"znver1", Znver1Model, ProcessorFeatures.ZNFeatures, ProcessorFeatures.ZNTuning>; def : ProcModel<"znver2", Znver2Model, ProcessorFeatures.ZN2Features, ProcessorFeatures.ZN2Tuning>; def : ProcModel<"znver3", Znver3Model, ProcessorFeatures.ZN3Features, ProcessorFeatures.ZN3Tuning>; def : Proc<"geode", [FeatureX87, FeatureCX8, Feature3DNowA], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"winchip-c6", [FeatureX87, FeatureMMX], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"winchip2", [FeatureX87, Feature3DNow], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"c3", [FeatureX87, Feature3DNow], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; def : Proc<"c3-2", [FeatureX87, FeatureCX8, FeatureMMX, FeatureSSE1, FeatureFXSR, FeatureCMOV], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; // We also provide a generic 64-bit specific x86 processor model which tries to // be good for modern chips without enabling instruction set encodings past the // basic SSE2 and 64-bit ones. It disables slow things from any mainstream and // modern 64-bit x86 chip, and enables features that are generally beneficial. // // We currently use the Sandy Bridge model as the default scheduling model as // we use it across Nehalem, Westmere, Sandy Bridge, and Ivy Bridge which // covers a huge swath of x86 processors. If there are specific scheduling // knobs which need to be tuned differently for AMD chips, we might consider // forming a common base for them. def : ProcModel<"x86-64", SandyBridgeModel, ProcessorFeatures.X86_64V1Features, [ TuningSlow3OpsLEA, TuningSlowDivide64, TuningSlowIncDec, TuningMacroFusion, TuningInsertVZEROUPPER ]>; // x86-64 micro-architecture levels. def : ProcModel<"x86-64-v2", SandyBridgeModel, ProcessorFeatures.X86_64V2Features, ProcessorFeatures.SNBTuning>; // Close to Haswell. def : ProcModel<"x86-64-v3", HaswellModel, ProcessorFeatures.X86_64V3Features, ProcessorFeatures.HSWTuning>; // Close to the AVX-512 level implemented by Xeon Scalable Processors. 
def : ProcModel<"x86-64-v4", SkylakeServerModel, ProcessorFeatures.X86_64V4Features, ProcessorFeatures.SKXTuning>; //===----------------------------------------------------------------------===// // Calling Conventions //===----------------------------------------------------------------------===// include "X86CallingConv.td" //===----------------------------------------------------------------------===// // Assembly Parser //===----------------------------------------------------------------------===// def ATTAsmParserVariant : AsmParserVariant { int Variant = 0; // Variant name. string Name = "att"; // Discard comments in assembly strings. string CommentDelimiter = "#"; // Recognize hard coded registers. string RegisterPrefix = "%"; } def IntelAsmParserVariant : AsmParserVariant { int Variant = 1; // Variant name. string Name = "intel"; // Discard comments in assembly strings. string CommentDelimiter = ";"; // Recognize hard coded registers. string RegisterPrefix = ""; } //===----------------------------------------------------------------------===// // Assembly Printers //===----------------------------------------------------------------------===// // The X86 target supports two different syntaxes for emitting machine code. // This is controlled by the -x86-asm-syntax={att|intel} def ATTAsmWriter : AsmWriter { string AsmWriterClassName = "ATTInstPrinter"; int Variant = 0; } def IntelAsmWriter : AsmWriter { string AsmWriterClassName = "IntelInstPrinter"; int Variant = 1; } def X86 : Target { // Information about the instructions... let InstructionSet = X86InstrInfo; let AssemblyParserVariants = [ATTAsmParserVariant, IntelAsmParserVariant]; let AssemblyWriters = [ATTAsmWriter, IntelAsmWriter]; let AllowRegisterRenaming = 1; } //===----------------------------------------------------------------------===// // Pfm Counters //===----------------------------------------------------------------------===// include "X86PfmCounters.td" diff --git a/llvm/lib/Transforms/Utils/LoopVersioning.cpp b/llvm/lib/Transforms/Utils/LoopVersioning.cpp index 97f29527bb95..6309eed7963d 100644 --- a/llvm/lib/Transforms/Utils/LoopVersioning.cpp +++ b/llvm/lib/Transforms/Utils/LoopVersioning.cpp @@ -1,366 +1,368 @@ //===- LoopVersioning.cpp - Utility to version a loop ---------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines a utility class to perform loop versioning. The versioned // loop speculates that otherwise may-aliasing memory accesses don't overlap and // emits checks to prove this. 
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

using namespace llvm;

static cl::opt<bool>
    AnnotateNoAlias("loop-version-annotate-no-alias", cl::init(true),
                    cl::Hidden,
                    cl::desc("Add no-alias annotation for instructions that "
                             "are disambiguated by memchecks"));

LoopVersioning::LoopVersioning(const LoopAccessInfo &LAI,
                               ArrayRef<RuntimePointerCheck> Checks, Loop *L,
                               LoopInfo *LI, DominatorTree *DT,
                               ScalarEvolution *SE)
    : VersionedLoop(L), AliasChecks(Checks.begin(), Checks.end()),
      Preds(LAI.getPSE().getPredicate()), LAI(LAI), LI(LI), DT(DT),
      SE(SE) {
}

void LoopVersioning::versionLoop(
    const SmallVectorImpl<Instruction *> &DefsUsedOutside) {
  assert(VersionedLoop->getUniqueExitBlock() && "No single exit block");
  assert(VersionedLoop->isLoopSimplifyForm() &&
         "Loop is not in loop-simplify form");

  Value *MemRuntimeCheck;
  Value *SCEVRuntimeCheck;
  Value *RuntimeCheck = nullptr;

  // Add the memcheck in the original preheader (this is empty initially).
  BasicBlock *RuntimeCheckBB = VersionedLoop->getLoopPreheader();
  const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();

  SCEVExpander Exp2(*RtPtrChecking.getSE(),
                    VersionedLoop->getHeader()->getModule()->getDataLayout(),
                    "induction");
  MemRuntimeCheck = addRuntimeChecks(RuntimeCheckBB->getTerminator(),
                                     VersionedLoop, AliasChecks, Exp2);

  SCEVExpander Exp(*SE, RuntimeCheckBB->getModule()->getDataLayout(),
                   "scev.check");
  SCEVRuntimeCheck =
      Exp.expandCodeForPredicate(&Preds, RuntimeCheckBB->getTerminator());

  IRBuilder<InstSimplifyFolder> Builder(
      RuntimeCheckBB->getContext(),
      InstSimplifyFolder(RuntimeCheckBB->getModule()->getDataLayout()));
  if (MemRuntimeCheck && SCEVRuntimeCheck) {
    Builder.SetInsertPoint(RuntimeCheckBB->getTerminator());
    RuntimeCheck =
        Builder.CreateOr(MemRuntimeCheck, SCEVRuntimeCheck, "lver.safe");
  } else
    RuntimeCheck = MemRuntimeCheck ? MemRuntimeCheck : SCEVRuntimeCheck;

  assert(RuntimeCheck && "called even though we don't need "
                         "any runtime checks");

  // Rename the block to make the IR more readable.
  RuntimeCheckBB->setName(VersionedLoop->getHeader()->getName() +
                          ".lver.check");

  // Create empty preheader for the loop (and after cloning for the
  // non-versioned loop).
  BasicBlock *PH =
      SplitBlock(RuntimeCheckBB, RuntimeCheckBB->getTerminator(), DT, LI,
                 nullptr, VersionedLoop->getHeader()->getName() + ".ph");

  // Clone the loop including the preheader.
  //
  // FIXME: This does not currently preserve SimplifyLoop because the exit
  // block is a join between the two loops.
  SmallVector<BasicBlock *, 8> NonVersionedLoopBlocks;
  NonVersionedLoop =
      cloneLoopWithPreheader(PH, RuntimeCheckBB, VersionedLoop, VMap,
                             ".lver.orig", LI, DT, NonVersionedLoopBlocks);
  remapInstructionsInBlocks(NonVersionedLoopBlocks, VMap);

  // Insert the conditional branch based on the result of the memchecks.
  Instruction *OrigTerm = RuntimeCheckBB->getTerminator();
  Builder.SetInsertPoint(OrigTerm);
  Builder.CreateCondBr(RuntimeCheck, NonVersionedLoop->getLoopPreheader(),
                       VersionedLoop->getLoopPreheader());
  OrigTerm->eraseFromParent();

  // The loops merge in the original exit block.  This is now dominated by the
  // memchecking block.
  DT->changeImmediateDominator(VersionedLoop->getExitBlock(), RuntimeCheckBB);

  // Adds the necessary PHI nodes for the versioned loops based on the
  // loop-defined values used outside of the loop.
  addPHINodes(DefsUsedOutside);
  formDedicatedExitBlocks(NonVersionedLoop, DT, LI, nullptr, true);
  formDedicatedExitBlocks(VersionedLoop, DT, LI, nullptr, true);
  assert(NonVersionedLoop->isLoopSimplifyForm() &&
         VersionedLoop->isLoopSimplifyForm() &&
         "The versioned loops should be in simplify form.");
}

void LoopVersioning::addPHINodes(
    const SmallVectorImpl<Instruction *> &DefsUsedOutside) {
  BasicBlock *PHIBlock = VersionedLoop->getExitBlock();
  assert(PHIBlock && "No single successor to loop exit block");
  PHINode *PN;

  // First add a single-operand PHI for each DefsUsedOutside if one does not
  // exist yet.
  for (auto *Inst : DefsUsedOutside) {
    // See if we have a single-operand PHI with the value defined by the
    // original loop.
    for (auto I = PHIBlock->begin(); (PN = dyn_cast<PHINode>(I)); ++I) {
-      if (PN->getIncomingValue(0) == Inst)
+      if (PN->getIncomingValue(0) == Inst) {
+        SE->forgetValue(PN);
        break;
+      }
    }
    // If not create it.
    if (!PN) {
      PN = PHINode::Create(Inst->getType(), 2, Inst->getName() + ".lver",
                           &PHIBlock->front());
      SmallVector<User *, 8> UsersToUpdate;
      for (User *U : Inst->users())
        if (!VersionedLoop->contains(cast<Instruction>(U)->getParent()))
          UsersToUpdate.push_back(U);
      for (User *U : UsersToUpdate)
        U->replaceUsesOfWith(Inst, PN);
      PN->addIncoming(Inst, VersionedLoop->getExitingBlock());
    }
  }

  // Then for each PHI add the operand for the edge from the cloned loop.
  for (auto I = PHIBlock->begin(); (PN = dyn_cast<PHINode>(I)); ++I) {
    assert(PN->getNumOperands() == 1 &&
           "Exit block should only have one predecessor");

    // If the definition was cloned, use that, otherwise use the same value.
    Value *ClonedValue = PN->getIncomingValue(0);
    auto Mapped = VMap.find(ClonedValue);
    if (Mapped != VMap.end())
      ClonedValue = Mapped->second;

    PN->addIncoming(ClonedValue, NonVersionedLoop->getExitingBlock());
  }
}

void LoopVersioning::prepareNoAliasMetadata() {
  // We need to turn the no-alias relation between pointer checking groups into
  // no-aliasing annotations between instructions.
  //
  // We accomplish this by mapping each pointer checking group (a set of
  // pointers memchecked together) to an alias scope and then also mapping each
  // group to the list of scopes it can't alias.

  const RuntimePointerChecking *RtPtrChecking = LAI.getRuntimePointerChecking();
  LLVMContext &Context = VersionedLoop->getHeader()->getContext();

  // First allocate an aliasing scope for each pointer checking group.
  //
  // While traversing through the checking groups in the loop, also create a
  // reverse map from pointers to the pointer checking group they were assigned
  // to.
  MDBuilder MDB(Context);
  MDNode *Domain = MDB.createAnonymousAliasScopeDomain("LVerDomain");

  for (const auto &Group : RtPtrChecking->CheckingGroups) {
    GroupToScope[&Group] = MDB.createAnonymousAliasScope(Domain);

    for (unsigned PtrIdx : Group.Members)
      PtrToGroup[RtPtrChecking->getPointerInfo(PtrIdx).PointerValue] = &Group;
  }

  // Go through the checks and for each pointer group, collect the scopes for
  // each non-aliasing pointer group.
  DenseMap<const RuntimeCheckingPtrGroup *, SmallVector<Metadata *, 4>>
      GroupToNonAliasingScopes;

  for (const auto &Check : AliasChecks)
    GroupToNonAliasingScopes[Check.first].push_back(GroupToScope[Check.second]);

  // Finally, transform the above to actually map to scope list which is what
  // the metadata uses.

  for (auto Pair : GroupToNonAliasingScopes)
    GroupToNonAliasingScopeList[Pair.first] = MDNode::get(Context, Pair.second);
}

void LoopVersioning::annotateLoopWithNoAlias() {
  if (!AnnotateNoAlias)
    return;

  // First prepare the maps.
  prepareNoAliasMetadata();

  // Add the scope and no-alias metadata to the instructions.
  for (Instruction *I : LAI.getDepChecker().getMemoryInstructions()) {
    annotateInstWithNoAlias(I);
  }
}

void LoopVersioning::annotateInstWithNoAlias(Instruction *VersionedInst,
                                             const Instruction *OrigInst) {
  if (!AnnotateNoAlias)
    return;

  LLVMContext &Context = VersionedLoop->getHeader()->getContext();
  const Value *Ptr = isa<LoadInst>(OrigInst)
                         ? cast<LoadInst>(OrigInst)->getPointerOperand()
                         : cast<StoreInst>(OrigInst)->getPointerOperand();

  // Find the group for the pointer and then add the scope metadata.
  auto Group = PtrToGroup.find(Ptr);
  if (Group != PtrToGroup.end()) {
    VersionedInst->setMetadata(
        LLVMContext::MD_alias_scope,
        MDNode::concatenate(
            VersionedInst->getMetadata(LLVMContext::MD_alias_scope),
            MDNode::get(Context, GroupToScope[Group->second])));

    // Add the no-alias metadata.
    auto NonAliasingScopeList = GroupToNonAliasingScopeList.find(Group->second);
    if (NonAliasingScopeList != GroupToNonAliasingScopeList.end())
      VersionedInst->setMetadata(
          LLVMContext::MD_noalias,
          MDNode::concatenate(
              VersionedInst->getMetadata(LLVMContext::MD_noalias),
              NonAliasingScopeList->second));
  }
}

namespace {
bool runImpl(LoopInfo *LI, function_ref<const LoopAccessInfo &(Loop &)> GetLAA,
             DominatorTree *DT, ScalarEvolution *SE) {
  // Build up a worklist of inner-loops to version. This is necessary as the
  // act of versioning a loop creates new loops and can invalidate iterators
  // across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop))
      // We only handle inner-most loops.
      if (L->isInnermost())
        Worklist.push_back(L);

  // Now walk the identified inner loops.
  bool Changed = false;
  for (Loop *L : Worklist) {
    if (!L->isLoopSimplifyForm() || !L->isRotatedForm() ||
        !L->getExitingBlock())
      continue;
    const LoopAccessInfo &LAI = GetLAA(*L);
    if (!LAI.hasConvergentOp() &&
        (LAI.getNumRuntimePointerChecks() ||
         !LAI.getPSE().getPredicate().isAlwaysTrue())) {
      LoopVersioning LVer(LAI, LAI.getRuntimePointerChecking()->getChecks(), L,
                          LI, DT, SE);
      LVer.versionLoop();
      LVer.annotateLoopWithNoAlias();
      Changed = true;
    }
  }

  return Changed;
}

/// Also expose this as a pass.  Currently this is only used for
/// unit-testing.  It adds all memchecks necessary to remove all may-aliasing
/// array accesses from the loop.
class LoopVersioningLegacyPass : public FunctionPass {
public:
  LoopVersioningLegacyPass() : FunctionPass(ID) {
    initializeLoopVersioningLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto GetLAA = [&](Loop &L) -> const LoopAccessInfo & {
      return getAnalysis<LoopAccessLegacyAnalysis>().getInfo(&L);
    };

    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();

    return runImpl(LI, GetLAA, DT, SE);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
  }

  static char ID;
};
}

#define LVER_OPTION "loop-versioning"
#define DEBUG_TYPE LVER_OPTION

char LoopVersioningLegacyPass::ID;
static const char LVer_name[] = "Loop Versioning";

INITIALIZE_PASS_BEGIN(LoopVersioningLegacyPass, LVER_OPTION, LVer_name, false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(LoopVersioningLegacyPass, LVER_OPTION, LVer_name, false,
                    false)

namespace llvm {
FunctionPass *createLoopVersioningLegacyPass() {
  return new LoopVersioningLegacyPass();
}

PreservedAnalyses LoopVersioningPass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();

  auto GetLAA = [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };

  if (runImpl(&LI, GetLAA, &DT, &SE))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}
} // namespace llvm
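
A minimal usage sketch (not part of the patch above): this is how a client transform could drive the LoopVersioning utility in the same way runImpl() does, with the same legality gating. The free function name versionInnermostLoop and its exact parameter list are illustrative assumptions, not LLVM API; everything it calls appears verbatim in runImpl() above.

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"

using namespace llvm;

// Version a single innermost loop if LAA produced anything worth speculating
// on (runtime pointer checks or a non-trivial SCEV predicate). Mirrors the
// gating used by runImpl() in LoopVersioning.cpp.
static bool versionInnermostLoop(Loop *L, const LoopAccessInfo &LAI,
                                 LoopInfo &LI, DominatorTree &DT,
                                 ScalarEvolution &SE) {
  if (!L->isLoopSimplifyForm() || !L->isRotatedForm() || !L->getExitingBlock())
    return false;
  if (LAI.hasConvergentOp())
    return false;
  if (!LAI.getNumRuntimePointerChecks() &&
      LAI.getPSE().getPredicate().isAlwaysTrue())
    return false; // Nothing to check at runtime; versioning would be a no-op.

  // Emit the runtime checks, clone the loop, and annotate the versioned copy
  // so alias analysis can disambiguate the checked accesses.
  LoopVersioning LVer(LAI, LAI.getRuntimePointerChecking()->getChecks(), L,
                      &LI, &DT, &SE);
  LVer.versionLoop();
  LVer.annotateLoopWithNoAlias();
  return true;
}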