diff --git a/contrib/llvm-project/llvm/include/llvm/Support/MathExtras.h b/contrib/llvm-project/llvm/include/llvm/Support/MathExtras.h
index aa4f4d2ed42e..afb4fa262152 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/MathExtras.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/MathExtras.h
@@ -1,649 +1,657 @@
 //===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
 //
 // This file contains some functions that are useful for math stuff.
 //
 //===----------------------------------------------------------------------===//
 
 #ifndef LLVM_SUPPORT_MATHEXTRAS_H
 #define LLVM_SUPPORT_MATHEXTRAS_H
 
 #include "llvm/ADT/bit.h"
 #include "llvm/Support/Compiler.h"
 #include <cassert>
 #include <climits>
 #include <cstdint>
 #include <cstring>
 #include <limits>
 #include <type_traits>
 
 namespace llvm {
 
 /// Mathematical constants.
 namespace numbers {
 // TODO: Track C++20 std::numbers.
 // TODO: Favor using the hexadecimal FP constants (requires C++17).
 constexpr double e          = 2.7182818284590452354,  // (0x1.5bf0a8b145749P+1) https://oeis.org/A001113
                  egamma     = .57721566490153286061,  // (0x1.2788cfc6fb619P-1) https://oeis.org/A001620
                  ln2        = .69314718055994530942,  // (0x1.62e42fefa39efP-1) https://oeis.org/A002162
                  ln10       = 2.3025850929940456840,  // (0x1.24bb1bbb55516P+1) https://oeis.org/A002392
                  log2e      = 1.4426950408889634074,  // (0x1.71547652b82feP+0)
                  log10e     = .43429448190325182765,  // (0x1.bcb7b1526e50eP-2)
                  pi         = 3.1415926535897932385,  // (0x1.921fb54442d18P+1) https://oeis.org/A000796
                  inv_pi     = .31830988618379067154,  // (0x1.45f306bc9c883P-2) https://oeis.org/A049541
                  sqrtpi     = 1.7724538509055160273,  // (0x1.c5bf891b4ef6bP+0) https://oeis.org/A002161
                  inv_sqrtpi = .56418958354775628695,  // (0x1.20dd750429b6dP-1) https://oeis.org/A087197
                  sqrt2      = 1.4142135623730950488,  // (0x1.6a09e667f3bcdP+0) https://oeis.org/A002193
                  inv_sqrt2  = .70710678118654752440,  // (0x1.6a09e667f3bcdP-1)
                  sqrt3      = 1.7320508075688772935,  // (0x1.bb67ae8584caaP+0) https://oeis.org/A002194
                  inv_sqrt3  = .57735026918962576451,  // (0x1.279a74590331cP-1)
                  phi        = 1.6180339887498948482;  // (0x1.9e3779b97f4a8P+0) https://oeis.org/A001622
 constexpr float ef          = 2.71828183F,  // (0x1.5bf0a8P+1) https://oeis.org/A001113
                 egammaf     = .577215665F,  // (0x1.2788d0P-1) https://oeis.org/A001620
                 ln2f        = .693147181F,  // (0x1.62e430P-1) https://oeis.org/A002162
                 ln10f       = 2.30258509F,  // (0x1.26bb1cP+1) https://oeis.org/A002392
                 log2ef      = 1.44269504F,  // (0x1.715476P+0)
                 log10ef     = .434294482F,  // (0x1.bcb7b2P-2)
                 pif         = 3.14159265F,  // (0x1.921fb6P+1) https://oeis.org/A000796
                 inv_pif     = .318309886F,  // (0x1.45f306P-2) https://oeis.org/A049541
                 sqrtpif     = 1.77245385F,  // (0x1.c5bf8aP+0) https://oeis.org/A002161
                 inv_sqrtpif = .564189584F,  // (0x1.20dd76P-1) https://oeis.org/A087197
                 sqrt2f      = 1.41421356F,  // (0x1.6a09e6P+0) https://oeis.org/A002193
                 inv_sqrt2f  = .707106781F,  // (0x1.6a09e6P-1)
                 sqrt3f      = 1.73205081F,  // (0x1.bb67aeP+0) https://oeis.org/A002194
                 inv_sqrt3f  = .577350269F,  // (0x1.279a74P-1)
                 phif        = 1.61803399F;  // (0x1.9e377aP+0) https://oeis.org/A001622
 } // namespace numbers
 
 /// Create a bitmask with the N right-most bits set to 1, and all other
 /// bits set to 0. Only unsigned types are allowed.
 template <typename T> T maskTrailingOnes(unsigned N) {
   static_assert(std::is_unsigned_v<T>, "Invalid type!");
   const unsigned Bits = CHAR_BIT * sizeof(T);
   assert(N <= Bits && "Invalid bit index");
   return N == 0 ? 0 : (T(-1) >> (Bits - N));
 }
 
 /// Create a bitmask with the N left-most bits set to 1, and all other
 /// bits set to 0. Only unsigned types are allowed.
 template <typename T> T maskLeadingOnes(unsigned N) {
   return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
 }
 
 /// Create a bitmask with the N right-most bits set to 0, and all other
 /// bits set to 1. Only unsigned types are allowed.
 template <typename T> T maskTrailingZeros(unsigned N) {
   return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
 }
 
 /// Create a bitmask with the N left-most bits set to 0, and all other
 /// bits set to 1. Only unsigned types are allowed.
 template <typename T> T maskLeadingZeros(unsigned N) {
   return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
 }
 
 /// Macro compressed bit reversal table for 256 bits.
 ///
 /// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 static const unsigned char BitReverseTable256[256] = {
 #define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
 #define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
 #define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
     R6(0), R6(2), R6(1), R6(3)
 #undef R2
 #undef R4
 #undef R6
 };
 
 /// Reverse the bits in \p Val.
 template <typename T> T reverseBits(T Val) {
 #if __has_builtin(__builtin_bitreverse8)
   if constexpr (std::is_same_v<T, uint8_t>)
     return __builtin_bitreverse8(Val);
 #endif
 #if __has_builtin(__builtin_bitreverse16)
   if constexpr (std::is_same_v<T, uint16_t>)
     return __builtin_bitreverse16(Val);
 #endif
 #if __has_builtin(__builtin_bitreverse32)
   if constexpr (std::is_same_v<T, uint32_t>)
     return __builtin_bitreverse32(Val);
 #endif
 #if __has_builtin(__builtin_bitreverse64)
   if constexpr (std::is_same_v<T, uint64_t>)
     return __builtin_bitreverse64(Val);
 #endif
 
   unsigned char in[sizeof(Val)];
   unsigned char out[sizeof(Val)];
   std::memcpy(in, &Val, sizeof(Val));
   for (unsigned i = 0; i < sizeof(Val); ++i)
     out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
   std::memcpy(&Val, out, sizeof(Val));
   return Val;
 }
 
 // NOTE: The following support functions use the _32/_64 extensions instead of
 // type overloading so that signed and unsigned integers can be used without
 // ambiguity.
 
 /// Return the high 32 bits of a 64 bit value.
 constexpr inline uint32_t Hi_32(uint64_t Value) {
   return static_cast<uint32_t>(Value >> 32);
 }
 
 /// Return the low 32 bits of a 64 bit value.
 constexpr inline uint32_t Lo_32(uint64_t Value) {
   return static_cast<uint32_t>(Value);
 }
 
 /// Make a 64-bit integer from a high / low pair of 32-bit integers.
 constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
   return ((uint64_t)High << 32) | (uint64_t)Low;
 }
 
 /// Checks if an integer fits into the given bit width.
 template <unsigned N> constexpr inline bool isInt(int64_t x) {
   if constexpr (N == 8)
     return static_cast<int8_t>(x) == x;
   if constexpr (N == 16)
     return static_cast<int16_t>(x) == x;
   if constexpr (N == 32)
     return static_cast<int32_t>(x) == x;
   if constexpr (N < 64)
     return -(INT64_C(1) << (N - 1)) <= x && x < (INT64_C(1) << (N - 1));
   (void)x; // MSVC v19.25 warns that x is unused.
   return true;
 }
 
 /// Checks if a signed integer is an N bit number shifted left by S.
 template <unsigned N, unsigned S>
 constexpr inline bool isShiftedInt(int64_t x) {
   static_assert(
       N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
   static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
   return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
 }
 
 /// Checks if an unsigned integer fits into the given bit width.
 template <unsigned N> constexpr inline bool isUInt(uint64_t x) {
   static_assert(N > 0, "isUInt<0> doesn't make sense");
   if constexpr (N == 8)
     return static_cast<uint8_t>(x) == x;
   if constexpr (N == 16)
     return static_cast<uint16_t>(x) == x;
   if constexpr (N == 32)
     return static_cast<uint32_t>(x) == x;
   if constexpr (N < 64)
     return x < (UINT64_C(1) << (N));
   (void)x; // MSVC v19.25 warns that x is unused.
   return true;
 }
 
 /// Checks if a unsigned integer is an N bit number shifted left by S.
 template <unsigned N, unsigned S>
 constexpr inline bool isShiftedUInt(uint64_t x) {
   static_assert(
       N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
   static_assert(N + S <= 64,
                 "isShiftedUInt<N, S> with N + S > 64 is too wide.");
   // Per the two static_asserts above, S must be strictly less than 64. So
   // 1 << S is not undefined behavior.
   return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
 }
 
 /// Gets the maximum value for a N-bit unsigned integer.
 inline uint64_t maxUIntN(uint64_t N) {
   assert(N > 0 && N <= 64 && "integer width out of range");
 
   // uint64_t(1) << 64 is undefined behavior, so we can't do
   //   (uint64_t(1) << N) - 1
   // without checking first that N != 64. But this works and doesn't have a
   // branch.
   return UINT64_MAX >> (64 - N);
 }
 
 /// Gets the minimum value for a N-bit signed integer.
 inline int64_t minIntN(int64_t N) {
   assert(N > 0 && N <= 64 && "integer width out of range");
 
   return UINT64_C(1) + ~(UINT64_C(1) << (N - 1));
 }
 
 /// Gets the maximum value for a N-bit signed integer.
 inline int64_t maxIntN(int64_t N) {
   assert(N > 0 && N <= 64 && "integer width out of range");
 
   // This relies on two's complement wraparound when N == 64, so we convert to
   // int64_t only at the very end to avoid UB.
   return (UINT64_C(1) << (N - 1)) - 1;
 }
 
 /// Checks if an unsigned integer fits into the given (dynamic) bit width.
 inline bool isUIntN(unsigned N, uint64_t x) {
   return N >= 64 || x <= maxUIntN(N);
 }
 
 /// Checks if an signed integer fits into the given (dynamic) bit width.
 inline bool isIntN(unsigned N, int64_t x) {
   return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
 }
 
 /// Return true if the argument is a non-empty sequence of ones starting at the
 /// least significant bit with the remainder zero (32 bit version).
 /// Ex. isMask_32(0x0000FFFFU) == true.
 constexpr inline bool isMask_32(uint32_t Value) {
   return Value && ((Value + 1) & Value) == 0;
 }
 
 /// Return true if the argument is a non-empty sequence of ones starting at the
 /// least significant bit with the remainder zero (64 bit version).
 constexpr inline bool isMask_64(uint64_t Value) {
   return Value && ((Value + 1) & Value) == 0;
 }
 
 /// Return true if the argument contains a non-empty sequence of ones with the
 /// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
 constexpr inline bool isShiftedMask_32(uint32_t Value) {
   return Value && isMask_32((Value - 1) | Value);
 }
 
 /// Return true if the argument contains a non-empty sequence of ones with the
 /// remainder zero (64 bit version.)
 constexpr inline bool isShiftedMask_64(uint64_t Value) {
   return Value && isMask_64((Value - 1) | Value);
 }
 
 /// Return true if the argument is a power of two > 0.
 /// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
 constexpr inline bool isPowerOf2_32(uint32_t Value) {
   return llvm::has_single_bit(Value);
 }
 
 /// Return true if the argument is a power of two > 0 (64 bit edition.)
 constexpr inline bool isPowerOf2_64(uint64_t Value) {
   return llvm::has_single_bit(Value);
 }
 
 /// Return true if the argument contains a non-empty sequence of ones with the
 /// remainder zero (32 bit version.) Ex.
 /// isShiftedMask_32(0x0000FF00U) == true.
 /// If true, \p MaskIdx will specify the index of the lowest set bit and \p
 /// MaskLen is updated to specify the length of the mask, else neither are
 /// updated.
 inline bool isShiftedMask_32(uint32_t Value, unsigned &MaskIdx,
                              unsigned &MaskLen) {
   if (!isShiftedMask_32(Value))
     return false;
   MaskIdx = llvm::countr_zero(Value);
   MaskLen = llvm::popcount(Value);
   return true;
 }
 
 /// Return true if the argument contains a non-empty sequence of ones with the
 /// remainder zero (64 bit version.) If true, \p MaskIdx will specify the index
 /// of the lowest set bit and \p MaskLen is updated to specify the length of the
 /// mask, else neither are updated.
 inline bool isShiftedMask_64(uint64_t Value, unsigned &MaskIdx,
                              unsigned &MaskLen) {
   if (!isShiftedMask_64(Value))
     return false;
   MaskIdx = llvm::countr_zero(Value);
   MaskLen = llvm::popcount(Value);
   return true;
 }
 
 /// Compile time Log2.
 /// Valid only for positive powers of two.
 template <size_t kValue> constexpr inline size_t CTLog2() {
   static_assert(kValue > 0 && llvm::isPowerOf2_64(kValue),
                 "Value is not a valid power of 2");
   return 1 + CTLog2<kValue / 2>();
 }
 
 template <> constexpr inline size_t CTLog2<1>() { return 0; }
 
 /// Return the floor log base 2 of the specified value, -1 if the value is zero.
 /// (32 bit edition.)
 /// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
 inline unsigned Log2_32(uint32_t Value) {
   return 31 - llvm::countl_zero(Value);
 }
 
 /// Return the floor log base 2 of the specified value, -1 if the value is zero.
 /// (64 bit edition.)
 inline unsigned Log2_64(uint64_t Value) {
   return 63 - llvm::countl_zero(Value);
 }
 
 /// Return the ceil log base 2 of the specified value, 32 if the value is zero.
 /// (32 bit edition).
 /// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
 inline unsigned Log2_32_Ceil(uint32_t Value) {
   return 32 - llvm::countl_zero(Value - 1);
 }
 
 /// Return the ceil log base 2 of the specified value, 64 if the value is zero.
 /// (64 bit edition.)
 inline unsigned Log2_64_Ceil(uint64_t Value) {
   return 64 - llvm::countl_zero(Value - 1);
 }
 
 /// A and B are either alignments or offsets. Return the minimum alignment that
 /// may be assumed after adding the two together.
 constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
   // The largest power of 2 that divides both A and B.
   //
   // Replace "-Value" by "1+~Value" in the following commented code to avoid
   // MSVC warning C4146
   //    return (A | B) & -(A | B);
   return (A | B) & (1 + ~(A | B));
 }
 
 /// Returns the next power of two (in 64-bits) that is strictly greater than A.
 /// Returns zero on overflow.
 constexpr inline uint64_t NextPowerOf2(uint64_t A) {
   A |= (A >> 1);
   A |= (A >> 2);
   A |= (A >> 4);
   A |= (A >> 8);
   A |= (A >> 16);
   A |= (A >> 32);
   return A + 1;
 }
 
 /// Returns the power of two which is greater than or equal to the given value.
 /// Essentially, it is a ceil operation across the domain of powers of two.
 inline uint64_t PowerOf2Ceil(uint64_t A) {
   if (!A || A > UINT64_MAX / 2)
     return 0;
   return UINT64_C(1) << Log2_64_Ceil(A);
 }
 
 /// Returns the next integer (mod 2**64) that is greater than or equal to
 /// \p Value and is a multiple of \p Align. \p Align must be non-zero.
 ///
 /// Examples:
 /// \code
 ///   alignTo(5, 8) = 8
 ///   alignTo(17, 8) = 24
 ///   alignTo(~0LL, 8) = 0
 ///   alignTo(321, 255) = 510
 /// \endcode
 inline uint64_t alignTo(uint64_t Value, uint64_t Align) {
   assert(Align != 0u && "Align can't be 0.");
   return (Value + Align - 1) / Align * Align;
 }
 
 inline uint64_t alignToPowerOf2(uint64_t Value, uint64_t Align) {
   assert(Align != 0 && (Align & (Align - 1)) == 0 &&
          "Align must be a power of 2");
   // Replace unary minus to avoid compilation error on Windows:
   // "unary minus operator applied to unsigned type, result still unsigned"
   uint64_t negAlign = (~Align) + 1;
   return (Value + Align - 1) & negAlign;
 }
 
 /// If non-zero \p Skew is specified, the return value will be a minimal integer
 /// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for
 /// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p
 /// Skew mod \p A'. \p Align must be non-zero.
 ///
 /// Examples:
 /// \code
 ///   alignTo(5, 8, 7) = 7
 ///   alignTo(17, 8, 1) = 17
 ///   alignTo(~0LL, 8, 3) = 3
 ///   alignTo(321, 255, 42) = 552
 /// \endcode
 inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew) {
   assert(Align != 0u && "Align can't be 0.");
   Skew %= Align;
   return alignTo(Value - Skew, Align) + Skew;
 }
 
 /// Returns the next integer (mod 2**64) that is greater than or equal to
 /// \p Value and is a multiple of \c Align. \c Align must be non-zero.
 template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
   static_assert(Align != 0u, "Align must be non-zero");
   return (Value + Align - 1) / Align * Align;
 }
 
 /// Returns the integer ceil(Numerator / Denominator).
 inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
   return alignTo(Numerator, Denominator) / Denominator;
 }
 
 /// Returns the integer nearest(Numerator / Denominator).
 inline uint64_t divideNearest(uint64_t Numerator, uint64_t Denominator) {
   return (Numerator + (Denominator / 2)) / Denominator;
 }
 
 /// Returns the largest uint64_t less than or equal to \p Value and is
 /// \p Skew mod \p Align. \p Align must be non-zero
 inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
   assert(Align != 0u && "Align can't be 0.");
   Skew %= Align;
   return (Value - Skew) / Align * Align + Skew;
 }
 
 /// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
 /// Requires 0 < B <= 32.
 template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
   static_assert(B > 0, "Bit width can't be 0.");
   static_assert(B <= 32, "Bit width out of range.");
   return int32_t(X << (32 - B)) >> (32 - B);
 }
 
 /// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
 /// Requires 0 < B <= 32.
 inline int32_t SignExtend32(uint32_t X, unsigned B) {
   assert(B > 0 && "Bit width can't be 0.");
   assert(B <= 32 && "Bit width out of range.");
   return int32_t(X << (32 - B)) >> (32 - B);
 }
 
 /// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
 /// Requires 0 < B <= 64.
 template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
   static_assert(B > 0, "Bit width can't be 0.");
   static_assert(B <= 64, "Bit width out of range.");
   return int64_t(x << (64 - B)) >> (64 - B);
 }
 
 /// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
 /// Requires 0 < B <= 64.
 inline int64_t SignExtend64(uint64_t X, unsigned B) {
   assert(B > 0 && "Bit width can't be 0.");
   assert(B <= 64 && "Bit width out of range.");
   return int64_t(X << (64 - B)) >> (64 - B);
 }
 
 /// Subtract two unsigned integers, X and Y, of type T and return the absolute
 /// value of the result.
 template <typename T>
 std::enable_if_t<std::is_unsigned_v<T>, T> AbsoluteDifference(T X, T Y) {
   return X > Y ? (X - Y) : (Y - X);
 }
 
 /// Add two unsigned integers, X and Y, of type T. Clamp the result to the
 /// maximum representable value of T on overflow. ResultOverflowed indicates if
 /// the result is larger than the maximum representable value of type T.
 template <typename T>
 std::enable_if_t<std::is_unsigned_v<T>, T>
 SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
   bool Dummy;
   bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
   // Hacker's Delight, p. 29
   T Z = X + Y;
   Overflowed = (Z < X || Z < Y);
   if (Overflowed)
     return std::numeric_limits<T>::max();
   else
     return Z;
 }
 
 /// Add multiple unsigned integers of type T. Clamp the result to the
 /// maximum representable value of T on overflow.
 template <class T, class... Ts>
 std::enable_if_t<std::is_unsigned_v<T>, T> SaturatingAdd(T X, T Y, T Z,
                                                           Ts... Args) {
   bool Overflowed = false;
   T XY = SaturatingAdd(X, Y, &Overflowed);
   if (Overflowed)
     return SaturatingAdd(std::numeric_limits<T>::max(), T(1), Args...);
   return SaturatingAdd(XY, Z, Args...);
 }
 
 /// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
 /// maximum representable value of T on overflow. ResultOverflowed indicates if
 /// the result is larger than the maximum representable value of type T.
 template <typename T>
 std::enable_if_t<std::is_unsigned_v<T>, T>
 SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
   bool Dummy;
   bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
 
   // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
   // because it fails for uint16_t (where multiplication can have undefined
   // behavior due to promotion to int), and requires a division in addition
   // to the multiplication.
 
   Overflowed = false;
 
   // Log2(Z) would be either Log2Z or Log2Z + 1.
   // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
   // will necessarily be less than Log2Max as desired.
   int Log2Z = Log2_64(X) + Log2_64(Y);
   const T Max = std::numeric_limits<T>::max();
   int Log2Max = Log2_64(Max);
   if (Log2Z < Log2Max) {
     return X * Y;
   }
   if (Log2Z > Log2Max) {
     Overflowed = true;
     return Max;
   }
 
   // We're going to use the top bit, and maybe overflow one
   // bit past it. Multiply all but the bottom bit then add
   // that on at the end.
   T Z = (X >> 1) * Y;
   if (Z & ~(Max >> 1)) {
     Overflowed = true;
     return Max;
   }
   Z <<= 1;
   if (X & 1)
     return SaturatingAdd(Z, Y, ResultOverflowed);
 
   return Z;
 }
 
 /// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
 /// the product. Clamp the result to the maximum representable value of T on
 /// overflow. ResultOverflowed indicates if the result is larger than the
 /// maximum representable value of type T.
 template <typename T>
 std::enable_if_t<std::is_unsigned_v<T>, T>
 SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
   bool Dummy;
   bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
 
   T Product = SaturatingMultiply(X, Y, &Overflowed);
   if (Overflowed)
     return Product;
 
   return SaturatingAdd(A, Product, &Overflowed);
 }
 
 /// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
 extern const float huge_valf;
 
 /// Add two signed integers, computing the two's complement truncated result,
 /// returning true if overflow occurred.
 template <typename T>
 std::enable_if_t<std::is_signed_v<T>, T> AddOverflow(T X, T Y, T &Result) {
 #if __has_builtin(__builtin_add_overflow)
   return __builtin_add_overflow(X, Y, &Result);
 #else
   // Perform the unsigned addition.
   using U = std::make_unsigned_t<T>;
   const U UX = static_cast<U>(X);
   const U UY = static_cast<U>(Y);
   const U UResult = UX + UY;
 
   // Convert to signed.
   Result = static_cast<T>(UResult);
 
   // Adding two positive numbers should result in a positive number.
   if (X > 0 && Y > 0)
     return Result <= 0;
   // Adding two negatives should result in a negative number.
   if (X < 0 && Y < 0)
     return Result >= 0;
   return false;
 #endif
 }
 
 /// Subtract two signed integers, computing the two's complement truncated
 /// result, returning true if an overflow ocurred.
 template <typename T>
 std::enable_if_t<std::is_signed_v<T>, T> SubOverflow(T X, T Y, T &Result) {
 #if __has_builtin(__builtin_sub_overflow)
   return __builtin_sub_overflow(X, Y, &Result);
 #else
   // Perform the unsigned addition.
   using U = std::make_unsigned_t<T>;
   const U UX = static_cast<U>(X);
   const U UY = static_cast<U>(Y);
   const U UResult = UX - UY;
 
   // Convert to signed.
   Result = static_cast<T>(UResult);
 
   // Subtracting a positive number from a negative results in a negative number.
   if (X <= 0 && Y > 0)
     return Result >= 0;
   // Subtracting a negative number from a positive results in a positive number.
   if (X >= 0 && Y < 0)
     return Result <= 0;
   return false;
 #endif
 }
 
 /// Multiply two signed integers, computing the two's complement truncated
 /// result, returning true if an overflow ocurred.
 template <typename T>
 std::enable_if_t<std::is_signed_v<T>, T> MulOverflow(T X, T Y, T &Result) {
   // Perform the unsigned multiplication on absolute values.
   using U = std::make_unsigned_t<T>;
   const U UX = X < 0 ? (0 - static_cast<U>(X)) : static_cast<U>(X);
   const U UY = Y < 0 ? (0 - static_cast<U>(Y)) : static_cast<U>(Y);
   const U UResult = UX * UY;
 
   // Convert to signed.
   const bool IsNegative = (X < 0) ^ (Y < 0);
   Result = IsNegative ? (0 - UResult) : UResult;
 
   // If any of the args was 0, result is 0 and no overflow occurs.
   if (UX == 0 || UY == 0)
     return false;
 
   // UX and UY are in [1, 2^n], where n is the number of digits.
   // Check how the max allowed absolute value (2^n for negative, 2^(n-1) for
   // positive) divided by an argument compares to the other.
   if (IsNegative)
     return UX > (static_cast<U>(std::numeric_limits<T>::max()) + U(1)) / UY;
   else
     return UX > (static_cast<U>(std::numeric_limits<T>::max())) / UY;
 }
 
+/// Type to force float point values onto the stack, so that x86 doesn't add
+/// hidden precision, avoiding rounding differences on various platforms.
+#if defined(__i386__) || defined(_M_IX86)
+using stack_float_t = volatile float;
+#else
+using stack_float_t = float;
+#endif
+
 } // End llvm namespace
 
 #endif
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/CalcSpillWeights.cpp b/contrib/llvm-project/llvm/lib/CodeGen/CalcSpillWeights.cpp
index f3cb7fa5af61..fa7ef669ec11 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/CalcSpillWeights.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/CalcSpillWeights.cpp
@@ -1,343 +1,344 @@
 //===- CalcSpillWeights.cpp -----------------------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
 
 #include "llvm/CodeGen/CalcSpillWeights.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/CodeGen/LiveInterval.h"
 #include "llvm/CodeGen/LiveIntervals.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/StackMaps.h"
 #include "llvm/CodeGen/TargetInstrInfo.h"
 #include "llvm/CodeGen/TargetRegisterInfo.h"
 #include "llvm/CodeGen/TargetSubtargetInfo.h"
 #include "llvm/CodeGen/VirtRegMap.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
 #include <cassert>
 #include <tuple>
 
 using namespace llvm;
 
 #define DEBUG_TYPE "calcspillweights"
 
 void VirtRegAuxInfo::calculateSpillWeightsAndHints() {
   LLVM_DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
                     << "********** Function: " << MF.getName() << '\n');
 
   MachineRegisterInfo &MRI = MF.getRegInfo();
   for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
     Register Reg = Register::index2VirtReg(I);
     if (MRI.reg_nodbg_empty(Reg))
       continue;
     calculateSpillWeightAndHint(LIS.getInterval(Reg));
   }
 }
 
 // Return the preferred allocation register for reg, given a COPY instruction.
 Register VirtRegAuxInfo::copyHint(const MachineInstr *MI, unsigned Reg,
                                   const TargetRegisterInfo &TRI,
                                   const MachineRegisterInfo &MRI) {
   unsigned Sub, HSub;
   Register HReg;
   if (MI->getOperand(0).getReg() == Reg) {
     Sub = MI->getOperand(0).getSubReg();
     HReg = MI->getOperand(1).getReg();
     HSub = MI->getOperand(1).getSubReg();
   } else {
     Sub = MI->getOperand(1).getSubReg();
     HReg = MI->getOperand(0).getReg();
     HSub = MI->getOperand(0).getSubReg();
   }
 
   if (!HReg)
     return 0;
 
   if (HReg.isVirtual())
     return Sub == HSub ? HReg : Register();
 
   const TargetRegisterClass *RC = MRI.getRegClass(Reg);
   MCRegister CopiedPReg = HSub ? TRI.getSubReg(HReg, HSub) : HReg.asMCReg();
   if (RC->contains(CopiedPReg))
     return CopiedPReg;
 
   // Check if reg:sub matches so that a super register could be hinted.
   if (Sub)
     return TRI.getMatchingSuperReg(CopiedPReg, Sub, RC);
 
   return 0;
 }
 
 // Check if all values in LI are rematerializable
 bool VirtRegAuxInfo::isRematerializable(const LiveInterval &LI,
                                         const LiveIntervals &LIS,
                                         const VirtRegMap &VRM,
                                         const TargetInstrInfo &TII) {
   Register Reg = LI.reg();
   Register Original = VRM.getOriginal(Reg);
   for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
        I != E; ++I) {
     const VNInfo *VNI = *I;
     if (VNI->isUnused())
       continue;
     if (VNI->isPHIDef())
       return false;
 
     MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
     assert(MI && "Dead valno in interval");
 
     // Trace copies introduced by live range splitting. The inline
     // spiller can rematerialize through these copies, so the spill
     // weight must reflect this.
     while (TII.isFullCopyInstr(*MI)) {
       // The copy destination must match the interval register.
       if (MI->getOperand(0).getReg() != Reg)
         return false;
 
       // Get the source register.
       Reg = MI->getOperand(1).getReg();
 
       // If the original (pre-splitting) registers match this
       // copy came from a split.
       if (!Reg.isVirtual() || VRM.getOriginal(Reg) != Original)
         return false;
 
       // Follow the copy live-in value.
       const LiveInterval &SrcLI = LIS.getInterval(Reg);
       LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
       VNI = SrcQ.valueIn();
       assert(VNI && "Copy from non-existing value");
       if (VNI->isPHIDef())
         return false;
       MI = LIS.getInstructionFromIndex(VNI->def);
       assert(MI && "Dead valno in interval");
     }
 
     if (!TII.isTriviallyReMaterializable(*MI))
       return false;
   }
   return true;
 }
 
 bool VirtRegAuxInfo::isLiveAtStatepointVarArg(LiveInterval &LI) {
   return any_of(VRM.getRegInfo().reg_operands(LI.reg()),
                 [](MachineOperand &MO) {
                   MachineInstr *MI = MO.getParent();
                   if (MI->getOpcode() != TargetOpcode::STATEPOINT)
                     return false;
                   return StatepointOpers(MI).getVarIdx() <= MO.getOperandNo();
                 });
 }
 
 void VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &LI) {
   float Weight = weightCalcHelper(LI);
   // Check if unspillable.
   if (Weight < 0)
     return;
   LI.setWeight(Weight);
 }
 
 static bool canMemFoldInlineAsm(LiveInterval &LI,
                                 const MachineRegisterInfo &MRI) {
   for (const MachineOperand &MO : MRI.reg_operands(LI.reg())) {
     const MachineInstr *MI = MO.getParent();
     if (MI->isInlineAsm() && MI->mayFoldInlineAsmRegOp(MI->getOperandNo(&MO)))
       return true;
   }
 
   return false;
 }
 
 float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI, SlotIndex *Start,
                                        SlotIndex *End) {
   MachineRegisterInfo &MRI = MF.getRegInfo();
   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
   MachineBasicBlock *MBB = nullptr;
   float TotalWeight = 0;
   unsigned NumInstr = 0; // Number of instructions using LI
   SmallPtrSet<MachineInstr *, 8> Visited;
 
   std::pair<unsigned, Register> TargetHint = MRI.getRegAllocationHint(LI.reg());
 
   if (LI.isSpillable()) {
     Register Reg = LI.reg();
     Register Original = VRM.getOriginal(Reg);
     const LiveInterval &OrigInt = LIS.getInterval(Original);
     // li comes from a split of OrigInt. If OrigInt was marked
     // as not spillable, make sure the new interval is marked
     // as not spillable as well.
     if (!OrigInt.isSpillable())
       LI.markNotSpillable();
   }
 
   // Don't recompute spill weight for an unspillable register.
   bool IsSpillable = LI.isSpillable();
 
   bool IsLocalSplitArtifact = Start && End;
 
   // Do not update future local split artifacts.
   bool ShouldUpdateLI = !IsLocalSplitArtifact;
 
   if (IsLocalSplitArtifact) {
     MachineBasicBlock *LocalMBB = LIS.getMBBFromIndex(*End);
     assert(LocalMBB == LIS.getMBBFromIndex(*Start) &&
            "start and end are expected to be in the same basic block");
 
     // Local split artifact will have 2 additional copy instructions and they
     // will be in the same BB.
     // localLI = COPY other
     // ...
     // other   = COPY localLI
     TotalWeight += LiveIntervals::getSpillWeight(true, false, &MBFI, LocalMBB);
     TotalWeight += LiveIntervals::getSpillWeight(false, true, &MBFI, LocalMBB);
 
     NumInstr += 2;
   }
 
   // CopyHint is a sortable hint derived from a COPY instruction.
   struct CopyHint {
     const Register Reg;
     const float Weight;
     CopyHint(Register R, float W) : Reg(R), Weight(W) {}
     bool operator<(const CopyHint &Rhs) const {
       // Always prefer any physreg hint.
       if (Reg.isPhysical() != Rhs.Reg.isPhysical())
         return Reg.isPhysical();
       if (Weight != Rhs.Weight)
         return (Weight > Rhs.Weight);
       return Reg.id() < Rhs.Reg.id(); // Tie-breaker.
     }
   };
 
   bool IsExiting = false;
   std::set<CopyHint> CopyHints;
   DenseMap<Register, float> Hint;
   for (MachineRegisterInfo::reg_instr_nodbg_iterator
            I = MRI.reg_instr_nodbg_begin(LI.reg()),
            E = MRI.reg_instr_nodbg_end();
        I != E;) {
     MachineInstr *MI = &*(I++);
 
     // For local split artifacts, we are interested only in instructions between
     // the expected start and end of the range.
     SlotIndex SI = LIS.getInstructionIndex(*MI);
     if (IsLocalSplitArtifact && ((SI < *Start) || (SI > *End)))
       continue;
 
     NumInstr++;
     bool identityCopy = false;
     auto DestSrc = TII.isCopyInstr(*MI);
     if (DestSrc) {
       const MachineOperand *DestRegOp = DestSrc->Destination;
       const MachineOperand *SrcRegOp = DestSrc->Source;
       identityCopy = DestRegOp->getReg() == SrcRegOp->getReg() &&
                      DestRegOp->getSubReg() == SrcRegOp->getSubReg();
     }
 
     if (identityCopy || MI->isImplicitDef())
       continue;
     if (!Visited.insert(MI).second)
       continue;
 
     // For terminators that produce values, ask the backend if the register is
     // not spillable.
     if (TII.isUnspillableTerminator(MI) && MI->definesRegister(LI.reg())) {
       LI.markNotSpillable();
       return -1.0f;
     }
 
-    float Weight = 1.0f;
+    // Force Weight onto the stack so that x86 doesn't add hidden precision,
+    // similar to HWeight below.
+    stack_float_t Weight = 1.0f;
     if (IsSpillable) {
       // Get loop info for mi.
       if (MI->getParent() != MBB) {
         MBB = MI->getParent();
         const MachineLoop *Loop = Loops.getLoopFor(MBB);
         IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
       }
 
       // Calculate instr weight.
       bool Reads, Writes;
       std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
       Weight = LiveIntervals::getSpillWeight(Writes, Reads, &MBFI, *MI);
 
       // Give extra weight to what looks like a loop induction variable update.
       if (Writes && IsExiting && LIS.isLiveOutOfMBB(LI, MBB))
         Weight *= 3;
 
       TotalWeight += Weight;
     }
 
     // Get allocation hints from copies.
     if (!TII.isCopyInstr(*MI))
       continue;
     Register HintReg = copyHint(MI, LI.reg(), TRI, MRI);
     if (!HintReg)
       continue;
-    // Force hweight onto the stack so that x86 doesn't add hidden precision,
+    // Force HWeight onto the stack so that x86 doesn't add hidden precision,
     // making the comparison incorrectly pass (i.e., 1 > 1 == true??).
-    //
-    // FIXME: we probably shouldn't use floats at all.
-    volatile float HWeight = Hint[HintReg] += Weight;
+    stack_float_t HWeight = Hint[HintReg] += Weight;
     if (HintReg.isVirtual() || MRI.isAllocatable(HintReg))
       CopyHints.insert(CopyHint(HintReg, HWeight));
   }
 
   // Pass all the sorted copy hints to mri.
   if (ShouldUpdateLI && CopyHints.size()) {
     // Remove a generic hint if previously added by target.
     if (TargetHint.first == 0 && TargetHint.second)
       MRI.clearSimpleHint(LI.reg());
 
     SmallSet<Register, 4> HintedRegs;
     for (const auto &Hint : CopyHints) {
       if (!HintedRegs.insert(Hint.Reg).second ||
           (TargetHint.first != 0 && Hint.Reg == TargetHint.second))
         // Don't add the same reg twice or the target-type hint again.
         continue;
       MRI.addRegAllocationHint(LI.reg(), Hint.Reg);
     }
 
     // Weakly boost the spill weight of hinted registers.
     TotalWeight *= 1.01F;
   }
 
   // If the live interval was already unspillable, leave it that way.
   if (!IsSpillable)
     return -1.0;
 
   // Mark li as unspillable if all live ranges are tiny and the interval
   // is not live at any reg mask. If the interval is live at a reg mask
   // spilling may be required. If li is live as use in statepoint instruction
   // spilling may be required due to if we mark interval with use in statepoint
   // as not spillable we are risky to end up with no register to allocate.
   // At the same time STATEPOINT instruction is perfectly fine to have this
   // operand on stack, so spilling such interval and folding its load from stack
   // into instruction itself makes perfect sense.
   if (ShouldUpdateLI && LI.isZeroLength(LIS.getSlotIndexes()) &&
       !LI.isLiveAtIndexes(LIS.getRegMaskSlots()) &&
       !isLiveAtStatepointVarArg(LI) && !canMemFoldInlineAsm(LI, MRI)) {
     LI.markNotSpillable();
     return -1.0;
   }
 
   // If all of the definitions of the interval are re-materializable,
   // it is a preferred candidate for spilling.
   // FIXME: this gets much more complicated once we support non-trivial
   // re-materialization.
   if (isRematerializable(LI, LIS, VRM, *MF.getSubtarget().getInstrInfo()))
     TotalWeight *= 0.5F;
 
   if (IsLocalSplitArtifact)
     return normalize(TotalWeight, Start->distance(*End), NumInstr);
   return normalize(TotalWeight, LI.getSize(), NumInstr);
 }
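
Editor's note (illustrative, not part of the patch): the stack_float_t type added above exists, per its own comment, to stop 32-bit x86 from carrying "hidden precision" in the x87's 80-bit registers; writing a value through a volatile float forces it through a 32-bit stack slot and rounds it to single precision. The standalone sketch below demonstrates the effect being guarded against. The file name, variable names, and build flags are hypothetical, and whether the two comparisons actually differ depends on the compiler's excess-precision settings; on SSE or x86-64 builds both print 0.

// excess_precision_demo.cpp -- illustrative only; names and flags are hypothetical.
// Build 32-bit with the x87 FPU (e.g. g++ -m32 -mno-sse -mfpmath=387
// -fexcess-precision=fast) to try to observe a difference; with SSE math
// both comparisons print 0.
#include <cstdio>

#if defined(__i386__) || defined(_M_IX86)
using stack_float_t = volatile float; // force a round trip through memory
#else
using stack_float_t = float;
#endif

int main() {
  float A = 16777216.0f; // 2^24: A + 1 is not representable as a float
  float B = 1.0f;

  float Kept = A + B;            // may stay in an 80-bit x87 register
  stack_float_t Spilled = A + B; // rounded to 32 bits by the store

  // With excess precision the in-register sum is 16777217 and compares
  // greater than A; the spilled copy rounds back to 16777216 and does not.
  std::printf("kept    > A: %d\n", Kept > A);
  std::printf("spilled > A: %d\n", Spilled > A);
  return 0;
}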
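Editor's note (illustrative): a small usage sketch of a few helpers from the MathExtras.h hunk above. It assumes compilation against this tree's LLVM headers; the expected values are taken from the doc comments in the header itself.

// mathextras_usage.cpp -- illustrative sketch, assumes LLVM headers from this tree.
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

int main() {
  using namespace llvm;

  // Values straight from the doc-comment examples in the header.
  assert(alignTo(5, 8) == 8);
  assert(alignTo(17, 8) == 24);
  assert(Log2_32(32) == 5);
  assert(Log2_32_Ceil(6) == 3);

  // isShiftedMask_32 also reports where the run of ones starts and how long it is.
  unsigned MaskIdx = 0, MaskLen = 0;
  assert(isShiftedMask_32(0x0000FF00U, MaskIdx, MaskLen));
  assert(MaskIdx == 8 && MaskLen == 8);

  // SaturatingAdd clamps to the type's maximum instead of wrapping.
  bool Overflowed = false;
  uint8_t Sum = SaturatingAdd<uint8_t>(200, 100, &Overflowed);
  assert(Sum == 255 && Overflowed);

  // SignExtend32<4>: the bottom four bits 0b1111 are the two's complement -1.
  assert(SignExtend32<4>(0xFU) == -1);
  return 0;
}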
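Editor's note (illustrative): the signed-overflow helpers near the end of the header store the wrapped two's complement value in Result and report whether overflow happened. A short sketch under the same assumption (built against this tree's headers):

// overflow_usage.cpp -- illustrative sketch, assumes LLVM headers from this tree.
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

int main() {
  int32_t R = 0;

  // INT32_MAX + 1 overflows; Result holds the wrapped value.
  assert(llvm::AddOverflow<int32_t>(INT32_MAX, 1, R) && R == INT32_MIN);

  // 46341 * 46341 = 2147488281 exceeds INT32_MAX, so overflow is reported.
  assert(llvm::MulOverflow<int32_t>(46341, 46341, R));

  // A product that fits is reported as non-overflowing.
  assert(!llvm::MulOverflow<int32_t>(46340, 46340, R) && R == 2147395600);
  return 0;
}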