diff --git a/lib/msun/ld128/s_expl.c b/lib/msun/ld128/s_expl.c
index 5fc43802b950..0274a8f302db 100644
--- a/lib/msun/ld128/s_expl.c
+++ b/lib/msun/ld128/s_expl.c
@@ -1,326 +1,322 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009-2013 Steven G. Kargl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Optimized by Bruce D. Evans.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ld128 version of s_expl.c.  See ../ld80/s_expl.c for most comments.
 */

#include <float.h>

#include "fpmath.h"
#include "math.h"
#include "math_private.h"
#include "k_expl.h"

/* XXX Prevent compilers from erroneously constant folding these: */
static const volatile long double
huge = 0x1p10000L,
tiny = 0x1p-10000L;

static const long double
twom10000 = 0x1p-10000L;

static const long double
/* log(2**16384 - 0.5) rounded towards zero: */
/* log(2**16384 - 0.5 + 1) rounded towards zero for expm1l() is the same: */
o_threshold =  11356.523406294143949491931077970763428L,
/* log(2**(-16381-64-1)) rounded towards zero: */
u_threshold = -11433.462743336297878837243843452621503L;

long double
expl(long double x)
{
        union IEEEl2bits u;
        long double hi, lo, t, twopk;
        int k;
        uint16_t hx, ix;

-       DOPRINT_START(&x);
-
        /* Filter out exceptional cases. */
        u.e = x;
        hx = u.xbits.expsign;
        ix = hx & 0x7fff;
        if (ix >= BIAS + 13) {          /* |x| >= 8192 or x is NaN */
                if (ix == BIAS + LDBL_MAX_EXP) {
                        if (hx & 0x8000)  /* x is -Inf or -NaN */
-                               RETURNP(-1 / x);
-                       RETURNP(x + x); /* x is +Inf or +NaN */
+                               RETURNF(-1 / x);
+                       RETURNF(x + x); /* x is +Inf or +NaN */
                }
                if (x > o_threshold)
-                       RETURNP(huge * huge);
+                       RETURNF(huge * huge);
                if (x < u_threshold)
-                       RETURNP(tiny * tiny);
+                       RETURNF(tiny * tiny);
        } else if (ix < BIAS - 114) {   /* |x| < 0x1p-114 */
-               RETURN2P(1, x);         /* 1 with inexact iff x != 0 */
+               RETURNF(1 + x);         /* 1 with inexact iff x != 0 */
        }

        ENTERI();

        twopk = 1;
        __k_expl(x, &hi, &lo, &k);
        t = SUM2P(hi, lo);

        /* Scale by 2**k. */
        /*
         * XXX sparc64 multiplication was so slow that scalbnl() is faster,
         * but performance on aarch64 and riscv hasn't yet been quantified.
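         *
         * (Illustration, a sketch rather than the committed code: the
         * branches below compute the same result as
         *
         *      return (scalbnl(t, k));
         *
         * For k < LDBL_MIN_EXP the scale is split as
         * 2**k = 2**(k+10000) * 2**-10000, so the constructed factor
         * twopk stays a normal number and only the final multiplication
         * can round or underflow.)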
*/ if (k >= LDBL_MIN_EXP) { if (k == LDBL_MAX_EXP) RETURNI(t * 2 * 0x1p16383L); SET_LDBL_EXPSIGN(twopk, BIAS + k); RETURNI(t * twopk); } else { SET_LDBL_EXPSIGN(twopk, BIAS + k + 10000); RETURNI(t * twopk * twom10000); } } /* * Our T1 and T2 are chosen to be approximately the points where method * A and method B have the same accuracy. Tang's T1 and T2 are the * points where method A's accuracy changes by a full bit. For Tang, * this drop in accuracy makes method A immediately less accurate than * method B, but our larger INTERVALS makes method A 2 bits more * accurate so it remains the most accurate method significantly * closer to the origin despite losing the full bit in our extended * range for it. * * Split the interval [T1, T2] into two intervals [T1, T3] and [T3, T2]. * Setting T3 to 0 would require the |x| < 0x1p-113 condition to appear * in both subintervals, so set T3 = 2**-5, which places the condition * into the [T1, T3] interval. * * XXX we now do this more to (partially) balance the number of terms * in the C and D polys than to avoid checking the condition in both * intervals. * * XXX these micro-optimizations are excessive. */ static const double T1 = -0.1659, /* ~-30.625/128 * log(2) */ T2 = 0.1659, /* ~30.625/128 * log(2) */ T3 = 0.03125; /* * Domain [-0.1659, 0.03125], range ~[2.9134e-44, 1.8404e-37]: * |(exp(x)-1-x-x**2/2)/x - p(x)| < 2**-122.03 * * XXX none of the long double C or D coeffs except C10 is correctly printed. * If you re-print their values in %.35Le format, the result is always * different. For example, the last 2 digits in C3 should be 59, not 67. * 67 is apparently from rounding an extra-precision value to 36 decimal * places. */ static const long double C3 = 1.66666666666666666666666666666666667e-1L, C4 = 4.16666666666666666666666666666666645e-2L, C5 = 8.33333333333333333333333333333371638e-3L, C6 = 1.38888888888888888888888888891188658e-3L, C7 = 1.98412698412698412698412697235950394e-4L, C8 = 2.48015873015873015873015112487849040e-5L, C9 = 2.75573192239858906525606685484412005e-6L, C10 = 2.75573192239858906612966093057020362e-7L, C11 = 2.50521083854417203619031960151253944e-8L, C12 = 2.08767569878679576457272282566520649e-9L, C13 = 1.60590438367252471783548748824255707e-10L; /* * XXX this has 1 more coeff than needed. * XXX can start the double coeffs but not the double mults at C10. * With my coeffs (C10-C17 double; s = best_s): * Domain [-0.1659, 0.03125], range ~[-1.1976e-37, 1.1976e-37]: * |(exp(x)-1-x-x**2/2)/x - p(x)| ~< 2**-122.65 */ static const double C14 = 1.1470745580491932e-11, /* 0x1.93974a81dae30p-37 */ C15 = 7.6471620181090468e-13, /* 0x1.ae7f3820adab1p-41 */ C16 = 4.7793721460260450e-14, /* 0x1.ae7cd18a18eacp-45 */ C17 = 2.8074757356658877e-15, /* 0x1.949992a1937d9p-49 */ C18 = 1.4760610323699476e-16; /* 0x1.545b43aabfbcdp-53 */ /* * Domain [0.03125, 0.1659], range ~[-2.7676e-37, -1.0367e-38]: * |(exp(x)-1-x-x**2/2)/x - p(x)| < 2**-121.44 */ static const long double D3 = 1.66666666666666666666666666666682245e-1L, D4 = 4.16666666666666666666666666634228324e-2L, D5 = 8.33333333333333333333333364022244481e-3L, D6 = 1.38888888888888888888887138722762072e-3L, D7 = 1.98412698412698412699085805424661471e-4L, D8 = 2.48015873015873015687993712101479612e-5L, D9 = 2.75573192239858944101036288338208042e-6L, D10 = 2.75573192239853161148064676533754048e-7L, D11 = 2.50521083855084570046480450935267433e-8L, D12 = 2.08767569819738524488686318024854942e-9L, D13 = 1.60590442297008495301927448122499313e-10L; /* * XXX this has 1 more coeff than needed. 
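 * (Illustration, not part of the original comment: the C and D
 * coefficients above are small minimax perturbations of the Taylor
 * coefficients 1/k! of exp(x).  A quick cross-check, assuming
 * <stdio.h>:
 *
 *	long double f = 2;		\* f = 2! *\
 *	for (int k = 3; k <= 13; k++) {
 *		f *= k;			\* f = k! *\
 *		printf("1/%2d! = %.35Le\n", k, 1 / f);
 *	}
 *
 * 1/3! = 1.66666...e-1 and 1/13! = 1.6059043836...e-10 match C3/D3
 * and C13/D13 up to the perturbation in the trailing digits.)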
* XXX can start the double coeffs but not the double mults at D11. * With my coeffs (D11-D16 double): * Domain [0.03125, 0.1659], range ~[-1.1980e-37, 1.1980e-37]: * |(exp(x)-1-x-x**2/2)/x - p(x)| ~< 2**-122.65 */ static const double D14 = 1.1470726176204336e-11, /* 0x1.93971dc395d9ep-37 */ D15 = 7.6478532249581686e-13, /* 0x1.ae892e3D16fcep-41 */ D16 = 4.7628892832607741e-14, /* 0x1.ad00Dfe41feccp-45 */ D17 = 3.0524857220358650e-15; /* 0x1.D7e8d886Df921p-49 */ long double expm1l(long double x) { union IEEEl2bits u, v; long double hx2_hi, hx2_lo, q, r, r1, t, twomk, twopk, x_hi; long double x_lo, x2; double dr, dx, fn, r2; int k, n, n2; uint16_t hx, ix; - DOPRINT_START(&x); - /* Filter out exceptional cases. */ u.e = x; hx = u.xbits.expsign; ix = hx & 0x7fff; if (ix >= BIAS + 7) { /* |x| >= 128 or x is NaN */ if (ix == BIAS + LDBL_MAX_EXP) { if (hx & 0x8000) /* x is -Inf or -NaN */ - RETURNP(-1 / x - 1); - RETURNP(x + x); /* x is +Inf or +NaN */ + RETURNF(-1 / x - 1); + RETURNF(x + x); /* x is +Inf or +NaN */ } if (x > o_threshold) - RETURNP(huge * huge); + RETURNF(huge * huge); /* * expm1l() never underflows, but it must avoid * unrepresentable large negative exponents. We used a * much smaller threshold for large |x| above than in * expl() so as to handle not so large negative exponents * in the same way as large ones here. */ if (hx & 0x8000) /* x <= -128 */ - RETURN2P(tiny, -1); /* good for x < -114ln2 - eps */ + RETURNF(tiny - 1); /* good for x < -114ln2 - eps */ } ENTERI(); if (T1 < x && x < T2) { x2 = x * x; dx = x; if (x < T3) { if (ix < BIAS - 113) { /* |x| < 0x1p-113 */ /* x (rounded) with inexact if x != 0: */ - RETURNPI(x == 0 ? x : + RETURNI(x == 0 ? x : (0x1p200 * x + fabsl(x)) * 0x1p-200); } q = x * x2 * C3 + x2 * x2 * (C4 + x * (C5 + x * (C6 + x * (C7 + x * (C8 + x * (C9 + x * (C10 + x * (C11 + x * (C12 + x * (C13 + dx * (C14 + dx * (C15 + dx * (C16 + dx * (C17 + dx * C18)))))))))))))); } else { q = x * x2 * D3 + x2 * x2 * (D4 + x * (D5 + x * (D6 + x * (D7 + x * (D8 + x * (D9 + x * (D10 + x * (D11 + x * (D12 + x * (D13 + dx * (D14 + dx * (D15 + dx * (D16 + dx * D17))))))))))))); } x_hi = (float)x; x_lo = x - x_hi; hx2_hi = x_hi * x_hi / 2; hx2_lo = x_lo * (x + x_hi) / 2; if (ix >= BIAS - 7) - RETURN2PI(hx2_hi + x_hi, hx2_lo + x_lo + q); + RETURNI((hx2_hi + x_hi) + (hx2_lo + x_lo + q)); else - RETURN2PI(x, hx2_lo + q + hx2_hi); + RETURNI(x + (hx2_lo + q + hx2_hi)); } /* Reduce x to (k*ln2 + endpoint[n2] + r1 + r2). */ fn = rnint((double)x * INV_L); n = irint(fn); n2 = (unsigned)n % INTERVALS; k = n >> LOG2_INTERVALS; r1 = x - fn * L1; r2 = fn * -L2; r = r1 + r2; /* Prepare scale factor. */ v.e = 1; v.xbits.expsign = BIAS + k; twopk = v.e; /* * Evaluate lower terms of * expl(endpoint[n2] + r1 + r2) = tbl[n2] * expl(r1 + r2). 
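 * (Spelled out, with A2 etc. taken from k_expl.h: writing r = r1 + r2
 * and letting q collect r2 plus the higher-order terms, we have
 * exp(r) ~= 1 + r1 + q, so
 *
 *	tbl[n2] * exp(r) ~= tbl[n2].hi
 *	    + (tbl[n2].hi * r1 + t * q + tbl[n2].lo * (r1 + 1))
 *
 * which is how the SUM2P() calls below split the result into a
 * dominant hi part and a small correction; the k == 0 case also
 * subtracts 1 from the hi part for expm1l().)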
*/ dr = r; q = r2 + r * r * (A2 + r * (A3 + r * (A4 + r * (A5 + r * (A6 + dr * (A7 + dr * (A8 + dr * (A9 + dr * A10)))))))); t = tbl[n2].lo + tbl[n2].hi; if (k == 0) { t = SUM2P(tbl[n2].hi - 1, tbl[n2].lo * (r1 + 1) + t * q + tbl[n2].hi * r1); RETURNI(t); } if (k == -1) { t = SUM2P(tbl[n2].hi - 2, tbl[n2].lo * (r1 + 1) + t * q + tbl[n2].hi * r1); RETURNI(t / 2); } if (k < -7) { t = SUM2P(tbl[n2].hi, tbl[n2].lo + t * (q + r1)); RETURNI(t * twopk - 1); } if (k > 2 * LDBL_MANT_DIG - 1) { t = SUM2P(tbl[n2].hi, tbl[n2].lo + t * (q + r1)); if (k == LDBL_MAX_EXP) RETURNI(t * 2 * 0x1p16383L - 1); RETURNI(t * twopk - 1); } v.xbits.expsign = BIAS - k; twomk = v.e; if (k > LDBL_MANT_DIG - 1) t = SUM2P(tbl[n2].hi, tbl[n2].lo - twomk + t * (q + r1)); else t = SUM2P(tbl[n2].hi - twomk, tbl[n2].lo + t * (q + r1)); RETURNI(t * twopk); } diff --git a/lib/msun/ld128/s_logl.c b/lib/msun/ld128/s_logl.c index 40a22c0f1a8c..bc538840a760 100644 --- a/lib/msun/ld128/s_logl.c +++ b/lib/msun/ld128/s_logl.c @@ -1,740 +1,734 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2007-2013 Bruce D. Evans * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /** * Implementation of the natural logarithm of x for 128-bit format. * * First decompose x into its base 2 representation: * * log(x) = log(X * 2**k), where X is in [1, 2) * = log(X) + k * log(2). * * Let X = X_i + e, where X_i is the center of one of the intervals * [-1.0/256, 1.0/256), [1.0/256, 3.0/256), .... [2.0-1.0/256, 2.0+1.0/256) * and X is in this interval. Then * * log(X) = log(X_i + e) * = log(X_i * (1 + e / X_i)) * = log(X_i) + log(1 + e / X_i). * * The values log(X_i) are tabulated below. Let d = e / X_i and use * * log(1 + d) = p(d) * * where p(d) = d - 0.5*d*d + ... is a special minimax polynomial of * suitably high degree. * * To get sufficiently small roundoff errors, k * log(2), log(X_i), and * sometimes (if |k| is not large) the first term in p(d) must be evaluated * and added up in extra precision. Extra precision is not needed for the * rest of p(d). In the worst case when k = 0 and log(X_i) is 0, the final * error is controlled mainly by the error in the second term in p(d). The * error in this term itself is at most 0.5 ulps from the d*d operation in * it. 
The error in this term relative to the first term is thus at most
 * 0.5 * |-0.5| * |d| < 1.0/1024 ulps.  We aim for an accumulated error of
 * at most twice this at the point of the final rounding step.  Thus the
 * final error should be at most 0.5 + 1.0/512 = 0.5020 ulps.  Exhaustive
 * testing of a float variant of this function showed a maximum final error
 * of 0.5008 ulps.  Non-exhaustive testing of a double variant of this
 * function showed a maximum final error of 0.5078 ulps (near 1+1.0/256).
 *
 * We made the maximum of |d| (and thus the total relative error and the
 * degree of p(d)) small by using a large number of intervals.  Using
 * centers of intervals instead of endpoints reduces this maximum by a
 * factor of 2 for a given number of intervals.  p(d) is special only
 * in beginning with the Taylor coefficients 0 + 1*d, which tends to happen
 * naturally.  The most accurate minimax polynomial of a given degree might
 * be different, but then we wouldn't want it since we would have to do
 * extra work to avoid roundoff error (especially for P0*d instead of d).
 */

#ifdef DEBUG
#include <assert.h>
#include <fenv.h>
#endif

#include "fpmath.h"
#include "math.h"
#ifndef NO_STRUCT_RETURN
#define STRUCT_RETURN
#endif
#include "math_private.h"

#if !defined(NO_UTAB) && !defined(NO_UTABL)
#define USE_UTAB
#endif

/*
 * Domain [-0.005280, 0.004838], range ~[-1.1577e-37, 1.1582e-37]:
 * |log(1 + d)/d - p(d)| < 2**-122.7
 */
static const long double
P2 = -0.5L,
P3 =  3.33333333333333333333333333333233795e-1L,	/*  0x15555555555555555555555554d42.0p-114L */
P4 = -2.49999999999999999999999999941139296e-1L,	/* -0x1ffffffffffffffffffffffdab14e.0p-115L */
P5 =  2.00000000000000000000000085468039943e-1L,	/*  0x19999999999999999999a6d3567f4.0p-115L */
P6 = -1.66666666666666666666696142372698408e-1L,	/* -0x15555555555555555567267a58e13.0p-115L */
P7 =  1.42857142857142857119522943477166120e-1L,	/*  0x1249249249249248ed79a0ae434de.0p-115L */
P8 = -1.24999999999999994863289015033581301e-1L;	/* -0x1fffffffffffffa13e91765e46140.0p-116L */

/* Double precision gives ~ 53 + log2(P9 * max(|d|)**8) ~= 120 bits. */
static const double
P9  =  1.1111111111111401e-1,		/*  0x1c71c71c71c7ed.0p-56 */
P10 = -1.0000000000040135e-1,		/* -0x199999999a0a92.0p-56 */
P11 =  9.0909090728136258e-2,		/*  0x1745d173962111.0p-56 */
P12 = -8.3333318851855284e-2,		/* -0x1555551722c7a3.0p-56 */
P13 =  7.6928634666404178e-2,		/*  0x13b1985204a4ae.0p-56 */
P14 = -7.1626810078462499e-2;		/* -0x12562276cdc5d0.0p-56 */

static volatile const double zero = 0;

#define INTERVALS	128
#define LOG2_INTERVALS	7
#define TSIZE		(INTERVALS + 1)
#define G(i)		(T[(i)].G)
#define F_hi(i)		(T[(i)].F_hi)
#define F_lo(i)		(T[(i)].F_lo)
#define ln2_hi		F_hi(TSIZE - 1)
#define ln2_lo		F_lo(TSIZE - 1)
#define E(i)		(U[(i)].E)
#define H(i)		(U[(i)].H)

static const struct {
        float   G;			/* 1/(1 + i/128) rounded to 8/9 bits */
        float   F_hi;			/* log(1 / G_i) rounded (see below) */
        /* The compiler will insert 8 bytes of padding here. */
        long double F_lo;		/* next 113 bits for log(1 / G_i) */
} T[TSIZE] = {
        /*
         * ln2_hi and each F_hi(i) are rounded to a number of bits that
         * makes F_hi(i) + dk*ln2_hi exact for all i and all dk.
         *
         * The last entry (for X just below 2) is used to define ln2_hi
         * and ln2_lo, to ensure that F_hi(i) and F_lo(i) cancel exactly
         * with dk*ln2_hi and dk*ln2_lo, respectively, when dk = -1.
         * This is needed for accuracy when x is just below 1.
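         * (Outline of why the rounding achieves exactness: ln2_hi =
         * 0xb17218.0p-24 has only 24 significant bits, and |dk| <=
         * 16445 < 2**15, so the product dk*ln2_hi needs at most
         * 24 + 15 = 39 significant bits and is computed exactly;
         * F_hi(i) is rounded so that its bits line up with those of
         * dk*ln2_hi, making the sum exact as well.)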
(To avoid * special cases, such x are "reduced" strangely to X just below * 2 and dk = -1, and then the exact cancellation is needed * because any the error from any non-exactness would be too * large). * * The relevant range of dk is [-16445, 16383]. The maximum number * of bits in F_hi(i) that works is very dependent on i but has * a minimum of 93. We only need about 12 bits in F_hi(i) for * it to provide enough extra precision. * * We round F_hi(i) to 24 bits so that it can have type float, * mainly to minimize the size of the table. Using all 24 bits * in a float for it automatically satisfies the above constraints. */ 0x800000.0p-23, 0, 0, 0xfe0000.0p-24, 0x8080ac.0p-30, -0x14ee431dae6674afa0c4bfe16e8fd.0p-144L, 0xfc0000.0p-24, 0x8102b3.0p-29, -0x1db29ee2d83717be918e1119642ab.0p-144L, 0xfa0000.0p-24, 0xc24929.0p-29, 0x1191957d173697cf302cc9476f561.0p-143L, 0xf80000.0p-24, 0x820aec.0p-28, 0x13ce8888e02e78eba9b1113bc1c18.0p-142L, 0xf60000.0p-24, 0xa33577.0p-28, -0x17a4382ce6eb7bfa509bec8da5f22.0p-142L, 0xf48000.0p-24, 0xbc42cb.0p-28, -0x172a21161a107674986dcdca6709c.0p-143L, 0xf30000.0p-24, 0xd57797.0p-28, -0x1e09de07cb958897a3ea46e84abb3.0p-142L, 0xf10000.0p-24, 0xf7518e.0p-28, 0x1ae1eec1b036c484993c549c4bf40.0p-151L, 0xef0000.0p-24, 0x8cb9df.0p-27, -0x1d7355325d560d9e9ab3d6ebab580.0p-141L, 0xed8000.0p-24, 0x999ec0.0p-27, -0x1f9f02d256d5037108f4ec21e48cd.0p-142L, 0xec0000.0p-24, 0xa6988b.0p-27, -0x16fc0a9d12c17a70f7a684c596b12.0p-143L, 0xea0000.0p-24, 0xb80698.0p-27, 0x15d581c1e8da99ded322fb08b8462.0p-141L, 0xe80000.0p-24, 0xc99af3.0p-27, -0x1535b3ba8f150ae09996d7bb4653e.0p-143L, 0xe70000.0p-24, 0xd273b2.0p-27, 0x163786f5251aefe0ded34c8318f52.0p-145L, 0xe50000.0p-24, 0xe442c0.0p-27, 0x1bc4b2368e32d56699c1799a244d4.0p-144L, 0xe38000.0p-24, 0xf1b83f.0p-27, 0x1c6090f684e6766abceccab1d7174.0p-141L, 0xe20000.0p-24, 0xff448a.0p-27, -0x1890aa69ac9f4215f93936b709efb.0p-142L, 0xe08000.0p-24, 0x8673f6.0p-26, 0x1b9985194b6affd511b534b72a28e.0p-140L, 0xdf0000.0p-24, 0x8d515c.0p-26, -0x1dc08d61c6ef1d9b2ef7e68680598.0p-143L, 0xdd8000.0p-24, 0x943a9e.0p-26, -0x1f72a2dac729b3f46662238a9425a.0p-142L, 0xdc0000.0p-24, 0x9b2fe6.0p-26, -0x1fd4dfd3a0afb9691aed4d5e3df94.0p-140L, 0xda8000.0p-24, 0xa2315d.0p-26, -0x11b26121629c46c186384993e1c93.0p-142L, 0xd90000.0p-24, 0xa93f2f.0p-26, 0x1286d633e8e5697dc6a402a56fce1.0p-141L, 0xd78000.0p-24, 0xb05988.0p-26, 0x16128eba9367707ebfa540e45350c.0p-144L, 0xd60000.0p-24, 0xb78094.0p-26, 0x16ead577390d31ef0f4c9d43f79b2.0p-140L, 0xd50000.0p-24, 0xbc4c6c.0p-26, 0x151131ccf7c7b75e7d900b521c48d.0p-141L, 0xd38000.0p-24, 0xc3890a.0p-26, -0x115e2cd714bd06508aeb00d2ae3e9.0p-140L, 0xd20000.0p-24, 0xcad2d7.0p-26, -0x1847f406ebd3af80485c2f409633c.0p-142L, 0xd10000.0p-24, 0xcfb620.0p-26, 0x1c2259904d686581799fbce0b5f19.0p-141L, 0xcf8000.0p-24, 0xd71653.0p-26, 0x1ece57a8d5ae54f550444ecf8b995.0p-140L, 0xce0000.0p-24, 0xde843a.0p-26, -0x1f109d4bc4595412b5d2517aaac13.0p-141L, 0xcd0000.0p-24, 0xe37fde.0p-26, 0x1bc03dc271a74d3a85b5b43c0e727.0p-141L, 0xcb8000.0p-24, 0xeb050c.0p-26, -0x1bf2badc0df841a71b79dd5645b46.0p-145L, 0xca0000.0p-24, 0xf29878.0p-26, -0x18efededd89fbe0bcfbe6d6db9f66.0p-147L, 0xc90000.0p-24, 0xf7ad6f.0p-26, 0x1373ff977baa6911c7bafcb4d84fb.0p-141L, 0xc80000.0p-24, 0xfcc8e3.0p-26, 0x196766f2fb328337cc050c6d83b22.0p-140L, 0xc68000.0p-24, 0x823f30.0p-25, 0x19bd076f7c434e5fcf1a212e2a91e.0p-139L, 0xc58000.0p-24, 0x84d52c.0p-25, -0x1a327257af0f465e5ecab5f2a6f81.0p-139L, 0xc40000.0p-24, 0x88bc74.0p-25, 0x113f23def19c5a0fe396f40f1dda9.0p-141L, 0xc30000.0p-24, 0x8b5ae6.0p-25, 
0x1759f6e6b37de945a049a962e66c6.0p-139L, 0xc20000.0p-24, 0x8dfccb.0p-25, 0x1ad35ca6ed5147bdb6ddcaf59c425.0p-141L, 0xc10000.0p-24, 0x90a22b.0p-25, 0x1a1d71a87deba46bae9827221dc98.0p-139L, 0xbf8000.0p-24, 0x94a0d8.0p-25, -0x139e5210c2b730e28aba001a9b5e0.0p-140L, 0xbe8000.0p-24, 0x974f16.0p-25, -0x18f6ebcff3ed72e23e13431adc4a5.0p-141L, 0xbd8000.0p-24, 0x9a00f1.0p-25, -0x1aa268be39aab7148e8d80caa10b7.0p-139L, 0xbc8000.0p-24, 0x9cb672.0p-25, -0x14c8815839c5663663d15faed7771.0p-139L, 0xbb0000.0p-24, 0xa0cda1.0p-25, 0x1eaf46390dbb2438273918db7df5c.0p-141L, 0xba0000.0p-24, 0xa38c6e.0p-25, 0x138e20d831f698298adddd7f32686.0p-141L, 0xb90000.0p-24, 0xa64f05.0p-25, -0x1e8d3c41123615b147a5d47bc208f.0p-142L, 0xb80000.0p-24, 0xa91570.0p-25, 0x1ce28f5f3840b263acb4351104631.0p-140L, 0xb70000.0p-24, 0xabdfbb.0p-25, -0x186e5c0a42423457e22d8c650b355.0p-139L, 0xb60000.0p-24, 0xaeadef.0p-25, -0x14d41a0b2a08a465dc513b13f567d.0p-143L, 0xb50000.0p-24, 0xb18018.0p-25, 0x16755892770633947ffe651e7352f.0p-139L, 0xb40000.0p-24, 0xb45642.0p-25, -0x16395ebe59b15228bfe8798d10ff0.0p-142L, 0xb30000.0p-24, 0xb73077.0p-25, 0x1abc65c8595f088b61a335f5b688c.0p-140L, 0xb20000.0p-24, 0xba0ec4.0p-25, -0x1273089d3dad88e7d353e9967d548.0p-139L, 0xb10000.0p-24, 0xbcf133.0p-25, 0x10f9f67b1f4bbf45de06ecebfaf6d.0p-139L, 0xb00000.0p-24, 0xbfd7d2.0p-25, -0x109fab904864092b34edda19a831e.0p-140L, 0xaf0000.0p-24, 0xc2c2ac.0p-25, -0x1124680aa43333221d8a9b475a6ba.0p-139L, 0xae8000.0p-24, 0xc439b3.0p-25, -0x1f360cc4710fbfe24b633f4e8d84d.0p-140L, 0xad8000.0p-24, 0xc72afd.0p-25, -0x132d91f21d89c89c45003fc5d7807.0p-140L, 0xac8000.0p-24, 0xca20a2.0p-25, -0x16bf9b4d1f8da8002f2449e174504.0p-139L, 0xab8000.0p-24, 0xcd1aae.0p-25, 0x19deb5ce6a6a8717d5626e16acc7d.0p-141L, 0xaa8000.0p-24, 0xd0192f.0p-25, 0x1a29fb48f7d3ca87dabf351aa41f4.0p-139L, 0xaa0000.0p-24, 0xd19a20.0p-25, 0x1127d3c6457f9d79f51dcc73014c9.0p-141L, 0xa90000.0p-24, 0xd49f6a.0p-25, -0x1ba930e486a0ac42d1bf9199188e7.0p-141L, 0xa80000.0p-24, 0xd7a94b.0p-25, -0x1b6e645f31549dd1160bcc45c7e2c.0p-139L, 0xa70000.0p-24, 0xdab7d0.0p-25, 0x1118a425494b610665377f15625b6.0p-140L, 0xa68000.0p-24, 0xdc40d5.0p-25, 0x1966f24d29d3a2d1b2176010478be.0p-140L, 0xa58000.0p-24, 0xdf566d.0p-25, -0x1d8e52eb2248f0c95dd83626d7333.0p-142L, 0xa48000.0p-24, 0xe270ce.0p-25, -0x1ee370f96e6b67ccb006a5b9890ea.0p-140L, 0xa40000.0p-24, 0xe3ffce.0p-25, 0x1d155324911f56db28da4d629d00a.0p-140L, 0xa30000.0p-24, 0xe72179.0p-25, -0x1fe6e2f2f867d8f4d60c713346641.0p-140L, 0xa20000.0p-24, 0xea4812.0p-25, 0x1b7be9add7f4d3b3d406b6cbf3ce5.0p-140L, 0xa18000.0p-24, 0xebdd3d.0p-25, 0x1b3cfb3f7511dd73692609040ccc2.0p-139L, 0xa08000.0p-24, 0xef0b5b.0p-25, -0x1220de1f7301901b8ad85c25afd09.0p-139L, 0xa00000.0p-24, 0xf0a451.0p-25, -0x176364c9ac81cc8a4dfb804de6867.0p-140L, 0x9f0000.0p-24, 0xf3da16.0p-25, 0x1eed6b9aafac8d42f78d3e65d3727.0p-141L, 0x9e8000.0p-24, 0xf576e9.0p-25, 0x1d593218675af269647b783d88999.0p-139L, 0x9d8000.0p-24, 0xf8b47c.0p-25, -0x13e8eb7da053e063714615f7cc91d.0p-144L, 0x9d0000.0p-24, 0xfa553f.0p-25, 0x1c063259bcade02951686d5373aec.0p-139L, 0x9c0000.0p-24, 0xfd9ac5.0p-25, 0x1ef491085fa3c1649349630531502.0p-139L, 0x9b8000.0p-24, 0xff3f8c.0p-25, 0x1d607a7c2b8c5320619fb9433d841.0p-139L, 0x9a8000.0p-24, 0x814697.0p-24, -0x12ad3817004f3f0bdff99f932b273.0p-138L, 0x9a0000.0p-24, 0x821b06.0p-24, -0x189fc53117f9e54e78103a2bc1767.0p-141L, 0x990000.0p-24, 0x83c5f8.0p-24, 0x14cf15a048907b7d7f47ddb45c5a3.0p-139L, 0x988000.0p-24, 0x849c7d.0p-24, 0x1cbb1d35fb82873b04a9af1dd692c.0p-138L, 0x978000.0p-24, 0x864ba6.0p-24, 
0x1128639b814f9b9770d8cb6573540.0p-138L, 0x970000.0p-24, 0x87244c.0p-24, 0x184733853300f002e836dfd47bd41.0p-139L, 0x968000.0p-24, 0x87fdaa.0p-24, 0x109d23aef77dd5cd7cc94306fb3ff.0p-140L, 0x958000.0p-24, 0x89b293.0p-24, -0x1a81ef367a59de2b41eeebd550702.0p-138L, 0x950000.0p-24, 0x8a8e20.0p-24, -0x121ad3dbb2f45275c917a30df4ac9.0p-138L, 0x948000.0p-24, 0x8b6a6a.0p-24, -0x1cfb981628af71a89df4e6df2e93b.0p-139L, 0x938000.0p-24, 0x8d253a.0p-24, -0x1d21730ea76cfdec367828734cae5.0p-139L, 0x930000.0p-24, 0x8e03c2.0p-24, 0x135cc00e566f76b87333891e0dec4.0p-138L, 0x928000.0p-24, 0x8ee30d.0p-24, -0x10fcb5df257a263e3bf446c6e3f69.0p-140L, 0x918000.0p-24, 0x90a3ee.0p-24, -0x16e171b15433d723a4c7380a448d8.0p-139L, 0x910000.0p-24, 0x918587.0p-24, -0x1d050da07f3236f330972da2a7a87.0p-139L, 0x908000.0p-24, 0x9267e7.0p-24, 0x1be03669a5268d21148c6002becd3.0p-139L, 0x8f8000.0p-24, 0x942f04.0p-24, 0x10b28e0e26c336af90e00533323ba.0p-139L, 0x8f0000.0p-24, 0x9513c3.0p-24, 0x1a1d820da57cf2f105a89060046aa.0p-138L, 0x8e8000.0p-24, 0x95f950.0p-24, -0x19ef8f13ae3cf162409d8ea99d4c0.0p-139L, 0x8e0000.0p-24, 0x96dfab.0p-24, -0x109e417a6e507b9dc10dac743ad7a.0p-138L, 0x8d0000.0p-24, 0x98aed2.0p-24, 0x10d01a2c5b0e97c4990b23d9ac1f5.0p-139L, 0x8c8000.0p-24, 0x9997a2.0p-24, -0x1d6a50d4b61ea74540bdd2aa99a42.0p-138L, 0x8c0000.0p-24, 0x9a8145.0p-24, 0x1b3b190b83f9527e6aba8f2d783c1.0p-138L, 0x8b8000.0p-24, 0x9b6bbf.0p-24, 0x13a69fad7e7abe7ba81c664c107e0.0p-138L, 0x8b0000.0p-24, 0x9c5711.0p-24, -0x11cd12316f576aad348ae79867223.0p-138L, 0x8a8000.0p-24, 0x9d433b.0p-24, 0x1c95c444b807a246726b304ccae56.0p-139L, 0x898000.0p-24, 0x9f1e22.0p-24, -0x1b9c224ea698c2f9b47466d6123fe.0p-139L, 0x890000.0p-24, 0xa00ce1.0p-24, 0x125ca93186cf0f38b4619a2483399.0p-141L, 0x888000.0p-24, 0xa0fc80.0p-24, -0x1ee38a7bc228b3597043be78eaf49.0p-139L, 0x880000.0p-24, 0xa1ed00.0p-24, -0x1a0db876613d204147dc69a07a649.0p-138L, 0x878000.0p-24, 0xa2de62.0p-24, 0x193224e8516c008d3602a7b41c6e8.0p-139L, 0x870000.0p-24, 0xa3d0a9.0p-24, 0x1fa28b4d2541aca7d5844606b2421.0p-139L, 0x868000.0p-24, 0xa4c3d6.0p-24, 0x1c1b5760fb4571acbcfb03f16daf4.0p-138L, 0x858000.0p-24, 0xa6acea.0p-24, 0x1fed5d0f65949c0a345ad743ae1ae.0p-140L, 0x850000.0p-24, 0xa7a2d4.0p-24, 0x1ad270c9d749362382a7688479e24.0p-140L, 0x848000.0p-24, 0xa899ab.0p-24, 0x199ff15ce532661ea9643a3a2d378.0p-139L, 0x840000.0p-24, 0xa99171.0p-24, 0x1a19e15ccc45d257530a682b80490.0p-139L, 0x838000.0p-24, 0xaa8a28.0p-24, -0x121a14ec532b35ba3e1f868fd0b5e.0p-140L, 0x830000.0p-24, 0xab83d1.0p-24, 0x1aee319980bff3303dd481779df69.0p-139L, 0x828000.0p-24, 0xac7e6f.0p-24, -0x18ffd9e3900345a85d2d86161742e.0p-140L, 0x820000.0p-24, 0xad7a03.0p-24, -0x1e4db102ce29f79b026b64b42caa1.0p-140L, 0x818000.0p-24, 0xae768f.0p-24, 0x17c35c55a04a82ab19f77652d977a.0p-141L, 0x810000.0p-24, 0xaf7415.0p-24, 0x1448324047019b48d7b98c1cf7234.0p-138L, 0x808000.0p-24, 0xb07298.0p-24, -0x1750ee3915a197e9c7359dd94152f.0p-138L, 0x800000.0p-24, 0xb17218.0p-24, -0x105c610ca86c3898cff81a12a17e2.0p-141L, }; #ifdef USE_UTAB static const struct { float H; /* 1 + i/INTERVALS (exact) */ float E; /* H(i) * G(i) - 1 (exact) */ } U[TSIZE] = { 0x800000.0p-23, 0, 0x810000.0p-23, -0x800000.0p-37, 0x820000.0p-23, -0x800000.0p-35, 0x830000.0p-23, -0x900000.0p-34, 0x840000.0p-23, -0x800000.0p-33, 0x850000.0p-23, -0xc80000.0p-33, 0x860000.0p-23, -0xa00000.0p-36, 0x870000.0p-23, 0x940000.0p-33, 0x880000.0p-23, 0x800000.0p-35, 0x890000.0p-23, -0xc80000.0p-34, 0x8a0000.0p-23, 0xe00000.0p-36, 0x8b0000.0p-23, 0x900000.0p-33, 0x8c0000.0p-23, -0x800000.0p-35, 0x8d0000.0p-23, 
-0xe00000.0p-33, 0x8e0000.0p-23, 0x880000.0p-33, 0x8f0000.0p-23, -0xa80000.0p-34, 0x900000.0p-23, -0x800000.0p-35, 0x910000.0p-23, 0x800000.0p-37, 0x920000.0p-23, 0x900000.0p-35, 0x930000.0p-23, 0xd00000.0p-35, 0x940000.0p-23, 0xe00000.0p-35, 0x950000.0p-23, 0xc00000.0p-35, 0x960000.0p-23, 0xe00000.0p-36, 0x970000.0p-23, -0x800000.0p-38, 0x980000.0p-23, -0xc00000.0p-35, 0x990000.0p-23, -0xd00000.0p-34, 0x9a0000.0p-23, 0x880000.0p-33, 0x9b0000.0p-23, 0xe80000.0p-35, 0x9c0000.0p-23, -0x800000.0p-35, 0x9d0000.0p-23, 0xb40000.0p-33, 0x9e0000.0p-23, 0x880000.0p-34, 0x9f0000.0p-23, -0xe00000.0p-35, 0xa00000.0p-23, 0x800000.0p-33, 0xa10000.0p-23, -0x900000.0p-36, 0xa20000.0p-23, -0xb00000.0p-33, 0xa30000.0p-23, -0xa00000.0p-36, 0xa40000.0p-23, 0x800000.0p-33, 0xa50000.0p-23, -0xf80000.0p-35, 0xa60000.0p-23, 0x880000.0p-34, 0xa70000.0p-23, -0x900000.0p-33, 0xa80000.0p-23, -0x800000.0p-35, 0xa90000.0p-23, 0x900000.0p-34, 0xaa0000.0p-23, 0xa80000.0p-33, 0xab0000.0p-23, -0xac0000.0p-34, 0xac0000.0p-23, -0x800000.0p-37, 0xad0000.0p-23, 0xf80000.0p-35, 0xae0000.0p-23, 0xf80000.0p-34, 0xaf0000.0p-23, -0xac0000.0p-33, 0xb00000.0p-23, -0x800000.0p-33, 0xb10000.0p-23, -0xb80000.0p-34, 0xb20000.0p-23, -0x800000.0p-34, 0xb30000.0p-23, -0xb00000.0p-35, 0xb40000.0p-23, -0x800000.0p-35, 0xb50000.0p-23, -0xe00000.0p-36, 0xb60000.0p-23, -0x800000.0p-35, 0xb70000.0p-23, -0xb00000.0p-35, 0xb80000.0p-23, -0x800000.0p-34, 0xb90000.0p-23, -0xb80000.0p-34, 0xba0000.0p-23, -0x800000.0p-33, 0xbb0000.0p-23, -0xac0000.0p-33, 0xbc0000.0p-23, 0x980000.0p-33, 0xbd0000.0p-23, 0xbc0000.0p-34, 0xbe0000.0p-23, 0xe00000.0p-36, 0xbf0000.0p-23, -0xb80000.0p-35, 0xc00000.0p-23, -0x800000.0p-33, 0xc10000.0p-23, 0xa80000.0p-33, 0xc20000.0p-23, 0x900000.0p-34, 0xc30000.0p-23, -0x800000.0p-35, 0xc40000.0p-23, -0x900000.0p-33, 0xc50000.0p-23, 0x820000.0p-33, 0xc60000.0p-23, 0x800000.0p-38, 0xc70000.0p-23, -0x820000.0p-33, 0xc80000.0p-23, 0x800000.0p-33, 0xc90000.0p-23, -0xa00000.0p-36, 0xca0000.0p-23, -0xb00000.0p-33, 0xcb0000.0p-23, 0x840000.0p-34, 0xcc0000.0p-23, -0xd00000.0p-34, 0xcd0000.0p-23, 0x800000.0p-33, 0xce0000.0p-23, -0xe00000.0p-35, 0xcf0000.0p-23, 0xa60000.0p-33, 0xd00000.0p-23, -0x800000.0p-35, 0xd10000.0p-23, 0xb40000.0p-33, 0xd20000.0p-23, -0x800000.0p-35, 0xd30000.0p-23, 0xaa0000.0p-33, 0xd40000.0p-23, -0xe00000.0p-35, 0xd50000.0p-23, 0x880000.0p-33, 0xd60000.0p-23, -0xd00000.0p-34, 0xd70000.0p-23, 0x9c0000.0p-34, 0xd80000.0p-23, -0xb00000.0p-33, 0xd90000.0p-23, -0x800000.0p-38, 0xda0000.0p-23, 0xa40000.0p-33, 0xdb0000.0p-23, -0xdc0000.0p-34, 0xdc0000.0p-23, 0xc00000.0p-35, 0xdd0000.0p-23, 0xca0000.0p-33, 0xde0000.0p-23, -0xb80000.0p-34, 0xdf0000.0p-23, 0xd00000.0p-35, 0xe00000.0p-23, 0xc00000.0p-33, 0xe10000.0p-23, -0xf40000.0p-34, 0xe20000.0p-23, 0x800000.0p-37, 0xe30000.0p-23, 0x860000.0p-33, 0xe40000.0p-23, -0xc80000.0p-33, 0xe50000.0p-23, -0xa80000.0p-34, 0xe60000.0p-23, 0xe00000.0p-36, 0xe70000.0p-23, 0x880000.0p-33, 0xe80000.0p-23, -0xe00000.0p-33, 0xe90000.0p-23, -0xfc0000.0p-34, 0xea0000.0p-23, -0x800000.0p-35, 0xeb0000.0p-23, 0xe80000.0p-35, 0xec0000.0p-23, 0x900000.0p-33, 0xed0000.0p-23, 0xe20000.0p-33, 0xee0000.0p-23, -0xac0000.0p-33, 0xef0000.0p-23, -0xc80000.0p-34, 0xf00000.0p-23, -0x800000.0p-35, 0xf10000.0p-23, 0x800000.0p-35, 0xf20000.0p-23, 0xb80000.0p-34, 0xf30000.0p-23, 0x940000.0p-33, 0xf40000.0p-23, 0xc80000.0p-33, 0xf50000.0p-23, -0xf20000.0p-33, 0xf60000.0p-23, -0xc80000.0p-33, 0xf70000.0p-23, -0xa20000.0p-33, 0xf80000.0p-23, -0x800000.0p-33, 0xf90000.0p-23, -0xc40000.0p-34, 0xfa0000.0p-23, 
-0x900000.0p-34, 0xfb0000.0p-23, -0xc80000.0p-35, 0xfc0000.0p-23, -0x800000.0p-35, 0xfd0000.0p-23, -0x900000.0p-36, 0xfe0000.0p-23, -0x800000.0p-37, 0xff0000.0p-23, -0x800000.0p-39, 0x800000.0p-22, 0, }; #endif /* USE_UTAB */ #ifdef STRUCT_RETURN #define RETURN1(rp, v) do { \ (rp)->hi = (v); \ (rp)->lo_set = 0; \ return; \ } while (0) #define RETURN2(rp, h, l) do { \ (rp)->hi = (h); \ (rp)->lo = (l); \ (rp)->lo_set = 1; \ return; \ } while (0) struct ld { long double hi; long double lo; int lo_set; }; #else #define RETURN1(rp, v) RETURNF(v) #define RETURN2(rp, h, l) RETURNI((h) + (l)) #endif #ifdef STRUCT_RETURN static inline __always_inline void k_logl(long double x, struct ld *rp) #else long double logl(long double x) #endif { long double d, val_hi, val_lo; double dd, dk; uint64_t lx, llx; int i, k; uint16_t hx; EXTRACT_LDBL128_WORDS(hx, lx, llx, x); k = -16383; #if 0 /* Hard to do efficiently. Don't do it until we support all modes. */ if (x == 1) RETURN1(rp, 0); /* log(1) = +0 in all rounding modes */ #endif if (hx == 0 || hx >= 0x8000) { /* zero, negative or subnormal? */ if (((hx & 0x7fff) | lx | llx) == 0) RETURN1(rp, -1 / zero); /* log(+-0) = -Inf */ if (hx != 0) /* log(neg or NaN) = qNaN: */ RETURN1(rp, (x - x) / zero); x *= 0x1.0p113; /* subnormal; scale up x */ EXTRACT_LDBL128_WORDS(hx, lx, llx, x); k = -16383 - 113; } else if (hx >= 0x7fff) RETURN1(rp, x + x); /* log(Inf or NaN) = Inf or qNaN */ #ifndef STRUCT_RETURN ENTERI(); #endif k += hx; dk = k; /* Scale x to be in [1, 2). */ SET_LDBL_EXPSIGN(x, 0x3fff); /* 0 <= i <= INTERVALS: */ #define L2I (49 - LOG2_INTERVALS) i = (lx + (1LL << (L2I - 2))) >> (L2I - 1); /* * -0.005280 < d < 0.004838. In particular, the infinite- * precision |d| is <= 2**-7. Rounding of G(i) to 8 bits * ensures that d is representable without extra precision for * this bound on |d| (since when this calculation is expressed * as x*G(i)-1, the multiplication needs as many extra bits as * G(i) has and the subtraction cancels 8 bits). But for * most i (107 cases out of 129), the infinite-precision |d| * is <= 2**-8. G(i) is rounded to 9 bits for such i to give * better accuracy (this works by improving the bound on |d|, * which in turn allows rounding to 9 bits in more cases). * This is only important when the original x is near 1 -- it * lets us avoid using a special method to give the desired * accuracy for such x. */ if (0) d = x * G(i) - 1; else { #ifdef USE_UTAB d = (x - H(i)) * G(i) + E(i); #else long double x_hi; double x_lo; /* * Split x into x_hi + x_lo to calculate x*G(i)-1 exactly. * G(i) has at most 9 bits, so the splitting point is not * critical. */ INSERT_LDBL128_WORDS(x_hi, 0x3fff, lx, llx & 0xffffffffff000000ULL); x_lo = x - x_hi; d = x_hi * G(i) - 1 + x_lo * G(i); #endif } /* * Our algorithm depends on exact cancellation of F_lo(i) and * F_hi(i) with dk*ln_2_lo and dk*ln2_hi when k is -1 and i is * at the end of the table. This and other technical complications * make it difficult to avoid the double scaling in (dk*ln2) * * log(base) for base != e without losing more accuracy and/or * efficiency than is gained. */ /* * Use double precision operations wherever possible, since * long double operations are emulated and were very slow on * the old sparc64 and unknown on the newer aarch64 and riscv * machines. Also, don't try to improve parallelism by * increasing the number of operations, since any parallelism * on such machines is needed for the emulation. Horner's * method is good for this, and is also good for accuracy. 
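 * (Aside: the USE_UTAB expression d = (x - H(i))*G(i) + E(i) above
 * equals x*G(i) - 1 by algebra, since E(i) = H(i)*G(i) - 1 exactly:
 *
 *	(x - H(i))*G(i) + E(i) = x*G(i) - H(i)*G(i) + H(i)*G(i) - 1
 *	                       = x*G(i) - 1
 *
 * and x - H(i) is computed exactly by Sterbenz' lemma, since x is
 * within 1/256 of H(i); the bound on |d| discussed above keeps the
 * remaining operations representable.)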
* Horner's method doesn't handle the `lo' term well, either
 * for efficiency or accuracy.  However, for accuracy we
 * evaluate d * d * P2 separately to take advantage of P2
 * being exact, and this gives a good place to sum the `lo'
 * term too.
 */
        dd = (double)d;
        val_lo = d * d * d * (P3 + d * (P4 + d * (P5 + d * (P6 + d * (P7 +
            d * (P8 + dd * (P9 + dd * (P10 + dd * (P11 + dd * (P12 + dd *
            (P13 + dd * P14))))))))))) + (F_lo(i) + dk * ln2_lo) +
            d * d * P2;
        val_hi = d;
#ifdef DEBUG
        if (fetestexcept(FE_UNDERFLOW))
                breakpoint();
#endif

        _3sumF(val_hi, val_lo, F_hi(i) + dk * ln2_hi);
        RETURN2(rp, val_hi, val_lo);
}

long double
log1pl(long double x)
{
        long double d, d_hi, f_lo, val_hi, val_lo;
        long double f_hi, twopminusk;
        double d_lo, dd, dk;
        uint64_t lx, llx;
        int i, k;
        int16_t ax, hx;

-       DOPRINT_START(&x);
        EXTRACT_LDBL128_WORDS(hx, lx, llx, x);
        if (hx < 0x3fff) {      /* x < 1, or x neg NaN */
                ax = hx & 0x7fff;
                if (ax >= 0x3fff) {     /* x <= -1, or x neg NaN */
                        if (ax == 0x3fff && (lx | llx) == 0)
-                               RETURNP(-1 / zero);     /* log1p(-1) = -Inf */
+                               RETURNF(-1 / zero);     /* log1p(-1) = -Inf */
                        /* log1p(x < -1, or x NaN) = qNaN: */
-                       RETURNP((x - x) / (x - x));
+                       RETURNF((x - x) / (x - x));
                }
                if (ax <= 0x3f8d) {     /* |x| < 2**-113 */
                        if ((int)x == 0)
-                               RETURNP(x);     /* x with inexact if x != 0 */
+                               RETURNF(x);     /* x with inexact if x != 0 */
                }
                f_hi = 1;
                f_lo = x;
        } else if (hx >= 0x7fff) {      /* x +Inf or non-neg NaN */
-               RETURNP(x + x); /* log1p(Inf or NaN) = Inf or qNaN */
+               RETURNF(x + x); /* log1p(Inf or NaN) = Inf or qNaN */
        } else if (hx < 0x40e1) {       /* 1 <= x < 2**226 */
                f_hi = x;
                f_lo = 1;
        } else {                        /* 2**226 <= x < +Inf */
                f_hi = x;
                f_lo = 0;               /* avoid underflow of the P3 term */
        }

        ENTERI();

        x = f_hi + f_lo;
        f_lo = (f_hi - x) + f_lo;

        EXTRACT_LDBL128_WORDS(hx, lx, llx, x);
        k = -16383;

        k += hx;
        dk = k;

        SET_LDBL_EXPSIGN(x, 0x3fff);
        twopminusk = 1;
        SET_LDBL_EXPSIGN(twopminusk, 0x7ffe - (hx & 0x7fff));
        f_lo *= twopminusk;

        i = (lx + (1LL << (L2I - 2))) >> (L2I - 1);

        /*
         * x*G(i)-1 (with a reduced x) can be represented exactly, as
         * above, but now we need to evaluate the polynomial on d =
         * (x+f_lo)*G(i)-1 and extra precision is needed for that.
         * Since x+f_lo is a hi+lo decomposition and subtracting 1
         * doesn't lose too many bits, an inexact calculation for
         * f_lo*G(i) is good enough.
         */
        if (0)
                d_hi = x * G(i) - 1;
        else {
#ifdef USE_UTAB
                d_hi = (x - H(i)) * G(i) + E(i);
#else
                long double x_hi;
                double x_lo;

                INSERT_LDBL128_WORDS(x_hi, 0x3fff, lx,
                    llx & 0xffffffffff000000ULL);
                x_lo = x - x_hi;
                d_hi = x_hi * G(i) - 1 + x_lo * G(i);
#endif
        }
        d_lo = f_lo * G(i);

        /*
         * This is _2sumF(d_hi, d_lo) inlined.  The condition
         * (d_hi == 0 || |d_hi| >= |d_lo|) for using _2sumF() is not
         * always satisfied, so it is not clear that this works, but
         * it works in practice.  It works even if it gives a wrong
         * normalized d_lo, since |d_lo| > |d_hi| implies that i is
         * nonzero and d is tiny, so the F(i) term dominates d_lo.
         * In float precision:
         * (By exhaustive testing, the worst case is d_hi = 0x1.bp-25.
         * And if d is only a little tinier than that, we would have
         * another underflow problem for the P3 term; this is also ruled
         * out by exhaustive testing.)
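         * (The three statements below are the classic fast two-sum:
         * for s = a + b computed in working precision,
         *
         *	e = (a - s) + b
         *
         * recovers the rounding error exactly when |a| >= |b|, so that
         * a + b == s + e; here a = d_hi and b = d_lo.)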
*/ d = d_hi + d_lo; d_lo = d_hi - d + d_lo; d_hi = d; dd = (double)d; val_lo = d * d * d * (P3 + d * (P4 + d * (P5 + d * (P6 + d * (P7 + d * (P8 + dd * (P9 + dd * (P10 + dd * (P11 + dd * (P12 + dd * (P13 + dd * P14))))))))))) + (F_lo(i) + dk * ln2_lo + d_lo) + d * d * P2; val_hi = d_hi; #ifdef DEBUG if (fetestexcept(FE_UNDERFLOW)) breakpoint(); #endif _3sumF(val_hi, val_lo, F_hi(i) + dk * ln2_hi); - RETURN2PI(val_hi, val_lo); + RETURNI(val_hi + val_lo); } #ifdef STRUCT_RETURN long double logl(long double x) { struct ld r; ENTERI(); - DOPRINT_START(&x); k_logl(x, &r); RETURNSPI(&r); } /* * 29+113 bit decompositions. The bits are distributed so that the products * of the hi terms are exact in double precision. The types are chosen so * that the products of the hi terms are done in at least double precision, * without any explicit conversions. More natural choices would require a * slow long double precision multiplication. */ static const double invln10_hi = 4.3429448176175356e-1, /* 0x1bcb7b15000000.0p-54 */ invln2_hi = 1.4426950402557850e0; /* 0x17154765000000.0p-52 */ static const long double invln10_lo = 1.41498268538580090791605082294397000e-10L, /* 0x137287195355baaafad33dc323ee3.0p-145L */ invln2_lo = 6.33178418956604368501892137426645911e-10L, /* 0x15c17f0bbbe87fed0691d3e88eb57.0p-143L */ invln10_lo_plus_hi = invln10_lo + invln10_hi, invln2_lo_plus_hi = invln2_lo + invln2_hi; long double log10l(long double x) { struct ld r; long double hi, lo; ENTERI(); - DOPRINT_START(&x); k_logl(x, &r); if (!r.lo_set) - RETURNPI(r.hi); + RETURNI(r.hi); _2sumF(r.hi, r.lo); hi = (float)r.hi; lo = r.lo + (r.hi - hi); - RETURN2PI(invln10_hi * hi, - invln10_lo_plus_hi * lo + invln10_lo * hi); + RETURNI(invln10_hi * hi + (invln10_lo_plus_hi * lo + invln10_lo * hi)); } long double log2l(long double x) { struct ld r; long double hi, lo; ENTERI(); - DOPRINT_START(&x); k_logl(x, &r); if (!r.lo_set) - RETURNPI(r.hi); + RETURNI(r.hi); _2sumF(r.hi, r.lo); hi = (float)r.hi; lo = r.lo + (r.hi - hi); - RETURN2PI(invln2_hi * hi, - invln2_lo_plus_hi * lo + invln2_lo * hi); + RETURNI(invln2_hi * hi + (invln2_lo_plus_hi * lo + invln2_lo * hi)); } #endif /* STRUCT_RETURN */ diff --git a/lib/msun/ld80/s_expl.c b/lib/msun/ld80/s_expl.c index 0571377a3f8d..a41de4d6d1d8 100644 --- a/lib/msun/ld80/s_expl.c +++ b/lib/msun/ld80/s_expl.c @@ -1,279 +1,275 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2009-2013 Steven G. Kargl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Optimized by Bruce D. Evans.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/**
 * Compute the exponential of x for Intel 80-bit format.  This is based on:
 *
 *   PTP Tang, "Table-driven implementation of the exponential function
 *   in IEEE floating-point arithmetic," ACM Trans. Math. Soft., 15,
 *   144-157 (1989).
 *
 * where the 32 table entries have been expanded to INTERVALS (see below).
 */

#include <float.h>

#ifdef __i386__
#include <ieeefp.h>
#endif

#include "fpmath.h"
#include "math.h"
#include "math_private.h"
#include "k_expl.h"

/* XXX Prevent compilers from erroneously constant folding these: */
static const volatile long double
huge = 0x1p10000L,
tiny = 0x1p-10000L;

static const long double
twom10000 = 0x1p-10000L;

static const union IEEEl2bits
/* log(2**16384 - 0.5) rounded towards zero: */
/* log(2**16384 - 0.5 + 1) rounded towards zero for expm1l() is the same: */
o_thresholdu = LD80C(0xb17217f7d1cf79ab, 13, 11356.5234062941439488L),
#define o_threshold	(o_thresholdu.e)
/* log(2**(-16381-64-1)) rounded towards zero: */
u_thresholdu = LD80C(0xb21dfe7f09e2baa9, 13, -11399.4985314888605581L);
#define u_threshold	(u_thresholdu.e)

long double
expl(long double x)
{
        union IEEEl2bits u;
        long double hi, lo, t, twopk;
        int k;
        uint16_t hx, ix;

-       DOPRINT_START(&x);
-
        /* Filter out exceptional cases. */
        u.e = x;
        hx = u.xbits.expsign;
        ix = hx & 0x7fff;
        if (ix >= BIAS + 13) {          /* |x| >= 8192 or x is NaN */
                if (ix == BIAS + LDBL_MAX_EXP) {
                        if (hx & 0x8000)  /* x is -Inf, -NaN or unsupported */
-                               RETURNP(-1 / x);
-                       RETURNP(x + x); /* x is +Inf, +NaN or unsupported */
+                               RETURNF(-1 / x);
+                       RETURNF(x + x); /* x is +Inf, +NaN or unsupported */
                }
                if (x > o_threshold)
-                       RETURNP(huge * huge);
+                       RETURNF(huge * huge);
                if (x < u_threshold)
-                       RETURNP(tiny * tiny);
+                       RETURNF(tiny * tiny);
        } else if (ix < BIAS - 75) {    /* |x| < 0x1p-75 (includes pseudos) */
-               RETURN2P(1, x);         /* 1 with inexact iff x != 0 */
+               RETURNF(1 + x);         /* 1 with inexact iff x != 0 */
        }

        ENTERI();

        twopk = 1;
        __k_expl(x, &hi, &lo, &k);
        t = SUM2P(hi, lo);

        /* Scale by 2**k. */
        if (k >= LDBL_MIN_EXP) {
                if (k == LDBL_MAX_EXP)
                        RETURNI(t * 2 * 0x1p16383L);
                SET_LDBL_EXPSIGN(twopk, BIAS + k);
                RETURNI(t * twopk);
        } else {
                SET_LDBL_EXPSIGN(twopk, BIAS + k + 10000);
                RETURNI(t * twopk * twom10000);
        }
}

/**
 * Compute expm1l(x) for Intel 80-bit format.  This is based on:
 *
 *   PTP Tang, "Table-driven implementation of the Expm1 function
 *   in IEEE floating-point arithmetic," ACM Trans. Math. Soft., 18,
 *   211-222 (1992).
 */

/*
 * Our T1 and T2 are chosen to be approximately the points where method
 * A and method B have the same accuracy.  Tang's T1 and T2 are the
 * points where method A's accuracy changes by a full bit.  For Tang,
 * this drop in accuracy makes method A immediately less accurate than
 * method B, but our larger INTERVALS makes method A 2 bits more
 * accurate so it remains the most accurate method significantly
 * closer to the origin despite losing the full bit in our extended
 * range for it.
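 * (Numerically, 30.625/128 * log(2) = 0.2392578125 * 0.693147... =
 * 0.16584..., so the cutoffs T1 = -0.1659 and T2 = 0.1659 defined
 * below sit just outside that point, matching the "~" in their
 * comments.)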
*/ static const double T1 = -0.1659, /* ~-30.625/128 * log(2) */ T2 = 0.1659; /* ~30.625/128 * log(2) */ /* * Domain [-0.1659, 0.1659], range ~[-2.6155e-22, 2.5507e-23]: * |(exp(x)-1-x-x**2/2)/x - p(x)| < 2**-71.6 * * XXX the coeffs aren't very carefully rounded, and I get 2.8 more bits, * but unlike for ld128 we can't drop any terms. */ static const union IEEEl2bits B3 = LD80C(0xaaaaaaaaaaaaaaab, -3, 1.66666666666666666671e-1L), B4 = LD80C(0xaaaaaaaaaaaaaaac, -5, 4.16666666666666666712e-2L); static const double B5 = 8.3333333333333245e-3, /* 0x1.111111111110cp-7 */ B6 = 1.3888888888888861e-3, /* 0x1.6c16c16c16c0ap-10 */ B7 = 1.9841269841532042e-4, /* 0x1.a01a01a0319f9p-13 */ B8 = 2.4801587302069236e-5, /* 0x1.a01a01a03cbbcp-16 */ B9 = 2.7557316558468562e-6, /* 0x1.71de37fd33d67p-19 */ B10 = 2.7557315829785151e-7, /* 0x1.27e4f91418144p-22 */ B11 = 2.5063168199779829e-8, /* 0x1.ae94fabdc6b27p-26 */ B12 = 2.0887164654459567e-9; /* 0x1.1f122d6413fe1p-29 */ long double expm1l(long double x) { union IEEEl2bits u, v; long double fn, hx2_hi, hx2_lo, q, r, r1, r2, t, twomk, twopk, x_hi; long double x_lo, x2, z; long double x4; int k, n, n2; uint16_t hx, ix; - DOPRINT_START(&x); - /* Filter out exceptional cases. */ u.e = x; hx = u.xbits.expsign; ix = hx & 0x7fff; if (ix >= BIAS + 6) { /* |x| >= 64 or x is NaN */ if (ix == BIAS + LDBL_MAX_EXP) { if (hx & 0x8000) /* x is -Inf, -NaN or unsupported */ - RETURNP(-1 / x - 1); - RETURNP(x + x); /* x is +Inf, +NaN or unsupported */ + RETURNF(-1 / x - 1); + RETURNF(x + x); /* x is +Inf, +NaN or unsupported */ } if (x > o_threshold) - RETURNP(huge * huge); + RETURNF(huge * huge); /* * expm1l() never underflows, but it must avoid * unrepresentable large negative exponents. We used a * much smaller threshold for large |x| above than in * expl() so as to handle not so large negative exponents * in the same way as large ones here. */ if (hx & 0x8000) /* x <= -64 */ - RETURN2P(tiny, -1); /* good for x < -65ln2 - eps */ + RETURNF(tiny - 1); /* good for x < -65ln2 - eps */ } ENTERI(); if (T1 < x && x < T2) { if (ix < BIAS - 74) { /* |x| < 0x1p-74 (includes pseudos) */ /* x (rounded) with inexact if x != 0: */ - RETURNPI(x == 0 ? x : + RETURNI(x == 0 ? x : (0x1p100 * x + fabsl(x)) * 0x1p-100); } x2 = x * x; x4 = x2 * x2; q = x4 * (x2 * (x4 * /* * XXX the number of terms is no longer good for * pairwise grouping of all except B3, and the * grouping is no longer from highest down. */ (x2 * B12 + (x * B11 + B10)) + (x2 * (x * B9 + B8) + (x * B7 + B6))) + (x * B5 + B4.e)) + x2 * x * B3.e; x_hi = (float)x; x_lo = x - x_hi; hx2_hi = x_hi * x_hi / 2; hx2_lo = x_lo * (x + x_hi) / 2; if (ix >= BIAS - 7) - RETURN2PI(hx2_hi + x_hi, hx2_lo + x_lo + q); + RETURNI((hx2_hi + x_hi) + (hx2_lo + x_lo + q)); else - RETURN2PI(x, hx2_lo + q + hx2_hi); + RETURNI(x + (hx2_lo + q + hx2_hi)); } /* Reduce x to (k*ln2 + endpoint[n2] + r1 + r2). */ fn = rnintl(x * INV_L); n = irint(fn); n2 = (unsigned)n % INTERVALS; k = n >> LOG2_INTERVALS; r1 = x - fn * L1; r2 = fn * -L2; r = r1 + r2; /* Prepare scale factor. */ v.e = 1; v.xbits.expsign = BIAS + k; twopk = v.e; /* * Evaluate lower terms of * expl(endpoint[n2] + r1 + r2) = tbl[n2] * expl(r1 + r2). 
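 * (Note on the evaluation of q just below: it is grouped by powers of
 * z = r*r,
 *
 *	q = r2 + z*(A2 + r*A3) + z*z*(A4 + r*A5) + z*z*z*A6
 *
 * an Estrin-style split that exposes independent multiplications to
 * the hardware, rather than one serial Horner chain.)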
*/ z = r * r; q = r2 + z * (A2 + r * A3) + z * z * (A4 + r * A5) + z * z * z * A6; t = (long double)tbl[n2].lo + tbl[n2].hi; if (k == 0) { t = SUM2P(tbl[n2].hi - 1, tbl[n2].lo * (r1 + 1) + t * q + tbl[n2].hi * r1); RETURNI(t); } if (k == -1) { t = SUM2P(tbl[n2].hi - 2, tbl[n2].lo * (r1 + 1) + t * q + tbl[n2].hi * r1); RETURNI(t / 2); } if (k < -7) { t = SUM2P(tbl[n2].hi, tbl[n2].lo + t * (q + r1)); RETURNI(t * twopk - 1); } if (k > 2 * LDBL_MANT_DIG - 1) { t = SUM2P(tbl[n2].hi, tbl[n2].lo + t * (q + r1)); if (k == LDBL_MAX_EXP) RETURNI(t * 2 * 0x1p16383L - 1); RETURNI(t * twopk - 1); } v.xbits.expsign = BIAS - k; twomk = v.e; if (k > LDBL_MANT_DIG - 1) t = SUM2P(tbl[n2].hi, tbl[n2].lo - twomk + t * (q + r1)); else t = SUM2P(tbl[n2].hi - twomk, tbl[n2].lo + t * (q + r1)); RETURNI(t * twopk); } diff --git a/lib/msun/ld80/s_logl.c b/lib/msun/ld80/s_logl.c index dac5bfb9e25e..abe778249549 100644 --- a/lib/msun/ld80/s_logl.c +++ b/lib/msun/ld80/s_logl.c @@ -1,722 +1,718 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2007-2013 Bruce D. Evans * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /** * Implementation of the natural logarithm of x for Intel 80-bit format. * * First decompose x into its base 2 representation: * * log(x) = log(X * 2**k), where X is in [1, 2) * = log(X) + k * log(2). * * Let X = X_i + e, where X_i is the center of one of the intervals * [-1.0/256, 1.0/256), [1.0/256, 3.0/256), .... [2.0-1.0/256, 2.0+1.0/256) * and X is in this interval. Then * * log(X) = log(X_i + e) * = log(X_i * (1 + e / X_i)) * = log(X_i) + log(1 + e / X_i). * * The values log(X_i) are tabulated below. Let d = e / X_i and use * * log(1 + d) = p(d) * * where p(d) = d - 0.5*d*d + ... is a special minimax polynomial of * suitably high degree. * * To get sufficiently small roundoff errors, k * log(2), log(X_i), and * sometimes (if |k| is not large) the first term in p(d) must be evaluated * and added up in extra precision. Extra precision is not needed for the * rest of p(d). In the worst case when k = 0 and log(X_i) is 0, the final * error is controlled mainly by the error in the second term in p(d). The * error in this term itself is at most 0.5 ulps from the d*d operation in * it. 
The error in this term relative to the first term is thus at most
 * 0.5 * |-0.5| * |d| < 1.0/1024 ulps.  We aim for an accumulated error of
 * at most twice this at the point of the final rounding step.  Thus the
 * final error should be at most 0.5 + 1.0/512 = 0.5020 ulps.  Exhaustive
 * testing of a float variant of this function showed a maximum final error
 * of 0.5008 ulps.  Non-exhaustive testing of a double variant of this
 * function showed a maximum final error of 0.5078 ulps (near 1+1.0/256).
 *
 * We made the maximum of |d| (and thus the total relative error and the
 * degree of p(d)) small by using a large number of intervals.  Using
 * centers of intervals instead of endpoints reduces this maximum by a
 * factor of 2 for a given number of intervals.  p(d) is special only
 * in beginning with the Taylor coefficients 0 + 1*d, which tends to happen
 * naturally.  The most accurate minimax polynomial of a given degree might
 * be different, but then we wouldn't want it since we would have to do
 * extra work to avoid roundoff error (especially for P0*d instead of d).
 */

#ifdef DEBUG
#include <assert.h>
#include <fenv.h>
#endif

#ifdef __i386__
#include <ieeefp.h>
#endif

#include "fpmath.h"
#include "math.h"
#define i386_SSE_GOOD
#ifndef NO_STRUCT_RETURN
#define STRUCT_RETURN
#endif
#include "math_private.h"

#if !defined(NO_UTAB) && !defined(NO_UTABL)
#define USE_UTAB
#endif

/*
 * Domain [-0.005280, 0.004838], range ~[-5.1736e-22, 5.1738e-22]:
 * |log(1 + d)/d - p(d)| < 2**-70.7
 */
static const double
P2 = -0.5,
P3 =  3.3333333333333359e-1,		/*  0x1555555555555a.0p-54 */
P4 = -2.5000000000004424e-1,		/* -0x1000000000031d.0p-54 */
P5 =  1.9999999992970016e-1,		/*  0x1999999972f3c7.0p-55 */
P6 = -1.6666666072191585e-1,		/* -0x15555548912c09.0p-55 */
P7 =  1.4286227413310518e-1,		/*  0x12494f9d9def91.0p-55 */
P8 = -1.2518388626763144e-1;		/* -0x1006068cc0b97c.0p-55 */

static volatile const double zero = 0;

#define INTERVALS	128
#define LOG2_INTERVALS	7
#define TSIZE		(INTERVALS + 1)
#define G(i)		(T[(i)].G)
#define F_hi(i)		(T[(i)].F_hi)
#define F_lo(i)		(T[(i)].F_lo)
#define ln2_hi		F_hi(TSIZE - 1)
#define ln2_lo		F_lo(TSIZE - 1)
#define E(i)		(U[(i)].E)
#define H(i)		(U[(i)].H)

static const struct {
        float   G;			/* 1/(1 + i/128) rounded to 8/9 bits */
        float   F_hi;			/* log(1 / G_i) rounded (see below) */
        double  F_lo;			/* next 53 bits for log(1 / G_i) */
} T[TSIZE] = {
        /*
         * ln2_hi and each F_hi(i) are rounded to a number of bits that
         * makes F_hi(i) + dk*ln2_hi exact for all i and all dk.
         *
         * The last entry (for X just below 2) is used to define ln2_hi
         * and ln2_lo, to ensure that F_hi(i) and F_lo(i) cancel exactly
         * with dk*ln2_hi and dk*ln2_lo, respectively, when dk = -1.
         * This is needed for accuracy when x is just below 1.  (To avoid
         * special cases, such x are "reduced" strangely to X just below
         * 2 and dk = -1, and then the exact cancellation is needed
         * because the error from any non-exactness would be too
         * large).
         *
         * We want to share this table between double precision and ld80,
         * so the relevant range of dk is the larger one of ld80
         * ([-16445, 16383]) and the relevant exactness requirement is
         * the stricter one of double precision.  The maximum number of
         * bits in F_hi(i) that works is very dependent on i but has
         * a minimum of 33.  We only need about 12 bits in F_hi(i) for
         * it to provide enough extra precision in double precision (11
         * more than that are required for ld80).
         *
         * We round F_hi(i) to 24 bits so that it can have type float,
         * mainly to minimize the size of the table.
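         * (Concretely: with two floats and a double, each entry packs
         * into 16 bytes, so the table is TSIZE * 16 = 129 * 16 = 2064
         * bytes; a long double F_lo would grow each entry to 32 bytes
         * on typical 64-bit targets.)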
Using all 24 bits * in a float for it automatically satisfies the above constraints. */ { 0x800000.0p-23, 0, 0 }, { 0xfe0000.0p-24, 0x8080ac.0p-30, -0x14ee431dae6675.0p-84 }, { 0xfc0000.0p-24, 0x8102b3.0p-29, -0x1db29ee2d83718.0p-84 }, { 0xfa0000.0p-24, 0xc24929.0p-29, 0x1191957d173698.0p-83 }, { 0xf80000.0p-24, 0x820aec.0p-28, 0x13ce8888e02e79.0p-82 }, { 0xf60000.0p-24, 0xa33577.0p-28, -0x17a4382ce6eb7c.0p-82 }, { 0xf48000.0p-24, 0xbc42cb.0p-28, -0x172a21161a1076.0p-83 }, { 0xf30000.0p-24, 0xd57797.0p-28, -0x1e09de07cb9589.0p-82 }, { 0xf10000.0p-24, 0xf7518e.0p-28, 0x1ae1eec1b036c5.0p-91 }, { 0xef0000.0p-24, 0x8cb9df.0p-27, -0x1d7355325d560e.0p-81 }, { 0xed8000.0p-24, 0x999ec0.0p-27, -0x1f9f02d256d503.0p-82 }, { 0xec0000.0p-24, 0xa6988b.0p-27, -0x16fc0a9d12c17a.0p-83 }, { 0xea0000.0p-24, 0xb80698.0p-27, 0x15d581c1e8da9a.0p-81 }, { 0xe80000.0p-24, 0xc99af3.0p-27, -0x1535b3ba8f150b.0p-83 }, { 0xe70000.0p-24, 0xd273b2.0p-27, 0x163786f5251af0.0p-85 }, { 0xe50000.0p-24, 0xe442c0.0p-27, 0x1bc4b2368e32d5.0p-84 }, { 0xe38000.0p-24, 0xf1b83f.0p-27, 0x1c6090f684e676.0p-81 }, { 0xe20000.0p-24, 0xff448a.0p-27, -0x1890aa69ac9f42.0p-82 }, { 0xe08000.0p-24, 0x8673f6.0p-26, 0x1b9985194b6b00.0p-80 }, { 0xdf0000.0p-24, 0x8d515c.0p-26, -0x1dc08d61c6ef1e.0p-83 }, { 0xdd8000.0p-24, 0x943a9e.0p-26, -0x1f72a2dac729b4.0p-82 }, { 0xdc0000.0p-24, 0x9b2fe6.0p-26, -0x1fd4dfd3a0afb9.0p-80 }, { 0xda8000.0p-24, 0xa2315d.0p-26, -0x11b26121629c47.0p-82 }, { 0xd90000.0p-24, 0xa93f2f.0p-26, 0x1286d633e8e569.0p-81 }, { 0xd78000.0p-24, 0xb05988.0p-26, 0x16128eba936770.0p-84 }, { 0xd60000.0p-24, 0xb78094.0p-26, 0x16ead577390d32.0p-80 }, { 0xd50000.0p-24, 0xbc4c6c.0p-26, 0x151131ccf7c7b7.0p-81 }, { 0xd38000.0p-24, 0xc3890a.0p-26, -0x115e2cd714bd06.0p-80 }, { 0xd20000.0p-24, 0xcad2d7.0p-26, -0x1847f406ebd3b0.0p-82 }, { 0xd10000.0p-24, 0xcfb620.0p-26, 0x1c2259904d6866.0p-81 }, { 0xcf8000.0p-24, 0xd71653.0p-26, 0x1ece57a8d5ae55.0p-80 }, { 0xce0000.0p-24, 0xde843a.0p-26, -0x1f109d4bc45954.0p-81 }, { 0xcd0000.0p-24, 0xe37fde.0p-26, 0x1bc03dc271a74d.0p-81 }, { 0xcb8000.0p-24, 0xeb050c.0p-26, -0x1bf2badc0df842.0p-85 }, { 0xca0000.0p-24, 0xf29878.0p-26, -0x18efededd89fbe.0p-87 }, { 0xc90000.0p-24, 0xf7ad6f.0p-26, 0x1373ff977baa69.0p-81 }, { 0xc80000.0p-24, 0xfcc8e3.0p-26, 0x196766f2fb3283.0p-80 }, { 0xc68000.0p-24, 0x823f30.0p-25, 0x19bd076f7c434e.0p-79 }, { 0xc58000.0p-24, 0x84d52c.0p-25, -0x1a327257af0f46.0p-79 }, { 0xc40000.0p-24, 0x88bc74.0p-25, 0x113f23def19c5a.0p-81 }, { 0xc30000.0p-24, 0x8b5ae6.0p-25, 0x1759f6e6b37de9.0p-79 }, { 0xc20000.0p-24, 0x8dfccb.0p-25, 0x1ad35ca6ed5148.0p-81 }, { 0xc10000.0p-24, 0x90a22b.0p-25, 0x1a1d71a87deba4.0p-79 }, { 0xbf8000.0p-24, 0x94a0d8.0p-25, -0x139e5210c2b731.0p-80 }, { 0xbe8000.0p-24, 0x974f16.0p-25, -0x18f6ebcff3ed73.0p-81 }, { 0xbd8000.0p-24, 0x9a00f1.0p-25, -0x1aa268be39aab7.0p-79 }, { 0xbc8000.0p-24, 0x9cb672.0p-25, -0x14c8815839c566.0p-79 }, { 0xbb0000.0p-24, 0xa0cda1.0p-25, 0x1eaf46390dbb24.0p-81 }, { 0xba0000.0p-24, 0xa38c6e.0p-25, 0x138e20d831f698.0p-81 }, { 0xb90000.0p-24, 0xa64f05.0p-25, -0x1e8d3c41123616.0p-82 }, { 0xb80000.0p-24, 0xa91570.0p-25, 0x1ce28f5f3840b2.0p-80 }, { 0xb70000.0p-24, 0xabdfbb.0p-25, -0x186e5c0a424234.0p-79 }, { 0xb60000.0p-24, 0xaeadef.0p-25, -0x14d41a0b2a08a4.0p-83 }, { 0xb50000.0p-24, 0xb18018.0p-25, 0x16755892770634.0p-79 }, { 0xb40000.0p-24, 0xb45642.0p-25, -0x16395ebe59b152.0p-82 }, { 0xb30000.0p-24, 0xb73077.0p-25, 0x1abc65c8595f09.0p-80 }, { 0xb20000.0p-24, 0xba0ec4.0p-25, -0x1273089d3dad89.0p-79 }, { 0xb10000.0p-24, 0xbcf133.0p-25, 
0x10f9f67b1f4bbf.0p-79 }, { 0xb00000.0p-24, 0xbfd7d2.0p-25, -0x109fab90486409.0p-80 }, { 0xaf0000.0p-24, 0xc2c2ac.0p-25, -0x1124680aa43333.0p-79 }, { 0xae8000.0p-24, 0xc439b3.0p-25, -0x1f360cc4710fc0.0p-80 }, { 0xad8000.0p-24, 0xc72afd.0p-25, -0x132d91f21d89c9.0p-80 }, { 0xac8000.0p-24, 0xca20a2.0p-25, -0x16bf9b4d1f8da8.0p-79 }, { 0xab8000.0p-24, 0xcd1aae.0p-25, 0x19deb5ce6a6a87.0p-81 }, { 0xaa8000.0p-24, 0xd0192f.0p-25, 0x1a29fb48f7d3cb.0p-79 }, { 0xaa0000.0p-24, 0xd19a20.0p-25, 0x1127d3c6457f9d.0p-81 }, { 0xa90000.0p-24, 0xd49f6a.0p-25, -0x1ba930e486a0ac.0p-81 }, { 0xa80000.0p-24, 0xd7a94b.0p-25, -0x1b6e645f31549e.0p-79 }, { 0xa70000.0p-24, 0xdab7d0.0p-25, 0x1118a425494b61.0p-80 }, { 0xa68000.0p-24, 0xdc40d5.0p-25, 0x1966f24d29d3a3.0p-80 }, { 0xa58000.0p-24, 0xdf566d.0p-25, -0x1d8e52eb2248f1.0p-82 }, { 0xa48000.0p-24, 0xe270ce.0p-25, -0x1ee370f96e6b68.0p-80 }, { 0xa40000.0p-24, 0xe3ffce.0p-25, 0x1d155324911f57.0p-80 }, { 0xa30000.0p-24, 0xe72179.0p-25, -0x1fe6e2f2f867d9.0p-80 }, { 0xa20000.0p-24, 0xea4812.0p-25, 0x1b7be9add7f4d4.0p-80 }, { 0xa18000.0p-24, 0xebdd3d.0p-25, 0x1b3cfb3f7511dd.0p-79 }, { 0xa08000.0p-24, 0xef0b5b.0p-25, -0x1220de1f730190.0p-79 }, { 0xa00000.0p-24, 0xf0a451.0p-25, -0x176364c9ac81cd.0p-80 }, { 0x9f0000.0p-24, 0xf3da16.0p-25, 0x1eed6b9aafac8d.0p-81 }, { 0x9e8000.0p-24, 0xf576e9.0p-25, 0x1d593218675af2.0p-79 }, { 0x9d8000.0p-24, 0xf8b47c.0p-25, -0x13e8eb7da053e0.0p-84 }, { 0x9d0000.0p-24, 0xfa553f.0p-25, 0x1c063259bcade0.0p-79 }, { 0x9c0000.0p-24, 0xfd9ac5.0p-25, 0x1ef491085fa3c1.0p-79 }, { 0x9b8000.0p-24, 0xff3f8c.0p-25, 0x1d607a7c2b8c53.0p-79 }, { 0x9a8000.0p-24, 0x814697.0p-24, -0x12ad3817004f3f.0p-78 }, { 0x9a0000.0p-24, 0x821b06.0p-24, -0x189fc53117f9e5.0p-81 }, { 0x990000.0p-24, 0x83c5f8.0p-24, 0x14cf15a048907b.0p-79 }, { 0x988000.0p-24, 0x849c7d.0p-24, 0x1cbb1d35fb8287.0p-78 }, { 0x978000.0p-24, 0x864ba6.0p-24, 0x1128639b814f9c.0p-78 }, { 0x970000.0p-24, 0x87244c.0p-24, 0x184733853300f0.0p-79 }, { 0x968000.0p-24, 0x87fdaa.0p-24, 0x109d23aef77dd6.0p-80 }, { 0x958000.0p-24, 0x89b293.0p-24, -0x1a81ef367a59de.0p-78 }, { 0x950000.0p-24, 0x8a8e20.0p-24, -0x121ad3dbb2f452.0p-78 }, { 0x948000.0p-24, 0x8b6a6a.0p-24, -0x1cfb981628af72.0p-79 }, { 0x938000.0p-24, 0x8d253a.0p-24, -0x1d21730ea76cfe.0p-79 }, { 0x930000.0p-24, 0x8e03c2.0p-24, 0x135cc00e566f77.0p-78 }, { 0x928000.0p-24, 0x8ee30d.0p-24, -0x10fcb5df257a26.0p-80 }, { 0x918000.0p-24, 0x90a3ee.0p-24, -0x16e171b15433d7.0p-79 }, { 0x910000.0p-24, 0x918587.0p-24, -0x1d050da07f3237.0p-79 }, { 0x908000.0p-24, 0x9267e7.0p-24, 0x1be03669a5268d.0p-79 }, { 0x8f8000.0p-24, 0x942f04.0p-24, 0x10b28e0e26c337.0p-79 }, { 0x8f0000.0p-24, 0x9513c3.0p-24, 0x1a1d820da57cf3.0p-78 }, { 0x8e8000.0p-24, 0x95f950.0p-24, -0x19ef8f13ae3cf1.0p-79 }, { 0x8e0000.0p-24, 0x96dfab.0p-24, -0x109e417a6e507c.0p-78 }, { 0x8d0000.0p-24, 0x98aed2.0p-24, 0x10d01a2c5b0e98.0p-79 }, { 0x8c8000.0p-24, 0x9997a2.0p-24, -0x1d6a50d4b61ea7.0p-78 }, { 0x8c0000.0p-24, 0x9a8145.0p-24, 0x1b3b190b83f952.0p-78 }, { 0x8b8000.0p-24, 0x9b6bbf.0p-24, 0x13a69fad7e7abe.0p-78 }, { 0x8b0000.0p-24, 0x9c5711.0p-24, -0x11cd12316f576b.0p-78 }, { 0x8a8000.0p-24, 0x9d433b.0p-24, 0x1c95c444b807a2.0p-79 }, { 0x898000.0p-24, 0x9f1e22.0p-24, -0x1b9c224ea698c3.0p-79 }, { 0x890000.0p-24, 0xa00ce1.0p-24, 0x125ca93186cf0f.0p-81 }, { 0x888000.0p-24, 0xa0fc80.0p-24, -0x1ee38a7bc228b3.0p-79 }, { 0x880000.0p-24, 0xa1ed00.0p-24, -0x1a0db876613d20.0p-78 }, { 0x878000.0p-24, 0xa2de62.0p-24, 0x193224e8516c01.0p-79 }, { 0x870000.0p-24, 0xa3d0a9.0p-24, 0x1fa28b4d2541ad.0p-79 }, { 0x868000.0p-24, 
0xa4c3d6.0p-24, 0x1c1b5760fb4572.0p-78 }, { 0x858000.0p-24, 0xa6acea.0p-24, 0x1fed5d0f65949c.0p-80 }, { 0x850000.0p-24, 0xa7a2d4.0p-24, 0x1ad270c9d74936.0p-80 }, { 0x848000.0p-24, 0xa899ab.0p-24, 0x199ff15ce53266.0p-79 }, { 0x840000.0p-24, 0xa99171.0p-24, 0x1a19e15ccc45d2.0p-79 }, { 0x838000.0p-24, 0xaa8a28.0p-24, -0x121a14ec532b36.0p-80 }, { 0x830000.0p-24, 0xab83d1.0p-24, 0x1aee319980bff3.0p-79 }, { 0x828000.0p-24, 0xac7e6f.0p-24, -0x18ffd9e3900346.0p-80 }, { 0x820000.0p-24, 0xad7a03.0p-24, -0x1e4db102ce29f8.0p-80 }, { 0x818000.0p-24, 0xae768f.0p-24, 0x17c35c55a04a83.0p-81 }, { 0x810000.0p-24, 0xaf7415.0p-24, 0x1448324047019b.0p-78 }, { 0x808000.0p-24, 0xb07298.0p-24, -0x1750ee3915a198.0p-78 }, { 0x800000.0p-24, 0xb17218.0p-24, -0x105c610ca86c39.0p-81 }, }; #ifdef USE_UTAB static const struct { float H; /* 1 + i/INTERVALS (exact) */ float E; /* H(i) * G(i) - 1 (exact) */ } U[TSIZE] = { { 0x800000.0p-23, 0 }, { 0x810000.0p-23, -0x800000.0p-37 }, { 0x820000.0p-23, -0x800000.0p-35 }, { 0x830000.0p-23, -0x900000.0p-34 }, { 0x840000.0p-23, -0x800000.0p-33 }, { 0x850000.0p-23, -0xc80000.0p-33 }, { 0x860000.0p-23, -0xa00000.0p-36 }, { 0x870000.0p-23, 0x940000.0p-33 }, { 0x880000.0p-23, 0x800000.0p-35 }, { 0x890000.0p-23, -0xc80000.0p-34 }, { 0x8a0000.0p-23, 0xe00000.0p-36 }, { 0x8b0000.0p-23, 0x900000.0p-33 }, { 0x8c0000.0p-23, -0x800000.0p-35 }, { 0x8d0000.0p-23, -0xe00000.0p-33 }, { 0x8e0000.0p-23, 0x880000.0p-33 }, { 0x8f0000.0p-23, -0xa80000.0p-34 }, { 0x900000.0p-23, -0x800000.0p-35 }, { 0x910000.0p-23, 0x800000.0p-37 }, { 0x920000.0p-23, 0x900000.0p-35 }, { 0x930000.0p-23, 0xd00000.0p-35 }, { 0x940000.0p-23, 0xe00000.0p-35 }, { 0x950000.0p-23, 0xc00000.0p-35 }, { 0x960000.0p-23, 0xe00000.0p-36 }, { 0x970000.0p-23, -0x800000.0p-38 }, { 0x980000.0p-23, -0xc00000.0p-35 }, { 0x990000.0p-23, -0xd00000.0p-34 }, { 0x9a0000.0p-23, 0x880000.0p-33 }, { 0x9b0000.0p-23, 0xe80000.0p-35 }, { 0x9c0000.0p-23, -0x800000.0p-35 }, { 0x9d0000.0p-23, 0xb40000.0p-33 }, { 0x9e0000.0p-23, 0x880000.0p-34 }, { 0x9f0000.0p-23, -0xe00000.0p-35 }, { 0xa00000.0p-23, 0x800000.0p-33 }, { 0xa10000.0p-23, -0x900000.0p-36 }, { 0xa20000.0p-23, -0xb00000.0p-33 }, { 0xa30000.0p-23, -0xa00000.0p-36 }, { 0xa40000.0p-23, 0x800000.0p-33 }, { 0xa50000.0p-23, -0xf80000.0p-35 }, { 0xa60000.0p-23, 0x880000.0p-34 }, { 0xa70000.0p-23, -0x900000.0p-33 }, { 0xa80000.0p-23, -0x800000.0p-35 }, { 0xa90000.0p-23, 0x900000.0p-34 }, { 0xaa0000.0p-23, 0xa80000.0p-33 }, { 0xab0000.0p-23, -0xac0000.0p-34 }, { 0xac0000.0p-23, -0x800000.0p-37 }, { 0xad0000.0p-23, 0xf80000.0p-35 }, { 0xae0000.0p-23, 0xf80000.0p-34 }, { 0xaf0000.0p-23, -0xac0000.0p-33 }, { 0xb00000.0p-23, -0x800000.0p-33 }, { 0xb10000.0p-23, -0xb80000.0p-34 }, { 0xb20000.0p-23, -0x800000.0p-34 }, { 0xb30000.0p-23, -0xb00000.0p-35 }, { 0xb40000.0p-23, -0x800000.0p-35 }, { 0xb50000.0p-23, -0xe00000.0p-36 }, { 0xb60000.0p-23, -0x800000.0p-35 }, { 0xb70000.0p-23, -0xb00000.0p-35 }, { 0xb80000.0p-23, -0x800000.0p-34 }, { 0xb90000.0p-23, -0xb80000.0p-34 }, { 0xba0000.0p-23, -0x800000.0p-33 }, { 0xbb0000.0p-23, -0xac0000.0p-33 }, { 0xbc0000.0p-23, 0x980000.0p-33 }, { 0xbd0000.0p-23, 0xbc0000.0p-34 }, { 0xbe0000.0p-23, 0xe00000.0p-36 }, { 0xbf0000.0p-23, -0xb80000.0p-35 }, { 0xc00000.0p-23, -0x800000.0p-33 }, { 0xc10000.0p-23, 0xa80000.0p-33 }, { 0xc20000.0p-23, 0x900000.0p-34 }, { 0xc30000.0p-23, -0x800000.0p-35 }, { 0xc40000.0p-23, -0x900000.0p-33 }, { 0xc50000.0p-23, 0x820000.0p-33 }, { 0xc60000.0p-23, 0x800000.0p-38 }, { 0xc70000.0p-23, -0x820000.0p-33 }, { 0xc80000.0p-23, 0x800000.0p-33 
}, { 0xc90000.0p-23, -0xa00000.0p-36 }, { 0xca0000.0p-23, -0xb00000.0p-33 }, { 0xcb0000.0p-23, 0x840000.0p-34 }, { 0xcc0000.0p-23, -0xd00000.0p-34 }, { 0xcd0000.0p-23, 0x800000.0p-33 }, { 0xce0000.0p-23, -0xe00000.0p-35 }, { 0xcf0000.0p-23, 0xa60000.0p-33 }, { 0xd00000.0p-23, -0x800000.0p-35 }, { 0xd10000.0p-23, 0xb40000.0p-33 }, { 0xd20000.0p-23, -0x800000.0p-35 }, { 0xd30000.0p-23, 0xaa0000.0p-33 }, { 0xd40000.0p-23, -0xe00000.0p-35 }, { 0xd50000.0p-23, 0x880000.0p-33 }, { 0xd60000.0p-23, -0xd00000.0p-34 }, { 0xd70000.0p-23, 0x9c0000.0p-34 }, { 0xd80000.0p-23, -0xb00000.0p-33 }, { 0xd90000.0p-23, -0x800000.0p-38 }, { 0xda0000.0p-23, 0xa40000.0p-33 }, { 0xdb0000.0p-23, -0xdc0000.0p-34 }, { 0xdc0000.0p-23, 0xc00000.0p-35 }, { 0xdd0000.0p-23, 0xca0000.0p-33 }, { 0xde0000.0p-23, -0xb80000.0p-34 }, { 0xdf0000.0p-23, 0xd00000.0p-35 }, { 0xe00000.0p-23, 0xc00000.0p-33 }, { 0xe10000.0p-23, -0xf40000.0p-34 }, { 0xe20000.0p-23, 0x800000.0p-37 }, { 0xe30000.0p-23, 0x860000.0p-33 }, { 0xe40000.0p-23, -0xc80000.0p-33 }, { 0xe50000.0p-23, -0xa80000.0p-34 }, { 0xe60000.0p-23, 0xe00000.0p-36 }, { 0xe70000.0p-23, 0x880000.0p-33 }, { 0xe80000.0p-23, -0xe00000.0p-33 }, { 0xe90000.0p-23, -0xfc0000.0p-34 }, { 0xea0000.0p-23, -0x800000.0p-35 }, { 0xeb0000.0p-23, 0xe80000.0p-35 }, { 0xec0000.0p-23, 0x900000.0p-33 }, { 0xed0000.0p-23, 0xe20000.0p-33 }, { 0xee0000.0p-23, -0xac0000.0p-33 }, { 0xef0000.0p-23, -0xc80000.0p-34 }, { 0xf00000.0p-23, -0x800000.0p-35 }, { 0xf10000.0p-23, 0x800000.0p-35 }, { 0xf20000.0p-23, 0xb80000.0p-34 }, { 0xf30000.0p-23, 0x940000.0p-33 }, { 0xf40000.0p-23, 0xc80000.0p-33 }, { 0xf50000.0p-23, -0xf20000.0p-33 }, { 0xf60000.0p-23, -0xc80000.0p-33 }, { 0xf70000.0p-23, -0xa20000.0p-33 }, { 0xf80000.0p-23, -0x800000.0p-33 }, { 0xf90000.0p-23, -0xc40000.0p-34 }, { 0xfa0000.0p-23, -0x900000.0p-34 }, { 0xfb0000.0p-23, -0xc80000.0p-35 }, { 0xfc0000.0p-23, -0x800000.0p-35 }, { 0xfd0000.0p-23, -0x900000.0p-36 }, { 0xfe0000.0p-23, -0x800000.0p-37 }, { 0xff0000.0p-23, -0x800000.0p-39 }, { 0x800000.0p-22, 0 }, }; #endif /* USE_UTAB */ #ifdef STRUCT_RETURN #define RETURN1(rp, v) do { \ (rp)->hi = (v); \ (rp)->lo_set = 0; \ return; \ } while (0) #define RETURN2(rp, h, l) do { \ (rp)->hi = (h); \ (rp)->lo = (l); \ (rp)->lo_set = 1; \ return; \ } while (0) struct ld { long double hi; long double lo; int lo_set; }; #else #define RETURN1(rp, v) RETURNF(v) #define RETURN2(rp, h, l) RETURNI((h) + (l)) #endif #ifdef STRUCT_RETURN static inline __always_inline void k_logl(long double x, struct ld *rp) #else long double logl(long double x) #endif { long double d, dk, val_hi, val_lo, z; uint64_t ix, lx; int i, k; uint16_t hx; EXTRACT_LDBL80_WORDS(hx, lx, x); k = -16383; #if 0 /* Hard to do efficiently. Don't do it until we support all modes. */ if (x == 1) RETURN1(rp, 0); /* log(1) = +0 in all rounding modes */ #endif if (hx == 0 || hx >= 0x8000) { /* zero, negative or subnormal? */ if (((hx & 0x7fff) | lx) == 0) RETURN1(rp, -1 / zero); /* log(+-0) = -Inf */ if (hx != 0) /* log(neg or [pseudo-]NaN) = qNaN: */ RETURN1(rp, (x - x) / zero); x *= 0x1.0p65; /* subnormal; scale up x */ /* including pseudo-subnormals */ EXTRACT_LDBL80_WORDS(hx, lx, x); k = -16383 - 65; } else if (hx >= 0x7fff || (lx & 0x8000000000000000ULL) == 0) RETURN1(rp, x + x); /* log(Inf or NaN) = Inf or qNaN */ /* log(pseudo-Inf) = qNaN */ /* log(pseudo-NaN) = qNaN */ /* log(unnormal) = qNaN */ #ifndef STRUCT_RETURN ENTERI(); #endif k += hx; ix = lx & 0x7fffffffffffffffULL; dk = k; /* Scale x to be in [1, 2). 
	 */
	SET_LDBL_EXPSIGN(x, 0x3fff);

	/* 0 <= i <= INTERVALS: */
#define	L2I	(64 - LOG2_INTERVALS)
	i = (ix + (1LL << (L2I - 2))) >> (L2I - 1);

	/*
	 * -0.005280 < d < 0.004838.  In particular, the infinite-
	 * precision |d| is <= 2**-7.  Rounding of G(i) to 8 bits
	 * ensures that d is representable without extra precision for
	 * this bound on |d| (since when this calculation is expressed
	 * as x*G(i)-1, the multiplication needs as many extra bits as
	 * G(i) has and the subtraction cancels 8 bits).  But for
	 * most i (107 cases out of 129), the infinite-precision |d|
	 * is <= 2**-8.  G(i) is rounded to 9 bits for such i to give
	 * better accuracy (this works by improving the bound on |d|,
	 * which in turn allows rounding to 9 bits in more cases).
	 * This is only important when the original x is near 1 -- it
	 * lets us avoid using a special method to give the desired
	 * accuracy for such x.
	 */
	if (0)
		d = x * G(i) - 1;
	else {
#ifdef USE_UTAB
		d = (x - H(i)) * G(i) + E(i);
#else
		long double x_hi, x_lo;
		float fx_hi;

		/*
		 * Split x into x_hi + x_lo to calculate x*G(i)-1 exactly.
		 * G(i) has at most 9 bits, so the splitting point is not
		 * critical.
		 */
		SET_FLOAT_WORD(fx_hi, (lx >> 40) | 0x3f800000);
		x_hi = fx_hi;
		x_lo = x - x_hi;
		d = x_hi * G(i) - 1 + x_lo * G(i);
#endif
	}

	/*
	 * Our algorithm depends on exact cancellation of F_lo(i) and
	 * F_hi(i) with dk*ln2_lo and dk*ln2_hi when k is -1 and i is
	 * at the end of the table.  This and other technical complications
	 * make it difficult to avoid the double scaling in (dk*ln2) *
	 * log(base) for base != e without losing more accuracy and/or
	 * efficiency than is gained.
	 */
	z = d * d;
	val_lo = z * d * z * (z * (d * P8 + P7) + (d * P6 + P5)) +
	    (F_lo(i) + dk * ln2_lo + z * d * (d * P4 + P3)) + z * P2;
	val_hi = d;
#ifdef DEBUG
	if (fetestexcept(FE_UNDERFLOW))
		breakpoint();
#endif
	_3sumF(val_hi, val_lo, F_hi(i) + dk * ln2_hi);
	RETURN2(rp, val_hi, val_lo);
}
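[Editor's note: the reduction k_logl() implements can be summarized outside msun.  The following is a minimal double-precision sketch of the same table-driven identity log(x) = (k-1)*ln2 + log(1/G(i)) + log1p(x'*G(i) - 1), assuming only <math.h>; it uses libm's log1p() in place of the polynomial and computes G on the fly instead of reading the rounded table, and demo_log is a hypothetical name, not part of this file.]

#include <math.h>

/*
 * Hypothetical sketch (not part of msun): the argument reduction used
 * by k_logl(), for finite x > 0.  G plays the role of the table's
 * G(i) = 1/(1 + i/128); -log(G) plays the role of F_hi(i) + F_lo(i).
 */
static double
demo_log(double x)
{
	int k;
	double xr = frexp(x, &k) * 2;	/* xr in [1, 2), x = xr * 2**(k-1) */
	int i = (int)((xr - 1) * 128 + 0.5);
	double G = 1 / (1 + i / 128.0);
	double d = xr * G - 1;		/* small: |d| ~< 2**-7 */

	return ((k - 1) * M_LN2 - log(G) + log1p(d));
}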
long double
log1pl(long double x)
{
	long double d, d_hi, d_lo, dk, f_lo, val_hi, val_lo, z;
	long double f_hi, twopminusk;
	uint64_t ix, lx;
	int i, k;
	int16_t ax, hx;

-	DOPRINT_START(&x);
	EXTRACT_LDBL80_WORDS(hx, lx, x);
	if (hx < 0x3fff) {		/* x < 1, or x neg NaN */
		ax = hx & 0x7fff;
		if (ax >= 0x3fff) {	/* x <= -1, or x neg NaN */
			if (ax == 0x3fff && lx == 0x8000000000000000ULL)
-				RETURNP(-1 / zero);	/* log1p(-1) = -Inf */
+				RETURNF(-1 / zero);	/* log1p(-1) = -Inf */
			/* log1p(x < -1, or x [pseudo-]NaN) = qNaN: */
-			RETURNP((x - x) / (x - x));
+			RETURNF((x - x) / (x - x));
		}
		if (ax <= 0x3fbe) {	/* |x| < 2**-64 */
			if ((int)x == 0)
-				RETURNP(x);	/* x with inexact if x != 0 */
+				RETURNF(x);	/* x with inexact if x != 0 */
		}
		f_hi = 1;
		f_lo = x;
	} else if (hx >= 0x7fff) {	/* x +Inf or non-neg NaN */
-		RETURNP(x + x);	/* log1p(Inf or NaN) = Inf or qNaN */
+		RETURNF(x + x);	/* log1p(Inf or NaN) = Inf or qNaN */
		/* log1p(pseudo-Inf) = qNaN */
		/* log1p(pseudo-NaN) = qNaN */
		/* log1p(unnormal) = qNaN */
	} else if (hx < 0x407f) {	/* 1 <= x < 2**128 */
		f_hi = x;
		f_lo = 1;
	} else {			/* 2**128 <= x < +Inf */
		f_hi = x;
		f_lo = 0;		/* avoid underflow of the P5 term */
	}
	ENTERI();
	x = f_hi + f_lo;
	f_lo = (f_hi - x) + f_lo;

	EXTRACT_LDBL80_WORDS(hx, lx, x);
	k = -16383;

	k += hx;
	ix = lx & 0x7fffffffffffffffULL;
	dk = k;

	SET_LDBL_EXPSIGN(x, 0x3fff);
	twopminusk = 1;
	SET_LDBL_EXPSIGN(twopminusk, 0x7ffe - (hx & 0x7fff));
	f_lo *= twopminusk;

	i = (ix + (1LL << (L2I - 2))) >> (L2I - 1);

	/*
	 * x*G(i)-1 (with a reduced x) can be represented exactly, as
	 * above, but now we need to evaluate the polynomial on d =
	 * (x+f_lo)*G(i)-1 and extra precision is needed for that.
	 * Since x+f_lo is a hi+lo decomposition and subtracting 1
	 * doesn't lose too many bits, an inexact calculation for
	 * f_lo*G(i) is good enough.
	 */
	if (0)
		d_hi = x * G(i) - 1;
	else {
#ifdef USE_UTAB
		d_hi = (x - H(i)) * G(i) + E(i);
#else
		long double x_hi, x_lo;
		float fx_hi;

		SET_FLOAT_WORD(fx_hi, (lx >> 40) | 0x3f800000);
		x_hi = fx_hi;
		x_lo = x - x_hi;
		d_hi = x_hi * G(i) - 1 + x_lo * G(i);
#endif
	}
	d_lo = f_lo * G(i);

	/*
	 * This is _2sumF(d_hi, d_lo) inlined.  The condition
	 * (d_hi == 0 || |d_hi| >= |d_lo|) for using _2sumF() is not
	 * always satisfied, so it is not clear that this works, but
	 * it works in practice.  It works even if it gives a wrong
	 * normalized d_lo, since |d_lo| > |d_hi| implies that i is
	 * nonzero and d is tiny, so the F(i) term dominates d_lo.
	 * In float precision:
	 * (By exhaustive testing, the worst case is d_hi = 0x1.bp-25.
	 * And if d is only a little tinier than that, we would have
	 * another underflow problem for the P3 term; this is also ruled
	 * out by exhaustive testing.)
	 */
	d = d_hi + d_lo;
	d_lo = d_hi - d + d_lo;
	d_hi = d;

	z = d * d;
	val_lo = z * d * z * (z * (d * P8 + P7) + (d * P6 + P5)) +
	    (F_lo(i) + dk * ln2_lo + d_lo + z * d * (d * P4 + P3)) + z * P2;
	val_hi = d_hi;
#ifdef DEBUG
	if (fetestexcept(FE_UNDERFLOW))
		breakpoint();
#endif
	_3sumF(val_hi, val_lo, F_hi(i) + dk * ln2_hi);
-	RETURN2PI(val_hi, val_lo);
+	RETURNI(val_hi + val_lo);
}

#ifdef STRUCT_RETURN

long double
logl(long double x)
{
	struct ld r;

	ENTERI();
-	DOPRINT_START(&x);
	k_logl(x, &r);
	RETURNSPI(&r);
}

/* Use macros since GCC < 8 rejects static const expressions in initializers. */
#define	invln10_hi	4.3429448190317999e-1	/* 0x1bcb7b1526e000.0p-54 */
#define	invln10_lo	7.1842412889749798e-14	/* 0x1438ca9aadd558.0p-96 */
#define	invln2_hi	1.4426950408887933e0	/* 0x171547652b8000.0p-52 */
#define	invln2_lo	1.7010652264631490e-13	/* 0x17f0bbbe87fed0.0p-95 */
/* Let the compiler pre-calculate this sum to avoid FE_INEXACT at run time. */
static const double invln10_lo_plus_hi = invln10_lo + invln10_hi;
static const double invln2_lo_plus_hi = invln2_lo + invln2_hi;

long double
log10l(long double x)
{
	struct ld r;
	long double hi, lo;

	ENTERI();
-	DOPRINT_START(&x);
	k_logl(x, &r);
	if (!r.lo_set)
-		RETURNPI(r.hi);
+		RETURNI(r.hi);
	_2sumF(r.hi, r.lo);
	hi = (float)r.hi;
	lo = r.lo + (r.hi - hi);
-	RETURN2PI(invln10_hi * hi,
-	    invln10_lo_plus_hi * lo + invln10_lo * hi);
+	RETURNI(invln10_hi * hi +
+	    (invln10_lo_plus_hi * lo + invln10_lo * hi));
}

long double
log2l(long double x)
{
	struct ld r;
	long double hi, lo;

	ENTERI();
-	DOPRINT_START(&x);
	k_logl(x, &r);
	if (!r.lo_set)
-		RETURNPI(r.hi);
+		RETURNI(r.hi);
	_2sumF(r.hi, r.lo);
	hi = (float)r.hi;
	lo = r.lo + (r.hi - hi);
-	RETURN2PI(invln2_hi * hi,
-	    invln2_lo_plus_hi * lo + invln2_lo * hi);
+	RETURNI(invln2_hi * hi +
+	    (invln2_lo_plus_hi * lo + invln2_lo * hi));
}

#endif /* STRUCT_RETURN */
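[Editor's note: the tails of log10l() and log2l() evaluate K*log(x) in extra precision by splitting both the constant K and the hi+lo logarithm so that the hi*hi product is exact.  A minimal double-precision sketch of the same technique follows; the constant K here stands in for 1/ln(10), and demo_scale is a hypothetical name, not part of msun.]

#include <math.h>

/*
 * Hypothetical sketch (not part of msun): multiply an extra-precision
 * value val_hi + val_lo by a constant K split as K_hi + K_lo.  hi is
 * truncated to 24 bits and K_hi has 29 significant bits, so the
 * double product K_hi * hi (24 + 29 = 53 bits) has no rounding error.
 */
static double
demo_scale(double val_hi, double val_lo)
{
	static const double K_hi = 0x1.bcb7b15p-2;		/* ~1/ln(10) */
	static const double K_lo = 1 / M_LN10 - 0x1.bcb7b15p-2;
	double hi = (float)val_hi;
	double lo = val_lo + (val_hi - hi);

	return (K_hi * hi + ((K_lo + K_hi) * lo + K_lo * hi));
}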
diff --git a/lib/msun/src/math_private.h b/lib/msun/src/math_private.h
index ec2da21f955c..a853ad4f9b4c 100644
--- a/lib/msun/src/math_private.h
+++ b/lib/msun/src/math_private.h
@@ -1,924 +1,819 @@
/*
 * ====================================================
 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
 *
 * Developed at SunPro, a Sun Microsystems, Inc. business.
 * Permission to use, copy, modify, and distribute this
 * software is freely granted, provided that this notice
 * is preserved.
 * ====================================================
 */

/*
 * from: @(#)fdlibm.h 5.1 93/09/24
 * $FreeBSD$
 */

#ifndef _MATH_PRIVATE_H_
#define	_MATH_PRIVATE_H_

#include <sys/types.h>
#include <machine/endian.h>

/*
 * The original fdlibm code used statements like:
 *	n0 = ((*(int*)&one)>>29)^1;		* index of high word *
 *	ix0 = *(n0+(int*)&x);			* high word of x *
 *	ix1 = *((1-n0)+(int*)&x);		* low word of x *
 * to dig two 32 bit words out of the 64 bit IEEE floating point
 * value.  That is non-ANSI, and, moreover, the gcc instruction
 * scheduler gets it wrong.  We instead use the following macros.
 * Unlike the original code, we determine the endianness at compile
 * time, not at run time; I don't see much benefit to selecting
 * endianness at run time.
 */

/*
 * A union which permits us to convert between a double and two 32 bit
 * ints.
 */

#ifdef __arm__
#if defined(__VFP_FP__) || defined(__ARM_EABI__)
#define	IEEE_WORD_ORDER	BYTE_ORDER
#else
#define	IEEE_WORD_ORDER	BIG_ENDIAN
#endif
#else /* __arm__ */
#define	IEEE_WORD_ORDER	BYTE_ORDER
#endif

/* A union which permits us to convert between a long double and
   four 32 bit ints. */

#if IEEE_WORD_ORDER == BIG_ENDIAN

typedef union
{
  long double value;
  struct {
    u_int32_t mswhi;
    u_int32_t mswlo;
    u_int32_t lswhi;
    u_int32_t lswlo;
  } parts32;
  struct {
    u_int64_t msw;
    u_int64_t lsw;
  } parts64;
} ieee_quad_shape_type;

#endif

#if IEEE_WORD_ORDER == LITTLE_ENDIAN

typedef union
{
  long double value;
  struct {
    u_int32_t lswlo;
    u_int32_t lswhi;
    u_int32_t mswlo;
    u_int32_t mswhi;
  } parts32;
  struct {
    u_int64_t lsw;
    u_int64_t msw;
  } parts64;
} ieee_quad_shape_type;

#endif

#if IEEE_WORD_ORDER == BIG_ENDIAN

typedef union
{
  double value;
  struct
  {
    u_int32_t msw;
    u_int32_t lsw;
  } parts;
  struct
  {
    u_int64_t w;
  } xparts;
} ieee_double_shape_type;

#endif

#if IEEE_WORD_ORDER == LITTLE_ENDIAN

typedef union
{
  double value;
  struct
  {
    u_int32_t lsw;
    u_int32_t msw;
  } parts;
  struct
  {
    u_int64_t w;
  } xparts;
} ieee_double_shape_type;

#endif

/* Get two 32 bit ints from a double.  */

#define	EXTRACT_WORDS(ix0,ix1,d) \
do { \
  ieee_double_shape_type ew_u; \
  ew_u.value = (d); \
  (ix0) = ew_u.parts.msw; \
  (ix1) = ew_u.parts.lsw; \
} while (0)

/* Get a 64-bit int from a double. */
#define	EXTRACT_WORD64(ix,d) \
do { \
  ieee_double_shape_type ew_u; \
  ew_u.value = (d); \
  (ix) = ew_u.xparts.w; \
} while (0)

/* Get the more significant 32 bit int from a double.  */

#define	GET_HIGH_WORD(i,d) \
do { \
  ieee_double_shape_type gh_u; \
  gh_u.value = (d); \
  (i) = gh_u.parts.msw; \
} while (0)

/* Get the less significant 32 bit int from a double.  */

#define	GET_LOW_WORD(i,d) \
do { \
  ieee_double_shape_type gl_u; \
  gl_u.value = (d); \
  (i) = gl_u.parts.lsw; \
} while (0)

/* Set a double from two 32 bit ints.  */

#define	INSERT_WORDS(d,ix0,ix1) \
do { \
  ieee_double_shape_type iw_u; \
  iw_u.parts.msw = (ix0); \
  iw_u.parts.lsw = (ix1); \
  (d) = iw_u.value; \
} while (0)

/* Set a double from a 64-bit int. */
#define	INSERT_WORD64(d,ix) \
do { \
  ieee_double_shape_type iw_u; \
  iw_u.xparts.w = (ix); \
  (d) = iw_u.value; \
} while (0)

/* Set the more significant 32 bits of a double from an int.  */

#define	SET_HIGH_WORD(d,v) \
do { \
  ieee_double_shape_type sh_u; \
  sh_u.value = (d); \
  sh_u.parts.msw = (v); \
  (d) = sh_u.value; \
} while (0)

/* Set the less significant 32 bits of a double from an int.  */

#define	SET_LOW_WORD(d,v) \
do { \
  ieee_double_shape_type sl_u; \
  sl_u.value = (d); \
  sl_u.parts.lsw = (v); \
  (d) = sl_u.value; \
} while (0)
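[Editor's note: as a usage illustration of the word macros above, here is a minimal hedged sketch, not part of math_private.h; my_fabs is a hypothetical name.]

/*
 * Hypothetical example (not part of math_private.h): implement fabs()
 * by masking the sign bit out of a double's high word.
 */
static double
my_fabs(double x)
{
	u_int32_t hi;

	GET_HIGH_WORD(hi, x);
	SET_HIGH_WORD(x, hi & 0x7fffffff);
	return (x);
}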
/*
 * A union which permits us to convert between a float and a 32 bit
 * int.
 */

typedef union
{
  float value;
  /* FIXME: Assumes 32 bit int.  */
  unsigned int word;
} ieee_float_shape_type;

/* Get a 32 bit int from a float.  */

#define	GET_FLOAT_WORD(i,d) \
do { \
  ieee_float_shape_type gf_u; \
  gf_u.value = (d); \
  (i) = gf_u.word; \
} while (0)

/* Set a float from a 32 bit int.  */

#define	SET_FLOAT_WORD(d,i) \
do { \
  ieee_float_shape_type sf_u; \
  sf_u.word = (i); \
  (d) = sf_u.value; \
} while (0)

/*
 * Get expsign and mantissa as 16 bit and 64 bit ints from an 80 bit long
 * double.
 */
#define	EXTRACT_LDBL80_WORDS(ix0,ix1,d) \
do { \
  union IEEEl2bits ew_u; \
  ew_u.e = (d); \
  (ix0) = ew_u.xbits.expsign; \
  (ix1) = ew_u.xbits.man; \
} while (0)

/*
 * Get expsign and mantissa as one 16 bit and two 64 bit ints from a 128 bit
 * long double.
 */
#define	EXTRACT_LDBL128_WORDS(ix0,ix1,ix2,d) \
do { \
  union IEEEl2bits ew_u; \
  ew_u.e = (d); \
  (ix0) = ew_u.xbits.expsign; \
  (ix1) = ew_u.xbits.manh; \
  (ix2) = ew_u.xbits.manl; \
} while (0)

/* Get expsign as a 16 bit int from a long double.  */
#define	GET_LDBL_EXPSIGN(i,d) \
do { \
  union IEEEl2bits ge_u; \
  ge_u.e = (d); \
  (i) = ge_u.xbits.expsign; \
} while (0)

/*
 * Set an 80 bit long double from a 16 bit int expsign and a 64 bit int
 * mantissa.
 */
#define	INSERT_LDBL80_WORDS(d,ix0,ix1) \
do { \
  union IEEEl2bits iw_u; \
  iw_u.xbits.expsign = (ix0); \
  iw_u.xbits.man = (ix1); \
  (d) = iw_u.e; \
} while (0)

/*
 * Set a 128 bit long double from a 16 bit int expsign and two 64 bit ints
 * comprising the mantissa.
 */
#define	INSERT_LDBL128_WORDS(d,ix0,ix1,ix2) \
do { \
  union IEEEl2bits iw_u; \
  iw_u.xbits.expsign = (ix0); \
  iw_u.xbits.manh = (ix1); \
  iw_u.xbits.manl = (ix2); \
  (d) = iw_u.e; \
} while (0)

/* Set expsign of a long double from a 16 bit int.  */
#define	SET_LDBL_EXPSIGN(d,v) \
do { \
  union IEEEl2bits se_u; \
  se_u.e = (d); \
  se_u.xbits.expsign = (v); \
  (d) = se_u.e; \
} while (0)

#ifdef __i386__
/* Long double constants are broken on i386. */
#define	LD80C(m, ex, v) { \
	.xbits.man = __CONCAT(m, ULL), \
	.xbits.expsign = (0x3fff + (ex)) | ((v) < 0 ? 0x8000 : 0), \
}
#else
/* The above works on non-i386 too, but we use this to check v. */
#define	LD80C(m, ex, v)	{ .e = (v), }
#endif

#ifdef FLT_EVAL_METHOD
/*
 * Attempt to get strict C99 semantics for assignment with non-C99 compilers.
 */
#if FLT_EVAL_METHOD == 0 || __GNUC__ == 0
#define	STRICT_ASSIGN(type, lval, rval)	((lval) = (rval))
#else
#define	STRICT_ASSIGN(type, lval, rval) do { \
	volatile type __lval; \
 \
	if (sizeof(type) >= sizeof(long double)) \
		(lval) = (rval); \
	else { \
		__lval = (rval); \
		(lval) = __lval; \
	} \
} while (0)
#endif
#endif /* FLT_EVAL_METHOD */

/* Support switching the mode to FP_PE if necessary. */
#if defined(__i386__) && !defined(NO_FPSETPREC)
#define	ENTERI() ENTERIT(long double)
#define	ENTERIT(returntype) \
	returntype __retval; \
	fp_prec_t __oprec; \
 \
	if ((__oprec = fpgetprec()) != FP_PE) \
		fpsetprec(FP_PE)
#define	RETURNI(x) do { \
	__retval = (x); \
	if (__oprec != FP_PE) \
		fpsetprec(__oprec); \
	RETURNF(__retval); \
} while (0)
#define	ENTERV() \
	fp_prec_t __oprec; \
 \
	if ((__oprec = fpgetprec()) != FP_PE) \
		fpsetprec(FP_PE)
#define	RETURNV() do { \
	if (__oprec != FP_PE) \
		fpsetprec(__oprec); \
	return; \
} while (0)
#else
#define	ENTERI()
#define	ENTERIT(x)
#define	RETURNI(x)	RETURNF(x)
#define	ENTERV()
#define	RETURNV()	return
#endif

/* Default return statement if hack*_t() is not used. */
#define	RETURNF(v)	return (v)
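[Editor's note: ENTERI()/RETURNI() bracket i386's runtime switch to extended precision, as seen in logl() above.  The sketch below shows the standard shape of a caller under that assumption; demo_funcl is a hypothetical name, not part of msun.]

/*
 * Hypothetical sketch (not part of math_private.h): a long double
 * function using the ENTERI()/RETURNI() pattern.  On i386, ENTERI()
 * raises the x87 precision to FP_PE and RETURNI() restores the
 * caller's setting; elsewhere both expand to (almost) nothing.
 */
long double
demo_funcl(long double x)
{
	ENTERI();
	/* ... long double computation ... */
	RETURNI(x + 1);
}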
/*
 * 2sum gives the same result as 2sumF without requiring |a| >= |b| or
 * a == 0, but is slower.
 */
#define	_2sum(a, b) do { \
	__typeof(a) __s, __w; \
 \
	__w = (a) + (b); \
	__s = __w - (a); \
	(b) = ((a) - (__w - __s)) + ((b) - __s); \
	(a) = __w; \
} while (0)

/*
 * 2sumF algorithm.
 *
 * "Normalize" the terms in the infinite-precision expression a + b for
 * the sum of 2 floating point values so that b is as small as possible
 * relative to 'a'.  (The resulting 'a' is the value of the expression in
 * the same precision as 'a' and the resulting b is the rounding error.)
 * |a| must be >= |b| or 0, b's type must be no larger than 'a's type, and
 * exponent overflow or underflow must not occur.  This uses a Theorem of
 * Dekker (1971).  See Knuth (1981) 4.2.2 Theorem C.  The name "TwoSum"
 * is apparently due to Shewchuk (1997).
 *
 * For this to always work, assignment of a + b to 'a' must not retain any
 * extra precision in a + b.  This is required by C standards but broken
 * in many compilers.  The brokenness cannot be worked around using
 * STRICT_ASSIGN() like we do elsewhere, since the efficiency of this
 * algorithm would be destroyed by non-null strict assignments.  (The
 * compilers are correct to be broken -- the efficiency of all floating
 * point code calculations would be destroyed similarly if they forced the
 * conversions.)
 *
 * Fortunately, a case that works well can usually be arranged by building
 * any extra precision into the type of 'a' -- 'a' should have type float_t,
 * double_t or long double.  b's type should be no larger than 'a's type.
 * Callers should use these types with scopes as large as possible, to
 * reduce their own extra-precision and efficiency problems.  In
 * particular, they shouldn't convert back and forth just to call here.
 */
#ifdef DEBUG
#define	_2sumF(a, b) do { \
	__typeof(a) __w; \
	volatile __typeof(a) __ia, __ib, __r, __vw; \
 \
	__ia = (a); \
	__ib = (b); \
	assert(__ia == 0 || fabsl(__ia) >= fabsl(__ib)); \
 \
	__w = (a) + (b); \
	(b) = ((a) - __w) + (b); \
	(a) = __w; \
 \
	/* The next 2 assertions are weak if (a) is already long double. */ \
	assert((long double)__ia + __ib == (long double)(a) + (b)); \
	__vw = __ia + __ib; \
	__r = __ia - __vw; \
	__r += __ib; \
	assert(__vw == (a) && __r == (b)); \
} while (0)
#else /* !DEBUG */
#define	_2sumF(a, b) do { \
	__typeof(a) __w; \
 \
	__w = (a) + (b); \
	(b) = ((a) - __w) + (b); \
	(a) = __w; \
} while (0)
#endif /* DEBUG */
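[Editor's note: a quick way to see what _2sumF() computes: after it runs, 'a' holds the rounded sum and 'b' the exact rounding error.  A hypothetical stand-alone demo follows; demo_2sumF is not part of the header.]

#include <stdio.h>

/*
 * Hypothetical demo (not part of math_private.h): a = 1 and b = 2**-60
 * cannot be added exactly in double precision; _2sumF() recovers the
 * lost low part in b.  Requires |a| >= |b|, as documented above.
 */
static void
demo_2sumF(void)
{
	double a = 1.0, b = 0x1p-60;

	_2sumF(a, b);
	printf("hi = %a, lo = %a\n", a, b);	/* hi = 0x1p+0, lo = 0x1p-60 */
}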
/*
 * Set x += c, where x is represented in extra precision as a + b.
 * x must be sufficiently normalized and sufficiently larger than c,
 * and the result is then sufficiently normalized.
 *
 * The details of ordering are that |a| must be >= |c| (so that (a, c)
 * can be normalized without extra work to swap 'a' with c).  The details of
 * the normalization are that b must be small relative to the normalized 'a'.
 * Normalization of (a, c) makes the normalized c tiny relative to the
 * normalized a, so b remains small relative to 'a' in the result.  However,
 * b need not ever be tiny relative to 'a'.  For example, b might be about
 * 2**20 times smaller than 'a' to give about 20 extra bits of precision.
 * That is usually enough, and adding c (which by normalization is about
 * 2**53 times smaller than a) cannot change b significantly.  However,
 * cancellation of 'a' with c in normalization of (a, c) may reduce 'a'
 * significantly relative to b.  The caller must ensure that significant
 * cancellation doesn't occur, either by having c of the same sign as 'a',
 * or by having |c| a few percent smaller than |a|.  Pre-normalization of
 * (a, b) may help.
 *
 * This is a variant of an algorithm of Kahan (see Knuth (1981) 4.2.2
 * exercise 19).  We gain considerable efficiency by requiring the terms to
 * be sufficiently normalized and sufficiently increasing.
 */
#define	_3sumF(a, b, c) do { \
	__typeof(a) __tmp; \
 \
	__tmp = (c); \
	_2sumF(__tmp, (a)); \
	(b) += (a); \
	(a) = __tmp; \
} while (0)

/*
 * Common routine to process the arguments to nan(), nanf(), and nanl().
 */
void	_scan_nan(uint32_t *__words, int __num_words, const char *__s);

/*
 * Mix 0, 1 or 2 NaNs.  First add 0 to each arg.  This normally just turns
 * signaling NaNs into quiet NaNs by setting a quiet bit.  We do this
 * because we want to never return a signaling NaN, and also because we
 * don't want the quiet bit to affect the result.  Then mix the converted
 * args using the specified operation.
 *
 * When one arg is NaN, the result is typically that arg quieted.  When both
 * args are NaNs, the result is typically the quietening of the arg whose
 * mantissa is largest after quietening.  When neither arg is NaN, the
 * result may be NaN because it is indeterminate, or finite for subsequent
 * construction of a NaN as the indeterminate 0.0L/0.0L.
 *
 * Technical complications: the result in bits after rounding to the final
 * precision might depend on the runtime precision and/or on compiler
 * optimizations, especially when different register sets are used for
 * different precisions.  Try to make the result not depend on at least the
 * runtime precision by always doing the main mixing step in long double
 * precision.  Try to reduce dependencies on optimizations by adding the
 * 0's in different precisions (unless everything is in long double
 * precision).
 */
#define	nan_mix(x, y)		(nan_mix_op((x), (y), +))
#define	nan_mix_op(x, y, op)	(((x) + 0.0L) op ((y) + 0))

#ifdef _COMPLEX_H

/*
 * C99 specifies that complex numbers have the same representation as
 * an array of two elements, where the first element is the real part
 * and the second element is the imaginary part.
 */
typedef union {
	float complex f;
	float a[2];
} float_complex;
typedef union {
	double complex f;
	double a[2];
} double_complex;
typedef union {
	long double complex f;
	long double a[2];
} long_double_complex;
#define	REALPART(z)	((z).a[0])
#define	IMAGPART(z)	((z).a[1])

/*
 * Inline functions that can be used to construct complex values.
 *
 * The C99 standard intends x+I*y to be used for this, but x+I*y is
 * currently unusable in general since gcc introduces many overflow,
 * underflow, sign and efficiency bugs by rewriting I*y as
 * (0.0+I)*(y+0.0*I) and laboriously computing the full complex product.
 * In particular, I*Inf is corrupted to NaN+I*Inf, and I*-0 is corrupted
 * to -0.0+I*0.0.
 *
 * The C11 standard introduced the macros CMPLX(), CMPLXF() and CMPLXL()
 * to construct complex values.  Compilers that conform to the C99
 * standard require the following functions to avoid the above issues.
 */

#ifndef CMPLXF
static __inline float complex
CMPLXF(float x, float y)
{
	float_complex z;

	REALPART(z) = x;
	IMAGPART(z) = y;
	return (z.f);
}
#endif

#ifndef CMPLX
static __inline double complex
CMPLX(double x, double y)
{
	double_complex z;

	REALPART(z) = x;
	IMAGPART(z) = y;
	return (z.f);
}
#endif

#ifndef CMPLXL
static __inline long double complex
CMPLXL(long double x, long double y)
{
	long_double_complex z;

	REALPART(z) = x;
	IMAGPART(z) = y;
	return (z.f);
}
#endif

#endif /* _COMPLEX_H */
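[Editor's note: as an illustration of nan_mix(), here is a hedged sketch of the usual pattern in two-argument functions; the function name is hypothetical and the dispatch shown is an assumption, not a quote of any particular msun function.]

#include <math.h>

/*
 * Hypothetical sketch (not part of math_private.h): a two-argument
 * function returns nan_mix(x, y) so that a signaling NaN argument is
 * quieted and a single quiet NaN propagates to the caller.
 */
static double
demo_func2(double x, double y)
{
	if (isnan(x) || isnan(y))
		return (nan_mix(x, y));
	return (x * y);		/* normal path */
}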
/*
 * The rnint() family rounds to the nearest integer for a restricted
 * range of args (up to about 2**MANT_DIG).  We assume that the current
 * rounding mode is FE_TONEAREST so that this can be done efficiently.
 * Extra precision causes more problems in practice, and we only centralize
 * this here to reduce those problems, and have not solved the efficiency
 * problems.  The exp2() family uses a more delicate version of this that
 * requires extracting bits from the intermediate value, so it is not
 * centralized here and should copy any solution of the efficiency problems.
 */
static inline double
rnint(__double_t x)
{
	/*
	 * This casts to double to kill any extra precision.  This depends
	 * on the cast being applied to a double_t to avoid compiler bugs
	 * (this is a cleaner version of STRICT_ASSIGN()).  This is
	 * inefficient if there actually is extra precision, but is hard
	 * to improve on.  We use double_t in the API to minimise conversions
	 * for just calling here.  Note that we cannot easily change the
	 * magic number to the one that works directly with double_t, since
	 * the rounding precision is variable at runtime on x86 so the
	 * magic number would need to be variable.  Assuming that the
	 * rounding precision is always the default is too fragile.  This
	 * and many other complications will move when the default is
	 * changed to FP_PE.
	 */
	return ((double)(x + 0x1.8p52) - 0x1.8p52);
}

static inline float
rnintf(__float_t x)
{
	/*
	 * As for rnint(), except we could just call that to handle the
	 * extra precision case, usually without losing efficiency.
	 */
	return ((float)(x + 0x1.8p23F) - 0x1.8p23F);
}

#ifdef LDBL_MANT_DIG
/*
 * The complications for extra precision are smaller for rnintl() since it
 * can safely assume that the rounding precision has been increased from
 * its default to FP_PE on x86.  We don't exploit that here to get small
 * optimizations from limiting the range to double.  We just need it for
 * the magic number to work with long doubles.  ld128 callers should use
 * rnint() instead of this if possible.  ld80 callers should prefer
 * rnintl() since for amd64 this avoids swapping the register set, while
 * for i386 it makes no difference (assuming FP_PE), and for other arches
 * it makes little difference.
 */
static inline long double
rnintl(long double x)
{
	return (x + __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2 -
	    __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2);
}
#endif /* LDBL_MANT_DIG */

/*
 * irint() and i64rint() give the same result as casting to their integer
 * return type provided their arg is a floating point integer.  They can
 * sometimes be more efficient because no rounding is required.
 */
#if defined(amd64) || defined(__i386__)
#define	irint(x) \
    (sizeof(x) == sizeof(float) && \
    sizeof(__float_t) == sizeof(long double) ? irintf(x) : \
    sizeof(x) == sizeof(double) && \
    sizeof(__double_t) == sizeof(long double) ? irintd(x) : \
    sizeof(x) == sizeof(long double) ? irintl(x) : (int)(x))
#else
#define	irint(x)	((int)(x))
#endif

#define	i64rint(x)	((int64_t)(x))	/* only needed for ld128 so not opt. */

#if defined(__i386__)
static __inline int
irintf(float x)
{
	int n;

	__asm("fistl %0" : "=m" (n) : "t" (x));
	return (n);
}

static __inline int
irintd(double x)
{
	int n;

	__asm("fistl %0" : "=m" (n) : "t" (x));
	return (n);
}
#endif

#if defined(__amd64__) || defined(__i386__)
static __inline int
irintl(long double x)
{
	int n;

	__asm("fistl %0" : "=m" (n) : "t" (x));
	return (n);
}
#endif

#ifdef DEBUG
#if defined(__amd64__) || defined(__i386__)
#define	breakpoint()	asm("int $3")
#else
#include <signal.h>
#define	breakpoint()	raise(SIGTRAP)
#endif
#endif
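[Editor's note: the magic-number trick in rnint() relies on FE_TONEAREST: adding and then subtracting 1.5*2**52 forces the fractional bits to be rounded away, with ties going to even.  A hypothetical check, not part of the header:]

#include <assert.h>

/*
 * Hypothetical demo (not part of math_private.h): rnint() rounds
 * halfway cases to even, unlike the naive (double)(long long)(x + 0.5).
 */
static void
demo_rnint(void)
{
	assert(rnint(2.5) == 2);	/* tie rounds to even */
	assert(rnint(3.5) == 4);
	assert(rnint(-1.25) == -1);
}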
-/* Write a pari script to test things externally. */
-#ifdef DOPRINT
-#include <stdio.h>
-
-#ifndef DOPRINT_SWIZZLE
-#define	DOPRINT_SWIZZLE	0
-#endif
-
-#ifdef DOPRINT_LD80
-
-#define	DOPRINT_START(xp) do { \
-	uint64_t __lx; \
-	uint16_t __hx; \
- \
-	/* Hack to give more-problematic args. */ \
-	EXTRACT_LDBL80_WORDS(__hx, __lx, *xp); \
-	__lx ^= DOPRINT_SWIZZLE; \
-	INSERT_LDBL80_WORDS(*xp, __hx, __lx); \
-	printf("x = %.21Lg; ", (long double)*xp); \
-} while (0)
-#define	DOPRINT_END1(v) \
-	printf("y = %.21Lg; z = 0; show(x, y, z);\n", (long double)(v))
-#define	DOPRINT_END2(hi, lo) \
-	printf("y = %.21Lg; z = %.21Lg; show(x, y, z);\n", \
-	    (long double)(hi), (long double)(lo))
-
-#elif defined(DOPRINT_D64)
-
-#define	DOPRINT_START(xp) do { \
-	uint32_t __hx, __lx; \
- \
-	EXTRACT_WORDS(__hx, __lx, *xp); \
-	__lx ^= DOPRINT_SWIZZLE; \
-	INSERT_WORDS(*xp, __hx, __lx); \
-	printf("x = %.21Lg; ", (long double)*xp); \
-} while (0)
-#define	DOPRINT_END1(v) \
-	printf("y = %.21Lg; z = 0; show(x, y, z);\n", (long double)(v))
-#define	DOPRINT_END2(hi, lo) \
-	printf("y = %.21Lg; z = %.21Lg; show(x, y, z);\n", \
-	    (long double)(hi), (long double)(lo))
-
-#elif defined(DOPRINT_F32)
-
-#define	DOPRINT_START(xp) do { \
-	uint32_t __hx; \
- \
-	GET_FLOAT_WORD(__hx, *xp); \
-	__hx ^= DOPRINT_SWIZZLE; \
-	SET_FLOAT_WORD(*xp, __hx); \
-	printf("x = %.21Lg; ", (long double)*xp); \
-} while (0)
-#define	DOPRINT_END1(v) \
-	printf("y = %.21Lg; z = 0; show(x, y, z);\n", (long double)(v))
-#define	DOPRINT_END2(hi, lo) \
-	printf("y = %.21Lg; z = %.21Lg; show(x, y, z);\n", \
-	    (long double)(hi), (long double)(lo))
-
-#else /* !DOPRINT_LD80 && !DOPRINT_D64 (LD128 only) */
-
-#ifndef DOPRINT_SWIZZLE_HIGH
-#define	DOPRINT_SWIZZLE_HIGH	0
-#endif
-
-#define	DOPRINT_START(xp) do { \
-	uint64_t __lx, __llx; \
-	uint16_t __hx; \
- \
-	EXTRACT_LDBL128_WORDS(__hx, __lx, __llx, *xp); \
-	__llx ^= DOPRINT_SWIZZLE; \
-	__lx ^= DOPRINT_SWIZZLE_HIGH; \
-	INSERT_LDBL128_WORDS(*xp, __hx, __lx, __llx); \
-	printf("x = %.36Lg; ", (long double)*xp); \
-} while (0)
-#define	DOPRINT_END1(v) \
-	printf("y = %.36Lg; z = 0; show(x, y, z);\n", (long double)(v))
-#define	DOPRINT_END2(hi, lo) \
-	printf("y = %.36Lg; z = %.36Lg; show(x, y, z);\n", \
-	    (long double)(hi), (long double)(lo))
-
-#endif /* DOPRINT_LD80 */
-
-#else /* !DOPRINT */
-#define	DOPRINT_START(xp)
-#define	DOPRINT_END1(v)
-#define	DOPRINT_END2(hi, lo)
-#endif /* DOPRINT */
-
-#define	RETURNP(x) do { \
-	DOPRINT_END1(x); \
-	RETURNF(x); \
-} while (0)
-#define	RETURNPI(x) do { \
-	DOPRINT_END1(x); \
-	RETURNI(x); \
-} while (0)
-#define	RETURN2P(x, y) do { \
-	DOPRINT_END2((x), (y)); \
-	RETURNF((x) + (y)); \
-} while (0)
-#define	RETURN2PI(x, y) do { \
-	DOPRINT_END2((x), (y)); \
-	RETURNI((x) + (y)); \
-} while (0)

#ifdef STRUCT_RETURN
#define	RETURNSP(rp) do { \
	if (!(rp)->lo_set) \
-		RETURNP((rp)->hi); \
-	RETURN2P((rp)->hi, (rp)->lo); \
+		RETURNF((rp)->hi); \
+	RETURNF((rp)->hi + (rp)->lo); \
} while (0)
#define	RETURNSPI(rp) do { \
	if (!(rp)->lo_set) \
-		RETURNPI((rp)->hi); \
-	RETURN2PI((rp)->hi, (rp)->lo); \
+		RETURNI((rp)->hi); \
+	RETURNI((rp)->hi + (rp)->lo); \
} while (0)
#endif
+
#define	SUM2P(x, y) ({ \
	const __typeof (x) __x = (x); \
	const __typeof (y) __y = (y); \
- \
-	DOPRINT_END2(__x, __y); \
	__x + __y; \
})

/*
 * ieee style elementary functions
 *
 * We rename functions here to improve other sources' diffability
 * against fdlibm.
*/ #define __ieee754_sqrt sqrt #define __ieee754_acos acos #define __ieee754_acosh acosh #define __ieee754_log log #define __ieee754_log2 log2 #define __ieee754_atanh atanh #define __ieee754_asin asin #define __ieee754_atan2 atan2 #define __ieee754_exp exp #define __ieee754_cosh cosh #define __ieee754_fmod fmod #define __ieee754_pow pow #define __ieee754_lgamma lgamma #define __ieee754_gamma gamma #define __ieee754_lgamma_r lgamma_r #define __ieee754_gamma_r gamma_r #define __ieee754_log10 log10 #define __ieee754_sinh sinh #define __ieee754_hypot hypot #define __ieee754_j0 j0 #define __ieee754_j1 j1 #define __ieee754_y0 y0 #define __ieee754_y1 y1 #define __ieee754_jn jn #define __ieee754_yn yn #define __ieee754_remainder remainder #define __ieee754_scalb scalb #define __ieee754_sqrtf sqrtf #define __ieee754_acosf acosf #define __ieee754_acoshf acoshf #define __ieee754_logf logf #define __ieee754_atanhf atanhf #define __ieee754_asinf asinf #define __ieee754_atan2f atan2f #define __ieee754_expf expf #define __ieee754_coshf coshf #define __ieee754_fmodf fmodf #define __ieee754_powf powf #define __ieee754_lgammaf lgammaf #define __ieee754_gammaf gammaf #define __ieee754_lgammaf_r lgammaf_r #define __ieee754_gammaf_r gammaf_r #define __ieee754_log10f log10f #define __ieee754_log2f log2f #define __ieee754_sinhf sinhf #define __ieee754_hypotf hypotf #define __ieee754_j0f j0f #define __ieee754_j1f j1f #define __ieee754_y0f y0f #define __ieee754_y1f y1f #define __ieee754_jnf jnf #define __ieee754_ynf ynf #define __ieee754_remainderf remainderf #define __ieee754_scalbf scalbf /* fdlibm kernel function */ int __kernel_rem_pio2(double*,double*,int,int,int); /* double precision kernel functions */ #ifndef INLINE_REM_PIO2 int __ieee754_rem_pio2(double,double*); #endif double __kernel_sin(double,double,int); double __kernel_cos(double,double); double __kernel_tan(double,double,int); double __ldexp_exp(double,int); #ifdef _COMPLEX_H double complex __ldexp_cexp(double complex,int); #endif /* float precision kernel functions */ #ifndef INLINE_REM_PIO2F int __ieee754_rem_pio2f(float,double*); #endif #ifndef INLINE_KERNEL_SINDF float __kernel_sindf(double); #endif #ifndef INLINE_KERNEL_COSDF float __kernel_cosdf(double); #endif #ifndef INLINE_KERNEL_TANDF float __kernel_tandf(double,int); #endif float __ldexp_expf(float,int); #ifdef _COMPLEX_H float complex __ldexp_cexpf(float complex,int); #endif /* long double precision kernel functions */ long double __kernel_sinl(long double, long double, int); long double __kernel_cosl(long double, long double); long double __kernel_tanl(long double, long double, int); #endif /* !_MATH_PRIVATE_H_ */