compiler_builtins/math/libm_math/generic/fma.rs

/* SPDX-License-Identifier: MIT */
/* origin: musl src/math/fma.c. Ported to generic Rust algorithm in 2025, TG. */

use crate::support::{
    CastFrom, CastInto, DInt, Float, FpResult, HInt, Int, IntTy, MinInt, Round, Status,
};

/// Fused multiply-add that works when there is not a larger float size available. Computes
/// `(x * y) + z`.
#[inline]
pub fn fma_round<F>(x: F, y: F, z: F, _round: Round) -> FpResult<F>
where
    F: Float,
    F: CastFrom<F::SignedInt>,
    F: CastFrom<i8>,
    F::Int: HInt,
    u32: CastInto<F::Int>,
{
    let one = IntTy::<F>::ONE;
    let zero = IntTy::<F>::ZERO;

    // Normalize such that the top of the mantissa is zero and we have a guard bit.
    let nx = Norm::from_float(x);
    let ny = Norm::from_float(y);
    let nz = Norm::from_float(z);

    if nx.is_zero_nan_inf() || ny.is_zero_nan_inf() {
        // `x` or `y` is zero, infinity, or NaN; the non-fused operations already handle these
        // special cases correctly, so defer to them.
        return FpResult::ok(x * y + z);
    }

    if nz.is_zero_nan_inf() {
        if nz.is_zero() {
            // Empty add component means we only need to multiply.
            return FpResult::ok(x * y);
        }
        // `z` is NaN or infinity, which sets the result.
        return FpResult::ok(z);
    }

    // multiply: r = x * y
    let zhi: F::Int;
    let zlo: F::Int;
    let (mut rlo, mut rhi) = nx.m.widen_mul(ny.m).lo_hi();

    // Exponent result of multiplication
    let mut e: i32 = nx.e + ny.e;
    // Needed shift to align `z` to the multiplication result
    let mut d: i32 = nz.e - e;
    let sbits = F::BITS as i32;

    // Scale `z`. Shift `z <<= kz`, `r >>= kr`, so `kz+kr == d`, set `e = e+kr` (== ez-kz)
    if d > 0 {
        // The magnitude of `z` is larger than `x * y`
        if d < sbits {
            // Maximum shift of one `F::BITS` means shifted `z` will fit into `2 * F::BITS`. Shift
            // it into `(zhi, zlo)`. No exponent adjustment necessary.
            zlo = nz.m << d;
            zhi = nz.m >> (sbits - d);
        } else {
            // Shift larger than `sbits`, `z` only needs the top half `zhi`. Place it there (acts
            // as a shift by `sbits`).
            zlo = zero;
            zhi = nz.m;
            d -= sbits;

            // `z`'s exponent is large enough that it now needs to be taken into account.
            e = nz.e - sbits;

            if d == 0 {
                // Exactly `sbits`, nothing to do
            } else if d < sbits {
                // Remaining shift fits within `sbits`. Leave `z` in place, shift `x * y`.
                // Collect the bits shifted out of `rlo` into the sticky bit before overwriting it.
                let sticky = IntTy::<F>::from((rlo << (sbits - d)) != zero);
                rlo = (rhi << (sbits - d)) | (rlo >> d) | sticky;
                rhi = rhi >> d;
            } else {
                // `z`'s magnitude is enough that `x * y` is irrelevant. It was nonzero, so set
                // the sticky bit.
                rlo = one;
                rhi = zero;
            }
        }
    } else {
        // `z`'s magnitude once shifted fits entirely within `zlo`
        zhi = zero;
        d = -d;
        if d == 0 {
            // No shift needed
            zlo = nz.m;
        } else if d < sbits {
            // Shift s.t. `nz.m` fits into `zlo`
            let sticky = IntTy::<F>::from((nz.m << (sbits - d)) != zero);
            zlo = (nz.m >> d) | sticky;
        } else {
            // Would be entirely shifted out, only set the sticky bit
            zlo = one;
        }
    }

    /* addition */

    let mut neg = nx.neg ^ ny.neg;
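    // The product `x * y` is negative exactly when the signs of `x` and `y` differ; the sum is a
    // same-sign addition when that matches the sign of `z` (`samesign == (neg == nz.neg)`).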
    let samesign: bool = !neg ^ nz.neg;
    let mut rhi_nonzero = true;

    if samesign {
        // r += z
        rlo = rlo.wrapping_add(zlo);
        rhi += zhi + IntTy::<F>::from(rlo < zlo);
    } else {
        // r -= z
        let (res, borrow) = rlo.overflowing_sub(zlo);
        rlo = res;
        rhi = rhi.wrapping_sub(zhi.wrapping_add(IntTy::<F>::from(borrow)));
        if (rhi >> (F::BITS - 1)) != zero {
            rlo = rlo.signed().wrapping_neg().unsigned();
            rhi = rhi.signed().wrapping_neg().unsigned() - IntTy::<F>::from(rlo != zero);
            neg = !neg;
        }
        rhi_nonzero = rhi != zero;
    }

    /* Construct result */

    // Shift result into `rhi`, left-aligned. Last bit is sticky
    if rhi_nonzero {
        // `rhi` is nonzero, so the result spans both words; shift `rhi` and `rlo` into the result.
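        // The high word carries an extra factor of `2^sbits` relative to `rlo`, so fold that
        // weight into the exponent before left-aligning.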
        e += sbits;
        d = rhi.leading_zeros() as i32 - 1;
        rhi = (rhi << d) | (rlo >> (sbits - d));
        // Update sticky
        rhi |= IntTy::<F>::from((rlo << d) != zero);
    } else if rlo != zero {
        // `rhi` is zero, `rlo` is the entire result and needs to be shifted
        d = rlo.leading_zeros() as i32 - 1;
        if d < 0 {
            // Shift and set sticky
            rhi = (rlo >> 1) | (rlo & one);
        } else {
            rhi = rlo << d;
        }
    } else {
        // exact +/- 0.0
        return FpResult::ok(x * y + z);
    }

    e -= d;

    // Use int->float conversion to populate the significand.
    // i is in [1 << (BITS - 2), (1 << (BITS - 1)) - 1]
    let mut i: F::SignedInt = rhi.signed();

    if neg {
        i = -i;
    }

    // `|r|` is in `[0x1p62,0x1p63]` for `f64`
    let mut r: F = F::cast_from_lossy(i);

    /* Account for subnormal and rounding */

    // Unbiased exponent for the maximum value of `r`
    let max_pow = F::BITS - 1 + F::EXP_BIAS;

    let mut status = Status::OK;

    if e < -(max_pow as i32 - 2) {
        // Result is subnormal before rounding
        if e == -(max_pow as i32 - 1) {
            let mut c = F::from_parts(false, max_pow, zero);
            if neg {
                c = -c;
            }

            if r == c {
                // Min normal after rounding.
                status.set_underflow(true);
                r = F::MIN_POSITIVE_NORMAL.copysign(r);
                return FpResult::new(r, status);
            }

            if (rhi << (F::SIG_BITS + 1)) != zero {
                // Account for truncated bits. One bit will be lost in the `scalbn` call, add
                // another top bit to avoid double rounding if inexact.
                let iu: F::Int = (rhi >> 1) | (rhi & one) | (one << (F::BITS - 2));
                i = iu.signed();

                if neg {
                    i = -i;
                }

                r = F::cast_from_lossy(i);

                // Remove the top bit
                r = F::cast_from(2i8) * r - c;
                status.set_underflow(true);
            }
        } else {
            // Only round once when scaled
            d = F::EXP_BITS as i32 - 1;
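            // Clear the low `d` bits of `rhi`, folding their nonzero-ness into bit `d` as a
            // sticky bit. The value then fits in `SIG_BITS + 1` bits, so the int->float
            // conversion below is exact and only the final `scalbn` rounds.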
            let sticky = IntTy::<F>::from(rhi << (F::BITS as i32 - d) != zero);
            i = (((rhi >> d) | sticky) << d).signed();

            if neg {
                i = -i;
            }

            r = F::cast_from_lossy(i);
        }
    }

    // Use our exponent to scale the final value.
    FpResult::new(super::scalbn(r, e), status)
}

/// Representation of `F` that has handled subnormals.
#[derive(Clone, Copy, Debug)]
struct Norm<F: Float> {
    /// Normalized significand with one guard bit, unsigned.
    m: F::Int,
    /// Exponent of the mantissa such that `m * 2^e = x`. Accounts for the shift in the mantissa
    /// and the guard bit; that is, 1.0 will normalize as `m = 1 << 53` and `e = -53`.
    e: i32,
    neg: bool,
}

impl<F: Float> Norm<F> {
    /// Unbias the exponent and account for the mantissa's precision, including the guard bit.
    const EXP_UNBIAS: u32 = F::EXP_BIAS + F::SIG_BITS + 1;

    /// Values greater than or equal to this indicate the input had a saturated exponent
    /// (infinity or NaN), or was zero with its exponent adjusted to exceed this threshold.
    const ZERO_INF_NAN: u32 = F::EXP_SAT - Self::EXP_UNBIAS;

    fn from_float(x: F) -> Self {
        let mut ix = x.to_bits();
        let mut e = x.ex() as i32;
        let neg = x.is_sign_negative();
        if e == 0 {
            // Normalize subnormals by multiplication
            let scale_i = F::BITS - 1;
            let scale_f = F::from_parts(false, scale_i + F::EXP_BIAS, F::Int::ZERO);
            let scaled = x * scale_f;
            ix = scaled.to_bits();
            e = scaled.ex() as i32;
            e = if e == 0 {
                // If the exponent is still zero, the input was zero. Artificially set this value
                // such that the final `e` will exceed `ZERO_INF_NAN`.
                1 << F::EXP_BITS
            } else {
                // Otherwise, account for the scaling we just did.
                e - scale_i as i32
            };
        }

        e -= Self::EXP_UNBIAS as i32;

        // Absolute value, set the implicit bit, and shift to create a guard bit
        ix &= F::SIG_MASK;
        ix |= F::IMPLICIT_BIT;
        ix <<= 1;

        Self { m: ix, e, neg }
    }

    /// True if the value was zero, infinity, or NaN.
    fn is_zero_nan_inf(self) -> bool {
        self.e >= Self::ZERO_INF_NAN as i32
    }

    /// True if the value was zero.
    fn is_zero(self) -> bool {
        // The only exponent that strictly exceeds this value is our sentinel value for zero.
        self.e > Self::ZERO_INF_NAN as i32
    }
}
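
// Illustrative sanity checks for the routines above (a sketch, not exhaustive). These assume
// the support crate exposes `Round::Nearest`, that `FpResult` has a public `val` field, and
// that `<f64 as Float>::Int` is `u64`; adjust if the actual support API differs.
#[cfg(test)]
mod sketch_tests {
    use super::*;
    use crate::support::Round;

    #[test]
    fn norm_matches_doc_example() {
        // Per the `Norm` docs, `1.0f64` normalizes to `m = 1 << 53`, `e = -53`.
        let n = Norm::<f64>::from_float(1.0);
        assert_eq!(n.m, 1u64 << 53);
        assert_eq!(n.e, -53);
        assert!(!n.neg);
    }

    #[test]
    fn fma_round_keeps_low_product_bits() {
        // (1 + 2^-52)^2 - (1 + 2^-51) = 2^-104 exactly. An unfused `x * x + z` rounds the
        // 2^-104 term away and returns 0.0; a correct FMA must return it.
        let e = f64::EPSILON; // 2^-52
        let x = 1.0 + e;
        let z = -(1.0 + 2.0 * e);
        assert_eq!(x * x + z, 0.0);
        assert_eq!(fma_round(x, x, z, Round::Nearest).val, e * e);
    }
}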