// NOTE(review): extraction residue removed — this file is
// vendor/serde_json/src/lexical/float.rs (see attribution below).
1 // Adapted from https://github.com/Alexhuszagh/rust-lexical.
use super::num::*;
use super::rounding::*;
use super::shift::*;
/// Extended precision floating-point type.
///
/// Private implementation, exposed only for testing purposes.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) struct ExtendedFloat {
    /// Mantissa for the extended-precision float.
    pub mant: u64,
    /// Binary exponent for the extended-precision float.
    pub exp: i32,
}
26 /// Multiply two normalized extended-precision floats, as if by `a*b`.
28 /// The precision is maximal when the numbers are normalized, however,
29 /// decent precision will occur as long as both values have high bits
30 /// set. The result is not normalized.
33 /// 1. Non-signed multiplication of mantissas (requires 2x as many bits as input).
34 /// 2. Normalization of the result (not done here).
35 /// 3. Addition of exponents.
36 pub(crate) fn mul(&self, b
: &ExtendedFloat
) -> ExtendedFloat
{
37 // Logic check, values must be decently normalized prior to multiplication.
38 debug_assert
!((self.mant
& u64::HIMASK
!= 0) && (b
.mant
& u64::HIMASK
!= 0));
40 // Extract high-and-low masks.
41 let ah
= self.mant
>> u64::HALF
;
42 let al
= self.mant
& u64::LOMASK
;
43 let bh
= b
.mant
>> u64::HALF
;
44 let bl
= b
.mant
& u64::LOMASK
;
52 let mut tmp
= (ah_bl
& u64::LOMASK
) + (al_bh
& u64::LOMASK
) + (al_bl
>> u64::HALF
);
54 tmp
+= 1 << (u64::HALF
- 1);
57 mant
: ah_bh
+ (ah_bl
>> u64::HALF
) + (al_bh
>> u64::HALF
) + (tmp
>> u64::HALF
),
58 exp
: self.exp
+ b
.exp
+ u64::FULL
,
62 /// Multiply in-place, as if by `a*b`.
64 /// The result is not normalized.
66 pub(crate) fn imul(&mut self, b
: &ExtendedFloat
) {
72 /// Normalize float-point number.
74 /// Shift the mantissa so the number of leading zeros is 0, or the value
77 /// Get the number of bytes shifted.
79 pub(crate) fn normalize(&mut self) -> u32 {
81 // Using the cltz intrinsic via leading_zeros is way faster (~10x)
82 // than shifting 1-bit at a time, via while loop, and also way
83 // faster (~2x) than an unrolled loop that checks at 32, 16, 4,
86 // Using a modulus of pow2 (which will get optimized to a bitwise
87 // and with 0x3F or faster) is slightly slower than an if/then,
88 // however, removing the if/then will likely optimize more branched
89 // code as it removes conditional logic.
91 // Calculate the number of leading zeros, and then zero-out
92 // any overflowing bits, to avoid shl overflow when self.mant == 0.
93 let shift
= if self.mant
== 0 {
96 self.mant
.leading_zeros()
98 shl(self, shift
as i32);
104 /// Lossy round float-point number to native mantissa boundaries.
106 pub(crate) fn round_to_native
<F
, Algorithm
>(&mut self, algorithm
: Algorithm
)
109 Algorithm
: FnOnce(&mut ExtendedFloat
, i32),
111 round_to_native
::<F
, _
>(self, algorithm
);
116 /// Create extended float from native float.
118 pub fn from_float
<F
: Float
>(f
: F
) -> ExtendedFloat
{
124 /// Convert into default-rounded, lower-precision native float.
126 pub(crate) fn into_float
<F
: Float
>(mut self) -> F
{
127 self.round_to_native
::<F
, _
>(round_nearest_tie_even
);
131 /// Convert into downward-rounded, lower-precision native float.
133 pub(crate) fn into_downward_float
<F
: Float
>(mut self) -> F
{
134 self.round_to_native
::<F
, _
>(round_downward
);
141 // Import ExtendedFloat from native float.
143 pub(crate) fn from_float
<F
>(f
: F
) -> ExtendedFloat
148 mant
: u64::as_cast(f
.mantissa()),
155 // Export extended-precision float to native float.
157 // The extended-precision float must be in native float representation,
158 // with overflow/underflow appropriately handled.
160 pub(crate) fn into_float
<F
>(fp
: ExtendedFloat
) -> F
164 // Export floating-point number.
165 if fp
.mant
== 0 || fp
.exp
< F
::DENORMAL_EXPONENT
{
166 // sub-denormal, underflow
168 } else if fp
.exp
>= F
::MAX_EXPONENT
{
170 F
::from_bits(F
::INFINITY_BITS
)
172 // calculate the exp and fraction bits, and return a float from bits.
174 if (fp
.exp
== F
::DENORMAL_EXPONENT
) && (fp
.mant
& F
::HIDDEN_BIT_MASK
.as_u64()) == 0 {
177 exp
= (fp
.exp
+ F
::EXPONENT_BIAS
) as u64;
179 let exp
= exp
<< F
::MANTISSA_SIZE
;
180 let mant
= fp
.mant
& F
::MANTISSA_MASK
.as_u64();
181 F
::from_bits(F
::Unsigned
::as_cast(mant
| exp
))