/*
 * QEMU float support
 *
 * The code in this source file is derived from release 2a of the SoftFloat
 * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and
 * some later contributions) are provided under that license, as detailed below.
 * It has subsequently been modified by contributors to the QEMU Project,
 * so some portions are provided under:
 *  the SoftFloat-2a license
 *  the BSD license
 *  GPL-v2-or-later
 *
 * Any future contributions to this file after December 1st 2014 will be
 * taken to be licensed under the Softfloat-2a license unless specifically
 * indicated otherwise.
 */

static void partsN(return_nan)(FloatPartsN *a, float_status *s)
{
    switch (a->cls) {
    case float_class_snan:
        float_raise(float_flag_invalid, s);
        if (s->default_nan_mode) {
            parts_default_nan(a, s);
        } else {
            parts_silence_nan(a, s);
        }
        break;
    case float_class_qnan:
        if (s->default_nan_mode) {
            parts_default_nan(a, s);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b,
                                     float_status *s)
{
    if (is_snan(a->cls) || is_snan(b->cls)) {
        float_raise(float_flag_invalid, s);
    }

    if (s->default_nan_mode) {
        parts_default_nan(a, s);
    } else {
        int cmp = frac_cmp(a, b);
        if (cmp == 0) {
            cmp = a->sign < b->sign;
        }

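        /*
         * Explanatory note (not from the original source): the cmp > 0
         * flag below tells the target-specific pickNaN() whether A has
         * the larger significand, with ties broken in favour of the
         * positive-signed operand; some targets' NaN propagation rules
         * depend on this.
         */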
        if (pickNaN(a->cls, b->cls, cmp > 0, s)) {
            a = b;
        }
        if (is_snan(a->cls)) {
            parts_silence_nan(a, s);
        }
    }
    return a;
}

static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
                                            FloatPartsN *c, float_status *s,
                                            int ab_mask, int abc_mask)
{
    int which;

    if (unlikely(abc_mask & float_cmask_snan)) {
        float_raise(float_flag_invalid, s);
    }

    which = pickNaNMulAdd(a->cls, b->cls, c->cls,
                          ab_mask == float_cmask_infzero, s);

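    /*
     * Explanatory note (not from the original source): pickNaNMulAdd
     * returns 0, 1 or 2 to propagate a, b or c respectively, or 3 to
     * request the default NaN, as handled below.
     */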
    if (s->default_nan_mode || which == 3) {
        /*
         * Note that this check is after pickNaNMulAdd so that function
         * has an opportunity to set the Invalid flag for infzero.
         */
        parts_default_nan(a, s);
        return a;
    }

    switch (which) {
    case 0:
        break;
    case 1:
        a = b;
        break;
    case 2:
        a = c;
        break;
    default:
        g_assert_not_reached();
    }
    if (is_snan(a->cls)) {
        parts_silence_nan(a, s);
    }
    return a;
}

/*
 * Canonicalize the FloatParts structure.  Determine the class,
 * unbias the exponent, and normalize the fraction.
 */
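/*
 * Illustrative example (not from the original source), assuming the
 * float32 format with exp_bias 127: the value 1.5f (biased exponent
 * 127, fraction 0x400000) canonicalizes to cls = float_class_normal,
 * exp = 0, and a fraction of binary 1.1000... with the leading 1
 * stored as DECOMPOSED_IMPLICIT_BIT in frac_hi.
 */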
static void partsN(canonicalize)(FloatPartsN *p, float_status *status,
                                 const FloatFmt *fmt)
{
    if (unlikely(p->exp == 0)) {
        if (likely(frac_eqz(p))) {
            p->cls = float_class_zero;
        } else if (status->flush_inputs_to_zero) {
            float_raise(float_flag_input_denormal, status);
            p->cls = float_class_zero;
            frac_clear(p);
        } else {
            int shift = frac_normalize(p);
            p->cls = float_class_normal;
            p->exp = fmt->frac_shift - fmt->exp_bias - shift + 1;
        }
    } else if (likely(p->exp < fmt->exp_max) || fmt->arm_althp) {
        p->cls = float_class_normal;
        p->exp -= fmt->exp_bias;
        frac_shl(p, fmt->frac_shift);
        p->frac_hi |= DECOMPOSED_IMPLICIT_BIT;
    } else if (likely(frac_eqz(p))) {
        p->cls = float_class_inf;
    } else {
        frac_shl(p, fmt->frac_shift);
        p->cls = (parts_is_snan_frac(p->frac_hi, status)
                  ? float_class_snan : float_class_qnan);
    }
}

/*
 * Round and uncanonicalize a floating-point number by parts. There
 * are FRAC_SHIFT bits that may require rounding at the bottom of the
 * fraction; these bits will be removed. The exponent will be biased
 * by EXP_BIAS and must be bounded by [0, EXP_MAX-1].
 */
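/*
 * Illustrative example (not from the original source): with
 * round-to-nearest-even and, say, FRAC_SHIFT = 3, a fraction ending
 * in ...0110 rounds up (the discarded 110 is more than half an lsb),
 * ...0100 is a tie and is truncated because the kept lsb is already
 * even, and ...1100 is a tie that rounds up so the kept lsb becomes
 * even.  The inc values computed below achieve this by adding
 * frac_lsbm1 (half an lsb) except in the tie-to-even case.
 */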
static void partsN(uncanon)(FloatPartsN *p, float_status *s,
                            const FloatFmt *fmt)
{
    const int exp_max = fmt->exp_max;
    const int frac_shift = fmt->frac_shift;
    const uint64_t frac_lsb = fmt->frac_lsb;
    const uint64_t frac_lsbm1 = fmt->frac_lsbm1;
    const uint64_t round_mask = fmt->round_mask;
    const uint64_t roundeven_mask = fmt->roundeven_mask;
    uint64_t inc;
    bool overflow_norm;
    int exp, flags = 0;

    if (unlikely(p->cls != float_class_normal)) {
        switch (p->cls) {
        case float_class_zero:
            p->exp = 0;
            frac_clear(p);
            return;
        case float_class_inf:
            g_assert(!fmt->arm_althp);
            p->exp = fmt->exp_max;
            frac_clear(p);
            return;
        case float_class_qnan:
        case float_class_snan:
            g_assert(!fmt->arm_althp);
            p->exp = fmt->exp_max;
            frac_shr(p, fmt->frac_shift);
            return;
        default:
            break;
        }
        g_assert_not_reached();
    }

    switch (s->float_rounding_mode) {
    case float_round_nearest_even:
        overflow_norm = false;
        inc = ((p->frac_lo & roundeven_mask) != frac_lsbm1 ? frac_lsbm1 : 0);
        break;
    case float_round_ties_away:
        overflow_norm = false;
        inc = frac_lsbm1;
        break;
    case float_round_to_zero:
        overflow_norm = true;
        inc = 0;
        break;
    case float_round_up:
        inc = p->sign ? 0 : round_mask;
        overflow_norm = p->sign;
        break;
    case float_round_down:
        inc = p->sign ? round_mask : 0;
        overflow_norm = !p->sign;
        break;
    case float_round_to_odd:
        overflow_norm = true;
        inc = p->frac_lo & frac_lsb ? 0 : round_mask;
        break;
    default:
        g_assert_not_reached();
    }

    exp = p->exp + fmt->exp_bias;
    if (likely(exp > 0)) {
        if (p->frac_lo & round_mask) {
            flags |= float_flag_inexact;
            if (frac_addi(p, p, inc)) {
                frac_shr(p, 1);
                p->frac_hi |= DECOMPOSED_IMPLICIT_BIT;
                exp++;
            }
        }
        frac_shr(p, frac_shift);

        if (fmt->arm_althp) {
            /* ARM Alt HP eschews Inf and NaN for a wider exponent. */
            if (unlikely(exp > exp_max)) {
                /* Overflow. Return the maximum normal. */
                flags = float_flag_invalid;
                exp = exp_max;
                frac_allones(p);
            }
        } else if (unlikely(exp >= exp_max)) {
            flags |= float_flag_overflow | float_flag_inexact;
            if (overflow_norm) {
                exp = exp_max - 1;
                frac_allones(p);
            } else {
                p->cls = float_class_inf;
                exp = exp_max;
                frac_clear(p);
            }
        }
    } else if (s->flush_to_zero) {
        flags |= float_flag_output_denormal;
        p->cls = float_class_zero;
        exp = 0;
        frac_clear(p);
    } else {
        bool is_tiny = s->tininess_before_rounding || exp < 0;

        if (!is_tiny) {
            FloatPartsN discard;
            is_tiny = !frac_addi(&discard, p, inc);
        }

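        /*
         * Explanatory note (not from the original source): with a
         * biased exponent of exp <= 0 the result is subnormal, so
         * shift right by 1 - exp to line the fraction up with biased
         * exponent 0; frac_shrjam folds the shifted-out bits into a
         * sticky bit so the rounding below still sees them.
         */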
        frac_shrjam(p, 1 - exp);

        if (p->frac_lo & round_mask) {
            /* Need to recompute round-to-even/round-to-odd. */
            switch (s->float_rounding_mode) {
            case float_round_nearest_even:
                inc = ((p->frac_lo & roundeven_mask) != frac_lsbm1
                       ? frac_lsbm1 : 0);
                break;
            case float_round_to_odd:
                inc = p->frac_lo & frac_lsb ? 0 : round_mask;
                break;
            default:
                break;
            }
            flags |= float_flag_inexact;
            frac_addi(p, p, inc);
        }

        exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) != 0;
        frac_shr(p, frac_shift);

        if (is_tiny && (flags & float_flag_inexact)) {
            flags |= float_flag_underflow;
        }
        if (exp == 0 && frac_eqz(p)) {
            p->cls = float_class_zero;
        }
    }
    p->exp = exp;
    float_raise(flags, s);
}

/*
 * Returns the result of adding or subtracting the values of the
 * floating-point values `a' and `b'. The operation is performed
 * according to the IEC/IEEE Standard for Binary Floating-Point
 * Arithmetic.
 */
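/*
 * Explanatory note (not from the original source): when the operands
 * cancel exactly, IEEE 754 gives the zero result a + sign in every
 * rounding mode except round-down (toward minus infinity), which
 * gives -0; the float_round_down check below implements this.
 */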
static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b,
                                   float_status *s, bool subtract)
{
    bool b_sign = b->sign ^ subtract;
    int ab_mask = float_cmask(a->cls) | float_cmask(b->cls);

    if (a->sign != b_sign) {
        /* Subtraction */
        if (likely(ab_mask == float_cmask_normal)) {
            if (parts_sub_normal(a, b)) {
                return a;
            }
            /* Subtract was exact, fall through to set sign. */
            ab_mask = float_cmask_zero;
        }

        if (ab_mask == float_cmask_zero) {
            a->sign = s->float_rounding_mode == float_round_down;
            return a;
        }

        if (unlikely(ab_mask & float_cmask_anynan)) {
            goto p_nan;
        }

        if (ab_mask & float_cmask_inf) {
            if (a->cls != float_class_inf) {
                /* N - Inf */
                goto return_b;
            }
            if (b->cls != float_class_inf) {
                /* Inf - N */
                return a;
            }
            /* Inf - Inf */
            float_raise(float_flag_invalid, s);
            parts_default_nan(a, s);
            return a;
        }
    } else {
        /* Addition */
        if (likely(ab_mask == float_cmask_normal)) {
            parts_add_normal(a, b);
            return a;
        }

        if (ab_mask == float_cmask_zero) {
            return a;
        }

        if (unlikely(ab_mask & float_cmask_anynan)) {
            goto p_nan;
        }

        if (ab_mask & float_cmask_inf) {
            a->cls = float_class_inf;
            return a;
        }
    }

    if (b->cls == float_class_zero) {
        g_assert(a->cls == float_class_normal);
        return a;
    }

    g_assert(a->cls == float_class_zero);
    g_assert(b->cls == float_class_normal);
 return_b:
    b->sign = b_sign;
    return b;

 p_nan:
    return parts_pick_nan(a, b, s);
}

/*
 * Returns the result of multiplying the floating-point values `a' and
 * `b'. The operation is performed according to the IEC/IEEE Standard
 * for Binary Floating-Point Arithmetic.
 */
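/*
 * Illustrative example (not from the original source): 1.5 * 1.5 has
 * fractions 1.1b * 1.1b = 10.01b, so the product's top bit is already
 * set and exp = e_a + e_b + 1 is correct (2.25 = 1.001b * 2^1).  For
 * 1.0 * 1.0 the product is 01.0b, the top bit is clear, and the
 * frac_add/exp decrement below renormalizes to 1.0b * 2^0.
 */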
static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
                                float_status *s)
{
    int ab_mask = float_cmask(a->cls) | float_cmask(b->cls);
    bool sign = a->sign ^ b->sign;

    if (likely(ab_mask == float_cmask_normal)) {
        FloatPartsW tmp;

        frac_mulw(&tmp, a, b);
        frac_truncjam(a, &tmp);

        a->exp += b->exp + 1;
        if (!(a->frac_hi & DECOMPOSED_IMPLICIT_BIT)) {
            frac_add(a, a, a);
            a->exp -= 1;
        }

        a->sign = sign;
        return a;
    }

    /* Inf * Zero == NaN */
    if (unlikely(ab_mask == float_cmask_infzero)) {
        float_raise(float_flag_invalid, s);
        parts_default_nan(a, s);
        return a;
    }

    if (unlikely(ab_mask & float_cmask_anynan)) {
        return parts_pick_nan(a, b, s);
    }

    /* Multiply by 0 or Inf */
    if (ab_mask & float_cmask_inf) {
        a->cls = float_class_inf;
        a->sign = sign;
        return a;
    }

    g_assert(ab_mask & float_cmask_zero);
    a->cls = float_class_zero;
    a->sign = sign;
    return a;
}

/*
 * Returns the result of multiplying the floating-point values `a' and
 * `b' then adding 'c', with no intermediate rounding step after the
 * multiplication. The operation is performed according to the
 * IEC/IEEE Standard for Binary Floating-Point Arithmetic 754-2008.
 * The flags argument allows the caller to select negation of the
 * addend, the intermediate product, or the final result. (The
 * difference between this and having the caller do a separate
 * negation is that negating externally will flip the sign bit on NaNs.)
 *
 * Requires A and C extracted into a double-sized structure to provide the
 * extra space for the widening multiply.
 */
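/*
 * Explanatory note (not from the original source): the fused behaviour
 * comes from doing the multiply into the double-width FloatPartsW and
 * only narrowing with frac_truncjam (which folds the discarded bits
 * into a sticky bit) after the addend has been folded in, so the one
 * and only rounding happens later, in the uncanon step.
 */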
static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
                                   FloatPartsN *c, int flags, float_status *s)
{
    int ab_mask, abc_mask;
    FloatPartsW p_widen, c_widen;

    ab_mask = float_cmask(a->cls) | float_cmask(b->cls);
    abc_mask = float_cmask(c->cls) | ab_mask;

    /*
     * It is implementation-defined whether the cases of (0,inf,qnan)
     * and (inf,0,qnan) raise InvalidOperation or not (and what QNaN
     * they return if they do), so we have to hand this information
     * off to the target-specific pick-a-NaN routine.
     */
    if (unlikely(abc_mask & float_cmask_anynan)) {
        return parts_pick_nan_muladd(a, b, c, s, ab_mask, abc_mask);
    }

    if (flags & float_muladd_negate_c) {
        c->sign ^= 1;
    }

    /* Compute the sign of the product into A. */
    a->sign ^= b->sign;
    if (flags & float_muladd_negate_product) {
        a->sign ^= 1;
    }

    if (unlikely(ab_mask != float_cmask_normal)) {
        if (unlikely(ab_mask == float_cmask_infzero)) {
            goto d_nan;
        }

        if (ab_mask & float_cmask_inf) {
            if (c->cls == float_class_inf && a->sign != c->sign) {
                goto d_nan;
            }
            goto return_inf;
        }

        g_assert(ab_mask & float_cmask_zero);
        if (c->cls == float_class_normal) {
            *a = *c;
            goto return_normal;
        }
        if (c->cls == float_class_zero) {
            if (a->sign != c->sign) {
                goto return_sub_zero;
            }
            goto return_zero;
        }
        g_assert(c->cls == float_class_inf);
    }

    if (unlikely(c->cls == float_class_inf)) {
        a->sign = c->sign;
        goto return_inf;
    }

    /* Perform the multiplication step. */
    p_widen.sign = a->sign;
    p_widen.exp = a->exp + b->exp + 1;
    frac_mulw(&p_widen, a, b);
    if (!(p_widen.frac_hi & DECOMPOSED_IMPLICIT_BIT)) {
        frac_add(&p_widen, &p_widen, &p_widen);
        p_widen.exp -= 1;
    }

    /* Perform the addition step. */
    if (c->cls != float_class_zero) {
        /* Zero-extend C to less significant bits. */
        frac_widen(&c_widen, c);
        c_widen.exp = c->exp;

        if (a->sign == c->sign) {
            parts_add_normal(&p_widen, &c_widen);
        } else if (!parts_sub_normal(&p_widen, &c_widen)) {
            goto return_sub_zero;
        }
    }

    /* Narrow with sticky bit, for proper rounding later. */
    frac_truncjam(a, &p_widen);
    a->sign = p_widen.sign;
    a->exp = p_widen.exp;

 return_normal:
    if (flags & float_muladd_halve_result) {
        a->exp -= 1;
    }
 finish_sign:
    if (flags & float_muladd_negate_result) {
        a->sign ^= 1;
    }
    return a;

 return_sub_zero:
    a->sign = s->float_rounding_mode == float_round_down;
 return_zero:
    a->cls = float_class_zero;
    goto finish_sign;

 return_inf:
    a->cls = float_class_inf;
    goto finish_sign;

 d_nan:
    float_raise(float_flag_invalid, s);
    parts_default_nan(a, s);
    return a;
}

/*
 * Returns the result of dividing the floating-point value `a' by the
 * corresponding value `b'. The operation is performed according to
 * the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
 */
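/*
 * Explanatory note (not from the original source): the exponent
 * arithmetic below relies on frac_div returning 1 when the dividend
 * fraction is smaller than the divisor's, i.e. when the quotient
 * needed one renormalizing shift.  For example 1.0 / 2.0 has equal
 * fractions, so exp = 0 - (1 + 0) = -1, giving 0.5.
 */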
static FloatPartsN *partsN(div)(FloatPartsN *a, FloatPartsN *b,
                                float_status *s)
{
    int ab_mask = float_cmask(a->cls) | float_cmask(b->cls);
    bool sign = a->sign ^ b->sign;

    if (likely(ab_mask == float_cmask_normal)) {
        a->sign = sign;
        a->exp -= b->exp + frac_div(a, b);
        return a;
    }

    /* 0/0 or Inf/Inf => NaN */
    if (unlikely(ab_mask == float_cmask_zero) ||
        unlikely(ab_mask == float_cmask_inf)) {
        float_raise(float_flag_invalid, s);
        parts_default_nan(a, s);
        return a;
    }

    /* All the NaN cases */
    if (unlikely(ab_mask & float_cmask_anynan)) {
        return parts_pick_nan(a, b, s);
    }

    a->sign = sign;

    /* Inf / X */
    if (a->cls == float_class_inf) {
        return a;
    }

    /* 0 / X */
    if (a->cls == float_class_zero) {
        return a;
    }

    /* X / Inf */
    if (b->cls == float_class_inf) {
        a->cls = float_class_zero;
        return a;
    }

    /* X / 0 => Inf */
    g_assert(b->cls == float_class_zero);
    float_raise(float_flag_divbyzero, s);
    a->cls = float_class_inf;
    return a;
}

/*
 * Rounds the floating-point value `a' to an integer, and returns the
 * result as a floating-point value. The operation is performed
 * according to the IEC/IEEE Standard for Binary Floating-Point
 * Arithmetic.
 *
 * parts_round_to_int_normal is an internal helper function for
 * normal numbers only, returning true for inexact but not directly
 * raising float_flag_inexact.
 */
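/*
 * Illustrative example (not from the original source): rounding 0.5
 * to nearest-even takes the exp == -1 path below; doubling the
 * fraction discards the implicit bit and leaves zero, so "one" stays
 * false and the result is a (signed) zero.  Rounding 0.75 the same
 * way leaves a nonzero remainder, so the result is 1.0.
 */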
static bool partsN(round_to_int_normal)(FloatPartsN *a, FloatRoundMode rmode,
                                        int scale, int frac_size)
{
    uint64_t frac_lsb, frac_lsbm1, rnd_even_mask, rnd_mask, inc;
    int shift_adj;

    scale = MIN(MAX(scale, -0x10000), 0x10000);
    a->exp += scale;

    if (a->exp < 0) {
        bool one;

        /* All fractional */
        switch (rmode) {
        case float_round_nearest_even:
            one = false;
            if (a->exp == -1) {
                FloatPartsN tmp;
                /* Shift left one, discarding DECOMPOSED_IMPLICIT_BIT */
                frac_add(&tmp, a, a);
                /* Anything remaining means frac > 0.5. */
                one = !frac_eqz(&tmp);
            }
            break;
        case float_round_ties_away:
            one = a->exp == -1;
            break;
        case float_round_to_zero:
            one = false;
            break;
        case float_round_up:
            one = !a->sign;
            break;
        case float_round_down:
            one = a->sign;
            break;
        case float_round_to_odd:
            one = true;
            break;
        default:
            g_assert_not_reached();
        }

        frac_clear(a);
        a->exp = 0;
        if (one) {
            a->frac_hi = DECOMPOSED_IMPLICIT_BIT;
        } else {
            a->cls = float_class_zero;
        }
        return true;
    }

    if (a->exp >= frac_size) {
        /* All integral */
        return false;
    }

    if (N > 64 && a->exp < N - 64) {
        /*
         * Rounding is not in the low word -- shift lsb to bit 2,
         * which leaves room for sticky and rounding bit.
         */
        shift_adj = (N - 1) - (a->exp + 2);
        frac_shrjam(a, shift_adj);
        frac_lsb = 1 << 2;
    } else {
        shift_adj = 0;
        frac_lsb = DECOMPOSED_IMPLICIT_BIT >> (a->exp & 63);
    }

    frac_lsbm1 = frac_lsb >> 1;
    rnd_mask = frac_lsb - 1;
    rnd_even_mask = rnd_mask | frac_lsb;

    if (!(a->frac_lo & rnd_mask)) {
        /* Fractional bits already clear, undo the shift above. */
        frac_shl(a, shift_adj);
        return false;
    }

    switch (rmode) {
    case float_round_nearest_even:
        inc = ((a->frac_lo & rnd_even_mask) != frac_lsbm1 ? frac_lsbm1 : 0);
        break;
    case float_round_ties_away:
        inc = frac_lsbm1;
        break;
    case float_round_to_zero:
        inc = 0;
        break;
    case float_round_up:
        inc = a->sign ? 0 : rnd_mask;
        break;
    case float_round_down:
        inc = a->sign ? rnd_mask : 0;
        break;
    case float_round_to_odd:
        inc = a->frac_lo & frac_lsb ? 0 : rnd_mask;
        break;
    default:
        g_assert_not_reached();
    }

    if (shift_adj == 0) {
        if (frac_addi(a, a, inc)) {
            frac_shr(a, 1);
            a->frac_hi |= DECOMPOSED_IMPLICIT_BIT;
            a->exp++;
        }
        a->frac_lo &= ~rnd_mask;
    } else {
        frac_addi(a, a, inc);
        a->frac_lo &= ~rnd_mask;
        /* Be careful shifting back, not to overflow */
        frac_shl(a, shift_adj - 1);
        if (a->frac_hi & DECOMPOSED_IMPLICIT_BIT) {
            a->exp++;
        } else {
            frac_add(a, a, a);
        }
    }
    return true;
}

static void partsN(round_to_int)(FloatPartsN *a, FloatRoundMode rmode,
                                 int scale, float_status *s,
                                 const FloatFmt *fmt)
{
    switch (a->cls) {
    case float_class_qnan:
    case float_class_snan:
        parts_return_nan(a, s);
        break;
    case float_class_zero:
    case float_class_inf:
        break;
    case float_class_normal:
        if (parts_round_to_int_normal(a, rmode, scale, fmt->frac_size)) {
            float_raise(float_flag_inexact, s);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Returns the result of converting the floating-point value `a' to
 * the two's complement integer format. The conversion is performed
 * according to the IEC/IEEE Standard for Binary Floating-Point
 * Arithmetic---which means in particular that the conversion is
 * rounded according to the current rounding mode. If `a' is a NaN,
 * the largest positive integer is returned. Otherwise, if the
 * conversion overflows, the largest integer with the same sign as `a'
 * is returned.
 */
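/*
 * Illustrative example (not from the original source): converting 3.7
 * to a signed 32-bit integer with round-to-zero first rounds the parts
 * to 3.0 (flagging inexact), then shifts frac_hi right by
 * DECOMPOSED_BINARY_POINT - 1 to recover the integer 3.  NaNs and
 * out-of-range values saturate to max/min and raise invalid instead.
 */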
static int64_t partsN(float_to_sint)(FloatPartsN *p, FloatRoundMode rmode,
                                     int scale, int64_t min, int64_t max,
                                     float_status *s)
{
    int flags = 0;
    uint64_t r;

    switch (p->cls) {
    case float_class_snan:
    case float_class_qnan:
        flags = float_flag_invalid;
        r = max;
        break;

    case float_class_inf:
        flags = float_flag_invalid;
        r = p->sign ? min : max;
        break;

    case float_class_zero:
        return 0;

    case float_class_normal:
        /* TODO: N - 2 is frac_size for rounding; could use input fmt. */
        if (parts_round_to_int_normal(p, rmode, scale, N - 2)) {
            flags = float_flag_inexact;
        }

        if (p->exp <= DECOMPOSED_BINARY_POINT) {
            r = p->frac_hi >> (DECOMPOSED_BINARY_POINT - p->exp);
        } else {
            r = UINT64_MAX;
        }
        if (p->sign) {
            if (r <= -(uint64_t)min) {
                r = -r;
            } else {
                flags = float_flag_invalid;
                r = min;
            }
        } else if (r > max) {
            flags = float_flag_invalid;
            r = max;
        }
        break;

    default:
        g_assert_not_reached();
    }

    float_raise(flags, s);
    return r;
}