1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
5 /* Copyright 1994-2016 Free Software Foundation, Inc.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 /* As a special exception, if you link this library with other files,
21 some of which are compiled with GCC, to produce an executable,
22 this library does not by itself cause the resulting executable
23 to be covered by the GNU General Public License.
24 This exception does not however invalidate any other reasons why
25 the executable file might be covered by the GNU General Public License. */
27 /* This implements IEEE 754 format arithmetic, but does not provide a
28 mechanism for setting the rounding mode, or for generating or handling exceptions.
31 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
32 Wilson, all of Cygnus Support. */
38 #include "sim-basics.h"
42 #include "sim-assert.h"
46 If digits is -1, then print all digits. */
49 print_bits (unsigned64 x
,
52 sim_fpu_print_func print
,
55 unsigned64 bit
= LSBIT64 (msbit
);
76 /* Quick and dirty conversion between a host double and host 64bit int */
85 /* A packed IEEE floating point number.
87 Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
88 32 and 64 bit numbers. This number is interpreted as:
90 Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
91 (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
93 Denormalized (0 == BIASEDEXP && FRAC != 0):
94 (sign ? "-" : "+") 0.<FRAC> x 2 ^ (- EXPBIAS)
96 Zero (0 == BIASEDEXP && FRAC == 0):
97 (sign ? "-" : "+") 0.0
99 Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
100 (sign ? "-" : "+") "infinity"
102 SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
105 QuietNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC > QUIET_NAN):
110 #define NR_EXPBITS (is_double ? 11 : 8)
111 #define NR_FRACBITS (is_double ? 52 : 23)
112 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
/* Maximum biased exponent values for single (8 exponent bits) and
   double (11 exponent bits) precision.  Fixes the historical
   "EXMPAX64" misspelling so the name matches EXPMAX32/EXPBIAS64.  */
#define EXPMAX32 (255)
#define EXPMAX64 (2047)
#define EXPMAX ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))
118 #define EXPBIAS32 (127)
119 #define EXPBIAS64 (1023)
120 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
122 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
126 /* An unpacked floating point number.
128 When unpacked, the fraction of both a 32 and 64 bit floating point
129 number is stored using the same format:
131 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
132 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
134 #define NR_PAD32 (30)
136 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
137 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
139 #define NR_GUARDS32 (7 + NR_PAD32)
140 #define NR_GUARDS64 (8 + NR_PAD64)
141 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
142 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
144 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
145 #define GUARDLSB LSBIT64 (NR_PAD)
146 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
148 #define NR_FRAC_GUARD (60)
149 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
150 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
151 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
154 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
156 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
158 #define NORMAL_EXPMAX32 (EXPBIAS32)
159 #define NORMAL_EXPMAX64 (EXPBIAS64)
160 #define NORMAL_EXPMAX (EXPBIAS)
163 /* Integer constants */
165 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
166 #define MAX_UINT32 LSMASK64 (31, 0)
167 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
169 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
170 #define MAX_UINT64 LSMASK64 (63, 0)
171 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
173 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
174 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
175 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
176 #define NR_INTBITS (is_64bit ? 64 : 32)
178 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer */
179 STATIC_INLINE_SIM_FPU (unsigned64
)
180 pack_fpu (const sim_fpu
*src
,
191 case sim_fpu_class_qnan
:
194 /* force fraction to correct class */
195 fraction
= src
->fraction
;
196 fraction
>>= NR_GUARDS
;
197 #ifdef SIM_QUIET_NAN_NEGATED
198 fraction
|= QUIET_NAN
- 1;
200 fraction
|= QUIET_NAN
;
203 case sim_fpu_class_snan
:
206 /* force fraction to correct class */
207 fraction
= src
->fraction
;
208 fraction
>>= NR_GUARDS
;
209 #ifdef SIM_QUIET_NAN_NEGATED
210 fraction
|= QUIET_NAN
;
212 fraction
&= ~QUIET_NAN
;
215 case sim_fpu_class_infinity
:
220 case sim_fpu_class_zero
:
225 case sim_fpu_class_number
:
226 case sim_fpu_class_denorm
:
227 ASSERT (src
->fraction
>= IMPLICIT_1
);
228 ASSERT (src
->fraction
< IMPLICIT_2
);
229 if (src
->normal_exp
< NORMAL_EXPMIN
)
231 /* This number's exponent is too low to fit into the bits
232 available in the number We'll denormalize the number by
233 storing zero in the exponent and shift the fraction to
234 the right to make up for it. */
235 int nr_shift
= NORMAL_EXPMIN
- src
->normal_exp
;
236 if (nr_shift
> NR_FRACBITS
)
238 /* underflow, just make the number zero */
247 /* Shift by the value */
248 fraction
= src
->fraction
;
249 fraction
>>= NR_GUARDS
;
250 fraction
>>= nr_shift
;
253 else if (src
->normal_exp
> NORMAL_EXPMAX
)
262 exp
= (src
->normal_exp
+ EXPBIAS
);
264 fraction
= src
->fraction
;
265 /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
267 /* Round to nearest: If the guard bits are the all zero, but
268 the first, then we're half way between two numbers,
269 choose the one which makes the lsb of the answer 0. */
270 if ((fraction
& GUARDMASK
) == GUARDMSB
)
272 if ((fraction
& (GUARDMSB
<< 1)))
273 fraction
+= (GUARDMSB
<< 1);
277 /* Add a one to the guards to force round to nearest */
278 fraction
+= GUARDROUND
;
280 if ((fraction
& IMPLICIT_2
)) /* rounding resulted in carry */
285 fraction
>>= NR_GUARDS
;
286 /* When exp == EXPMAX (overflow from carry) fraction must
287 have been made zero */
288 ASSERT ((exp
== EXPMAX
) <= ((fraction
& ~IMPLICIT_1
) == 0));
295 packed
= ((sign
? SIGNBIT
: 0)
296 | (exp
<< NR_FRACBITS
)
297 | LSMASKED64 (fraction
, NR_FRACBITS
- 1, 0));
299 /* trace operation */
306 printf ("pack_fpu: ");
307 printf ("-> %c%0lX.%06lX\n",
308 LSMASKED32 (packed
, 31, 31) ? '8' : '0',
309 (long) LSEXTRACTED32 (packed
, 30, 23),
310 (long) LSEXTRACTED32 (packed
, 23 - 1, 0));
318 /* Unpack a 32/64 bit integer into a sim_fpu structure */
319 STATIC_INLINE_SIM_FPU (void)
320 unpack_fpu (sim_fpu
*dst
, unsigned64 packed
, int is_double
)
322 unsigned64 fraction
= LSMASKED64 (packed
, NR_FRACBITS
- 1, 0);
323 unsigned exp
= LSEXTRACTED64 (packed
, NR_EXPBITS
+ NR_FRACBITS
- 1, NR_FRACBITS
);
324 int sign
= (packed
& SIGNBIT
) != 0;
328 /* Hmm. Looks like 0 */
331 /* tastes like zero */
332 dst
->class = sim_fpu_class_zero
;
338 /* Zero exponent with non zero fraction - it's denormalized,
339 so there isn't a leading implicit one - we'll shift it so
341 dst
->normal_exp
= exp
- EXPBIAS
+ 1;
342 dst
->class = sim_fpu_class_denorm
;
344 fraction
<<= NR_GUARDS
;
345 while (fraction
< IMPLICIT_1
)
350 dst
->fraction
= fraction
;
353 else if (exp
== EXPMAX
)
358 /* Attached to a zero fraction - means infinity */
359 dst
->class = sim_fpu_class_infinity
;
361 /* dst->normal_exp = EXPBIAS; */
362 /* dst->fraction = 0; */
368 /* Non zero fraction, means NaN */
370 dst
->fraction
= (fraction
<< NR_GUARDS
);
371 #ifdef SIM_QUIET_NAN_NEGATED
372 qnan
= (fraction
& QUIET_NAN
) == 0;
374 qnan
= fraction
>= QUIET_NAN
;
377 dst
->class = sim_fpu_class_qnan
;
379 dst
->class = sim_fpu_class_snan
;
384 /* Nothing strange about this number */
385 dst
->class = sim_fpu_class_number
;
387 dst
->fraction
= ((fraction
<< NR_GUARDS
) | IMPLICIT_1
);
388 dst
->normal_exp
= exp
- EXPBIAS
;
391 /* trace operation */
398 printf ("unpack_fpu: %c%02lX.%06lX ->\n",
399 LSMASKED32 (packed
, 31, 31) ? '8' : '0',
400 (long) LSEXTRACTED32 (packed
, 30, 23),
401 (long) LSEXTRACTED32 (packed
, 23 - 1, 0));
408 val
.i
= pack_fpu (dst
, 1);
411 ASSERT (val
.i
== packed
);
415 unsigned32 val
= pack_fpu (dst
, 0);
416 unsigned32 org
= packed
;
423 /* Convert a floating point into an integer */
424 STATIC_INLINE_SIM_FPU (int)
433 if (sim_fpu_is_zero (s
))
438 if (sim_fpu_is_snan (s
))
440 *i
= MIN_INT
; /* FIXME */
441 return sim_fpu_status_invalid_cvi
;
443 if (sim_fpu_is_qnan (s
))
445 *i
= MIN_INT
; /* FIXME */
446 return sim_fpu_status_invalid_cvi
;
448 /* map infinity onto MAX_INT... */
449 if (sim_fpu_is_infinity (s
))
451 *i
= s
->sign
? MIN_INT
: MAX_INT
;
452 return sim_fpu_status_invalid_cvi
;
454 /* it is a number, but a small one */
455 if (s
->normal_exp
< 0)
458 return sim_fpu_status_inexact
;
460 /* Is the floating point MIN_INT or just close? */
461 if (s
->sign
&& s
->normal_exp
== (NR_INTBITS
- 1))
464 ASSERT (s
->fraction
>= IMPLICIT_1
);
465 if (s
->fraction
== IMPLICIT_1
)
466 return 0; /* exact */
467 if (is_64bit
) /* can't round */
468 return sim_fpu_status_invalid_cvi
; /* must be overflow */
469 /* For a 32bit with MAX_INT, rounding is possible */
472 case sim_fpu_round_default
:
474 case sim_fpu_round_zero
:
475 if ((s
->fraction
& FRAC32MASK
) != IMPLICIT_1
)
476 return sim_fpu_status_invalid_cvi
;
478 return sim_fpu_status_inexact
;
480 case sim_fpu_round_near
:
482 if ((s
->fraction
& FRAC32MASK
) != IMPLICIT_1
)
483 return sim_fpu_status_invalid_cvi
;
484 else if ((s
->fraction
& !FRAC32MASK
) >= (~FRAC32MASK
>> 1))
485 return sim_fpu_status_invalid_cvi
;
487 return sim_fpu_status_inexact
;
489 case sim_fpu_round_up
:
490 if ((s
->fraction
& FRAC32MASK
) == IMPLICIT_1
)
491 return sim_fpu_status_inexact
;
493 return sim_fpu_status_invalid_cvi
;
494 case sim_fpu_round_down
:
495 return sim_fpu_status_invalid_cvi
;
498 /* Would right shifting result in the FRAC being shifted into
499 (through) the integer's sign bit? */
500 if (s
->normal_exp
> (NR_INTBITS
- 2))
502 *i
= s
->sign
? MIN_INT
: MAX_INT
;
503 return sim_fpu_status_invalid_cvi
;
505 /* normal number shift it into place */
507 shift
= (s
->normal_exp
- (NR_FRAC_GUARD
));
515 if (tmp
& ((SIGNED64 (1) << shift
) - 1))
516 status
|= sim_fpu_status_inexact
;
519 *i
= s
->sign
? (-tmp
) : (tmp
);
523 /* convert an integer into a floating point */
524 STATIC_INLINE_SIM_FPU (int)
525 i2fpu (sim_fpu
*f
, signed64 i
, int is_64bit
)
530 f
->class = sim_fpu_class_zero
;
536 f
->class = sim_fpu_class_number
;
538 f
->normal_exp
= NR_FRAC_GUARD
;
542 /* Special case for minint, since there is no corresponding
543 +ve integer representation for it */
546 f
->fraction
= IMPLICIT_1
;
547 f
->normal_exp
= NR_INTBITS
- 1;
555 if (f
->fraction
>= IMPLICIT_2
)
559 f
->fraction
= (f
->fraction
>> 1) | (f
->fraction
& 1);
562 while (f
->fraction
>= IMPLICIT_2
);
564 else if (f
->fraction
< IMPLICIT_1
)
571 while (f
->fraction
< IMPLICIT_1
);
575 /* trace operation */
578 printf ("i2fpu: 0x%08lX ->\n", (long) i
);
585 fpu2i (&val
, f
, is_64bit
, sim_fpu_round_zero
);
586 if (i
>= MIN_INT32
&& i
<= MAX_INT32
)
596 /* Convert a floating point into an unsigned integer */
597 STATIC_INLINE_SIM_FPU (int)
598 fpu2u (unsigned64
*u
, const sim_fpu
*s
, int is_64bit
)
600 const int is_double
= 1;
603 if (sim_fpu_is_zero (s
))
608 if (sim_fpu_is_nan (s
))
613 /* it is a negative number */
619 /* get reasonable MAX_USI_INT... */
620 if (sim_fpu_is_infinity (s
))
625 /* it is a number, but a small one */
626 if (s
->normal_exp
< 0)
632 if (s
->normal_exp
> (NR_INTBITS
- 1))
638 tmp
= (s
->fraction
& ~PADMASK
);
639 shift
= (s
->normal_exp
- (NR_FRACBITS
+ NR_GUARDS
));
653 /* Convert an unsigned integer into a floating point */
654 STATIC_INLINE_SIM_FPU (int)
655 u2fpu (sim_fpu
*f
, unsigned64 u
, int is_64bit
)
659 f
->class = sim_fpu_class_zero
;
665 f
->class = sim_fpu_class_number
;
667 f
->normal_exp
= NR_FRAC_GUARD
;
670 while (f
->fraction
< IMPLICIT_1
)
680 /* register <-> sim_fpu */
682 INLINE_SIM_FPU (void)
683 sim_fpu_32to (sim_fpu
*f
, unsigned32 s
)
685 unpack_fpu (f
, s
, 0);
689 INLINE_SIM_FPU (void)
690 sim_fpu_232to (sim_fpu
*f
, unsigned32 h
, unsigned32 l
)
694 unpack_fpu (f
, s
, 1);
698 INLINE_SIM_FPU (void)
699 sim_fpu_64to (sim_fpu
*f
, unsigned64 s
)
701 unpack_fpu (f
, s
, 1);
705 INLINE_SIM_FPU (void)
706 sim_fpu_to32 (unsigned32
*s
,
709 *s
= pack_fpu (f
, 0);
713 INLINE_SIM_FPU (void)
714 sim_fpu_to232 (unsigned32
*h
, unsigned32
*l
,
717 unsigned64 s
= pack_fpu (f
, 1);
723 INLINE_SIM_FPU (void)
724 sim_fpu_to64 (unsigned64
*u
,
727 *u
= pack_fpu (f
, 1);
731 INLINE_SIM_FPU (void)
732 sim_fpu_fractionto (sim_fpu
*f
,
738 int shift
= (NR_FRAC_GUARD
- precision
);
739 f
->class = sim_fpu_class_number
;
741 f
->normal_exp
= normal_exp
;
742 /* shift the fraction to where sim-fpu expects it */
744 f
->fraction
= (fraction
<< shift
);
746 f
->fraction
= (fraction
>> -shift
);
747 f
->fraction
|= IMPLICIT_1
;
751 INLINE_SIM_FPU (unsigned64
)
752 sim_fpu_tofraction (const sim_fpu
*d
,
755 /* we have NR_FRAC_GUARD bits, we want only PRECISION bits */
756 int shift
= (NR_FRAC_GUARD
- precision
);
757 unsigned64 fraction
= (d
->fraction
& ~IMPLICIT_1
);
759 return fraction
>> shift
;
761 return fraction
<< -shift
;
767 STATIC_INLINE_SIM_FPU (int)
768 do_normal_overflow (sim_fpu
*f
,
774 case sim_fpu_round_default
:
776 case sim_fpu_round_near
:
777 f
->class = sim_fpu_class_infinity
;
779 case sim_fpu_round_up
:
781 f
->class = sim_fpu_class_infinity
;
783 case sim_fpu_round_down
:
785 f
->class = sim_fpu_class_infinity
;
787 case sim_fpu_round_zero
:
790 f
->normal_exp
= NORMAL_EXPMAX
;
791 f
->fraction
= LSMASK64 (NR_FRAC_GUARD
, NR_GUARDS
);
792 return (sim_fpu_status_overflow
| sim_fpu_status_inexact
);
795 STATIC_INLINE_SIM_FPU (int)
796 do_normal_underflow (sim_fpu
*f
,
802 case sim_fpu_round_default
:
804 case sim_fpu_round_near
:
805 f
->class = sim_fpu_class_zero
;
807 case sim_fpu_round_up
:
809 f
->class = sim_fpu_class_zero
;
811 case sim_fpu_round_down
:
813 f
->class = sim_fpu_class_zero
;
815 case sim_fpu_round_zero
:
816 f
->class = sim_fpu_class_zero
;
819 f
->normal_exp
= NORMAL_EXPMIN
- NR_FRACBITS
;
820 f
->fraction
= IMPLICIT_1
;
821 return (sim_fpu_status_inexact
| sim_fpu_status_underflow
);
826 /* Round a number using NR_GUARDS.
827 Will return the rounded number or F->FRACTION == 0 when underflow */
829 STATIC_INLINE_SIM_FPU (int)
830 do_normal_round (sim_fpu
*f
,
834 unsigned64 guardmask
= LSMASK64 (nr_guards
- 1, 0);
835 unsigned64 guardmsb
= LSBIT64 (nr_guards
- 1);
836 unsigned64 fraclsb
= guardmsb
<< 1;
837 if ((f
->fraction
& guardmask
))
839 int status
= sim_fpu_status_inexact
;
842 case sim_fpu_round_default
:
844 case sim_fpu_round_near
:
845 if ((f
->fraction
& guardmsb
))
847 if ((f
->fraction
& fraclsb
))
849 status
|= sim_fpu_status_rounded
;
851 else if ((f
->fraction
& (guardmask
>> 1)))
853 status
|= sim_fpu_status_rounded
;
857 case sim_fpu_round_up
:
859 status
|= sim_fpu_status_rounded
;
861 case sim_fpu_round_down
:
863 status
|= sim_fpu_status_rounded
;
865 case sim_fpu_round_zero
:
868 f
->fraction
&= ~guardmask
;
869 /* round if needed, handle resulting overflow */
870 if ((status
& sim_fpu_status_rounded
))
872 f
->fraction
+= fraclsb
;
873 if ((f
->fraction
& IMPLICIT_2
))
886 STATIC_INLINE_SIM_FPU (int)
887 do_round (sim_fpu
*f
,
890 sim_fpu_denorm denorm
)
894 case sim_fpu_class_qnan
:
895 case sim_fpu_class_zero
:
896 case sim_fpu_class_infinity
:
899 case sim_fpu_class_snan
:
900 /* Quieten a SignalingNaN */
901 f
->class = sim_fpu_class_qnan
;
902 return sim_fpu_status_invalid_snan
;
904 case sim_fpu_class_number
:
905 case sim_fpu_class_denorm
:
908 ASSERT (f
->fraction
< IMPLICIT_2
);
909 ASSERT (f
->fraction
>= IMPLICIT_1
);
910 if (f
->normal_exp
< NORMAL_EXPMIN
)
912 /* This number's exponent is too low to fit into the bits
913 available in the number. Round off any bits that will be
914 discarded as a result of denormalization. Edge case is
915 the implicit bit shifted to GUARD0 and then rounded
917 int shift
= NORMAL_EXPMIN
- f
->normal_exp
;
918 if (shift
+ NR_GUARDS
<= NR_FRAC_GUARD
+ 1
919 && !(denorm
& sim_fpu_denorm_zero
))
921 status
= do_normal_round (f
, shift
+ NR_GUARDS
, round
);
922 if (f
->fraction
== 0) /* rounding underflowed */
924 status
|= do_normal_underflow (f
, is_double
, round
);
926 else if (f
->normal_exp
< NORMAL_EXPMIN
) /* still underflow? */
928 status
|= sim_fpu_status_denorm
;
929 /* Any loss of precision when denormalizing is
930 underflow. Some processors check for underflow
931 before rounding, some after! */
932 if (status
& sim_fpu_status_inexact
)
933 status
|= sim_fpu_status_underflow
;
934 /* Flag that resultant value has been denormalized */
935 f
->class = sim_fpu_class_denorm
;
937 else if ((denorm
& sim_fpu_denorm_underflow_inexact
))
939 if ((status
& sim_fpu_status_inexact
))
940 status
|= sim_fpu_status_underflow
;
945 status
= do_normal_underflow (f
, is_double
, round
);
948 else if (f
->normal_exp
> NORMAL_EXPMAX
)
951 status
= do_normal_overflow (f
, is_double
, round
);
955 status
= do_normal_round (f
, NR_GUARDS
, round
);
956 if (f
->fraction
== 0)
957 /* f->class = sim_fpu_class_zero; */
958 status
|= do_normal_underflow (f
, is_double
, round
);
959 else if (f
->normal_exp
> NORMAL_EXPMAX
)
960 /* oops! rounding caused overflow */
961 status
|= do_normal_overflow (f
, is_double
, round
);
963 ASSERT ((f
->class == sim_fpu_class_number
964 || f
->class == sim_fpu_class_denorm
)
965 <= (f
->fraction
< IMPLICIT_2
&& f
->fraction
>= IMPLICIT_1
));
973 sim_fpu_round_32 (sim_fpu
*f
,
975 sim_fpu_denorm denorm
)
977 return do_round (f
, 0, round
, denorm
);
981 sim_fpu_round_64 (sim_fpu
*f
,
983 sim_fpu_denorm denorm
)
985 return do_round (f
, 1, round
, denorm
);
993 sim_fpu_add (sim_fpu
*f
,
997 if (sim_fpu_is_snan (l
))
1000 f
->class = sim_fpu_class_qnan
;
1001 return sim_fpu_status_invalid_snan
;
1003 if (sim_fpu_is_snan (r
))
1006 f
->class = sim_fpu_class_qnan
;
1007 return sim_fpu_status_invalid_snan
;
1009 if (sim_fpu_is_qnan (l
))
1014 if (sim_fpu_is_qnan (r
))
1019 if (sim_fpu_is_infinity (l
))
1021 if (sim_fpu_is_infinity (r
)
1022 && l
->sign
!= r
->sign
)
1025 return sim_fpu_status_invalid_isi
;
1030 if (sim_fpu_is_infinity (r
))
1035 if (sim_fpu_is_zero (l
))
1037 if (sim_fpu_is_zero (r
))
1040 f
->sign
= l
->sign
& r
->sign
;
1046 if (sim_fpu_is_zero (r
))
1053 int shift
= l
->normal_exp
- r
->normal_exp
;
1054 unsigned64 lfraction
;
1055 unsigned64 rfraction
;
1056 /* use exp of larger */
1057 if (shift
>= NR_FRAC_GUARD
)
1059 /* left has much bigger magnitude */
1061 return sim_fpu_status_inexact
;
1063 if (shift
<= - NR_FRAC_GUARD
)
1065 /* right has much bigger magnitude */
1067 return sim_fpu_status_inexact
;
1069 lfraction
= l
->fraction
;
1070 rfraction
= r
->fraction
;
1073 f
->normal_exp
= l
->normal_exp
;
1074 if (rfraction
& LSMASK64 (shift
- 1, 0))
1076 status
|= sim_fpu_status_inexact
;
1077 rfraction
|= LSBIT64 (shift
); /* stick LSBit */
1079 rfraction
>>= shift
;
1083 f
->normal_exp
= r
->normal_exp
;
1084 if (lfraction
& LSMASK64 (- shift
- 1, 0))
1086 status
|= sim_fpu_status_inexact
;
1087 lfraction
|= LSBIT64 (- shift
); /* stick LSBit */
1089 lfraction
>>= -shift
;
1093 f
->normal_exp
= r
->normal_exp
;
1096 /* perform the addition */
1098 lfraction
= - lfraction
;
1100 rfraction
= - rfraction
;
1101 f
->fraction
= lfraction
+ rfraction
;
1104 if (f
->fraction
== 0)
1111 f
->class = sim_fpu_class_number
;
1112 if (((signed64
) f
->fraction
) >= 0)
1117 f
->fraction
= - f
->fraction
;
1121 if ((f
->fraction
& IMPLICIT_2
))
1123 f
->fraction
= (f
->fraction
>> 1) | (f
->fraction
& 1);
1126 else if (f
->fraction
< IMPLICIT_1
)
1133 while (f
->fraction
< IMPLICIT_1
);
1135 ASSERT (f
->fraction
>= IMPLICIT_1
&& f
->fraction
< IMPLICIT_2
);
1141 INLINE_SIM_FPU (int)
1142 sim_fpu_sub (sim_fpu
*f
,
1146 if (sim_fpu_is_snan (l
))
1149 f
->class = sim_fpu_class_qnan
;
1150 return sim_fpu_status_invalid_snan
;
1152 if (sim_fpu_is_snan (r
))
1155 f
->class = sim_fpu_class_qnan
;
1156 return sim_fpu_status_invalid_snan
;
1158 if (sim_fpu_is_qnan (l
))
1163 if (sim_fpu_is_qnan (r
))
1168 if (sim_fpu_is_infinity (l
))
1170 if (sim_fpu_is_infinity (r
)
1171 && l
->sign
== r
->sign
)
1174 return sim_fpu_status_invalid_isi
;
1179 if (sim_fpu_is_infinity (r
))
1185 if (sim_fpu_is_zero (l
))
1187 if (sim_fpu_is_zero (r
))
1190 f
->sign
= l
->sign
& !r
->sign
;
1199 if (sim_fpu_is_zero (r
))
1206 int shift
= l
->normal_exp
- r
->normal_exp
;
1207 unsigned64 lfraction
;
1208 unsigned64 rfraction
;
1209 /* use exp of larger */
1210 if (shift
>= NR_FRAC_GUARD
)
1212 /* left has much bigger magnitude */
1214 return sim_fpu_status_inexact
;
1216 if (shift
<= - NR_FRAC_GUARD
)
1218 /* right has much bigger magnitude */
1221 return sim_fpu_status_inexact
;
1223 lfraction
= l
->fraction
;
1224 rfraction
= r
->fraction
;
1227 f
->normal_exp
= l
->normal_exp
;
1228 if (rfraction
& LSMASK64 (shift
- 1, 0))
1230 status
|= sim_fpu_status_inexact
;
1231 rfraction
|= LSBIT64 (shift
); /* stick LSBit */
1233 rfraction
>>= shift
;
1237 f
->normal_exp
= r
->normal_exp
;
1238 if (lfraction
& LSMASK64 (- shift
- 1, 0))
1240 status
|= sim_fpu_status_inexact
;
1241 lfraction
|= LSBIT64 (- shift
); /* stick LSBit */
1243 lfraction
>>= -shift
;
1247 f
->normal_exp
= r
->normal_exp
;
1250 /* perform the subtraction */
1252 lfraction
= - lfraction
;
1254 rfraction
= - rfraction
;
1255 f
->fraction
= lfraction
+ rfraction
;
1258 if (f
->fraction
== 0)
1265 f
->class = sim_fpu_class_number
;
1266 if (((signed64
) f
->fraction
) >= 0)
1271 f
->fraction
= - f
->fraction
;
1275 if ((f
->fraction
& IMPLICIT_2
))
1277 f
->fraction
= (f
->fraction
>> 1) | (f
->fraction
& 1);
1280 else if (f
->fraction
< IMPLICIT_1
)
1287 while (f
->fraction
< IMPLICIT_1
);
1289 ASSERT (f
->fraction
>= IMPLICIT_1
&& f
->fraction
< IMPLICIT_2
);
1295 INLINE_SIM_FPU (int)
1296 sim_fpu_mul (sim_fpu
*f
,
1300 if (sim_fpu_is_snan (l
))
1303 f
->class = sim_fpu_class_qnan
;
1304 return sim_fpu_status_invalid_snan
;
1306 if (sim_fpu_is_snan (r
))
1309 f
->class = sim_fpu_class_qnan
;
1310 return sim_fpu_status_invalid_snan
;
1312 if (sim_fpu_is_qnan (l
))
1317 if (sim_fpu_is_qnan (r
))
1322 if (sim_fpu_is_infinity (l
))
1324 if (sim_fpu_is_zero (r
))
1327 return sim_fpu_status_invalid_imz
;
1330 f
->sign
= l
->sign
^ r
->sign
;
1333 if (sim_fpu_is_infinity (r
))
1335 if (sim_fpu_is_zero (l
))
1338 return sim_fpu_status_invalid_imz
;
1341 f
->sign
= l
->sign
^ r
->sign
;
1344 if (sim_fpu_is_zero (l
) || sim_fpu_is_zero (r
))
1347 f
->sign
= l
->sign
^ r
->sign
;
1350 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1355 unsigned64 nl
= l
->fraction
& 0xffffffff;
1356 unsigned64 nh
= l
->fraction
>> 32;
1357 unsigned64 ml
= r
->fraction
& 0xffffffff;
1358 unsigned64 mh
= r
->fraction
>>32;
1359 unsigned64 pp_ll
= ml
* nl
;
1360 unsigned64 pp_hl
= mh
* nl
;
1361 unsigned64 pp_lh
= ml
* nh
;
1362 unsigned64 pp_hh
= mh
* nh
;
1363 unsigned64 res2
= 0;
1364 unsigned64 res0
= 0;
1365 unsigned64 ps_hh__
= pp_hl
+ pp_lh
;
1366 if (ps_hh__
< pp_hl
)
1367 res2
+= UNSIGNED64 (0x100000000);
1368 pp_hl
= (ps_hh__
<< 32) & UNSIGNED64 (0xffffffff00000000);
1369 res0
= pp_ll
+ pp_hl
;
1372 res2
+= ((ps_hh__
>> 32) & 0xffffffff) + pp_hh
;
1376 f
->normal_exp
= l
->normal_exp
+ r
->normal_exp
;
1377 f
->sign
= l
->sign
^ r
->sign
;
1378 f
->class = sim_fpu_class_number
;
1380 /* Input is bounded by [1,2) ; [2^60,2^61)
1381 Output is bounded by [1,4) ; [2^120,2^122) */
1383 /* Adjust the exponent according to where the decimal point ended
1384 up in the high 64 bit word. In the source the decimal point
1385 was at NR_FRAC_GUARD. */
1386 f
->normal_exp
+= NR_FRAC_GUARD
+ 64 - (NR_FRAC_GUARD
* 2);
1388 /* The high word is bounded according to the above. Consequently
1389 it has never overflowed into IMPLICIT_2. */
1390 ASSERT (high
< LSBIT64 (((NR_FRAC_GUARD
+ 1) * 2) - 64));
1391 ASSERT (high
>= LSBIT64 ((NR_FRAC_GUARD
* 2) - 64));
1392 ASSERT (LSBIT64 (((NR_FRAC_GUARD
+ 1) * 2) - 64) < IMPLICIT_1
);
1399 if (low
& LSBIT64 (63))
1403 while (high
< IMPLICIT_1
);
1405 ASSERT (high
>= IMPLICIT_1
&& high
< IMPLICIT_2
);
1408 f
->fraction
= (high
| 1); /* sticky */
1409 return sim_fpu_status_inexact
;
1420 INLINE_SIM_FPU (int)
1421 sim_fpu_div (sim_fpu
*f
,
1425 if (sim_fpu_is_snan (l
))
1428 f
->class = sim_fpu_class_qnan
;
1429 return sim_fpu_status_invalid_snan
;
1431 if (sim_fpu_is_snan (r
))
1434 f
->class = sim_fpu_class_qnan
;
1435 return sim_fpu_status_invalid_snan
;
1437 if (sim_fpu_is_qnan (l
))
1440 f
->class = sim_fpu_class_qnan
;
1443 if (sim_fpu_is_qnan (r
))
1446 f
->class = sim_fpu_class_qnan
;
1449 if (sim_fpu_is_infinity (l
))
1451 if (sim_fpu_is_infinity (r
))
1454 return sim_fpu_status_invalid_idi
;
1459 f
->sign
= l
->sign
^ r
->sign
;
1463 if (sim_fpu_is_zero (l
))
1465 if (sim_fpu_is_zero (r
))
1468 return sim_fpu_status_invalid_zdz
;
1473 f
->sign
= l
->sign
^ r
->sign
;
1477 if (sim_fpu_is_infinity (r
))
1480 f
->sign
= l
->sign
^ r
->sign
;
1483 if (sim_fpu_is_zero (r
))
1485 f
->class = sim_fpu_class_infinity
;
1486 f
->sign
= l
->sign
^ r
->sign
;
1487 return sim_fpu_status_invalid_div0
;
1490 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1493 /* quotient = ( ( numerator / denominator)
1494 x 2^(numerator exponent - denominator exponent)
1496 unsigned64 numerator
;
1497 unsigned64 denominator
;
1498 unsigned64 quotient
;
1501 f
->class = sim_fpu_class_number
;
1502 f
->sign
= l
->sign
^ r
->sign
;
1503 f
->normal_exp
= l
->normal_exp
- r
->normal_exp
;
1505 numerator
= l
->fraction
;
1506 denominator
= r
->fraction
;
1508 /* Fraction will be less than 1.0 */
1509 if (numerator
< denominator
)
1514 ASSERT (numerator
>= denominator
);
1516 /* Gain extra precision, already used one spare bit */
1517 numerator
<<= NR_SPARE
;
1518 denominator
<<= NR_SPARE
;
1520 /* Does divide one bit at a time. Optimize??? */
1522 bit
= (IMPLICIT_1
<< NR_SPARE
);
1525 if (numerator
>= denominator
)
1528 numerator
-= denominator
;
1534 /* discard (but save) the extra bits */
1535 if ((quotient
& LSMASK64 (NR_SPARE
-1, 0)))
1536 quotient
= (quotient
>> NR_SPARE
) | 1;
1538 quotient
= (quotient
>> NR_SPARE
);
1540 f
->fraction
= quotient
;
1541 ASSERT (f
->fraction
>= IMPLICIT_1
&& f
->fraction
< IMPLICIT_2
);
1544 f
->fraction
|= 1; /* stick remaining bits */
1545 return sim_fpu_status_inexact
;
1553 INLINE_SIM_FPU (int)
1554 sim_fpu_max (sim_fpu
*f
,
1558 if (sim_fpu_is_snan (l
))
1561 f
->class = sim_fpu_class_qnan
;
1562 return sim_fpu_status_invalid_snan
;
1564 if (sim_fpu_is_snan (r
))
1567 f
->class = sim_fpu_class_qnan
;
1568 return sim_fpu_status_invalid_snan
;
1570 if (sim_fpu_is_qnan (l
))
1575 if (sim_fpu_is_qnan (r
))
1580 if (sim_fpu_is_infinity (l
))
1582 if (sim_fpu_is_infinity (r
)
1583 && l
->sign
== r
->sign
)
1586 return sim_fpu_status_invalid_isi
;
1589 *f
= *r
; /* -inf < anything */
1591 *f
= *l
; /* +inf > anthing */
1594 if (sim_fpu_is_infinity (r
))
1597 *f
= *l
; /* anything > -inf */
1599 *f
= *r
; /* anthing < +inf */
1602 if (l
->sign
> r
->sign
)
1604 *f
= *r
; /* -ve < +ve */
1607 if (l
->sign
< r
->sign
)
1609 *f
= *l
; /* +ve > -ve */
1612 ASSERT (l
->sign
== r
->sign
);
1613 if (l
->normal_exp
> r
->normal_exp
1614 || (l
->normal_exp
== r
->normal_exp
&&
1615 l
->fraction
> r
->fraction
))
1619 *f
= *r
; /* -ve < -ve */
1621 *f
= *l
; /* +ve > +ve */
1628 *f
= *l
; /* -ve > -ve */
1630 *f
= *r
; /* +ve < +ve */
1636 INLINE_SIM_FPU (int)
1637 sim_fpu_min (sim_fpu
*f
,
1641 if (sim_fpu_is_snan (l
))
1644 f
->class = sim_fpu_class_qnan
;
1645 return sim_fpu_status_invalid_snan
;
1647 if (sim_fpu_is_snan (r
))
1650 f
->class = sim_fpu_class_qnan
;
1651 return sim_fpu_status_invalid_snan
;
1653 if (sim_fpu_is_qnan (l
))
1658 if (sim_fpu_is_qnan (r
))
1663 if (sim_fpu_is_infinity (l
))
1665 if (sim_fpu_is_infinity (r
)
1666 && l
->sign
== r
->sign
)
1669 return sim_fpu_status_invalid_isi
;
1672 *f
= *l
; /* -inf < anything */
1674 *f
= *r
; /* +inf > anthing */
1677 if (sim_fpu_is_infinity (r
))
1680 *f
= *r
; /* anything > -inf */
1682 *f
= *l
; /* anything < +inf */
1685 if (l
->sign
> r
->sign
)
1687 *f
= *l
; /* -ve < +ve */
1690 if (l
->sign
< r
->sign
)
1692 *f
= *r
; /* +ve > -ve */
1695 ASSERT (l
->sign
== r
->sign
);
1696 if (l
->normal_exp
> r
->normal_exp
1697 || (l
->normal_exp
== r
->normal_exp
&&
1698 l
->fraction
> r
->fraction
))
1702 *f
= *l
; /* -ve < -ve */
1704 *f
= *r
; /* +ve > +ve */
1711 *f
= *r
; /* -ve > -ve */
1713 *f
= *l
; /* +ve < +ve */
1719 INLINE_SIM_FPU (int)
1720 sim_fpu_neg (sim_fpu
*f
,
1723 if (sim_fpu_is_snan (r
))
1726 f
->class = sim_fpu_class_qnan
;
1727 return sim_fpu_status_invalid_snan
;
1729 if (sim_fpu_is_qnan (r
))
1740 INLINE_SIM_FPU (int)
1741 sim_fpu_abs (sim_fpu
*f
,
1746 if (sim_fpu_is_snan (r
))
1748 f
->class = sim_fpu_class_qnan
;
1749 return sim_fpu_status_invalid_snan
;
1755 INLINE_SIM_FPU (int)
1756 sim_fpu_inv (sim_fpu
*f
,
1759 return sim_fpu_div (f
, &sim_fpu_one
, r
);
1763 INLINE_SIM_FPU (int)
1764 sim_fpu_sqrt (sim_fpu
*f
,
1767 if (sim_fpu_is_snan (r
))
1770 return sim_fpu_status_invalid_snan
;
1772 if (sim_fpu_is_qnan (r
))
1777 if (sim_fpu_is_zero (r
))
1779 f
->class = sim_fpu_class_zero
;
1784 if (sim_fpu_is_infinity (r
))
1789 return sim_fpu_status_invalid_sqrt
;
1793 f
->class = sim_fpu_class_infinity
;
1802 return sim_fpu_status_invalid_sqrt
;
1805 /* @(#)e_sqrt.c 5.1 93/09/24 */
1807 * ====================================================
1808 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1810 * Developed at SunPro, a Sun Microsystems, Inc. business.
1811 * Permission to use, copy, modify, and distribute this
1812 * software is freely granted, provided that this notice
1814 * ====================================================
1817 /* __ieee754_sqrt(x)
1818 * Return correctly rounded sqrt.
1819 * ------------------------------------------
1820 * | Use the hardware sqrt if you have one |
1821 * ------------------------------------------
1823 * Bit by bit method using integer arithmetic. (Slow, but portable)
1825 * Scale x to y in [1,4) with even powers of 2:
1826 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1827 * sqrt(x) = 2^k * sqrt(y)
1830 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1831 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1833 - y = ((m even) ? x : 2.x)
1835 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1837 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1839 * 2. Bit by bit computation
1840 * Let q = sqrt(y) truncated to i bit after binary point (q = 1),
1843 * s = 2*q , and y = 2 * ( y - q ). (1)
1846 * To compute q from q , one checks whether
1850 * (q + 2 ) <= y. (2)
1853 * If (2) is false, then q = q ; otherwise q = q + 2 .
1856 * With some algebraic manipulation, it is not difficult to see
1857 * that (2) is equivalent to
1862 * The advantage of (3) is that s and y can be computed by
1864 * the following recurrence formula:
1867 * s = s , y = y ; (4)
1876 * s = s + 2 , y = y - s - 2 (5)
1881 - NOTE: y = 2 (y - s - 2 )
1884 * One may easily use induction to prove (4) and (5).
1885 * Note. Since the left hand side of (3) contain only i+2 bits,
1886 * it does not necessary to do a full (53-bit) comparison
1889 * After generating the 53 bits result, we compute one more bit.
1890 * Together with the remainder, we can decide whether the
1891 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
 * (it will never be exactly 1/2 ulp).
1893 * The rounding mode can be detected by checking whether
1894 * huge + tiny is equal to huge, and whether huge - tiny is
1895 * equal to huge for some floating point number "huge" and "tiny".
1898 * sqrt(+-0) = +-0 ... exact
1900 * sqrt(-ve) = NaN ... with invalid signal
1901 * sqrt(NaN) = NaN ... with invalid signal for signaling NaN
1903 * Other methods : see the appended file at the end of the program below.
1908 /* generate sqrt(x) bit by bit */
1914 f
->class = sim_fpu_class_number
;
1917 f
->normal_exp
= (r
->normal_exp
>> 1); /* exp = [exp/2] */
1919 /* odd exp, double x to make it even */
1920 ASSERT (y
>= IMPLICIT_1
&& y
< IMPLICIT_4
);
1921 if ((r
->normal_exp
& 1))
1925 ASSERT (y
>= IMPLICIT_1
&& y
< (IMPLICIT_2
<< 1));
1927 /* Let loop determine first value of s (either 1 or 2) */
1934 unsigned64 t
= s
+ b
;
1945 ASSERT (q
>= IMPLICIT_1
&& q
< IMPLICIT_2
);
1949 f
->fraction
|= 1; /* stick remaining bits */
1950 return sim_fpu_status_inexact
;
1958 /* int/long <-> sim_fpu */
1960 INLINE_SIM_FPU (int)
1961 sim_fpu_i32to (sim_fpu
*f
,
1963 sim_fpu_round round
)
1969 INLINE_SIM_FPU (int)
1970 sim_fpu_u32to (sim_fpu
*f
,
1972 sim_fpu_round round
)
1978 INLINE_SIM_FPU (int)
1979 sim_fpu_i64to (sim_fpu
*f
,
1981 sim_fpu_round round
)
1987 INLINE_SIM_FPU (int)
1988 sim_fpu_u64to (sim_fpu
*f
,
1990 sim_fpu_round round
)
1997 INLINE_SIM_FPU (int)
1998 sim_fpu_to32i (signed32
*i
,
2000 sim_fpu_round round
)
2003 int status
= fpu2i (&i64
, f
, 0, round
);
2008 INLINE_SIM_FPU (int)
2009 sim_fpu_to32u (unsigned32
*u
,
2011 sim_fpu_round round
)
2014 int status
= fpu2u (&u64
, f
, 0);
2019 INLINE_SIM_FPU (int)
2020 sim_fpu_to64i (signed64
*i
,
2022 sim_fpu_round round
)
2024 return fpu2i (i
, f
, 1, round
);
2028 INLINE_SIM_FPU (int)
2029 sim_fpu_to64u (unsigned64
*u
,
2031 sim_fpu_round round
)
2033 return fpu2u (u
, f
, 1);
2038 /* sim_fpu -> host format */
2041 INLINE_SIM_FPU (float)
2042 sim_fpu_2f (const sim_fpu
*f
)
2049 INLINE_SIM_FPU (double)
2050 sim_fpu_2d (const sim_fpu
*s
)
2053 if (sim_fpu_is_snan (s
))
2057 n
.class = sim_fpu_class_qnan
;
2058 val
.i
= pack_fpu (&n
, 1);
2062 val
.i
= pack_fpu (s
, 1);
2069 INLINE_SIM_FPU (void)
2070 sim_fpu_f2 (sim_fpu
*f
,
2075 unpack_fpu (f
, val
.i
, 1);
2080 INLINE_SIM_FPU (void)
2081 sim_fpu_d2 (sim_fpu
*f
,
2086 unpack_fpu (f
, val
.i
, 1);
2092 INLINE_SIM_FPU (int)
2093 sim_fpu_is_nan (const sim_fpu
*d
)
2097 case sim_fpu_class_qnan
:
2098 case sim_fpu_class_snan
:
2105 INLINE_SIM_FPU (int)
2106 sim_fpu_is_qnan (const sim_fpu
*d
)
2110 case sim_fpu_class_qnan
:
2117 INLINE_SIM_FPU (int)
2118 sim_fpu_is_snan (const sim_fpu
*d
)
2122 case sim_fpu_class_snan
:
2129 INLINE_SIM_FPU (int)
2130 sim_fpu_is_zero (const sim_fpu
*d
)
2134 case sim_fpu_class_zero
:
2141 INLINE_SIM_FPU (int)
2142 sim_fpu_is_infinity (const sim_fpu
*d
)
2146 case sim_fpu_class_infinity
:
2153 INLINE_SIM_FPU (int)
2154 sim_fpu_is_number (const sim_fpu
*d
)
2158 case sim_fpu_class_denorm
:
2159 case sim_fpu_class_number
:
2166 INLINE_SIM_FPU (int)
2167 sim_fpu_is_denorm (const sim_fpu
*d
)
2171 case sim_fpu_class_denorm
:
2179 INLINE_SIM_FPU (int)
2180 sim_fpu_sign (const sim_fpu
*d
)
2186 INLINE_SIM_FPU (int)
2187 sim_fpu_exp (const sim_fpu
*d
)
2189 return d
->normal_exp
;
2193 INLINE_SIM_FPU (unsigned64
)
2194 sim_fpu_fraction (const sim_fpu
*d
)
2200 INLINE_SIM_FPU (unsigned64
)
2201 sim_fpu_guard (const sim_fpu
*d
, int is_double
)
2204 unsigned64 guardmask
= LSMASK64 (NR_GUARDS
- 1, 0);
2205 rv
= (d
->fraction
& guardmask
) >> NR_PAD
;
2210 INLINE_SIM_FPU (int)
2211 sim_fpu_is (const sim_fpu
*d
)
2215 case sim_fpu_class_qnan
:
2216 return SIM_FPU_IS_QNAN
;
2217 case sim_fpu_class_snan
:
2218 return SIM_FPU_IS_SNAN
;
2219 case sim_fpu_class_infinity
:
2221 return SIM_FPU_IS_NINF
;
2223 return SIM_FPU_IS_PINF
;
2224 case sim_fpu_class_number
:
2226 return SIM_FPU_IS_NNUMBER
;
2228 return SIM_FPU_IS_PNUMBER
;
2229 case sim_fpu_class_denorm
:
2231 return SIM_FPU_IS_NDENORM
;
2233 return SIM_FPU_IS_PDENORM
;
2234 case sim_fpu_class_zero
:
2236 return SIM_FPU_IS_NZERO
;
2238 return SIM_FPU_IS_PZERO
;
2245 INLINE_SIM_FPU (int)
2246 sim_fpu_cmp (const sim_fpu
*l
, const sim_fpu
*r
)
2249 sim_fpu_sub (&res
, l
, r
);
2250 return sim_fpu_is (&res
);
2253 INLINE_SIM_FPU (int)
2254 sim_fpu_is_lt (const sim_fpu
*l
, const sim_fpu
*r
)
2257 sim_fpu_lt (&status
, l
, r
);
2261 INLINE_SIM_FPU (int)
2262 sim_fpu_is_le (const sim_fpu
*l
, const sim_fpu
*r
)
2265 sim_fpu_le (&is
, l
, r
);
2269 INLINE_SIM_FPU (int)
2270 sim_fpu_is_eq (const sim_fpu
*l
, const sim_fpu
*r
)
2273 sim_fpu_eq (&is
, l
, r
);
2277 INLINE_SIM_FPU (int)
2278 sim_fpu_is_ne (const sim_fpu
*l
, const sim_fpu
*r
)
2281 sim_fpu_ne (&is
, l
, r
);
2285 INLINE_SIM_FPU (int)
2286 sim_fpu_is_ge (const sim_fpu
*l
, const sim_fpu
*r
)
2289 sim_fpu_ge (&is
, l
, r
);
2293 INLINE_SIM_FPU (int)
2294 sim_fpu_is_gt (const sim_fpu
*l
, const sim_fpu
*r
)
2297 sim_fpu_gt (&is
, l
, r
);
2302 /* Compare operators */
2304 INLINE_SIM_FPU (int)
2305 sim_fpu_lt (int *is
,
2309 if (!sim_fpu_is_nan (l
) && !sim_fpu_is_nan (r
))
2313 lval
.i
= pack_fpu (l
, 1);
2314 rval
.i
= pack_fpu (r
, 1);
2315 (*is
) = (lval
.d
< rval
.d
);
2318 else if (sim_fpu_is_snan (l
) || sim_fpu_is_snan (r
))
2321 return sim_fpu_status_invalid_snan
;
2326 return sim_fpu_status_invalid_qnan
;
2330 INLINE_SIM_FPU (int)
2331 sim_fpu_le (int *is
,
2335 if (!sim_fpu_is_nan (l
) && !sim_fpu_is_nan (r
))
2339 lval
.i
= pack_fpu (l
, 1);
2340 rval
.i
= pack_fpu (r
, 1);
2341 *is
= (lval
.d
<= rval
.d
);
2344 else if (sim_fpu_is_snan (l
) || sim_fpu_is_snan (r
))
2347 return sim_fpu_status_invalid_snan
;
2352 return sim_fpu_status_invalid_qnan
;
2356 INLINE_SIM_FPU (int)
2357 sim_fpu_eq (int *is
,
2361 if (!sim_fpu_is_nan (l
) && !sim_fpu_is_nan (r
))
2365 lval
.i
= pack_fpu (l
, 1);
2366 rval
.i
= pack_fpu (r
, 1);
2367 (*is
) = (lval
.d
== rval
.d
);
2370 else if (sim_fpu_is_snan (l
) || sim_fpu_is_snan (r
))
2373 return sim_fpu_status_invalid_snan
;
2378 return sim_fpu_status_invalid_qnan
;
2382 INLINE_SIM_FPU (int)
2383 sim_fpu_ne (int *is
,
2387 if (!sim_fpu_is_nan (l
) && !sim_fpu_is_nan (r
))
2391 lval
.i
= pack_fpu (l
, 1);
2392 rval
.i
= pack_fpu (r
, 1);
2393 (*is
) = (lval
.d
!= rval
.d
);
2396 else if (sim_fpu_is_snan (l
) || sim_fpu_is_snan (r
))
2399 return sim_fpu_status_invalid_snan
;
2404 return sim_fpu_status_invalid_qnan
;
2408 INLINE_SIM_FPU (int)
2409 sim_fpu_ge (int *is
,
2413 return sim_fpu_le (is
, r
, l
);
2416 INLINE_SIM_FPU (int)
2417 sim_fpu_gt (int *is
,
2421 return sim_fpu_lt (is
, r
, l
);
2425 /* A number of useful constants */
2427 #if EXTERN_SIM_FPU_P
2428 const sim_fpu sim_fpu_zero
= {
2429 sim_fpu_class_zero
, 0, 0, 0
2431 const sim_fpu sim_fpu_qnan
= {
2432 sim_fpu_class_qnan
, 0, 0, 0
2434 const sim_fpu sim_fpu_one
= {
2435 sim_fpu_class_number
, 0, IMPLICIT_1
, 0
2437 const sim_fpu sim_fpu_two
= {
2438 sim_fpu_class_number
, 0, IMPLICIT_1
, 1
2440 const sim_fpu sim_fpu_max32
= {
2441 sim_fpu_class_number
, 0, LSMASK64 (NR_FRAC_GUARD
, NR_GUARDS32
), NORMAL_EXPMAX32
2443 const sim_fpu sim_fpu_max64
= {
2444 sim_fpu_class_number
, 0, LSMASK64 (NR_FRAC_GUARD
, NR_GUARDS64
), NORMAL_EXPMAX64
2451 INLINE_SIM_FPU (void)
2452 sim_fpu_print_fpu (const sim_fpu
*f
,
2453 sim_fpu_print_func
*print
,
2456 sim_fpu_printn_fpu (f
, print
, -1, arg
);
2459 INLINE_SIM_FPU (void)
2460 sim_fpu_printn_fpu (const sim_fpu
*f
,
2461 sim_fpu_print_func
*print
,
2465 print (arg
, "%s", f
->sign
? "-" : "+");
2468 case sim_fpu_class_qnan
:
2470 print_bits (f
->fraction
, NR_FRAC_GUARD
- 1, digits
, print
, arg
);
2471 print (arg
, "*QuietNaN");
2473 case sim_fpu_class_snan
:
2475 print_bits (f
->fraction
, NR_FRAC_GUARD
- 1, digits
, print
, arg
);
2476 print (arg
, "*SignalNaN");
2478 case sim_fpu_class_zero
:
2481 case sim_fpu_class_infinity
:
2484 case sim_fpu_class_number
:
2485 case sim_fpu_class_denorm
:
2487 print_bits (f
->fraction
, NR_FRAC_GUARD
- 1, digits
, print
, arg
);
2488 print (arg
, "*2^%+d", f
->normal_exp
);
2489 ASSERT (f
->fraction
>= IMPLICIT_1
);
2490 ASSERT (f
->fraction
< IMPLICIT_2
);
2495 INLINE_SIM_FPU (void)
2496 sim_fpu_print_status (int status
,
2497 sim_fpu_print_func
*print
,
2501 const char *prefix
= "";
2504 switch ((sim_fpu_status
) (status
& i
))
2506 case sim_fpu_status_denorm
:
2507 print (arg
, "%sD", prefix
);
2509 case sim_fpu_status_invalid_snan
:
2510 print (arg
, "%sSNaN", prefix
);
2512 case sim_fpu_status_invalid_qnan
:
2513 print (arg
, "%sQNaN", prefix
);
2515 case sim_fpu_status_invalid_isi
:
2516 print (arg
, "%sISI", prefix
);
2518 case sim_fpu_status_invalid_idi
:
2519 print (arg
, "%sIDI", prefix
);
2521 case sim_fpu_status_invalid_zdz
:
2522 print (arg
, "%sZDZ", prefix
);
2524 case sim_fpu_status_invalid_imz
:
2525 print (arg
, "%sIMZ", prefix
);
2527 case sim_fpu_status_invalid_cvi
:
2528 print (arg
, "%sCVI", prefix
);
2530 case sim_fpu_status_invalid_cmp
:
2531 print (arg
, "%sCMP", prefix
);
2533 case sim_fpu_status_invalid_sqrt
:
2534 print (arg
, "%sSQRT", prefix
);
2536 case sim_fpu_status_inexact
:
2537 print (arg
, "%sX", prefix
);
2539 case sim_fpu_status_overflow
:
2540 print (arg
, "%sO", prefix
);
2542 case sim_fpu_status_underflow
:
2543 print (arg
, "%sU", prefix
);
2545 case sim_fpu_status_invalid_div0
:
2546 print (arg
, "%s/", prefix
);
2548 case sim_fpu_status_rounded
:
2549 print (arg
, "%sR", prefix
);