1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
5 /* Copyright 1994, 1997, 1998, 2003, 2007 Free Software Foundation, Inc.
7 This file is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 2, or (at your option) any
12 In addition to the permissions in the GNU General Public License, the
13 Free Software Foundation gives you unlimited permission to link the
14 compiled version of this file with other programs, and to distribute
15 those programs without any restriction coming from the use of this
16 file. (The General Public License restrictions do apply in other
17 respects; for example, they cover modification of the file, and
18 distribution when not linked into another program.)
20 This file is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
25 You should have received a copy of the GNU General Public License
26 along with this program; see the file COPYING. If not, write to
27 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
29 /* As a special exception, if you link this library with other files,
30 some of which are compiled with GCC, to produce an executable,
31 this library does not by itself cause the resulting executable
32 to be covered by the GNU General Public License.
33 This exception does not however invalidate any other reasons why
34 the executable file might be covered by the GNU General Public License. */
36 /* This implements IEEE 754 format arithmetic, but does not provide a
37 mechanism for setting the rounding mode, or for generating or handling
40 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
41 Wilson, all of Cygnus Support. */
47 #include "sim-basics.h"
51 #include "sim-assert.h"
55 If digits is -1, then print all digits. */
58 print_bits (unsigned64 x
,
61 sim_fpu_print_func print
,
64 unsigned64 bit
= LSBIT64 (msbit
);
77 if (digits
> 0) digits
--;
84 /* Quick and dirty conversion between a host double and host 64bit int */
92 /* A packed IEEE floating point number.
94 Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
95 32 and 64 bit numbers. This number is interpreted as:
97 Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
98 (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
100 Denormalized (0 == BIASEDEXP && FRAC != 0):
101 (sign ? "-" : "+") 0.<FRAC> x 2 ^ (- EXPBIAS + 1)
103 Zero (0 == BIASEDEXP && FRAC == 0):
104 (sign ? "-" : "+") 0.0
106 Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
107 (sign ? "-" : "+") "infinity"
109 SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
112 QuietNaN (BIASEDEXP == EXPMAX && FRAC >= QUIET_NAN):
117 #define NR_EXPBITS (is_double ? 11 : 8)
118 #define NR_FRACBITS (is_double ? 52 : 23)
119 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
/* Maximum (all-ones) value of the biased exponent field:
   255 for 32-bit (8 exponent bits), 2047 for 64-bit (11 exponent bits).
   An all-ones exponent encodes infinity or NaN.
   Fix: the 64-bit constant was misspelled EXMPAX64; renamed to EXPMAX64
   (it was defined and used only here, so the rename is self-contained).  */
#define EXPMAX32 (255)
#define EXPMAX64 (2047)
#define EXPMAX ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))
125 #define EXPBIAS32 (127)
126 #define EXPBIAS64 (1023)
127 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
129 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
133 /* An unpacked floating point number.
135 When unpacked, the fraction of both a 32 and 64 bit floating point
136 number is stored using the same format:
138 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
139 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
141 #define NR_PAD32 (30)
143 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
144 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
146 #define NR_GUARDS32 (7 + NR_PAD32)
147 #define NR_GUARDS64 (8 + NR_PAD64)
148 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
149 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
151 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
152 #define GUARDLSB LSBIT64 (NR_PAD)
153 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
155 #define NR_FRAC_GUARD (60)
156 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
157 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
158 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
161 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
163 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
165 #define NORMAL_EXPMAX32 (EXPBIAS32)
166 #define NORMAL_EXPMAX64 (EXPBIAS64)
167 #define NORMAL_EXPMAX (EXPBIAS)
170 /* Integer constants */
172 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
173 #define MAX_UINT32 LSMASK64 (31, 0)
174 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
176 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
177 #define MAX_UINT64 LSMASK64 (63, 0)
178 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
180 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
181 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
182 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
183 #define NR_INTBITS (is_64bit ? 64 : 32)
185 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer */
186 STATIC_INLINE_SIM_FPU (unsigned64
)
187 pack_fpu (const sim_fpu
*src
,
198 case sim_fpu_class_qnan
:
201 /* force fraction to correct class */
202 fraction
= src
->fraction
;
203 fraction
>>= NR_GUARDS
;
204 #ifdef SIM_QUIET_NAN_NEGATED
205 fraction
|= QUIET_NAN
- 1;
207 fraction
|= QUIET_NAN
;
210 case sim_fpu_class_snan
:
213 /* force fraction to correct class */
214 fraction
= src
->fraction
;
215 fraction
>>= NR_GUARDS
;
216 #ifdef SIM_QUIET_NAN_NEGATED
217 fraction
|= QUIET_NAN
;
219 fraction
&= ~QUIET_NAN
;
222 case sim_fpu_class_infinity
:
227 case sim_fpu_class_zero
:
232 case sim_fpu_class_number
:
233 case sim_fpu_class_denorm
:
234 ASSERT (src
->fraction
>= IMPLICIT_1
);
235 ASSERT (src
->fraction
< IMPLICIT_2
);
236 if (src
->normal_exp
< NORMAL_EXPMIN
)
238 /* This number's exponent is too low to fit into the bits
239 available in the number We'll denormalize the number by
240 storing zero in the exponent and shift the fraction to
241 the right to make up for it. */
242 int nr_shift
= NORMAL_EXPMIN
- src
->normal_exp
;
243 if (nr_shift
> NR_FRACBITS
)
245 /* underflow, just make the number zero */
254 /* Shift by the value */
255 fraction
= src
->fraction
;
256 fraction
>>= NR_GUARDS
;
257 fraction
>>= nr_shift
;
260 else if (src
->normal_exp
> NORMAL_EXPMAX
)
269 exp
= (src
->normal_exp
+ EXPBIAS
);
271 fraction
= src
->fraction
;
272 /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
274 /* Round to nearest: If the guard bits are the all zero, but
275 the first, then we're half way between two numbers,
276 choose the one which makes the lsb of the answer 0. */
277 if ((fraction
& GUARDMASK
) == GUARDMSB
)
279 if ((fraction
& (GUARDMSB
<< 1)))
280 fraction
+= (GUARDMSB
<< 1);
284 /* Add a one to the guards to force round to nearest */
285 fraction
+= GUARDROUND
;
287 if ((fraction
& IMPLICIT_2
)) /* rounding resulted in carry */
292 fraction
>>= NR_GUARDS
;
293 /* When exp == EXPMAX (overflow from carry) fraction must
294 have been made zero */
295 ASSERT ((exp
== EXPMAX
) <= ((fraction
& ~IMPLICIT_1
) == 0));
302 packed
= ((sign
? SIGNBIT
: 0)
303 | (exp
<< NR_FRACBITS
)
304 | LSMASKED64 (fraction
, NR_FRACBITS
- 1, 0));
306 /* trace operation */
313 printf ("pack_fpu: ");
314 printf ("-> %c%0lX.%06lX\n",
315 LSMASKED32 (packed
, 31, 31) ? '8' : '0',
316 (long) LSEXTRACTED32 (packed
, 30, 23),
317 (long) LSEXTRACTED32 (packed
, 23 - 1, 0));
325 /* Unpack a 32/64 bit integer into a sim_fpu structure */
326 STATIC_INLINE_SIM_FPU (void)
327 unpack_fpu (sim_fpu
*dst
, unsigned64 packed
, int is_double
)
329 unsigned64 fraction
= LSMASKED64 (packed
, NR_FRACBITS
- 1, 0);
330 unsigned exp
= LSEXTRACTED64 (packed
, NR_EXPBITS
+ NR_FRACBITS
- 1, NR_FRACBITS
);
331 int sign
= (packed
& SIGNBIT
) != 0;
335 /* Hmm. Looks like 0 */
338 /* tastes like zero */
339 dst
->class = sim_fpu_class_zero
;
345 /* Zero exponent with non zero fraction - it's denormalized,
346 so there isn't a leading implicit one - we'll shift it so
348 dst
->normal_exp
= exp
- EXPBIAS
+ 1;
349 dst
->class = sim_fpu_class_denorm
;
351 fraction
<<= NR_GUARDS
;
352 while (fraction
< IMPLICIT_1
)
357 dst
->fraction
= fraction
;
360 else if (exp
== EXPMAX
)
365 /* Attached to a zero fraction - means infinity */
366 dst
->class = sim_fpu_class_infinity
;
368 /* dst->normal_exp = EXPBIAS; */
369 /* dst->fraction = 0; */
375 /* Non zero fraction, means NaN */
377 dst
->fraction
= (fraction
<< NR_GUARDS
);
378 #ifdef SIM_QUIET_NAN_NEGATED
379 qnan
= (fraction
& QUIET_NAN
) == 0;
381 qnan
= fraction
>= QUIET_NAN
;
384 dst
->class = sim_fpu_class_qnan
;
386 dst
->class = sim_fpu_class_snan
;
391 /* Nothing strange about this number */
392 dst
->class = sim_fpu_class_number
;
394 dst
->fraction
= ((fraction
<< NR_GUARDS
) | IMPLICIT_1
);
395 dst
->normal_exp
= exp
- EXPBIAS
;
398 /* trace operation */
405 printf ("unpack_fpu: %c%02lX.%06lX ->\n",
406 LSMASKED32 (packed
, 31, 31) ? '8' : '0',
407 (long) LSEXTRACTED32 (packed
, 30, 23),
408 (long) LSEXTRACTED32 (packed
, 23 - 1, 0));
415 val
.i
= pack_fpu (dst
, 1);
418 ASSERT (val
.i
== packed
);
422 unsigned32 val
= pack_fpu (dst
, 0);
423 unsigned32 org
= packed
;
430 /* Convert a floating point into an integer */
431 STATIC_INLINE_SIM_FPU (int)
440 if (sim_fpu_is_zero (s
))
445 if (sim_fpu_is_snan (s
))
447 *i
= MIN_INT
; /* FIXME */
448 return sim_fpu_status_invalid_cvi
;
450 if (sim_fpu_is_qnan (s
))
452 *i
= MIN_INT
; /* FIXME */
453 return sim_fpu_status_invalid_cvi
;
455 /* map infinity onto MAX_INT... */
456 if (sim_fpu_is_infinity (s
))
458 *i
= s
->sign
? MIN_INT
: MAX_INT
;
459 return sim_fpu_status_invalid_cvi
;
461 /* it is a number, but a small one */
462 if (s
->normal_exp
< 0)
465 return sim_fpu_status_inexact
;
467 /* Is the floating point MIN_INT or just close? */
468 if (s
->sign
&& s
->normal_exp
== (NR_INTBITS
- 1))
471 ASSERT (s
->fraction
>= IMPLICIT_1
);
472 if (s
->fraction
== IMPLICIT_1
)
473 return 0; /* exact */
474 if (is_64bit
) /* can't round */
475 return sim_fpu_status_invalid_cvi
; /* must be overflow */
476 /* For a 32bit with MAX_INT, rounding is possible */
479 case sim_fpu_round_default
:
481 case sim_fpu_round_zero
:
482 if ((s
->fraction
& FRAC32MASK
) != IMPLICIT_1
)
483 return sim_fpu_status_invalid_cvi
;
485 return sim_fpu_status_inexact
;
487 case sim_fpu_round_near
:
489 if ((s
->fraction
& FRAC32MASK
) != IMPLICIT_1
)
490 return sim_fpu_status_invalid_cvi
;
491 else if ((s
->fraction
& !FRAC32MASK
) >= (~FRAC32MASK
>> 1))
492 return sim_fpu_status_invalid_cvi
;
494 return sim_fpu_status_inexact
;
496 case sim_fpu_round_up
:
497 if ((s
->fraction
& FRAC32MASK
) == IMPLICIT_1
)
498 return sim_fpu_status_inexact
;
500 return sim_fpu_status_invalid_cvi
;
501 case sim_fpu_round_down
:
502 return sim_fpu_status_invalid_cvi
;
505 /* Would right shifting result in the FRAC being shifted into
506 (through) the integer's sign bit? */
507 if (s
->normal_exp
> (NR_INTBITS
- 2))
509 *i
= s
->sign
? MIN_INT
: MAX_INT
;
510 return sim_fpu_status_invalid_cvi
;
512 /* normal number shift it into place */
514 shift
= (s
->normal_exp
- (NR_FRAC_GUARD
));
522 if (tmp
& ((SIGNED64 (1) << shift
) - 1))
523 status
|= sim_fpu_status_inexact
;
526 *i
= s
->sign
? (-tmp
) : (tmp
);
530 /* convert an integer into a floating point */
531 STATIC_INLINE_SIM_FPU (int)
532 i2fpu (sim_fpu
*f
, signed64 i
, int is_64bit
)
537 f
->class = sim_fpu_class_zero
;
543 f
->class = sim_fpu_class_number
;
545 f
->normal_exp
= NR_FRAC_GUARD
;
549 /* Special case for minint, since there is no corresponding
550 +ve integer representation for it */
553 f
->fraction
= IMPLICIT_1
;
554 f
->normal_exp
= NR_INTBITS
- 1;
562 if (f
->fraction
>= IMPLICIT_2
)
566 f
->fraction
= (f
->fraction
>> 1) | (f
->fraction
& 1);
569 while (f
->fraction
>= IMPLICIT_2
);
571 else if (f
->fraction
< IMPLICIT_1
)
578 while (f
->fraction
< IMPLICIT_1
);
582 /* trace operation */
585 printf ("i2fpu: 0x%08lX ->\n", (long) i
);
592 fpu2i (&val
, f
, is_64bit
, sim_fpu_round_zero
);
593 if (i
>= MIN_INT32
&& i
<= MAX_INT32
)
603 /* Convert a floating point into an unsigned integer */
604 STATIC_INLINE_SIM_FPU (int)
605 fpu2u (unsigned64
*u
, const sim_fpu
*s
, int is_64bit
)
607 const int is_double
= 1;
610 if (sim_fpu_is_zero (s
))
615 if (sim_fpu_is_nan (s
))
620 /* it is a negative number */
626 /* get reasonable MAX_USI_INT... */
627 if (sim_fpu_is_infinity (s
))
632 /* it is a number, but a small one */
633 if (s
->normal_exp
< 0)
639 if (s
->normal_exp
> (NR_INTBITS
- 1))
645 tmp
= (s
->fraction
& ~PADMASK
);
646 shift
= (s
->normal_exp
- (NR_FRACBITS
+ NR_GUARDS
));
660 /* Convert an unsigned integer into a floating point */
661 STATIC_INLINE_SIM_FPU (int)
662 u2fpu (sim_fpu
*f
, unsigned64 u
, int is_64bit
)
666 f
->class = sim_fpu_class_zero
;
672 f
->class = sim_fpu_class_number
;
674 f
->normal_exp
= NR_FRAC_GUARD
;
677 while (f
->fraction
< IMPLICIT_1
)
687 /* register <-> sim_fpu */
689 INLINE_SIM_FPU (void)
690 sim_fpu_32to (sim_fpu
*f
, unsigned32 s
)
692 unpack_fpu (f
, s
, 0);
696 INLINE_SIM_FPU (void)
697 sim_fpu_232to (sim_fpu
*f
, unsigned32 h
, unsigned32 l
)
701 unpack_fpu (f
, s
, 1);
705 INLINE_SIM_FPU (void)
706 sim_fpu_64to (sim_fpu
*f
, unsigned64 s
)
708 unpack_fpu (f
, s
, 1);
712 INLINE_SIM_FPU (void)
713 sim_fpu_to32 (unsigned32
*s
,
716 *s
= pack_fpu (f
, 0);
720 INLINE_SIM_FPU (void)
721 sim_fpu_to232 (unsigned32
*h
, unsigned32
*l
,
724 unsigned64 s
= pack_fpu (f
, 1);
730 INLINE_SIM_FPU (void)
731 sim_fpu_to64 (unsigned64
*u
,
734 *u
= pack_fpu (f
, 1);
738 INLINE_SIM_FPU (void)
739 sim_fpu_fractionto (sim_fpu
*f
,
745 int shift
= (NR_FRAC_GUARD
- precision
);
746 f
->class = sim_fpu_class_number
;
748 f
->normal_exp
= normal_exp
;
749 /* shift the fraction to where sim-fpu expects it */
751 f
->fraction
= (fraction
<< shift
);
753 f
->fraction
= (fraction
>> -shift
);
754 f
->fraction
|= IMPLICIT_1
;
758 INLINE_SIM_FPU (unsigned64
)
759 sim_fpu_tofraction (const sim_fpu
*d
,
762 /* we have NR_FRAC_GUARD bits, we want only PRECISION bits */
763 int shift
= (NR_FRAC_GUARD
- precision
);
764 unsigned64 fraction
= (d
->fraction
& ~IMPLICIT_1
);
766 return fraction
>> shift
;
768 return fraction
<< -shift
;
774 STATIC_INLINE_SIM_FPU (int)
775 do_normal_overflow (sim_fpu
*f
,
781 case sim_fpu_round_default
:
783 case sim_fpu_round_near
:
784 f
->class = sim_fpu_class_infinity
;
786 case sim_fpu_round_up
:
788 f
->class = sim_fpu_class_infinity
;
790 case sim_fpu_round_down
:
792 f
->class = sim_fpu_class_infinity
;
794 case sim_fpu_round_zero
:
797 f
->normal_exp
= NORMAL_EXPMAX
;
798 f
->fraction
= LSMASK64 (NR_FRAC_GUARD
, NR_GUARDS
);
799 return (sim_fpu_status_overflow
| sim_fpu_status_inexact
);
802 STATIC_INLINE_SIM_FPU (int)
803 do_normal_underflow (sim_fpu
*f
,
809 case sim_fpu_round_default
:
811 case sim_fpu_round_near
:
812 f
->class = sim_fpu_class_zero
;
814 case sim_fpu_round_up
:
816 f
->class = sim_fpu_class_zero
;
818 case sim_fpu_round_down
:
820 f
->class = sim_fpu_class_zero
;
822 case sim_fpu_round_zero
:
823 f
->class = sim_fpu_class_zero
;
826 f
->normal_exp
= NORMAL_EXPMIN
- NR_FRACBITS
;
827 f
->fraction
= IMPLICIT_1
;
828 return (sim_fpu_status_inexact
| sim_fpu_status_underflow
);
833 /* Round a number using NR_GUARDS.
834 Will return the rounded number or F->FRACTION == 0 when underflow */
836 STATIC_INLINE_SIM_FPU (int)
837 do_normal_round (sim_fpu
*f
,
841 unsigned64 guardmask
= LSMASK64 (nr_guards
- 1, 0);
842 unsigned64 guardmsb
= LSBIT64 (nr_guards
- 1);
843 unsigned64 fraclsb
= guardmsb
<< 1;
844 if ((f
->fraction
& guardmask
))
846 int status
= sim_fpu_status_inexact
;
849 case sim_fpu_round_default
:
851 case sim_fpu_round_near
:
852 if ((f
->fraction
& guardmsb
))
854 if ((f
->fraction
& fraclsb
))
856 status
|= sim_fpu_status_rounded
;
858 else if ((f
->fraction
& (guardmask
>> 1)))
860 status
|= sim_fpu_status_rounded
;
864 case sim_fpu_round_up
:
866 status
|= sim_fpu_status_rounded
;
868 case sim_fpu_round_down
:
870 status
|= sim_fpu_status_rounded
;
872 case sim_fpu_round_zero
:
875 f
->fraction
&= ~guardmask
;
876 /* round if needed, handle resulting overflow */
877 if ((status
& sim_fpu_status_rounded
))
879 f
->fraction
+= fraclsb
;
880 if ((f
->fraction
& IMPLICIT_2
))
893 STATIC_INLINE_SIM_FPU (int)
894 do_round (sim_fpu
*f
,
897 sim_fpu_denorm denorm
)
901 case sim_fpu_class_qnan
:
902 case sim_fpu_class_zero
:
903 case sim_fpu_class_infinity
:
906 case sim_fpu_class_snan
:
907 /* Quieten a SignalingNaN */
908 f
->class = sim_fpu_class_qnan
;
909 return sim_fpu_status_invalid_snan
;
911 case sim_fpu_class_number
:
912 case sim_fpu_class_denorm
:
915 ASSERT (f
->fraction
< IMPLICIT_2
);
916 ASSERT (f
->fraction
>= IMPLICIT_1
);
917 if (f
->normal_exp
< NORMAL_EXPMIN
)
919 /* This number's exponent is too low to fit into the bits
920 available in the number. Round off any bits that will be
921 discarded as a result of denormalization. Edge case is
922 the implicit bit shifted to GUARD0 and then rounded
924 int shift
= NORMAL_EXPMIN
- f
->normal_exp
;
925 if (shift
+ NR_GUARDS
<= NR_FRAC_GUARD
+ 1
926 && !(denorm
& sim_fpu_denorm_zero
))
928 status
= do_normal_round (f
, shift
+ NR_GUARDS
, round
);
929 if (f
->fraction
== 0) /* rounding underflowed */
931 status
|= do_normal_underflow (f
, is_double
, round
);
933 else if (f
->normal_exp
< NORMAL_EXPMIN
) /* still underflow? */
935 status
|= sim_fpu_status_denorm
;
936 /* Any loss of precision when denormalizing is
937 underflow. Some processors check for underflow
938 before rounding, some after! */
939 if (status
& sim_fpu_status_inexact
)
940 status
|= sim_fpu_status_underflow
;
941 /* Flag that resultant value has been denormalized */
942 f
->class = sim_fpu_class_denorm
;
944 else if ((denorm
& sim_fpu_denorm_underflow_inexact
))
946 if ((status
& sim_fpu_status_inexact
))
947 status
|= sim_fpu_status_underflow
;
952 status
= do_normal_underflow (f
, is_double
, round
);
955 else if (f
->normal_exp
> NORMAL_EXPMAX
)
958 status
= do_normal_overflow (f
, is_double
, round
);
962 status
= do_normal_round (f
, NR_GUARDS
, round
);
963 if (f
->fraction
== 0)
964 /* f->class = sim_fpu_class_zero; */
965 status
|= do_normal_underflow (f
, is_double
, round
);
966 else if (f
->normal_exp
> NORMAL_EXPMAX
)
967 /* oops! rounding caused overflow */
968 status
|= do_normal_overflow (f
, is_double
, round
);
970 ASSERT ((f
->class == sim_fpu_class_number
971 || f
->class == sim_fpu_class_denorm
)
972 <= (f
->fraction
< IMPLICIT_2
&& f
->fraction
>= IMPLICIT_1
));
980 sim_fpu_round_32 (sim_fpu
*f
,
982 sim_fpu_denorm denorm
)
984 return do_round (f
, 0, round
, denorm
);
988 sim_fpu_round_64 (sim_fpu
*f
,
990 sim_fpu_denorm denorm
)
992 return do_round (f
, 1, round
, denorm
);
1000 sim_fpu_add (sim_fpu
*f
,
1004 if (sim_fpu_is_snan (l
))
1007 f
->class = sim_fpu_class_qnan
;
1008 return sim_fpu_status_invalid_snan
;
1010 if (sim_fpu_is_snan (r
))
1013 f
->class = sim_fpu_class_qnan
;
1014 return sim_fpu_status_invalid_snan
;
1016 if (sim_fpu_is_qnan (l
))
1021 if (sim_fpu_is_qnan (r
))
1026 if (sim_fpu_is_infinity (l
))
1028 if (sim_fpu_is_infinity (r
)
1029 && l
->sign
!= r
->sign
)
1032 return sim_fpu_status_invalid_isi
;
1037 if (sim_fpu_is_infinity (r
))
1042 if (sim_fpu_is_zero (l
))
1044 if (sim_fpu_is_zero (r
))
1047 f
->sign
= l
->sign
& r
->sign
;
1053 if (sim_fpu_is_zero (r
))
1060 int shift
= l
->normal_exp
- r
->normal_exp
;
1061 unsigned64 lfraction
;
1062 unsigned64 rfraction
;
1063 /* use exp of larger */
1064 if (shift
>= NR_FRAC_GUARD
)
1066 /* left has much bigger magnitude */
1068 return sim_fpu_status_inexact
;
1070 if (shift
<= - NR_FRAC_GUARD
)
1072 /* right has much bigger magnitude */
1074 return sim_fpu_status_inexact
;
1076 lfraction
= l
->fraction
;
1077 rfraction
= r
->fraction
;
1080 f
->normal_exp
= l
->normal_exp
;
1081 if (rfraction
& LSMASK64 (shift
- 1, 0))
1083 status
|= sim_fpu_status_inexact
;
1084 rfraction
|= LSBIT64 (shift
); /* stick LSBit */
1086 rfraction
>>= shift
;
1090 f
->normal_exp
= r
->normal_exp
;
1091 if (lfraction
& LSMASK64 (- shift
- 1, 0))
1093 status
|= sim_fpu_status_inexact
;
1094 lfraction
|= LSBIT64 (- shift
); /* stick LSBit */
1096 lfraction
>>= -shift
;
1100 f
->normal_exp
= r
->normal_exp
;
1103 /* perform the addition */
1105 lfraction
= - lfraction
;
1107 rfraction
= - rfraction
;
1108 f
->fraction
= lfraction
+ rfraction
;
1111 if (f
->fraction
== 0)
1118 f
->class = sim_fpu_class_number
;
1119 if ((signed64
) f
->fraction
>= 0)
1124 f
->fraction
= - f
->fraction
;
1128 if ((f
->fraction
& IMPLICIT_2
))
1130 f
->fraction
= (f
->fraction
>> 1) | (f
->fraction
& 1);
1133 else if (f
->fraction
< IMPLICIT_1
)
1140 while (f
->fraction
< IMPLICIT_1
);
1142 ASSERT (f
->fraction
>= IMPLICIT_1
&& f
->fraction
< IMPLICIT_2
);
1148 INLINE_SIM_FPU (int)
1149 sim_fpu_sub (sim_fpu
*f
,
1153 if (sim_fpu_is_snan (l
))
1156 f
->class = sim_fpu_class_qnan
;
1157 return sim_fpu_status_invalid_snan
;
1159 if (sim_fpu_is_snan (r
))
1162 f
->class = sim_fpu_class_qnan
;
1163 return sim_fpu_status_invalid_snan
;
1165 if (sim_fpu_is_qnan (l
))
1170 if (sim_fpu_is_qnan (r
))
1175 if (sim_fpu_is_infinity (l
))
1177 if (sim_fpu_is_infinity (r
)
1178 && l
->sign
== r
->sign
)
1181 return sim_fpu_status_invalid_isi
;
1186 if (sim_fpu_is_infinity (r
))
1192 if (sim_fpu_is_zero (l
))
1194 if (sim_fpu_is_zero (r
))
1197 f
->sign
= l
->sign
& !r
->sign
;
1206 if (sim_fpu_is_zero (r
))
1213 int shift
= l
->normal_exp
- r
->normal_exp
;
1214 unsigned64 lfraction
;
1215 unsigned64 rfraction
;
1216 /* use exp of larger */
1217 if (shift
>= NR_FRAC_GUARD
)
1219 /* left has much bigger magnitude */
1221 return sim_fpu_status_inexact
;
1223 if (shift
<= - NR_FRAC_GUARD
)
1225 /* right has much bigger magnitude */
1228 return sim_fpu_status_inexact
;
1230 lfraction
= l
->fraction
;
1231 rfraction
= r
->fraction
;
1234 f
->normal_exp
= l
->normal_exp
;
1235 if (rfraction
& LSMASK64 (shift
- 1, 0))
1237 status
|= sim_fpu_status_inexact
;
1238 rfraction
|= LSBIT64 (shift
); /* stick LSBit */
1240 rfraction
>>= shift
;
1244 f
->normal_exp
= r
->normal_exp
;
1245 if (lfraction
& LSMASK64 (- shift
- 1, 0))
1247 status
|= sim_fpu_status_inexact
;
1248 lfraction
|= LSBIT64 (- shift
); /* stick LSBit */
1250 lfraction
>>= -shift
;
1254 f
->normal_exp
= r
->normal_exp
;
1257 /* perform the subtraction */
1259 lfraction
= - lfraction
;
1261 rfraction
= - rfraction
;
1262 f
->fraction
= lfraction
+ rfraction
;
1265 if (f
->fraction
== 0)
1272 f
->class = sim_fpu_class_number
;
1273 if ((signed64
) f
->fraction
>= 0)
1278 f
->fraction
= - f
->fraction
;
1282 if ((f
->fraction
& IMPLICIT_2
))
1284 f
->fraction
= (f
->fraction
>> 1) | (f
->fraction
& 1);
1287 else if (f
->fraction
< IMPLICIT_1
)
1294 while (f
->fraction
< IMPLICIT_1
);
1296 ASSERT (f
->fraction
>= IMPLICIT_1
&& f
->fraction
< IMPLICIT_2
);
1302 INLINE_SIM_FPU (int)
1303 sim_fpu_mul (sim_fpu
*f
,
1307 if (sim_fpu_is_snan (l
))
1310 f
->class = sim_fpu_class_qnan
;
1311 return sim_fpu_status_invalid_snan
;
1313 if (sim_fpu_is_snan (r
))
1316 f
->class = sim_fpu_class_qnan
;
1317 return sim_fpu_status_invalid_snan
;
1319 if (sim_fpu_is_qnan (l
))
1324 if (sim_fpu_is_qnan (r
))
1329 if (sim_fpu_is_infinity (l
))
1331 if (sim_fpu_is_zero (r
))
1334 return sim_fpu_status_invalid_imz
;
1337 f
->sign
= l
->sign
^ r
->sign
;
1340 if (sim_fpu_is_infinity (r
))
1342 if (sim_fpu_is_zero (l
))
1345 return sim_fpu_status_invalid_imz
;
1348 f
->sign
= l
->sign
^ r
->sign
;
1351 if (sim_fpu_is_zero (l
) || sim_fpu_is_zero (r
))
1354 f
->sign
= l
->sign
^ r
->sign
;
1357 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1362 unsigned64 nl
= l
->fraction
& 0xffffffff;
1363 unsigned64 nh
= l
->fraction
>> 32;
1364 unsigned64 ml
= r
->fraction
& 0xffffffff;
1365 unsigned64 mh
= r
->fraction
>>32;
1366 unsigned64 pp_ll
= ml
* nl
;
1367 unsigned64 pp_hl
= mh
* nl
;
1368 unsigned64 pp_lh
= ml
* nh
;
1369 unsigned64 pp_hh
= mh
* nh
;
1370 unsigned64 res2
= 0;
1371 unsigned64 res0
= 0;
1372 unsigned64 ps_hh__
= pp_hl
+ pp_lh
;
1373 if (ps_hh__
< pp_hl
)
1374 res2
+= UNSIGNED64 (0x100000000);
1375 pp_hl
= (ps_hh__
<< 32) & UNSIGNED64 (0xffffffff00000000);
1376 res0
= pp_ll
+ pp_hl
;
1379 res2
+= ((ps_hh__
>> 32) & 0xffffffff) + pp_hh
;
1383 f
->normal_exp
= l
->normal_exp
+ r
->normal_exp
;
1384 f
->sign
= l
->sign
^ r
->sign
;
1385 f
->class = sim_fpu_class_number
;
1387 /* Input is bounded by [1,2) ; [2^60,2^61)
1388 Output is bounded by [1,4) ; [2^120,2^122) */
1390 /* Adjust the exponent according to where the decimal point ended
1391 up in the high 64 bit word. In the source the decimal point
1392 was at NR_FRAC_GUARD. */
1393 f
->normal_exp
+= NR_FRAC_GUARD
+ 64 - (NR_FRAC_GUARD
* 2);
1395 /* The high word is bounded according to the above. Consequently
1396 it has never overflowed into IMPLICIT_2. */
1397 ASSERT (high
< LSBIT64 (((NR_FRAC_GUARD
+ 1) * 2) - 64));
1398 ASSERT (high
>= LSBIT64 ((NR_FRAC_GUARD
* 2) - 64));
1399 ASSERT (LSBIT64 (((NR_FRAC_GUARD
+ 1) * 2) - 64) < IMPLICIT_1
);
1406 if (low
& LSBIT64 (63))
1410 while (high
< IMPLICIT_1
);
1412 ASSERT (high
>= IMPLICIT_1
&& high
< IMPLICIT_2
);
1415 f
->fraction
= (high
| 1); /* sticky */
1416 return sim_fpu_status_inexact
;
1427 INLINE_SIM_FPU (int)
1428 sim_fpu_div (sim_fpu
*f
,
1432 if (sim_fpu_is_snan (l
))
1435 f
->class = sim_fpu_class_qnan
;
1436 return sim_fpu_status_invalid_snan
;
1438 if (sim_fpu_is_snan (r
))
1441 f
->class = sim_fpu_class_qnan
;
1442 return sim_fpu_status_invalid_snan
;
1444 if (sim_fpu_is_qnan (l
))
1447 f
->class = sim_fpu_class_qnan
;
1450 if (sim_fpu_is_qnan (r
))
1453 f
->class = sim_fpu_class_qnan
;
1456 if (sim_fpu_is_infinity (l
))
1458 if (sim_fpu_is_infinity (r
))
1461 return sim_fpu_status_invalid_idi
;
1466 f
->sign
= l
->sign
^ r
->sign
;
1470 if (sim_fpu_is_zero (l
))
1472 if (sim_fpu_is_zero (r
))
1475 return sim_fpu_status_invalid_zdz
;
1480 f
->sign
= l
->sign
^ r
->sign
;
1484 if (sim_fpu_is_infinity (r
))
1487 f
->sign
= l
->sign
^ r
->sign
;
1490 if (sim_fpu_is_zero (r
))
1492 f
->class = sim_fpu_class_infinity
;
1493 f
->sign
= l
->sign
^ r
->sign
;
1494 return sim_fpu_status_invalid_div0
;
1497 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1500 /* quotient = ( ( numerator / denominator)
1501 x 2^(numerator exponent - denominator exponent)
1503 unsigned64 numerator
;
1504 unsigned64 denominator
;
1505 unsigned64 quotient
;
1508 f
->class = sim_fpu_class_number
;
1509 f
->sign
= l
->sign
^ r
->sign
;
1510 f
->normal_exp
= l
->normal_exp
- r
->normal_exp
;
1512 numerator
= l
->fraction
;
1513 denominator
= r
->fraction
;
1515 /* Fraction will be less than 1.0 */
1516 if (numerator
< denominator
)
1521 ASSERT (numerator
>= denominator
);
1523 /* Gain extra precision, already used one spare bit */
1524 numerator
<<= NR_SPARE
;
1525 denominator
<<= NR_SPARE
;
1527 /* Does divide one bit at a time. Optimize??? */
1529 bit
= (IMPLICIT_1
<< NR_SPARE
);
1532 if (numerator
>= denominator
)
1535 numerator
-= denominator
;
1541 /* discard (but save) the extra bits */
1542 if ((quotient
& LSMASK64 (NR_SPARE
-1, 0)))
1543 quotient
= (quotient
>> NR_SPARE
) | 1;
1545 quotient
= (quotient
>> NR_SPARE
);
1547 f
->fraction
= quotient
;
1548 ASSERT (f
->fraction
>= IMPLICIT_1
&& f
->fraction
< IMPLICIT_2
);
1551 f
->fraction
|= 1; /* stick remaining bits */
1552 return sim_fpu_status_inexact
;
1560 INLINE_SIM_FPU (int)
1561 sim_fpu_max (sim_fpu
*f
,
1565 if (sim_fpu_is_snan (l
))
1568 f
->class = sim_fpu_class_qnan
;
1569 return sim_fpu_status_invalid_snan
;
1571 if (sim_fpu_is_snan (r
))
1574 f
->class = sim_fpu_class_qnan
;
1575 return sim_fpu_status_invalid_snan
;
1577 if (sim_fpu_is_qnan (l
))
1582 if (sim_fpu_is_qnan (r
))
1587 if (sim_fpu_is_infinity (l
))
1589 if (sim_fpu_is_infinity (r
)
1590 && l
->sign
== r
->sign
)
1593 return sim_fpu_status_invalid_isi
;
1596 *f
= *r
; /* -inf < anything */
1598 *f
= *l
; /* +inf > anthing */
1601 if (sim_fpu_is_infinity (r
))
1604 *f
= *l
; /* anything > -inf */
1606 *f
= *r
; /* anthing < +inf */
1609 if (l
->sign
> r
->sign
)
1611 *f
= *r
; /* -ve < +ve */
1614 if (l
->sign
< r
->sign
)
1616 *f
= *l
; /* +ve > -ve */
1619 ASSERT (l
->sign
== r
->sign
);
1620 if (l
->normal_exp
> r
->normal_exp
1621 || (l
->normal_exp
== r
->normal_exp
&&
1622 l
->fraction
> r
->fraction
))
1626 *f
= *r
; /* -ve < -ve */
1628 *f
= *l
; /* +ve > +ve */
1635 *f
= *l
; /* -ve > -ve */
1637 *f
= *r
; /* +ve < +ve */
1643 INLINE_SIM_FPU (int)
1644 sim_fpu_min (sim_fpu
*f
,
1648 if (sim_fpu_is_snan (l
))
1651 f
->class = sim_fpu_class_qnan
;
1652 return sim_fpu_status_invalid_snan
;
1654 if (sim_fpu_is_snan (r
))
1657 f
->class = sim_fpu_class_qnan
;
1658 return sim_fpu_status_invalid_snan
;
1660 if (sim_fpu_is_qnan (l
))
1665 if (sim_fpu_is_qnan (r
))
1670 if (sim_fpu_is_infinity (l
))
1672 if (sim_fpu_is_infinity (r
)
1673 && l
->sign
== r
->sign
)
1676 return sim_fpu_status_invalid_isi
;
1679 *f
= *l
; /* -inf < anything */
1681 *f
= *r
; /* +inf > anthing */
1684 if (sim_fpu_is_infinity (r
))
1687 *f
= *r
; /* anything > -inf */
1689 *f
= *l
; /* anything < +inf */
1692 if (l
->sign
> r
->sign
)
1694 *f
= *l
; /* -ve < +ve */
1697 if (l
->sign
< r
->sign
)
1699 *f
= *r
; /* +ve > -ve */
1702 ASSERT (l
->sign
== r
->sign
);
1703 if (l
->normal_exp
> r
->normal_exp
1704 || (l
->normal_exp
== r
->normal_exp
&&
1705 l
->fraction
> r
->fraction
))
1709 *f
= *l
; /* -ve < -ve */
1711 *f
= *r
; /* +ve > +ve */
1718 *f
= *r
; /* -ve > -ve */
1720 *f
= *l
; /* +ve < +ve */
1726 INLINE_SIM_FPU (int)
1727 sim_fpu_neg (sim_fpu
*f
,
1730 if (sim_fpu_is_snan (r
))
1733 f
->class = sim_fpu_class_qnan
;
1734 return sim_fpu_status_invalid_snan
;
1736 if (sim_fpu_is_qnan (r
))
1747 INLINE_SIM_FPU (int)
1748 sim_fpu_abs (sim_fpu
*f
,
1753 if (sim_fpu_is_snan (r
))
1755 f
->class = sim_fpu_class_qnan
;
1756 return sim_fpu_status_invalid_snan
;
1762 INLINE_SIM_FPU (int)
1763 sim_fpu_inv (sim_fpu
*f
,
1766 return sim_fpu_div (f
, &sim_fpu_one
, r
);
1770 INLINE_SIM_FPU (int)
1771 sim_fpu_sqrt (sim_fpu
*f
,
1774 if (sim_fpu_is_snan (r
))
1777 return sim_fpu_status_invalid_snan
;
1779 if (sim_fpu_is_qnan (r
))
1784 if (sim_fpu_is_zero (r
))
1786 f
->class = sim_fpu_class_zero
;
1791 if (sim_fpu_is_infinity (r
))
1796 return sim_fpu_status_invalid_sqrt
;
1800 f
->class = sim_fpu_class_infinity
;
1809 return sim_fpu_status_invalid_sqrt
;
1812 /* @(#)e_sqrt.c 5.1 93/09/24 */
1814 * ====================================================
1815 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1817 * Developed at SunPro, a Sun Microsystems, Inc. business.
1818 * Permission to use, copy, modify, and distribute this
1819 * software is freely granted, provided that this notice
1821 * ====================================================
1824 /* __ieee754_sqrt(x)
1825 * Return correctly rounded sqrt.
1826 * ------------------------------------------
1827 * | Use the hardware sqrt if you have one |
1828 * ------------------------------------------
1830 * Bit by bit method using integer arithmetic. (Slow, but portable)
1832 * Scale x to y in [1,4) with even powers of 2:
1833 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1834 * sqrt(x) = 2^k * sqrt(y)
1837 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1838 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1840 - y = ((m even) ? x : 2.x)
1842 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1844 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1846 * 2. Bit by bit computation
1847 * Let q = sqrt(y) truncated to i bit after binary point (q = 1),
1850 * s = 2*q , and y = 2 * ( y - q ). (1)
1853 * To compute q from q , one checks whether
1857 * (q + 2 ) <= y. (2)
1860 * If (2) is false, then q = q ; otherwise q = q + 2 .
1863 * With some algebraic manipulation, it is not difficult to see
1864 * that (2) is equivalent to
1869 * The advantage of (3) is that s and y can be computed by
1871 * the following recurrence formula:
1874 * s = s , y = y ; (4)
1883 * s = s + 2 , y = y - s - 2 (5)
1888 - NOTE: y = 2 (y - s - 2 )
1891 * One may easily use induction to prove (4) and (5).
1892 * Note. Since the left hand side of (3) contain only i+2 bits,
1893 * it does not necessary to do a full (53-bit) comparison
1896 * After generating the 53 bits result, we compute one more bit.
1897 * Together with the remainder, we can decide whether the
1898 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
1899 * (it will never equal to 1/2ulp).
1900 * The rounding mode can be detected by checking whether
1901 * huge + tiny is equal to huge, and whether huge - tiny is
1902 * equal to huge for some floating point number "huge" and "tiny".
1905 * sqrt(+-0) = +-0 ... exact
1907 * sqrt(-ve) = NaN ... with invalid signal
1908 * sqrt(NaN) = NaN ... with invalid signal for signaling NaN
1910 * Other methods : see the appended file at the end of the program below.
1915 /* generate sqrt(x) bit by bit */
1921 f
->class = sim_fpu_class_number
;
1924 f
->normal_exp
= (r
->normal_exp
>> 1); /* exp = [exp/2] */
1926 /* odd exp, double x to make it even */
1927 ASSERT (y
>= IMPLICIT_1
&& y
< IMPLICIT_4
);
1928 if ((r
->normal_exp
& 1))
1932 ASSERT (y
>= IMPLICIT_1
&& y
< (IMPLICIT_2
<< 1));
1934 /* Let loop determine first value of s (either 1 or 2) */
1941 unsigned64 t
= s
+ b
;
1952 ASSERT (q
>= IMPLICIT_1
&& q
< IMPLICIT_2
);
1956 f
->fraction
|= 1; /* stick remaining bits */
1957 return sim_fpu_status_inexact
;
1965 /* int/long <-> sim_fpu */
1967 INLINE_SIM_FPU (int)
1968 sim_fpu_i32to (sim_fpu
*f
,
1970 sim_fpu_round round
)
1976 INLINE_SIM_FPU (int)
1977 sim_fpu_u32to (sim_fpu
*f
,
1979 sim_fpu_round round
)
1985 INLINE_SIM_FPU (int)
1986 sim_fpu_i64to (sim_fpu
*f
,
1988 sim_fpu_round round
)
1994 INLINE_SIM_FPU (int)
1995 sim_fpu_u64to (sim_fpu
*f
,
1997 sim_fpu_round round
)
2004 INLINE_SIM_FPU (int)
2005 sim_fpu_to32i (signed32
*i
,
2007 sim_fpu_round round
)
2010 int status
= fpu2i (&i64
, f
, 0, round
);
2015 INLINE_SIM_FPU (int)
2016 sim_fpu_to32u (unsigned32
*u
,
2018 sim_fpu_round round
)
2021 int status
= fpu2u (&u64
, f
, 0);
2026 INLINE_SIM_FPU (int)
2027 sim_fpu_to64i (signed64
*i
,
2029 sim_fpu_round round
)
2031 return fpu2i (i
, f
, 1, round
);
2035 INLINE_SIM_FPU (int)
2036 sim_fpu_to64u (unsigned64
*u
,
2038 sim_fpu_round round
)
2040 return fpu2u (u
, f
, 1);
2045 /* sim_fpu -> host format */
2048 INLINE_SIM_FPU (float)
2049 sim_fpu_2f (const sim_fpu
*f
)
2056 INLINE_SIM_FPU (double)
2057 sim_fpu_2d (const sim_fpu
*s
)
2060 if (sim_fpu_is_snan (s
))
2064 n
.class = sim_fpu_class_qnan
;
2065 val
.i
= pack_fpu (&n
, 1);
2069 val
.i
= pack_fpu (s
, 1);
2076 INLINE_SIM_FPU (void)
2077 sim_fpu_f2 (sim_fpu
*f
,
2082 unpack_fpu (f
, val
.i
, 1);
2087 INLINE_SIM_FPU (void)
2088 sim_fpu_d2 (sim_fpu
*f
,
2093 unpack_fpu (f
, val
.i
, 1);
2099 INLINE_SIM_FPU (int)
2100 sim_fpu_is_nan (const sim_fpu
*d
)
2104 case sim_fpu_class_qnan
:
2105 case sim_fpu_class_snan
:
2112 INLINE_SIM_FPU (int)
2113 sim_fpu_is_qnan (const sim_fpu
*d
)
2117 case sim_fpu_class_qnan
:
2124 INLINE_SIM_FPU (int)
2125 sim_fpu_is_snan (const sim_fpu
*d
)
2129 case sim_fpu_class_snan
:
2136 INLINE_SIM_FPU (int)
2137 sim_fpu_is_zero (const sim_fpu
*d
)
2141 case sim_fpu_class_zero
:
2148 INLINE_SIM_FPU (int)
2149 sim_fpu_is_infinity (const sim_fpu
*d
)
2153 case sim_fpu_class_infinity
:
2160 INLINE_SIM_FPU (int)
2161 sim_fpu_is_number (const sim_fpu
*d
)
2165 case sim_fpu_class_denorm
:
2166 case sim_fpu_class_number
:
2173 INLINE_SIM_FPU (int)
2174 sim_fpu_is_denorm (const sim_fpu
*d
)
2178 case sim_fpu_class_denorm
:
2186 INLINE_SIM_FPU (int)
2187 sim_fpu_sign (const sim_fpu
*d
)
2193 INLINE_SIM_FPU (int)
2194 sim_fpu_exp (const sim_fpu
*d
)
2196 return d
->normal_exp
;
2200 INLINE_SIM_FPU (unsigned64
)
2201 sim_fpu_fraction (const sim_fpu
*d
)
2207 INLINE_SIM_FPU (unsigned64
)
2208 sim_fpu_guard (const sim_fpu
*d
, int is_double
)
2211 unsigned64 guardmask
= LSMASK64 (NR_GUARDS
- 1, 0);
2212 rv
= (d
->fraction
& guardmask
) >> NR_PAD
;
2217 INLINE_SIM_FPU (int)
2218 sim_fpu_is (const sim_fpu
*d
)
2222 case sim_fpu_class_qnan
:
2223 return SIM_FPU_IS_QNAN
;
2224 case sim_fpu_class_snan
:
2225 return SIM_FPU_IS_SNAN
;
2226 case sim_fpu_class_infinity
:
2228 return SIM_FPU_IS_NINF
;
2230 return SIM_FPU_IS_PINF
;
2231 case sim_fpu_class_number
:
2233 return SIM_FPU_IS_NNUMBER
;
2235 return SIM_FPU_IS_PNUMBER
;
2236 case sim_fpu_class_denorm
:
2238 return SIM_FPU_IS_NDENORM
;
2240 return SIM_FPU_IS_PDENORM
;
2241 case sim_fpu_class_zero
:
2243 return SIM_FPU_IS_NZERO
;
2245 return SIM_FPU_IS_PZERO
;
2252 INLINE_SIM_FPU (int)
2253 sim_fpu_cmp (const sim_fpu
*l
, const sim_fpu
*r
)
2256 sim_fpu_sub (&res
, l
, r
);
2257 return sim_fpu_is (&res
);
2260 INLINE_SIM_FPU (int)
2261 sim_fpu_is_lt (const sim_fpu
*l
, const sim_fpu
*r
)
2264 sim_fpu_lt (&status
, l
, r
);
2268 INLINE_SIM_FPU (int)
2269 sim_fpu_is_le (const sim_fpu
*l
, const sim_fpu
*r
)
2272 sim_fpu_le (&is
, l
, r
);
2276 INLINE_SIM_FPU (int)
2277 sim_fpu_is_eq (const sim_fpu
*l
, const sim_fpu
*r
)
2280 sim_fpu_eq (&is
, l
, r
);
2284 INLINE_SIM_FPU (int)
2285 sim_fpu_is_ne (const sim_fpu
*l
, const sim_fpu
*r
)
2288 sim_fpu_ne (&is
, l
, r
);
2292 INLINE_SIM_FPU (int)
2293 sim_fpu_is_ge (const sim_fpu
*l
, const sim_fpu
*r
)
2296 sim_fpu_ge (&is
, l
, r
);
2300 INLINE_SIM_FPU (int)
2301 sim_fpu_is_gt (const sim_fpu
*l
, const sim_fpu
*r
)
2304 sim_fpu_gt (&is
, l
, r
);
2309 /* Compare operators */
2311 INLINE_SIM_FPU (int)
2312 sim_fpu_lt (int *is
,
2316 if (!sim_fpu_is_nan (l
) && !sim_fpu_is_nan (r
))
2320 lval
.i
= pack_fpu (l
, 1);
2321 rval
.i
= pack_fpu (r
, 1);
2322 (*is
) = (lval
.d
< rval
.d
);
2325 else if (sim_fpu_is_snan (l
) || sim_fpu_is_snan (r
))
2328 return sim_fpu_status_invalid_snan
;
2333 return sim_fpu_status_invalid_qnan
;
2337 INLINE_SIM_FPU (int)
2338 sim_fpu_le (int *is
,
2342 if (!sim_fpu_is_nan (l
) && !sim_fpu_is_nan (r
))
2346 lval
.i
= pack_fpu (l
, 1);
2347 rval
.i
= pack_fpu (r
, 1);
2348 *is
= (lval
.d
<= rval
.d
);
2351 else if (sim_fpu_is_snan (l
) || sim_fpu_is_snan (r
))
2354 return sim_fpu_status_invalid_snan
;
2359 return sim_fpu_status_invalid_qnan
;
2363 INLINE_SIM_FPU (int)
2364 sim_fpu_eq (int *is
,
2368 if (!sim_fpu_is_nan (l
) && !sim_fpu_is_nan (r
))
2372 lval
.i
= pack_fpu (l
, 1);
2373 rval
.i
= pack_fpu (r
, 1);
2374 (*is
) = (lval
.d
== rval
.d
);
2377 else if (sim_fpu_is_snan (l
) || sim_fpu_is_snan (r
))
2380 return sim_fpu_status_invalid_snan
;
2385 return sim_fpu_status_invalid_qnan
;
2389 INLINE_SIM_FPU (int)
2390 sim_fpu_ne (int *is
,
2394 if (!sim_fpu_is_nan (l
) && !sim_fpu_is_nan (r
))
2398 lval
.i
= pack_fpu (l
, 1);
2399 rval
.i
= pack_fpu (r
, 1);
2400 (*is
) = (lval
.d
!= rval
.d
);
2403 else if (sim_fpu_is_snan (l
) || sim_fpu_is_snan (r
))
2406 return sim_fpu_status_invalid_snan
;
2411 return sim_fpu_status_invalid_qnan
;
2415 INLINE_SIM_FPU (int)
2416 sim_fpu_ge (int *is
,
2420 return sim_fpu_le (is
, r
, l
);
2423 INLINE_SIM_FPU (int)
2424 sim_fpu_gt (int *is
,
2428 return sim_fpu_lt (is
, r
, l
);
2432 /* A number of useful constants */
2434 #if EXTERN_SIM_FPU_P
2435 const sim_fpu sim_fpu_zero
= {
2438 const sim_fpu sim_fpu_qnan
= {
2441 const sim_fpu sim_fpu_one
= {
2442 sim_fpu_class_number
, 0, IMPLICIT_1
, 0
2444 const sim_fpu sim_fpu_two
= {
2445 sim_fpu_class_number
, 0, IMPLICIT_1
, 1
2447 const sim_fpu sim_fpu_max32
= {
2448 sim_fpu_class_number
, 0, LSMASK64 (NR_FRAC_GUARD
, NR_GUARDS32
), NORMAL_EXPMAX32
2450 const sim_fpu sim_fpu_max64
= {
2451 sim_fpu_class_number
, 0, LSMASK64 (NR_FRAC_GUARD
, NR_GUARDS64
), NORMAL_EXPMAX64
2458 INLINE_SIM_FPU (void)
2459 sim_fpu_print_fpu (const sim_fpu
*f
,
2460 sim_fpu_print_func
*print
,
2463 sim_fpu_printn_fpu (f
, print
, -1, arg
);
2466 INLINE_SIM_FPU (void)
2467 sim_fpu_printn_fpu (const sim_fpu
*f
,
2468 sim_fpu_print_func
*print
,
2472 print (arg
, "%s", f
->sign
? "-" : "+");
2475 case sim_fpu_class_qnan
:
2477 print_bits (f
->fraction
, NR_FRAC_GUARD
- 1, digits
, print
, arg
);
2478 print (arg
, "*QuietNaN");
2480 case sim_fpu_class_snan
:
2482 print_bits (f
->fraction
, NR_FRAC_GUARD
- 1, digits
, print
, arg
);
2483 print (arg
, "*SignalNaN");
2485 case sim_fpu_class_zero
:
2488 case sim_fpu_class_infinity
:
2491 case sim_fpu_class_number
:
2492 case sim_fpu_class_denorm
:
2494 print_bits (f
->fraction
, NR_FRAC_GUARD
- 1, digits
, print
, arg
);
2495 print (arg
, "*2^%+d", f
->normal_exp
);
2496 ASSERT (f
->fraction
>= IMPLICIT_1
);
2497 ASSERT (f
->fraction
< IMPLICIT_2
);
2502 INLINE_SIM_FPU (void)
2503 sim_fpu_print_status (int status
,
2504 sim_fpu_print_func
*print
,
2511 switch ((sim_fpu_status
) (status
& i
))
2513 case sim_fpu_status_denorm
:
2514 print (arg
, "%sD", prefix
);
2516 case sim_fpu_status_invalid_snan
:
2517 print (arg
, "%sSNaN", prefix
);
2519 case sim_fpu_status_invalid_qnan
:
2520 print (arg
, "%sQNaN", prefix
);
2522 case sim_fpu_status_invalid_isi
:
2523 print (arg
, "%sISI", prefix
);
2525 case sim_fpu_status_invalid_idi
:
2526 print (arg
, "%sIDI", prefix
);
2528 case sim_fpu_status_invalid_zdz
:
2529 print (arg
, "%sZDZ", prefix
);
2531 case sim_fpu_status_invalid_imz
:
2532 print (arg
, "%sIMZ", prefix
);
2534 case sim_fpu_status_invalid_cvi
:
2535 print (arg
, "%sCVI", prefix
);
2537 case sim_fpu_status_invalid_cmp
:
2538 print (arg
, "%sCMP", prefix
);
2540 case sim_fpu_status_invalid_sqrt
:
2541 print (arg
, "%sSQRT", prefix
);
2544 case sim_fpu_status_inexact
:
2545 print (arg
, "%sX", prefix
);
2548 case sim_fpu_status_overflow
:
2549 print (arg
, "%sO", prefix
);
2552 case sim_fpu_status_underflow
:
2553 print (arg
, "%sU", prefix
);
2556 case sim_fpu_status_invalid_div0
:
2557 print (arg
, "%s/", prefix
);
2560 case sim_fpu_status_rounded
:
2561 print (arg
, "%sR", prefix
);