sim/common/sim-fpu.c
1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
4
5 /* Copyright 1994-2016 Free Software Foundation, Inc.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 /* As a special exception, if you link this library with other files,
21 some of which are compiled with GCC, to produce an executable,
22 this library does not by itself cause the resulting executable
23 to be covered by the GNU General Public License.
24 This exception does not however invalidate any other reasons why
25 the executable file might be covered by the GNU General Public License. */
26
27 /* This implements IEEE 754 format arithmetic, but does not provide a
28 mechanism for setting the rounding mode, or for generating or handling
29 exceptions.
30
31 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
32 Wilson, all of Cygnus Support. */
33
34
35 #ifndef SIM_FPU_C
36 #define SIM_FPU_C
37
38 #include "sim-basics.h"
39 #include "sim-fpu.h"
40
41 #include "sim-io.h"
42 #include "sim-assert.h"
43
44
45 /* Debugging support.
46 If digits is -1, then print all digits. */
47
48 static void
49 print_bits (unsigned64 x,
50 int msbit,
51 int digits,
52 sim_fpu_print_func print,
53 void *arg)
54 {
55 unsigned64 bit = LSBIT64 (msbit);
56 int i = 4;
57 while (bit && digits)
58 {
59 if (i == 0)
60 print (arg, ",");
61
62 if ((x & bit))
63 print (arg, "1");
64 else
65 print (arg, "0");
66 bit >>= 1;
67
68 if (digits > 0)
69 digits--;
70 i = (i + 1) % 4;
71 }
72 }
73
74
75
76 /* Quick and dirty conversion between a host double and host 64bit int. */
77
78 typedef union
79 {
80 double d;
81 unsigned64 i;
82 } sim_fpu_map;
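/* Editor's note: a minimal illustration of the union above; the bit
   pattern quoted is a standard IEEE 754 double-precision fact and is
   not defined anywhere in this file.  The function name is
   hypothetical.  */
#if 0
static void
example_sim_fpu_map (void)
{
  sim_fpu_map m;
  m.d = 1.0;
  /* On an IEEE 754 host, m.i is now 0x3FF0000000000000:
     sign 0, biased exponent 0x3FF (1023), fraction 0.  */
}
#endif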
83
84
85 /* A packed IEEE floating point number.
86
87 Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
88 32 and 64 bit numbers. This number is interpreted as:
89
90 Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
91 (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
92
93 Denormalized (0 == BIASEDEXP && FRAC != 0):
94 (sign ? "-" : "+") 0.<FRAC> x 2 ^ (1 - EXPBIAS)
95
96 Zero (0 == BIASEDEXP && FRAC == 0):
97 (sign ? "-" : "+") 0.0
98
99 Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
100 (sign ? "-" : "+") "infinity"
101
102 SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
103 SNaN.FRAC
104
105 QuietNaN (BIASEDEXP == EXPMAX && FRAC >= QUIET_NAN):
106 QNaN.FRAC
107
108 */
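/* Editor's note: two worked single-precision examples of the layout
   described above (standard IEEE 754 values, added for illustration):

   1.0f  = 0x3F800000 : SIGN 0, BIASEDEXP 127 (== EXPBIAS32), FRAC 0
	   => +1.0 x 2^(127 - 127) = 1.0
   -2.5f = 0xC0200000 : SIGN 1, BIASEDEXP 128, FRAC 0x200000
	   => -1.01 (binary) x 2^(128 - 127) = -2.5  */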
109
110 #define NR_EXPBITS (is_double ? 11 : 8)
111 #define NR_FRACBITS (is_double ? 52 : 23)
112 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
113
114 #define EXPMAX32 (255)
115 #define EXPMAX64 (2047)
116 #define EXPMAX ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))
117
118 #define EXPBIAS32 (127)
119 #define EXPBIAS64 (1023)
120 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
121
122 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
123
124
125
126 /* An unpacked floating point number.
127
128 When unpacked, the fraction of both a 32 and 64 bit floating point
129 number is stored using the same format:
130
131 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
132 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
133
134 #define NR_PAD32 (30)
135 #define NR_PAD64 (0)
136 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
137 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
138
139 #define NR_GUARDS32 (7 + NR_PAD32)
140 #define NR_GUARDS64 (8 + NR_PAD64)
141 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
142 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
143
144 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
145 #define GUARDLSB LSBIT64 (NR_PAD)
146 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
147
148 #define NR_FRAC_GUARD (60)
149 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
150 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
151 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
152 #define NR_SPARE 2
153
154 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
155
156 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
157
158 #define NORMAL_EXPMAX32 (EXPBIAS32)
159 #define NORMAL_EXPMAX64 (EXPBIAS64)
160 #define NORMAL_EXPMAX (EXPBIAS)
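/* Editor's note: for orientation, the values the macros above work out
   to (pure arithmetic on the definitions, added for illustration):
   NR_GUARDS32 = 7 + 30 = 37 and NR_GUARDS64 = 8 + 0 = 8, so in both
   cases NR_FRACBITS + NR_GUARDS = 60 = NR_FRAC_GUARD and the implicit
   leading one of an unpacked fraction sits at bit 60 (IMPLICIT_1).  */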
161
162
163 /* Integer constants */
164
165 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
166 #define MAX_UINT32 LSMASK64 (31, 0)
167 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
168
169 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
170 #define MAX_UINT64 LSMASK64 (63, 0)
171 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
172
173 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
174 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
175 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
176 #define NR_INTBITS (is_64bit ? 64 : 32)
177
178 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer. */
179 STATIC_INLINE_SIM_FPU (unsigned64)
180 pack_fpu (const sim_fpu *src,
181 int is_double)
182 {
183 int sign;
184 unsigned64 exp;
185 unsigned64 fraction;
186 unsigned64 packed;
187
188 switch (src->class)
189 {
190 /* Create a NaN. */
191 case sim_fpu_class_qnan:
192 sign = src->sign;
193 exp = EXPMAX;
194 /* Force fraction to correct class. */
195 fraction = src->fraction;
196 fraction >>= NR_GUARDS;
197 #ifdef SIM_QUIET_NAN_NEGATED
198 fraction |= QUIET_NAN - 1;
199 #else
200 fraction |= QUIET_NAN;
201 #endif
202 break;
203 case sim_fpu_class_snan:
204 sign = src->sign;
205 exp = EXPMAX;
206 /* Force fraction to correct class. */
207 fraction = src->fraction;
208 fraction >>= NR_GUARDS;
209 #ifdef SIM_QUIET_NAN_NEGATED
210 fraction |= QUIET_NAN;
211 #else
212 fraction &= ~QUIET_NAN;
213 #endif
214 break;
215 case sim_fpu_class_infinity:
216 sign = src->sign;
217 exp = EXPMAX;
218 fraction = 0;
219 break;
220 case sim_fpu_class_zero:
221 sign = src->sign;
222 exp = 0;
223 fraction = 0;
224 break;
225 case sim_fpu_class_number:
226 case sim_fpu_class_denorm:
227 ASSERT (src->fraction >= IMPLICIT_1);
228 ASSERT (src->fraction < IMPLICIT_2);
229 if (src->normal_exp < NORMAL_EXPMIN)
230 {
231 /* This number's exponent is too low to fit into the bits
232 available in the number.  We'll denormalize it by storing
233 zero in the exponent and shifting the fraction to the
234 right to make up for it. */
235 int nr_shift = NORMAL_EXPMIN - src->normal_exp;
236 if (nr_shift > NR_FRACBITS)
237 {
238 /* Underflow, just make the number zero. */
239 sign = src->sign;
240 exp = 0;
241 fraction = 0;
242 }
243 else
244 {
245 sign = src->sign;
246 exp = 0;
247 /* Shift by the value. */
248 fraction = src->fraction;
249 fraction >>= NR_GUARDS;
250 fraction >>= nr_shift;
251 }
252 }
253 else if (src->normal_exp > NORMAL_EXPMAX)
254 {
255 /* Infinity */
256 sign = src->sign;
257 exp = EXPMAX;
258 fraction = 0;
259 }
260 else
261 {
262 exp = (src->normal_exp + EXPBIAS);
263 sign = src->sign;
264 fraction = src->fraction;
265 /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
266 or some such. */
267 /* Round to nearest: If all the guard bits except the topmost
268 are zero, we're exactly half way between two numbers; choose
269 the one which makes the LSB of the answer 0 (round to even). */
270 if ((fraction & GUARDMASK) == GUARDMSB)
271 {
272 if ((fraction & (GUARDMSB << 1)))
273 fraction += (GUARDMSB << 1);
274 }
275 else
276 {
277 /* Add a one to the guards to force round to nearest. */
278 fraction += GUARDROUND;
279 }
280 if ((fraction & IMPLICIT_2)) /* Rounding resulted in carry. */
281 {
282 exp += 1;
283 fraction >>= 1;
284 }
285 fraction >>= NR_GUARDS;
286 /* When exp == EXPMAX (overflow from carry) fraction must
287 have been made zero. */
288 ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
289 }
290 break;
291 default:
292 abort ();
293 }
294
295 packed = ((sign ? SIGNBIT : 0)
296 | (exp << NR_FRACBITS)
297 | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
298
299 /* Trace operation. */
300 #if 0
301 if (is_double)
302 {
303 }
304 else
305 {
306 printf ("pack_fpu: ");
307 printf ("-> %c%0lX.%06lX\n",
308 LSMASKED32 (packed, 31, 31) ? '8' : '0',
309 (long) LSEXTRACTED32 (packed, 30, 23),
310 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
311 }
312 #endif
313
314 return packed;
315 }
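/* Editor's note: an illustration of the round-to-nearest-even trick in
   pack_fpu above, scaled down to a hypothetical 4 guard bits purely for
   readability (so GUARDMASK = 0b1111, GUARDMSB = 0b1000 and
   GUARDROUND = 0b0111):

     guards == 0b1000 (exactly half way):
       GUARDMSB << 1 is added only when the kept LSB is already 1, so
       the rounded result always ends in 0 (ties round to even).
     guards in 0b1001 .. 0b1111 (above half way):
       adding GUARDROUND always carries into the kept bits, i.e. the
       value rounds up.
     guards in 0b0000 .. 0b0111 (below half way):
       adding GUARDROUND never carries, so the guard bits are simply
       discarded, i.e. the value rounds down.  */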
316
317
318 /* Unpack a 32/64 bit integer into a sim_fpu structure. */
319 STATIC_INLINE_SIM_FPU (void)
320 unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
321 {
322 unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
323 unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
324 int sign = (packed & SIGNBIT) != 0;
325
326 if (exp == 0)
327 {
328 /* Hmm. Looks like 0 */
329 if (fraction == 0)
330 {
331 /* Tastes like zero. */
332 dst->class = sim_fpu_class_zero;
333 dst->sign = sign;
334 dst->normal_exp = 0;
335 }
336 else
337 {
338 /* Zero exponent with non zero fraction - it's denormalized,
339 so there isn't a leading implicit one - we'll shift it so
340 it gets one. */
341 dst->normal_exp = exp - EXPBIAS + 1;
342 dst->class = sim_fpu_class_denorm;
343 dst->sign = sign;
344 fraction <<= NR_GUARDS;
345 while (fraction < IMPLICIT_1)
346 {
347 fraction <<= 1;
348 dst->normal_exp--;
349 }
350 dst->fraction = fraction;
351 }
352 }
353 else if (exp == EXPMAX)
354 {
355 /* Huge exponent. */
356 if (fraction == 0)
357 {
358 /* Attached to a zero fraction - means infinity. */
359 dst->class = sim_fpu_class_infinity;
360 dst->sign = sign;
361 /* dst->normal_exp = EXPBIAS; */
362 /* dst->fraction = 0; */
363 }
364 else
365 {
366 int qnan;
367
368 /* Non zero fraction, means NaN. */
369 dst->sign = sign;
370 dst->fraction = (fraction << NR_GUARDS);
371 #ifdef SIM_QUIET_NAN_NEGATED
372 qnan = (fraction & QUIET_NAN) == 0;
373 #else
374 qnan = fraction >= QUIET_NAN;
375 #endif
376 if (qnan)
377 dst->class = sim_fpu_class_qnan;
378 else
379 dst->class = sim_fpu_class_snan;
380 }
381 }
382 else
383 {
384 /* Nothing strange about this number. */
385 dst->class = sim_fpu_class_number;
386 dst->sign = sign;
387 dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
388 dst->normal_exp = exp - EXPBIAS;
389 }
390
391 /* Trace operation. */
392 #if 0
393 if (is_double)
394 {
395 }
396 else
397 {
398 printf ("unpack_fpu: %c%02lX.%06lX ->\n",
399 LSMASKED32 (packed, 31, 31) ? '8' : '0',
400 (long) LSEXTRACTED32 (packed, 30, 23),
401 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
402 }
403 #endif
404
405 /* sanity checks */
406 {
407 sim_fpu_map val;
408 val.i = pack_fpu (dst, 1);
409 if (is_double)
410 {
411 ASSERT (val.i == packed);
412 }
413 else
414 {
415 unsigned32 val = pack_fpu (dst, 0);
416 unsigned32 org = packed;
417 ASSERT (val == org);
418 }
419 }
420 }
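/* Editor's note: a small round-trip sketch of the pack/unpack pair via
   the public 32-bit wrappers defined further down; illustrative only,
   and the function name is hypothetical.  */
#if 0
static void
example_roundtrip_32 (void)
{
  sim_fpu v;
  unsigned32 out;
  sim_fpu_32to (&v, 0x3F800000);	/* 1.0f: class number, normal_exp 0,
					   fraction == IMPLICIT_1 */
  sim_fpu_to32 (&out, &v);		/* out == 0x3F800000 again */
}
#endif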
421
422
423 /* Convert a floating point into an integer. */
424 STATIC_INLINE_SIM_FPU (int)
425 fpu2i (signed64 *i,
426 const sim_fpu *s,
427 int is_64bit,
428 sim_fpu_round round)
429 {
430 unsigned64 tmp;
431 int shift;
432 int status = 0;
433 if (sim_fpu_is_zero (s))
434 {
435 *i = 0;
436 return 0;
437 }
438 if (sim_fpu_is_snan (s))
439 {
440 *i = MIN_INT; /* FIXME */
441 return sim_fpu_status_invalid_cvi;
442 }
443 if (sim_fpu_is_qnan (s))
444 {
445 *i = MIN_INT; /* FIXME */
446 return sim_fpu_status_invalid_cvi;
447 }
448 /* Map infinity onto MAX_INT... */
449 if (sim_fpu_is_infinity (s))
450 {
451 *i = s->sign ? MIN_INT : MAX_INT;
452 return sim_fpu_status_invalid_cvi;
453 }
454 /* It is a number, but a small one. */
455 if (s->normal_exp < 0)
456 {
457 *i = 0;
458 return sim_fpu_status_inexact;
459 }
460 /* Is the floating point MIN_INT or just close? */
461 if (s->sign && s->normal_exp == (NR_INTBITS - 1))
462 {
463 *i = MIN_INT;
464 ASSERT (s->fraction >= IMPLICIT_1);
465 if (s->fraction == IMPLICIT_1)
466 return 0; /* exact */
467 if (is_64bit) /* can't round */
468 return sim_fpu_status_invalid_cvi; /* must be overflow */
469 /* For a 32bit MIN_INT, rounding is still possible. */
470 switch (round)
471 {
472 case sim_fpu_round_default:
473 abort ();
474 case sim_fpu_round_zero:
475 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
476 return sim_fpu_status_invalid_cvi;
477 else
478 return sim_fpu_status_inexact;
479 break;
480 case sim_fpu_round_near:
481 {
482 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
483 return sim_fpu_status_invalid_cvi;
484 else if ((s->fraction & ~FRAC32MASK) >= (~FRAC32MASK >> 1))
485 return sim_fpu_status_invalid_cvi;
486 else
487 return sim_fpu_status_inexact;
488 }
489 case sim_fpu_round_up:
490 if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
491 return sim_fpu_status_inexact;
492 else
493 return sim_fpu_status_invalid_cvi;
494 case sim_fpu_round_down:
495 return sim_fpu_status_invalid_cvi;
496 }
497 }
498 /* Would right shifting result in the FRAC being shifted into
499 (through) the integer's sign bit? */
500 if (s->normal_exp > (NR_INTBITS - 2))
501 {
502 *i = s->sign ? MIN_INT : MAX_INT;
503 return sim_fpu_status_invalid_cvi;
504 }
505 /* Normal number, shift it into place. */
506 tmp = s->fraction;
507 shift = (s->normal_exp - (NR_FRAC_GUARD));
508 if (shift > 0)
509 {
510 tmp <<= shift;
511 }
512 else
513 {
514 shift = -shift;
515 if (tmp & ((SIGNED64 (1) << shift) - 1))
516 status |= sim_fpu_status_inexact;
517 tmp >>= shift;
518 }
519 *i = s->sign ? (-tmp) : (tmp);
520 return status;
521 }
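/* Editor's note: a usage sketch of the conversion above, through the
   public wrapper sim_fpu_to32i defined further down; illustrative only,
   and the function name is hypothetical.  */
#if 0
static void
example_float_to_int (void)
{
  sim_fpu v;
  signed32 i;
  int status;
  sim_fpu_32to (&v, 0x3FC00000);	/* 1.5f */
  status = sim_fpu_to32i (&i, &v, sim_fpu_round_zero);
  /* i == 1; status has sim_fpu_status_inexact set because the .5 was
     discarded.  */
}
#endif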
522
523 /* Convert an integer into a floating point. */
524 STATIC_INLINE_SIM_FPU (int)
525 i2fpu (sim_fpu *f, signed64 i, int is_64bit)
526 {
527 int status = 0;
528 if (i == 0)
529 {
530 f->class = sim_fpu_class_zero;
531 f->sign = 0;
532 f->normal_exp = 0;
533 }
534 else
535 {
536 f->class = sim_fpu_class_number;
537 f->sign = (i < 0);
538 f->normal_exp = NR_FRAC_GUARD;
539
540 if (f->sign)
541 {
542 /* Special case for minint, since there is no corresponding
543 +ve integer representation for it. */
544 if (i == MIN_INT)
545 {
546 f->fraction = IMPLICIT_1;
547 f->normal_exp = NR_INTBITS - 1;
548 }
549 else
550 f->fraction = (-i);
551 }
552 else
553 f->fraction = i;
554
555 if (f->fraction >= IMPLICIT_2)
556 {
557 do
558 {
559 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
560 f->normal_exp += 1;
561 }
562 while (f->fraction >= IMPLICIT_2);
563 }
564 else if (f->fraction < IMPLICIT_1)
565 {
566 do
567 {
568 f->fraction <<= 1;
569 f->normal_exp -= 1;
570 }
571 while (f->fraction < IMPLICIT_1);
572 }
573 }
574
575 /* trace operation */
576 #if 0
577 {
578 printf ("i2fpu: 0x%08lX ->\n", (long) i);
579 }
580 #endif
581
582 /* sanity check */
583 {
584 signed64 val;
585 fpu2i (&val, f, is_64bit, sim_fpu_round_zero);
586 if (i >= MIN_INT32 && i <= MAX_INT32)
587 {
588 ASSERT (val == i);
589 }
590 }
591
592 return status;
593 }
594
595
596 /* Convert a floating point into an integer. */
597 STATIC_INLINE_SIM_FPU (int)
598 fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
599 {
600 const int is_double = 1;
601 unsigned64 tmp;
602 int shift;
603 if (sim_fpu_is_zero (s))
604 {
605 *u = 0;
606 return 0;
607 }
608 if (sim_fpu_is_nan (s))
609 {
610 *u = 0;
611 return 0;
612 }
613 /* It is a negative number. */
614 if (s->sign)
615 {
616 *u = 0;
617 return 0;
618 }
619 /* Get reasonable MAX_USI_INT... */
620 if (sim_fpu_is_infinity (s))
621 {
622 *u = MAX_UINT;
623 return 0;
624 }
625 /* It is a number, but a small one. */
626 if (s->normal_exp < 0)
627 {
628 *u = 0;
629 return 0;
630 }
631 /* overflow */
632 if (s->normal_exp > (NR_INTBITS - 1))
633 {
634 *u = MAX_UINT;
635 return 0;
636 }
637 /* normal number */
638 tmp = (s->fraction & ~PADMASK);
639 shift = (s->normal_exp - (NR_FRACBITS + NR_GUARDS));
640 if (shift > 0)
641 {
642 tmp <<= shift;
643 }
644 else
645 {
646 shift = -shift;
647 tmp >>= shift;
648 }
649 *u = tmp;
650 return 0;
651 }
652
653 /* Convert an unsigned integer into a floating point. */
654 STATIC_INLINE_SIM_FPU (int)
655 u2fpu (sim_fpu *f, unsigned64 u, int is_64bit)
656 {
657 if (u == 0)
658 {
659 f->class = sim_fpu_class_zero;
660 f->sign = 0;
661 f->normal_exp = 0;
662 }
663 else
664 {
665 f->class = sim_fpu_class_number;
666 f->sign = 0;
667 f->normal_exp = NR_FRAC_GUARD;
668 f->fraction = u;
669
670 while (f->fraction < IMPLICIT_1)
671 {
672 f->fraction <<= 1;
673 f->normal_exp -= 1;
674 }
675 }
676 return 0;
677 }
678
679
680 /* register <-> sim_fpu */
681
682 INLINE_SIM_FPU (void)
683 sim_fpu_32to (sim_fpu *f, unsigned32 s)
684 {
685 unpack_fpu (f, s, 0);
686 }
687
688
689 INLINE_SIM_FPU (void)
690 sim_fpu_232to (sim_fpu *f, unsigned32 h, unsigned32 l)
691 {
692 unsigned64 s = h;
693 s = (s << 32) | l;
694 unpack_fpu (f, s, 1);
695 }
696
697
698 INLINE_SIM_FPU (void)
699 sim_fpu_64to (sim_fpu *f, unsigned64 s)
700 {
701 unpack_fpu (f, s, 1);
702 }
703
704
705 INLINE_SIM_FPU (void)
706 sim_fpu_to32 (unsigned32 *s,
707 const sim_fpu *f)
708 {
709 *s = pack_fpu (f, 0);
710 }
711
712
713 INLINE_SIM_FPU (void)
714 sim_fpu_to232 (unsigned32 *h, unsigned32 *l,
715 const sim_fpu *f)
716 {
717 unsigned64 s = pack_fpu (f, 1);
718 *l = s;
719 *h = (s >> 32);
720 }
721
722
723 INLINE_SIM_FPU (void)
724 sim_fpu_to64 (unsigned64 *u,
725 const sim_fpu *f)
726 {
727 *u = pack_fpu (f, 1);
728 }
729
730
731 INLINE_SIM_FPU (void)
732 sim_fpu_fractionto (sim_fpu *f,
733 int sign,
734 int normal_exp,
735 unsigned64 fraction,
736 int precision)
737 {
738 int shift = (NR_FRAC_GUARD - precision);
739 f->class = sim_fpu_class_number;
740 f->sign = sign;
741 f->normal_exp = normal_exp;
742 /* Shift the fraction to where sim-fpu expects it. */
743 if (shift >= 0)
744 f->fraction = (fraction << shift);
745 else
746 f->fraction = (fraction >> -shift);
747 f->fraction |= IMPLICIT_1;
748 }
749
750
751 INLINE_SIM_FPU (unsigned64)
752 sim_fpu_tofraction (const sim_fpu *d,
753 int precision)
754 {
755 /* We have NR_FRAC_GUARD bits, we want only PRECISION bits. */
756 int shift = (NR_FRAC_GUARD - precision);
757 unsigned64 fraction = (d->fraction & ~IMPLICIT_1);
758 if (shift >= 0)
759 return fraction >> shift;
760 else
761 return fraction << -shift;
762 }
763
764
765 /* Rounding */
766
767 STATIC_INLINE_SIM_FPU (int)
768 do_normal_overflow (sim_fpu *f,
769 int is_double,
770 sim_fpu_round round)
771 {
772 switch (round)
773 {
774 case sim_fpu_round_default:
775 return 0;
776 case sim_fpu_round_near:
777 f->class = sim_fpu_class_infinity;
778 break;
779 case sim_fpu_round_up:
780 if (!f->sign)
781 f->class = sim_fpu_class_infinity;
782 break;
783 case sim_fpu_round_down:
784 if (f->sign)
785 f->class = sim_fpu_class_infinity;
786 break;
787 case sim_fpu_round_zero:
788 break;
789 }
790 f->normal_exp = NORMAL_EXPMAX;
791 f->fraction = LSMASK64 (NR_FRAC_GUARD, NR_GUARDS);
792 return (sim_fpu_status_overflow | sim_fpu_status_inexact);
793 }
794
795 STATIC_INLINE_SIM_FPU (int)
796 do_normal_underflow (sim_fpu *f,
797 int is_double,
798 sim_fpu_round round)
799 {
800 switch (round)
801 {
802 case sim_fpu_round_default:
803 return 0;
804 case sim_fpu_round_near:
805 f->class = sim_fpu_class_zero;
806 break;
807 case sim_fpu_round_up:
808 if (f->sign)
809 f->class = sim_fpu_class_zero;
810 break;
811 case sim_fpu_round_down:
812 if (!f->sign)
813 f->class = sim_fpu_class_zero;
814 break;
815 case sim_fpu_round_zero:
816 f->class = sim_fpu_class_zero;
817 break;
818 }
819 f->normal_exp = NORMAL_EXPMIN - NR_FRACBITS;
820 f->fraction = IMPLICIT_1;
821 return (sim_fpu_status_inexact | sim_fpu_status_underflow);
822 }
823
824
825
826 /* Round a number using NR_GUARDS guard bits.
827 Returns the rounding status; on underflow F->FRACTION is left == 0. */
828
829 STATIC_INLINE_SIM_FPU (int)
830 do_normal_round (sim_fpu *f,
831 int nr_guards,
832 sim_fpu_round round)
833 {
834 unsigned64 guardmask = LSMASK64 (nr_guards - 1, 0);
835 unsigned64 guardmsb = LSBIT64 (nr_guards - 1);
836 unsigned64 fraclsb = guardmsb << 1;
837 if ((f->fraction & guardmask))
838 {
839 int status = sim_fpu_status_inexact;
840 switch (round)
841 {
842 case sim_fpu_round_default:
843 return 0;
844 case sim_fpu_round_near:
845 if ((f->fraction & guardmsb))
846 {
847 if ((f->fraction & fraclsb))
848 {
849 status |= sim_fpu_status_rounded;
850 }
851 else if ((f->fraction & (guardmask >> 1)))
852 {
853 status |= sim_fpu_status_rounded;
854 }
855 }
856 break;
857 case sim_fpu_round_up:
858 if (!f->sign)
859 status |= sim_fpu_status_rounded;
860 break;
861 case sim_fpu_round_down:
862 if (f->sign)
863 status |= sim_fpu_status_rounded;
864 break;
865 case sim_fpu_round_zero:
866 break;
867 }
868 f->fraction &= ~guardmask;
869 /* Round if needed, handle resulting overflow. */
870 if ((status & sim_fpu_status_rounded))
871 {
872 f->fraction += fraclsb;
873 if ((f->fraction & IMPLICIT_2))
874 {
875 f->fraction >>= 1;
876 f->normal_exp += 1;
877 }
878 }
879 return status;
880 }
881 else
882 return 0;
883 }
884
885
886 STATIC_INLINE_SIM_FPU (int)
887 do_round (sim_fpu *f,
888 int is_double,
889 sim_fpu_round round,
890 sim_fpu_denorm denorm)
891 {
892 switch (f->class)
893 {
894 case sim_fpu_class_qnan:
895 case sim_fpu_class_zero:
896 case sim_fpu_class_infinity:
897 return 0;
898 break;
899 case sim_fpu_class_snan:
900 /* Quieten a SignalingNaN. */
901 f->class = sim_fpu_class_qnan;
902 return sim_fpu_status_invalid_snan;
903 break;
904 case sim_fpu_class_number:
905 case sim_fpu_class_denorm:
906 {
907 int status;
908 ASSERT (f->fraction < IMPLICIT_2);
909 ASSERT (f->fraction >= IMPLICIT_1);
910 if (f->normal_exp < NORMAL_EXPMIN)
911 {
912 /* This number's exponent is too low to fit into the bits
913 available in the number. Round off any bits that will be
914 discarded as a result of denormalization. Edge case is
915 the implicit bit shifted to GUARD0 and then rounded
916 up. */
917 int shift = NORMAL_EXPMIN - f->normal_exp;
918 if (shift + NR_GUARDS <= NR_FRAC_GUARD + 1
919 && !(denorm & sim_fpu_denorm_zero))
920 {
921 status = do_normal_round (f, shift + NR_GUARDS, round);
922 if (f->fraction == 0) /* Rounding underflowed. */
923 {
924 status |= do_normal_underflow (f, is_double, round);
925 }
926 else if (f->normal_exp < NORMAL_EXPMIN) /* still underflow? */
927 {
928 status |= sim_fpu_status_denorm;
929 /* Any loss of precision when denormalizing is
930 underflow. Some processors check for underflow
931 before rounding, some after! */
932 if (status & sim_fpu_status_inexact)
933 status |= sim_fpu_status_underflow;
934 /* Flag that resultant value has been denormalized. */
935 f->class = sim_fpu_class_denorm;
936 }
937 else if ((denorm & sim_fpu_denorm_underflow_inexact))
938 {
939 if ((status & sim_fpu_status_inexact))
940 status |= sim_fpu_status_underflow;
941 }
942 }
943 else
944 {
945 status = do_normal_underflow (f, is_double, round);
946 }
947 }
948 else if (f->normal_exp > NORMAL_EXPMAX)
949 {
950 /* Infinity */
951 status = do_normal_overflow (f, is_double, round);
952 }
953 else
954 {
955 status = do_normal_round (f, NR_GUARDS, round);
956 if (f->fraction == 0)
957 /* f->class = sim_fpu_class_zero; */
958 status |= do_normal_underflow (f, is_double, round);
959 else if (f->normal_exp > NORMAL_EXPMAX)
960 /* Oops! rounding caused overflow. */
961 status |= do_normal_overflow (f, is_double, round);
962 }
963 ASSERT ((f->class == sim_fpu_class_number
964 || f->class == sim_fpu_class_denorm)
965 <= (f->fraction < IMPLICIT_2 && f->fraction >= IMPLICIT_1));
966 return status;
967 }
968 }
969 return 0;
970 }
971
972 INLINE_SIM_FPU (int)
973 sim_fpu_round_32 (sim_fpu *f,
974 sim_fpu_round round,
975 sim_fpu_denorm denorm)
976 {
977 return do_round (f, 0, round, denorm);
978 }
979
980 INLINE_SIM_FPU (int)
981 sim_fpu_round_64 (sim_fpu *f,
982 sim_fpu_round round,
983 sim_fpu_denorm denorm)
984 {
985 return do_round (f, 1, round, denorm);
986 }
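/* Editor's note: a usage sketch of the rounding entry points above;
   illustrative only, the function name is hypothetical.  1 + 2^-40 is
   representable in double but not in single precision, so rounding it
   to 32-bit precision is inexact and simply drops the low bit.  */
#if 0
static void
example_round_to_single (void)
{
  sim_fpu v;
  int status;
  sim_fpu_64to (&v, UNSIGNED64 (0x3FF0000000001000));	/* 1 + 2^-40 */
  status = sim_fpu_round_32 (&v, sim_fpu_round_near, sim_fpu_denorm_zero);
  /* status has sim_fpu_status_inexact set; v now holds exactly 1.0.  */
}
#endif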
987
988
989
990 /* Arithmetic ops */
991
992 INLINE_SIM_FPU (int)
993 sim_fpu_add (sim_fpu *f,
994 const sim_fpu *l,
995 const sim_fpu *r)
996 {
997 if (sim_fpu_is_snan (l))
998 {
999 *f = *l;
1000 f->class = sim_fpu_class_qnan;
1001 return sim_fpu_status_invalid_snan;
1002 }
1003 if (sim_fpu_is_snan (r))
1004 {
1005 *f = *r;
1006 f->class = sim_fpu_class_qnan;
1007 return sim_fpu_status_invalid_snan;
1008 }
1009 if (sim_fpu_is_qnan (l))
1010 {
1011 *f = *l;
1012 return 0;
1013 }
1014 if (sim_fpu_is_qnan (r))
1015 {
1016 *f = *r;
1017 return 0;
1018 }
1019 if (sim_fpu_is_infinity (l))
1020 {
1021 if (sim_fpu_is_infinity (r)
1022 && l->sign != r->sign)
1023 {
1024 *f = sim_fpu_qnan;
1025 return sim_fpu_status_invalid_isi;
1026 }
1027 *f = *l;
1028 return 0;
1029 }
1030 if (sim_fpu_is_infinity (r))
1031 {
1032 *f = *r;
1033 return 0;
1034 }
1035 if (sim_fpu_is_zero (l))
1036 {
1037 if (sim_fpu_is_zero (r))
1038 {
1039 *f = sim_fpu_zero;
1040 f->sign = l->sign & r->sign;
1041 }
1042 else
1043 *f = *r;
1044 return 0;
1045 }
1046 if (sim_fpu_is_zero (r))
1047 {
1048 *f = *l;
1049 return 0;
1050 }
1051 {
1052 int status = 0;
1053 int shift = l->normal_exp - r->normal_exp;
1054 unsigned64 lfraction;
1055 unsigned64 rfraction;
1056 /* use exp of larger */
1057 if (shift >= NR_FRAC_GUARD)
1058 {
1059 /* left has much bigger magnitude */
1060 *f = *l;
1061 return sim_fpu_status_inexact;
1062 }
1063 if (shift <= - NR_FRAC_GUARD)
1064 {
1065 /* right has much bigger magnitude */
1066 *f = *r;
1067 return sim_fpu_status_inexact;
1068 }
1069 lfraction = l->fraction;
1070 rfraction = r->fraction;
1071 if (shift > 0)
1072 {
1073 f->normal_exp = l->normal_exp;
1074 if (rfraction & LSMASK64 (shift - 1, 0))
1075 {
1076 status |= sim_fpu_status_inexact;
1077 rfraction |= LSBIT64 (shift); /* Stick LSBit. */
1078 }
1079 rfraction >>= shift;
1080 }
1081 else if (shift < 0)
1082 {
1083 f->normal_exp = r->normal_exp;
1084 if (lfraction & LSMASK64 (- shift - 1, 0))
1085 {
1086 status |= sim_fpu_status_inexact;
1087 lfraction |= LSBIT64 (- shift); /* Stick LSBit. */
1088 }
1089 lfraction >>= -shift;
1090 }
1091 else
1092 {
1093 f->normal_exp = r->normal_exp;
1094 }
1095
1096 /* Perform the addition. */
1097 if (l->sign)
1098 lfraction = - lfraction;
1099 if (r->sign)
1100 rfraction = - rfraction;
1101 f->fraction = lfraction + rfraction;
1102
1103 /* zero? */
1104 if (f->fraction == 0)
1105 {
1106 *f = sim_fpu_zero;
1107 return 0;
1108 }
1109
1110 /* sign? */
1111 f->class = sim_fpu_class_number;
1112 if (((signed64) f->fraction) >= 0)
1113 f->sign = 0;
1114 else
1115 {
1116 f->sign = 1;
1117 f->fraction = - f->fraction;
1118 }
1119
1120 /* Normalize it. */
1121 if ((f->fraction & IMPLICIT_2))
1122 {
1123 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1124 f->normal_exp ++;
1125 }
1126 else if (f->fraction < IMPLICIT_1)
1127 {
1128 do
1129 {
1130 f->fraction <<= 1;
1131 f->normal_exp --;
1132 }
1133 while (f->fraction < IMPLICIT_1);
1134 }
1135 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1136 return status;
1137 }
1138 }
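/* Editor's note: a usage sketch of sim_fpu_add; illustrative only, the
   function name is hypothetical.  */
#if 0
static void
example_add (void)
{
  sim_fpu sum;
  int status;
  status = sim_fpu_add (&sum, &sim_fpu_one, &sim_fpu_two);
  /* sum is exactly 3.0 (fraction 1.1 binary, normal_exp 1); status == 0.
     Adding operands whose exponents differ by NR_FRAC_GUARD or more
     instead returns the larger operand and sim_fpu_status_inexact.  */
}
#endif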
1139
1140
1141 INLINE_SIM_FPU (int)
1142 sim_fpu_sub (sim_fpu *f,
1143 const sim_fpu *l,
1144 const sim_fpu *r)
1145 {
1146 if (sim_fpu_is_snan (l))
1147 {
1148 *f = *l;
1149 f->class = sim_fpu_class_qnan;
1150 return sim_fpu_status_invalid_snan;
1151 }
1152 if (sim_fpu_is_snan (r))
1153 {
1154 *f = *r;
1155 f->class = sim_fpu_class_qnan;
1156 return sim_fpu_status_invalid_snan;
1157 }
1158 if (sim_fpu_is_qnan (l))
1159 {
1160 *f = *l;
1161 return 0;
1162 }
1163 if (sim_fpu_is_qnan (r))
1164 {
1165 *f = *r;
1166 return 0;
1167 }
1168 if (sim_fpu_is_infinity (l))
1169 {
1170 if (sim_fpu_is_infinity (r)
1171 && l->sign == r->sign)
1172 {
1173 *f = sim_fpu_qnan;
1174 return sim_fpu_status_invalid_isi;
1175 }
1176 *f = *l;
1177 return 0;
1178 }
1179 if (sim_fpu_is_infinity (r))
1180 {
1181 *f = *r;
1182 f->sign = !r->sign;
1183 return 0;
1184 }
1185 if (sim_fpu_is_zero (l))
1186 {
1187 if (sim_fpu_is_zero (r))
1188 {
1189 *f = sim_fpu_zero;
1190 f->sign = l->sign & !r->sign;
1191 }
1192 else
1193 {
1194 *f = *r;
1195 f->sign = !r->sign;
1196 }
1197 return 0;
1198 }
1199 if (sim_fpu_is_zero (r))
1200 {
1201 *f = *l;
1202 return 0;
1203 }
1204 {
1205 int status = 0;
1206 int shift = l->normal_exp - r->normal_exp;
1207 unsigned64 lfraction;
1208 unsigned64 rfraction;
1209 /* use exp of larger */
1210 if (shift >= NR_FRAC_GUARD)
1211 {
1212 /* left has much bigger magnitude */
1213 *f = *l;
1214 return sim_fpu_status_inexact;
1215 }
1216 if (shift <= - NR_FRAC_GUARD)
1217 {
1218 /* right has much bigger magnitude */
1219 *f = *r;
1220 f->sign = !r->sign;
1221 return sim_fpu_status_inexact;
1222 }
1223 lfraction = l->fraction;
1224 rfraction = r->fraction;
1225 if (shift > 0)
1226 {
1227 f->normal_exp = l->normal_exp;
1228 if (rfraction & LSMASK64 (shift - 1, 0))
1229 {
1230 status |= sim_fpu_status_inexact;
1231 rfraction |= LSBIT64 (shift); /* Stick LSBit. */
1232 }
1233 rfraction >>= shift;
1234 }
1235 else if (shift < 0)
1236 {
1237 f->normal_exp = r->normal_exp;
1238 if (lfraction & LSMASK64 (- shift - 1, 0))
1239 {
1240 status |= sim_fpu_status_inexact;
1241 lfraction |= LSBIT64 (- shift); /* Stick LSBit. */
1242 }
1243 lfraction >>= -shift;
1244 }
1245 else
1246 {
1247 f->normal_exp = r->normal_exp;
1248 }
1249
1250 /* Perform the subtraction. */
1251 if (l->sign)
1252 lfraction = - lfraction;
1253 if (!r->sign)
1254 rfraction = - rfraction;
1255 f->fraction = lfraction + rfraction;
1256
1257 /* zero? */
1258 if (f->fraction == 0)
1259 {
1260 *f = sim_fpu_zero;
1261 return 0;
1262 }
1263
1264 /* sign? */
1265 f->class = sim_fpu_class_number;
1266 if (((signed64) f->fraction) >= 0)
1267 f->sign = 0;
1268 else
1269 {
1270 f->sign = 1;
1271 f->fraction = - f->fraction;
1272 }
1273
1274 /* Normalize it. */
1275 if ((f->fraction & IMPLICIT_2))
1276 {
1277 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1278 f->normal_exp ++;
1279 }
1280 else if (f->fraction < IMPLICIT_1)
1281 {
1282 do
1283 {
1284 f->fraction <<= 1;
1285 f->normal_exp --;
1286 }
1287 while (f->fraction < IMPLICIT_1);
1288 }
1289 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1290 return status;
1291 }
1292 }
1293
1294
1295 INLINE_SIM_FPU (int)
1296 sim_fpu_mul (sim_fpu *f,
1297 const sim_fpu *l,
1298 const sim_fpu *r)
1299 {
1300 if (sim_fpu_is_snan (l))
1301 {
1302 *f = *l;
1303 f->class = sim_fpu_class_qnan;
1304 return sim_fpu_status_invalid_snan;
1305 }
1306 if (sim_fpu_is_snan (r))
1307 {
1308 *f = *r;
1309 f->class = sim_fpu_class_qnan;
1310 return sim_fpu_status_invalid_snan;
1311 }
1312 if (sim_fpu_is_qnan (l))
1313 {
1314 *f = *l;
1315 return 0;
1316 }
1317 if (sim_fpu_is_qnan (r))
1318 {
1319 *f = *r;
1320 return 0;
1321 }
1322 if (sim_fpu_is_infinity (l))
1323 {
1324 if (sim_fpu_is_zero (r))
1325 {
1326 *f = sim_fpu_qnan;
1327 return sim_fpu_status_invalid_imz;
1328 }
1329 *f = *l;
1330 f->sign = l->sign ^ r->sign;
1331 return 0;
1332 }
1333 if (sim_fpu_is_infinity (r))
1334 {
1335 if (sim_fpu_is_zero (l))
1336 {
1337 *f = sim_fpu_qnan;
1338 return sim_fpu_status_invalid_imz;
1339 }
1340 *f = *r;
1341 f->sign = l->sign ^ r->sign;
1342 return 0;
1343 }
1344 if (sim_fpu_is_zero (l) || sim_fpu_is_zero (r))
1345 {
1346 *f = sim_fpu_zero;
1347 f->sign = l->sign ^ r->sign;
1348 return 0;
1349 }
1350 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1351 128 bit number. */
1352 {
1353 unsigned64 low;
1354 unsigned64 high;
1355 unsigned64 nl = l->fraction & 0xffffffff;
1356 unsigned64 nh = l->fraction >> 32;
1357 unsigned64 ml = r->fraction & 0xffffffff;
1358 unsigned64 mh = r->fraction >>32;
1359 unsigned64 pp_ll = ml * nl;
1360 unsigned64 pp_hl = mh * nl;
1361 unsigned64 pp_lh = ml * nh;
1362 unsigned64 pp_hh = mh * nh;
1363 unsigned64 res2 = 0;
1364 unsigned64 res0 = 0;
1365 unsigned64 ps_hh__ = pp_hl + pp_lh;
1366 if (ps_hh__ < pp_hl)
1367 res2 += UNSIGNED64 (0x100000000);
1368 pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
1369 res0 = pp_ll + pp_hl;
1370 if (res0 < pp_ll)
1371 res2++;
1372 res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1373 high = res2;
1374 low = res0;
1375
1376 f->normal_exp = l->normal_exp + r->normal_exp;
1377 f->sign = l->sign ^ r->sign;
1378 f->class = sim_fpu_class_number;
1379
1380 /* Input is bounded by [1,2) ; [2^60,2^61)
1381 Output is bounded by [1,4) ; [2^120,2^122) */
1382
1383 /* Adjust the exponent according to where the binary point ended
1384 up in the high 64 bit word. In the source the binary point
1385 was at NR_FRAC_GUARD. */
1386 f->normal_exp += NR_FRAC_GUARD + 64 - (NR_FRAC_GUARD * 2);
1387
1388 /* The high word is bounded according to the above. Consequently
1389 it has never overflowed into IMPLICIT_2. */
1390 ASSERT (high < LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64));
1391 ASSERT (high >= LSBIT64 ((NR_FRAC_GUARD * 2) - 64));
1392 ASSERT (LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64) < IMPLICIT_1);
1393
1394 /* Normalize. */
1395 do
1396 {
1397 f->normal_exp--;
1398 high <<= 1;
1399 if (low & LSBIT64 (63))
1400 high |= 1;
1401 low <<= 1;
1402 }
1403 while (high < IMPLICIT_1);
1404
1405 ASSERT (high >= IMPLICIT_1 && high < IMPLICIT_2);
1406 if (low != 0)
1407 {
1408 f->fraction = (high | 1); /* sticky */
1409 return sim_fpu_status_inexact;
1410 }
1411 else
1412 {
1413 f->fraction = high;
1414 return 0;
1415 }
1416 return 0;
1417 }
1418 }
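/* Editor's note: the 128 bit product above is the classic four
   partial-product schoolbook scheme.  A standalone sketch of the same
   idea (hypothetical helper, not used by this file):  */
#if 0
static void
mul64to128 (unsigned64 *hi, unsigned64 *lo, unsigned64 a, unsigned64 b)
{
  unsigned64 al = a & 0xffffffff, ah = a >> 32;
  unsigned64 bl = b & 0xffffffff, bh = b >> 32;
  unsigned64 ll = al * bl;			/* bits   0..63  */
  unsigned64 lh = al * bh;			/* bits  32..95  */
  unsigned64 hl = ah * bl;			/* bits  32..95  */
  unsigned64 hh = ah * bh;			/* bits 64..127  */
  unsigned64 mid = (ll >> 32) + (lh & 0xffffffff) + (hl & 0xffffffff);
  *lo = (mid << 32) | (ll & 0xffffffff);
  *hi = hh + (lh >> 32) + (hl >> 32) + (mid >> 32);
}
#endif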
1419
1420 INLINE_SIM_FPU (int)
1421 sim_fpu_div (sim_fpu *f,
1422 const sim_fpu *l,
1423 const sim_fpu *r)
1424 {
1425 if (sim_fpu_is_snan (l))
1426 {
1427 *f = *l;
1428 f->class = sim_fpu_class_qnan;
1429 return sim_fpu_status_invalid_snan;
1430 }
1431 if (sim_fpu_is_snan (r))
1432 {
1433 *f = *r;
1434 f->class = sim_fpu_class_qnan;
1435 return sim_fpu_status_invalid_snan;
1436 }
1437 if (sim_fpu_is_qnan (l))
1438 {
1439 *f = *l;
1440 f->class = sim_fpu_class_qnan;
1441 return 0;
1442 }
1443 if (sim_fpu_is_qnan (r))
1444 {
1445 *f = *r;
1446 f->class = sim_fpu_class_qnan;
1447 return 0;
1448 }
1449 if (sim_fpu_is_infinity (l))
1450 {
1451 if (sim_fpu_is_infinity (r))
1452 {
1453 *f = sim_fpu_qnan;
1454 return sim_fpu_status_invalid_idi;
1455 }
1456 else
1457 {
1458 *f = *l;
1459 f->sign = l->sign ^ r->sign;
1460 return 0;
1461 }
1462 }
1463 if (sim_fpu_is_zero (l))
1464 {
1465 if (sim_fpu_is_zero (r))
1466 {
1467 *f = sim_fpu_qnan;
1468 return sim_fpu_status_invalid_zdz;
1469 }
1470 else
1471 {
1472 *f = *l;
1473 f->sign = l->sign ^ r->sign;
1474 return 0;
1475 }
1476 }
1477 if (sim_fpu_is_infinity (r))
1478 {
1479 *f = sim_fpu_zero;
1480 f->sign = l->sign ^ r->sign;
1481 return 0;
1482 }
1483 if (sim_fpu_is_zero (r))
1484 {
1485 f->class = sim_fpu_class_infinity;
1486 f->sign = l->sign ^ r->sign;
1487 return sim_fpu_status_invalid_div0;
1488 }
1489
1490 /* Calculate the quotient's mantissa by dividing the two 64 bit
1491 fractions bit by bit. */
1492 {
1493 /* quotient = (numerator / denominator)
1494 x 2^(numerator exponent - denominator exponent)
1495 */
1496 unsigned64 numerator;
1497 unsigned64 denominator;
1498 unsigned64 quotient;
1499 unsigned64 bit;
1500
1501 f->class = sim_fpu_class_number;
1502 f->sign = l->sign ^ r->sign;
1503 f->normal_exp = l->normal_exp - r->normal_exp;
1504
1505 numerator = l->fraction;
1506 denominator = r->fraction;
1507
1508 /* Fraction will be less than 1.0 */
1509 if (numerator < denominator)
1510 {
1511 numerator <<= 1;
1512 f->normal_exp--;
1513 }
1514 ASSERT (numerator >= denominator);
1515
1516 /* Gain extra precision, already used one spare bit. */
1517 numerator <<= NR_SPARE;
1518 denominator <<= NR_SPARE;
1519
1520 /* Divide one bit at a time. Optimize??? */
1521 quotient = 0;
1522 bit = (IMPLICIT_1 << NR_SPARE);
1523 while (bit)
1524 {
1525 if (numerator >= denominator)
1526 {
1527 quotient |= bit;
1528 numerator -= denominator;
1529 }
1530 bit >>= 1;
1531 numerator <<= 1;
1532 }
1533
1534 /* Discard (but save) the extra bits. */
1535 if ((quotient & LSMASK64 (NR_SPARE -1, 0)))
1536 quotient = (quotient >> NR_SPARE) | 1;
1537 else
1538 quotient = (quotient >> NR_SPARE);
1539
1540 f->fraction = quotient;
1541 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1542 if (numerator != 0)
1543 {
1544 f->fraction |= 1; /* Stick remaining bits. */
1545 return sim_fpu_status_inexact;
1546 }
1547 else
1548 return 0;
1549 }
1550 }
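/* Editor's note: a usage sketch of sim_fpu_div; illustrative only, the
   function name is hypothetical.  */
#if 0
static void
example_div (void)
{
  sim_fpu three, third;
  int status;
  sim_fpu_i32to (&three, 3, sim_fpu_round_near);
  status = sim_fpu_div (&third, &sim_fpu_one, &three);
  /* 1/3 is not exactly representable, so status has
     sim_fpu_status_inexact set and the sticky LSB of the fraction is 1;
     a subsequent sim_fpu_round_32/64 call finishes the rounding.  */
}
#endif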
1551
1552
1553 INLINE_SIM_FPU (int)
1554 sim_fpu_max (sim_fpu *f,
1555 const sim_fpu *l,
1556 const sim_fpu *r)
1557 {
1558 if (sim_fpu_is_snan (l))
1559 {
1560 *f = *l;
1561 f->class = sim_fpu_class_qnan;
1562 return sim_fpu_status_invalid_snan;
1563 }
1564 if (sim_fpu_is_snan (r))
1565 {
1566 *f = *r;
1567 f->class = sim_fpu_class_qnan;
1568 return sim_fpu_status_invalid_snan;
1569 }
1570 if (sim_fpu_is_qnan (l))
1571 {
1572 *f = *l;
1573 return 0;
1574 }
1575 if (sim_fpu_is_qnan (r))
1576 {
1577 *f = *r;
1578 return 0;
1579 }
1580 if (sim_fpu_is_infinity (l))
1581 {
1582 if (sim_fpu_is_infinity (r)
1583 && l->sign == r->sign)
1584 {
1585 *f = sim_fpu_qnan;
1586 return sim_fpu_status_invalid_isi;
1587 }
1588 if (l->sign)
1589 *f = *r; /* -inf < anything */
1590 else
1591 *f = *l; /* +inf > anything */
1592 return 0;
1593 }
1594 if (sim_fpu_is_infinity (r))
1595 {
1596 if (r->sign)
1597 *f = *l; /* anything > -inf */
1598 else
1599 *f = *r; /* anything < +inf */
1600 return 0;
1601 }
1602 if (l->sign > r->sign)
1603 {
1604 *f = *r; /* -ve < +ve */
1605 return 0;
1606 }
1607 if (l->sign < r->sign)
1608 {
1609 *f = *l; /* +ve > -ve */
1610 return 0;
1611 }
1612 ASSERT (l->sign == r->sign);
1613 if (l->normal_exp > r->normal_exp
1614 || (l->normal_exp == r->normal_exp
1615 && l->fraction > r->fraction))
1616 {
1617 /* |l| > |r| */
1618 if (l->sign)
1619 *f = *r; /* -ve < -ve */
1620 else
1621 *f = *l; /* +ve > +ve */
1622 return 0;
1623 }
1624 else
1625 {
1626 /* |l| <= |r| */
1627 if (l->sign)
1628 *f = *l; /* -ve > -ve */
1629 else
1630 *f = *r; /* +ve < +ve */
1631 return 0;
1632 }
1633 }
1634
1635
1636 INLINE_SIM_FPU (int)
1637 sim_fpu_min (sim_fpu *f,
1638 const sim_fpu *l,
1639 const sim_fpu *r)
1640 {
1641 if (sim_fpu_is_snan (l))
1642 {
1643 *f = *l;
1644 f->class = sim_fpu_class_qnan;
1645 return sim_fpu_status_invalid_snan;
1646 }
1647 if (sim_fpu_is_snan (r))
1648 {
1649 *f = *r;
1650 f->class = sim_fpu_class_qnan;
1651 return sim_fpu_status_invalid_snan;
1652 }
1653 if (sim_fpu_is_qnan (l))
1654 {
1655 *f = *l;
1656 return 0;
1657 }
1658 if (sim_fpu_is_qnan (r))
1659 {
1660 *f = *r;
1661 return 0;
1662 }
1663 if (sim_fpu_is_infinity (l))
1664 {
1665 if (sim_fpu_is_infinity (r)
1666 && l->sign == r->sign)
1667 {
1668 *f = sim_fpu_qnan;
1669 return sim_fpu_status_invalid_isi;
1670 }
1671 if (l->sign)
1672 *f = *l; /* -inf < anything */
1673 else
1674 *f = *r; /* +inf > anything */
1675 return 0;
1676 }
1677 if (sim_fpu_is_infinity (r))
1678 {
1679 if (r->sign)
1680 *f = *r; /* anything > -inf */
1681 else
1682 *f = *l; /* anything < +inf */
1683 return 0;
1684 }
1685 if (l->sign > r->sign)
1686 {
1687 *f = *l; /* -ve < +ve */
1688 return 0;
1689 }
1690 if (l->sign < r->sign)
1691 {
1692 *f = *r; /* +ve > -ve */
1693 return 0;
1694 }
1695 ASSERT (l->sign == r->sign);
1696 if (l->normal_exp > r->normal_exp
1697 || (l->normal_exp == r->normal_exp
1698 && l->fraction > r->fraction))
1699 {
1700 /* |l| > |r| */
1701 if (l->sign)
1702 *f = *l; /* -ve < -ve */
1703 else
1704 *f = *r; /* +ve > +ve */
1705 return 0;
1706 }
1707 else
1708 {
1709 /* |l| <= |r| */
1710 if (l->sign)
1711 *f = *r; /* -ve > -ve */
1712 else
1713 *f = *l; /* +ve < +ve */
1714 return 0;
1715 }
1716 }
1717
1718
1719 INLINE_SIM_FPU (int)
1720 sim_fpu_neg (sim_fpu *f,
1721 const sim_fpu *r)
1722 {
1723 if (sim_fpu_is_snan (r))
1724 {
1725 *f = *r;
1726 f->class = sim_fpu_class_qnan;
1727 return sim_fpu_status_invalid_snan;
1728 }
1729 if (sim_fpu_is_qnan (r))
1730 {
1731 *f = *r;
1732 return 0;
1733 }
1734 *f = *r;
1735 f->sign = !r->sign;
1736 return 0;
1737 }
1738
1739
1740 INLINE_SIM_FPU (int)
1741 sim_fpu_abs (sim_fpu *f,
1742 const sim_fpu *r)
1743 {
1744 *f = *r;
1745 f->sign = 0;
1746 if (sim_fpu_is_snan (r))
1747 {
1748 f->class = sim_fpu_class_qnan;
1749 return sim_fpu_status_invalid_snan;
1750 }
1751 return 0;
1752 }
1753
1754
1755 INLINE_SIM_FPU (int)
1756 sim_fpu_inv (sim_fpu *f,
1757 const sim_fpu *r)
1758 {
1759 return sim_fpu_div (f, &sim_fpu_one, r);
1760 }
1761
1762
1763 INLINE_SIM_FPU (int)
1764 sim_fpu_sqrt (sim_fpu *f,
1765 const sim_fpu *r)
1766 {
1767 if (sim_fpu_is_snan (r))
1768 {
1769 *f = sim_fpu_qnan;
1770 return sim_fpu_status_invalid_snan;
1771 }
1772 if (sim_fpu_is_qnan (r))
1773 {
1774 *f = sim_fpu_qnan;
1775 return 0;
1776 }
1777 if (sim_fpu_is_zero (r))
1778 {
1779 f->class = sim_fpu_class_zero;
1780 f->sign = r->sign;
1781 f->normal_exp = 0;
1782 return 0;
1783 }
1784 if (sim_fpu_is_infinity (r))
1785 {
1786 if (r->sign)
1787 {
1788 *f = sim_fpu_qnan;
1789 return sim_fpu_status_invalid_sqrt;
1790 }
1791 else
1792 {
1793 f->class = sim_fpu_class_infinity;
1794 f->sign = 0;
1796 return 0;
1797 }
1798 }
1799 if (r->sign)
1800 {
1801 *f = sim_fpu_qnan;
1802 return sim_fpu_status_invalid_sqrt;
1803 }
1804
1805 /* @(#)e_sqrt.c 5.1 93/09/24 */
1806 /*
1807 * ====================================================
1808 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1809 *
1810 * Developed at SunPro, a Sun Microsystems, Inc. business.
1811 * Permission to use, copy, modify, and distribute this
1812 * software is freely granted, provided that this notice
1813 * is preserved.
1814 * ====================================================
1815 */
1816
1817 /* __ieee754_sqrt(x)
1818 * Return correctly rounded sqrt.
1819 * ------------------------------------------
1820 * | Use the hardware sqrt if you have one |
1821 * ------------------------------------------
1822 * Method:
1823 * Bit by bit method using integer arithmetic. (Slow, but portable)
1824 * 1. Normalization
1825 * Scale x to y in [1,4) with even powers of 2:
1826 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1827 * sqrt(x) = 2^k * sqrt(y)
1828 -
1829 - Since:
1830 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1831 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1832 - Define:
1833 - y = ((m even) ? x : 2.x)
1834 - Then:
1835 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1836 - And:
1837 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1838 -
1839 * 2. Bit by bit computation
1840 * Let q[i] = sqrt(y) truncated to i bits after the binary point
1841 * (q[0] = 1),
1842 *
1843 * s[i] = 2*q[i], and y[i] = 2^(i+1) * ( y - q[i]^2 ). (1)
1844 *
1845 * To compute q[i+1] from q[i], one checks whether
1846 *
1847 * (q[i] + 2^-(i+1))^2 <= y. (2)
1848 *
1849 * If (2) is false, then q[i+1] = q[i]; otherwise q[i+1] = q[i] + 2^-(i+1).
1850 *
1851 * With some algebraic manipulation, it is not difficult to see
1852 * that (2) is equivalent to
1853 *
1854 * s[i] + 2^-(i+1) <= y[i]. (3)
1855 *
1856 * The advantage of (3) is that s[i] and y[i] can be computed by
1857 * the following recurrence formula:
1858 * if (3) is false
1859 *
1860 * s[i+1] = s[i], y[i+1] = y[i]; (4)
1861 *
1862 -
1863 - NOTE: y[i+1] = 2*y[i]
1864 -
1865 * otherwise,
1866 *
1867 * s[i+1] = s[i] + 2^-i, y[i+1] = y[i] - s[i] - 2^-(i+1). (5)
1868 *
1869 -
1870 - NOTE: y[i+1] = 2*( y[i] - s[i] - 2^-(i+1) )
1871 -
1872 * One may easily use induction to prove (4) and (5).
1885 * Note. Since the left hand side of (3) contains only i+2 bits,
1886 * it is not necessary to do a full (53-bit) comparison
1887 * in (3).
1888 * 3. Final rounding
1889 * After generating the 53 bits result, we compute one more bit.
1890 * Together with the remainder, we can decide whether the
1891 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
1892 * (it will never equal to 1/2ulp).
1893 * The rounding mode can be detected by checking whether
1894 * huge + tiny is equal to huge, and whether huge - tiny is
1895 * equal to huge for some floating point number "huge" and "tiny".
1896 *
1897 * Special cases:
1898 * sqrt(+-0) = +-0 ... exact
1899 * sqrt(inf) = inf
1900 * sqrt(-ve) = NaN ... with invalid signal
1901 * sqrt(NaN) = NaN ... with invalid signal for signalling NaN
1902 *
1903 * Other methods : see the appended file at the end of the program below.
1904 *---------------
1905 */
1906
1907 {
1908 /* Generate sqrt(x) bit by bit. */
1909 unsigned64 y;
1910 unsigned64 q;
1911 unsigned64 s;
1912 unsigned64 b;
1913
1914 f->class = sim_fpu_class_number;
1915 f->sign = 0;
1916 y = r->fraction;
1917 f->normal_exp = (r->normal_exp >> 1); /* exp = [exp/2] */
1918
1919 /* Odd exp, double x to make it even. */
1920 ASSERT (y >= IMPLICIT_1 && y < IMPLICIT_4);
1921 if ((r->normal_exp & 1))
1922 {
1923 y += y;
1924 }
1925 ASSERT (y >= IMPLICIT_1 && y < (IMPLICIT_2 << 1));
1926
1927 /* Let loop determine first value of s (either 1 or 2) */
1928 b = IMPLICIT_1;
1929 q = 0;
1930 s = 0;
1931
1932 while (b)
1933 {
1934 unsigned64 t = s + b;
1935 if (t <= y)
1936 {
1937 s |= (b << 1);
1938 y -= t;
1939 q |= b;
1940 }
1941 y <<= 1;
1942 b >>= 1;
1943 }
1944
1945 ASSERT (q >= IMPLICIT_1 && q < IMPLICIT_2);
1946 f->fraction = q;
1947 if (y != 0)
1948 {
1949 f->fraction |= 1; /* Stick remaining bits. */
1950 return sim_fpu_status_inexact;
1951 }
1952 else
1953 return 0;
1954 }
1955 }
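/* Editor's note: a usage sketch of sim_fpu_sqrt; illustrative only, the
   function name is hypothetical.  */
#if 0
static void
example_sqrt (void)
{
  sim_fpu root;
  int status;
  status = sim_fpu_sqrt (&root, &sim_fpu_two);
  status |= sim_fpu_round_64 (&root, sim_fpu_round_near,
			      sim_fpu_denorm_underflow_inexact);
  /* root now holds sqrt(2) rounded to double precision;
     sim_fpu_status_inexact is set since sqrt(2) is irrational.  */
}
#endif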
1956
1957
1958 /* int/long <-> sim_fpu */
1959
1960 INLINE_SIM_FPU (int)
1961 sim_fpu_i32to (sim_fpu *f,
1962 signed32 i,
1963 sim_fpu_round round)
1964 {
1965 i2fpu (f, i, 0);
1966 return 0;
1967 }
1968
1969 INLINE_SIM_FPU (int)
1970 sim_fpu_u32to (sim_fpu *f,
1971 unsigned32 u,
1972 sim_fpu_round round)
1973 {
1974 u2fpu (f, u, 0);
1975 return 0;
1976 }
1977
1978 INLINE_SIM_FPU (int)
1979 sim_fpu_i64to (sim_fpu *f,
1980 signed64 i,
1981 sim_fpu_round round)
1982 {
1983 i2fpu (f, i, 1);
1984 return 0;
1985 }
1986
1987 INLINE_SIM_FPU (int)
1988 sim_fpu_u64to (sim_fpu *f,
1989 unsigned64 u,
1990 sim_fpu_round round)
1991 {
1992 u2fpu (f, u, 1);
1993 return 0;
1994 }
1995
1996
1997 INLINE_SIM_FPU (int)
1998 sim_fpu_to32i (signed32 *i,
1999 const sim_fpu *f,
2000 sim_fpu_round round)
2001 {
2002 signed64 i64;
2003 int status = fpu2i (&i64, f, 0, round);
2004 *i = i64;
2005 return status;
2006 }
2007
2008 INLINE_SIM_FPU (int)
2009 sim_fpu_to32u (unsigned32 *u,
2010 const sim_fpu *f,
2011 sim_fpu_round round)
2012 {
2013 unsigned64 u64;
2014 int status = fpu2u (&u64, f, 0);
2015 *u = u64;
2016 return status;
2017 }
2018
2019 INLINE_SIM_FPU (int)
2020 sim_fpu_to64i (signed64 *i,
2021 const sim_fpu *f,
2022 sim_fpu_round round)
2023 {
2024 return fpu2i (i, f, 1, round);
2025 }
2026
2027
2028 INLINE_SIM_FPU (int)
2029 sim_fpu_to64u (unsigned64 *u,
2030 const sim_fpu *f,
2031 sim_fpu_round round)
2032 {
2033 return fpu2u (u, f, 1);
2034 }
2035
2036
2037
2038 /* sim_fpu -> host format */
2039
2040 #if 0
2041 INLINE_SIM_FPU (float)
2042 sim_fpu_2f (const sim_fpu *f)
2043 {
2044 return fval.d;
2045 }
2046 #endif
2047
2048
2049 INLINE_SIM_FPU (double)
2050 sim_fpu_2d (const sim_fpu *s)
2051 {
2052 sim_fpu_map val;
2053 if (sim_fpu_is_snan (s))
2054 {
2055 /* gag SNaN's */
2056 sim_fpu n = *s;
2057 n.class = sim_fpu_class_qnan;
2058 val.i = pack_fpu (&n, 1);
2059 }
2060 else
2061 {
2062 val.i = pack_fpu (s, 1);
2063 }
2064 return val.d;
2065 }
2066
2067
2068 #if 0
2069 INLINE_SIM_FPU (void)
2070 sim_fpu_f2 (sim_fpu *f,
2071 float s)
2072 {
2073 sim_fpu_map val;
2074 val.d = s;
2075 unpack_fpu (f, val.i, 1);
2076 }
2077 #endif
2078
2079
2080 INLINE_SIM_FPU (void)
2081 sim_fpu_d2 (sim_fpu *f,
2082 double d)
2083 {
2084 sim_fpu_map val;
2085 val.d = d;
2086 unpack_fpu (f, val.i, 1);
2087 }
2088
2089
2090 /* General */
2091
2092 INLINE_SIM_FPU (int)
2093 sim_fpu_is_nan (const sim_fpu *d)
2094 {
2095 switch (d->class)
2096 {
2097 case sim_fpu_class_qnan:
2098 case sim_fpu_class_snan:
2099 return 1;
2100 default:
2101 return 0;
2102 }
2103 }
2104
2105 INLINE_SIM_FPU (int)
2106 sim_fpu_is_qnan (const sim_fpu *d)
2107 {
2108 switch (d->class)
2109 {
2110 case sim_fpu_class_qnan:
2111 return 1;
2112 default:
2113 return 0;
2114 }
2115 }
2116
2117 INLINE_SIM_FPU (int)
2118 sim_fpu_is_snan (const sim_fpu *d)
2119 {
2120 switch (d->class)
2121 {
2122 case sim_fpu_class_snan:
2123 return 1;
2124 default:
2125 return 0;
2126 }
2127 }
2128
2129 INLINE_SIM_FPU (int)
2130 sim_fpu_is_zero (const sim_fpu *d)
2131 {
2132 switch (d->class)
2133 {
2134 case sim_fpu_class_zero:
2135 return 1;
2136 default:
2137 return 0;
2138 }
2139 }
2140
2141 INLINE_SIM_FPU (int)
2142 sim_fpu_is_infinity (const sim_fpu *d)
2143 {
2144 switch (d->class)
2145 {
2146 case sim_fpu_class_infinity:
2147 return 1;
2148 default:
2149 return 0;
2150 }
2151 }
2152
2153 INLINE_SIM_FPU (int)
2154 sim_fpu_is_number (const sim_fpu *d)
2155 {
2156 switch (d->class)
2157 {
2158 case sim_fpu_class_denorm:
2159 case sim_fpu_class_number:
2160 return 1;
2161 default:
2162 return 0;
2163 }
2164 }
2165
2166 INLINE_SIM_FPU (int)
2167 sim_fpu_is_denorm (const sim_fpu *d)
2168 {
2169 switch (d->class)
2170 {
2171 case sim_fpu_class_denorm:
2172 return 1;
2173 default:
2174 return 0;
2175 }
2176 }
2177
2178
2179 INLINE_SIM_FPU (int)
2180 sim_fpu_sign (const sim_fpu *d)
2181 {
2182 return d->sign;
2183 }
2184
2185
2186 INLINE_SIM_FPU (int)
2187 sim_fpu_exp (const sim_fpu *d)
2188 {
2189 return d->normal_exp;
2190 }
2191
2192
2193 INLINE_SIM_FPU (unsigned64)
2194 sim_fpu_fraction (const sim_fpu *d)
2195 {
2196 return d->fraction;
2197 }
2198
2199
2200 INLINE_SIM_FPU (unsigned64)
2201 sim_fpu_guard (const sim_fpu *d, int is_double)
2202 {
2203 unsigned64 rv;
2204 unsigned64 guardmask = LSMASK64 (NR_GUARDS - 1, 0);
2205 rv = (d->fraction & guardmask) >> NR_PAD;
2206 return rv;
2207 }
2208
2209
2210 INLINE_SIM_FPU (int)
2211 sim_fpu_is (const sim_fpu *d)
2212 {
2213 switch (d->class)
2214 {
2215 case sim_fpu_class_qnan:
2216 return SIM_FPU_IS_QNAN;
2217 case sim_fpu_class_snan:
2218 return SIM_FPU_IS_SNAN;
2219 case sim_fpu_class_infinity:
2220 if (d->sign)
2221 return SIM_FPU_IS_NINF;
2222 else
2223 return SIM_FPU_IS_PINF;
2224 case sim_fpu_class_number:
2225 if (d->sign)
2226 return SIM_FPU_IS_NNUMBER;
2227 else
2228 return SIM_FPU_IS_PNUMBER;
2229 case sim_fpu_class_denorm:
2230 if (d->sign)
2231 return SIM_FPU_IS_NDENORM;
2232 else
2233 return SIM_FPU_IS_PDENORM;
2234 case sim_fpu_class_zero:
2235 if (d->sign)
2236 return SIM_FPU_IS_NZERO;
2237 else
2238 return SIM_FPU_IS_PZERO;
2239 default:
2240 return -1;
2241 abort ();
2242 }
2243 }
2244
2245 INLINE_SIM_FPU (int)
2246 sim_fpu_cmp (const sim_fpu *l, const sim_fpu *r)
2247 {
2248 sim_fpu res;
2249 sim_fpu_sub (&res, l, r);
2250 return sim_fpu_is (&res);
2251 }
2252
2253 INLINE_SIM_FPU (int)
2254 sim_fpu_is_lt (const sim_fpu *l, const sim_fpu *r)
2255 {
2256 int status;
2257 sim_fpu_lt (&status, l, r);
2258 return status;
2259 }
2260
2261 INLINE_SIM_FPU (int)
2262 sim_fpu_is_le (const sim_fpu *l, const sim_fpu *r)
2263 {
2264 int is;
2265 sim_fpu_le (&is, l, r);
2266 return is;
2267 }
2268
2269 INLINE_SIM_FPU (int)
2270 sim_fpu_is_eq (const sim_fpu *l, const sim_fpu *r)
2271 {
2272 int is;
2273 sim_fpu_eq (&is, l, r);
2274 return is;
2275 }
2276
2277 INLINE_SIM_FPU (int)
2278 sim_fpu_is_ne (const sim_fpu *l, const sim_fpu *r)
2279 {
2280 int is;
2281 sim_fpu_ne (&is, l, r);
2282 return is;
2283 }
2284
2285 INLINE_SIM_FPU (int)
2286 sim_fpu_is_ge (const sim_fpu *l, const sim_fpu *r)
2287 {
2288 int is;
2289 sim_fpu_ge (&is, l, r);
2290 return is;
2291 }
2292
2293 INLINE_SIM_FPU (int)
2294 sim_fpu_is_gt (const sim_fpu *l, const sim_fpu *r)
2295 {
2296 int is;
2297 sim_fpu_gt (&is, l, r);
2298 return is;
2299 }
2300
2301
2302 /* Compare operators */
2303
2304 INLINE_SIM_FPU (int)
2305 sim_fpu_lt (int *is,
2306 const sim_fpu *l,
2307 const sim_fpu *r)
2308 {
2309 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2310 {
2311 sim_fpu_map lval;
2312 sim_fpu_map rval;
2313 lval.i = pack_fpu (l, 1);
2314 rval.i = pack_fpu (r, 1);
2315 (*is) = (lval.d < rval.d);
2316 return 0;
2317 }
2318 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2319 {
2320 *is = 0;
2321 return sim_fpu_status_invalid_snan;
2322 }
2323 else
2324 {
2325 *is = 0;
2326 return sim_fpu_status_invalid_qnan;
2327 }
2328 }
2329
2330 INLINE_SIM_FPU (int)
2331 sim_fpu_le (int *is,
2332 const sim_fpu *l,
2333 const sim_fpu *r)
2334 {
2335 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2336 {
2337 sim_fpu_map lval;
2338 sim_fpu_map rval;
2339 lval.i = pack_fpu (l, 1);
2340 rval.i = pack_fpu (r, 1);
2341 *is = (lval.d <= rval.d);
2342 return 0;
2343 }
2344 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2345 {
2346 *is = 0;
2347 return sim_fpu_status_invalid_snan;
2348 }
2349 else
2350 {
2351 *is = 0;
2352 return sim_fpu_status_invalid_qnan;
2353 }
2354 }
2355
2356 INLINE_SIM_FPU (int)
2357 sim_fpu_eq (int *is,
2358 const sim_fpu *l,
2359 const sim_fpu *r)
2360 {
2361 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2362 {
2363 sim_fpu_map lval;
2364 sim_fpu_map rval;
2365 lval.i = pack_fpu (l, 1);
2366 rval.i = pack_fpu (r, 1);
2367 (*is) = (lval.d == rval.d);
2368 return 0;
2369 }
2370 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2371 {
2372 *is = 0;
2373 return sim_fpu_status_invalid_snan;
2374 }
2375 else
2376 {
2377 *is = 0;
2378 return sim_fpu_status_invalid_qnan;
2379 }
2380 }
2381
2382 INLINE_SIM_FPU (int)
2383 sim_fpu_ne (int *is,
2384 const sim_fpu *l,
2385 const sim_fpu *r)
2386 {
2387 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2388 {
2389 sim_fpu_map lval;
2390 sim_fpu_map rval;
2391 lval.i = pack_fpu (l, 1);
2392 rval.i = pack_fpu (r, 1);
2393 (*is) = (lval.d != rval.d);
2394 return 0;
2395 }
2396 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2397 {
2398 *is = 0;
2399 return sim_fpu_status_invalid_snan;
2400 }
2401 else
2402 {
2403 *is = 0;
2404 return sim_fpu_status_invalid_qnan;
2405 }
2406 }
2407
2408 INLINE_SIM_FPU (int)
2409 sim_fpu_ge (int *is,
2410 const sim_fpu *l,
2411 const sim_fpu *r)
2412 {
2413 return sim_fpu_le (is, r, l);
2414 }
2415
2416 INLINE_SIM_FPU (int)
2417 sim_fpu_gt (int *is,
2418 const sim_fpu *l,
2419 const sim_fpu *r)
2420 {
2421 return sim_fpu_lt (is, r, l);
2422 }
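/* Editor's note: a usage sketch of the comparison operators above;
   illustrative only, the function name is hypothetical.  Comparisons
   involving NaNs do not order the operands, they only report an
   invalid-operation status.  */
#if 0
static void
example_compare (void)
{
  int is, status;
  status = sim_fpu_lt (&is, &sim_fpu_one, &sim_fpu_two);
  /* is == 1, status == 0 */
  status = sim_fpu_lt (&is, &sim_fpu_qnan, &sim_fpu_one);
  /* is == 0, status == sim_fpu_status_invalid_qnan */
}
#endif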
2423
2424
2425 /* A number of useful constants */
2426
2427 #if EXTERN_SIM_FPU_P
2428 const sim_fpu sim_fpu_zero = {
2429 sim_fpu_class_zero, 0, 0, 0
2430 };
2431 const sim_fpu sim_fpu_qnan = {
2432 sim_fpu_class_qnan, 0, 0, 0
2433 };
2434 const sim_fpu sim_fpu_one = {
2435 sim_fpu_class_number, 0, IMPLICIT_1, 0
2436 };
2437 const sim_fpu sim_fpu_two = {
2438 sim_fpu_class_number, 0, IMPLICIT_1, 1
2439 };
2440 const sim_fpu sim_fpu_max32 = {
2441 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS32), NORMAL_EXPMAX32
2442 };
2443 const sim_fpu sim_fpu_max64 = {
2444 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS64), NORMAL_EXPMAX64
2445 };
2446 #endif
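/* Editor's note (illustration): read with the unpacked format above,
   sim_fpu_one is 1.0 x 2^0, sim_fpu_two is 1.0 x 2^1 = 2.0, and
   sim_fpu_max32/sim_fpu_max64 have an all-ones fraction with the
   largest normal exponent, i.e. the largest finite single/double
   values (roughly 3.4e38 and 1.8e308).  */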
2447
2448
2449 /* For debugging */
2450
2451 INLINE_SIM_FPU (void)
2452 sim_fpu_print_fpu (const sim_fpu *f,
2453 sim_fpu_print_func *print,
2454 void *arg)
2455 {
2456 sim_fpu_printn_fpu (f, print, -1, arg);
2457 }
2458
2459 INLINE_SIM_FPU (void)
2460 sim_fpu_printn_fpu (const sim_fpu *f,
2461 sim_fpu_print_func *print,
2462 int digits,
2463 void *arg)
2464 {
2465 print (arg, "%s", f->sign ? "-" : "+");
2466 switch (f->class)
2467 {
2468 case sim_fpu_class_qnan:
2469 print (arg, "0.");
2470 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2471 print (arg, "*QuietNaN");
2472 break;
2473 case sim_fpu_class_snan:
2474 print (arg, "0.");
2475 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2476 print (arg, "*SignalNaN");
2477 break;
2478 case sim_fpu_class_zero:
2479 print (arg, "0.0");
2480 break;
2481 case sim_fpu_class_infinity:
2482 print (arg, "INF");
2483 break;
2484 case sim_fpu_class_number:
2485 case sim_fpu_class_denorm:
2486 print (arg, "1.");
2487 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2488 print (arg, "*2^%+d", f->normal_exp);
2489 ASSERT (f->fraction >= IMPLICIT_1);
2490 ASSERT (f->fraction < IMPLICIT_2);
2491 }
2492 }
2493
2494
2495 INLINE_SIM_FPU (void)
2496 sim_fpu_print_status (int status,
2497 sim_fpu_print_func *print,
2498 void *arg)
2499 {
2500 int i = 1;
2501 const char *prefix = "";
2502 while (status >= i)
2503 {
2504 switch ((sim_fpu_status) (status & i))
2505 {
2506 case sim_fpu_status_denorm:
2507 print (arg, "%sD", prefix);
2508 break;
2509 case sim_fpu_status_invalid_snan:
2510 print (arg, "%sSNaN", prefix);
2511 break;
2512 case sim_fpu_status_invalid_qnan:
2513 print (arg, "%sQNaN", prefix);
2514 break;
2515 case sim_fpu_status_invalid_isi:
2516 print (arg, "%sISI", prefix);
2517 break;
2518 case sim_fpu_status_invalid_idi:
2519 print (arg, "%sIDI", prefix);
2520 break;
2521 case sim_fpu_status_invalid_zdz:
2522 print (arg, "%sZDZ", prefix);
2523 break;
2524 case sim_fpu_status_invalid_imz:
2525 print (arg, "%sIMZ", prefix);
2526 break;
2527 case sim_fpu_status_invalid_cvi:
2528 print (arg, "%sCVI", prefix);
2529 break;
2530 case sim_fpu_status_invalid_cmp:
2531 print (arg, "%sCMP", prefix);
2532 break;
2533 case sim_fpu_status_invalid_sqrt:
2534 print (arg, "%sSQRT", prefix);
2535 break;
2536 case sim_fpu_status_inexact:
2537 print (arg, "%sX", prefix);
2538 break;
2539 case sim_fpu_status_overflow:
2540 print (arg, "%sO", prefix);
2541 break;
2542 case sim_fpu_status_underflow:
2543 print (arg, "%sU", prefix);
2544 break;
2545 case sim_fpu_status_invalid_div0:
2546 print (arg, "%s/", prefix);
2547 break;
2548 case sim_fpu_status_rounded:
2549 print (arg, "%sR", prefix);
2550 break;
2551 }
2552 i <<= 1;
2553 prefix = ",";
2554 }
2555 }
2556
2557 #endif