1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
4
5 /* Copyright 1994, 1997, 1998, 2003 Free Software Foundation, Inc.
6
7 This file is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 2, or (at your option) any
10 later version.
11
12 In addition to the permissions in the GNU General Public License, the
13 Free Software Foundation gives you unlimited permission to link the
14 compiled version of this file with other programs, and to distribute
15 those programs without any restriction coming from the use of this
16 file. (The General Public License restrictions do apply in other
17 respects; for example, they cover modification of the file, and
18 distribution when not linked into another program.)
19
20 This file is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; see the file COPYING. If not, write to
27 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
28
29 /* As a special exception, if you link this library with other files,
30 some of which are compiled with GCC, to produce an executable,
31 this library does not by itself cause the resulting executable
32 to be covered by the GNU General Public License.
33 This exception does not however invalidate any other reasons why
34 the executable file might be covered by the GNU General Public License. */
35
36 /* This implements IEEE 754 format arithmetic, but does not provide a
37 mechanism for setting the rounding mode, or for generating or handling
38 exceptions.
39
40 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
41 Wilson, all of Cygnus Support. */
42
43
44 #ifndef SIM_FPU_C
45 #define SIM_FPU_C
46
47 #include "sim-basics.h"
48 #include "sim-fpu.h"
49
50 #include "sim-io.h"
51 #include "sim-assert.h"
52
53
54 /* Debugging support.
55 If digits is -1, then print all digits. */
56
57 static void
58 print_bits (unsigned64 x,
59 int msbit,
60 int digits,
61 sim_fpu_print_func print,
62 void *arg)
63 {
64 unsigned64 bit = LSBIT64 (msbit);
65 int i = 4;
66 while (bit && digits)
67 {
68 if (i == 0)
69 print (arg, ",");
70
71 if ((x & bit))
72 print (arg, "1");
73 else
74 print (arg, "0");
75 bit >>= 1;
76
77 if (digits > 0) digits--;
78 i = (i + 1) % 4;
79 }
80 }
81
82
83
84 /* Quick and dirty conversion between a host double and a host 64-bit int (only meaningful when the host double is IEEE 754 binary64) */
85
86 typedef union {
87 double d;
88 unsigned64 i;
89 } sim_fpu_map;
90
91
92 /* A packed IEEE floating point number.
93
94 Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
95 32 and 64 bit numbers. This number is interpreted as:
96
97 Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
98 (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
99
100 Denormalized (0 == BIASEDEXP && FRAC != 0):
101 (sign ? "-" : "+") 0.<FRAC> x 2 ^ (- EXPBIAS)
102
103 Zero (0 == BIASEDEXP && FRAC == 0):
104 (sign ? "-" : "+") 0.0
105
106 Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
107 (sign ? "-" : "+") "infinity"
108
109 SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
110 SNaN.FRAC
111
112 QuietNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC >= QUIET_NAN):
113 QNaN.FRAC
114
115 */
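/* Worked example of the packed layout above (standard IEEE 754 facts, for
   orientation only): the double 1.0 has SIGN 0, BIASEDEXP 1023 (EXPBIAS64 + 0)
   and FRAC 0, so it packs to 0x3FF0000000000000; the single-precision 1.0
   packs the same way to 0x3F800000.  */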
116
117 #define NR_EXPBITS (is_double ? 11 : 8)
118 #define NR_FRACBITS (is_double ? 52 : 23)
119 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
120
121 #define EXPMAX32 (255)
122 #define EXPMAX64 (2047)
123 #define EXPMAX ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))
124
125 #define EXPBIAS32 (127)
126 #define EXPBIAS64 (1023)
127 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
128
129 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
130
131
132
133 /* An unpacked floating point number.
134
135 When unpacked, the fraction of both a 32 and 64 bit floating point
136 number is stored using the same format:
137
138 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
139 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
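/* Sanity check on the layouts above: 1 + 52 + 8 + 0 and 1 + 23 + 7 + 30 both
   come to 61 bits, so in either case the unpacked fraction occupies bits 60..0,
   with the implicit one at bit NR_FRAC_GUARD (60) and the guard/pad bits
   filling the low end.  */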
140
141 #define NR_PAD32 (30)
142 #define NR_PAD64 (0)
143 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
144 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
145
146 #define NR_GUARDS32 (7 + NR_PAD32)
147 #define NR_GUARDS64 (8 + NR_PAD64)
148 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
149 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
150
151 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
152 #define GUARDLSB LSBIT64 (NR_PAD)
153 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
154
155 #define NR_FRAC_GUARD (60)
156 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
157 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
158 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
159 #define NR_SPARE 2
160
161 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
162
163 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
164
165 #define NORMAL_EXPMAX32 (EXPBIAS32)
166 #define NORMAL_EXPMAX64 (EXPBIAS64)
167 #define NORMAL_EXPMAX (EXPBIAS)
168
169
170 /* Integer constants */
171
172 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
173 #define MAX_UINT32 LSMASK64 (31, 0)
174 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
175
176 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
177 #define MAX_UINT64 LSMASK64 (63, 0)
178 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
179
180 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
181 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
182 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
183 #define NR_INTBITS (is_64bit ? 64 : 32)
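/* For example, MAX_INT32 is LSMASK64 (30, 0) = 0x7fffffff, and MIN_INT32 is
   LSMASK64 (63, 31), i.e. the sign-extended signed64 value 0xffffffff80000000
   (-2^31).  */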
184
185 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer */
186 STATIC_INLINE_SIM_FPU (unsigned64)
187 pack_fpu (const sim_fpu *src,
188 int is_double)
189 {
190 int sign;
191 unsigned64 exp;
192 unsigned64 fraction;
193 unsigned64 packed;
194
195 switch (src->class)
196 {
197 /* create a NaN */
198 case sim_fpu_class_qnan:
199 sign = src->sign;
200 exp = EXPMAX;
201 /* force fraction to correct class */
202 fraction = src->fraction;
203 fraction >>= NR_GUARDS;
204 fraction |= QUIET_NAN;
205 break;
206 case sim_fpu_class_snan:
207 sign = src->sign;
208 exp = EXPMAX;
209 /* force fraction to correct class */
210 fraction = src->fraction;
211 fraction >>= NR_GUARDS;
212 fraction &= ~QUIET_NAN;
213 break;
214 case sim_fpu_class_infinity:
215 sign = src->sign;
216 exp = EXPMAX;
217 fraction = 0;
218 break;
219 case sim_fpu_class_zero:
220 sign = src->sign;
221 exp = 0;
222 fraction = 0;
223 break;
224 case sim_fpu_class_number:
225 case sim_fpu_class_denorm:
226 ASSERT (src->fraction >= IMPLICIT_1);
227 ASSERT (src->fraction < IMPLICIT_2);
228 if (src->normal_exp < NORMAL_EXPMIN)
229 {
230 /* This number's exponent is too low to fit into the bits
231 available in the number.  We'll denormalize it by
232 storing zero in the exponent and shifting the fraction to
233 the right to make up for it. */
234 int nr_shift = NORMAL_EXPMIN - src->normal_exp;
235 if (nr_shift > NR_FRACBITS)
236 {
237 /* underflow, just make the number zero */
238 sign = src->sign;
239 exp = 0;
240 fraction = 0;
241 }
242 else
243 {
244 sign = src->sign;
245 exp = 0;
246 /* Shift by the value */
247 fraction = src->fraction;
248 fraction >>= NR_GUARDS;
249 fraction >>= nr_shift;
250 }
251 }
252 else if (src->normal_exp > NORMAL_EXPMAX)
253 {
254 /* Infinity */
255 sign = src->sign;
256 exp = EXPMAX;
257 fraction = 0;
258 }
259 else
260 {
261 exp = (src->normal_exp + EXPBIAS);
262 sign = src->sign;
263 fraction = src->fraction;
264 /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
265 or some such */
266 /* Round to nearest: if the guard bits are all zero except for
267 their most significant bit, we are exactly half way between two
268 representable numbers; choose the one whose LSB is 0. */
269 if ((fraction & GUARDMASK) == GUARDMSB)
270 {
271 if ((fraction & (GUARDMSB << 1)))
272 fraction += (GUARDMSB << 1);
273 }
274 else
275 {
276 /* Add a one to the guards to force round to nearest */
277 fraction += GUARDROUND;
278 }
279 if ((fraction & IMPLICIT_2)) /* rounding resulted in carry */
280 {
281 exp += 1;
282 fraction >>= 1;
283 }
284 fraction >>= NR_GUARDS;
285 /* When exp == EXPMAX (overflow from carry) fraction must
286 have been made zero */
287 ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
288 }
289 break;
290 default:
291 abort ();
292 }
293
294 packed = ((sign ? SIGNBIT : 0)
295 | (exp << NR_FRACBITS)
296 | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
297
298 /* trace operation */
299 #if 0
300 if (is_double)
301 {
302 }
303 else
304 {
305 printf ("pack_fpu: ");
306 printf ("-> %c%0lX.%06lX\n",
307 LSMASKED32 (packed, 31, 31) ? '8' : '0',
308 (long) LSEXTRACTED32 (packed, 30, 23),
309 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
310 }
311 #endif
312
313 return packed;
314 }
315
316
317 /* Unpack a 32/64 bit integer into a sim_fpu structure */
318 STATIC_INLINE_SIM_FPU (void)
319 unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
320 {
321 unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
322 unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
323 int sign = (packed & SIGNBIT) != 0;
324
325 if (exp == 0)
326 {
327 /* Hmm. Looks like 0 */
328 if (fraction == 0)
329 {
330 /* tastes like zero */
331 dst->class = sim_fpu_class_zero;
332 dst->sign = sign;
333 dst->normal_exp = 0;
334 }
335 else
336 {
337 /* Zero exponent with non zero fraction - it's denormalized,
338 so there isn't a leading implicit one - we'll shift it so
339 it gets one. */
340 dst->normal_exp = exp - EXPBIAS + 1;
341 dst->class = sim_fpu_class_denorm;
342 dst->sign = sign;
343 fraction <<= NR_GUARDS;
344 while (fraction < IMPLICIT_1)
345 {
346 fraction <<= 1;
347 dst->normal_exp--;
348 }
349 dst->fraction = fraction;
350 }
351 }
352 else if (exp == EXPMAX)
353 {
354 /* Huge exponent */
355 if (fraction == 0)
356 {
357 /* Attached to a zero fraction - means infinity */
358 dst->class = sim_fpu_class_infinity;
359 dst->sign = sign;
360 /* dst->normal_exp = EXPBIAS; */
361 /* dst->fraction = 0; */
362 }
363 else
364 {
365 /* Non zero fraction, means NaN */
366 dst->sign = sign;
367 dst->fraction = (fraction << NR_GUARDS);
368 if (fraction >= QUIET_NAN)
369 dst->class = sim_fpu_class_qnan;
370 else
371 dst->class = sim_fpu_class_snan;
372 }
373 }
374 else
375 {
376 /* Nothing strange about this number */
377 dst->class = sim_fpu_class_number;
378 dst->sign = sign;
379 dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
380 dst->normal_exp = exp - EXPBIAS;
381 }
382
383 /* trace operation */
384 #if 0
385 if (is_double)
386 {
387 }
388 else
389 {
390 printf ("unpack_fpu: %c%02lX.%06lX ->\n",
391 LSMASKED32 (packed, 31, 31) ? '8' : '0',
392 (long) LSEXTRACTED32 (packed, 30, 23),
393 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
394 }
395 #endif
396
397 /* sanity checks */
398 {
399 sim_fpu_map val;
400 val.i = pack_fpu (dst, 1);
401 if (is_double)
402 {
403 ASSERT (val.i == packed);
404 }
405 else
406 {
407 unsigned32 val32 = pack_fpu (dst, 0);
408 unsigned32 org32 = packed;
409 ASSERT (val32 == org32);
410 }
411 }
412 }
413
414
415 /* Convert a floating point into an integer */
416 STATIC_INLINE_SIM_FPU (int)
417 fpu2i (signed64 *i,
418 const sim_fpu *s,
419 int is_64bit,
420 sim_fpu_round round)
421 {
422 unsigned64 tmp;
423 int shift;
424 int status = 0;
425 if (sim_fpu_is_zero (s))
426 {
427 *i = 0;
428 return 0;
429 }
430 if (sim_fpu_is_snan (s))
431 {
432 *i = MIN_INT; /* FIXME */
433 return sim_fpu_status_invalid_cvi;
434 }
435 if (sim_fpu_is_qnan (s))
436 {
437 *i = MIN_INT; /* FIXME */
438 return sim_fpu_status_invalid_cvi;
439 }
440 /* map infinity onto MAX_INT... */
441 if (sim_fpu_is_infinity (s))
442 {
443 *i = s->sign ? MIN_INT : MAX_INT;
444 return sim_fpu_status_invalid_cvi;
445 }
446 /* it is a number, but a small one */
447 if (s->normal_exp < 0)
448 {
449 *i = 0;
450 return sim_fpu_status_inexact;
451 }
452 /* Is the floating point MIN_INT or just close? */
453 if (s->sign && s->normal_exp == (NR_INTBITS - 1))
454 {
455 *i = MIN_INT;
456 ASSERT (s->fraction >= IMPLICIT_1);
457 if (s->fraction == IMPLICIT_1)
458 return 0; /* exact */
459 if (is_64bit) /* can't round */
460 return sim_fpu_status_invalid_cvi; /* must be overflow */
461 /* For a 32 bit MIN_INT, rounding is possible */
462 switch (round)
463 {
464 case sim_fpu_round_default:
465 abort ();
466 case sim_fpu_round_zero:
467 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
468 return sim_fpu_status_invalid_cvi;
469 else
470 return sim_fpu_status_inexact;
471 break;
472 case sim_fpu_round_near:
473 {
474 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
475 return sim_fpu_status_invalid_cvi;
476 else if ((s->fraction & ~FRAC32MASK) >= (~FRAC32MASK >> 1))
477 return sim_fpu_status_invalid_cvi;
478 else
479 return sim_fpu_status_inexact;
480 }
481 case sim_fpu_round_up:
482 if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
483 return sim_fpu_status_inexact;
484 else
485 return sim_fpu_status_invalid_cvi;
486 case sim_fpu_round_down:
487 return sim_fpu_status_invalid_cvi;
488 }
489 }
490 /* Would right shifting result in the FRAC being shifted into
491 (through) the integer's sign bit? */
492 if (s->normal_exp > (NR_INTBITS - 2))
493 {
494 *i = s->sign ? MIN_INT : MAX_INT;
495 return sim_fpu_status_invalid_cvi;
496 }
497 /* Normal number, shift it into place */
498 tmp = s->fraction;
499 shift = (s->normal_exp - (NR_FRAC_GUARD));
500 if (shift > 0)
501 {
502 tmp <<= shift;
503 }
504 else
505 {
506 shift = -shift;
507 if (tmp & ((SIGNED64 (1) << shift) - 1))
508 status |= sim_fpu_status_inexact;
509 tmp >>= shift;
510 }
511 *i = s->sign ? (-tmp) : (tmp);
512 return status;
513 }
514
515 /* convert an integer into a floating point */
516 STATIC_INLINE_SIM_FPU (int)
517 i2fpu (sim_fpu *f, signed64 i, int is_64bit)
518 {
519 int status = 0;
520 if (i == 0)
521 {
522 f->class = sim_fpu_class_zero;
523 f->sign = 0;
524 f->normal_exp = 0;
525 }
526 else
527 {
528 f->class = sim_fpu_class_number;
529 f->sign = (i < 0);
530 f->normal_exp = NR_FRAC_GUARD;
531
532 if (f->sign)
533 {
534 /* Special case for minint, since there is no corresponding
535 +ve integer representation for it */
536 if (i == MIN_INT)
537 {
538 f->fraction = IMPLICIT_1;
539 f->normal_exp = NR_INTBITS - 1;
540 }
541 else
542 f->fraction = (-i);
543 }
544 else
545 f->fraction = i;
546
547 if (f->fraction >= IMPLICIT_2)
548 {
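/* Shift right one bit at a time, ORing any discarded bit back into the
   LSB so it stays "sticky" for later rounding.  */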
549 do
550 {
551 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
552 f->normal_exp += 1;
553 }
554 while (f->fraction >= IMPLICIT_2);
555 }
556 else if (f->fraction < IMPLICIT_1)
557 {
558 do
559 {
560 f->fraction <<= 1;
561 f->normal_exp -= 1;
562 }
563 while (f->fraction < IMPLICIT_1);
564 }
565 }
566
567 /* trace operation */
568 #if 0
569 {
570 printf ("i2fpu: 0x%08lX ->\n", (long) i);
571 }
572 #endif
573
574 /* sanity check */
575 {
576 signed64 val;
577 fpu2i (&val, f, is_64bit, sim_fpu_round_zero);
578 if (i >= MIN_INT32 && i <= MAX_INT32)
579 {
580 ASSERT (val == i);
581 }
582 }
583
584 return status;
585 }
586
587
588 /* Convert a floating point into an unsigned integer */
589 STATIC_INLINE_SIM_FPU (int)
590 fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
591 {
592 const int is_double = 1;
593 unsigned64 tmp;
594 int shift;
595 if (sim_fpu_is_zero (s))
596 {
597 *u = 0;
598 return 0;
599 }
600 if (sim_fpu_is_nan (s))
601 {
602 *u = 0;
603 return 0;
604 }
605 /* it is a negative number */
606 if (s->sign)
607 {
608 *u = 0;
609 return 0;
610 }
611 /* infinity maps onto MAX_UINT... */
612 if (sim_fpu_is_infinity (s))
613 {
614 *u = MAX_UINT;
615 return 0;
616 }
617 /* it is a number, but a small one */
618 if (s->normal_exp < 0)
619 {
620 *u = 0;
621 return 0;
622 }
623 /* overflow */
624 if (s->normal_exp > (NR_INTBITS - 1))
625 {
626 *u = MAX_UINT;
627 return 0;
628 }
629 /* normal number */
630 tmp = (s->fraction & ~PADMASK);
631 shift = (s->normal_exp - (NR_FRACBITS + NR_GUARDS));
632 if (shift > 0)
633 {
634 tmp <<= shift;
635 }
636 else
637 {
638 shift = -shift;
639 tmp >>= shift;
640 }
641 *u = tmp;
642 return 0;
643 }
644
645 /* Convert an unsigned integer into a floating point */
646 STATIC_INLINE_SIM_FPU (int)
647 u2fpu (sim_fpu *f, unsigned64 u, int is_64bit)
648 {
649 if (u == 0)
650 {
651 f->class = sim_fpu_class_zero;
652 f->sign = 0;
653 f->normal_exp = 0;
654 }
655 else
656 {
657 f->class = sim_fpu_class_number;
658 f->sign = 0;
659 f->normal_exp = NR_FRAC_GUARD;
660 f->fraction = u;
661
662 while (f->fraction < IMPLICIT_1)
663 {
664 f->fraction <<= 1;
665 f->normal_exp -= 1;
666 }
667 }
668 return 0;
669 }
670
671
672 /* register <-> sim_fpu */
673
674 INLINE_SIM_FPU (void)
675 sim_fpu_32to (sim_fpu *f, unsigned32 s)
676 {
677 unpack_fpu (f, s, 0);
678 }
679
680
681 INLINE_SIM_FPU (void)
682 sim_fpu_232to (sim_fpu *f, unsigned32 h, unsigned32 l)
683 {
684 unsigned64 s = h;
685 s = (s << 32) | l;
686 unpack_fpu (f, s, 1);
687 }
688
689
690 INLINE_SIM_FPU (void)
691 sim_fpu_64to (sim_fpu *f, unsigned64 s)
692 {
693 unpack_fpu (f, s, 1);
694 }
695
696
697 INLINE_SIM_FPU (void)
698 sim_fpu_to32 (unsigned32 *s,
699 const sim_fpu *f)
700 {
701 *s = pack_fpu (f, 0);
702 }
703
704
705 INLINE_SIM_FPU (void)
706 sim_fpu_to232 (unsigned32 *h, unsigned32 *l,
707 const sim_fpu *f)
708 {
709 unsigned64 s = pack_fpu (f, 1);
710 *l = s;
711 *h = (s >> 32);
712 }
713
714
715 INLINE_SIM_FPU (void)
716 sim_fpu_to64 (unsigned64 *u,
717 const sim_fpu *f)
718 {
719 *u = pack_fpu (f, 1);
720 }
721
722
723 INLINE_SIM_FPU (void)
724 sim_fpu_fractionto (sim_fpu *f,
725 int sign,
726 int normal_exp,
727 unsigned64 fraction,
728 int precision)
729 {
730 int shift = (NR_FRAC_GUARD - precision);
731 f->class = sim_fpu_class_number;
732 f->sign = sign;
733 f->normal_exp = normal_exp;
734 /* shift the fraction to where sim-fpu expects it */
735 if (shift >= 0)
736 f->fraction = (fraction << shift);
737 else
738 f->fraction = (fraction >> -shift);
739 f->fraction |= IMPLICIT_1;
740 }
741
742
743 INLINE_SIM_FPU (unsigned64)
744 sim_fpu_tofraction (const sim_fpu *d,
745 int precision)
746 {
747 /* we have NR_FRAC_GUARD bits, we want only PRECISION bits */
748 int shift = (NR_FRAC_GUARD - precision);
749 unsigned64 fraction = (d->fraction & ~IMPLICIT_1);
750 if (shift >= 0)
751 return fraction >> shift;
752 else
753 return fraction << -shift;
754 }
755
756
757 /* Rounding */
758
759 STATIC_INLINE_SIM_FPU (int)
760 do_normal_overflow (sim_fpu *f,
761 int is_double,
762 sim_fpu_round round)
763 {
764 switch (round)
765 {
766 case sim_fpu_round_default:
767 return 0;
768 case sim_fpu_round_near:
769 f->class = sim_fpu_class_infinity;
770 break;
771 case sim_fpu_round_up:
772 if (!f->sign)
773 f->class = sim_fpu_class_infinity;
774 break;
775 case sim_fpu_round_down:
776 if (f->sign)
777 f->class = sim_fpu_class_infinity;
778 break;
779 case sim_fpu_round_zero:
780 break;
781 }
782 f->normal_exp = NORMAL_EXPMAX;
783 f->fraction = LSMASK64 (NR_FRAC_GUARD, NR_GUARDS);
784 return (sim_fpu_status_overflow | sim_fpu_status_inexact);
785 }
786
787 STATIC_INLINE_SIM_FPU (int)
788 do_normal_underflow (sim_fpu *f,
789 int is_double,
790 sim_fpu_round round)
791 {
792 switch (round)
793 {
794 case sim_fpu_round_default:
795 return 0;
796 case sim_fpu_round_near:
797 f->class = sim_fpu_class_zero;
798 break;
799 case sim_fpu_round_up:
800 if (f->sign)
801 f->class = sim_fpu_class_zero;
802 break;
803 case sim_fpu_round_down:
804 if (!f->sign)
805 f->class = sim_fpu_class_zero;
806 break;
807 case sim_fpu_round_zero:
808 f->class = sim_fpu_class_zero;
809 break;
810 }
811 f->normal_exp = NORMAL_EXPMIN - NR_FRACBITS;
812 f->fraction = IMPLICIT_1;
813 return (sim_fpu_status_inexact | sim_fpu_status_underflow);
814 }
815
816
817
818 /* Round a number using NR_GUARDS.
819 The rounded result is left in F; F->FRACTION == 0 indicates underflow. */
820
821 STATIC_INLINE_SIM_FPU (int)
822 do_normal_round (sim_fpu *f,
823 int nr_guards,
824 sim_fpu_round round)
825 {
826 unsigned64 guardmask = LSMASK64 (nr_guards - 1, 0);
827 unsigned64 guardmsb = LSBIT64 (nr_guards - 1);
828 unsigned64 fraclsb = guardmsb << 1;
829 if ((f->fraction & guardmask))
830 {
831 int status = sim_fpu_status_inexact;
832 switch (round)
833 {
834 case sim_fpu_round_default:
835 return 0;
836 case sim_fpu_round_near:
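/* Round up when the discarded guard bits exceed one half of the kept
   LSB, or equal exactly one half and the kept LSB is already odd
   (round-half-to-even).  */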
837 if ((f->fraction & guardmsb))
838 {
839 if ((f->fraction & fraclsb))
840 {
841 status |= sim_fpu_status_rounded;
842 }
843 else if ((f->fraction & (guardmask >> 1)))
844 {
845 status |= sim_fpu_status_rounded;
846 }
847 }
848 break;
849 case sim_fpu_round_up:
850 if (!f->sign)
851 status |= sim_fpu_status_rounded;
852 break;
853 case sim_fpu_round_down:
854 if (f->sign)
855 status |= sim_fpu_status_rounded;
856 break;
857 case sim_fpu_round_zero:
858 break;
859 }
860 f->fraction &= ~guardmask;
861 /* round if needed, handle resulting overflow */
862 if ((status & sim_fpu_status_rounded))
863 {
864 f->fraction += fraclsb;
865 if ((f->fraction & IMPLICIT_2))
866 {
867 f->fraction >>= 1;
868 f->normal_exp += 1;
869 }
870 }
871 return status;
872 }
873 else
874 return 0;
875 }
876
877
878 STATIC_INLINE_SIM_FPU (int)
879 do_round (sim_fpu *f,
880 int is_double,
881 sim_fpu_round round,
882 sim_fpu_denorm denorm)
883 {
884 switch (f->class)
885 {
886 case sim_fpu_class_qnan:
887 case sim_fpu_class_zero:
888 case sim_fpu_class_infinity:
889 return 0;
890 break;
891 case sim_fpu_class_snan:
892 /* Quieten a SignalingNaN */
893 f->class = sim_fpu_class_qnan;
894 return sim_fpu_status_invalid_snan;
895 break;
896 case sim_fpu_class_number:
897 case sim_fpu_class_denorm:
898 {
899 int status;
900 ASSERT (f->fraction < IMPLICIT_2);
901 ASSERT (f->fraction >= IMPLICIT_1);
902 if (f->normal_exp < NORMAL_EXPMIN)
903 {
904 /* This number's exponent is too low to fit into the bits
905 available in the number. Round off any bits that will be
906 discarded as a result of denormalization. Edge case is
907 the implicit bit shifted to GUARD0 and then rounded
908 up. */
909 int shift = NORMAL_EXPMIN - f->normal_exp;
910 if (shift + NR_GUARDS <= NR_FRAC_GUARD + 1
911 && !(denorm & sim_fpu_denorm_zero))
912 {
913 status = do_normal_round (f, shift + NR_GUARDS, round);
914 if (f->fraction == 0) /* rounding underflowed */
915 {
916 status |= do_normal_underflow (f, is_double, round);
917 }
918 else if (f->normal_exp < NORMAL_EXPMIN) /* still underflow? */
919 {
920 status |= sim_fpu_status_denorm;
921 /* Any loss of precision when denormalizing is
922 underflow. Some processors check for underflow
923 before rounding, some after! */
924 if (status & sim_fpu_status_inexact)
925 status |= sim_fpu_status_underflow;
926 /* Flag that resultant value has been denormalized */
927 f->class = sim_fpu_class_denorm;
928 }
929 else if ((denorm & sim_fpu_denorm_underflow_inexact))
930 {
931 if ((status & sim_fpu_status_inexact))
932 status |= sim_fpu_status_underflow;
933 }
934 }
935 else
936 {
937 status = do_normal_underflow (f, is_double, round);
938 }
939 }
940 else if (f->normal_exp > NORMAL_EXPMAX)
941 {
942 /* Infinity */
943 status = do_normal_overflow (f, is_double, round);
944 }
945 else
946 {
947 status = do_normal_round (f, NR_GUARDS, round);
948 if (f->fraction == 0)
949 /* f->class = sim_fpu_class_zero; */
950 status |= do_normal_underflow (f, is_double, round);
951 else if (f->normal_exp > NORMAL_EXPMAX)
952 /* oops! rounding caused overflow */
953 status |= do_normal_overflow (f, is_double, round);
954 }
955 ASSERT ((f->class == sim_fpu_class_number
956 || f->class == sim_fpu_class_denorm)
957 <= (f->fraction < IMPLICIT_2 && f->fraction >= IMPLICIT_1));
958 return status;
959 }
960 }
961 return 0;
962 }
963
964 INLINE_SIM_FPU (int)
965 sim_fpu_round_32 (sim_fpu *f,
966 sim_fpu_round round,
967 sim_fpu_denorm denorm)
968 {
969 return do_round (f, 0, round, denorm);
970 }
971
972 INLINE_SIM_FPU (int)
973 sim_fpu_round_64 (sim_fpu *f,
974 sim_fpu_round round,
975 sim_fpu_denorm denorm)
976 {
977 return do_round (f, 1, round, denorm);
978 }
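
/* Editor's sketch (disabled, not part of the library): how a target
   simulator typically strings these routines together for a single
   precision add.  The only name below not defined in this file is
   sim_fpu_denorm_default, which is assumed to come from sim-fpu.h.  */
#if 0
static unsigned32
example_fadd32 (unsigned32 a, unsigned32 b)
{
  sim_fpu A, B, R;
  unsigned32 packed;
  int status = 0;
  sim_fpu_32to (&A, a);			/* unpack the register images */
  sim_fpu_32to (&B, b);
  status |= sim_fpu_add (&R, &A, &B);	/* wide, unrounded add */
  status |= sim_fpu_round_32 (&R, sim_fpu_round_near, sim_fpu_denorm_default);
  sim_fpu_to32 (&packed, &R);		/* repack; STATUS holds the IEEE flags */
  return packed;
}
#endif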
979
980
981
982 /* Arithmetic ops */
983
984 INLINE_SIM_FPU (int)
985 sim_fpu_add (sim_fpu *f,
986 const sim_fpu *l,
987 const sim_fpu *r)
988 {
989 if (sim_fpu_is_snan (l))
990 {
991 *f = *l;
992 f->class = sim_fpu_class_qnan;
993 return sim_fpu_status_invalid_snan;
994 }
995 if (sim_fpu_is_snan (r))
996 {
997 *f = *r;
998 f->class = sim_fpu_class_qnan;
999 return sim_fpu_status_invalid_snan;
1000 }
1001 if (sim_fpu_is_qnan (l))
1002 {
1003 *f = *l;
1004 return 0;
1005 }
1006 if (sim_fpu_is_qnan (r))
1007 {
1008 *f = *r;
1009 return 0;
1010 }
1011 if (sim_fpu_is_infinity (l))
1012 {
1013 if (sim_fpu_is_infinity (r)
1014 && l->sign != r->sign)
1015 {
1016 *f = sim_fpu_qnan;
1017 return sim_fpu_status_invalid_isi;
1018 }
1019 *f = *l;
1020 return 0;
1021 }
1022 if (sim_fpu_is_infinity (r))
1023 {
1024 *f = *r;
1025 return 0;
1026 }
1027 if (sim_fpu_is_zero (l))
1028 {
1029 if (sim_fpu_is_zero (r))
1030 {
1031 *f = sim_fpu_zero;
1032 f->sign = l->sign & r->sign;
1033 }
1034 else
1035 *f = *r;
1036 return 0;
1037 }
1038 if (sim_fpu_is_zero (r))
1039 {
1040 *f = *l;
1041 return 0;
1042 }
1043 {
1044 int status = 0;
1045 int shift = l->normal_exp - r->normal_exp;
1046 unsigned64 lfraction;
1047 unsigned64 rfraction;
1048 /* use exp of larger */
1049 if (shift >= NR_FRAC_GUARD)
1050 {
1051 /* left has much bigger magnitude */
1052 *f = *l;
1053 return sim_fpu_status_inexact;
1054 }
1055 if (shift <= - NR_FRAC_GUARD)
1056 {
1057 /* right has much bigger magnitude */
1058 *f = *r;
1059 return sim_fpu_status_inexact;
1060 }
1061 lfraction = l->fraction;
1062 rfraction = r->fraction;
1063 if (shift > 0)
1064 {
1065 f->normal_exp = l->normal_exp;
1066 if (rfraction & LSMASK64 (shift - 1, 0))
1067 {
1068 status |= sim_fpu_status_inexact;
1069 rfraction |= LSBIT64 (shift); /* stick LSBit */
1070 }
1071 rfraction >>= shift;
1072 }
1073 else if (shift < 0)
1074 {
1075 f->normal_exp = r->normal_exp;
1076 if (lfraction & LSMASK64 (- shift - 1, 0))
1077 {
1078 status |= sim_fpu_status_inexact;
1079 lfraction |= LSBIT64 (- shift); /* stick LSBit */
1080 }
1081 lfraction >>= -shift;
1082 }
1083 else
1084 {
1085 f->normal_exp = r->normal_exp;
1086 }
1087
1088 /* perform the addition */
1089 if (l->sign)
1090 lfraction = - lfraction;
1091 if (r->sign)
1092 rfraction = - rfraction;
1093 f->fraction = lfraction + rfraction;
1094
1095 /* zero? */
1096 if (f->fraction == 0)
1097 {
1098 *f = sim_fpu_zero;
1099 return 0;
1100 }
1101
1102 /* sign? */
1103 f->class = sim_fpu_class_number;
1104 if ((signed64) f->fraction >= 0)
1105 f->sign = 0;
1106 else
1107 {
1108 f->sign = 1;
1109 f->fraction = - f->fraction;
1110 }
1111
1112 /* normalize it */
1113 if ((f->fraction & IMPLICIT_2))
1114 {
1115 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1116 f->normal_exp ++;
1117 }
1118 else if (f->fraction < IMPLICIT_1)
1119 {
1120 do
1121 {
1122 f->fraction <<= 1;
1123 f->normal_exp --;
1124 }
1125 while (f->fraction < IMPLICIT_1);
1126 }
1127 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1128 return status;
1129 }
1130 }
1131
1132
1133 INLINE_SIM_FPU (int)
1134 sim_fpu_sub (sim_fpu *f,
1135 const sim_fpu *l,
1136 const sim_fpu *r)
1137 {
1138 if (sim_fpu_is_snan (l))
1139 {
1140 *f = *l;
1141 f->class = sim_fpu_class_qnan;
1142 return sim_fpu_status_invalid_snan;
1143 }
1144 if (sim_fpu_is_snan (r))
1145 {
1146 *f = *r;
1147 f->class = sim_fpu_class_qnan;
1148 return sim_fpu_status_invalid_snan;
1149 }
1150 if (sim_fpu_is_qnan (l))
1151 {
1152 *f = *l;
1153 return 0;
1154 }
1155 if (sim_fpu_is_qnan (r))
1156 {
1157 *f = *r;
1158 return 0;
1159 }
1160 if (sim_fpu_is_infinity (l))
1161 {
1162 if (sim_fpu_is_infinity (r)
1163 && l->sign == r->sign)
1164 {
1165 *f = sim_fpu_qnan;
1166 return sim_fpu_status_invalid_isi;
1167 }
1168 *f = *l;
1169 return 0;
1170 }
1171 if (sim_fpu_is_infinity (r))
1172 {
1173 *f = *r;
1174 f->sign = !r->sign;
1175 return 0;
1176 }
1177 if (sim_fpu_is_zero (l))
1178 {
1179 if (sim_fpu_is_zero (r))
1180 {
1181 *f = sim_fpu_zero;
1182 f->sign = l->sign & !r->sign;
1183 }
1184 else
1185 {
1186 *f = *r;
1187 f->sign = !r->sign;
1188 }
1189 return 0;
1190 }
1191 if (sim_fpu_is_zero (r))
1192 {
1193 *f = *l;
1194 return 0;
1195 }
1196 {
1197 int status = 0;
1198 int shift = l->normal_exp - r->normal_exp;
1199 unsigned64 lfraction;
1200 unsigned64 rfraction;
1201 /* use exp of larger */
1202 if (shift >= NR_FRAC_GUARD)
1203 {
1204 /* left has much bigger magnitude */
1205 *f = *l;
1206 return sim_fpu_status_inexact;
1207 }
1208 if (shift <= - NR_FRAC_GUARD)
1209 {
1210 /* right has much bigger magnitude */
1211 *f = *r;
1212 f->sign = !r->sign;
1213 return sim_fpu_status_inexact;
1214 }
1215 lfraction = l->fraction;
1216 rfraction = r->fraction;
1217 if (shift > 0)
1218 {
1219 f->normal_exp = l->normal_exp;
1220 if (rfraction & LSMASK64 (shift - 1, 0))
1221 {
1222 status |= sim_fpu_status_inexact;
1223 rfraction |= LSBIT64 (shift); /* stick LSBit */
1224 }
1225 rfraction >>= shift;
1226 }
1227 else if (shift < 0)
1228 {
1229 f->normal_exp = r->normal_exp;
1230 if (lfraction & LSMASK64 (- shift - 1, 0))
1231 {
1232 status |= sim_fpu_status_inexact;
1233 lfraction |= LSBIT64 (- shift); /* stick LSBit */
1234 }
1235 lfraction >>= -shift;
1236 }
1237 else
1238 {
1239 f->normal_exp = r->normal_exp;
1240 }
1241
1242 /* perform the subtraction */
1243 if (l->sign)
1244 lfraction = - lfraction;
1245 if (!r->sign)
1246 rfraction = - rfraction;
1247 f->fraction = lfraction + rfraction;
1248
1249 /* zero? */
1250 if (f->fraction == 0)
1251 {
1252 *f = sim_fpu_zero;
1253 return 0;
1254 }
1255
1256 /* sign? */
1257 f->class = sim_fpu_class_number;
1258 if ((signed64) f->fraction >= 0)
1259 f->sign = 0;
1260 else
1261 {
1262 f->sign = 1;
1263 f->fraction = - f->fraction;
1264 }
1265
1266 /* normalize it */
1267 if ((f->fraction & IMPLICIT_2))
1268 {
1269 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1270 f->normal_exp ++;
1271 }
1272 else if (f->fraction < IMPLICIT_1)
1273 {
1274 do
1275 {
1276 f->fraction <<= 1;
1277 f->normal_exp --;
1278 }
1279 while (f->fraction < IMPLICIT_1);
1280 }
1281 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1282 return status;
1283 }
1284 }
1285
1286
1287 INLINE_SIM_FPU (int)
1288 sim_fpu_mul (sim_fpu *f,
1289 const sim_fpu *l,
1290 const sim_fpu *r)
1291 {
1292 if (sim_fpu_is_snan (l))
1293 {
1294 *f = *l;
1295 f->class = sim_fpu_class_qnan;
1296 return sim_fpu_status_invalid_snan;
1297 }
1298 if (sim_fpu_is_snan (r))
1299 {
1300 *f = *r;
1301 f->class = sim_fpu_class_qnan;
1302 return sim_fpu_status_invalid_snan;
1303 }
1304 if (sim_fpu_is_qnan (l))
1305 {
1306 *f = *l;
1307 return 0;
1308 }
1309 if (sim_fpu_is_qnan (r))
1310 {
1311 *f = *r;
1312 return 0;
1313 }
1314 if (sim_fpu_is_infinity (l))
1315 {
1316 if (sim_fpu_is_zero (r))
1317 {
1318 *f = sim_fpu_qnan;
1319 return sim_fpu_status_invalid_imz;
1320 }
1321 *f = *l;
1322 f->sign = l->sign ^ r->sign;
1323 return 0;
1324 }
1325 if (sim_fpu_is_infinity (r))
1326 {
1327 if (sim_fpu_is_zero (l))
1328 {
1329 *f = sim_fpu_qnan;
1330 return sim_fpu_status_invalid_imz;
1331 }
1332 *f = *r;
1333 f->sign = l->sign ^ r->sign;
1334 return 0;
1335 }
1336 if (sim_fpu_is_zero (l) || sim_fpu_is_zero (r))
1337 {
1338 *f = sim_fpu_zero;
1339 f->sign = l->sign ^ r->sign;
1340 return 0;
1341 }
1342 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1343 128 bit number */
1344 {
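/* Split each 64-bit fraction into 32-bit halves and accumulate the four
   partial products; RES2:RES0 (later HIGH:LOW) is the full 128-bit
   product.  */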
1345 unsigned64 low;
1346 unsigned64 high;
1347 unsigned64 nl = l->fraction & 0xffffffff;
1348 unsigned64 nh = l->fraction >> 32;
1349 unsigned64 ml = r->fraction & 0xffffffff;
1350 unsigned64 mh = r->fraction >>32;
1351 unsigned64 pp_ll = ml * nl;
1352 unsigned64 pp_hl = mh * nl;
1353 unsigned64 pp_lh = ml * nh;
1354 unsigned64 pp_hh = mh * nh;
1355 unsigned64 res2 = 0;
1356 unsigned64 res0 = 0;
1357 unsigned64 ps_hh__ = pp_hl + pp_lh;
1358 if (ps_hh__ < pp_hl)
1359 res2 += UNSIGNED64 (0x100000000);
1360 pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
1361 res0 = pp_ll + pp_hl;
1362 if (res0 < pp_ll)
1363 res2++;
1364 res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1365 high = res2;
1366 low = res0;
1367
1368 f->normal_exp = l->normal_exp + r->normal_exp;
1369 f->sign = l->sign ^ r->sign;
1370 f->class = sim_fpu_class_number;
1371
1372 /* Input is bounded by [1,2) ; [2^60,2^61)
1373 Output is bounded by [1,4) ; [2^120,2^122) */
1374
1375 /* Adjust the exponent according to where the decimal point ended
1376 up in the high 64 bit word. In the source the decimal point
1377 was at NR_FRAC_GUARD. */
1378 f->normal_exp += NR_FRAC_GUARD + 64 - (NR_FRAC_GUARD * 2);
1379
1380 /* The high word is bounded according to the above. Consequently
1381 it has never overflowed into IMPLICIT_2. */
1382 ASSERT (high < LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64));
1383 ASSERT (high >= LSBIT64 ((NR_FRAC_GUARD * 2) - 64));
1384 ASSERT (LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64) < IMPLICIT_1);
1385
1386 /* normalize */
1387 do
1388 {
1389 f->normal_exp--;
1390 high <<= 1;
1391 if (low & LSBIT64 (63))
1392 high |= 1;
1393 low <<= 1;
1394 }
1395 while (high < IMPLICIT_1);
1396
1397 ASSERT (high >= IMPLICIT_1 && high < IMPLICIT_2);
1398 if (low != 0)
1399 {
1400 f->fraction = (high | 1); /* sticky */
1401 return sim_fpu_status_inexact;
1402 }
1403 else
1404 {
1405 f->fraction = high;
1406 return 0;
1407 }
1408 return 0;
1409 }
1410 }
1411
1412 INLINE_SIM_FPU (int)
1413 sim_fpu_div (sim_fpu *f,
1414 const sim_fpu *l,
1415 const sim_fpu *r)
1416 {
1417 if (sim_fpu_is_snan (l))
1418 {
1419 *f = *l;
1420 f->class = sim_fpu_class_qnan;
1421 return sim_fpu_status_invalid_snan;
1422 }
1423 if (sim_fpu_is_snan (r))
1424 {
1425 *f = *r;
1426 f->class = sim_fpu_class_qnan;
1427 return sim_fpu_status_invalid_snan;
1428 }
1429 if (sim_fpu_is_qnan (l))
1430 {
1431 *f = *l;
1432 f->class = sim_fpu_class_qnan;
1433 return 0;
1434 }
1435 if (sim_fpu_is_qnan (r))
1436 {
1437 *f = *r;
1438 f->class = sim_fpu_class_qnan;
1439 return 0;
1440 }
1441 if (sim_fpu_is_infinity (l))
1442 {
1443 if (sim_fpu_is_infinity (r))
1444 {
1445 *f = sim_fpu_qnan;
1446 return sim_fpu_status_invalid_idi;
1447 }
1448 else
1449 {
1450 *f = *l;
1451 f->sign = l->sign ^ r->sign;
1452 return 0;
1453 }
1454 }
1455 if (sim_fpu_is_zero (l))
1456 {
1457 if (sim_fpu_is_zero (r))
1458 {
1459 *f = sim_fpu_qnan;
1460 return sim_fpu_status_invalid_zdz;
1461 }
1462 else
1463 {
1464 *f = *l;
1465 f->sign = l->sign ^ r->sign;
1466 return 0;
1467 }
1468 }
1469 if (sim_fpu_is_infinity (r))
1470 {
1471 *f = sim_fpu_zero;
1472 f->sign = l->sign ^ r->sign;
1473 return 0;
1474 }
1475 if (sim_fpu_is_zero (r))
1476 {
1477 f->class = sim_fpu_class_infinity;
1478 f->sign = l->sign ^ r->sign;
1479 return sim_fpu_status_invalid_div0;
1480 }
1481
1482 /* Calculate the quotient fraction by dividing the two 64-bit fractions
1483 one bit at a time (classic restoring long division) */
1484 {
1485 /* quotient = (numerator / denominator)
1486 x 2^(numerator exponent - denominator exponent) */
1488 unsigned64 numerator;
1489 unsigned64 denominator;
1490 unsigned64 quotient;
1491 unsigned64 bit;
1492
1493 f->class = sim_fpu_class_number;
1494 f->sign = l->sign ^ r->sign;
1495 f->normal_exp = l->normal_exp - r->normal_exp;
1496
1497 numerator = l->fraction;
1498 denominator = r->fraction;
1499
1500 /* If the quotient fraction would be less than 1.0, scale the numerator up and drop the exponent by one to compensate */
1501 if (numerator < denominator)
1502 {
1503 numerator <<= 1;
1504 f->normal_exp--;
1505 }
1506 ASSERT (numerator >= denominator);
1507
1508 /* Gain extra precision, already used one spare bit */
1509 numerator <<= NR_SPARE;
1510 denominator <<= NR_SPARE;
1511
1512 /* Does divide one bit at a time. Optimize??? */
1513 quotient = 0;
1514 bit = (IMPLICIT_1 << NR_SPARE);
1515 while (bit)
1516 {
1517 if (numerator >= denominator)
1518 {
1519 quotient |= bit;
1520 numerator -= denominator;
1521 }
1522 bit >>= 1;
1523 numerator <<= 1;
1524 }
1525
1526 /* discard (but save) the extra bits */
1527 if ((quotient & LSMASK64 (NR_SPARE -1, 0)))
1528 quotient = (quotient >> NR_SPARE) | 1;
1529 else
1530 quotient = (quotient >> NR_SPARE);
1531
1532 f->fraction = quotient;
1533 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1534 if (numerator != 0)
1535 {
1536 f->fraction |= 1; /* stick remaining bits */
1537 return sim_fpu_status_inexact;
1538 }
1539 else
1540 return 0;
1541 }
1542 }
1543
1544
1545 INLINE_SIM_FPU (int)
1546 sim_fpu_max (sim_fpu *f,
1547 const sim_fpu *l,
1548 const sim_fpu *r)
1549 {
1550 if (sim_fpu_is_snan (l))
1551 {
1552 *f = *l;
1553 f->class = sim_fpu_class_qnan;
1554 return sim_fpu_status_invalid_snan;
1555 }
1556 if (sim_fpu_is_snan (r))
1557 {
1558 *f = *r;
1559 f->class = sim_fpu_class_qnan;
1560 return sim_fpu_status_invalid_snan;
1561 }
1562 if (sim_fpu_is_qnan (l))
1563 {
1564 *f = *l;
1565 return 0;
1566 }
1567 if (sim_fpu_is_qnan (r))
1568 {
1569 *f = *r;
1570 return 0;
1571 }
1572 if (sim_fpu_is_infinity (l))
1573 {
1574 if (sim_fpu_is_infinity (r)
1575 && l->sign == r->sign)
1576 {
1577 *f = sim_fpu_qnan;
1578 return sim_fpu_status_invalid_isi;
1579 }
1580 if (l->sign)
1581 *f = *r; /* -inf < anything */
1582 else
1583 *f = *l; /* +inf > anything */
1584 return 0;
1585 }
1586 if (sim_fpu_is_infinity (r))
1587 {
1588 if (r->sign)
1589 *f = *l; /* anything > -inf */
1590 else
1591 *f = *r; /* anything < +inf */
1592 return 0;
1593 }
1594 if (l->sign > r->sign)
1595 {
1596 *f = *r; /* -ve < +ve */
1597 return 0;
1598 }
1599 if (l->sign < r->sign)
1600 {
1601 *f = *l; /* +ve > -ve */
1602 return 0;
1603 }
1604 ASSERT (l->sign == r->sign);
1605 if (l->normal_exp > r->normal_exp
1606 || (l->normal_exp == r->normal_exp &&
1607 l->fraction > r->fraction))
1608 {
1609 /* |l| > |r| */
1610 if (l->sign)
1611 *f = *r; /* -ve < -ve */
1612 else
1613 *f = *l; /* +ve > +ve */
1614 return 0;
1615 }
1616 else
1617 {
1618 /* |l| <= |r| */
1619 if (l->sign)
1620 *f = *l; /* -ve > -ve */
1621 else
1622 *f = *r; /* +ve < +ve */
1623 return 0;
1624 }
1625 }
1626
1627
1628 INLINE_SIM_FPU (int)
1629 sim_fpu_min (sim_fpu *f,
1630 const sim_fpu *l,
1631 const sim_fpu *r)
1632 {
1633 if (sim_fpu_is_snan (l))
1634 {
1635 *f = *l;
1636 f->class = sim_fpu_class_qnan;
1637 return sim_fpu_status_invalid_snan;
1638 }
1639 if (sim_fpu_is_snan (r))
1640 {
1641 *f = *r;
1642 f->class = sim_fpu_class_qnan;
1643 return sim_fpu_status_invalid_snan;
1644 }
1645 if (sim_fpu_is_qnan (l))
1646 {
1647 *f = *l;
1648 return 0;
1649 }
1650 if (sim_fpu_is_qnan (r))
1651 {
1652 *f = *r;
1653 return 0;
1654 }
1655 if (sim_fpu_is_infinity (l))
1656 {
1657 if (sim_fpu_is_infinity (r)
1658 && l->sign == r->sign)
1659 {
1660 *f = sim_fpu_qnan;
1661 return sim_fpu_status_invalid_isi;
1662 }
1663 if (l->sign)
1664 *f = *l; /* -inf < anything */
1665 else
1666 *f = *r; /* +inf > anything */
1667 return 0;
1668 }
1669 if (sim_fpu_is_infinity (r))
1670 {
1671 if (r->sign)
1672 *f = *r; /* anything > -inf */
1673 else
1674 *f = *l; /* anything < +inf */
1675 return 0;
1676 }
1677 if (l->sign > r->sign)
1678 {
1679 *f = *l; /* -ve < +ve */
1680 return 0;
1681 }
1682 if (l->sign < r->sign)
1683 {
1684 *f = *r; /* +ve > -ve */
1685 return 0;
1686 }
1687 ASSERT (l->sign == r->sign);
1688 if (l->normal_exp > r->normal_exp
1689 || (l->normal_exp == r->normal_exp &&
1690 l->fraction > r->fraction))
1691 {
1692 /* |l| > |r| */
1693 if (l->sign)
1694 *f = *l; /* -ve < -ve */
1695 else
1696 *f = *r; /* +ve > +ve */
1697 return 0;
1698 }
1699 else
1700 {
1701 /* |l| <= |r| */
1702 if (l->sign)
1703 *f = *r; /* -ve > -ve */
1704 else
1705 *f = *l; /* +ve < +ve */
1706 return 0;
1707 }
1708 }
1709
1710
1711 INLINE_SIM_FPU (int)
1712 sim_fpu_neg (sim_fpu *f,
1713 const sim_fpu *r)
1714 {
1715 if (sim_fpu_is_snan (r))
1716 {
1717 *f = *r;
1718 f->class = sim_fpu_class_qnan;
1719 return sim_fpu_status_invalid_snan;
1720 }
1721 if (sim_fpu_is_qnan (r))
1722 {
1723 *f = *r;
1724 return 0;
1725 }
1726 *f = *r;
1727 f->sign = !r->sign;
1728 return 0;
1729 }
1730
1731
1732 INLINE_SIM_FPU (int)
1733 sim_fpu_abs (sim_fpu *f,
1734 const sim_fpu *r)
1735 {
1736 if (sim_fpu_is_snan (r))
1737 {
1738 *f = *r;
1739 f->class = sim_fpu_class_qnan;
1740 return sim_fpu_status_invalid_snan;
1741 }
1742 if (sim_fpu_is_qnan (r))
1743 {
1744 *f = *r;
1745 return 0;
1746 }
1747 *f = *r;
1748 f->sign = 0;
1749 return 0;
1750 }
1751
1752
1753 INLINE_SIM_FPU (int)
1754 sim_fpu_inv (sim_fpu *f,
1755 const sim_fpu *r)
1756 {
1757 return sim_fpu_div (f, &sim_fpu_one, r);
1758 }
1759
1760
1761 INLINE_SIM_FPU (int)
1762 sim_fpu_sqrt (sim_fpu *f,
1763 const sim_fpu *r)
1764 {
1765 if (sim_fpu_is_snan (r))
1766 {
1767 *f = sim_fpu_qnan;
1768 return sim_fpu_status_invalid_snan;
1769 }
1770 if (sim_fpu_is_qnan (r))
1771 {
1772 *f = sim_fpu_qnan;
1773 return 0;
1774 }
1775 if (sim_fpu_is_zero (r))
1776 {
1777 f->class = sim_fpu_class_zero;
1778 f->sign = r->sign;
1779 f->normal_exp = 0;
1780 return 0;
1781 }
1782 if (sim_fpu_is_infinity (r))
1783 {
1784 if (r->sign)
1785 {
1786 *f = sim_fpu_qnan;
1787 return sim_fpu_status_invalid_sqrt;
1788 }
1789 else
1790 {
1791 f->class = sim_fpu_class_infinity;
1792 f->sign = 0;
1794 return 0;
1795 }
1796 }
1797 if (r->sign)
1798 {
1799 *f = sim_fpu_qnan;
1800 return sim_fpu_status_invalid_sqrt;
1801 }
1802
1803 /* @(#)e_sqrt.c 5.1 93/09/24 */
1804 /*
1805 * ====================================================
1806 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1807 *
1808 * Developed at SunPro, a Sun Microsystems, Inc. business.
1809 * Permission to use, copy, modify, and distribute this
1810 * software is freely granted, provided that this notice
1811 * is preserved.
1812 * ====================================================
1813 */
1814
1815 /* __ieee754_sqrt(x)
1816 * Return correctly rounded sqrt.
1817 * ------------------------------------------
1818 * | Use the hardware sqrt if you have one |
1819 * ------------------------------------------
1820 * Method:
1821 * Bit by bit method using integer arithmetic. (Slow, but portable)
1822 * 1. Normalization
1823 * Scale x to y in [1,4) with even powers of 2:
1824 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1825 * sqrt(x) = 2^k * sqrt(y)
1826 -
1827 - Since:
1828 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1829 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1830 - Define:
1831 - y = ((m even) ? x : 2.x)
1832 - Then:
1833 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1834 - And:
1835 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1836 -
1837 * 2. Bit by bit computation
1838 * Let q = sqrt(y) truncated to i bit after binary point (q = 1),
1839 * i 0
1840 * i+1 2
1841 * s = 2*q , and y = 2 * ( y - q ). (1)
1842 * i i i i
1843 *
1844 * To compute q from q , one checks whether
1845 * i+1 i
1846 *
1847 * -(i+1) 2
1848 * (q + 2 ) <= y. (2)
1849 * i
1850 * -(i+1)
1851 * If (2) is false, then q = q ; otherwise q = q + 2 .
1852 * i+1 i i+1 i
1853 *
1854 * With some algebraic manipulation, it is not difficult to see
1855 * that (2) is equivalent to
1856 * -(i+1)
1857 * s + 2 <= y (3)
1858 * i i
1859 *
1860 * The advantage of (3) is that s and y can be computed by
1861 * i i
1862 * the following recurrence formula:
1863 * if (3) is false
1864 *
1865 * s = s , y = y ; (4)
1866 * i+1 i i+1 i
1867 *
1868 -
1869 - NOTE: y = 2*y
1870 - i+1 i
1871 -
1872 * otherwise,
1873 * -i -(i+1)
1874 * s = s + 2 , y = y - s - 2 (5)
1875 * i+1 i i+1 i i
1876 *
1877 -
1878 - -(i+1)
1879 - NOTE: y = 2 (y - s - 2 )
1880 - i+1 i i
1881 -
1882 * One may easily use induction to prove (4) and (5).
1883 * Note. Since the left hand side of (3) contain only i+2 bits,
1884 * it is not necessary to do a full (53-bit) comparison
1885 * in (3).
1886 * 3. Final rounding
1887 * After generating the 53 bits result, we compute one more bit.
1888 * Together with the remainder, we can decide whether the
1889 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
1890 * (it will never equal to 1/2ulp).
1891 * The rounding mode can be detected by checking whether
1892 * huge + tiny is equal to huge, and whether huge - tiny is
1893 * equal to huge for some floating point number "huge" and "tiny".
1894 *
1895 * Special cases:
1896 * sqrt(+-0) = +-0 ... exact
1897 * sqrt(inf) = inf
1898 * sqrt(-ve) = NaN ... with invalid signal
1899 * sqrt(NaN) = NaN ... with invalid signal for signaling NaN
1900 *
1901 * Other methods : see the appended file at the end of the program below.
1902 *---------------
1903 */
1904
1905 {
1906 /* generate sqrt(x) bit by bit */
1907 unsigned64 y;
1908 unsigned64 q;
1909 unsigned64 s;
1910 unsigned64 b;
1911
1912 f->class = sim_fpu_class_number;
1913 f->sign = 0;
1914 y = r->fraction;
1915 f->normal_exp = (r->normal_exp >> 1); /* exp = [exp/2] */
1916
1917 /* odd exp, double x to make it even */
1918 ASSERT (y >= IMPLICIT_1 && y < IMPLICIT_4);
1919 if ((r->normal_exp & 1))
1920 {
1921 y += y;
1922 }
1923 ASSERT (y >= IMPLICIT_1 && y < (IMPLICIT_2 << 1));
1924
1925 /* Let loop determine first value of s (either 1 or 2) */
1926 b = IMPLICIT_1;
1927 q = 0;
1928 s = 0;
1929
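/* In terms of the description above: Q accumulates the root bits, B is
   the bit weight currently being tried, S tracks 2*Q, and Y is the
   repeatedly doubled remainder.  */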
1930 while (b)
1931 {
1932 unsigned64 t = s + b;
1933 if (t <= y)
1934 {
1935 s |= (b << 1);
1936 y -= t;
1937 q |= b;
1938 }
1939 y <<= 1;
1940 b >>= 1;
1941 }
1942
1943 ASSERT (q >= IMPLICIT_1 && q < IMPLICIT_2);
1944 f->fraction = q;
1945 if (y != 0)
1946 {
1947 f->fraction |= 1; /* stick remaining bits */
1948 return sim_fpu_status_inexact;
1949 }
1950 else
1951 return 0;
1952 }
1953 }
1954
1955
1956 /* int/long <-> sim_fpu */
1957
1958 INLINE_SIM_FPU (int)
1959 sim_fpu_i32to (sim_fpu *f,
1960 signed32 i,
1961 sim_fpu_round round)
1962 {
1963 i2fpu (f, i, 0);
1964 return 0;
1965 }
1966
1967 INLINE_SIM_FPU (int)
1968 sim_fpu_u32to (sim_fpu *f,
1969 unsigned32 u,
1970 sim_fpu_round round)
1971 {
1972 u2fpu (f, u, 0);
1973 return 0;
1974 }
1975
1976 INLINE_SIM_FPU (int)
1977 sim_fpu_i64to (sim_fpu *f,
1978 signed64 i,
1979 sim_fpu_round round)
1980 {
1981 i2fpu (f, i, 1);
1982 return 0;
1983 }
1984
1985 INLINE_SIM_FPU (int)
1986 sim_fpu_u64to (sim_fpu *f,
1987 unsigned64 u,
1988 sim_fpu_round round)
1989 {
1990 u2fpu (f, u, 1);
1991 return 0;
1992 }
1993
1994
1995 INLINE_SIM_FPU (int)
1996 sim_fpu_to32i (signed32 *i,
1997 const sim_fpu *f,
1998 sim_fpu_round round)
1999 {
2000 signed64 i64;
2001 int status = fpu2i (&i64, f, 0, round);
2002 *i = i64;
2003 return status;
2004 }
2005
2006 INLINE_SIM_FPU (int)
2007 sim_fpu_to32u (unsigned32 *u,
2008 const sim_fpu *f,
2009 sim_fpu_round round)
2010 {
2011 unsigned64 u64;
2012 int status = fpu2u (&u64, f, 0);
2013 *u = u64;
2014 return status;
2015 }
2016
2017 INLINE_SIM_FPU (int)
2018 sim_fpu_to64i (signed64 *i,
2019 const sim_fpu *f,
2020 sim_fpu_round round)
2021 {
2022 return fpu2i (i, f, 1, round);
2023 }
2024
2025
2026 INLINE_SIM_FPU (int)
2027 sim_fpu_to64u (unsigned64 *u,
2028 const sim_fpu *f,
2029 sim_fpu_round round)
2030 {
2031 return fpu2u (u, f, 1);
2032 }
2033
2034
2035
2036 /* sim_fpu -> host format */
2037
2038 #if 0
2039 INLINE_SIM_FPU (float)
2040 sim_fpu_2f (const sim_fpu *f)
2041 {
2042 return fval.d;
2043 }
2044 #endif
2045
2046
2047 INLINE_SIM_FPU (double)
2048 sim_fpu_2d (const sim_fpu *s)
2049 {
2050 sim_fpu_map val;
2051 if (sim_fpu_is_snan (s))
2052 {
2053 /* gag SNaN's */
2054 sim_fpu n = *s;
2055 n.class = sim_fpu_class_qnan;
2056 val.i = pack_fpu (&n, 1);
2057 }
2058 else
2059 {
2060 val.i = pack_fpu (s, 1);
2061 }
2062 return val.d;
2063 }
2064
2065
2066 #if 0
2067 INLINE_SIM_FPU (void)
2068 sim_fpu_f2 (sim_fpu *f,
2069 float s)
2070 {
2071 sim_fpu_map val;
2072 val.d = s;
2073 unpack_fpu (f, val.i, 1);
2074 }
2075 #endif
2076
2077
2078 INLINE_SIM_FPU (void)
2079 sim_fpu_d2 (sim_fpu *f,
2080 double d)
2081 {
2082 sim_fpu_map val;
2083 val.d = d;
2084 unpack_fpu (f, val.i, 1);
2085 }
2086
2087
2088 /* General */
2089
2090 INLINE_SIM_FPU (int)
2091 sim_fpu_is_nan (const sim_fpu *d)
2092 {
2093 switch (d->class)
2094 {
2095 case sim_fpu_class_qnan:
2096 case sim_fpu_class_snan:
2097 return 1;
2098 default:
2099 return 0;
2100 }
2101 }
2102
2103 INLINE_SIM_FPU (int)
2104 sim_fpu_is_qnan (const sim_fpu *d)
2105 {
2106 switch (d->class)
2107 {
2108 case sim_fpu_class_qnan:
2109 return 1;
2110 default:
2111 return 0;
2112 }
2113 }
2114
2115 INLINE_SIM_FPU (int)
2116 sim_fpu_is_snan (const sim_fpu *d)
2117 {
2118 switch (d->class)
2119 {
2120 case sim_fpu_class_snan:
2121 return 1;
2122 default:
2123 return 0;
2124 }
2125 }
2126
2127 INLINE_SIM_FPU (int)
2128 sim_fpu_is_zero (const sim_fpu *d)
2129 {
2130 switch (d->class)
2131 {
2132 case sim_fpu_class_zero:
2133 return 1;
2134 default:
2135 return 0;
2136 }
2137 }
2138
2139 INLINE_SIM_FPU (int)
2140 sim_fpu_is_infinity (const sim_fpu *d)
2141 {
2142 switch (d->class)
2143 {
2144 case sim_fpu_class_infinity:
2145 return 1;
2146 default:
2147 return 0;
2148 }
2149 }
2150
2151 INLINE_SIM_FPU (int)
2152 sim_fpu_is_number (const sim_fpu *d)
2153 {
2154 switch (d->class)
2155 {
2156 case sim_fpu_class_denorm:
2157 case sim_fpu_class_number:
2158 return 1;
2159 default:
2160 return 0;
2161 }
2162 }
2163
2164 INLINE_SIM_FPU (int)
2165 sim_fpu_is_denorm (const sim_fpu *d)
2166 {
2167 switch (d->class)
2168 {
2169 case sim_fpu_class_denorm:
2170 return 1;
2171 default:
2172 return 0;
2173 }
2174 }
2175
2176
2177 INLINE_SIM_FPU (int)
2178 sim_fpu_sign (const sim_fpu *d)
2179 {
2180 return d->sign;
2181 }
2182
2183
2184 INLINE_SIM_FPU (int)
2185 sim_fpu_exp (const sim_fpu *d)
2186 {
2187 return d->normal_exp;
2188 }
2189
2190
2191 INLINE_SIM_FPU (unsigned64)
2192 sim_fpu_fraction (const sim_fpu *d)
2193 {
2194 return d->fraction;
2195 }
2196
2197
2198 INLINE_SIM_FPU (unsigned64)
2199 sim_fpu_guard (const sim_fpu *d, int is_double)
2200 {
2201 unsigned64 rv;
2202 unsigned64 guardmask = LSMASK64 (NR_GUARDS - 1, 0);
2203 rv = (d->fraction & guardmask) >> NR_PAD;
2204 return rv;
2205 }
2206
2207
2208 INLINE_SIM_FPU (int)
2209 sim_fpu_is (const sim_fpu *d)
2210 {
2211 switch (d->class)
2212 {
2213 case sim_fpu_class_qnan:
2214 return SIM_FPU_IS_QNAN;
2215 case sim_fpu_class_snan:
2216 return SIM_FPU_IS_SNAN;
2217 case sim_fpu_class_infinity:
2218 if (d->sign)
2219 return SIM_FPU_IS_NINF;
2220 else
2221 return SIM_FPU_IS_PINF;
2222 case sim_fpu_class_number:
2223 if (d->sign)
2224 return SIM_FPU_IS_NNUMBER;
2225 else
2226 return SIM_FPU_IS_PNUMBER;
2227 case sim_fpu_class_denorm:
2228 if (d->sign)
2229 return SIM_FPU_IS_NDENORM;
2230 else
2231 return SIM_FPU_IS_PDENORM;
2232 case sim_fpu_class_zero:
2233 if (d->sign)
2234 return SIM_FPU_IS_NZERO;
2235 else
2236 return SIM_FPU_IS_PZERO;
2237 default:
2238 return -1;
2239 abort ();
2240 }
2241 }
2242
2243 INLINE_SIM_FPU (int)
2244 sim_fpu_cmp (const sim_fpu *l, const sim_fpu *r)
2245 {
2246 sim_fpu res;
2247 sim_fpu_sub (&res, l, r);
2248 return sim_fpu_is (&res);
2249 }
2250
2251 INLINE_SIM_FPU (int)
2252 sim_fpu_is_lt (const sim_fpu *l, const sim_fpu *r)
2253 {
2254 int status;
2255 sim_fpu_lt (&status, l, r);
2256 return status;
2257 }
2258
2259 INLINE_SIM_FPU (int)
2260 sim_fpu_is_le (const sim_fpu *l, const sim_fpu *r)
2261 {
2262 int is;
2263 sim_fpu_le (&is, l, r);
2264 return is;
2265 }
2266
2267 INLINE_SIM_FPU (int)
2268 sim_fpu_is_eq (const sim_fpu *l, const sim_fpu *r)
2269 {
2270 int is;
2271 sim_fpu_eq (&is, l, r);
2272 return is;
2273 }
2274
2275 INLINE_SIM_FPU (int)
2276 sim_fpu_is_ne (const sim_fpu *l, const sim_fpu *r)
2277 {
2278 int is;
2279 sim_fpu_ne (&is, l, r);
2280 return is;
2281 }
2282
2283 INLINE_SIM_FPU (int)
2284 sim_fpu_is_ge (const sim_fpu *l, const sim_fpu *r)
2285 {
2286 int is;
2287 sim_fpu_ge (&is, l, r);
2288 return is;
2289 }
2290
2291 INLINE_SIM_FPU (int)
2292 sim_fpu_is_gt (const sim_fpu *l, const sim_fpu *r)
2293 {
2294 int is;
2295 sim_fpu_gt (&is, l, r);
2296 return is;
2297 }
2298
2299
2300 /* Compare operators */
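/* These comparisons work by packing both operands back into host doubles
   (via sim_fpu_map), so, like sim_fpu_2d above, they rely on the host
   double being IEEE 754 binary64.  */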
2301
2302 INLINE_SIM_FPU (int)
2303 sim_fpu_lt (int *is,
2304 const sim_fpu *l,
2305 const sim_fpu *r)
2306 {
2307 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2308 {
2309 sim_fpu_map lval;
2310 sim_fpu_map rval;
2311 lval.i = pack_fpu (l, 1);
2312 rval.i = pack_fpu (r, 1);
2313 (*is) = (lval.d < rval.d);
2314 return 0;
2315 }
2316 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2317 {
2318 *is = 0;
2319 return sim_fpu_status_invalid_snan;
2320 }
2321 else
2322 {
2323 *is = 0;
2324 return sim_fpu_status_invalid_qnan;
2325 }
2326 }
2327
2328 INLINE_SIM_FPU (int)
2329 sim_fpu_le (int *is,
2330 const sim_fpu *l,
2331 const sim_fpu *r)
2332 {
2333 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2334 {
2335 sim_fpu_map lval;
2336 sim_fpu_map rval;
2337 lval.i = pack_fpu (l, 1);
2338 rval.i = pack_fpu (r, 1);
2339 *is = (lval.d <= rval.d);
2340 return 0;
2341 }
2342 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2343 {
2344 *is = 0;
2345 return sim_fpu_status_invalid_snan;
2346 }
2347 else
2348 {
2349 *is = 0;
2350 return sim_fpu_status_invalid_qnan;
2351 }
2352 }
2353
2354 INLINE_SIM_FPU (int)
2355 sim_fpu_eq (int *is,
2356 const sim_fpu *l,
2357 const sim_fpu *r)
2358 {
2359 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2360 {
2361 sim_fpu_map lval;
2362 sim_fpu_map rval;
2363 lval.i = pack_fpu (l, 1);
2364 rval.i = pack_fpu (r, 1);
2365 (*is) = (lval.d == rval.d);
2366 return 0;
2367 }
2368 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2369 {
2370 *is = 0;
2371 return sim_fpu_status_invalid_snan;
2372 }
2373 else
2374 {
2375 *is = 0;
2376 return sim_fpu_status_invalid_qnan;
2377 }
2378 }
2379
2380 INLINE_SIM_FPU (int)
2381 sim_fpu_ne (int *is,
2382 const sim_fpu *l,
2383 const sim_fpu *r)
2384 {
2385 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2386 {
2387 sim_fpu_map lval;
2388 sim_fpu_map rval;
2389 lval.i = pack_fpu (l, 1);
2390 rval.i = pack_fpu (r, 1);
2391 (*is) = (lval.d != rval.d);
2392 return 0;
2393 }
2394 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2395 {
2396 *is = 0;
2397 return sim_fpu_status_invalid_snan;
2398 }
2399 else
2400 {
2401 *is = 0;
2402 return sim_fpu_status_invalid_qnan;
2403 }
2404 }
2405
2406 INLINE_SIM_FPU (int)
2407 sim_fpu_ge (int *is,
2408 const sim_fpu *l,
2409 const sim_fpu *r)
2410 {
2411 return sim_fpu_le (is, r, l);
2412 }
2413
2414 INLINE_SIM_FPU (int)
2415 sim_fpu_gt (int *is,
2416 const sim_fpu *l,
2417 const sim_fpu *r)
2418 {
2419 return sim_fpu_lt (is, r, l);
2420 }
2421
2422
2423 /* A number of useful constants */
2424
2425 #if EXTERN_SIM_FPU_P
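/* The aggregate initializers below list the sim_fpu fields in their
   declaration order (class, sign, fraction, normal_exp, as declared in
   sim-fpu.h); e.g. sim_fpu_two is +1.0... x 2^1.  */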
2426 const sim_fpu sim_fpu_zero = {
2427 sim_fpu_class_zero,
2428 };
2429 const sim_fpu sim_fpu_qnan = {
2430 sim_fpu_class_qnan,
2431 };
2432 const sim_fpu sim_fpu_one = {
2433 sim_fpu_class_number, 0, IMPLICIT_1, 0
2434 };
2435 const sim_fpu sim_fpu_two = {
2436 sim_fpu_class_number, 0, IMPLICIT_1, 1
2437 };
2438 const sim_fpu sim_fpu_max32 = {
2439 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS32), NORMAL_EXPMAX32
2440 };
2441 const sim_fpu sim_fpu_max64 = {
2442 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS64), NORMAL_EXPMAX64
2443 };
2444 #endif
2445
2446
2447 /* For debugging */
2448
2449 INLINE_SIM_FPU (void)
2450 sim_fpu_print_fpu (const sim_fpu *f,
2451 sim_fpu_print_func *print,
2452 void *arg)
2453 {
2454 sim_fpu_printn_fpu (f, print, -1, arg);
2455 }
2456
2457 INLINE_SIM_FPU (void)
2458 sim_fpu_printn_fpu (const sim_fpu *f,
2459 sim_fpu_print_func *print,
2460 int digits,
2461 void *arg)
2462 {
2463 print (arg, "%s", f->sign ? "-" : "+");
2464 switch (f->class)
2465 {
2466 case sim_fpu_class_qnan:
2467 print (arg, "0.");
2468 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2469 print (arg, "*QuietNaN");
2470 break;
2471 case sim_fpu_class_snan:
2472 print (arg, "0.");
2473 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2474 print (arg, "*SignalNaN");
2475 break;
2476 case sim_fpu_class_zero:
2477 print (arg, "0.0");
2478 break;
2479 case sim_fpu_class_infinity:
2480 print (arg, "INF");
2481 break;
2482 case sim_fpu_class_number:
2483 case sim_fpu_class_denorm:
2484 print (arg, "1.");
2485 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2486 print (arg, "*2^%+d", f->normal_exp);
2487 ASSERT (f->fraction >= IMPLICIT_1);
2488 ASSERT (f->fraction < IMPLICIT_2);
2489 }
2490 }
2491
2492
2493 INLINE_SIM_FPU (void)
2494 sim_fpu_print_status (int status,
2495 sim_fpu_print_func *print,
2496 void *arg)
2497 {
2498 int i = 1;
2499 char *prefix = "";
2500 while (status >= i)
2501 {
2502 switch ((sim_fpu_status) (status & i))
2503 {
2504 case sim_fpu_status_denorm:
2505 print (arg, "%sD", prefix);
2506 break;
2507 case sim_fpu_status_invalid_snan:
2508 print (arg, "%sSNaN", prefix);
2509 break;
2510 case sim_fpu_status_invalid_qnan:
2511 print (arg, "%sQNaN", prefix);
2512 break;
2513 case sim_fpu_status_invalid_isi:
2514 print (arg, "%sISI", prefix);
2515 break;
2516 case sim_fpu_status_invalid_idi:
2517 print (arg, "%sIDI", prefix);
2518 break;
2519 case sim_fpu_status_invalid_zdz:
2520 print (arg, "%sZDZ", prefix);
2521 break;
2522 case sim_fpu_status_invalid_imz:
2523 print (arg, "%sIMZ", prefix);
2524 break;
2525 case sim_fpu_status_invalid_cvi:
2526 print (arg, "%sCVI", prefix);
2527 break;
2528 case sim_fpu_status_invalid_cmp:
2529 print (arg, "%sCMP", prefix);
2530 break;
2531 case sim_fpu_status_invalid_sqrt:
2532 print (arg, "%sSQRT", prefix);
2533 break;
2535 case sim_fpu_status_inexact:
2536 print (arg, "%sX", prefix);
2537 break;
2539 case sim_fpu_status_overflow:
2540 print (arg, "%sO", prefix);
2541 break;
2543 case sim_fpu_status_underflow:
2544 print (arg, "%sU", prefix);
2545 break;
2547 case sim_fpu_status_invalid_div0:
2548 print (arg, "%s/", prefix);
2549 break;
2551 case sim_fpu_status_rounded:
2552 print (arg, "%sR", prefix);
2553 break;
2555 }
2556 i <<= 1;
2557 prefix = ",";
2558 }
2559 }
2560
2561 #endif
2562