1 /* This is a software floating point library which can be used instead
2    of the floating point routines in libgcc1.c for targets without
3    hardware floating point.  */
4 
5 /* Copyright 1994-2019 Free Software Foundation, Inc.
6 
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11 
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19 
20 /* As a special exception, if you link this library with other files,
21    some of which are compiled with GCC, to produce an executable,
22    this library does not by itself cause the resulting executable
23    to be covered by the GNU General Public License.
24    This exception does not however invalidate any other reasons why
25    the executable file might be covered by the GNU General Public License.  */
26 
27 /* This implements IEEE 754 format arithmetic, but does not provide a
28    mechanism for setting the rounding mode, or for generating or handling
29    exceptions.
30 
31    The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
32    Wilson, all of Cygnus Support.  */
33 
34 
35 #ifndef SIM_FPU_C
36 #define SIM_FPU_C
37 
38 #include "sim-basics.h"
39 #include "sim-fpu.h"
40 
41 #include "sim-io.h"
42 #include "sim-assert.h"
43 
44 
45 /* Debugging support.
46    If digits is -1, then print all digits.  */
47 
48 static void
49 print_bits (unsigned64 x,
50 	    int msbit,
51 	    int digits,
52 	    sim_fpu_print_func print,
53 	    void *arg)
54 {
55   unsigned64 bit = LSBIT64 (msbit);
56   int i = 4;
57   while (bit && digits)
58     {
59       if (i == 0)
60 	print (arg, ",");
61 
62       if ((x & bit))
63 	print (arg, "1");
64       else
65 	print (arg, "0");
66       bit >>= 1;
67 
68       if (digits > 0)
69 	digits--;
70       i = (i + 1) % 4;
71     }
72 }
73 
74 
75 
76 /* Quick and dirty conversion between a host double and host 64bit int.  */
77 
78 typedef union
79 {
80   double d;
81   unsigned64 i;
82 } sim_fpu_map;
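
/* For illustration: on a host whose double is IEEE 754 binary64 (the
   assumption behind this union), storing 1.0 in the .d member and
   reading .i back gives 0x3FF0000000000000.  */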
83 
84 
85 /* A packed IEEE floating point number.
86 
87    Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
88    32 and 64 bit numbers.  This number is interpreted as:
89 
90    Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
91    (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
92 
93    Denormalized (0 == BIASEDEXP && FRAC != 0):
94    (sign ? "-" : "+") 0.<FRAC> x 2 ^ (- EXPBIAS)
95 
96    Zero (0 == BIASEDEXP && FRAC == 0):
97    (sign ? "-" : "+") 0.0
98 
99    Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
100    (sign ? "-" : "+") "infinity"
101 
102    SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
103    SNaN.FRAC
104 
   QuietNaN (BIASEDEXP == EXPMAX && FRAC >= QUIET_NAN):
106    QNaN.FRAC
107 
108    */
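
/* As a worked example (illustration only): the single precision
   pattern 0x3FC00000 has SIGN=0, BIASEDEXP=127 and FRAC=0x400000,
   and therefore denotes +1.1 (binary) x 2^(127 - 127) = 1.5.  */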
109 
110 #define NR_EXPBITS  (is_double ?   11 :   8)
111 #define NR_FRACBITS (is_double ?   52 : 23)
112 #define SIGNBIT     (is_double ? MSBIT64 (0) : MSBIT64 (32))
113 
114 #define EXPMAX32    (255)
#define EXPMAX64    (2047)
#define EXPMAX      ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))
117 
118 #define EXPBIAS32   (127)
119 #define EXPBIAS64   (1023)
120 #define EXPBIAS     (is_double ? EXPBIAS64 : EXPBIAS32)
121 
122 #define QUIET_NAN   LSBIT64 (NR_FRACBITS - 1)
123 
124 
125 
126 /* An unpacked floating point number.
127 
128    When unpacked, the fraction of both a 32 and 64 bit floating point
129    number is stored using the same format:
130 
131    64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
132    32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
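
/* For instance (illustration only), unpacking the 32 bit value 1.5
   (FRAC = 0x400000, unbiased exponent 0) gives an unpacked fraction
   of 0x1800000000000000, i.e. the implicit one at bit 60 with the 23
   fraction bits, 7 guard bits and 30 pad bits below it, and
   normal_exp = 0.  */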
133 
134 #define NR_PAD32    (30)
135 #define NR_PAD64    (0)
136 #define NR_PAD      (is_double ? NR_PAD64 : NR_PAD32)
137 #define PADMASK     (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
138 
139 #define NR_GUARDS32 (7 + NR_PAD32)
140 #define NR_GUARDS64 (8 + NR_PAD64)
141 #define NR_GUARDS  (is_double ? NR_GUARDS64 : NR_GUARDS32)
142 #define GUARDMASK  LSMASK64 (NR_GUARDS - 1, 0)
143 
144 #define GUARDMSB   LSBIT64  (NR_GUARDS - 1)
145 #define GUARDLSB   LSBIT64  (NR_PAD)
146 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
147 
148 #define NR_FRAC_GUARD   (60)
149 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
150 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
151 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
152 #define NR_SPARE 2
153 
154 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
155 
156 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
157 
158 #define NORMAL_EXPMAX32 (EXPBIAS32)
159 #define NORMAL_EXPMAX64 (EXPBIAS64)
160 #define NORMAL_EXPMAX (EXPBIAS)
161 
162 
163 /* Integer constants */
164 
165 #define MAX_INT32  ((signed64) LSMASK64 (30, 0))
166 #define MAX_UINT32 LSMASK64 (31, 0)
167 #define MIN_INT32  ((signed64) LSMASK64 (63, 31))
168 
169 #define MAX_INT64  ((signed64) LSMASK64 (62, 0))
170 #define MAX_UINT64 LSMASK64 (63, 0)
171 #define MIN_INT64  ((signed64) LSMASK64 (63, 63))
172 
173 #define MAX_INT   (is_64bit ? MAX_INT64  : MAX_INT32)
174 #define MIN_INT   (is_64bit ? MIN_INT64  : MIN_INT32)
175 #define MAX_UINT  (is_64bit ? MAX_UINT64 : MAX_UINT32)
176 #define NR_INTBITS (is_64bit ? 64 : 32)
177 
178 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer.  */
179 STATIC_INLINE_SIM_FPU (unsigned64)
180 pack_fpu (const sim_fpu *src,
181 	  int is_double)
182 {
183   int sign;
184   unsigned64 exp;
185   unsigned64 fraction;
186   unsigned64 packed;
187 
188   switch (src->class)
189     {
190       /* Create a NaN.  */
191     case sim_fpu_class_qnan:
192       sign = src->sign;
193       exp = EXPMAX;
194       /* Force fraction to correct class.  */
195       fraction = src->fraction;
196       fraction >>= NR_GUARDS;
197 #ifdef SIM_QUIET_NAN_NEGATED
198       fraction |= QUIET_NAN - 1;
199 #else
200       fraction |= QUIET_NAN;
201 #endif
202       break;
203     case sim_fpu_class_snan:
204       sign = src->sign;
205       exp = EXPMAX;
206       /* Force fraction to correct class.  */
207       fraction = src->fraction;
208       fraction >>= NR_GUARDS;
209 #ifdef SIM_QUIET_NAN_NEGATED
210       fraction |= QUIET_NAN;
211 #else
212       fraction &= ~QUIET_NAN;
213 #endif
214       break;
215     case sim_fpu_class_infinity:
216       sign = src->sign;
217       exp = EXPMAX;
218       fraction = 0;
219       break;
220     case sim_fpu_class_zero:
221       sign = src->sign;
222       exp = 0;
223       fraction = 0;
224       break;
225     case sim_fpu_class_number:
226     case sim_fpu_class_denorm:
227       ASSERT (src->fraction >= IMPLICIT_1);
228       ASSERT (src->fraction < IMPLICIT_2);
229       if (src->normal_exp < NORMAL_EXPMIN)
230 	{
	  /* This number's exponent is too low to fit into the bits
	     available in the number.  We'll denormalize it by storing
	     zero in the exponent and shifting the fraction to the
	     right to make up for it. */
235 	  int nr_shift = NORMAL_EXPMIN - src->normal_exp;
236 	  if (nr_shift > NR_FRACBITS)
237 	    {
238 	      /* Underflow, just make the number zero.  */
239 	      sign = src->sign;
240 	      exp = 0;
241 	      fraction = 0;
242 	    }
243 	  else
244 	    {
245 	      sign = src->sign;
246 	      exp = 0;
247 	      /* Shift by the value.  */
248 	      fraction = src->fraction;
249 	      fraction >>= NR_GUARDS;
250 	      fraction >>= nr_shift;
251 	    }
252 	}
253       else if (src->normal_exp > NORMAL_EXPMAX)
254 	{
255 	  /* Infinity */
256 	  sign = src->sign;
257 	  exp = EXPMAX;
258 	  fraction = 0;
259 	}
260       else
261 	{
262 	  exp = (src->normal_exp + EXPBIAS);
263 	  sign = src->sign;
264 	  fraction = src->fraction;
265 	  /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
266              or some such.  */
	  /* Round to nearest: If the guard bits are all zero apart
	     from the most significant one, then we're exactly half
	     way between two numbers; choose the one which makes the
	     lsb of the answer 0.  */
270 	  if ((fraction & GUARDMASK) == GUARDMSB)
271 	    {
272 	      if ((fraction & (GUARDMSB << 1)))
273 		fraction += (GUARDMSB << 1);
274 	    }
275 	  else
276 	    {
277 	      /* Add a one to the guards to force round to nearest.  */
278 	      fraction += GUARDROUND;
279 	    }
280 	  if ((fraction & IMPLICIT_2)) /* Rounding resulted in carry.  */
281 	    {
282 	      exp += 1;
283 	      fraction >>= 1;
284 	    }
285 	  fraction >>= NR_GUARDS;
286 	  /* When exp == EXPMAX (overflow from carry) fraction must
287 	     have been made zero.  */
288 	  ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
289 	}
290       break;
291     default:
292       abort ();
293     }
294 
295   packed = ((sign ? SIGNBIT : 0)
296 	     | (exp << NR_FRACBITS)
297 	     | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
298 
299   /* Trace operation.  */
300 #if 0
301   if (is_double)
302     {
303     }
304   else
305     {
306       printf ("pack_fpu: ");
      printf ("-> %c%02lX.%06lX\n",
308 	      LSMASKED32 (packed, 31, 31) ? '8' : '0',
309 	      (long) LSEXTRACTED32 (packed, 30, 23),
310 	      (long) LSEXTRACTED32 (packed, 23 - 1, 0));
311     }
312 #endif
313 
314   return packed;
315 }
316 
317 
318 /* Unpack a 32/64 bit integer into a sim_fpu structure.  */
319 STATIC_INLINE_SIM_FPU (void)
320 unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
321 {
322   unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
323   unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
324   int sign = (packed & SIGNBIT) != 0;
325 
326   if (exp == 0)
327     {
328       /* Hmm.  Looks like 0 */
329       if (fraction == 0)
330 	{
331 	  /* Tastes like zero.  */
332 	  dst->class = sim_fpu_class_zero;
333 	  dst->sign = sign;
334 	  dst->normal_exp = 0;
335 	}
336       else
337 	{
338 	  /* Zero exponent with non zero fraction - it's denormalized,
339 	     so there isn't a leading implicit one - we'll shift it so
340 	     it gets one.  */
341 	  dst->normal_exp = exp - EXPBIAS + 1;
342 	  dst->class = sim_fpu_class_denorm;
343 	  dst->sign = sign;
344 	  fraction <<= NR_GUARDS;
345 	  while (fraction < IMPLICIT_1)
346 	    {
347 	      fraction <<= 1;
348 	      dst->normal_exp--;
349 	    }
350 	  dst->fraction = fraction;
351 	}
352     }
353   else if (exp == EXPMAX)
354     {
      /* Huge exponent.  */
356       if (fraction == 0)
357 	{
358 	  /* Attached to a zero fraction - means infinity.  */
359 	  dst->class = sim_fpu_class_infinity;
360 	  dst->sign = sign;
361 	  /* dst->normal_exp = EXPBIAS; */
362 	  /* dst->fraction = 0; */
363 	}
364       else
365 	{
366 	  int qnan;
367 
	  /* Non-zero fraction means NaN.  */
369 	  dst->sign = sign;
370 	  dst->fraction = (fraction << NR_GUARDS);
371 #ifdef SIM_QUIET_NAN_NEGATED
372 	  qnan = (fraction & QUIET_NAN) == 0;
373 #else
374 	  qnan = fraction >= QUIET_NAN;
375 #endif
376 	  if (qnan)
377 	    dst->class = sim_fpu_class_qnan;
378 	  else
379 	    dst->class = sim_fpu_class_snan;
380 	}
381     }
382   else
383     {
384       /* Nothing strange about this number.  */
385       dst->class = sim_fpu_class_number;
386       dst->sign = sign;
387       dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
388       dst->normal_exp = exp - EXPBIAS;
389     }
390 
391   /* Trace operation.  */
392 #if 0
393   if (is_double)
394     {
395     }
396   else
397     {
398       printf ("unpack_fpu: %c%02lX.%06lX ->\n",
399 	      LSMASKED32 (packed, 31, 31) ? '8' : '0',
400 	      (long) LSEXTRACTED32 (packed, 30, 23),
401 	      (long) LSEXTRACTED32 (packed, 23 - 1, 0));
402     }
403 #endif
404 
405   /* sanity checks */
406   {
407     sim_fpu_map val;
408     val.i = pack_fpu (dst, 1);
409     if (is_double)
410       {
411 	ASSERT (val.i == packed);
412       }
413     else
414       {
415 	unsigned32 val = pack_fpu (dst, 0);
416 	unsigned32 org = packed;
417 	ASSERT (val == org);
418       }
419   }
420 }
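
/* Illustrative (not compiled) sketch of the pack/unpack round trip
   for single precision; assumes <stdio.h> has been included.  */
#if 0
static void
example_repack_32 (unsigned32 packed)
{
  sim_fpu tmp;
  unpack_fpu (&tmp, packed, 0);		/* 32 bit pattern -> unpacked */
  printf ("0x%08lX -> class %d, normal_exp %d -> 0x%08lX\n",
	  (long) packed, (int) tmp.class, tmp.normal_exp,
	  (long) pack_fpu (&tmp, 0));	/* unpacked -> 32 bit pattern */
}
#endif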
421 
422 
423 /* Convert a floating point into an integer.  */
424 STATIC_INLINE_SIM_FPU (int)
425 fpu2i (signed64 *i,
426        const sim_fpu *s,
427        int is_64bit,
428        sim_fpu_round round)
429 {
430   unsigned64 tmp;
431   int shift;
432   int status = 0;
433   if (sim_fpu_is_zero (s))
434     {
435       *i = 0;
436       return 0;
437     }
438   if (sim_fpu_is_snan (s))
439     {
440       *i = MIN_INT; /* FIXME */
441       return sim_fpu_status_invalid_cvi;
442     }
443   if (sim_fpu_is_qnan (s))
444     {
445       *i = MIN_INT; /* FIXME */
446       return sim_fpu_status_invalid_cvi;
447     }
448   /* Map infinity onto MAX_INT...  */
449   if (sim_fpu_is_infinity (s))
450     {
451       *i = s->sign ? MIN_INT : MAX_INT;
452       return sim_fpu_status_invalid_cvi;
453     }
454   /* It is a number, but a small one.  */
455   if (s->normal_exp < 0)
456     {
457       *i = 0;
458       return sim_fpu_status_inexact;
459     }
460   /* Is the floating point MIN_INT or just close? */
461   if (s->sign && s->normal_exp == (NR_INTBITS - 1))
462     {
463       *i = MIN_INT;
464       ASSERT (s->fraction >= IMPLICIT_1);
465       if (s->fraction == IMPLICIT_1)
466 	return 0; /* exact */
467       if (is_64bit) /* can't round */
468 	return sim_fpu_status_invalid_cvi; /* must be overflow */
      /* For a 32 bit conversion the fraction still has guard bits, so
	 rounding is possible.  */
470       switch (round)
471 	{
472 	case sim_fpu_round_default:
473 	  abort ();
474 	case sim_fpu_round_zero:
475 	  if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
476 	    return sim_fpu_status_invalid_cvi;
477 	  else
478 	    return sim_fpu_status_inexact;
479 	  break;
480 	case sim_fpu_round_near:
481 	  {
482 	    if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
483 	      return sim_fpu_status_invalid_cvi;
	    else if ((s->fraction & ~FRAC32MASK) >= (~FRAC32MASK >> 1))
485 	      return sim_fpu_status_invalid_cvi;
486 	    else
487 	      return sim_fpu_status_inexact;
488 	  }
489 	case sim_fpu_round_up:
490 	  if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
491 	    return sim_fpu_status_inexact;
492 	  else
493 	    return sim_fpu_status_invalid_cvi;
494 	case sim_fpu_round_down:
495 	  return sim_fpu_status_invalid_cvi;
496 	}
497     }
498   /* Would right shifting result in the FRAC being shifted into
499      (through) the integer's sign bit? */
500   if (s->normal_exp > (NR_INTBITS - 2))
501     {
502       *i = s->sign ? MIN_INT : MAX_INT;
503       return sim_fpu_status_invalid_cvi;
504     }
505   /* Normal number, shift it into place.  */
506   tmp = s->fraction;
507   shift = (s->normal_exp - (NR_FRAC_GUARD));
508   if (shift > 0)
509     {
510       tmp <<= shift;
511     }
512   else
513     {
514       shift = -shift;
515       if (tmp & ((SIGNED64 (1) << shift) - 1))
516 	status |= sim_fpu_status_inexact;
517       tmp >>= shift;
518     }
519   *i = s->sign ? (-tmp) : (tmp);
520   return status;
521 }
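
/* For example (illustration only): converting 1.5 (normal_exp 0,
   fraction IMPLICIT_1 | LSBIT64 (59)) with is_64bit = 0 shifts the
   fraction right by NR_FRAC_GUARD bits, giving *i = 1 with
   sim_fpu_status_inexact set because non-zero bits were discarded.  */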
522 
523 /* Convert an integer into a floating point.  */
524 STATIC_INLINE_SIM_FPU (int)
525 i2fpu (sim_fpu *f, signed64 i, int is_64bit)
526 {
527   int status = 0;
528   if (i == 0)
529     {
530       f->class = sim_fpu_class_zero;
531       f->sign = 0;
532       f->normal_exp = 0;
533     }
534   else
535     {
536       f->class = sim_fpu_class_number;
537       f->sign = (i < 0);
538       f->normal_exp = NR_FRAC_GUARD;
539 
540       if (f->sign)
541 	{
542 	  /* Special case for minint, since there is no corresponding
543 	     +ve integer representation for it.  */
544 	  if (i == MIN_INT)
545 	    {
546 	      f->fraction = IMPLICIT_1;
547 	      f->normal_exp = NR_INTBITS - 1;
548 	    }
549 	  else
550 	    f->fraction = (-i);
551 	}
552       else
553 	f->fraction = i;
554 
555       if (f->fraction >= IMPLICIT_2)
556 	{
557 	  do
558 	    {
559 	      f->fraction = (f->fraction >> 1) | (f->fraction & 1);
560 	      f->normal_exp += 1;
561 	    }
562 	  while (f->fraction >= IMPLICIT_2);
563 	}
564       else if (f->fraction < IMPLICIT_1)
565 	{
566 	  do
567 	    {
568 	      f->fraction <<= 1;
569 	      f->normal_exp -= 1;
570 	    }
571 	  while (f->fraction < IMPLICIT_1);
572 	}
573     }
574 
575   /* trace operation */
576 #if 0
577   {
578     printf ("i2fpu: 0x%08lX ->\n", (long) i);
579   }
580 #endif
581 
582   /* sanity check */
583   {
584     signed64 val;
585     fpu2i (&val, f, is_64bit, sim_fpu_round_zero);
586     if (i >= MIN_INT32 && i <= MAX_INT32)
587       {
588 	ASSERT (val == i);
589       }
590   }
591 
592   return status;
593 }
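
/* For example (illustration only): i2fpu of the integer 6 sets
   fraction = 6 and then normalizes by shifting left 58 bits, ending
   with fraction = IMPLICIT_1 | LSBIT64 (59) and normal_exp = 2,
   i.e. 1.5 x 2^2 = 6.  */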
594 
595 
/* Convert a floating point into an unsigned integer.  */
597 STATIC_INLINE_SIM_FPU (int)
598 fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
599 {
600   const int is_double = 1;
601   unsigned64 tmp;
602   int shift;
603   if (sim_fpu_is_zero (s))
604     {
605       *u = 0;
606       return 0;
607     }
608   if (sim_fpu_is_nan (s))
609     {
610       *u = 0;
611       return 0;
612     }
613   /* It is a negative number.  */
614   if (s->sign)
615     {
616       *u = 0;
617       return 0;
618     }
  /* Map infinity onto MAX_UINT...  */
620   if (sim_fpu_is_infinity (s))
621     {
622       *u = MAX_UINT;
623       return 0;
624     }
625   /* It is a number, but a small one.  */
626   if (s->normal_exp < 0)
627     {
628       *u = 0;
629       return 0;
630     }
631   /* overflow */
632   if (s->normal_exp > (NR_INTBITS - 1))
633     {
634       *u = MAX_UINT;
635       return 0;
636     }
637   /* normal number */
638   tmp = (s->fraction & ~PADMASK);
639   shift = (s->normal_exp - (NR_FRACBITS + NR_GUARDS));
640   if (shift > 0)
641     {
642       tmp <<= shift;
643     }
644   else
645     {
646       shift = -shift;
647       tmp >>= shift;
648     }
649   *u = tmp;
650   return 0;
651 }
652 
653 /* Convert an unsigned integer into a floating point.  */
654 STATIC_INLINE_SIM_FPU (int)
655 u2fpu (sim_fpu *f, unsigned64 u, int is_64bit)
656 {
657   if (u == 0)
658     {
659       f->class = sim_fpu_class_zero;
660       f->sign = 0;
661       f->normal_exp = 0;
662     }
663   else
664     {
665       f->class = sim_fpu_class_number;
666       f->sign = 0;
667       f->normal_exp = NR_FRAC_GUARD;
668       f->fraction = u;
669 
670       while (f->fraction < IMPLICIT_1)
671 	{
672 	  f->fraction <<= 1;
673 	  f->normal_exp -= 1;
674 	}
675     }
676   return 0;
677 }
678 
679 
680 /* register <-> sim_fpu */
681 
682 INLINE_SIM_FPU (void)
683 sim_fpu_32to (sim_fpu *f, unsigned32 s)
684 {
685   unpack_fpu (f, s, 0);
686 }
687 
688 
689 INLINE_SIM_FPU (void)
690 sim_fpu_232to (sim_fpu *f, unsigned32 h, unsigned32 l)
691 {
692   unsigned64 s = h;
693   s = (s << 32) | l;
694   unpack_fpu (f, s, 1);
695 }
696 
697 
698 INLINE_SIM_FPU (void)
699 sim_fpu_64to (sim_fpu *f, unsigned64 s)
700 {
701   unpack_fpu (f, s, 1);
702 }
703 
704 
705 INLINE_SIM_FPU (void)
706 sim_fpu_to32 (unsigned32 *s,
707 	      const sim_fpu *f)
708 {
709   *s = pack_fpu (f, 0);
710 }
711 
712 
713 INLINE_SIM_FPU (void)
714 sim_fpu_to232 (unsigned32 *h, unsigned32 *l,
715 	       const sim_fpu *f)
716 {
717   unsigned64 s = pack_fpu (f, 1);
718   *l = s;
719   *h = (s >> 32);
720 }
721 
722 
723 INLINE_SIM_FPU (void)
724 sim_fpu_to64 (unsigned64 *u,
725 	      const sim_fpu *f)
726 {
727   *u = pack_fpu (f, 1);
728 }
729 
730 
731 INLINE_SIM_FPU (void)
732 sim_fpu_fractionto (sim_fpu *f,
733 		    int sign,
734 		    int normal_exp,
735 		    unsigned64 fraction,
736 		    int precision)
737 {
738   int shift = (NR_FRAC_GUARD - precision);
739   f->class = sim_fpu_class_number;
740   f->sign = sign;
741   f->normal_exp = normal_exp;
742   /* Shift the fraction to where sim-fpu expects it.  */
743   if (shift >= 0)
744     f->fraction = (fraction << shift);
745   else
746     f->fraction = (fraction >> -shift);
747   f->fraction |= IMPLICIT_1;
748 }
749 
750 
751 INLINE_SIM_FPU (unsigned64)
752 sim_fpu_tofraction (const sim_fpu *d,
753 		    int precision)
754 {
755   /* We have NR_FRAC_GUARD bits, we want only PRECISION bits.  */
756   int shift = (NR_FRAC_GUARD - precision);
757   unsigned64 fraction = (d->fraction & ~IMPLICIT_1);
758   if (shift >= 0)
759     return fraction >> shift;
760   else
761     return fraction << -shift;
762 }
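
/* For example (illustration only): sim_fpu_fractionto (f, 0, 0,
   0x400000, 23) shifts the 23 bit fraction up by 37 bits and ors in
   IMPLICIT_1, producing the unpacked value 1.5; sim_fpu_tofraction
   (f, 23) undoes this and returns 0x400000 again.  */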
763 
764 
765 /* Rounding */
766 
767 STATIC_INLINE_SIM_FPU (int)
768 do_normal_overflow (sim_fpu *f,
769 		    int is_double,
770 		    sim_fpu_round round)
771 {
772   switch (round)
773     {
774     case sim_fpu_round_default:
775       return 0;
776     case sim_fpu_round_near:
777       f->class = sim_fpu_class_infinity;
778       break;
779     case sim_fpu_round_up:
780       if (!f->sign)
781 	f->class = sim_fpu_class_infinity;
782       break;
783     case sim_fpu_round_down:
784       if (f->sign)
785 	f->class = sim_fpu_class_infinity;
786       break;
787     case sim_fpu_round_zero:
788       break;
789     }
790   f->normal_exp = NORMAL_EXPMAX;
791   f->fraction = LSMASK64 (NR_FRAC_GUARD, NR_GUARDS);
792   return (sim_fpu_status_overflow | sim_fpu_status_inexact);
793 }
794 
795 STATIC_INLINE_SIM_FPU (int)
796 do_normal_underflow (sim_fpu *f,
797 		     int is_double,
798 		     sim_fpu_round round)
799 {
800   switch (round)
801     {
802     case sim_fpu_round_default:
803       return 0;
804     case sim_fpu_round_near:
805       f->class = sim_fpu_class_zero;
806       break;
807     case sim_fpu_round_up:
808       if (f->sign)
809 	f->class = sim_fpu_class_zero;
810       break;
811     case sim_fpu_round_down:
812       if (!f->sign)
813 	f->class = sim_fpu_class_zero;
814       break;
815     case sim_fpu_round_zero:
816       f->class = sim_fpu_class_zero;
817       break;
818     }
819   f->normal_exp = NORMAL_EXPMIN - NR_FRACBITS;
820   f->fraction = IMPLICIT_1;
821   return (sim_fpu_status_inexact | sim_fpu_status_underflow);
822 }
823 
824 
825 
/* Round a number using NR_GUARDS.
   Returns the status bits; when the rounding underflows, F->FRACTION
   is left equal to 0.  */
828 
829 STATIC_INLINE_SIM_FPU (int)
830 do_normal_round (sim_fpu *f,
831 		 int nr_guards,
832 		 sim_fpu_round round)
833 {
834   unsigned64 guardmask = LSMASK64 (nr_guards - 1, 0);
835   unsigned64 guardmsb = LSBIT64 (nr_guards - 1);
836   unsigned64 fraclsb = guardmsb << 1;
837   if ((f->fraction & guardmask))
838     {
839       int status = sim_fpu_status_inexact;
840       switch (round)
841 	{
842 	case sim_fpu_round_default:
843 	  return 0;
844 	case sim_fpu_round_near:
845 	  if ((f->fraction & guardmsb))
846 	    {
847 	      if ((f->fraction & fraclsb))
848 		{
849 		  status |= sim_fpu_status_rounded;
850 		}
851 	      else if ((f->fraction & (guardmask >> 1)))
852 		{
853 		  status |= sim_fpu_status_rounded;
854 		}
855 	    }
856 	  break;
857 	case sim_fpu_round_up:
858 	  if (!f->sign)
859 	    status |= sim_fpu_status_rounded;
860 	  break;
861 	case sim_fpu_round_down:
862 	  if (f->sign)
863 	    status |= sim_fpu_status_rounded;
864 	  break;
865 	case sim_fpu_round_zero:
866 	  break;
867 	}
868       f->fraction &= ~guardmask;
869       /* Round if needed, handle resulting overflow.  */
870       if ((status & sim_fpu_status_rounded))
871 	{
872 	  f->fraction += fraclsb;
873 	  if ((f->fraction & IMPLICIT_2))
874 	    {
875 	      f->fraction >>= 1;
876 	      f->normal_exp += 1;
877 	    }
878 	}
879       return status;
880     }
881   else
882     return 0;
883 }
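
/* Worked example (illustration only), rounding to nearest with
   nr_guards = NR_GUARDS32 (37): a fraction of IMPLICIT_1 | LSBIT64
   (36) sits exactly half way, and since the bit above the guards
   (LSBIT64 (37)) is clear it rounds down to IMPLICIT_1 with just
   sim_fpu_status_inexact; IMPLICIT_1 | LSBIT64 (37) | LSBIT64 (36)
   instead rounds up to IMPLICIT_1 | LSBIT64 (38), adding
   sim_fpu_status_rounded.  */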
884 
885 
886 STATIC_INLINE_SIM_FPU (int)
887 do_round (sim_fpu *f,
888 	  int is_double,
889 	  sim_fpu_round round,
890 	  sim_fpu_denorm denorm)
891 {
892   switch (f->class)
893     {
894     case sim_fpu_class_qnan:
895     case sim_fpu_class_zero:
896     case sim_fpu_class_infinity:
897       return 0;
898       break;
899     case sim_fpu_class_snan:
900       /* Quieten a SignalingNaN.  */
901       f->class = sim_fpu_class_qnan;
902       return sim_fpu_status_invalid_snan;
903       break;
904     case sim_fpu_class_number:
905     case sim_fpu_class_denorm:
906       {
907 	int status;
908 	ASSERT (f->fraction < IMPLICIT_2);
909 	ASSERT (f->fraction >= IMPLICIT_1);
910 	if (f->normal_exp < NORMAL_EXPMIN)
911 	  {
912 	    /* This number's exponent is too low to fit into the bits
913 	       available in the number.  Round off any bits that will be
914 	       discarded as a result of denormalization.  Edge case is
915 	       the implicit bit shifted to GUARD0 and then rounded
916 	       up. */
917 	    int shift = NORMAL_EXPMIN - f->normal_exp;
918 	    if (shift + NR_GUARDS <= NR_FRAC_GUARD + 1
919 		&& !(denorm & sim_fpu_denorm_zero))
920 	      {
921 		status = do_normal_round (f, shift + NR_GUARDS, round);
922 		if (f->fraction == 0) /* Rounding underflowed.  */
923 		  {
924 		    status |= do_normal_underflow (f, is_double, round);
925 		  }
926 		else if (f->normal_exp < NORMAL_EXPMIN) /* still underflow? */
927 		  {
928 		    status |= sim_fpu_status_denorm;
929 		    /* Any loss of precision when denormalizing is
930 		       underflow. Some processors check for underflow
931 		       before rounding, some after! */
932 		    if (status & sim_fpu_status_inexact)
933 		      status |= sim_fpu_status_underflow;
934 		    /* Flag that resultant value has been denormalized.  */
935 		    f->class = sim_fpu_class_denorm;
936 		  }
937 		else if ((denorm & sim_fpu_denorm_underflow_inexact))
938 		  {
939 		    if ((status & sim_fpu_status_inexact))
940 		      status |= sim_fpu_status_underflow;
941 		  }
942 	      }
943 	    else
944 	      {
945 		status = do_normal_underflow (f, is_double, round);
946 	      }
947 	  }
948 	else if (f->normal_exp > NORMAL_EXPMAX)
949 	  {
950 	    /* Infinity */
951 	    status = do_normal_overflow (f, is_double, round);
952 	  }
953 	else
954 	  {
955 	    status = do_normal_round (f, NR_GUARDS, round);
956 	    if (f->fraction == 0)
957 	      /* f->class = sim_fpu_class_zero; */
958 	      status |= do_normal_underflow (f, is_double, round);
959 	    else if (f->normal_exp > NORMAL_EXPMAX)
960 	      /* Oops! rounding caused overflow.  */
961 	      status |= do_normal_overflow (f, is_double, round);
962 	  }
963 	ASSERT ((f->class == sim_fpu_class_number
964 		 || f->class == sim_fpu_class_denorm)
965 		<= (f->fraction < IMPLICIT_2 && f->fraction >= IMPLICIT_1));
966 	return status;
967       }
968     }
969   return 0;
970 }
971 
972 INLINE_SIM_FPU (int)
973 sim_fpu_round_32 (sim_fpu *f,
974 		  sim_fpu_round round,
975 		  sim_fpu_denorm denorm)
976 {
977   return do_round (f, 0, round, denorm);
978 }
979 
980 INLINE_SIM_FPU (int)
981 sim_fpu_round_64 (sim_fpu *f,
982 		  sim_fpu_round round,
983 		  sim_fpu_denorm denorm)
984 {
985   return do_round (f, 1, round, denorm);
986 }
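
/* Illustrative (not compiled) sketch of the usual calling pattern:
   unpack the operands, operate, round to the target precision, and
   repack; the accumulated status bits are typically folded into the
   simulated FP status register by the caller.  */
#if 0
static unsigned32
example_add_32 (unsigned32 a, unsigned32 b, int *status)
{
  sim_fpu A, B, R;
  unsigned32 packed;
  sim_fpu_32to (&A, a);
  sim_fpu_32to (&B, b);
  *status = sim_fpu_add (&R, &A, &B);
  *status |= sim_fpu_round_32 (&R, sim_fpu_round_near,
			       0 /* default denorm handling */);
  sim_fpu_to32 (&packed, &R);
  return packed;
}
#endif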
987 
988 
989 
990 /* Arithmetic ops */
991 
992 INLINE_SIM_FPU (int)
993 sim_fpu_add (sim_fpu *f,
994 	     const sim_fpu *l,
995 	     const sim_fpu *r)
996 {
997   if (sim_fpu_is_snan (l))
998     {
999       *f = *l;
1000       f->class = sim_fpu_class_qnan;
1001       return sim_fpu_status_invalid_snan;
1002     }
1003   if (sim_fpu_is_snan (r))
1004     {
1005       *f = *r;
1006       f->class = sim_fpu_class_qnan;
1007       return sim_fpu_status_invalid_snan;
1008     }
1009   if (sim_fpu_is_qnan (l))
1010     {
1011       *f = *l;
1012       return 0;
1013     }
1014   if (sim_fpu_is_qnan (r))
1015     {
1016       *f = *r;
1017       return 0;
1018     }
1019   if (sim_fpu_is_infinity (l))
1020     {
1021       if (sim_fpu_is_infinity (r)
1022 	  && l->sign != r->sign)
1023 	{
1024 	  *f = sim_fpu_qnan;
1025 	  return sim_fpu_status_invalid_isi;
1026 	}
1027       *f = *l;
1028       return 0;
1029     }
1030   if (sim_fpu_is_infinity (r))
1031     {
1032       *f = *r;
1033       return 0;
1034     }
1035   if (sim_fpu_is_zero (l))
1036     {
1037       if (sim_fpu_is_zero (r))
1038 	{
1039 	  *f = sim_fpu_zero;
1040 	  f->sign = l->sign & r->sign;
1041 	}
1042       else
1043 	*f = *r;
1044       return 0;
1045     }
1046   if (sim_fpu_is_zero (r))
1047     {
1048       *f = *l;
1049       return 0;
1050     }
1051   {
1052     int status = 0;
1053     int shift = l->normal_exp - r->normal_exp;
1054     unsigned64 lfraction;
1055     unsigned64 rfraction;
1056     /* use exp of larger */
1057     if (shift >= NR_FRAC_GUARD)
1058       {
1059 	/* left has much bigger magnitude */
1060 	*f = *l;
1061 	return sim_fpu_status_inexact;
1062       }
1063     if (shift <= - NR_FRAC_GUARD)
1064       {
1065 	/* right has much bigger magnitude */
1066 	*f = *r;
1067 	return sim_fpu_status_inexact;
1068       }
1069     lfraction = l->fraction;
1070     rfraction = r->fraction;
1071     if (shift > 0)
1072       {
1073 	f->normal_exp = l->normal_exp;
1074 	if (rfraction & LSMASK64 (shift - 1, 0))
1075 	  {
1076 	    status |= sim_fpu_status_inexact;
1077 	    rfraction |= LSBIT64 (shift); /* Stick LSBit.  */
1078 	  }
1079 	rfraction >>= shift;
1080       }
1081     else if (shift < 0)
1082       {
1083 	f->normal_exp = r->normal_exp;
1084 	if (lfraction & LSMASK64 (- shift - 1, 0))
1085 	  {
1086 	    status |= sim_fpu_status_inexact;
1087 	    lfraction |= LSBIT64 (- shift); /* Stick LSBit.  */
1088 	  }
1089 	lfraction >>= -shift;
1090       }
1091     else
1092       {
1093 	f->normal_exp = r->normal_exp;
1094       }
1095 
1096     /* Perform the addition.  */
1097     if (l->sign)
1098       lfraction = - lfraction;
1099     if (r->sign)
1100       rfraction = - rfraction;
1101     f->fraction = lfraction + rfraction;
1102 
1103     /* zero? */
1104     if (f->fraction == 0)
1105       {
1106 	*f = sim_fpu_zero;
1107 	return 0;
1108       }
1109 
1110     /* sign? */
1111     f->class = sim_fpu_class_number;
1112     if (((signed64) f->fraction) >= 0)
1113       f->sign = 0;
1114     else
1115       {
1116 	f->sign = 1;
1117 	f->fraction = - f->fraction;
1118       }
1119 
1120     /* Normalize it.  */
1121     if ((f->fraction & IMPLICIT_2))
1122       {
1123 	f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1124 	f->normal_exp ++;
1125       }
1126     else if (f->fraction < IMPLICIT_1)
1127       {
1128 	do
1129 	  {
1130 	    f->fraction <<= 1;
1131 	    f->normal_exp --;
1132 	  }
1133 	while (f->fraction < IMPLICIT_1);
1134       }
1135     ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1136     return status;
1137   }
1138 }
1139 
1140 
1141 INLINE_SIM_FPU (int)
1142 sim_fpu_sub (sim_fpu *f,
1143 	     const sim_fpu *l,
1144 	     const sim_fpu *r)
1145 {
1146   if (sim_fpu_is_snan (l))
1147     {
1148       *f = *l;
1149       f->class = sim_fpu_class_qnan;
1150       return sim_fpu_status_invalid_snan;
1151     }
1152   if (sim_fpu_is_snan (r))
1153     {
1154       *f = *r;
1155       f->class = sim_fpu_class_qnan;
1156       return sim_fpu_status_invalid_snan;
1157     }
1158   if (sim_fpu_is_qnan (l))
1159     {
1160       *f = *l;
1161       return 0;
1162     }
1163   if (sim_fpu_is_qnan (r))
1164     {
1165       *f = *r;
1166       return 0;
1167     }
1168   if (sim_fpu_is_infinity (l))
1169     {
1170       if (sim_fpu_is_infinity (r)
1171 	  && l->sign == r->sign)
1172 	{
1173 	  *f = sim_fpu_qnan;
1174 	  return sim_fpu_status_invalid_isi;
1175 	}
1176       *f = *l;
1177       return 0;
1178     }
1179   if (sim_fpu_is_infinity (r))
1180     {
1181       *f = *r;
1182       f->sign = !r->sign;
1183       return 0;
1184     }
1185   if (sim_fpu_is_zero (l))
1186     {
1187       if (sim_fpu_is_zero (r))
1188 	{
1189 	  *f = sim_fpu_zero;
1190 	  f->sign = l->sign & !r->sign;
1191 	}
1192       else
1193 	{
1194 	  *f = *r;
1195 	  f->sign = !r->sign;
1196 	}
1197       return 0;
1198     }
1199   if (sim_fpu_is_zero (r))
1200     {
1201       *f = *l;
1202       return 0;
1203     }
1204   {
1205     int status = 0;
1206     int shift = l->normal_exp - r->normal_exp;
1207     unsigned64 lfraction;
1208     unsigned64 rfraction;
1209     /* use exp of larger */
1210     if (shift >= NR_FRAC_GUARD)
1211       {
1212 	/* left has much bigger magnitude */
1213 	*f = *l;
1214 	return sim_fpu_status_inexact;
1215       }
1216     if (shift <= - NR_FRAC_GUARD)
1217       {
1218 	/* right has much bigger magnitude */
1219 	*f = *r;
1220 	f->sign = !r->sign;
1221 	return sim_fpu_status_inexact;
1222       }
1223     lfraction = l->fraction;
1224     rfraction = r->fraction;
1225     if (shift > 0)
1226       {
1227 	f->normal_exp = l->normal_exp;
1228 	if (rfraction & LSMASK64 (shift - 1, 0))
1229 	  {
1230 	    status |= sim_fpu_status_inexact;
1231 	    rfraction |= LSBIT64 (shift); /* Stick LSBit.  */
1232 	  }
1233 	rfraction >>= shift;
1234       }
1235     else if (shift < 0)
1236       {
1237 	f->normal_exp = r->normal_exp;
1238 	if (lfraction & LSMASK64 (- shift - 1, 0))
1239 	  {
1240 	    status |= sim_fpu_status_inexact;
1241 	    lfraction |= LSBIT64 (- shift); /* Stick LSBit.  */
1242 	  }
1243 	lfraction >>= -shift;
1244       }
1245     else
1246       {
1247 	f->normal_exp = r->normal_exp;
1248       }
1249 
1250     /* Perform the subtraction.  */
1251     if (l->sign)
1252       lfraction = - lfraction;
1253     if (!r->sign)
1254       rfraction = - rfraction;
1255     f->fraction = lfraction + rfraction;
1256 
1257     /* zero? */
1258     if (f->fraction == 0)
1259       {
1260 	*f = sim_fpu_zero;
1261 	return 0;
1262       }
1263 
1264     /* sign? */
1265     f->class = sim_fpu_class_number;
1266     if (((signed64) f->fraction) >= 0)
1267       f->sign = 0;
1268     else
1269       {
1270 	f->sign = 1;
1271 	f->fraction = - f->fraction;
1272       }
1273 
1274     /* Normalize it.  */
1275     if ((f->fraction & IMPLICIT_2))
1276       {
1277 	f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1278 	f->normal_exp ++;
1279       }
1280     else if (f->fraction < IMPLICIT_1)
1281       {
1282 	do
1283 	  {
1284 	    f->fraction <<= 1;
1285 	    f->normal_exp --;
1286 	  }
1287 	while (f->fraction < IMPLICIT_1);
1288       }
1289     ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1290     return status;
1291   }
1292 }
1293 
1294 
1295 INLINE_SIM_FPU (int)
1296 sim_fpu_mul (sim_fpu *f,
1297 	     const sim_fpu *l,
1298 	     const sim_fpu *r)
1299 {
1300   if (sim_fpu_is_snan (l))
1301     {
1302       *f = *l;
1303       f->class = sim_fpu_class_qnan;
1304       return sim_fpu_status_invalid_snan;
1305     }
1306   if (sim_fpu_is_snan (r))
1307     {
1308       *f = *r;
1309       f->class = sim_fpu_class_qnan;
1310       return sim_fpu_status_invalid_snan;
1311     }
1312   if (sim_fpu_is_qnan (l))
1313     {
1314       *f = *l;
1315       return 0;
1316     }
1317   if (sim_fpu_is_qnan (r))
1318     {
1319       *f = *r;
1320       return 0;
1321     }
1322   if (sim_fpu_is_infinity (l))
1323     {
1324       if (sim_fpu_is_zero (r))
1325 	{
1326 	  *f = sim_fpu_qnan;
1327 	  return sim_fpu_status_invalid_imz;
1328 	}
1329       *f = *l;
1330       f->sign = l->sign ^ r->sign;
1331       return 0;
1332     }
1333   if (sim_fpu_is_infinity (r))
1334     {
1335       if (sim_fpu_is_zero (l))
1336 	{
1337 	  *f = sim_fpu_qnan;
1338 	  return sim_fpu_status_invalid_imz;
1339 	}
1340       *f = *r;
1341       f->sign = l->sign ^ r->sign;
1342       return 0;
1343     }
1344   if (sim_fpu_is_zero (l) || sim_fpu_is_zero (r))
1345     {
1346       *f = sim_fpu_zero;
1347       f->sign = l->sign ^ r->sign;
1348       return 0;
1349     }
1350   /* Calculate the mantissa by multiplying both 64bit numbers to get a
1351      128 bit number.  */
1352   {
1353     unsigned64 low;
1354     unsigned64 high;
1355     unsigned64 nl = l->fraction & 0xffffffff;
1356     unsigned64 nh = l->fraction >> 32;
1357     unsigned64 ml = r->fraction & 0xffffffff;
1358     unsigned64 mh = r->fraction >>32;
1359     unsigned64 pp_ll = ml * nl;
1360     unsigned64 pp_hl = mh * nl;
1361     unsigned64 pp_lh = ml * nh;
1362     unsigned64 pp_hh = mh * nh;
1363     unsigned64 res2 = 0;
1364     unsigned64 res0 = 0;
1365     unsigned64 ps_hh__ = pp_hl + pp_lh;
1366     if (ps_hh__ < pp_hl)
1367       res2 += UNSIGNED64 (0x100000000);
1368     pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
1369     res0 = pp_ll + pp_hl;
1370     if (res0 < pp_ll)
1371       res2++;
1372     res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1373     high = res2;
1374     low = res0;
1375 
1376     f->normal_exp = l->normal_exp + r->normal_exp;
1377     f->sign = l->sign ^ r->sign;
1378     f->class = sim_fpu_class_number;
1379 
1380     /* Input is bounded by [1,2)   ;   [2^60,2^61)
1381        Output is bounded by [1,4)  ;   [2^120,2^122) */
1382 
1383     /* Adjust the exponent according to where the decimal point ended
1384        up in the high 64 bit word.  In the source the decimal point
1385        was at NR_FRAC_GUARD. */
1386     f->normal_exp += NR_FRAC_GUARD + 64 - (NR_FRAC_GUARD * 2);
1387 
1388     /* The high word is bounded according to the above.  Consequently
1389        it has never overflowed into IMPLICIT_2. */
1390     ASSERT (high < LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64));
1391     ASSERT (high >= LSBIT64 ((NR_FRAC_GUARD * 2) - 64));
1392     ASSERT (LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64) < IMPLICIT_1);
1393 
1394     /* Normalize.  */
1395     do
1396       {
1397 	f->normal_exp--;
1398 	high <<= 1;
1399 	if (low & LSBIT64 (63))
1400 	  high |= 1;
1401 	low <<= 1;
1402       }
1403     while (high < IMPLICIT_1);
1404 
1405     ASSERT (high >= IMPLICIT_1 && high < IMPLICIT_2);
1406     if (low != 0)
1407       {
1408 	f->fraction = (high | 1); /* sticky */
1409 	return sim_fpu_status_inexact;
1410       }
1411     else
1412       {
1413 	f->fraction = high;
1414 	return 0;
1415       }
1417   }
1418 }
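
/* The 64 x 64 -> 128 bit product above is formed from four 32 x 32
   partial products.  A stand-alone sketch of the same scheme
   (illustration only, not part of the library):  */
#if 0
static void
example_mul_64x64 (unsigned64 n, unsigned64 m,
		   unsigned64 *high, unsigned64 *low)
{
  unsigned64 nl = n & 0xffffffff, nh = n >> 32;
  unsigned64 ml = m & 0xffffffff, mh = m >> 32;
  unsigned64 pp_ll = ml * nl, pp_hl = mh * nl;
  unsigned64 pp_lh = ml * nh, pp_hh = mh * nh;
  unsigned64 mid = pp_hl + pp_lh;	/* may carry out of bit 63 */
  unsigned64 carry = (mid < pp_hl) ? UNSIGNED64 (0x100000000) : 0;
  *low = pp_ll + (mid << 32);
  *high = pp_hh + (mid >> 32) + carry + (*low < pp_ll ? 1 : 0);
}
#endif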
1419 
1420 INLINE_SIM_FPU (int)
1421 sim_fpu_div (sim_fpu *f,
1422 	     const sim_fpu *l,
1423 	     const sim_fpu *r)
1424 {
1425   if (sim_fpu_is_snan (l))
1426     {
1427       *f = *l;
1428       f->class = sim_fpu_class_qnan;
1429       return sim_fpu_status_invalid_snan;
1430     }
1431   if (sim_fpu_is_snan (r))
1432     {
1433       *f = *r;
1434       f->class = sim_fpu_class_qnan;
1435       return sim_fpu_status_invalid_snan;
1436     }
1437   if (sim_fpu_is_qnan (l))
1438     {
1439       *f = *l;
1440       f->class = sim_fpu_class_qnan;
1441       return 0;
1442     }
1443   if (sim_fpu_is_qnan (r))
1444     {
1445       *f = *r;
1446       f->class = sim_fpu_class_qnan;
1447       return 0;
1448     }
1449   if (sim_fpu_is_infinity (l))
1450     {
1451       if (sim_fpu_is_infinity (r))
1452 	{
1453 	  *f = sim_fpu_qnan;
1454 	  return sim_fpu_status_invalid_idi;
1455 	}
1456       else
1457 	{
1458 	  *f = *l;
1459 	  f->sign = l->sign ^ r->sign;
1460 	  return 0;
1461 	}
1462     }
1463   if (sim_fpu_is_zero (l))
1464     {
1465       if (sim_fpu_is_zero (r))
1466 	{
1467 	  *f = sim_fpu_qnan;
1468 	  return sim_fpu_status_invalid_zdz;
1469 	}
1470       else
1471 	{
1472 	  *f = *l;
1473 	  f->sign = l->sign ^ r->sign;
1474 	  return 0;
1475 	}
1476     }
1477   if (sim_fpu_is_infinity (r))
1478     {
1479       *f = sim_fpu_zero;
1480       f->sign = l->sign ^ r->sign;
1481       return 0;
1482     }
1483   if (sim_fpu_is_zero (r))
1484     {
1485       f->class = sim_fpu_class_infinity;
1486       f->sign = l->sign ^ r->sign;
1487       return sim_fpu_status_invalid_div0;
1488     }
1489 
  /* Calculate the quotient's mantissa by dividing the fractions one
     bit at a time.  */
1492   {
    /* quotient = (numerator / denominator)
		  x 2^(numerator exponent - denominator exponent)  */
1496     unsigned64 numerator;
1497     unsigned64 denominator;
1498     unsigned64 quotient;
1499     unsigned64 bit;
1500 
1501     f->class = sim_fpu_class_number;
1502     f->sign = l->sign ^ r->sign;
1503     f->normal_exp = l->normal_exp - r->normal_exp;
1504 
1505     numerator = l->fraction;
1506     denominator = r->fraction;
1507 
1508     /* Fraction will be less than 1.0 */
1509     if (numerator < denominator)
1510       {
1511 	numerator <<= 1;
1512 	f->normal_exp--;
1513       }
1514     ASSERT (numerator >= denominator);
1515 
1516     /* Gain extra precision, already used one spare bit.  */
1517     numerator <<=    NR_SPARE;
1518     denominator <<=  NR_SPARE;
1519 
1520     /* Does divide one bit at a time.  Optimize???  */
1521     quotient = 0;
1522     bit = (IMPLICIT_1 << NR_SPARE);
1523     while (bit)
1524       {
1525 	if (numerator >= denominator)
1526 	  {
1527 	    quotient |= bit;
1528 	    numerator -= denominator;
1529 	  }
1530 	bit >>= 1;
1531 	numerator <<= 1;
1532       }
1533 
1534     /* Discard (but save) the extra bits.  */
    if ((quotient & LSMASK64 (NR_SPARE - 1, 0)))
1536       quotient = (quotient >> NR_SPARE) | 1;
1537     else
1538       quotient = (quotient >> NR_SPARE);
1539 
1540     f->fraction = quotient;
1541     ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1542     if (numerator != 0)
1543       {
1544 	f->fraction |= 1; /* Stick remaining bits.  */
1545 	return sim_fpu_status_inexact;
1546       }
1547     else
1548       return 0;
1549   }
1550 }
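
/* The quotient loop above is plain restoring division, one bit per
   iteration.  A stand-alone sketch on small integers (illustration
   only): dividing 3 by 2 with 4 fraction bits produces 0b1.1000,
   i.e. 1.5.  */
#if 0
static unsigned
example_restoring_div (unsigned numerator, unsigned denominator,
		       unsigned nr_bits)
{
  /* Both inputs are assumed normalized so that
     denominator <= numerator < 2 * denominator.  */
  unsigned quotient = 0;
  unsigned bit = 1u << nr_bits;	/* the "implicit one" position */
  while (bit)
    {
      if (numerator >= denominator)
	{
	  quotient |= bit;
	  numerator -= denominator;
	}
      bit >>= 1;
      numerator <<= 1;
    }
  return quotient;	/* fixed point, nr_bits fraction bits */
}
#endif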
1551 
1552 
1553 INLINE_SIM_FPU (int)
1554 sim_fpu_rem (sim_fpu *f,
1555 	     const sim_fpu *l,
1556 	     const sim_fpu *r)
1557 {
1558   if (sim_fpu_is_snan (l))
1559     {
1560       *f = *l;
1561       f->class = sim_fpu_class_qnan;
1562       return sim_fpu_status_invalid_snan;
1563     }
1564   if (sim_fpu_is_snan (r))
1565     {
1566       *f = *r;
1567       f->class = sim_fpu_class_qnan;
1568       return sim_fpu_status_invalid_snan;
1569     }
1570   if (sim_fpu_is_qnan (l))
1571     {
1572       *f = *l;
1573       f->class = sim_fpu_class_qnan;
1574       return 0;
1575     }
1576   if (sim_fpu_is_qnan (r))
1577     {
1578       *f = *r;
1579       f->class = sim_fpu_class_qnan;
1580       return 0;
1581     }
1582   if (sim_fpu_is_infinity (l))
1583     {
1584       *f = sim_fpu_qnan;
1585       return sim_fpu_status_invalid_irx;
1586     }
1587   if (sim_fpu_is_zero (r))
1588     {
1589       *f = sim_fpu_qnan;
1590       return sim_fpu_status_invalid_div0;
1591     }
1592   if (sim_fpu_is_zero (l))
1593     {
1594       *f = *l;
1595       return 0;
1596     }
1597   if (sim_fpu_is_infinity (r))
1598     {
1599       *f = *l;
1600       return 0;
1601     }
1602   {
1603     sim_fpu n, tmp;
1604 
1605     /* Remainder is calculated as l-n*r, where n is l/r rounded to the
1606        nearest integer.  The variable n is rounded half even.  */
1607 
1608     sim_fpu_div (&n, l, r);
1609     sim_fpu_round_64 (&n, 0, 0);
1610 
1611     if (n.normal_exp < -1) /* If n looks like zero just return l.  */
1612       {
1613 	*f = *l;
1614 	return 0;
1615       }
1616     else if (n.class == sim_fpu_class_number
1617 	     && n.normal_exp <= (NR_FRAC_GUARD)) /* If not too large round.  */
1618       do_normal_round (&n, (NR_FRAC_GUARD) - n.normal_exp, sim_fpu_round_near);
1619 
1620     /* Mark 0's as zero so multiply can detect zero.  */
1621     if (n.fraction == 0)
1622       n.class = sim_fpu_class_zero;
1623 
1624     /* Calculate n*r.  */
1625     sim_fpu_mul (&tmp, &n, r);
1626     sim_fpu_round_64 (&tmp, 0, 0);
1627 
1628     /* Finally calculate l-n*r.  */
1629     sim_fpu_sub (f, l, &tmp);
1630 
1631     return 0;
1632   }
1633 }
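
/* For example (illustration only): for l = 7.5 and r = 2 the quotient
   l/r = 3.75 rounds (to nearest) to n = 4, so the IEEE remainder is
   l - n*r = 7.5 - 8 = -0.5.  */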
1634 
1635 
1636 INLINE_SIM_FPU (int)
1637 sim_fpu_max (sim_fpu *f,
1638 	     const sim_fpu *l,
1639 	     const sim_fpu *r)
1640 {
1641   if (sim_fpu_is_snan (l))
1642     {
1643       *f = *l;
1644       f->class = sim_fpu_class_qnan;
1645       return sim_fpu_status_invalid_snan;
1646     }
1647   if (sim_fpu_is_snan (r))
1648     {
1649       *f = *r;
1650       f->class = sim_fpu_class_qnan;
1651       return sim_fpu_status_invalid_snan;
1652     }
1653   if (sim_fpu_is_qnan (l))
1654     {
1655       *f = *l;
1656       return 0;
1657     }
1658   if (sim_fpu_is_qnan (r))
1659     {
1660       *f = *r;
1661       return 0;
1662     }
1663   if (sim_fpu_is_infinity (l))
1664     {
1665       if (sim_fpu_is_infinity (r)
1666 	  && l->sign == r->sign)
1667 	{
1668 	  *f = sim_fpu_qnan;
1669 	  return sim_fpu_status_invalid_isi;
1670 	}
1671       if (l->sign)
1672 	*f = *r; /* -inf < anything */
1673       else
1674 	*f = *l; /* +inf > anything */
1675       return 0;
1676     }
1677   if (sim_fpu_is_infinity (r))
1678     {
1679       if (r->sign)
1680 	*f = *l; /* anything > -inf */
1681       else
1682 	*f = *r; /* anything < +inf */
1683       return 0;
1684     }
1685   if (l->sign > r->sign)
1686     {
1687       *f = *r; /* -ve < +ve */
1688       return 0;
1689     }
1690   if (l->sign < r->sign)
1691     {
1692       *f = *l; /* +ve > -ve */
1693       return 0;
1694     }
1695   ASSERT (l->sign == r->sign);
1696   if (l->normal_exp > r->normal_exp
1697       || (l->normal_exp == r->normal_exp
1698 	  && l->fraction > r->fraction))
1699     {
1700       /* |l| > |r| */
1701       if (l->sign)
1702 	*f = *r; /* -ve < -ve */
1703       else
1704 	*f = *l; /* +ve > +ve */
1705       return 0;
1706     }
1707   else
1708     {
1709       /* |l| <= |r| */
1710       if (l->sign)
1711 	*f = *l; /* -ve > -ve */
1712       else
1713 	*f = *r; /* +ve < +ve */
1714       return 0;
1715     }
1716 }
1717 
1718 
1719 INLINE_SIM_FPU (int)
1720 sim_fpu_min (sim_fpu *f,
1721 	     const sim_fpu *l,
1722 	     const sim_fpu *r)
1723 {
1724   if (sim_fpu_is_snan (l))
1725     {
1726       *f = *l;
1727       f->class = sim_fpu_class_qnan;
1728       return sim_fpu_status_invalid_snan;
1729     }
1730   if (sim_fpu_is_snan (r))
1731     {
1732       *f = *r;
1733       f->class = sim_fpu_class_qnan;
1734       return sim_fpu_status_invalid_snan;
1735     }
1736   if (sim_fpu_is_qnan (l))
1737     {
1738       *f = *l;
1739       return 0;
1740     }
1741   if (sim_fpu_is_qnan (r))
1742     {
1743       *f = *r;
1744       return 0;
1745     }
1746   if (sim_fpu_is_infinity (l))
1747     {
1748       if (sim_fpu_is_infinity (r)
1749 	  && l->sign == r->sign)
1750 	{
1751 	  *f = sim_fpu_qnan;
1752 	  return sim_fpu_status_invalid_isi;
1753 	}
1754       if (l->sign)
1755 	*f = *l; /* -inf < anything */
1756       else
	*f = *r; /* +inf > anything */
1758       return 0;
1759     }
1760   if (sim_fpu_is_infinity (r))
1761     {
1762       if (r->sign)
1763 	*f = *r; /* anything > -inf */
1764       else
1765 	*f = *l; /* anything < +inf */
1766       return 0;
1767     }
1768   if (l->sign > r->sign)
1769     {
1770       *f = *l; /* -ve < +ve */
1771       return 0;
1772     }
1773   if (l->sign < r->sign)
1774     {
1775       *f = *r; /* +ve > -ve */
1776       return 0;
1777     }
1778   ASSERT (l->sign == r->sign);
1779   if (l->normal_exp > r->normal_exp
1780       || (l->normal_exp == r->normal_exp
1781 	  && l->fraction > r->fraction))
1782     {
1783       /* |l| > |r| */
1784       if (l->sign)
1785 	*f = *l; /* -ve < -ve */
1786       else
1787 	*f = *r; /* +ve > +ve */
1788       return 0;
1789     }
1790   else
1791     {
1792       /* |l| <= |r| */
1793       if (l->sign)
1794 	*f = *r; /* -ve > -ve */
1795       else
1796 	*f = *l; /* +ve < +ve */
1797       return 0;
1798     }
1799 }
1800 
1801 
1802 INLINE_SIM_FPU (int)
1803 sim_fpu_neg (sim_fpu *f,
1804 	     const sim_fpu *r)
1805 {
1806   if (sim_fpu_is_snan (r))
1807     {
1808       *f = *r;
1809       f->class = sim_fpu_class_qnan;
1810       return sim_fpu_status_invalid_snan;
1811     }
1812   if (sim_fpu_is_qnan (r))
1813     {
1814       *f = *r;
1815       return 0;
1816     }
1817   *f = *r;
1818   f->sign = !r->sign;
1819   return 0;
1820 }
1821 
1822 
1823 INLINE_SIM_FPU (int)
1824 sim_fpu_abs (sim_fpu *f,
1825 	     const sim_fpu *r)
1826 {
1827   *f = *r;
1828   f->sign = 0;
1829   if (sim_fpu_is_snan (r))
1830     {
1831       f->class = sim_fpu_class_qnan;
1832       return sim_fpu_status_invalid_snan;
1833     }
1834   return 0;
1835 }
1836 
1837 
1838 INLINE_SIM_FPU (int)
1839 sim_fpu_inv (sim_fpu *f,
1840 	     const sim_fpu *r)
1841 {
1842   return sim_fpu_div (f, &sim_fpu_one, r);
1843 }
1844 
1845 
1846 INLINE_SIM_FPU (int)
1847 sim_fpu_sqrt (sim_fpu *f,
1848 	      const sim_fpu *r)
1849 {
1850   if (sim_fpu_is_snan (r))
1851     {
1852       *f = sim_fpu_qnan;
1853       return sim_fpu_status_invalid_snan;
1854     }
1855   if (sim_fpu_is_qnan (r))
1856     {
1857       *f = sim_fpu_qnan;
1858       return 0;
1859     }
1860   if (sim_fpu_is_zero (r))
1861     {
1862       f->class = sim_fpu_class_zero;
1863       f->sign = r->sign;
1864       f->normal_exp = 0;
1865       return 0;
1866     }
1867   if (sim_fpu_is_infinity (r))
1868     {
1869       if (r->sign)
1870 	{
1871 	  *f = sim_fpu_qnan;
1872 	  return sim_fpu_status_invalid_sqrt;
1873 	}
1874       else
1875 	{
1876 	  f->class = sim_fpu_class_infinity;
	  f->sign = 0;
1879 	  return 0;
1880 	}
1881     }
1882   if (r->sign)
1883     {
1884       *f = sim_fpu_qnan;
1885       return sim_fpu_status_invalid_sqrt;
1886     }
1887 
1888   /* @(#)e_sqrt.c 5.1 93/09/24 */
1889   /*
1890    * ====================================================
1891    * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1892    *
1893    * Developed at SunPro, a Sun Microsystems, Inc. business.
1894    * Permission to use, copy, modify, and distribute this
1895    * software is freely granted, provided that this notice
1896    * is preserved.
1897    * ====================================================
1898    */
1899 
1900   /* __ieee754_sqrt(x)
1901    * Return correctly rounded sqrt.
1902    *           ------------------------------------------
1903    *           |  Use the hardware sqrt if you have one |
1904    *           ------------------------------------------
1905    * Method:
1906    *   Bit by bit method using integer arithmetic. (Slow, but portable)
1907    *   1. Normalization
1908    *	Scale x to y in [1,4) with even powers of 2:
1909    *	find an integer k such that  1 <= (y=x*2^(2k)) < 4, then
1910    *		sqrt(x) = 2^k * sqrt(y)
1911    -
1912    - Since:
1913    -   sqrt ( x*2^(2m) )     = sqrt(x).2^m    ; m even
1914    -   sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m  ; m odd
1915    - Define:
1916    -   y = ((m even) ? x : 2.x)
1917    - Then:
1918    -   y in [1, 4)                            ; [IMPLICIT_1,IMPLICIT_4)
1919    - And:
1920    -   sqrt (y) in [1, 2)                     ; [IMPLICIT_1,IMPLICIT_2)
1921    -
1922    *   2. Bit by bit computation
1923    *	Let q  = sqrt(y) truncated to i bit after binary point (q = 1),
1924    *	     i							 0
1925    *                                     i+1         2
1926    *	    s  = 2*q , and	y  =  2   * ( y - q  ).		(1)
1927    *	     i      i            i                 i
1928    *
1929    *	To compute q    from q , one checks whether
1930    *		    i+1       i
1931    *
1932    *			      -(i+1) 2
1933    *			(q + 2      ) <= y.			(2)
1934    *     			  i
1935    *							      -(i+1)
1936    *	If (2) is false, then q   = q ; otherwise q   = q  + 2      .
1937    *		 	       i+1   i             i+1   i
1938    *
1939    *	With some algebraic manipulation, it is not difficult to see
1940    *	that (2) is equivalent to
1941    *                             -(i+1)
1942    *			s  +  2       <= y			(3)
1943    *			 i                i
1944    *
1945    *	The advantage of (3) is that s  and y  can be computed by
1946    *				      i      i
1947    *	the following recurrence formula:
1948    *	    if (3) is false
1949    *
1950    *	    s     =  s  ,	y    = y   ;			(4)
1951    *	     i+1      i		 i+1    i
1952    *
1953    -
1954    -                      NOTE: y    = 2*y
1955    -                             i+1      i
1956    -
1957    *	    otherwise,
1958    *                       -i                      -(i+1)
1959    *	    s	  =  s  + 2  ,  y    = y  -  s  - 2  		(5)
1960    *         i+1      i          i+1    i     i
1961    *
1962    -
1963    -                                                   -(i+1)
1964    -                      NOTE: y    = 2 (y  -  s  -  2      )
1965    -                             i+1       i     i
1966    -
1967    *	One may easily use induction to prove (4) and (5).
   *	Note. Since the left hand side of (3) contains only i+2 bits,
   *	      it is not necessary to do a full (53-bit) comparison
   *	      in (3).
1971    *   3. Final rounding
1972    *	After generating the 53 bits result, we compute one more bit.
1973    *	Together with the remainder, we can decide whether the
1974    *	result is exact, bigger than 1/2ulp, or less than 1/2ulp
1975    *	(it will never equal to 1/2ulp).
1976    *	The rounding mode can be detected by checking whether
1977    *	huge + tiny is equal to huge, and whether huge - tiny is
1978    *	equal to huge for some floating point number "huge" and "tiny".
1979    *
1980    * Special cases:
1981    *	sqrt(+-0) = +-0 	... exact
1982    *	sqrt(inf) = inf
1983    *	sqrt(-ve) = NaN		... with invalid signal
1984    *	sqrt(NaN) = NaN		... with invalid signal for signalling NaN
1985    *
1986    * Other methods : see the appended file at the end of the program below.
1987    *---------------
1988    */
1989 
1990   {
1991     /* Generate sqrt(x) bit by bit.  */
1992     unsigned64 y;
1993     unsigned64 q;
1994     unsigned64 s;
1995     unsigned64 b;
1996 
1997     f->class = sim_fpu_class_number;
1998     f->sign = 0;
1999     y = r->fraction;
2000     f->normal_exp = (r->normal_exp >> 1);	/* exp = [exp/2] */
2001 
2002     /* Odd exp, double x to make it even.  */
2003     ASSERT (y >= IMPLICIT_1 && y < IMPLICIT_4);
2004     if ((r->normal_exp & 1))
2005       {
2006 	y += y;
2007       }
2008     ASSERT (y >= IMPLICIT_1 && y < (IMPLICIT_2 << 1));
2009 
2010     /* Let loop determine first value of s (either 1 or 2) */
2011     b = IMPLICIT_1;
2012     q = 0;
2013     s = 0;
2014 
2015     while (b)
2016       {
2017 	unsigned64 t = s + b;
2018 	if (t <= y)
2019 	  {
2020 	    s |= (b << 1);
2021 	    y -= t;
2022 	    q |= b;
2023 	  }
2024 	y <<= 1;
2025 	b >>= 1;
2026       }
2027 
2028     ASSERT (q >= IMPLICIT_1 && q < IMPLICIT_2);
2029     f->fraction = q;
2030     if (y != 0)
2031       {
2032 	f->fraction |= 1; /* Stick remaining bits.  */
2033 	return sim_fpu_status_inexact;
2034       }
2035     else
2036       return 0;
2037   }
2038 }
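
/* The loop above computes the square root one bit at a time.  A
   stand-alone integer variant of the same shift-and-subtract idea
   (illustration only, not part of the library):  */
#if 0
static unsigned32
example_isqrt (unsigned32 x)
{
  unsigned32 q = 0;			/* result accumulated bit by bit */
  unsigned32 bit = (unsigned32) 1 << 30;	/* highest power of 4 */
  while (bit > x)
    bit >>= 2;
  while (bit != 0)
    {
      if (x >= q + bit)
	{
	  x -= q + bit;
	  q = (q >> 1) + bit;
	}
      else
	q >>= 1;
      bit >>= 2;
    }
  return q;				/* floor (sqrt (x)) */
}
#endif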
2039 
2040 
2041 /* int/long <-> sim_fpu */
2042 
2043 INLINE_SIM_FPU (int)
2044 sim_fpu_i32to (sim_fpu *f,
2045 	       signed32 i,
2046 	       sim_fpu_round round)
2047 {
2048   i2fpu (f, i, 0);
2049   return 0;
2050 }
2051 
2052 INLINE_SIM_FPU (int)
2053 sim_fpu_u32to (sim_fpu *f,
2054 	       unsigned32 u,
2055 	       sim_fpu_round round)
2056 {
2057   u2fpu (f, u, 0);
2058   return 0;
2059 }
2060 
2061 INLINE_SIM_FPU (int)
2062 sim_fpu_i64to (sim_fpu *f,
2063 	       signed64 i,
2064 	       sim_fpu_round round)
2065 {
2066   i2fpu (f, i, 1);
2067   return 0;
2068 }
2069 
2070 INLINE_SIM_FPU (int)
2071 sim_fpu_u64to (sim_fpu *f,
2072 	       unsigned64 u,
2073 	       sim_fpu_round round)
2074 {
2075   u2fpu (f, u, 1);
2076   return 0;
2077 }
2078 
2079 
2080 INLINE_SIM_FPU (int)
2081 sim_fpu_to32i (signed32 *i,
2082 	       const sim_fpu *f,
2083 	       sim_fpu_round round)
2084 {
2085   signed64 i64;
2086   int status = fpu2i (&i64, f, 0, round);
2087   *i = i64;
2088   return status;
2089 }
2090 
2091 INLINE_SIM_FPU (int)
2092 sim_fpu_to32u (unsigned32 *u,
2093 	       const sim_fpu *f,
2094 	       sim_fpu_round round)
2095 {
2096   unsigned64 u64;
2097   int status = fpu2u (&u64, f, 0);
2098   *u = u64;
2099   return status;
2100 }
2101 
2102 INLINE_SIM_FPU (int)
2103 sim_fpu_to64i (signed64 *i,
2104 	       const sim_fpu *f,
2105 	       sim_fpu_round round)
2106 {
2107   return fpu2i (i, f, 1, round);
2108 }
2109 
2110 
2111 INLINE_SIM_FPU (int)
2112 sim_fpu_to64u (unsigned64 *u,
2113 	       const sim_fpu *f,
2114 	       sim_fpu_round round)
2115 {
2116   return fpu2u (u, f, 1);
2117 }



/* sim_fpu -> host format */

#if 0
INLINE_SIM_FPU (float)
sim_fpu_2f (const sim_fpu *f)
{
  /* Pack into host double format, then narrow to float.  */
  sim_fpu_map val;
  val.i = pack_fpu (f, 1);
  return (float) val.d;
}
#endif


INLINE_SIM_FPU (double)
sim_fpu_2d (const sim_fpu *s)
{
  sim_fpu_map val;
  if (sim_fpu_is_snan (s))
    {
      /* Quieten SNaNs.  */
      sim_fpu n = *s;
      n.class = sim_fpu_class_qnan;
      val.i = pack_fpu (&n, 1);
    }
  else
    {
      val.i = pack_fpu (s, 1);
    }
  return val.d;
}


#if 0
INLINE_SIM_FPU (void)
sim_fpu_f2 (sim_fpu *f,
	    float s)
{
  sim_fpu_map val;
  val.d = s;
  unpack_fpu (f, val.i, 1);
}
#endif


INLINE_SIM_FPU (void)
sim_fpu_d2 (sim_fpu *f,
	    double d)
{
  sim_fpu_map val;
  val.d = d;
  unpack_fpu (f, val.i, 1);
}
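
#if 0
/* Illustrative sketch only, not part of the library: unpack a host
   double, inspect it, and pack it again.  Note that sim_fpu_2d above
   deliberately converts signalling NaNs into quiet NaNs, so an SNaN
   bit pattern does not survive the round trip unchanged.  */
static int
example_double_round_trip (double d)
{
  sim_fpu v;
  sim_fpu_d2 (&v, d);			/* host -> internal */
  if (sim_fpu_is_nan (&v))		/* NaN != NaN, so skip the compare */
    return 1;
  return sim_fpu_2d (&v) == d;		/* internal -> host */
}
#endif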


/* General */

INLINE_SIM_FPU (int)
sim_fpu_is_nan (const sim_fpu *d)
{
  switch (d->class)
    {
    case sim_fpu_class_qnan:
    case sim_fpu_class_snan:
      return 1;
    default:
      return 0;
    }
}

INLINE_SIM_FPU (int)
sim_fpu_is_qnan (const sim_fpu *d)
{
  switch (d->class)
    {
    case sim_fpu_class_qnan:
      return 1;
    default:
      return 0;
    }
}

INLINE_SIM_FPU (int)
sim_fpu_is_snan (const sim_fpu *d)
{
  switch (d->class)
    {
    case sim_fpu_class_snan:
      return 1;
    default:
      return 0;
    }
}

INLINE_SIM_FPU (int)
sim_fpu_is_zero (const sim_fpu *d)
{
  switch (d->class)
    {
    case sim_fpu_class_zero:
      return 1;
    default:
      return 0;
    }
}

INLINE_SIM_FPU (int)
sim_fpu_is_infinity (const sim_fpu *d)
{
  switch (d->class)
    {
    case sim_fpu_class_infinity:
      return 1;
    default:
      return 0;
    }
}

INLINE_SIM_FPU (int)
sim_fpu_is_number (const sim_fpu *d)
{
  switch (d->class)
    {
    case sim_fpu_class_denorm:
    case sim_fpu_class_number:
      return 1;
    default:
      return 0;
    }
}

INLINE_SIM_FPU (int)
sim_fpu_is_denorm (const sim_fpu *d)
{
  switch (d->class)
    {
    case sim_fpu_class_denorm:
      return 1;
    default:
      return 0;
    }
}


INLINE_SIM_FPU (int)
sim_fpu_sign (const sim_fpu *d)
{
  return d->sign;
}


INLINE_SIM_FPU (int)
sim_fpu_exp (const sim_fpu *d)
{
  return d->normal_exp;
}


INLINE_SIM_FPU (unsigned64)
sim_fpu_fraction (const sim_fpu *d)
{
  return d->fraction;
}


INLINE_SIM_FPU (unsigned64)
sim_fpu_guard (const sim_fpu *d, int is_double)
{
  unsigned64 rv;
  unsigned64 guardmask = LSMASK64 (NR_GUARDS - 1, 0);
  rv = (d->fraction & guardmask) >> NR_PAD;
  return rv;
}


INLINE_SIM_FPU (int)
sim_fpu_is (const sim_fpu *d)
{
  switch (d->class)
    {
    case sim_fpu_class_qnan:
      return SIM_FPU_IS_QNAN;
    case sim_fpu_class_snan:
      return SIM_FPU_IS_SNAN;
    case sim_fpu_class_infinity:
      if (d->sign)
	return SIM_FPU_IS_NINF;
      else
	return SIM_FPU_IS_PINF;
    case sim_fpu_class_number:
      if (d->sign)
	return SIM_FPU_IS_NNUMBER;
      else
	return SIM_FPU_IS_PNUMBER;
    case sim_fpu_class_denorm:
      if (d->sign)
	return SIM_FPU_IS_NDENORM;
      else
	return SIM_FPU_IS_PDENORM;
    case sim_fpu_class_zero:
      if (d->sign)
	return SIM_FPU_IS_NZERO;
      else
	return SIM_FPU_IS_PZERO;
    default:
      return -1;
    }
}

INLINE_SIM_FPU (int)
sim_fpu_cmp (const sim_fpu *l, const sim_fpu *r)
{
  sim_fpu res;
  sim_fpu_sub (&res, l, r);
  return sim_fpu_is (&res);
}
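
#if 0
/* Illustrative sketch only, not part of the library: sim_fpu_cmp
   classifies the difference L - R, so its result can be decoded with
   the SIM_FPU_IS_* values returned by sim_fpu_is above.  */
static int
example_is_strictly_less (const sim_fpu *l, const sim_fpu *r)
{
  switch (sim_fpu_cmp (l, r))
    {
    case SIM_FPU_IS_NINF:
    case SIM_FPU_IS_NNUMBER:
    case SIM_FPU_IS_NDENORM:
      return 1;			/* l - r is negative, so l < r */
    default:
      return 0;			/* zero, positive, or NaN */
    }
}
#endif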

INLINE_SIM_FPU (int)
sim_fpu_is_lt (const sim_fpu *l, const sim_fpu *r)
{
  int status;
  sim_fpu_lt (&status, l, r);
  return status;
}

INLINE_SIM_FPU (int)
sim_fpu_is_le (const sim_fpu *l, const sim_fpu *r)
{
  int is;
  sim_fpu_le (&is, l, r);
  return is;
}

INLINE_SIM_FPU (int)
sim_fpu_is_eq (const sim_fpu *l, const sim_fpu *r)
{
  int is;
  sim_fpu_eq (&is, l, r);
  return is;
}

INLINE_SIM_FPU (int)
sim_fpu_is_ne (const sim_fpu *l, const sim_fpu *r)
{
  int is;
  sim_fpu_ne (&is, l, r);
  return is;
}

INLINE_SIM_FPU (int)
sim_fpu_is_ge (const sim_fpu *l, const sim_fpu *r)
{
  int is;
  sim_fpu_ge (&is, l, r);
  return is;
}

INLINE_SIM_FPU (int)
sim_fpu_is_gt (const sim_fpu *l, const sim_fpu *r)
{
  int is;
  sim_fpu_gt (&is, l, r);
  return is;
}


/* Compare operators */

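/* Each comparison below packs both operands back into host doubles and
   lets the host do the compare.  When either operand is a NaN the
   result is forced to false and an invalid-operation status (SNaN or
   QNaN as appropriate) is returned instead of a zero status.  */
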
INLINE_SIM_FPU (int)
sim_fpu_lt (int *is,
	    const sim_fpu *l,
	    const sim_fpu *r)
{
  if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
    {
      sim_fpu_map lval;
      sim_fpu_map rval;
      lval.i = pack_fpu (l, 1);
      rval.i = pack_fpu (r, 1);
      (*is) = (lval.d < rval.d);
      return 0;
    }
  else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
    {
      *is = 0;
      return sim_fpu_status_invalid_snan;
    }
  else
    {
      *is = 0;
      return sim_fpu_status_invalid_qnan;
    }
}

INLINE_SIM_FPU (int)
sim_fpu_le (int *is,
	    const sim_fpu *l,
	    const sim_fpu *r)
{
  if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
    {
      sim_fpu_map lval;
      sim_fpu_map rval;
      lval.i = pack_fpu (l, 1);
      rval.i = pack_fpu (r, 1);
      *is = (lval.d <= rval.d);
      return 0;
    }
  else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
    {
      *is = 0;
      return sim_fpu_status_invalid_snan;
    }
  else
    {
      *is = 0;
      return sim_fpu_status_invalid_qnan;
    }
}

INLINE_SIM_FPU (int)
sim_fpu_eq (int *is,
	    const sim_fpu *l,
	    const sim_fpu *r)
{
  if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
    {
      sim_fpu_map lval;
      sim_fpu_map rval;
      lval.i = pack_fpu (l, 1);
      rval.i = pack_fpu (r, 1);
      (*is) = (lval.d == rval.d);
      return 0;
    }
  else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
    {
      *is = 0;
      return sim_fpu_status_invalid_snan;
    }
  else
    {
      *is = 0;
      return sim_fpu_status_invalid_qnan;
    }
}

INLINE_SIM_FPU (int)
sim_fpu_ne (int *is,
	    const sim_fpu *l,
	    const sim_fpu *r)
{
  if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
    {
      sim_fpu_map lval;
      sim_fpu_map rval;
      lval.i = pack_fpu (l, 1);
      rval.i = pack_fpu (r, 1);
      (*is) = (lval.d != rval.d);
      return 0;
    }
  else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
    {
      *is = 0;
      return sim_fpu_status_invalid_snan;
    }
  else
    {
      *is = 0;
      return sim_fpu_status_invalid_qnan;
    }
}

INLINE_SIM_FPU (int)
sim_fpu_ge (int *is,
	    const sim_fpu *l,
	    const sim_fpu *r)
{
  return sim_fpu_le (is, r, l);
}

INLINE_SIM_FPU (int)
sim_fpu_gt (int *is,
	    const sim_fpu *l,
	    const sim_fpu *r)
{
  return sim_fpu_lt (is, r, l);
}


/* A number of useful constants */

#if EXTERN_SIM_FPU_P
const sim_fpu sim_fpu_zero = {
  sim_fpu_class_zero, 0, 0, 0
};
const sim_fpu sim_fpu_qnan = {
  sim_fpu_class_qnan, 0, 0, 0
};
const sim_fpu sim_fpu_one = {
  sim_fpu_class_number, 0, IMPLICIT_1, 0
};
const sim_fpu sim_fpu_two = {
  sim_fpu_class_number, 0, IMPLICIT_1, 1
};
const sim_fpu sim_fpu_max32 = {
  sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS32), NORMAL_EXPMAX32
};
const sim_fpu sim_fpu_max64 = {
  sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS64), NORMAL_EXPMAX64
};
#endif
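
#if 0
/* Illustrative sketch only, not part of the library: the constants
   above can be fed straight to the predicates defined earlier, for
   instance to check a value against the largest finite
   single-precision magnitude held in sim_fpu_max32.  */
static int
example_not_above_float_max (const sim_fpu *v)
{
  /* True when v <= sim_fpu_max32; overflow towards -infinity is not
     checked in this sketch.  */
  return sim_fpu_is_le (v, &sim_fpu_max32);
}
#endif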


/* For debugging */

INLINE_SIM_FPU (void)
sim_fpu_print_fpu (const sim_fpu *f,
		   sim_fpu_print_func *print,
		   void *arg)
{
  sim_fpu_printn_fpu (f, print, -1, arg);
}

INLINE_SIM_FPU (void)
sim_fpu_printn_fpu (const sim_fpu *f,
		    sim_fpu_print_func *print,
		    int digits,
		    void *arg)
{
  print (arg, "%s", f->sign ? "-" : "+");
  switch (f->class)
    {
    case sim_fpu_class_qnan:
      print (arg, "0.");
      print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
      print (arg, "*QuietNaN");
      break;
    case sim_fpu_class_snan:
      print (arg, "0.");
      print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
      print (arg, "*SignalNaN");
      break;
    case sim_fpu_class_zero:
      print (arg, "0.0");
      break;
    case sim_fpu_class_infinity:
      print (arg, "INF");
      break;
    case sim_fpu_class_number:
    case sim_fpu_class_denorm:
      print (arg, "1.");
      print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
      print (arg, "*2^%+d", f->normal_exp);
      ASSERT (f->fraction >= IMPLICIT_1);
      ASSERT (f->fraction < IMPLICIT_2);
    }
}


INLINE_SIM_FPU (void)
sim_fpu_print_status (int status,
		      sim_fpu_print_func *print,
		      void *arg)
{
  int i = 1;
  const char *prefix = "";
  while (status >= i)
    {
      switch ((sim_fpu_status) (status & i))
	{
	case sim_fpu_status_denorm:
	  print (arg, "%sD", prefix);
	  break;
	case sim_fpu_status_invalid_snan:
	  print (arg, "%sSNaN", prefix);
	  break;
	case sim_fpu_status_invalid_qnan:
	  print (arg, "%sQNaN", prefix);
	  break;
	case sim_fpu_status_invalid_isi:
	  print (arg, "%sISI", prefix);
	  break;
	case sim_fpu_status_invalid_idi:
	  print (arg, "%sIDI", prefix);
	  break;
	case sim_fpu_status_invalid_zdz:
	  print (arg, "%sZDZ", prefix);
	  break;
	case sim_fpu_status_invalid_imz:
	  print (arg, "%sIMZ", prefix);
	  break;
	case sim_fpu_status_invalid_cvi:
	  print (arg, "%sCVI", prefix);
	  break;
	case sim_fpu_status_invalid_cmp:
	  print (arg, "%sCMP", prefix);
	  break;
	case sim_fpu_status_invalid_sqrt:
	  print (arg, "%sSQRT", prefix);
	  break;
	case sim_fpu_status_invalid_irx:
	  print (arg, "%sIRX", prefix);
	  break;
	case sim_fpu_status_inexact:
	  print (arg, "%sX", prefix);
	  break;
	case sim_fpu_status_overflow:
	  print (arg, "%sO", prefix);
	  break;
	case sim_fpu_status_underflow:
	  print (arg, "%sU", prefix);
	  break;
	case sim_fpu_status_invalid_div0:
	  print (arg, "%s/", prefix);
	  break;
	case sim_fpu_status_rounded:
	  print (arg, "%sR", prefix);
	  break;
	}
      i <<= 1;
      prefix = ",";
    }
}
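
#if 0
/* Illustrative sketch only, not part of the library: the print
   callbacks above are invoked printf-style with the opaque ARG first,
   so a stdio-based callback (requiring <stdio.h> and <stdarg.h>) can
   look like this.  */
static void
example_print_to_file (void *arg, const char *fmt, ...)
{
  va_list ap;
  va_start (ap, fmt);
  vfprintf ((FILE *) arg, fmt, ap);
  va_end (ap);
}

/* e.g.  sim_fpu_print_status (status, example_print_to_file, stderr);  */
#endif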

#endif