xref: /netbsd/sys/arch/powerpc/fpu/fpu_implode.c (revision bf9ec67e)
/*	$NetBSD: fpu_implode.c,v 1.1 2001/06/13 06:01:47 simonb Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_implode.c	8.1 (Berkeley) 6/11/93
 */

/*
 * FPU subroutines: `implode' internal format numbers into the machine's
 * `packed binary' format.
 */

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/ieee.h>
#include <powerpc/instr.h>
#include <machine/reg.h>
#include <machine/fpu.h>

#include <powerpc/fpu/fpu_arith.h>
#include <powerpc/fpu/fpu_emu.h>
#include <powerpc/fpu/fpu_extern.h>

static int round(struct fpemu *, struct fpn *);
static int toinf(struct fpemu *, int);

/*
 * Round a number (algorithm from Motorola MC68882 manual, modified for
 * our internal format).  Set inexact exception if rounding is required.
 * Return true iff we rounded up.
 *
 * After rounding, we discard the guard and round bits by shifting right
 * 2 bits (a la fpu_shr(), but we do not bother with fp->fp_sticky).
 * This saves effort later.
 *
 * Note that we may leave the value 2.0 in fp->fp_mant; it is the caller's
 * responsibility to fix this if necessary.
 */
static int
round(struct fpemu *fe, struct fpn *fp)
{
	u_int m0, m1, m2, m3;
	int gr, s;
	FPU_DECL_CARRY;

	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	m3 = fp->fp_mant[3];
	gr = m3 & 3;
	s = fp->fp_sticky;
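	/*
	 * gr now holds the two low-order (guard and round) bits of the
	 * mantissa and s the sticky bit accumulated by fpu_shr(); together
	 * they decide whether the narrowed result below must be rounded up.
	 */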

	/* mant >>= FP_NG */
	m3 = (m3 >> FP_NG) | (m2 << (32 - FP_NG));
	m2 = (m2 >> FP_NG) | (m1 << (32 - FP_NG));
	m1 = (m1 >> FP_NG) | (m0 << (32 - FP_NG));
	m0 >>= FP_NG;

	if ((gr | s) == 0)	/* result is exact: no rounding needed */
		goto rounddown;

	fe->fe_cx |= FPSCR_XX|FPSCR_FI;	/* inexact */

	/* Go to rounddown to round down; break to round up. */
	switch ((fe->fe_fpscr) & FPSCR_RN) {

	case FSR_RD_RN:
	default:
		/*
		 * Round only if guard is set (gr & 2).  If guard is set,
		 * but round & sticky both clear, then we want to round
		 * but have a tie, so round to even, i.e., add 1 iff odd.
		 */
		if ((gr & 2) == 0)
			goto rounddown;
		if ((gr & 1) || fp->fp_sticky || (m3 & 1))
			break;
		goto rounddown;

	case FSR_RD_RZ:
		/* Round towards zero, i.e., down. */
		goto rounddown;

	case FSR_RD_RM:
		/* Round towards -Inf: up if negative, down if positive. */
		if (fp->fp_sign)
			break;
		goto rounddown;

	case FSR_RD_RP:
		/* Round towards +Inf: up if positive, down otherwise. */
		if (!fp->fp_sign)
			break;
		goto rounddown;
	}

	/* Bump low bit of mantissa, with carry. */
	fe->fe_cx |= FPSCR_FR;

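	/*
	 * FPSCR[FR] was set above because the fraction is being incremented;
	 * the add-with-carry chain below ripples the +1 through all four
	 * mantissa words.
	 */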
	FPU_ADDS(m3, m3, 1);
	FPU_ADDCS(m2, m2, 0);
	FPU_ADDCS(m1, m1, 0);
	FPU_ADDC(m0, m0, 0);
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (1);

rounddown:
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (0);
}

/*
 * For overflow: return true if overflow is to go to +/-Inf, according
 * to the sign of the overflowing result.  If false, overflow is to go
 * to the largest magnitude value instead.
 */
static int
toinf(struct fpemu *fe, int sign)
{
	int inf;

	/* look at rounding direction */
	switch ((fe->fe_fpscr) & FPSCR_RN) {

	default:
	case FSR_RD_RN:		/* the nearest value is always Inf */
		inf = 1;
		break;

	case FSR_RD_RZ:		/* toward 0 => never towards Inf */
		inf = 0;
		break;

	case FSR_RD_RP:		/* toward +Inf iff positive */
		inf = sign == 0;
		break;

	case FSR_RD_RM:		/* toward -Inf iff negative */
		inf = sign;
		break;
	}
	if (inf) fe->fe_cx |= FPSCR_OX;
	return (inf);
}

/*
 * fpn -> int (int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
u_int
fpu_ftoi(struct fpemu *fe, struct fpn *fp)
{
	u_int i;
	int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		return (0);

	case FPC_NUM:
		/*
		 * If exp >= 2^32, overflow.  Otherwise shift value right
		 * into last mantissa word (this will not exceed 0xffffffff),
		 * shifting any guard and round bits out into the sticky
		 * bit.  Then ``round'' towards zero, i.e., just set an
		 * inexact exception if sticky is set (see round()).
		 * If the result is > 0x80000000, or is positive and equals
		 * 0x80000000, overflow; otherwise the last fraction word
		 * is the result.
		 */
		if ((exp = fp->fp_exp) >= 32)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FPSCR_UX;
		i = fp->fp_mant[3];
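		/*
		 * For a positive result the largest representable int is
		 * 0x7fffffff, so i >= 0x80000000 overflows; a negative
		 * result may reach a magnitude of 0x80000000, hence the
		 * "+ sign" in the limit.  The overflow return below uses
		 * the same trick: 0x7fffffff + sign is INT_MAX or INT_MIN.
		 */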
		if (i >= ((u_int)0x80000000 + sign))
			break;
		return (sign ? -i : i);

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx |= FPSCR_VXCVI;
	return (0x7fffffff + sign);
}

/*
 * fpn -> extended int (high bits of int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
u_int
fpu_ftox(struct fpemu *fe, struct fpn *fp, u_int *res)
{
	u_int64_t i;
	int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		res[1] = 0;
		return (0);

	case FPC_NUM:
		/*
		 * If exp >= 2^64, overflow.  Otherwise shift value right
		 * into the last two mantissa words (this will not exceed
		 * 0xffffffffffffffff), shifting any guard and round bits out
		 * into the sticky bit.  Then ``round'' towards zero, i.e.,
		 * just set an inexact exception if sticky is set (see
		 * round()).  If the result is > 0x8000000000000000, or is
		 * positive and equals 0x8000000000000000, overflow;
		 * otherwise the last two fraction words are the result.
		 */
		if ((exp = fp->fp_exp) >= 64)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FPSCR_UX;
		i = ((u_int64_t)fp->fp_mant[2]<<32)|fp->fp_mant[3];
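		/* Same overflow test as in fpu_ftoi(), widened to 64 bits. */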
		if (i >= ((u_int64_t)0x8000000000000000LL + sign))
			break;
		return (sign ? -i : i);

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx |= FPSCR_VXCVI;
	return (0x7fffffffffffffffLL + sign);
}

/*
 * fpn -> single (32 bit single returned as return value).
 * We assume <= 29 bits in a single-precision fraction (1.f part).
 */
u_int
fpu_ftos(struct fpemu *fe, struct fpn *fp)
{
	u_int sign = fp->fp_sign << 31;
	int exp;

#define	SNG_EXP(e)	((e) << SNG_FRACBITS)	/* makes e an exponent */
#define	SNG_MASK	(SNG_EXP(1) - 1)	/* mask for fraction */
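/*
 * SNG_EXP(e) shifts e past the SNG_FRACBITS-bit fraction field (23 bits
 * for IEEE single, per <machine/ieee.h>), placing it in the packed word's
 * exponent field.
 */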

	/* Take care of non-numbers first. */
	if (ISNAN(fp)) {
		/*
		 * Preserve upper bits of NaN, per SPARC V8 appendix N.
		 * Note that fp->fp_mant[0] has the quiet bit set,
		 * even if it is classified as a signalling NaN.
		 */
		(void) fpu_shr(fp, FP_NMANT - 1 - SNG_FRACBITS);
		exp = SNG_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp))
		return (sign | SNG_EXP(SNG_EXP_INFNAN));
	if (ISZERO(fp))
		return (sign);

	/*
	 * Normals (including subnormals).  Drop all the fraction bits
	 * (including the explicit ``implied'' 1 bit) down into the
	 * single-precision range.  If the number is subnormal, move
	 * the ``implied'' 1 into the explicit range as well, and shift
	 * right to introduce leading zeroes.  Rounding then acts
	 * differently for normals and subnormals: the largest subnormal
	 * may round to the smallest normal (1.0 x 2^minexp), or may
	 * remain subnormal.  In the latter case, signal an underflow
	 * if the result was inexact or if underflow traps are enabled.
	 *
	 * Rounding a normal, on the other hand, always produces another
	 * normal (although either way the result might be too big for
	 * single precision, and cause an overflow).  If rounding a
	 * normal produces 2.0 in the fraction, we need not adjust that
	 * fraction at all, since both 1.0 and 2.0 are zero under the
	 * fraction mask.
	 *
	 * Note that the guard and round bits vanish from the number after
	 * rounding.
	 */
	if ((exp = fp->fp_exp + SNG_EXP_BIAS) <= 0) {	/* subnormal */
		/* -NG for g,r; -SNG_FRACBITS-exp for fraction */
		(void) fpu_shr(fp, FP_NMANT - FP_NG - SNG_FRACBITS - exp);
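		/*
		 * If rounding carried into the bit just above the fraction
		 * (fp_mant[3] == SNG_EXP(1)), the subnormal has become the
		 * smallest normal: exponent field 1, fraction all zeroes.
		 */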
		if (round(fe, fp) && fp->fp_mant[3] == SNG_EXP(1))
			return (sign | SNG_EXP(1) | 0);
		if ((fe->fe_cx & FPSCR_FI) ||
		    (fe->fe_fpscr & FPSCR_UX))
			fe->fe_cx |= FPSCR_UX;
		return (sign | SNG_EXP(0) | fp->fp_mant[3]);
	}
	/* -FP_NG for g,r; -1 for implied 1; -SNG_FRACBITS for fraction */
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - SNG_FRACBITS);
#ifdef DIAGNOSTIC
	if ((fp->fp_mant[3] & SNG_EXP(1 << FP_NG)) == 0)
		panic("fpu_ftos");
#endif
	if (round(fe, fp) && fp->fp_mant[3] == SNG_EXP(2))
		exp++;
	if (exp >= SNG_EXP_INFNAN) {
		/* overflow to inf or to max single */
		if (toinf(fe, sign))
			return (sign | SNG_EXP(SNG_EXP_INFNAN));
		return (sign | SNG_EXP(SNG_EXP_INFNAN - 1) | SNG_MASK);
	}
done:
	/* phew, made it */
	return (sign | SNG_EXP(exp) | (fp->fp_mant[3] & SNG_MASK));
}

/*
 * fpn -> double (32 bit high-order result returned; 32-bit low order result
 * left in res[1]).  Assumes <= 61 bits in double precision fraction.
 *
 * This code mimics fpu_ftos; see it for comments.
 */
u_int
fpu_ftod(struct fpemu *fe, struct fpn *fp, u_int *res)
{
	u_int sign = fp->fp_sign << 31;
	int exp;

#define	DBL_EXP(e)	((e) << (DBL_FRACBITS & 31))
#define	DBL_MASK	(DBL_EXP(1) - 1)
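/*
 * DBL_FRACBITS is 52, so the high-order result word holds only its low
 * 20 bits (DBL_FRACBITS & 31); the remaining 32 fraction bits are
 * returned through res[1].
 */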

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, FP_NMANT - 1 - DBL_FRACBITS);
		exp = DBL_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= DBL_EXP(DBL_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + DBL_EXP_BIAS) <= 0) {
		(void) fpu_shr(fp, FP_NMANT - FP_NG - DBL_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[2] == DBL_EXP(1)) {
			res[1] = 0;
			return (sign | DBL_EXP(1) | 0);
		}
		if ((fe->fe_cx & FPSCR_FI) ||
		    (fe->fe_fpscr & FPSCR_UX))
			fe->fe_cx |= FPSCR_UX;
		exp = 0;
		goto done;
	}
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - DBL_FRACBITS);
	if (round(fe, fp) && fp->fp_mant[2] == DBL_EXP(2))
		exp++;
	if (exp >= DBL_EXP_INFNAN) {
		fe->fe_cx |= FPSCR_OX | FPSCR_UX;
		if (toinf(fe, sign)) {
			res[1] = 0;
			return (sign | DBL_EXP(DBL_EXP_INFNAN) | 0);
		}
		res[1] = ~0;
		return (sign | DBL_EXP(DBL_EXP_INFNAN) | DBL_MASK);
	}
done:
	res[1] = fp->fp_mant[3];
	return (sign | DBL_EXP(exp) | (fp->fp_mant[2] & DBL_MASK));
}

/*
 * fpn -> extended (32 bit high-order result returned; low-order fraction
 * words left in res[1]..res[3]).  Like ftod, which is like ftos ... but
 * our internal format *is* extended precision, plus 2 bits for guard/round,
 * so we can avoid a small bit of work.
 */
u_int
fpu_ftoq(struct fpemu *fe, struct fpn *fp, u_int *res)
{
	u_int sign = fp->fp_sign << 31;
	int exp;

#define	EXT_EXP(e)	((e) << (EXT_FRACBITS & 31))
#define	EXT_MASK	(EXT_EXP(1) - 1)

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, 2);	/* since we are not rounding */
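		/*
		 * Shifting by just the two guard/round bits is enough here:
		 * as the header comment says, the internal mantissa is
		 * already extended-precision width apart from those bits.
		 */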
		exp = EXT_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= EXT_EXP(EXT_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = res[2] = res[3] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + EXT_EXP_BIAS) <= 0) {
		(void) fpu_shr(fp, FP_NMANT - FP_NG - EXT_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[0] == EXT_EXP(1)) {
			res[1] = res[2] = res[3] = 0;
			return (sign | EXT_EXP(1) | 0);
		}
		if ((fe->fe_cx & FPSCR_FI) ||
		    (fe->fe_fpscr & FPSCR_UX))
			fe->fe_cx |= FPSCR_UX;
		exp = 0;
		goto done;
	}
	/* Since internal == extended, no need to shift here. */
	if (round(fe, fp) && fp->fp_mant[0] == EXT_EXP(2))
		exp++;
	if (exp >= EXT_EXP_INFNAN) {
		fe->fe_cx |= FPSCR_OX | FPSCR_UX;
		if (toinf(fe, sign)) {
			res[1] = res[2] = res[3] = 0;
			return (sign | EXT_EXP(EXT_EXP_INFNAN) | 0);
		}
		res[1] = res[2] = res[3] = ~0;
		return (sign | EXT_EXP(EXT_EXP_INFNAN) | EXT_MASK);
	}
done:
	res[1] = fp->fp_mant[1];
	res[2] = fp->fp_mant[2];
	res[3] = fp->fp_mant[3];
	return (sign | EXT_EXP(exp) | (fp->fp_mant[0] & EXT_MASK));
}

/*
 * Implode an fpn, writing the result into the given space.
 */
void
fpu_implode(struct fpemu *fe, struct fpn *fp, int type, u_int *space)
{

	switch (type) {

	case FTYPE_LNG:
		space[0] = fpu_ftox(fe, fp, space);
		DPRINTF(FPE_REG, ("fpu_implode: long %x %x\n",
			space[0], space[1]));
		break;

	case FTYPE_INT:
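		/*
		 * The 32-bit integer result goes in the low word of the
		 * destination pair and the high word is cleared; presumably
		 * this mirrors how fctiw/fctiwz leave their result in the
		 * low half of an FPR.
		 */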
		space[0] = 0;
		space[1] = fpu_ftoi(fe, fp);
		DPRINTF(FPE_REG, ("fpu_implode: int %x\n",
			space[1]));
		break;

	case FTYPE_SNG:
		space[0] = fpu_ftos(fe, fp);
		DPRINTF(FPE_REG, ("fpu_implode: single %x\n",
			space[0]));
		break;

	case FTYPE_DBL:
		space[0] = fpu_ftod(fe, fp, space);
		DPRINTF(FPE_REG, ("fpu_implode: double %x %x\n",
			space[0], space[1]));
		break;

	case FTYPE_EXT:
		/* funky rounding precision options ?? */
		space[0] = fpu_ftoq(fe, fp, space);
		DPRINTF(FPE_REG, ("fpu_implode: long double %x %x %x %x\n",
			space[0], space[1], space[2], space[3]));
		break;

	default:
		panic("fpu_implode: invalid type %d", type);
	}
}