/*	$OpenBSD: fpu_implode.c,v 1.8 2010/05/09 19:55:43 kettenis Exp $	*/
/*	$NetBSD: fpu_implode.c,v 1.7 2000/08/03 18:32:08 eeh Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_implode.c	8.1 (Berkeley) 6/11/93
 */

/*
 * FPU subroutines: `implode' internal format numbers into the machine's
 * `packed binary' format.
 */

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/ieee.h>
#include <machine/instr.h>
#include <machine/reg.h>

#include <sparc64/fpu/fpu_arith.h>
#include <sparc64/fpu/fpu_emu.h>
#include <sparc64/fpu/fpu_extern.h>

static int fpu_round(register struct fpemu *, register struct fpn *);
static int toinf(struct fpemu *, int);

/*
 * Round a number (algorithm from Motorola MC68882 manual, modified for
 * our internal format).  Set inexact exception if rounding is required.
 * Return true iff we rounded up.
 *
 * After rounding, we discard the guard and round bits by shifting right
 * 2 bits (a la fpu_shr(), but we do not bother with fp->fp_sticky).
 * This saves effort later.
 *
 * Note that we may leave the value 2.0 in fp->fp_mant; it is the caller's
 * responsibility to fix this if necessary.
 */
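/*
 * Round-to-nearest summary (added illustration, not from the original
 * source):
 *	guard == 0                   -> round down (below the midpoint)
 *	guard == 1, round or sticky  -> round up (above the midpoint)
 *	guard == 1, round == sticky == 0
 *	                             -> exact tie; round to even, i.e.,
 *	                                bump only if the low mantissa
 *	                                bit is odd
 */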
static int
fpu_round(register struct fpemu *fe, register struct fpn *fp)
{
	register u_int m0, m1, m2, m3;
	register int gr, s;

	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	m3 = fp->fp_mant[3];
	gr = m3 & 3;
	s = fp->fp_sticky;
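	/*
	 * gr now holds the FP_NG low-order guard/round bits that are
	 * about to be shifted out; s is nonzero iff lower-order bits
	 * were already lost earlier (e.g., in fpu_shr()).
	 */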

	/* mant >>= FP_NG */
	m3 = (m3 >> FP_NG) | (m2 << (32 - FP_NG));
	m2 = (m2 >> FP_NG) | (m1 << (32 - FP_NG));
	m1 = (m1 >> FP_NG) | (m0 << (32 - FP_NG));
	m0 >>= FP_NG;

	if ((gr | s) == 0)	/* result is exact: no rounding needed */
		goto rounddown;

	fe->fe_cx |= FSR_NX;	/* inexact */

	/* Go to rounddown to round down; break to round up. */
	switch ((fe->fe_fsr >> FSR_RD_SHIFT) & FSR_RD_MASK) {

	case FSR_RD_RN:
	default:
		/*
		 * Round only if guard is set (gr & 2).  If guard is set,
		 * but round & sticky both clear, then we want to round
		 * but have a tie, so round to even, i.e., add 1 iff odd.
		 */
		if ((gr & 2) == 0)
			goto rounddown;
		if ((gr & 1) || fp->fp_sticky || (m3 & 1))
			break;
		goto rounddown;

	case FSR_RD_RZ:
		/* Round towards zero, i.e., down. */
		goto rounddown;

	case FSR_RD_RM:
		/* Round towards -Inf: up if negative, down if positive. */
		if (fp->fp_sign)
			break;
		goto rounddown;

	case FSR_RD_RP:
		/* Round towards +Inf: up if positive, down otherwise. */
		if (!fp->fp_sign)
			break;
		goto rounddown;
	}

	/* Bump low bit of mantissa, with carry. */
	FPU_ADDS(m3, m3, 1);
	FPU_ADDCS(m2, m2, 0);
	FPU_ADDCS(m1, m1, 0);
	FPU_ADDC(m0, m0, 0);
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (1);

rounddown:
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (0);
}

/*
 * For overflow: return true if overflow is to go to +/-Inf, according
 * to the sign of the overflowing result.  If false, overflow is to go
 * to the largest magnitude value instead.
 */
static int
toinf(struct fpemu *fe, int sign)
{
	int inf;

	/* look at rounding direction */
	switch ((fe->fe_fsr >> FSR_RD_SHIFT) & FSR_RD_MASK) {

	default:
	case FSR_RD_RN:		/* the nearest value is always Inf */
		inf = 1;
		break;

	case FSR_RD_RZ:		/* toward 0 => never towards Inf */
		inf = 0;
		break;

	case FSR_RD_RP:		/* toward +Inf iff positive */
		inf = sign == 0;
		break;

	case FSR_RD_RM:		/* toward -Inf iff negative */
		inf = sign;
		break;
	}
	return (inf);
}

/*
 * fpn -> int (int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
u_int
fpu_ftoi(fe, fp)
	struct fpemu *fe;
	register struct fpn *fp;
{
	register u_int i;
	register int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		return (0);

	case FPC_NUM:
		/*
		 * If the value is >= 2^32 (exp >= 32), overflow.  Otherwise
		 * shift the value right into the last mantissa word (the
		 * result will not exceed 0xffffffff), shifting any guard
		 * and round bits out into the sticky bit.  Then ``round''
		 * towards zero, i.e., just set an inexact exception if
		 * sticky is set (see fpu_round()).  If the result is
		 * > 0x80000000, or is positive and equals 0x80000000,
		 * overflow; otherwise the last fraction word is the result.
		 */
		if ((exp = fp->fp_exp) >= 32)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FSR_NX;
		i = fp->fp_mant[3];
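		/*
		 * The bound is sign-adjusted: 0x80000000 overflows a
		 * positive result, but is still representable as INT_MIN
		 * when the result is negative (sign == 1).  Likewise the
		 * overflow return below yields INT_MAX or INT_MIN.
		 */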
		if (i >= ((u_int)0x80000000 + sign))
			break;
		return (sign ? -i : i);

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx = (fe->fe_cx & ~FSR_NX) | FSR_NV;
	return (0x7fffffff + sign);
}

/*
 * fpn -> extended int (high bits of int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
u_int
fpu_ftox(fe, fp, res)
	struct fpemu *fe;
	register struct fpn *fp;
	u_int *res;
{
	register u_int64_t i;
	register int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		i = 0;
		goto out;

	case FPC_NUM:
		/*
		 * If the value is >= 2^64 (exp >= 64), overflow.  Otherwise
		 * shift the value right into the last two mantissa words
		 * (the result will not exceed 0xffffffffffffffff), shifting
		 * any guard and round bits out into the sticky bit.  Then
		 * ``round'' towards zero, i.e., just set an inexact
		 * exception if sticky is set (see fpu_round()).  If the
		 * result is > 0x8000000000000000, or is positive and equals
		 * 0x8000000000000000, overflow; otherwise the last two
		 * fraction words form the result.
		 */
		if ((exp = fp->fp_exp) >= 64)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FSR_NX;
		i = ((u_int64_t)fp->fp_mant[2]<<32)|fp->fp_mant[3];
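		/*
		 * Same sign-adjusted bound as in fpu_ftoi(), against the
		 * 64-bit limit: 0x8000000000000000 is acceptable only for
		 * a negative result.
		 */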
		if (i >= ((u_int64_t)0x8000000000000000LL + sign))
			break;
		if (sign)
			i = -i;
		goto out;

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx = (fe->fe_cx & ~FSR_NX) | FSR_NV;
	i = 0x7fffffffffffffffLL + sign;
out:
	res[1] = i & 0xffffffff;
	return (i >> 32);
}

/*
 * fpn -> single (32 bit single returned as return value).
 * We assume <= 29 bits in a single-precision fraction (1.f part).
 */
u_int
fpu_ftos(fe, fp)
	struct fpemu *fe;
	register struct fpn *fp;
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	SNG_EXP(e)	((e) << SNG_FRACBITS)	/* makes e an exponent */
#define	SNG_MASK	(SNG_EXP(1) - 1)	/* mask for fraction */
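	/*
	 * IEEE single layout: 1 sign bit, 8 exponent bits and 23 fraction
	 * bits (SNG_FRACBITS); SNG_EXP() positions a biased exponent just
	 * above the fraction field.
	 */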

	/* Take care of non-numbers first. */
	if (ISNAN(fp)) {
		/*
		 * Preserve upper bits of NaN, per SPARC V8 appendix N.
		 * Note that fp->fp_mant[0] has the quiet bit set,
		 * even if it is classified as a signalling NaN.
		 */
		(void) fpu_shr(fp, FP_NMANT - 1 - SNG_FRACBITS);
		exp = SNG_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp))
		return (sign | SNG_EXP(SNG_EXP_INFNAN));
	if (ISZERO(fp))
		return (sign);

	/*
	 * Normals (including subnormals).  Drop all the fraction bits
	 * (including the explicit ``implied'' 1 bit) down into the
	 * single-precision range.  If the number is subnormal, move
	 * the ``implied'' 1 into the explicit range as well, and shift
	 * right to introduce leading zeroes.  Rounding then acts
	 * differently for normals and subnormals: the largest subnormal
	 * may round to the smallest normal (1.0 x 2^minexp), or may
	 * remain subnormal.  In the latter case, signal an underflow
	 * if the result was inexact or if underflow traps are enabled.
	 *
	 * Rounding a normal, on the other hand, always produces another
	 * normal (although either way the result might be too big for
	 * single precision, and cause an overflow).  If rounding a
	 * normal produces 2.0 in the fraction, we need not adjust that
	 * fraction at all, since both 1.0 and 2.0 are zero under the
	 * fraction mask.
	 *
	 * Note that the guard and round bits vanish from the number after
	 * rounding.
	 */
	if ((exp = fp->fp_exp + SNG_EXP_BIAS) <= 0) {	/* subnormal */
		/* -NG for g,r; -SNG_FRACBITS-exp for fraction */
		(void) fpu_shr(fp, FP_NMANT - FP_NG - SNG_FRACBITS - exp);
		if (fpu_round(fe, fp) && fp->fp_mant[3] == SNG_EXP(1))
			return (sign | SNG_EXP(1) | 0);
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		return (sign | SNG_EXP(0) | fp->fp_mant[3]);
	}
	/* -FP_NG for g,r; -1 for implied 1; -SNG_FRACBITS for fraction */
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - SNG_FRACBITS);
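	/*
	 * After this shift mant[3] holds, from the bottom: FP_NG guard
	 * bits, the SNG_FRACBITS fraction bits, and the implied 1 on top;
	 * the DIAGNOSTIC check below asserts the implied 1 survived.
	 */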
#ifdef DIAGNOSTIC
	if ((fp->fp_mant[3] & SNG_EXP(1 << FP_NG)) == 0)
		panic("fpu_ftos");
#endif
	if (fpu_round(fe, fp) && fp->fp_mant[3] == SNG_EXP(2))
		exp++;
	if (exp >= SNG_EXP_INFNAN) {
		/* overflow to inf or to max single */
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign))
			return (sign | SNG_EXP(SNG_EXP_INFNAN));
		return (sign | SNG_EXP(SNG_EXP_INFNAN - 1) | SNG_MASK);
	}
done:
	/* phew, made it */
	return (sign | SNG_EXP(exp) | (fp->fp_mant[3] & SNG_MASK));
}

/*
 * fpn -> double (32 bit high-order result returned; 32-bit low order result
 * left in res[1]).  Assumes <= 61 bits in double precision fraction.
 *
 * This code mimics fpu_ftos; see it for comments.
 */
u_int
fpu_ftod(fe, fp, res)
	struct fpemu *fe;
	register struct fpn *fp;
	u_int *res;
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	DBL_EXP(e)	((e) << (DBL_FRACBITS & 31))
#define	DBL_MASK	(DBL_EXP(1) - 1)
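	/*
	 * DBL_FRACBITS is 52, so the `& 31' reduces the shift to 20: the
	 * high-order result word holds the sign, the 11 exponent bits and
	 * only the top 20 fraction bits; the low 32 fraction bits go in
	 * res[1].
	 */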

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, FP_NMANT - 1 - DBL_FRACBITS);
		exp = DBL_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= DBL_EXP(DBL_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + DBL_EXP_BIAS) <= 0) {
		(void) fpu_shr(fp, FP_NMANT - FP_NG - DBL_FRACBITS - exp);
		if (fpu_round(fe, fp) && fp->fp_mant[2] == DBL_EXP(1)) {
			res[1] = 0;
			return (sign | DBL_EXP(1) | 0);
		}
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		exp = 0;
		goto done;
	}
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - DBL_FRACBITS);
	if (fpu_round(fe, fp) && fp->fp_mant[2] == DBL_EXP(2))
		exp++;
	if (exp >= DBL_EXP_INFNAN) {
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign)) {
			res[1] = 0;
			return (sign | DBL_EXP(DBL_EXP_INFNAN) | 0);
		}
		res[1] = ~0;
		return (sign | DBL_EXP(DBL_EXP_INFNAN) | DBL_MASK);
	}
done:
	res[1] = fp->fp_mant[3];
	return (sign | DBL_EXP(exp) | (fp->fp_mant[2] & DBL_MASK));
}

/*
 * fpn -> extended (32 bit high-order result returned; low-order fraction
 * words left in res[1]..res[3]).  Like ftod, which is like ftos ... but
 * our internal format *is* extended precision, plus 2 bits for guard/round,
 * so we can avoid a small bit of work.
 */
u_int
fpu_ftoq(fe, fp, res)
	struct fpemu *fe;
	register struct fpn *fp;
	u_int *res;
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	EXT_EXP(e)	((e) << (EXT_FRACBITS & 31))
#define	EXT_MASK	(EXT_EXP(1) - 1)
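	/*
	 * EXT_FRACBITS is 112, so the `& 31' reduces the shift to 16: the
	 * high-order result word holds the sign, the 15 exponent bits and
	 * the top 16 fraction bits; the remaining 96 fraction bits go in
	 * res[1]..res[3].
	 */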

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, 2);	/* since we are not rounding */
		exp = EXT_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= EXT_EXP(EXT_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = res[2] = res[3] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + EXT_EXP_BIAS) <= 0) {
		(void) fpu_shr(fp, FP_NMANT - FP_NG - EXT_FRACBITS - exp);
		if (fpu_round(fe, fp) && fp->fp_mant[0] == EXT_EXP(1)) {
			res[1] = res[2] = res[3] = 0;
			return (sign | EXT_EXP(1) | 0);
		}
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		exp = 0;
		goto done;
	}
	/* Since internal == extended, no need to shift here. */
	if (fpu_round(fe, fp) && fp->fp_mant[0] == EXT_EXP(2))
		exp++;
	if (exp >= EXT_EXP_INFNAN) {
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign)) {
			res[1] = res[2] = res[3] = 0;
			return (sign | EXT_EXP(EXT_EXP_INFNAN) | 0);
		}
		res[1] = res[2] = res[3] = ~0;
		return (sign | EXT_EXP(EXT_EXP_INFNAN) | EXT_MASK);
	}
done:
	res[1] = fp->fp_mant[1];
	res[2] = fp->fp_mant[2];
	res[3] = fp->fp_mant[3];
	return (sign | EXT_EXP(exp) | (fp->fp_mant[0] & EXT_MASK));
}

/*
 * Implode an fpn, writing the result into the given space.
 */
void
fpu_implode(fe, fp, type, space)
	struct fpemu *fe;
	register struct fpn *fp;
	int type;
	register u_int *space;
{
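	/*
	 * Each conversion helper returns the high-order result word; the
	 * 64-bit and wider conversions also fill the low-order words
	 * through `space' before returning, and the returned word is then
	 * stored in space[0].
	 */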
	DPRINTF(FPE_INSN, ("fpu_implode: "));
	switch (type) {

	case FTYPE_LNG:
		space[0] = fpu_ftox(fe, fp, space);
		DPRINTF(FPE_INSN, ("LNG %x %x\n", space[0], space[1]));
		break;

	case FTYPE_INT:
		space[0] = fpu_ftoi(fe, fp);
		DPRINTF(FPE_INSN, ("INT %x\n", space[0]));
		break;

	case FTYPE_SNG:
		space[0] = fpu_ftos(fe, fp);
		DPRINTF(FPE_INSN, ("SNG %x\n", space[0]));
		break;

	case FTYPE_DBL:
		space[0] = fpu_ftod(fe, fp, space);
		DPRINTF(FPE_INSN, ("DBL %x %x\n", space[0], space[1]));
		break;

	case FTYPE_EXT:
		/* funky rounding precision options ?? */
		space[0] = fpu_ftoq(fe, fp, space);
		DPRINTF(FPE_INSN, ("EXT %x %x %x %x\n", space[0], space[1],
		    space[2], space[3]));
		break;

	default:
		panic("fpu_implode");
	}
}
539