/*	$NetBSD: fpu_subr.c,v 1.3 1996/03/14 19:42:01 christos Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_subr.c	8.1 (Berkeley) 6/11/93
 */

/*
 * FPU subroutines.
 */

#include <sys/types.h>
#ifdef DIAGNOSTIC
#include <sys/systm.h>
#endif

#include <machine/reg.h>
#include <machine/instr.h>

#include <sparc/fpu/fpu_arith.h>
#include <sparc/fpu/fpu_emu.h>
#include <sparc/fpu/fpu_extern.h>

/*
 * Shift the given number right rsh bits.  Any bits that `fall off' will get
 * shoved into the sticky field; we return the resulting sticky.  Note that
 * shifting NaNs is legal (this will never shift all bits out); a NaN's
 * sticky field is ignored anyway.
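 *
 * For example, if the low-order bit of fp_mant[3] is set and rsh is 1,
 * that bit is shifted out and OR'd into the sticky, so the nonzero
 * return value tells the rounding code that the discarded bits were not
 * all zero, i.e., that the result is inexact.  Callers such as fpu_add()
 * rely on this when aligning the smaller operand before an addition.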
 */
int
fpu_shr(register struct fpn *fp, register int rsh)
{
	register u_int m0, m1, m2, m3, s;
	register int lsh;

#ifdef DIAGNOSTIC
	if (rsh <= 0 || (fp->fp_class != FPC_NUM && !ISNAN(fp)))
		panic("fpu_rightshift 1");
#endif

	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	m3 = fp->fp_mant[3];

	/* If shifting all the bits out, take a shortcut. */
	if (rsh >= FP_NMANT) {
#ifdef DIAGNOSTIC
		if ((m0 | m1 | m2 | m3) == 0)
			panic("fpu_rightshift 2");
#endif
		fp->fp_mant[0] = 0;
		fp->fp_mant[1] = 0;
		fp->fp_mant[2] = 0;
		fp->fp_mant[3] = 0;
#ifdef notdef
		if ((m0 | m1 | m2 | m3) == 0)
			fp->fp_class = FPC_ZERO;
		else
#endif
			fp->fp_sticky = 1;
		return (1);
	}

	/* Squish out full words. */
	s = fp->fp_sticky;
	if (rsh >= 32 * 3) {
		s |= m3 | m2 | m1;
		m3 = m0, m2 = 0, m1 = 0, m0 = 0;
	} else if (rsh >= 32 * 2) {
		s |= m3 | m2;
		m3 = m1, m2 = m0, m1 = 0, m0 = 0;
	} else if (rsh >= 32) {
		s |= m3;
		m3 = m2, m2 = m1, m1 = m0, m0 = 0;
	}

	/* Handle any remaining partial word. */
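	/*
	 * The `rsh &= 31' below reduces the count mod 32; inside the block
	 * 0 < rsh < 32, so lsh = 32 - rsh is also in 1..31 and all of the
	 * 32-bit shifts are well defined.
	 */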
	if ((rsh &= 31) != 0) {
		lsh = 32 - rsh;
		s |= m3 << lsh;
		m3 = (m3 >> rsh) | (m2 << lsh);
		m2 = (m2 >> rsh) | (m1 << lsh);
		m1 = (m1 >> rsh) | (m0 << lsh);
		m0 >>= rsh;
	}
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	fp->fp_sticky = s;
	return (s);
}

/*
 * Force a number to be normal, i.e., make its fraction have all zero
 * bits before FP_1, then FP_1, then the rest of the fraction bits.
 * This is used for denorms and (sometimes) for intermediate results.
 *
 * Internally, this may use a `supernormal' -- a number whose fp_mant
 * is greater than or equal to 2.0 -- so as a side effect you can hand it
 * a supernormal and it will fix it (provided fp->fp_mant[3] == 0).
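 *
 * For example, a denormalized intermediate whose leading 1 bit lies five
 * places below the FP_1 position comes back with the mantissa shifted
 * left five bits and fp_exp decreased by five; the value represented is
 * unchanged, only its form is.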
 */
void
fpu_norm(register struct fpn *fp)
{
	register u_int m0, m1, m2, m3, top, sup, nrm;
	register int lsh, rsh, exp;

	exp = fp->fp_exp;
	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	m3 = fp->fp_mant[3];

	/* Handle severe subnormals with 32-bit moves. */
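	/*
	 * Each whole-word move below shifts the mantissa left by 32 bits,
	 * so the exponent drops by 32 for every word skipped; if every
	 * word is zero the number is really a zero, not a denorm.
	 */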
	if (m0 == 0) {
		if (m1)
			m0 = m1, m1 = m2, m2 = m3, m3 = 0, exp -= 32;
		else if (m2)
			m0 = m2, m1 = m3, m2 = 0, m3 = 0, exp -= 2 * 32;
		else if (m3)
			m0 = m3, m1 = 0, m2 = 0, m3 = 0, exp -= 3 * 32;
		else {
			fp->fp_class = FPC_ZERO;
			return;
		}
	}

	/* Now fix any supernormal or remaining subnormal. */
	nrm = FP_1;
	sup = nrm << 1;
	if (m0 >= sup) {
		/*
		 * We have a supernormal number.  We need to shift it right.
		 * We may assume m3==0.
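		 * The loop counts how far the integer part extends above
		 * FP_1; since FP_1 sits well below bit 31 of m0, rsh stays
		 * under 32 and lsh = 32 - rsh is a valid shift count.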
		 */
		for (rsh = 1, top = m0 >> 1; top >= sup; rsh++)	/* XXX slow */
			top >>= 1;
		exp += rsh;
		lsh = 32 - rsh;
		m3 = m2 << lsh;
		m2 = (m2 >> rsh) | (m1 << lsh);
		m1 = (m1 >> rsh) | (m0 << lsh);
		m0 = top;
	} else if (m0 < nrm) {
		/*
		 * We have a regular denorm (a subnormal number), and need
		 * to shift it left.
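		 * The loop stops once the leading 1 bit of m0 reaches the
		 * FP_1 position, so lsh is at most the bit position of FP_1
		 * (well below 32) and rsh = 32 - lsh is a valid shift count.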
		 */
		for (lsh = 1, top = m0 << 1; top < nrm; lsh++)	/* XXX slow */
			top <<= 1;
		exp -= lsh;
		rsh = 32 - lsh;
		m0 = top | (m1 >> rsh);
		m1 = (m1 << lsh) | (m2 >> rsh);
		m2 = (m2 << lsh) | (m3 >> rsh);
		m3 <<= lsh;
	}

	fp->fp_exp = exp;
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
}

/*
 * Concoct a `fresh' Quiet NaN per Appendix N.
 * As a side effect, we set NV (invalid) for the current exceptions.
 */
struct fpn *
fpu_newnan(register struct fpemu *fe)
{
	register struct fpn *fp;

	fe->fe_cx = FSR_NV;
	fp = &fe->fe_f3;
	fp->fp_class = FPC_QNAN;
	fp->fp_sign = 0;
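	/*
	 * Fill the fraction with all 1 bits: FP_1 - 1 sets every mantissa
	 * bit of word 0 below the implicit 1, and the remaining words are
	 * set to ~0, giving the quiet NaN pattern referred to above.
	 */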
	fp->fp_mant[0] = FP_1 - 1;
	fp->fp_mant[1] = fp->fp_mant[2] = fp->fp_mant[3] = ~0;
	return (fp);
}