/*	$NetBSD: fpu_mul.c,v 1.2 1999/05/30 20:17:48 briggs Exp $ */
/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_mul.c	8.1 (Berkeley) 6/11/93
 */

/*
 * Perform an FPU multiply (return x * y).
 */

#include <sys/types.h>

#include <machine/reg.h>

#include "fpu_arith.h"
#include "fpu_emulate.h"

/*
 * The multiplication algorithm for normal numbers is as follows:
 *
 * The fraction of the product is built in the usual stepwise fashion.
 * Each step consists of shifting the accumulator right one bit
 * (maintaining any guard bits) and, if the next bit in y is set,
 * adding the multiplicand (x) to the accumulator.  Then, in any case,
 * we advance one bit leftward in y.  Algorithmically:
 *
 *	A = 0;
 *	for (bit = 0; bit < FP_NMANT; bit++) {
 *		sticky |= A & 1, A >>= 1;
 *		if (Y & (1 << bit))
 *			A += X;
 *	}
 *
 * (X and Y here represent the mantissas of x and y respectively.)
 * The resultant accumulator (A) is the product's mantissa.  It may
 * be as large as 11.11111... in binary and hence may need to be
 * shifted right, but at most one bit.
 *
 * Since we do not have efficient multiword arithmetic, we code the
 * accumulator as four separate words, just like any other mantissa.
 * We use local `register' variables in the hope that this is faster
 * than memory.  We keep x->fp_mant in locals for the same reason.
 *
 * In the algorithm above, the bits in y are inspected one at a time.
 * We will pick them up 32 at a time and then deal with those 32, one
 * at a time.  Note, however, that we know several things about y:
 *
 *    - the guard and round bits at the bottom are sure to be zero;
 *
 *    - often many low bits are zero (y is often from a single or double
 *	precision source);
 *
 *    - bit FP_NMANT-1 is set, and FP_1*2 fits in a word.
 *
 * We can also test for 32-zero-bits swiftly.  In this case, the center
 * part of the loop---setting sticky, shifting A, and not adding---will
 * run 32 times without adding X to A.  We can do a 32-bit shift faster
 * by simply moving words.  Since zeros are common, we optimize this case.
 * Furthermore, since A is initially zero, we can omit the shift as well
 * until we reach a nonzero word.
 */
102 struct fpn *
103 fpu_mul(fe)
104 	register struct fpemu *fe;
105 {
106 	register struct fpn *x = &fe->fe_f1, *y = &fe->fe_f2;
107 	register u_int a2, a1, a0, x2, x1, x0, bit, m;
108 	register int sticky;
109 	FPU_DECL_CARRY
110 
111 	/*
112 	 * Put the `heavier' operand on the right (see fpu_emu.h).
113 	 * Then we will have one of the following cases, taken in the
114 	 * following order:
115 	 *
116 	 *  - y = NaN.  Implied: if only one is a signalling NaN, y is.
117 	 *	The result is y.
118 	 *  - y = Inf.  Implied: x != NaN (is 0, number, or Inf: the NaN
119 	 *    case was taken care of earlier).
120 	 *	If x = 0, the result is NaN.  Otherwise the result
121 	 *	is y, with its sign reversed if x is negative.
122 	 *  - x = 0.  Implied: y is 0 or number.
123 	 *	The result is 0 (with XORed sign as usual).
124 	 *  - other.  Implied: both x and y are numbers.
125 	 *	The result is x * y (XOR sign, multiply bits, add exponents).
126 	 */
127 	ORDER(x, y);
128 	if (ISNAN(y)) {
129 		y->fp_sign ^= x->fp_sign;
130 		return (y);
131 	}
132 	if (ISINF(y)) {
133 		if (ISZERO(x))
134 			return (fpu_newnan(fe));
135 		y->fp_sign ^= x->fp_sign;
136 		return (y);
137 	}
138 	if (ISZERO(x)) {
139 		x->fp_sign ^= y->fp_sign;
140 		return (x);
141 	}
142 
143 	/*
144 	 * Setup.  In the code below, the mask `m' will hold the current
145 	 * mantissa byte from y.  The variable `bit' denotes the bit
146 	 * within m.  We also define some macros to deal with everything.
147 	 */
148 	x2 = x->fp_mant[2];
149 	x1 = x->fp_mant[1];
150 	x0 = x->fp_mant[0];
151 	sticky = a2 = a1 = a0 = 0;
152 
153 #define	ADD	/* A += X */ \
154 	FPU_ADDS(a2, a2, x2); \
155 	FPU_ADDCS(a1, a1, x1); \
156 	FPU_ADDC(a0, a0, x0)
157 
158 #define	SHR1	/* A >>= 1, with sticky */ \
159 	sticky |= a2 & 1, \
160 	a2 = (a2 >> 1) | (a1 << 31), a1 = (a1 >> 1) | (a0 << 31), a0 >>= 1
161 
162 #define	SHR32	/* A >>= 32, with sticky */ \
163 	sticky |= a2, a2 = a1, a1 = a0, a0 = 0
164 
165 #define	STEP	/* each 1-bit step of the multiplication */ \
166 	SHR1; if (bit & m) { ADD; }; bit <<= 1
167 
168 	/*
169 	 * We are ready to begin.  The multiply loop runs once for each
170 	 * of the four 32-bit words.  Some words, however, are special.
171 	 * As noted above, the low order bits of Y are often zero.  Even
172 	 * if not, the first loop can certainly skip the guard bits.
173 	 * The last word of y has its highest 1-bit in position FP_NMANT-1,
174 	 * so we stop the loop when we move past that bit.
175 	 */
176 	if ((m = y->fp_mant[2]) == 0) {
177 		/* SHR32; */			/* unneeded since A==0 */
178 	} else {
179 		bit = 1 << FP_NG;
180 		do {
181 			STEP;
182 		} while (bit != 0);
183 	}
184 	if ((m = y->fp_mant[1]) == 0) {
185 		SHR32;
186 	} else {
187 		bit = 1;
188 		do {
189 			STEP;
190 		} while (bit != 0);
191 	}
192 	m = y->fp_mant[0];		/* definitely != 0 */
193 	bit = 1;
194 	do {
195 		STEP;
196 	} while (bit <= m);
197 
198 	/*
199 	 * Done with mantissa calculation.  Get exponent and handle
200 	 * 11.111...1 case, then put result in place.  We reuse x since
201 	 * it already has the right class (FP_NUM).
202 	 */
203 	m = x->fp_exp + y->fp_exp;
204 	if (a0 >= FP_2) {
205 		SHR1;
206 		m++;
207 	}
208 	x->fp_sign ^= y->fp_sign;
209 	x->fp_exp = m;
210 	x->fp_sticky = sticky;
211 	x->fp_mant[2] = a2;
212 	x->fp_mant[1] = a1;
213 	x->fp_mant[0] = a0;
214 	return (x);
215 }
216