/*	$NetBSD: muldi3.c,v 1.2 2009/03/15 22:31:12 cegger Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)muldi3.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: muldi3.c,v 1.2 2009/03/15 22:31:12 cegger Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

#include "quad.h"

/*
 * Multiply two quads.
 *
 * Our algorithm is based on the following.  Split incoming quad values
 * u and v (where u,v >= 0) into
 *
 *	u = 2^n u1  +  u0	(n = number of bits in `u_int', usu. 32)
 *
 * and
 *
 *	v = 2^n v1  +  v0
 *
 * Then
 *
 *	uv = 2^2n u1 v1  +  2^n u1 v0  +  2^n v1 u0  +  u0 v0
 *	   = 2^2n u1 v1  +     2^n (u1 v0 + v1 u0)   +  u0 v0
 *
 * Now add 2^n u1 v1 to the first term and subtract it from the middle,
 * and add 2^n u0 v0 to the last term and subtract it from the middle.
 * This gives:
 *
 *	uv = (2^2n + 2^n) (u1 v1)  +
 *	         (2^n)    (u1 v0 - u1 v1 + u0 v1 - u0 v0)  +
 *	       (2^n + 1)  (u0 v0)
 *
 * Factoring the middle a bit gives us:
 *
 *	uv = (2^2n + 2^n) (u1 v1)  +			[u1v1 = high]
 *		 (2^n)    (u1 - u0) (v0 - v1)  +	[(u1-u0)... = mid]
 *	       (2^n + 1)  (u0 v0)			[u0v0 = low]
 *
 * The terms (u1 v1), (u1 - u0) (v0 - v1), and (u0 v0) can all be done
 * in just half the precision of the original.  (Note that either or both
 * of (u1 - u0) or (v0 - v1) may be negative.)
 *
 * This algorithm is from Knuth vol. 2 (2nd ed), section 4.3.3, p. 278.
 *
 * Since C does not give us an `int * int = quad' operator, we split
 * our input quads into two ints, then split the two ints into two
 * shorts.  We can then calculate `short * short = int' in native
 * arithmetic.
 *
 * Our product should, strictly speaking, be a `long quad', with 128
 * bits, but we are going to discard the upper 64.  In other words,
 * we are not interested in uv, but rather in (uv mod 2^2n).  This
 * makes some of the terms above vanish, and we get:
 *
 *	(2^n)(high) + (2^n)(mid) + (2^n + 1)(low)
 *
 * or
 *
 *	(2^n)(high + mid + low) + low
 *
 * Furthermore, `high' and `mid' can be computed mod 2^n, as any factor
 * of 2^n in either one will also vanish.  Only `low' need be computed
 * mod 2^2n, and only because of the final term above.
 */
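/*
 * A small worked example of the identity above (illustrative only, not
 * part of the original derivation): take n = 4, u = 0x2b (u1 = 2,
 * u0 = 0xb) and v = 0x36 (v1 = 3, v0 = 6).  Then
 *
 *	high = u1 v1 = 6
 *	mid  = (u1 - u0)(v0 - v1) = (-9)(3) = -27	(negative)
 *	low  = u0 v0 = 66
 *
 * and (2^8 + 2^4)(6) + (2^4)(-27) + (2^4 + 1)(66) = 1632 - 432 + 1122
 * = 2322 = 0x2b * 0x36, as expected.
 */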
static quad_t __lmulq(u_int, u_int);

quad_t
__muldi3(quad_t a, quad_t b)
{
	union uu u, v, low, prod;
	u_int high, mid, udiff, vdiff;
	int negall, negmid;
#define	u1	u.ul[H]
#define	u0	u.ul[L]
#define	v1	v.ul[H]
#define	v0	v.ul[L]

	/*
	 * Get u and v such that u, v >= 0.  When this is finished,
	 * u1, u0, v1, and v0 will be directly accessible through the
	 * int fields.
	 */
	if (a >= 0)
		u.q = a, negall = 0;
	else
		u.q = -a, negall = 1;
	if (b >= 0)
		v.q = b;
	else
		v.q = -b, negall ^= 1;

	if (u1 == 0 && v1 == 0) {
		/*
		 * An (I hope) important optimization occurs when u1 and v1
		 * are both 0.  This should be common since most numbers
		 * are small.  Here the product is just u0*v0.
		 */
		prod.q = __lmulq(u0, v0);
	} else {
		/*
		 * Compute the three intermediate products, remembering
		 * whether the middle term is negative.  We can discard
		 * any upper bits in high and mid, so we can use native
		 * u_int * u_int => u_int arithmetic.
		 */
		low.q = __lmulq(u0, v0);

		if (u1 >= u0)
			negmid = 0, udiff = u1 - u0;
		else
			negmid = 1, udiff = u0 - u1;
		if (v0 >= v1)
			vdiff = v0 - v1;
		else
			vdiff = v1 - v0, negmid ^= 1;
		mid = udiff * vdiff;

		high = u1 * v1;

		/*
		 * Assemble the final product.
		 */
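		/*
		 * From the reduced form (2^n)(high + mid + low) + low:
		 * taken mod 2^2n, the 2^n * low term contributes only
		 * low.ul[L] to the high word, while the trailing "+ low"
		 * supplies low.ul[H] there and low.ul[L] as the low word
		 * (which cannot carry, since the other low words are
		 * zero).
		 */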
		prod.ul[H] = high + (negmid ? -mid : mid) + low.ul[L] +
		    low.ul[H];
		prod.ul[L] = low.ul[L];
	}
	return (negall ? -prod.q : prod.q);
#undef u1
#undef u0
#undef v1
#undef v0
}

/*
 * Multiply two 2N-bit ints to produce a 4N-bit quad, where N is half
 * the number of bits in an int (whatever that is---the code below
 * does not care as long as quad.h does its part of the bargain---but
 * typically N==16).
 *
 * We use the same algorithm from Knuth, but this time the modulo refinement
 * does not apply.  On the other hand, since N is half the size of an int,
 * we can get away with native multiplication---none of our input terms
 * exceeds (UINT_MAX >> 1).
 *
 * Note that, for u_int l, the quad-precision result
 *
 *	l << N
 *
 * splits into high and low ints as HHALF(l) and LHUP(l) respectively.
 */
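/*
 * For reference, quad.h's half-word macros are expected to behave
 * roughly as follows on the usual 32-bit u_int configuration (the
 * exact definitions live in quad.h, not here):
 *
 *	HHALF(x)	upper N bits of x	(x >> HALF_BITS)
 *	LHALF(x)	lower N bits of x	(x & ((1 << HALF_BITS) - 1))
 *	LHUP(x)		low half of x moved up	(x << HALF_BITS)
 */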
static quad_t
__lmulq(u_int u, u_int v)
{
	u_int u1, u0, v1, v0, udiff, vdiff, high, mid, low;
	u_int prodh, prodl, was;
	union uu prod;
	int neg;

	u1 = HHALF(u);
	u0 = LHALF(u);
	v1 = HHALF(v);
	v0 = LHALF(v);

	low = u0 * v0;

	/* This is the same small-number optimization as before. */
	if (u1 == 0 && v1 == 0)
		return (low);

	if (u1 >= u0)
		udiff = u1 - u0, neg = 0;
	else
		udiff = u0 - u1, neg = 1;
	if (v0 >= v1)
		vdiff = v0 - v1;
	else
		vdiff = v1 - v0, neg ^= 1;
	mid = udiff * vdiff;

	high = u1 * v1;

	/* prod = (high << 2N) + (high << N); */
	prodh = high + HHALF(high);
	prodl = LHUP(high);
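	/*
	 * From here on, the 4N-bit product is kept by hand in the int
	 * pair (prodh, prodl).  After each add or subtract on prodl, a
	 * carry (or borrow) out of the low int is detected by comparing
	 * the new prodl against its saved previous value `was', and is
	 * then propagated into prodh.
	 */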

	/* if (neg) prod -= mid << N; else prod += mid << N; */
	if (neg) {
		was = prodl;
		prodl -= LHUP(mid);
		prodh -= HHALF(mid) + (prodl > was);
	} else {
		was = prodl;
		prodl += LHUP(mid);
		prodh += HHALF(mid) + (prodl < was);
	}

	/* prod += low << N */
	was = prodl;
	prodl += LHUP(low);
	prodh += HHALF(low) + (prodl < was);
	/* ... + low; */
	if ((prodl += low) < low)
		prodh++;

	/* return 4N-bit product */
	prod.ul[H] = prodh;
	prod.ul[L] = prodl;
	return (prod.q);
}
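
/*
 * Illustrative sanity checks (assuming the usual 32-bit u_int /
 * 64-bit quad_t configuration; not part of the library itself):
 *
 *	__muldi3(0x100000002LL, 3)	== 0x300000006LL
 *	__muldi3(-5, 7)			== -35
 *
 * The first exercises the general (u1 or v1 nonzero) path; the second
 * exercises the small-number path plus the sign fixup on negall.
 */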