/* mpn_toom62_mul -- Multiply {ap,an} and {bp,bn} where an is nominally 3 times
   as large as bn.  Or more accurately, (5/2)bn < an < 6bn.

   Contributed to the GNU project by Torbjorn Granlund and Marco Bodrato.

   The idea of applying Toom to unbalanced multiplication is due to Marco
   Bodrato and Alberto Zanoni.

   THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE.  IT IS ONLY
   SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"

/* Evaluate in:
   0, +1, -1, +2, -2, 1/2, +inf

  <-s-><--n--><--n--><--n--><--n--><--n-->
   ___ ______ ______ ______ ______ ______
  |a5_|___a4_|___a3_|___a2_|___a1_|___a0_|
			     |_b1_|___b0_|
			     <-t--><--n-->

  v0  =    a0                       *   b0      #    A(0)*B(0)
  v1  = (  a0+  a1+ a2+ a3+  a4+  a5)*( b0+ b1) #    A(1)*B(1)      ah  <= 5   bh <= 1
  vm1 = (  a0-  a1+ a2- a3+  a4-  a5)*( b0- b1) #   A(-1)*B(-1)    |ah| <= 2   bh  = 0
  v2  = (  a0+ 2a1+4a2+8a3+16a4+32a5)*( b0+2b1) #    A(2)*B(2)      ah  <= 62  bh <= 2
  vm2 = (  a0- 2a1+4a2-8a3+16a4-32a5)*( b0-2b1) #   A(-2)*B(-2)    -41<=ah<=20 -1<=bh<=0
  vh  = (32a0+16a1+8a2+4a3+ 2a4+  a5)*(2b0+ b1) #  A(1/2)*B(1/2)    ah  <= 62  bh <= 2
  vinf=                           a5 *      b1  #  A(inf)*B(inf)
*/
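/* These seven point values determine the seven coefficients of the
   degree-6 product A(x)*B(x) (deg A = 5, deg B = 1); they are combined
   by mpn_toom_interpolate_7pts at the end of the function.  */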

void
mpn_toom62_mul (mp_ptr pp,
		mp_srcptr ap, mp_size_t an,
		mp_srcptr bp, mp_size_t bn,
		mp_ptr scratch)
{
  mp_size_t n, s, t;
  mp_limb_t cy;
  mp_ptr as1, asm1, as2, asm2, ash;
  mp_ptr bs1, bsm1, bs2, bsm2, bsh;
  mp_ptr gp;
  enum toom7_flags aflags, bflags;
  TMP_DECL;

#define a0  ap
#define a1  (ap + n)
#define a2  (ap + 2*n)
#define a3  (ap + 3*n)
#define a4  (ap + 4*n)
#define a5  (ap + 5*n)
#define b0  bp
#define b1  (bp + n)

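  /* Split a into five n-limb pieces plus a final piece of s limbs, and b
     into one n-limb piece plus a final piece of t limbs; n is essentially
     max (ceil (an/6), ceil (bn/2)).  */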
  n = 1 + (an >= 3 * bn ? (an - 1) / (size_t) 6 : (bn - 1) >> 1);

  s = an - 5 * n;
  t = bn - n;

  ASSERT (0 < s && s <= n);
  ASSERT (0 < t && t <= n);

  TMP_MARK;

  as1 = TMP_SALLOC_LIMBS (n + 1);
  asm1 = TMP_SALLOC_LIMBS (n + 1);
  as2 = TMP_SALLOC_LIMBS (n + 1);
  asm2 = TMP_SALLOC_LIMBS (n + 1);
  ash = TMP_SALLOC_LIMBS (n + 1);

  bs1 = TMP_SALLOC_LIMBS (n + 1);
  bsm1 = TMP_SALLOC_LIMBS (n);
  bs2 = TMP_SALLOC_LIMBS (n + 1);
  bsm2 = TMP_SALLOC_LIMBS (n + 1);
  bsh = TMP_SALLOC_LIMBS (n + 1);

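  /* The product area pp is still unused; borrow it as scratch for the
     evaluation functions below.  */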
  gp = pp;

  /* Compute as1 and asm1.  */
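  /* mpn_toom_eval_pm1 and mpn_toom_eval_pm2 store |A(-1)| resp. |A(-2)|
     and return ~0 when the true value is negative, 0 otherwise; ANDing
     the result with a flag bit records the sign for interpolation.  */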
  aflags = toom7_w3_neg & mpn_toom_eval_pm1 (as1, asm1, 5, ap, n, s, gp);

  /* Compute as2 and asm2.  */
  aflags |= toom7_w1_neg & mpn_toom_eval_pm2 (as2, asm2, 5, ap, n, s, gp);

  /* Compute ash = 32 a0 + 16 a1 + 8 a2 + 4 a3 + 2 a4 + a5
     = 2*(2*(2*(2*(2*a0 + a1) + a2) + a3) + a4) + a5  */

#if HAVE_NATIVE_mpn_addlsh1_n
  cy = mpn_addlsh1_n (ash, a1, a0, n);
  cy = 2*cy + mpn_addlsh1_n (ash, a2, ash, n);
  cy = 2*cy + mpn_addlsh1_n (ash, a3, ash, n);
  cy = 2*cy + mpn_addlsh1_n (ash, a4, ash, n);
  if (s < n)
    {
      mp_limb_t cy2;
      cy2 = mpn_addlsh1_n (ash, a5, ash, s);
      ash[n] = 2*cy + mpn_lshift (ash + s, ash + s, n - s, 1);
      MPN_INCR_U (ash + s, n+1-s, cy2);
    }
  else
    ash[n] = 2*cy + mpn_addlsh1_n (ash, a5, ash, n);
#else
  cy = mpn_lshift (ash, a0, n, 1);
  cy += mpn_add_n (ash, ash, a1, n);
  cy = 2*cy + mpn_lshift (ash, ash, n, 1);
  cy += mpn_add_n (ash, ash, a2, n);
  cy = 2*cy + mpn_lshift (ash, ash, n, 1);
  cy += mpn_add_n (ash, ash, a3, n);
  cy = 2*cy + mpn_lshift (ash, ash, n, 1);
  cy += mpn_add_n (ash, ash, a4, n);
  cy = 2*cy + mpn_lshift (ash, ash, n, 1);
  ash[n] = cy + mpn_add (ash, ash, n, a5, s);
#endif

  /* Compute bs1 and bsm1.  */
  if (t == n)
    {
#if HAVE_NATIVE_mpn_add_n_sub_n
      if (mpn_cmp (b0, b1, n) < 0)
	{
	  cy = mpn_add_n_sub_n (bs1, bsm1, b1, b0, n);
	  bflags = toom7_w3_neg;
	}
      else
	{
	  cy = mpn_add_n_sub_n (bs1, bsm1, b0, b1, n);
	  bflags = 0;
	}
      bs1[n] = cy >> 1;
#else
      bs1[n] = mpn_add_n (bs1, b0, b1, n);
      if (mpn_cmp (b0, b1, n) < 0)
	{
	  mpn_sub_n (bsm1, b1, b0, n);
	  bflags = toom7_w3_neg;
	}
      else
	{
	  mpn_sub_n (bsm1, b0, b1, n);
	  bflags = 0;
	}
#endif
    }
  else
    {
      bs1[n] = mpn_add (bs1, b0, n, b1, t);
      if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
	{
	  mpn_sub_n (bsm1, b1, b0, t);
	  MPN_ZERO (bsm1 + t, n - t);
	  bflags = toom7_w3_neg;
	}
      else
	{
	  mpn_sub (bsm1, b0, n, b1, t);
	  bflags = 0;
	}
    }

  /* Compute bs2 and bsm2.  Recycling bs1 and bsm1: bs2 = bs1 + b1,
     bsm2 = bsm1 - b1.  */
  mpn_add (bs2, bs1, n + 1, b1, t);
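  /* For bsm2: if w3 was negated above, bsm1 holds b1 - b0, so adding b1
     yields 2 b1 - b0 = -(b0 - 2 b1), and w1 must be negated as well.  */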
  if (bflags & toom7_w3_neg)
    {
      bsm2[n] = mpn_add (bsm2, bsm1, n, b1, t);
      bflags |= toom7_w1_neg;
    }
  else
    {
      /* FIXME: Simplify this logic? */
      if (t < n)
	{
	  if (mpn_zero_p (bsm1 + t, n - t) && mpn_cmp (bsm1, b1, t) < 0)
	    {
	      ASSERT_NOCARRY (mpn_sub_n (bsm2, b1, bsm1, t));
	      MPN_ZERO (bsm2 + t, n + 1 - t);
	      bflags |= toom7_w1_neg;
	    }
	  else
	    {
	      ASSERT_NOCARRY (mpn_sub (bsm2, bsm1, n, b1, t));
	      bsm2[n] = 0;
	    }
	}
      else
	{
	  if (mpn_cmp (bsm1, b1, n) < 0)
	    {
	      ASSERT_NOCARRY (mpn_sub_n (bsm2, b1, bsm1, n));
	      bflags |= toom7_w1_neg;
	    }
	  else
	    {
	      ASSERT_NOCARRY (mpn_sub (bsm2, bsm1, n, b1, n));
	    }
	  bsm2[n] = 0;
	}
    }

  /* Compute bsh, recycling bs1.  bsh = bs1 + b0.  */
  mpn_add (bsh, bs1, n + 1, b0, n);

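  /* High limbs of the evaluated points; the bounds follow from the
     linear combinations listed at the top of the file.  */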
  ASSERT (as1[n] <= 5);
  ASSERT (bs1[n] <= 1);
  ASSERT (asm1[n] <= 2);
  ASSERT (as2[n] <= 62);
  ASSERT (bs2[n] <= 2);
  ASSERT (asm2[n] <= 41);
  ASSERT (bsm2[n] <= 1);
  ASSERT (ash[n] <= 62);
  ASSERT (bsh[n] <= 2);

#define v0    pp				/* 2n */
#define v1    (pp + 2 * n)			/* 2n+1 */
#define vinf  (pp + 6 * n)			/* s+t */
#define v2    scratch				/* 2n+1 */
#define vm2   (scratch + 2 * n + 1)		/* 2n+1 */
#define vh    (scratch + 4 * n + 2)		/* 2n+1 */
#define vm1   (scratch + 6 * n + 3)		/* 2n+1 */
#define scratch_out (scratch + 8 * n + 4)	/* 2n+1 */
  /* Total scratch need: 10*n+5 */

  /* Must be in allocation order, as they overwrite one limb beyond
   * 2n+1. */
  mpn_mul_n (v2, as2, bs2, n + 1);		/* v2, 2n+1 limbs */
  mpn_mul_n (vm2, asm2, bsm2, n + 1);		/* vm2, 2n+1 limbs */
  mpn_mul_n (vh, ash, bsh, n + 1);		/* vh, 2n+1 limbs */

  /* vm1, 2n+1 limbs */
  mpn_mul_n (vm1, asm1, bsm1, n);
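  /* bsm1 has no high limb, so only asm1's high limb (0, 1 or 2) remains
     to be folded into the upper half of the product.  */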
  cy = 0;
  if (asm1[n] == 1)
    {
      cy = mpn_add_n (vm1 + n, vm1 + n, bsm1, n);
    }
  else if (asm1[n] == 2)
    {
#if HAVE_NATIVE_mpn_addlsh1_n
      cy = mpn_addlsh1_n (vm1 + n, vm1 + n, bsm1, n);
#else
      cy = mpn_addmul_1 (vm1 + n, bsm1, n, CNST_LIMB(2));
#endif
    }
  vm1[2 * n] = cy;

  /* v1, 2n+1 limbs */
  mpn_mul_n (v1, as1, bs1, n);
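  /* Here both high limbs may be nonzero: add as1[n] * bs1 and
     bs1[n] * as1 into the upper half; the cross term as1[n] * bs1[n]
     goes straight into the carry.  */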
  if (as1[n] == 1)
    {
      cy = bs1[n] + mpn_add_n (v1 + n, v1 + n, bs1, n);
    }
  else if (as1[n] == 2)
    {
#if HAVE_NATIVE_mpn_addlsh1_n
      cy = 2 * bs1[n] + mpn_addlsh1_n (v1 + n, v1 + n, bs1, n);
#else
      cy = 2 * bs1[n] + mpn_addmul_1 (v1 + n, bs1, n, CNST_LIMB(2));
#endif
    }
  else if (as1[n] != 0)
    {
      cy = as1[n] * bs1[n] + mpn_addmul_1 (v1 + n, bs1, n, as1[n]);
    }
  else
    cy = 0;
  if (bs1[n] != 0)
    cy += mpn_add_n (v1 + n, v1 + n, as1, n);
  v1[2 * n] = cy;

  mpn_mul_n (v0, a0, b0, n);			/* v0, 2n limbs */

  /* vinf, s+t limbs */
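  /* mpn_mul requires its first operand to be at least as long as the
     second, hence the swap.  */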
  if (s > t)  mpn_mul (vinf, a5, s, b1, t);
  else        mpn_mul (vinf, b1, t, a5, s);

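  /* Each odd-point value is negative precisely when exactly one of the
     two factors was negated, hence the XOR of the sign flags.  */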
  mpn_toom_interpolate_7pts (pp, n, aflags ^ bflags,
			     vm2, vm1, v2, vh, s + t, scratch_out);

  TMP_FREE;
}