/* mpn_toom22_mul -- Multiply {ap,an} and {bp,bn} where an >= bn.  Or more
   accurately, ceil(an/2) < bn <= an, which is what the assertions below
   actually require.

   Contributed to the GNU project by Torbjorn Granlund.

   THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE.  IT IS ONLY
   SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */


#include "gmp.h"
#include "gmp-impl.h"

/* Evaluate in: -1, 0, +inf

  <-s--><--n-->
   ____ ______
  |_a1_|___a0_|
   |b1_|___b0_|
   <-t-><--n-->

  v0  =  a0     * b0       #   A(0)*B(0)
  vm1 = (a0- a1)*(b0- b1)  #  A(-1)*B(-1)
  vinf=      a1 *     b1   # A(inf)*B(inf)
*/
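
/* Interpolation: with X = 2^(n*GMP_NUMB_BITS),

     A*B = vinf*X^2 + (v0 + vinf - vm1)*X + v0

   since vm1 = v0 + vinf - (a0*b1 + a1*b0).  The code keeps |a0-a1| and
   |b0-b1| and records the sign separately in vm1_neg, so a set vm1_neg
   turns the subtraction of vm1 into an addition.  */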

#if TUNE_PROGRAM_BUILD
#define MAYBE_mul_toom22   1
#else
#define MAYBE_mul_toom22						\
  (MUL_TOOM33_THRESHOLD >= 2 * MUL_TOOM22_THRESHOLD)
#endif
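
/* TOOM22_MUL_N_REC recurses only while the operands stay above the
   Karatsuba threshold.  MAYBE_mul_toom22 is a compile-time hint: the
   recursive products are about half the original size, so unless
   MUL_TOOM33_THRESHOLD >= 2 * MUL_TOOM22_THRESHOLD the recursive branch
   is unreachable and the compiler can discard it.  */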

#define TOOM22_MUL_N_REC(p, a, b, n, ws)				\
  do {									\
    if (! MAYBE_mul_toom22						\
	|| BELOW_THRESHOLD (n, MUL_KARATSUBA_THRESHOLD))		\
      mpn_mul_basecase (p, a, n, b, n);					\
    else								\
      mpn_toom22_mul (p, a, n, b, n, ws);				\
  } while (0)

void
mpn_toom22_mul (mp_ptr pp,
		mp_srcptr ap, mp_size_t an,
		mp_srcptr bp, mp_size_t bn,
		mp_ptr scratch)
{
  mp_size_t n, s, t;
  int vm1_neg;
  mp_limb_t cy, cy2;
  mp_ptr asm1;
  mp_ptr bsm1;

#define a0  ap
#define a1  (ap + n)
#define b0  bp
#define b1  (bp + n)

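  /* Split both operands at n = ceil(an/2) limbs: a1 gets the s =
     floor(an/2) high limbs of A, b1 the t = bn - n high limbs of B.  */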
  s = an >> 1;
  n = an - s;
  t = bn - n;

  ASSERT (an >= bn);

  ASSERT (0 < s && s <= n);
  ASSERT (0 < t && t <= s);

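  /* |a0 - a1| and |b0 - b1| are built in the low 2n limbs of the product
     area; both are consumed by the vm1 product below, before v0
     overwrites them.  */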
  asm1 = pp;
  bsm1 = pp + n;

  vm1_neg = 0;

  /* Compute asm1 = |a0 - a1|, recording the sign in vm1_neg.  */
  if (s == n)
    {
      if (mpn_cmp (a0, a1, n) < 0)
	{
	  mpn_sub_n (asm1, a1, a0, n);
	  vm1_neg = 1;
	}
      else
	{
	  mpn_sub_n (asm1, a0, a1, n);
	}
    }
  else
    {
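      /* a1 has only s < n limbs here, so a0 < a1 is possible only when
	 the n-s high limbs of a0 are zero.  */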
      if (mpn_zero_p (a0 + s, n - s) && mpn_cmp (a0, a1, s) < 0)
	{
	  mpn_sub_n (asm1, a1, a0, s);
	  MPN_ZERO (asm1 + s, n - s);
	  vm1_neg = 1;
	}
      else
	{
	  mpn_sub (asm1, a0, n, a1, s);
	}
    }

  /* Compute bsm1 = |b0 - b1|, folding its sign into vm1_neg.  */
  if (t == n)
    {
      if (mpn_cmp (b0, b1, n) < 0)
	{
	  mpn_sub_n (bsm1, b1, b0, n);
	  vm1_neg ^= 1;
	}
      else
	{
	  mpn_sub_n (bsm1, b0, b1, n);
	}
    }
  else
    {
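      /* Mirror of the asm1 computation above, with t in place of s.  */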
      if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
	{
	  mpn_sub_n (bsm1, b1, b0, t);
	  MPN_ZERO (bsm1 + t, n - t);
	  vm1_neg ^= 1;
	}
      else
	{
	  mpn_sub (bsm1, b0, n, b1, t);
	}
    }

#define v0	pp				/* 2n */
#define vinf	(pp + 2 * n)			/* s+t */
#define vm1	scratch				/* 2n */
#define scratch_out	(scratch + 2 * n)
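
/* Scratch layout: vm1 occupies the low 2n limbs of scratch; scratch_out,
   immediately above it, is what the recursive calls get as scratch.  */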

  /* vm1, 2n limbs */
  TOOM22_MUL_N_REC (vm1, asm1, bsm1, n, scratch_out);

  /* vinf, s+t limbs */
  mpn_mul (vinf, a1, s, b1, t);

  /* v0, 2n limbs */
  TOOM22_MUL_N_REC (v0, ap, bp, n, scratch_out);

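  /* Recompose: the full product is vinf*X^2 + (v0 + vinf - vm1)*X + v0
     with X = 2^(n*GMP_NUMB_BITS) (+ vm1 instead when vm1_neg is set).
     v0 and vinf are already in place; the additions below fold the
     middle coefficient into pp[n..3n).  */
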
  /* H(v0) + L(vinf) */
  cy = mpn_add_n (pp + 2 * n, v0 + n, vinf, n);

  /* L(v0) + (H(v0) + L(vinf)) */
  cy2 = cy + mpn_add_n (pp + n, pp + 2 * n, v0, n);

  /* (H(v0) + L(vinf)) + H(vinf) */
  cy += mpn_add (pp + 2 * n, pp + 2 * n, n, vinf + n, s + t - n);

  if (vm1_neg)
    cy += mpn_add_n (pp + n, pp + n, vm1, 2 * n);
  else
    cy -= mpn_sub_n (pp + n, pp + n, vm1, 2 * n);

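  /* cy can end up anywhere in -1..2: two carries were accumulated above
     and the subtraction of vm1 may borrow.  mp_limb_t is unsigned, hence
     the cy + 1 form of the assertion, and a wrapped -1 must be propagated
     into pp[3n..] as a borrow rather than a carry.  */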
  ASSERT (cy + 1 <= 3);
  ASSERT (cy2 <= 2);

  mpn_incr_u (pp + 2 * n, cy2);
  if (LIKELY (cy <= 2))
    mpn_incr_u (pp + 3 * n, cy);
  else
    mpn_decr_u (pp + 3 * n, 1);
}