/* hgcd_reduce.c.

   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS ONLY
   SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT THEY'LL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2011, 2012 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

/* Computes R -= A * B.  The result must be non-negative.  It is
   normalized down to size an, and the resulting size is returned. */
static mp_size_t
submul (mp_ptr rp, mp_size_t rn,
	mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
{
  mp_ptr tp;
  TMP_DECL;

  ASSERT (bn > 0);
  ASSERT (an >= bn);
  ASSERT (rn >= an);
  ASSERT (an + bn <= rn + 1);

  TMP_MARK;
  tp = TMP_ALLOC_LIMBS (an + bn);

  mpn_mul (tp, ap, an, bp, bn);
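  /* Since 0 <= A * B <= R < B^rn, a product occupying rn + 1 limbs must
     have a zero top limb; drop it so that the subtraction below is given
     at most rn limbs. */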
  if (an + bn > rn)
    {
      ASSERT (tp[rn] == 0);
      bn--;
    }
  ASSERT_NOCARRY (mpn_sub (rp, rp, rn, tp, an + bn));
  TMP_FREE;

  while (rn > an && (rp[rn-1] == 0))
    rn--;

  return rn;
}

/* Computes (a, b)  <--  M^{-1} (a; b) */
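/* The hgcd matrix M has non-negative entries and det M = 1, so
   M^{-1} = (m11, -m01; -m10, m00).  The code below therefore computes
   a' = m11 a - m01 b and b' = m00 b - m10 a, where both differences are
   known to be non-negative. */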
/* FIXME:
    x Take scratch parameter, and figure out scratch need.

    x Use some fallback for small M->n?
*/
static mp_size_t
hgcd_matrix_apply (const struct hgcd_matrix *M,
		   mp_ptr ap, mp_ptr bp,
		   mp_size_t n)
{
  mp_size_t an, bn, un, vn, nn;
  mp_size_t mn[2][2];
  mp_size_t modn;
  mp_ptr tp, sp, scratch;
  mp_limb_t cy;
  unsigned i, j;

  TMP_DECL;

  ASSERT ( (ap[n-1] | bp[n-1]) > 0);

  an = n;
  MPN_NORMALIZE (ap, an);
  bn = n;
  MPN_NORMALIZE (bp, bn);

  for (i = 0; i < 2; i++)
    for (j = 0; j < 2; j++)
      {
	mp_size_t k;
	k = M->n;
	MPN_NORMALIZE (M->p[i][j], k);
	mn[i][j] = k;
      }

  ASSERT (mn[0][0] > 0);
  ASSERT (mn[1][1] > 0);
  ASSERT ( (mn[0][1] | mn[1][0]) > 0);

  TMP_MARK;

  if (mn[0][1] == 0)
    {
      /* A unchanged, M = (1, 0; q, 1) */
      ASSERT (mn[0][0] == 1);
      ASSERT (M->p[0][0][0] == 1);
      ASSERT (mn[1][1] == 1);
      ASSERT (M->p[1][1][0] == 1);

      /* Put B <-- B - q A */
      nn = submul (bp, bn, ap, an, M->p[1][0], mn[1][0]);
    }
  else if (mn[1][0] == 0)
    {
      /* B unchanged, M = (1, q; 0, 1) */
      ASSERT (mn[0][0] == 1);
      ASSERT (M->p[0][0][0] == 1);
      ASSERT (mn[1][1] == 1);
      ASSERT (M->p[1][1][0] == 1);

      /* Put A  <-- A - q * B */
      nn = submul (ap, an, bp, bn, M->p[0][1], mn[0][1]);
    }
  else
    {
      /* A = m00 a + m01 b  ==> a <= A / m00, b <= A / m01.
	 B = m10 a + m11 b  ==> a <= B / m10, b <= B / m11. */
      un = MIN (an - mn[0][0], bn - mn[1][0]) + 1;
      vn = MIN (an - mn[0][1], bn - mn[1][1]) + 1;
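      /* A product of xn- and yn-limb numbers takes at least xn + yn - 1
	 limbs, so m00 a <= A bounds the size of a by an - mn[0][0] + 1,
	 and similarly for the other three products.  Hence a fits in un
	 limbs and b in vn limbs. */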

      nn = MAX (un, vn);
      /* In the range of interest, mulmod_bnm1 should always beat mullo. */
      modn = mpn_mulmod_bnm1_next_size (nn + 1);
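      /* Since modn > nn, the updated a and b fit in nn < modn limbs and
	 hence are smaller than B^modn - 1, so they are recovered exactly
	 from their residues mod B^modn - 1. */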

      scratch = TMP_ALLOC_LIMBS (mpn_mulmod_bnm1_itch (modn, modn, M->n));
      tp = TMP_ALLOC_LIMBS (modn);
      sp = TMP_ALLOC_LIMBS (modn);

      ASSERT (n <= 2*modn);

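      /* Reduce a and b mod B^modn - 1 by folding the high limbs onto the
	 low ones, using B^modn == 1 (mod B^modn - 1); the carry out of
	 the addition is folded back in the same way. */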
      if (n > modn)
	{
	  cy = mpn_add (ap, ap, modn, ap + modn, n - modn);
	  MPN_INCR_U (ap, modn, cy);

	  cy = mpn_add (bp, bp, modn, bp + modn, n - modn);
	  MPN_INCR_U (bp, modn, cy);

	  n = modn;
	}

      mpn_mulmod_bnm1 (tp, modn, ap, n, M->p[1][1], mn[1][1], scratch);
      mpn_mulmod_bnm1 (sp, modn, bp, n, M->p[0][1], mn[0][1], scratch);

      /* FIXME: Handle the small n case in some better way. */
      if (n + mn[1][1] < modn)
	MPN_ZERO (tp + n + mn[1][1], modn - n - mn[1][1]);
      if (n + mn[0][1] < modn)
	MPN_ZERO (sp + n + mn[0][1], modn - n - mn[0][1]);

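      /* Compute a' = m11 a - m01 b (mod B^modn - 1).  A borrow out of
	 mpn_sub_n means the limb array holds t - s + B^modn; decrementing
	 by one replaces B^modn by 1, giving the correct residue. */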
      cy = mpn_sub_n (tp, tp, sp, modn);
      MPN_DECR_U (tp, modn, cy);

      ASSERT (mpn_zero_p (tp + nn, modn - nn));

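      /* Now compute b' = m00 b - m10 a in the same way.  The m10 a
	 product uses the old a one last time, after which ap is free to
	 receive a' from tp. */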
      mpn_mulmod_bnm1 (sp, modn, ap, n, M->p[1][0], mn[1][0], scratch);
      MPN_COPY (ap, tp, nn);
      mpn_mulmod_bnm1 (tp, modn, bp, n, M->p[0][0], mn[0][0], scratch);

      if (n + mn[1][0] < modn)
	MPN_ZERO (sp + n + mn[1][0], modn - n - mn[1][0]);
      if (n + mn[0][0] < modn)
	MPN_ZERO (tp + n + mn[0][0], modn - n - mn[0][0]);

      cy = mpn_sub_n (tp, tp, sp, modn);
      MPN_DECR_U (tp, modn, cy);

      ASSERT (mpn_zero_p (tp + nn, modn - nn));
      MPN_COPY (bp, tp, nn);

      while ( (ap[nn-1] | bp[nn-1]) == 0)
	{
	  nn--;
	  ASSERT (nn > 0);
	}
    }
  TMP_FREE;

  return nn;
}

mp_size_t
mpn_hgcd_reduce_itch (mp_size_t n, mp_size_t p)
{
  mp_size_t itch;
  if (BELOW_THRESHOLD (n, HGCD_REDUCE_THRESHOLD))
    {
      itch = mpn_hgcd_itch (n-p);

      /* For arbitrary p, the storage for _adjust is
	 2*(p + M->n) <= 2*(p + ceil((n-p)/2) - 1) <= n + p - 1 */
      if (itch < n + p - 1)
	itch = n + p - 1;
    }
  else
    {
      itch = 2*(n-p) + mpn_hgcd_itch (n-p);
      /* Currently, hgcd_matrix_apply allocates its own storage. */
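      /* The 2*(n-p) limbs at the start of the scratch area hold the
	 copies of the high parts of a and b that mpn_hgcd_appr operates
	 on; see mpn_hgcd_reduce below. */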
    }
  return itch;
}

/* FIXME: Document storage need. */
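/* Reduces a, b by applying a matrix computed from their n - p most
   significant limbs: below the threshold via mpn_hgcd and
   mpn_hgcd_matrix_adjust, above it via mpn_hgcd_appr and
   hgcd_matrix_apply.  Returns the new size of a, b, or 0 if no
   reduction was possible. */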
mp_size_t
mpn_hgcd_reduce (struct hgcd_matrix *M,
		 mp_ptr ap, mp_ptr bp, mp_size_t n, mp_size_t p,
		 mp_ptr tp)
{
  mp_size_t nn;
  if (BELOW_THRESHOLD (n, HGCD_REDUCE_THRESHOLD))
    {
      nn = mpn_hgcd (ap + p, bp + p, n - p, M, tp);
      if (nn > 0)
	/* Needs 2*(p + M->n) <= 2*(floor(n/2) + ceil(n/2) - 1)
	   = 2 (n - 1) */
	return mpn_hgcd_matrix_adjust (M, p + nn, ap, bp, p, tp);
    }
  else
    {
      MPN_COPY (tp, ap + p, n - p);
      MPN_COPY (tp + n - p, bp + p, n - p);
      if (mpn_hgcd_appr (tp, tp + n - p, n - p, M, tp + 2*(n-p)))
	return hgcd_matrix_apply (M, ap, bp, n);
    }
  return 0;
}