/* hgcd_reduce.c.

   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS ONLY
   SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT THEY'LL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2011, 2012 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"
/* Computes R -= A * B. The result must be non-negative. It is
   normalized down (to no less than an limbs), and the resulting size
   is returned. */
static mp_size_t
submul (mp_ptr rp, mp_size_t rn,
	mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
{
  mp_ptr tp;
  TMP_DECL;

  ASSERT (bn > 0);
  ASSERT (an >= bn);
  ASSERT (rn >= an);
  ASSERT (an + bn <= rn + 1);

  TMP_MARK;
  tp = TMP_ALLOC_LIMBS (an + bn);

  mpn_mul (tp, ap, an, bp, bn);
  ASSERT ((an + bn <= rn) || (tp[rn] == 0));
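  /* If the product is one limb longer than R, its top limb must be
     zero, since the difference is non-negative; it then suffices to
     subtract the low rn limbs. */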
  ASSERT_NOCARRY (mpn_sub (rp, rp, rn, tp, an + bn - (an + bn > rn)));
  TMP_FREE;

  while (rn > an && (rp[rn-1] == 0))
    rn--;

  return rn;
}

/* Computes (a, b)  <--  M^{-1} (a; b) */
/* FIXME:
    x Take scratch parameter, and figure out scratch need.

    x Use some fallback for small M->n?
*/
static mp_size_t
hgcd_matrix_apply (const struct hgcd_matrix *M,
		   mp_ptr ap, mp_ptr bp,
		   mp_size_t n)
{
  mp_size_t an, bn, un, vn, nn;
  mp_size_t mn[2][2];
  mp_size_t modn;
  mp_ptr tp, sp, scratch;
  mp_limb_t cy;
  unsigned i, j;

  TMP_DECL;

  ASSERT ( (ap[n-1] | bp[n-1]) > 0);

  an = n;
  MPN_NORMALIZE (ap, an);
  bn = n;
  MPN_NORMALIZE (bp, bn);

  for (i = 0; i < 2; i++)
    for (j = 0; j < 2; j++)
      {
	mp_size_t k;
	k = M->n;
	MPN_NORMALIZE (M->p[i][j], k);
	mn[i][j] = k;
      }

  ASSERT (mn[0][0] > 0);
  ASSERT (mn[1][1] > 0);
  ASSERT ( (mn[0][1] | mn[1][0]) > 0);

  TMP_MARK;

  if (mn[0][1] == 0)
    {
      /* A unchanged, M = (1, 0; q, 1) */
      ASSERT (mn[0][0] == 1);
      ASSERT (M->p[0][0][0] == 1);
      ASSERT (mn[1][1] == 1);
      ASSERT (M->p[1][1][0] == 1);

      /* Put B <-- B - q A */
      nn = submul (bp, bn, ap, an, M->p[1][0], mn[1][0]);
    }
  else if (mn[1][0] == 0)
    {
      /* B unchanged, M = (1, q; 0, 1) */
      ASSERT (mn[0][0] == 1);
      ASSERT (M->p[0][0][0] == 1);
      ASSERT (mn[1][1] == 1);
      ASSERT (M->p[1][1][0] == 1);

      /* Put A <-- A - q * B */
      nn = submul (ap, an, bp, bn, M->p[0][1], mn[0][1]);
    }
  else
    {
      /* A = m00 a + m01 b  ==> a <= A / m00, b <= A / m01.
	 B = m10 a + m11 b  ==> a <= B / m10, b <= B / m11. */
      un = MIN (an - mn[0][0], bn - mn[1][0]) + 1;
      vn = MIN (an - mn[0][1], bn - mn[1][1]) + 1;
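      /* Hence the new a fits in un limbs, and the new b in vn limbs. */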

      nn = MAX (un, vn);
      /* In the range of interest, mulmod_bnm1 should always beat mullo. */
      modn = mpn_mulmod_bnm1_next_size (nn + 1);
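      /* The new a and b fit in nn < modn limbs, so computing them
	 mod 2^{GMP_NUMB_BITS * modn} - 1 gives the exact values. */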

      TMP_ALLOC_LIMBS_3 (tp, modn,
			 sp, modn,
			 scratch, mpn_mulmod_bnm1_itch (modn, modn, M->n));

      ASSERT (n <= 2*modn);

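      /* Reduce a and b mod 2^{GMP_NUMB_BITS * modn} - 1 by folding
	 the high limbs onto the low limbs; a carry out wraps around
	 as an increment. */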
      if (n > modn)
	{
	  cy = mpn_add (ap, ap, modn, ap + modn, n - modn);
	  MPN_INCR_U (ap, modn, cy);

	  cy = mpn_add (bp, bp, modn, bp + modn, n - modn);
	  MPN_INCR_U (bp, modn, cy);

	  n = modn;
	}

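      /* Since det M = 1, M^{-1} = (m11, -m01; -m10, m00), so the new
	 a is m11 * a - m01 * b. A borrow from the subtraction wraps
	 around as a decrement. */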
      mpn_mulmod_bnm1 (tp, modn, ap, n, M->p[1][1], mn[1][1], scratch);
      mpn_mulmod_bnm1 (sp, modn, bp, n, M->p[0][1], mn[0][1], scratch);

      /* FIXME: Handle the small n case in some better way. */
      if (n + mn[1][1] < modn)
	MPN_ZERO (tp + n + mn[1][1], modn - n - mn[1][1]);
      if (n + mn[0][1] < modn)
	MPN_ZERO (sp + n + mn[0][1], modn - n - mn[0][1]);

      cy = mpn_sub_n (tp, tp, sp, modn);
      MPN_DECR_U (tp, modn, cy);

      ASSERT (mpn_zero_p (tp + nn, modn - nn));

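      /* Similarly, the new b is m00 * b - m10 * a. Save the new a
	 from tp before tp is reused for the second difference. */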
      mpn_mulmod_bnm1 (sp, modn, ap, n, M->p[1][0], mn[1][0], scratch);
      MPN_COPY (ap, tp, nn);
      mpn_mulmod_bnm1 (tp, modn, bp, n, M->p[0][0], mn[0][0], scratch);

      if (n + mn[1][0] < modn)
	MPN_ZERO (sp + n + mn[1][0], modn - n - mn[1][0]);
      if (n + mn[0][0] < modn)
	MPN_ZERO (tp + n + mn[0][0], modn - n - mn[0][0]);

      cy = mpn_sub_n (tp, tp, sp, modn);
      MPN_DECR_U (tp, modn, cy);

      ASSERT (mpn_zero_p (tp + nn, modn - nn));
      MPN_COPY (bp, tp, nn);

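      /* Strip high zero limbs common to a and b. Since M is
	 invertible, a and b cannot both be zero, so the loop
	 terminates with nn > 0. */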
      while ( (ap[nn-1] | bp[nn-1]) == 0)
	{
	  nn--;
	  ASSERT (nn > 0);
	}
    }
  TMP_FREE;

  return nn;
}

mp_size_t
mpn_hgcd_reduce_itch (mp_size_t n, mp_size_t p)
{
  mp_size_t itch;
  if (BELOW_THRESHOLD (n, HGCD_REDUCE_THRESHOLD))
    {
      itch = mpn_hgcd_itch (n-p);

      /* For arbitrary p, the storage for _adjust is 2*(p + M->n)
	 = 2*(p + ceil((n-p)/2) - 1) <= n + p - 1. */
      if (itch < n + p - 1)
	itch = n + p - 1;
    }
  else
    {
      itch = 2*(n-p) + mpn_hgcd_itch (n-p);
      /* Currently, hgcd_matrix_apply allocates its own storage. */
    }
  return itch;
}

/* FIXME: Document storage need. */
mp_size_t
mpn_hgcd_reduce (struct hgcd_matrix *M,
		 mp_ptr ap, mp_ptr bp, mp_size_t n, mp_size_t p,
		 mp_ptr tp)
{
  mp_size_t nn;
  if (BELOW_THRESHOLD (n, HGCD_REDUCE_THRESHOLD))
    {
      nn = mpn_hgcd (ap + p, bp + p, n - p, M, tp);
      if (nn > 0)
	/* Needs 2*(p + M->n) <= 2*(floor(n/2) + ceil(n/2) - 1)
	   = 2 (n - 1) */
	return mpn_hgcd_matrix_adjust (M, p + nn, ap, bp, p, tp);
    }
  else
    {
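      /* Copy the high n - p limbs of a and b to scratch, run the
	 approximate hgcd on them, and apply the resulting matrix to
	 the full operands. */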
      MPN_COPY (tp, ap + p, n - p);
      MPN_COPY (tp + n - p, bp + p, n - p);
      if (mpn_hgcd_appr (tp, tp + n - p, n - p, M, tp + 2*(n-p)))
	return hgcd_matrix_apply (M, ap, bp, n);
    }
  return 0;
}