/* mpn_mod_1s_3p (ap, n, b, cps)
   Divide {ap, n} by b.  Return the single-limb remainder.
   Requires that b < B / 3.

   Contributed to the GNU project by Torbjorn Granlund.

   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS ONLY
   SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"
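
/* Minimal usage sketch.  The names `ap', `n' and `b' below are illustrative
   only; this is roughly how the generic mpn_mod_1 driver is expected to
   dispatch to these functions: precompute the per-divisor constants once with
   mpn_mod_1s_3p_cps, then pass the divisor pre-shifted by the stored
   normalization count:

     mp_limb_t cps[6], r;
     mpn_mod_1s_3p_cps (cps, b);                    // requires b < B/3
     r = mpn_mod_1s_3p (ap, n, b << cps[1], cps);   // r = {ap, n} mod b
*/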

void
mpn_mod_1s_3p_cps (mp_limb_t cps[6], mp_limb_t b)
{
  mp_limb_t bi;
  mp_limb_t B1modb, B2modb, B3modb, B4modb;
  int cnt;

  ASSERT (b <= (~(mp_limb_t) 0) / 3);

  count_leading_zeros (cnt, b);

  b <<= cnt;
  invert_limb (bi, b);

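  /* B1modb below ends up equal to (B mod b0) << cnt, where b0 denotes the
     divisor before the normalization above: floor (B / b0) is recovered from
     the top cnt bits of bi together with the implicit leading quotient bit.
     The udiv_rnd_preinv chain then yields (B^2 mod b0) << cnt,
     (B^3 mod b0) << cnt and (B^4 mod b0) << cnt.  */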
  B1modb = -b * ((bi >> (GMP_LIMB_BITS-cnt)) | (CNST_LIMB(1) << cnt));
  ASSERT (B1modb <= b);		/* NB: not fully reduced mod b */
  udiv_rnd_preinv (B2modb, B1modb, b, bi);
  udiv_rnd_preinv (B3modb, B2modb, b, bi);
  udiv_rnd_preinv (B4modb, B3modb, b, bi);

  cps[0] = bi;
  cps[1] = cnt;
  cps[2] = B1modb >> cnt;
  cps[3] = B2modb >> cnt;
  cps[4] = B3modb >> cnt;
  cps[5] = B4modb >> cnt;
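
  /* Layout consumed by mpn_mod_1s_3p: cps[0] is the inverse of the normalized
     divisor, cps[1] the normalization count, and cps[2..5] hold B^1, B^2,
     B^3, B^4 mod b for the original (unshifted) b, not necessarily fully
     reduced.  */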

#if WANT_ASSERT
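  /* Check that (B mod b) + (B^2 mod b) + (B^3 mod b) + (B^4 mod b) fits in a
     single limb; this is what allows the five-term accumulation in the main
     loop of mpn_mod_1s_3p to fit in two limbs.  */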
  {
    int i;
    b = cps[2];
    for (i = 3; i <= 5; i++)
      {
	b += cps[i];
	ASSERT (b >= cps[i]);
      }
  }
#endif
}

mp_limb_t
mpn_mod_1s_3p (mp_srcptr ap, mp_size_t n, mp_limb_t b, mp_limb_t cps[6])
{
  mp_limb_t rh, rl, bi, q, ph, pl, ch, cl, r;
  mp_limb_t B1modb, B2modb, B3modb, B4modb;
  mp_size_t i;
  int cnt;

  ASSERT (n >= 1);

  B1modb = cps[2];
  B2modb = cps[3];
  B3modb = cps[4];
  B4modb = cps[5];

  /* We compute n mod 3 in a tricky way, which works except when n is so
     close to the maximum size that such sizes need not be supported.  The
     final cast to int is a workaround for HP cc.  */
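  /* Multiplying by the modular inverse of 3 maps n == 0, 1, 2 (mod 3) to
     values near 0, 2B/3 and B/3 respectively, so the top two bits of the
     product are 0, 2 and 1.  Each case folds the 3, 1 or 2 leading limbs into
     a two-limb residue rh:rl, after which the main loop consumes exactly
     three limbs per iteration.  */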
  switch ((int) ((mp_limb_t) n * MODLIMB_INVERSE_3 >> (GMP_NUMB_BITS - 2)))
    {
    case 0:
      umul_ppmm (ph, pl, ap[n - 2], B1modb);
      add_ssaaaa (ph, pl, ph, pl, 0, ap[n - 3]);
      umul_ppmm (rh, rl, ap[n - 1], B2modb);
      add_ssaaaa (rh, rl, rh, rl, ph, pl);
      n -= 3;
      break;
    case 2:	/* n mod 3 = 1 */
      rh = 0;
      rl = ap[n - 1];
      n -= 1;
      break;
    case 1:	/* n mod 3 = 2 */
      umul_ppmm (ph, pl, ap[n - 1], B1modb);
      add_ssaaaa (rh, rl, ph, pl, 0, ap[n - 2]);
      n -= 2;
      break;
    }

  for (i = n - 3; i >= 0; i -= 3)
    {
      /* rr = ap[i]				< B
	    + ap[i+1] * (B mod b)		<= (B-1)(b-1)
	    + ap[i+2] * (B^2 mod b)		<= (B-1)(b-1)
	    + LO(rr)  * (B^3 mod b)		<= (B-1)(b-1)
	    + HI(rr)  * (B^4 mod b)		<= (B-1)(b-1)
      */
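      /* The sum of these five terms stays below B^2: the check in
	 mpn_mod_1s_3p_cps verifies that the four (B^k mod b) values together
	 fit in one limb, which keeps the accumulation in rh:rl from
	 overflowing two limbs.  */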
      umul_ppmm (ph, pl, ap[i + 1], B1modb);
      add_ssaaaa (ph, pl, ph, pl, 0, ap[i + 0]);

      umul_ppmm (ch, cl, ap[i + 2], B2modb);
      add_ssaaaa (ph, pl, ph, pl, ch, cl);

      umul_ppmm (ch, cl, rl, B3modb);
      add_ssaaaa (ph, pl, ph, pl, ch, cl);

      umul_ppmm (rh, rl, rh, B4modb);
      add_ssaaaa (rh, rl, rh, rl, ph, pl);
    }

  bi = cps[0];
  cnt = cps[1];

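  /* Final reduction of the two-limb residue rh:rl.  The enabled branch folds
     the high limb in as rh * (B mod b) + rl, which is congruent to rh*B + rl
     mod b and small enough to shift up without loss; the disabled alternative
     performs a full 2/1 division up front instead.  */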
#if 1
  umul_ppmm (rh, cl, rh, B1modb);
  add_ssaaaa (rh, rl, rh, rl, 0, cl);
  r = (rh << cnt) | (rl >> (GMP_LIMB_BITS - cnt));
#else
  udiv_qrnnd_preinv (q, r, rh >> (GMP_LIMB_BITS - cnt),
		     (rh << cnt) | (rl >> (GMP_LIMB_BITS - cnt)), b, bi);
  ASSERT (q <= 3);	/* optimize for small quotient? */
#endif

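  /* One 2/1 division of the left-shifted residue by the normalized divisor;
     shifting the remainder back down yields the remainder modulo the
     original b.  */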
  udiv_qrnnd_preinv (q, r, r, rl << cnt, b, bi);

  return r >> cnt;
}