1 /* mpn_toom2_sqr -- Square {ap,an}.
2
3 Contributed to the GNU project by Torbjorn Granlund.
4
5 THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE. IT IS ONLY
6 SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES. IN FACT, IT IS ALMOST
7 GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.
8
9 Copyright 2006-2010, 2012 Free Software Foundation, Inc.
10
11 This file is part of the GNU MP Library.
12
13 The GNU MP Library is free software; you can redistribute it and/or modify
14 it under the terms of either:
15
16 * the GNU Lesser General Public License as published by the Free
17 Software Foundation; either version 3 of the License, or (at your
18 option) any later version.
19
20 or
21
22 * the GNU General Public License as published by the Free Software
23 Foundation; either version 2 of the License, or (at your option) any
24 later version.
25
26 or both in parallel, as here.
27
28 The GNU MP Library is distributed in the hope that it will be useful, but
29 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
30 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
31 for more details.
32
33 You should have received copies of the GNU General Public License and the
34 GNU Lesser General Public License along with the GNU MP Library. If not,
35 see https://www.gnu.org/licenses/. */
36
37
38 #include "gmp.h"
39 #include "gmp-impl.h"
40
41 /* Evaluate in: -1, 0, +inf
42
43 <-s--><--n-->
44 ____ ______
45 |_a1_|___a0_|
46
47 v0 = a0 ^2 # A(0)^2
48 vm1 = (a0- a1)^2 # A(-1)^2
49 vinf= a1 ^2 # A(inf)^2
50 */
51
/* MAYBE_sqr_toom2 tells whether the recursive branch of TOOM2_SQR_REC can
   ever be taken.  With fixed thresholds this is a compile-time constant:
   a recursive operand (roughly half the size) can reach
   SQR_TOOM2_THRESHOLD again only if toom3 has not taken over first.
   When tuning or building a fat binary the thresholds are not
   compile-time constants, so assume the recursion may be needed.  */
#if TUNE_PROGRAM_BUILD || WANT_FAT_BINARY
#define MAYBE_sqr_toom2 1
#else
#define MAYBE_sqr_toom2 \
  (SQR_TOOM3_THRESHOLD >= 2 * SQR_TOOM2_THRESHOLD)
#endif

/* Square {a,n} into {p,2n}, with ws as scratch for the recursive case:
   use the basecase below SQR_TOOM2_THRESHOLD, otherwise recurse.  The
   MAYBE_sqr_toom2 test lets the compiler drop the recursive branch
   entirely when it is statically impossible.  */
#define TOOM2_SQR_REC(p, a, n, ws) \
  do { \
    if (! MAYBE_sqr_toom2 \
        || BELOW_THRESHOLD (n, SQR_TOOM2_THRESHOLD)) \
      mpn_sqr_basecase (p, a, n); \
    else \
      mpn_toom2_sqr (p, a, n, ws); \
  } while (0)
67
/* Square {ap,an}, writing the 2*an limb result to {pp,2*an}, via one
   level of Toom-2 (Karatsuba) splitting.  scratch supplies workspace
   (presumably sized by mpn_toom2_sqr_itch (an) -- confirm against
   gmp-impl.h; at least 2n limbs are used here for vm1, plus whatever
   the recursive calls need).

   Split A = a1*B^n + a0, with B = 2^GMP_NUMB_BITS, n = ceil(an/2),
   s = an - n.  Then with
     v0   = a0^2,  vm1 = (a0 - a1)^2,  vinf = a1^2
   we have  A^2 = v0 + (v0 + vinf - vm1)*B^n + vinf*B^(2n),
   since v0 + vinf - vm1 = 2*a0*a1.  */
void
mpn_toom2_sqr (mp_ptr pp,
               mp_srcptr ap, mp_size_t an,
               mp_ptr scratch)
{
  /* NOTE(review): in fat-binary builds the mpn_* calls below are
     presumably macros that consult __gmpn_cpuvec; this local const
     shadows the global "initialized" flag with a known value so the
     compiler can skip the init check (this function is only reached
     through already-initialized dispatch).  Unused in non-fat builds
     -- confirm against gmp-impl.h.  */
  const int __gmpn_cpuvec_initialized = 1;
  mp_size_t n, s;
  mp_limb_t cy, cy2;
  mp_ptr asm1;

#define a0 ap
#define a1 (ap + n)

  /* Low part a0 gets n limbs, high part a1 gets s limbs; n - s is 0 or 1.  */
  s = an >> 1;
  n = an - s;

  ASSERT (0 < s && s <= n);

  /* |a0 - a1| is built in the low limbs of pp.  It is consumed by the
     vm1 recursion below, before those limbs are overwritten by v0.  */
  asm1 = pp;

  /* Compute asm1 = |a0 - a1|.  The sign is irrelevant since asm1 is
     only ever squared.  */
  if (s == n)
    {
      if (mpn_cmp (a0, a1, n) < 0)
        {
          mpn_sub_n (asm1, a1, a0, n);
        }
      else
        {
          mpn_sub_n (asm1, a0, a1, n);
        }
    }
  else /* n - s == 1 here */
    {
      /* a1 is one limb shorter, so a1 > a0 is possible only when a0's
         top limb is zero.  */
      if (mpn_zero_p (a0 + s, n - s) && mpn_cmp (a0, a1, s) < 0)
        {
          mpn_sub_n (asm1, a1, a0, s);
          MPN_ZERO (asm1 + s, n - s);
        }
      else
        {
          mpn_sub (asm1, a0, n, a1, s);
        }
    }

#define v0 pp /* 2n */
#define vinf (pp + 2 * n) /* s+s */
#define vm1 scratch /* 2n */
#define scratch_out scratch + 2 * n

  /* vm1, 2n limbs.  Must be computed first: it reads asm1 == pp, which
     the v0 square below clobbers.  */
  TOOM2_SQR_REC (vm1, asm1, n, scratch_out);

  /* vinf, s+s limbs */
  TOOM2_SQR_REC (vinf, a1, s, scratch_out);

  /* v0, 2n limbs */
  TOOM2_SQR_REC (v0, ap, n, scratch_out);

  /* Interpolate in place: accumulate v0 + vinf into the middle 2n
     limbs at offset n, then subtract vm1 there, leaving 2*a0*a1*B^n
     folded into the result.  */

  /* H(v0) + L(vinf), stored over L(vinf) at pp + 2n (L(vinf) is read
     before being overwritten).  */
  cy = mpn_add_n (pp + 2 * n, v0 + n, vinf, n);

  /* L(v0) + H(v0) + L(vinf) -> pp + n; cy2 is applied to pp + 2n at
     the end.  */
  cy2 = cy + mpn_add_n (pp + n, pp + 2 * n, v0, n);

  /* H(v0) + L(vinf) + H(vinf) at pp + 2n; H(vinf) has s+s-n limbs.  */
  cy += mpn_add (pp + 2 * n, pp + 2 * n, n, vinf + n, s + s - n);

  /* Subtract vm1 across the middle 2n limbs.  A borrow makes the
     unsigned cy wrap around to (mp_limb_t) -1.  */
  cy -= mpn_sub_n (pp + n, pp + n, vm1, 2 * n);

  ASSERT (cy + 1 <= 3); /* i.e. cy in {-1, 0, 1, 2} modulo the limb */
  ASSERT (cy2 <= 2);

  mpn_incr_u (pp + 2 * n, cy2);
  if (LIKELY (cy <= 2))
    /* Propagate the carry into the high limbs.  */
    mpn_incr_u (pp + 3 * n, cy);
  else
    /* cy wrapped to -1: propagate a borrow instead.  */
    mpn_decr_u (pp + 3 * n, 1);
}
147