/* xref: /dragonfly/contrib/gmp/mpn/generic/submul_1.c (revision 73e0051e) */
/* mpn_submul_1 -- multiply the N-limb vector pointed to by UP by VL,
   subtract the N least significant limbs of the product from the limb
   vector pointed to by RP, and return the most significant limb of the
   product, adjusted for the borrow-out from the subtraction.

Copyright 1992, 1993, 1994, 1996, 2000, 2002, 2004 Free Software Foundation,
Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"
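
/* Illustrative usage sketch (not part of the library source): for an n-limb
   operand {up,n} and an accumulator {rp,n}, mpn_submul_1 performs
   {rp,n} -= {up,n} * vl and returns the most significant limb of the product
   plus any borrow out of the subtraction, so the exact result equals the new
   {rp,n} minus the return value times B^n, where B = 2^GMP_NUMB_BITS.  A
   minimal caller might look roughly like this (arrays and values are purely
   illustrative):

       mp_limb_t r[2] = { 7, 0 };                    ({rp,2} = 7)
       mp_limb_t u[2] = { 3, 0 };                    ({up,2} = 3)
       mp_limb_t ret = mpn_submul_1 (r, u, 2, 2);    (now r = {1, 0}, ret = 0)

   Division code, for example, typically compares this return value against
   the next higher limb of the partial remainder to detect an overestimated
   quotient limb.  */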


#if GMP_NAIL_BITS == 0

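/* Full-limb version (no nail bits).  Each iteration forms the double-limb
   product ul * vl, adds the carry from the previous iteration into the low
   product limb, folds any wrap-around together with the high product limb
   into the next carry, and then subtracts the low product limb from the
   corresponding limb of {rp,n}, counting the borrow into the carry as
   well.  */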
mp_limb_t
mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl, rl;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));

  cl = 0;
  do
    {
      ul = *up++;
      umul_ppmm (hpl, lpl, ul, vl);

      lpl += cl;
      cl = (lpl < cl) + hpl;

      rl = *rp;
      lpl = rl - lpl;
      cl += lpl > rl;
      *rp++ = lpl;
    }
  while (--n != 0);

  return cl;
}

#endif

#if GMP_NAIL_BITS == 1

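/* Nail variant for exactly one nail bit.  vl is pre-shifted left by
   GMP_NAIL_BITS so that, after shifting the low product limb back down, the
   product ul * vl is split at the numb boundary.  The high product limb of
   each iteration is subtracted one iteration later (prev_hpl), and SUBC_LIMB
   yields the individual borrow bits c1..c3, whose sum becomes the carry
   subtracted in the next round.  The final return value is the last deferred
   high limb plus the outstanding carry.  */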
mp_limb_t
mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t shifted_vl, ul, rl, lpl, hpl, prev_hpl, cl, xl, c1, c2, c3;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
  ASSERT_MPN (rp, n);
  ASSERT_MPN (up, n);
  ASSERT_LIMB (vl);

  shifted_vl = vl << GMP_NAIL_BITS;
  cl = 0;
  prev_hpl = 0;
  do
    {
      ul = *up++;
      rl = *rp;
      umul_ppmm (hpl, lpl, ul, shifted_vl);
      lpl >>= GMP_NAIL_BITS;
      SUBC_LIMB (c1, xl, rl, prev_hpl);
      SUBC_LIMB (c2, xl, xl, lpl);
      SUBC_LIMB (c3, xl, xl, cl);
      cl = c1 + c2 + c3;
      *rp++ = xl;
      prev_hpl = hpl;
    }
  while (--n != 0);

  return prev_hpl + cl;
}

#endif

#if GMP_NAIL_BITS >= 2

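/* Nail variant for two or more nail bits.  The product is split at the numb
   boundary as above, with the high product limb again deferred to the next
   iteration (prev_hpl).  With at least two nail bits of headroom, the whole
   difference rl - (prev_hpl + lpl) + cl fits in a signed limb, so the borrow
   can be extracted in one step with an arithmetic right shift (cl ends up
   zero or negative) and the numb part masked out with GMP_NUMB_MASK.  The
   final return value prev_hpl - cl adds the outstanding borrow back onto the
   last deferred high limb.  */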
mp_limb_t
mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t shifted_vl, ul, rl, lpl, hpl, prev_hpl, xw, cl, xl;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
  ASSERT_MPN (rp, n);
  ASSERT_MPN (up, n);
  ASSERT_LIMB (vl);

  shifted_vl = vl << GMP_NAIL_BITS;
  cl = 0;
  prev_hpl = 0;
  do
    {
      ul = *up++;
      rl = *rp;
      umul_ppmm (hpl, lpl, ul, shifted_vl);
      lpl >>= GMP_NAIL_BITS;
      xw = rl - (prev_hpl + lpl) + cl;
      cl = (mp_limb_signed_t) xw >> GMP_NUMB_BITS; /* FIXME: non-portable */
      xl = xw & GMP_NUMB_MASK;
      *rp++ = xl;
      prev_hpl = hpl;
    }
  while (--n != 0);

  return prev_hpl - cl;
}

#endif