1 /* mpn_mul_1 -- Multiply a limb vector with a single limb and store the
2    product in a second limb vector.
3 
4 Copyright 1991-1994, 1996, 2000-2002 Free Software Foundation, Inc.
5 
6 This file is part of the GNU MP Library.
7 
8 The GNU MP Library is free software; you can redistribute it and/or modify
9 it under the terms of either:
10 
11   * the GNU Lesser General Public License as published by the Free
12     Software Foundation; either version 3 of the License, or (at your
13     option) any later version.
14 
15 or
16 
17   * the GNU General Public License as published by the Free Software
18     Foundation; either version 2 of the License, or (at your option) any
19     later version.
20 
21 or both in parallel, as here.
22 
23 The GNU MP Library is distributed in the hope that it will be useful, but
24 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
25 or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
26 for more details.
27 
28 You should have received copies of the GNU General Public License and the
29 GNU Lesser General Public License along with the GNU MP Library.  If not,
30 see https://www.gnu.org/licenses/.  */
31 
32 #include "gmp-impl.h"
33 #include "longlong.h"
34 
35 
36 #if GMP_NAIL_BITS == 0
37 
38 mp_limb_t
mpn_mul_1(mp_ptr rp,mp_srcptr up,mp_size_t n,mp_limb_t vl)39 mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
40 {
41   mp_limb_t ul, cl, hpl, lpl;
42 
43   ASSERT (n >= 1);
44   ASSERT (MPN_SAME_OR_INCR_P (rp, up, n));
45 
46   cl = 0;
47   do
48     {
49       ul = *up++;
50       umul_ppmm (hpl, lpl, ul, vl);
51 
52       lpl += cl;
53       cl = (lpl < cl) + hpl;
54 
55       *rp++ = lpl;
56     }
57   while (--n != 0);
58 
59   return cl;
60 }
61 
62 #endif
63 
64 #if GMP_NAIL_BITS >= 1
65 
mp_limb_t
mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t shifted_vl, ul, lpl, hpl, prev_hpl, xw, cl, xl;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_INCR_P (rp, up, n));
  ASSERT_MPN (up, n);
  ASSERT_LIMB (vl);

  /* Pre-shift the multiplier by GMP_NAIL_BITS so that after umul_ppmm the
     high product word hpl comes out already reduced to GMP_NUMB_BITS, and
     shifting the low word right by GMP_NAIL_BITS realigns it too.  */
  shifted_vl = vl << GMP_NAIL_BITS;
  cl = 0;
  prev_hpl = 0;
  do
    {
      ul = *up++;

      /* ul * shifted_vl == (ul * vl) << GMP_NAIL_BITS, split into the
	 double-limb pair (hpl, lpl).  */
      umul_ppmm (hpl, lpl, ul, shifted_vl);
      /* Undo the pre-shift on the low word; lpl now holds the low
	 GMP_NUMB_BITS of the product.  */
      lpl >>= GMP_NAIL_BITS;
      /* Accumulate the high word deferred from the previous iteration,
	 this iteration's low word, and the carry.  Each term fits in
	 GMP_NUMB_BITS, so xw cannot overflow a full limb.  */
      xw = prev_hpl + lpl + cl;
      /* Split the sum into the result limb (low GMP_NUMB_BITS) and the
	 carry into the next limb position.  */
      cl = xw >> GMP_NUMB_BITS;
      xl = xw & GMP_NUMB_MASK;
      *rp++ = xl;
      /* Defer this iteration's high word; it belongs one limb position up
	 and is added in on the next pass (or into the return value).  */
      prev_hpl = hpl;
    }
  while (--n != 0);

  /* Return the most significant limb of the full product: the last
     deferred high word plus the final carry.  */
  return prev_hpl + cl;
}
95 
96 #endif
97