/* mpf_mul_2exp -- Multiply a float by 2^n.

Copyright 1993, 1994, 1996, 2000, 2001, 2002, 2004 Free Software Foundation,
Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"


/* Multiples of GMP_NUMB_BITS in exp simply mean an amount added to EXP(u)
   to set EXP(r).  The remainder exp%GMP_NUMB_BITS is then a left shift for
   the limb data.

   If exp%GMP_NUMB_BITS == 0 then there's no shifting; we effectively just
   do an mpf_set with changed EXP(r).  Like mpf_set we take prec+1 limbs in
   this case.  Although just prec would suffice, it's nice to have
   mpf_mul_2exp with exp==0 come out the same as mpf_set.

   When shifting we take up to prec many limbs from the input.  Our shift is
   cy = mpn_lshift (PTR(r), PTR(u)+k, size, ...), where k is the number of
   low limbs dropped from u, and the carry out is stored to PTR(r)[size].

   It may be noted that the low limb PTR(r)[0] doesn't incorporate bits from
   PTR(u)[k-1] (when k>=1 makes that limb available).  Taking just prec
   limbs from the input (with the high limb non-zero) gives enough bits for
   the application's requested precision; there's no need for extra work.

   If r==u the shift will have overlapping operands.  When k==0 (ie. when
   usize <= prec), the overlap is supported by lshift (ie. dst == src).

   But when r==u and k>=1 (ie. usize > prec), we would have an invalid
   overlap (ie. mpn_lshift (rp, rp+k, ...)).  In this case we must instead
   use mpn_rshift (PTR(r)+1, PTR(u)+k, size, NUMB-shift) with the carry out
   stored to PTR(r)[0].  An rshift by NUMB-shift bits like this gives
   identical data; it's just the overlap restrictions which differ.

   Enhancements:

   The way mpn_lshift is used means successive mpf_mul_2exp calls on the
   same operand will accumulate low zero limbs, until prec+1 limbs are
   reached.  This is wasteful for subsequent operations.  When abs_usize <=
   prec, we should test the low exp%GMP_NUMB_BITS bits of PTR(u)[0],
   ie. those which would be shifted out by an mpn_rshift.  If they're zero
   then use that mpn_rshift.  */

void
mpf_mul_2exp (mpf_ptr r, mpf_srcptr u, mp_bitcnt_t exp)
{
  mp_srcptr up;
  mp_ptr rp = r->_mp_d;
  mp_size_t usize;
  mp_size_t abs_usize;
  mp_size_t prec = r->_mp_prec;
  mp_exp_t uexp = u->_mp_exp;

  usize = u->_mp_size;

  if (UNLIKELY (usize == 0))
    {
      r->_mp_size = 0;
      r->_mp_exp = 0;
      return;
    }

  abs_usize = ABS (usize);
  up = u->_mp_d;

  if (exp % GMP_NUMB_BITS == 0)
    {
      prec++;          /* retain more precision here as we don't need
                          to account for carry-out here */
      if (abs_usize > prec)
        {
          up += abs_usize - prec;
          abs_usize = prec;
        }
      if (rp != up)
        MPN_COPY_INCR (rp, up, abs_usize);
      r->_mp_exp = uexp + exp / GMP_NUMB_BITS;
    }
  else
    {
      mp_limb_t cy_limb;
      mp_size_t adj;
      if (abs_usize > prec)
        {
          up += abs_usize - prec;
          abs_usize = prec;
          /* Use mpn_rshift since mpn_lshift operates downwards, and we
             therefore would clobber part of U before using that part, in
             case R is the same variable as U.  */
          cy_limb = mpn_rshift (rp + 1, up, abs_usize,
                                GMP_NUMB_BITS - exp % GMP_NUMB_BITS);
          rp[0] = cy_limb;
          adj = rp[abs_usize] != 0;
        }
      else
        {
          cy_limb = mpn_lshift (rp, up, abs_usize, exp % GMP_NUMB_BITS);
          rp[abs_usize] = cy_limb;
          adj = cy_limb != 0;
        }

      abs_usize += adj;
      r->_mp_exp = uexp + exp / GMP_NUMB_BITS + adj;
    }
  r->_mp_size = usize >= 0 ? abs_usize : -abs_usize;
}
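
/* A minimal usage sketch, kept inside a comment so it doesn't affect the
   library build: how an application might reach the routine above through
   the public mpf interface.  The variable names and values are arbitrary.

       #include <stdio.h>
       #include <gmp.h>

       int
       main (void)
       {
         mpf_t x, y;
         mpf_init2 (x, 128);       // request 128 bits of precision
         mpf_init2 (y, 128);
         mpf_set_ui (x, 3);        // x = 3
         mpf_mul_2exp (y, x, 70);  // y = 3 * 2^70
         gmp_printf ("%.0Ff\n", y);
         mpf_clear (x);
         mpf_clear (y);
         return 0;
       }

   Assuming 64-bit limbs and no nail bits, exp=70 splits as 70/GMP_NUMB_BITS
   == 1 (added straight to the exponent field) and 70%GMP_NUMB_BITS == 6, so
   the call takes the mpn_lshift branch above with a 6-bit shift.  */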