/* mpf_ui_sub -- Subtract a float from an unsigned long int.

Copyright 1993, 1994, 1995, 1996, 2001, 2002, 2005 Free Software Foundation,
Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"

void
mpf_ui_sub (mpf_ptr r, unsigned long int u, mpf_srcptr v)
{
  mp_srcptr up, vp;
  mp_ptr rp, tp;
  mp_size_t usize, vsize, rsize;
  mp_size_t prec;
  mp_exp_t uexp;
  mp_size_t ediff;
  int negate;
  mp_limb_t ulimb;
  TMP_DECL;

  vsize = v->_mp_size;

  /* Handle special cases that don't work in generic code below.  */
  if (u == 0)
    {
      mpf_neg (r, v);
      return;
    }
  if (vsize == 0)
    {
      mpf_set_ui (r, u);
      return;
    }

  /* If signs of U and V are different, perform addition.  */
  if (vsize < 0)
    {
      __mpf_struct v_negated;
      v_negated._mp_size = -vsize;
      v_negated._mp_exp = v->_mp_exp;
      v_negated._mp_d = v->_mp_d;
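      /* The negated copy is shallow: it shares V's limb array and only
         flips the sign field, so no allocation or limb copying is needed.  */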
      mpf_add_ui (r, &v_negated, u);
      return;
    }

  TMP_MARK;

  /* Signs are now known to be the same.  */
  ulimb = u;
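
  /* U is treated from here on as a one-limb mpf with exponent 1; note that
     this relies on an unsigned long fitting in a single limb.  */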
  /* Make U be the operand with the largest exponent.  */
  if (1 < v->_mp_exp)
    {
      negate = 1;
      usize = ABS (vsize);
      vsize = 1;
      up = v->_mp_d;
      vp = &ulimb;
      rp = r->_mp_d;
      prec = r->_mp_prec + 1;
      uexp = v->_mp_exp;
      ediff = uexp - 1;
    }
  else
    {
      negate = 0;
      usize = 1;
      vsize = ABS (vsize);
      up = &ulimb;
      vp = v->_mp_d;
      rp = r->_mp_d;
      prec = r->_mp_prec;
      uexp = 1;
      ediff = 1 - v->_mp_exp;
    }
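
  /* EDIFF is the exponent difference in limbs, now guaranteed >= 0.  NEGATE
     records whether the operands were swapped, i.e. whether the final result
     must be negated to yield u - v rather than v - u.  */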

  /* Ignore leading limbs in U and V that are equal.  Doing
     this helps increase the precision of the result.  */
  if (ediff == 0)
    {
      /* This loop normally exits immediately.  Optimize for that.  */
      for (;;)
        {
          usize--;
          vsize--;
          if (up[usize] != vp[vsize])
            break;
          uexp--;
          if (usize == 0)
            goto Lu0;
          if (vsize == 0)
            goto Lv0;
        }
      usize++;
      vsize++;
      /* Note that either operand (but not both operands) might now have
         leading zero limbs.  It matters only that U is unnormalized if
         vsize is now zero, and vice versa.  And it is only in that case
         that we have to adjust uexp.  */
      if (vsize == 0)
      Lv0:
        while (usize != 0 && up[usize - 1] == 0)
          usize--, uexp--;
      if (usize == 0)
      Lu0:
        while (vsize != 0 && vp[vsize - 1] == 0)
          vsize--, uexp--;
    }
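
  /* For instance (an illustrative case, limbs least significant first):
     with U = {1, 7, 9} and V = {2, 7, 9}, the two equal high limbs cancel,
     leaving U = {1}, V = {2} with uexp reduced by 2.  */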

  /* If U extends beyond PREC, ignore the part that does.  */
  if (usize > prec)
    {
      up += usize - prec;
      usize = prec;
    }

  /* If V extends beyond PREC, ignore the part that does.
     Note that this may make vsize negative.  */
  if (vsize + ediff > prec)
    {
      vp += vsize + ediff - prec;
      vsize = prec - ediff;
    }
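
  /* A negative vsize implies ediff > prec, so that case is swallowed by the
     ediff >= prec branch below, which ignores V altogether.  */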

  /* Allocate temp space for the result.  Allocate
     just vsize + ediff later???  */
  tp = (mp_ptr) TMP_ALLOC (prec * BYTES_PER_MP_LIMB);

  if (ediff >= prec)
    {
      /* V lies entirely below the retained precision; drop it.  */
      if (rp != up)
        MPN_COPY (rp, up, usize);
      rsize = usize;
    }
  else
    {
      /* Locate the least significant non-zero limb in (the needed
         parts of) U and V, to simplify the code below.  */
      for (;;)
        {
          if (vsize == 0)
            {
              MPN_COPY (rp, up, usize);
              rsize = usize;
              goto done;
            }
          if (vp[0] != 0)
            break;
          vp++, vsize--;
        }
      for (;;)
        {
          if (usize == 0)
            {
              MPN_COPY (rp, vp, vsize);
              rsize = vsize;
              negate ^= 1;
              goto done;
            }
          if (up[0] != 0)
            break;
          up++, usize--;
        }
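
      /* Here up[0] != 0 and vp[0] != 0.  This matters for the negation
         trick used below: with the low zero limbs stripped, subtracting
         an operand's low limbs from implicit zeros always produces a
         borrow, so a single trailing mpn_sub_1 propagates it exactly.  */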

      /* uuuu     |  uuuu     |  uuuu     |  uuuu     |  uuuu    */
      /* vvvvvvv  |  vv       |    vvvvv  |    v      |       vv */

      if (usize > ediff)
        {
          /* U and V partially overlap.  */
          if (ediff == 0)
            {
              /* Have to compare the leading limbs of u and v
                 to determine whether to compute u - v or v - u.  */
              if (usize > vsize)
                {
                  /* uuuu     */
                  /* vv       */
                  int cmp;
                  cmp = mpn_cmp (up + usize - vsize, vp, vsize);
                  if (cmp >= 0)
                    {
                      mp_size_t size;
                      size = usize - vsize;
                      MPN_COPY (tp, up, size);
                      mpn_sub_n (tp + size, up + size, vp, vsize);
                      rsize = usize;
                    }
                  else
                    {
                      /* vv       */  /* Swap U and V. */
                      /* uuuu     */
                      mp_size_t size, i;
                      size = usize - vsize;
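                      /* Compute V - U.  U's low SIZE limbs face implicit
                         zero limbs of V, so the low part of the result is
                         the two's complement of U's low part: -up[0] in the
                         bottom limb (no carry out, since up[0] != 0) and
                         ~up[i] above it, with the borrow propagated into
                         the high part by the mpn_sub_1 below.  */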
                      tp[0] = -up[0] & GMP_NUMB_MASK;
                      for (i = 1; i < size; i++)
                        tp[i] = ~up[i] & GMP_NUMB_MASK;
                      mpn_sub_n (tp + size, vp, up + size, vsize);
                      mpn_sub_1 (tp + size, tp + size, vsize, (mp_limb_t) 1);
                      negate ^= 1;
                      rsize = usize;
                    }
                }
              else if (usize < vsize)
                {
                  /* uuuu     */
                  /* vvvvvvv  */
                  int cmp;
                  cmp = mpn_cmp (up, vp + vsize - usize, usize);
                  if (cmp > 0)
                    {
                      mp_size_t size, i;
                      size = vsize - usize;
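                      /* Mirror of the case above: negate V's low SIZE limbs
                         instead, again fixing up the borrow with the
                         mpn_sub_1 below.  */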
                      tp[0] = -vp[0] & GMP_NUMB_MASK;
                      for (i = 1; i < size; i++)
                        tp[i] = ~vp[i] & GMP_NUMB_MASK;
                      mpn_sub_n (tp + size, up, vp + size, usize);
                      mpn_sub_1 (tp + size, tp + size, usize, (mp_limb_t) 1);
                      rsize = vsize;
                    }
                  else
                    {
                      /* vvvvvvv  */  /* Swap U and V. */
                      /* uuuu     */
                      /* Cannot give 0.0: even when the compared high limbs
                         are equal, V's nonzero low limbs survive in the
                         result.  */
                      mp_size_t size;
                      size = vsize - usize;
                      MPN_COPY (tp, vp, size);
                      mpn_sub_n (tp + size, vp + size, up, usize);
                      negate ^= 1;
                      rsize = vsize;
                    }
                }
              else
                {
                  /* uuuu     */
                  /* vvvv     */
                  int cmp;
                  cmp = mpn_cmp (up, vp + vsize - usize, usize);
                  if (cmp > 0)
                    {
                      mpn_sub_n (tp, up, vp, usize);
                      rsize = usize;
                    }
                  else
                    {
                      mpn_sub_n (tp, vp, up, usize);
                      negate ^= 1;
                      rsize = usize;
                      /* can give zero */
                    }
                }
            }
          else
            {
              if (vsize + ediff <= usize)
                {
                  /* uuuu     */
                  /*   v      */
                  mp_size_t size;
                  size = usize - ediff - vsize;
                  MPN_COPY (tp, up, size);
                  mpn_sub (tp + size, up + size, usize - size, vp, vsize);
                  rsize = usize;
                }
              else
                {
                  /* uuuu     */
                  /*   vvvvv  */
                  mp_size_t size, i;
                  size = vsize + ediff - usize;
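                  /* V reaches SIZE limbs below U.  Negate V's low SIZE limbs
                     as above, subtract the overlapping usize - ediff limbs of
                     V from U, and let the mpn_sub_1 propagate the borrow from
                     the negated low part.  */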
                  tp[0] = -vp[0] & GMP_NUMB_MASK;
                  for (i = 1; i < size; i++)
                    tp[i] = ~vp[i] & GMP_NUMB_MASK;
                  mpn_sub (tp + size, up, usize, vp + size, usize - ediff);
                  mpn_sub_1 (tp + size, tp + size, usize, (mp_limb_t) 1);
                  rsize = vsize + ediff;
                }
            }
        }
      else
        {
          /* uuuu     */
          /*      vv  */
          mp_size_t size, i;
          size = vsize + ediff - usize;
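          /* U and V do not overlap.  Below U's limbs the result is the
             negation of V extended with implicit zero limbs: negating those
             zeros under a borrow yields all-ones limbs (GMP_NUMB_MAX), and
             the borrow finally leaves through U via mpn_sub_1.  */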
          tp[0] = -vp[0] & GMP_NUMB_MASK;
          for (i = 1; i < vsize; i++)
            tp[i] = ~vp[i] & GMP_NUMB_MASK;
          for (i = vsize; i < size; i++)
            tp[i] = GMP_NUMB_MAX;
          mpn_sub_1 (tp + size, up, usize, (mp_limb_t) 1);
          rsize = size + usize;
        }

      /* Full normalize.  Optimize later.  */
      while (rsize != 0 && tp[rsize - 1] == 0)
        {
          rsize--;
          uexp--;
        }
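      /* rsize == 0 here means the operands cancelled completely; the result
         is then zero, stored below with size 0.  */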
      MPN_COPY (rp, tp, rsize);
    }

 done:
  r->_mp_size = negate ? -rsize : rsize;
  r->_mp_exp = uexp;
  TMP_FREE;
}
327