/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/* cairo - a vector graphics library with display and print output
 *
 * Copyright © 2004 Keith Packard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * The original code as contributed to the cairo library under
 * the dual license MPL+LGPL. We used the LGPL relicensing clause to
 * get a GPL version of this code which now lives here. This header is
 * unmodified other than the licensing clause.
 *
 * The Original Code is the cairo graphics library.
 *
 * The Initial Developer of the Original Code is Keith Packard
 *
 * Contributor(s):
 *	Keith R. Packard <keithp@keithp.com>
 *
 * Code changes for ns-3 from upstream are marked with `//PDB'
 */

#include <climits>
#include "cairo-wideint-private.h"

/**
 * \file
 * \ingroup highprec
 * Implementation of the cairo_x functions providing high-precision arithmetic.
 */

// *NS_CHECK_STYLE_OFF*

#if HAVE_UINT64_T

const char * cairo_impl64 = "uint64_t";

#define _cairo_uint32s_to_uint64(h,l) ((uint64_t) (h) << 32 | (l))

cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
    cairo_uquorem64_t qr;

    qr.quo = num / den;
    qr.rem = num % den;
    return qr;
}

#else

const char * cairo_impl64 = "uint32_t";

cairo_uint64_t
_cairo_uint32_to_uint64 (uint32_t i)
{
    cairo_uint64_t q;

    q.lo = i;
    q.hi = 0;
    return q;
}

cairo_int64_t
_cairo_int32_to_int64 (int32_t i)
{
    cairo_uint64_t q;

    q.lo = i;
    q.hi = i < 0 ? -1 : 0;
    return q;
}

static cairo_uint64_t
_cairo_uint32s_to_uint64 (uint32_t h, uint32_t l)
{
    cairo_uint64_t q;

    q.lo = l;
    q.hi = h;
    return q;
}

cairo_uint64_t
_cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t s;

    s.hi = a.hi + b.hi;
    s.lo = a.lo + b.lo;
    if (s.lo < a.lo)
        s.hi++;
    return s;
}

cairo_uint64_t
_cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t s;

    s.hi = a.hi - b.hi;
    s.lo = a.lo - b.lo;
    if (s.lo > a.lo)
        s.hi--;
    return s;
}

#define uint32_lo(i)	((i) & 0xffff)
#define uint32_hi(i)	((i) >> 16)
#define uint32_carry16	((1) << 16)

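/*
 * 32x32 -> 64 bit multiply by schoolbook multiplication on 16-bit halves:
 * with a = ah*2^16 + al and b = bh*2^16 + bl,
 *
 *	a*b = ah*bh*2^32 + (ah*bl + al*bh)*2^16 + al*bl.
 *
 * Each partial product is at most (2^16-1)^2, so adding uint32_hi(r0) to
 * r1 cannot wrap a uint32_t, while r1 + r2 can; that carry is folded into
 * r3 as uint32_carry16 below.
 */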
cairo_uint64_t
_cairo_uint32x32_64_mul (uint32_t a, uint32_t b)
{
    cairo_uint64_t s;

    uint16_t ah, al, bh, bl;
    uint32_t r0, r1, r2, r3;

    al = uint32_lo (a);
    ah = uint32_hi (a);
    bl = uint32_lo (b);
    bh = uint32_hi (b);

    r0 = (uint32_t) al * bl;
    r1 = (uint32_t) al * bh;
    r2 = (uint32_t) ah * bl;
    r3 = (uint32_t) ah * bh;

    r1 += uint32_hi (r0);   /* no carry possible */
    r1 += r2;               /* but this can carry */
    if (r1 < r2)            /* check */
        r3 += uint32_carry16;

    s.hi = r3 + uint32_hi (r1);
    s.lo = (uint32_lo (r1) << 16) + uint32_lo (r0);
    return s;
}

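/*
 * Signed 32x32 -> 64 bit multiply.  A negative a reinterpreted as unsigned
 * equals a + 2^32, so the unsigned product is too large by b*2^32;
 * subtracting b from the high word removes that excess, and likewise for a
 * negative b.
 */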
cairo_int64_t
_cairo_int32x32_64_mul (int32_t a, int32_t b)
{
    cairo_int64_t s;
    s = _cairo_uint32x32_64_mul ((uint32_t) a, (uint32_t) b);
    if (a < 0)
        s.hi -= b;
    if (b < 0)
        s.hi -= a;
    return s;
}

cairo_uint64_t
_cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t s;

    s = _cairo_uint32x32_64_mul (a.lo, b.lo);
    s.hi += a.lo * b.hi + a.hi * b.lo;
    return s;
}

cairo_uint64_t
_cairo_uint64_lsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
        a.hi = a.lo;
        a.lo = 0;
        shift -= 32;
    }
    if (shift)
    {
        a.hi = a.hi << shift | a.lo >> (32 - shift);
        a.lo = a.lo << shift;
    }
    return a;
}

cairo_uint64_t
_cairo_uint64_rsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
        a.lo = a.hi;
        a.hi = 0;
        shift -= 32;
    }
    if (shift)
    {
        a.lo = a.lo >> shift | a.hi << (32 - shift);
        a.hi = a.hi >> shift;
    }
    return a;
}

#define _cairo_uint32_rsa(a,n)	((uint32_t) (((int32_t) (a)) >> (n)))

cairo_int64_t
_cairo_uint64_rsa (cairo_int64_t a, int shift)
{
    if (shift >= 32)
    {
        a.lo = a.hi;
        a.hi = _cairo_uint32_rsa (a.hi, 31);
        shift -= 32;
    }
    if (shift)
    {
        a.lo = a.lo >> shift | a.hi << (32 - shift);
        a.hi = _cairo_uint32_rsa (a.hi, shift);
    }
    return a;
}

int
_cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b)
{
    return (a.hi < b.hi ||
            (a.hi == b.hi && a.lo < b.lo));
}

int
_cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b)
{
    return a.hi == b.hi && a.lo == b.lo;
}

int
_cairo_int64_lt (cairo_int64_t a, cairo_int64_t b)
{
    if (_cairo_int64_negative (a) && !_cairo_int64_negative (b))
        return 1;
    if (!_cairo_int64_negative (a) && _cairo_int64_negative (b))
        return 0;
    return _cairo_uint64_lt (a, b);
}

cairo_uint64_t
_cairo_uint64_not (cairo_uint64_t a)
{
    a.lo = ~a.lo;
    a.hi = ~a.hi;
    return a;
}

cairo_uint64_t
_cairo_uint64_negate (cairo_uint64_t a)
{
    a.lo = ~a.lo;
    a.hi = ~a.hi;
    if (++a.lo == 0)
        ++a.hi;
    return a;
}

/*
 * Simple bit-at-a-time divide.
 */
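/*
 * This is classic restoring (shift-and-subtract) division: the divisor and
 * a single quotient bit are shifted left until the divisor is no longer
 * smaller than the dividend or its top bit is set, then both are walked
 * back one position at a time, subtracting the divisor and setting the
 * corresponding quotient bit whenever it still fits.  Once the bit reaches
 * zero, num holds the remainder.
 */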
cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
    cairo_uquorem64_t qr;
    cairo_uint64_t bit;
    cairo_uint64_t quo;

    bit = _cairo_uint32_to_uint64 (1);

    /* normalize to make den >= num, but not overflow */
    while (_cairo_uint64_lt (den, num) && (den.hi & 0x80000000) == 0)
    {
        bit = _cairo_uint64_lsl (bit, 1);
        den = _cairo_uint64_lsl (den, 1);
    }
    quo = _cairo_uint32_to_uint64 (0);

    /* generate quotient, one bit at a time */
    while (bit.hi | bit.lo)
    {
        if (_cairo_uint64_le (den, num))
        {
            num = _cairo_uint64_sub (num, den);
            quo = _cairo_uint64_add (quo, bit);
        }
        bit = _cairo_uint64_rsl (bit, 1);
        den = _cairo_uint64_rsl (den, 1);
    }
    qr.quo = quo;
    qr.rem = num;
    return qr;
}

#endif /* !HAVE_UINT64_T */

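/*
 * Signed 64-bit division with truncation toward zero (the semantics of C's
 * / and % on signed integers): the remainder takes the sign of the
 * numerator, and the quotient is negative exactly when the operand signs
 * differ.
 */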
cairo_quorem64_t
_cairo_int64_divrem (cairo_int64_t num, cairo_int64_t den)
{
    int num_neg = _cairo_int64_negative (num);
    int den_neg = _cairo_int64_negative (den);
    cairo_uquorem64_t uqr;
    cairo_quorem64_t qr;

    if (num_neg)
        num = _cairo_int64_negate (num);
    if (den_neg)
        den = _cairo_int64_negate (den);
    uqr = _cairo_uint64_divrem (num, den);
    if (num_neg)
        qr.rem = _cairo_int64_negate ((cairo_int64_t)uqr.rem);  //PDB cast
    else
        qr.rem = uqr.rem;
    if (num_neg != den_neg)
        qr.quo = (cairo_int64_t) _cairo_int64_negate ((cairo_int64_t)uqr.quo);  //PDB cast
    else
        qr.quo = (cairo_int64_t) uqr.quo;
    return qr;
}

#if HAVE_UINT128_T

const char * cairo_impl128 = "uint128_t";

cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
    cairo_uquorem128_t qr;

    qr.quo = num / den;
    qr.rem = num % den;
    return qr;
}

#else

const char * cairo_impl128 = "cairo_uint64_t";

cairo_uint128_t
_cairo_uint32_to_uint128 (uint32_t i)
{
    cairo_uint128_t q;

    q.lo = _cairo_uint32_to_uint64 (i);
    q.hi = _cairo_uint32_to_uint64 (0);
    return q;
}

cairo_int128_t
_cairo_int32_to_int128 (int32_t i)
{
    cairo_int128_t q;

    q.lo = _cairo_int32_to_int64 (i);
    q.hi = _cairo_int32_to_int64 (i < 0 ? -1 : 0);
    return q;
}

cairo_uint128_t
_cairo_uint64_to_uint128 (cairo_uint64_t i)
{
    cairo_uint128_t q;

    q.lo = i;
    q.hi = _cairo_uint32_to_uint64 (0);
    return q;
}

cairo_int128_t
_cairo_int64_to_int128 (cairo_int64_t i)
{
    cairo_int128_t q;

    q.lo = i;
    q.hi = _cairo_int32_to_int64 (_cairo_int64_negative (i) ? -1 : 0);
    return q;
}

cairo_uint128_t
_cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s.hi = _cairo_uint64_add (a.hi, b.hi);
    s.lo = _cairo_uint64_add (a.lo, b.lo);
    if (_cairo_uint64_lt (s.lo, a.lo))
        s.hi = _cairo_uint64_add (s.hi, _cairo_uint32_to_uint64 (1));
    return s;
}

cairo_uint128_t
_cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s.hi = _cairo_uint64_sub (a.hi, b.hi);
    s.lo = _cairo_uint64_sub (a.lo, b.lo);
    if (_cairo_uint64_gt (s.lo, a.lo))
        s.hi = _cairo_uint64_sub (s.hi, _cairo_uint32_to_uint64 (1));
    return s;
}

#if HAVE_UINT64_T

#define uint64_lo32(i)	((i) & 0xffffffff)
#define uint64_hi32(i)	((i) >> 32)
#define uint64_lo(i)	((i) & 0xffffffff)
#define uint64_hi(i)	((i) >> 32)
#define uint64_shift32(i)   ((i) << 32)
#define uint64_carry32	(((uint64_t) 1) << 32)

#else

#define uint64_lo32(i)	((i).lo)
#define uint64_hi32(i)	((i).hi)

static cairo_uint64_t
uint64_lo (cairo_uint64_t i)
{
    cairo_uint64_t s;

    s.lo = i.lo;
    s.hi = 0;
    return s;
}

static cairo_uint64_t
uint64_hi (cairo_uint64_t i)
{
    cairo_uint64_t s;

    s.lo = i.hi;
    s.hi = 0;
    return s;
}

static cairo_uint64_t
uint64_shift32 (cairo_uint64_t i)
{
    cairo_uint64_t s;

    s.lo = 0;
    s.hi = i.lo;
    return s;
}

static const cairo_uint64_t uint64_carry32 = { 0, 1 };

#endif

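/*
 * 64x64 -> 128 bit multiply, using the same schoolbook scheme as
 * _cairo_uint32x32_64_mul above but on 32-bit halves:
 *
 *	a*b = ah*bh*2^64 + (ah*bl + al*bh)*2^32 + al*bl.
 */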
cairo_uint128_t
_cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint128_t s;
    uint32_t ah, al, bh, bl;
    cairo_uint64_t r0, r1, r2, r3;

    al = uint64_lo32 (a);
    ah = uint64_hi32 (a);
    bl = uint64_lo32 (b);
    bh = uint64_hi32 (b);

    r0 = _cairo_uint32x32_64_mul (al, bl);
    r1 = _cairo_uint32x32_64_mul (al, bh);
    r2 = _cairo_uint32x32_64_mul (ah, bl);
    r3 = _cairo_uint32x32_64_mul (ah, bh);

    r1 = _cairo_uint64_add (r1, uint64_hi (r0));    /* no carry possible */
    r1 = _cairo_uint64_add (r1, r2);                /* but this can carry */
    if (_cairo_uint64_lt (r1, r2))                  /* check */
        r3 = _cairo_uint64_add (r3, uint64_carry32);

    s.hi = _cairo_uint64_add (r3, uint64_hi (r1));
    s.lo = _cairo_uint64_add (uint64_shift32 (r1),
                              uint64_lo (r0));
    return s;
}

cairo_int128_t
_cairo_int64x64_128_mul (cairo_int64_t a, cairo_int64_t b)
{
    cairo_int128_t s;
    s = _cairo_uint64x64_128_mul (_cairo_int64_to_uint64 (a),
                                  _cairo_int64_to_uint64 (b));
    if (_cairo_int64_negative (a))
        s.hi = _cairo_uint64_sub (s.hi,
                                  _cairo_int64_to_uint64 (b));
    if (_cairo_int64_negative (b))
        s.hi = _cairo_uint64_sub (s.hi,
                                  _cairo_int64_to_uint64 (a));
    return s;
}

cairo_uint128_t
_cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s = _cairo_uint64x64_128_mul (a.lo, b.lo);
    s.hi = _cairo_uint64_add (s.hi,
                              _cairo_uint64_mul (a.lo, b.hi));
    s.hi = _cairo_uint64_add (s.hi,
                              _cairo_uint64_mul (a.hi, b.lo));
    return s;
}

cairo_uint128_t
_cairo_uint128_lsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
        a.hi = a.lo;
        a.lo = _cairo_uint32_to_uint64 (0);
        shift -= 64;
    }
    if (shift)
    {
        a.hi = _cairo_uint64_add (_cairo_uint64_lsl (a.hi, shift),
                                  _cairo_uint64_rsl (a.lo, (64 - shift)));
        a.lo = _cairo_uint64_lsl (a.lo, shift);
    }
    return a;
}

cairo_uint128_t
_cairo_uint128_rsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
        a.lo = a.hi;
        a.hi = _cairo_uint32_to_uint64 (0);
        shift -= 64;
    }
    if (shift)
    {
        a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
                                  _cairo_uint64_lsl (a.hi, (64 - shift)));
        a.hi = _cairo_uint64_rsl (a.hi, shift);
    }
    return a;
}

cairo_uint128_t
_cairo_uint128_rsa (cairo_int128_t a, int shift)
{
    if (shift >= 64)
    {
        a.lo = a.hi;
        a.hi = _cairo_uint64_rsa (a.hi, 64 - 1);
        shift -= 64;
    }
    if (shift)
    {
        a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
                                  _cairo_uint64_lsl (a.hi, (64 - shift)));
        a.hi = _cairo_uint64_rsa (a.hi, shift);
    }
    return a;
}

int
_cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b)
{
    return (_cairo_uint64_lt (a.hi, b.hi) ||
            (_cairo_uint64_eq (a.hi, b.hi) &&
             _cairo_uint64_lt (a.lo, b.lo)));
}

int
_cairo_int128_lt (cairo_int128_t a, cairo_int128_t b)
{
    if (_cairo_int128_negative (a) && !_cairo_int128_negative (b))
        return 1;
    if (!_cairo_int128_negative (a) && _cairo_int128_negative (b))
        return 0;
    return _cairo_uint128_lt (a, b);
}

int
_cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
{
    return (_cairo_uint64_eq (a.hi, b.hi) &&
            _cairo_uint64_eq (a.lo, b.lo));
}

#if HAVE_UINT64_T
#define _cairo_msbset64(q)  (q & ((uint64_t) 1 << 63))
#else
#define _cairo_msbset64(q)  (q.hi & ((uint32_t) 1 << 31))
#endif

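/*
 * Bit-at-a-time divide, structurally identical to the 64-bit
 * _cairo_uint64_divrem above but built from the 128-bit helpers.
 */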
cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
    cairo_uquorem128_t qr;
    cairo_uint128_t bit;
    cairo_uint128_t quo;

    bit = _cairo_uint32_to_uint128 (1);

    /* normalize to make den >= num, but not overflow */
    while (_cairo_uint128_lt (den, num) && !_cairo_msbset64 (den.hi))
    {
        bit = _cairo_uint128_lsl (bit, 1);
        den = _cairo_uint128_lsl (den, 1);
    }
    quo = _cairo_uint32_to_uint128 (0);

    /* generate quotient, one bit at a time */
    while (_cairo_uint128_ne (bit, _cairo_uint32_to_uint128 (0)))
    {
        if (_cairo_uint128_le (den, num))
        {
            num = _cairo_uint128_sub (num, den);
            quo = _cairo_uint128_add (quo, bit);
        }
        bit = _cairo_uint128_rsl (bit, 1);
        den = _cairo_uint128_rsl (den, 1);
    }
    qr.quo = quo;
    qr.rem = num;
    return qr;
}

cairo_uint128_t
_cairo_uint128_negate (cairo_uint128_t a)
{
    a.lo = _cairo_uint64_not (a.lo);
    a.hi = _cairo_uint64_not (a.hi);
    return _cairo_uint128_add (a, _cairo_uint32_to_uint128 (1));
}

cairo_uint128_t
_cairo_uint128_not (cairo_uint128_t a)
{
    a.lo = _cairo_uint64_not (a.lo);
    a.hi = _cairo_uint64_not (a.hi);
    return a;
}

#endif /* !HAVE_UINT128_T */

cairo_quorem128_t
_cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den)
{
    int num_neg = _cairo_int128_negative (num);
    int den_neg = _cairo_int128_negative (den);
    cairo_uquorem128_t uqr;
    cairo_quorem128_t qr;

    if (num_neg)
        num = _cairo_int128_negate (num);
    if (den_neg)
        den = _cairo_int128_negate (den);
    uqr = _cairo_uint128_divrem (num, den);
    if (num_neg)
        qr.rem = _cairo_int128_negate (uqr.rem);
    else
        qr.rem = uqr.rem;
    if (num_neg != den_neg)
        qr.quo = _cairo_int128_negate (uqr.quo);
    else
        qr.quo = uqr.quo;
    return qr;
}

/**
 * _cairo_uint_96by64_32x64_divrem:
 *
 * Compute a 32 bit quotient and 64 bit remainder of a 96 bit unsigned
 * dividend and 64 bit divisor.  If the quotient doesn't fit into 32
 * bits then the returned remainder is equal to the divisor, and the
 * quotient is the largest representable 64 bit integer.  It is an
 * error to call this function with the high 32 bits of `num' being
 * non-zero. */
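/*
 * Callers can detect the overflow case by checking whether the returned
 * remainder equals the divisor, which is how _cairo_int_96by64_32x64_divrem
 * below does it.  As an illustrative example, dividing the 96-bit value
 * 2^64 by 2^33 yields quotient 2^31 with remainder 0, which still fits in
 * 32 bits.
 */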
cairo_uquorem64_t
_cairo_uint_96by64_32x64_divrem (cairo_uint128_t num,
                                 cairo_uint64_t den)
{
    cairo_uquorem64_t result;
    cairo_uint64_t B = _cairo_uint32s_to_uint64 (1, 0);

    /* These are the high 64 bits of the *96* bit numerator.  We're
     * going to represent the numerator as xB + y, where x is a 64 bit
     * and y is a 32 bit number. */
    cairo_uint64_t x = _cairo_uint128_to_uint64 (_cairo_uint128_rsl (num, 32));

    /* Initialise the result to indicate overflow. */
    result.quo = _cairo_uint32s_to_uint64 (UINT_MAX, UINT_MAX);  //PDB cast
    result.rem = den;

    /* Don't bother if the quotient is going to overflow. */
    if (_cairo_uint64_ge (x, den)) {
        return /* overflow */ result;
    }

    if (_cairo_uint64_lt (x, B)) {
        /* When the final quotient is known to fit in 32 bits, then
         * num < 2^64 if and only if den < 2^32. */
        return _cairo_uint64_divrem (_cairo_uint128_to_uint64 (num), den);
    }
    else {
        /* The denominator is >= 2^32, the numerator is >= 2^64, and the
         * division won't overflow: need two divrems.  Write the
         * numerator and denominator as
         *
         *	num = xB + y		x : 64 bits, y : 32 bits
         *	den = uB + v		u, v : 32 bits
         */
        uint32_t y = _cairo_uint128_to_uint32 (num);
        uint32_t u = uint64_hi32 (den);
        uint32_t v = _cairo_uint64_to_uint32 (den);

        /* Compute a lower bound approximate quotient of num/den
         * from x/(u+1).  Then we have
         *
         * x	= q(u+1) + r	; q : 32 bits, r <= u : 32 bits.
         *
         * xB + y	= q(u+1)B	+ (rB+y)
         *		= q(uB + B + v - v) + (rB+y)
         *		= q(uB + v)	+ qB - qv + (rB+y)
         *		= q(uB + v)	+ q(B-v) + (rB+y)
         *
         * The true quotient of num/den then is q plus the
         * contribution of q(B-v) + (rB+y).  The main contribution
         * comes from the term q(B-v), with the term (rB+y) only
         * contributing at most one part.
         *
         * The term q(B-v) must fit into 64 bits, since q fits into 32
         * bits on account of being a lower bound to the true
         * quotient, and as B-v <= 2^32, we may safely use a single
         * 64/64 bit division to find its contribution. */

        cairo_uquorem64_t quorem;
        cairo_uint64_t remainder; /* will contain final remainder */
        uint32_t quotient;        /* will contain final quotient. */
        uint32_t q;
        uint32_t r;

        /* Approximate quotient by dividing the high 64 bits of num by
         * u+1. Watch out for overflow of u+1. */
        if (u+1) {
            quorem = _cairo_uint64_divrem (x, _cairo_uint32_to_uint64 (u+1));
            q = _cairo_uint64_to_uint32 (quorem.quo);
            r = _cairo_uint64_to_uint32 (quorem.rem);
        }
        else {
            q = uint64_hi32 (x);
            r = _cairo_uint64_to_uint32 (x);
        }
        quotient = q;

        /* Add the main term's contribution to quotient.  Note B-v =
         * -v as a uint32 (unless v = 0) */
        if (v)
            quorem = _cairo_uint64_divrem (_cairo_uint32x32_64_mul (q, -(int32_t)v), den);  //PDB cast
        else
            quorem = _cairo_uint64_divrem (_cairo_uint32s_to_uint64 (q, 0), den);
        quotient += _cairo_uint64_to_uint32 (quorem.quo);

        /* Add the contribution of the subterm and start computing the
         * true remainder. */
        remainder = _cairo_uint32s_to_uint64 (r, y);
        if (_cairo_uint64_ge (remainder, den)) {
            remainder = _cairo_uint64_sub (remainder, den);
            quotient++;
        }

        /* Add the contribution of the main term's remainder. The
         * funky test here checks that remainder + main_rem >= den,
         * taking into account overflow of the addition. */
        remainder = _cairo_uint64_add (remainder, quorem.rem);
        if (_cairo_uint64_ge (remainder, den) ||
            _cairo_uint64_lt (remainder, quorem.rem))
        {
            remainder = _cairo_uint64_sub (remainder, den);
            quotient++;
        }

        result.quo = _cairo_uint32_to_uint64 (quotient);
        result.rem = remainder;
    }
    return result;
}

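/*
 * Signed wrapper around _cairo_uint_96by64_32x64_divrem: the operands are
 * divided as magnitudes and the signs are restored afterwards.  On
 * overflow the quotient saturates to 0x7FFFFFFFFFFFFFFF and the remainder
 * is returned as the original (signed) divisor.
 */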
cairo_quorem64_t
_cairo_int_96by64_32x64_divrem (cairo_int128_t num, cairo_int64_t den)
{
    int num_neg = _cairo_int128_negative (num);
    int den_neg = _cairo_int64_negative (den);
    cairo_uint64_t nonneg_den;
    cairo_uquorem64_t uqr;
    cairo_quorem64_t qr;

    if (num_neg)
        num = _cairo_int128_negate (num);
    if (den_neg)
        nonneg_den = _cairo_int64_negate (den);
    else
        nonneg_den = den;

    uqr = _cairo_uint_96by64_32x64_divrem (num, nonneg_den);
    if (_cairo_uint64_eq (uqr.rem, _cairo_int64_to_uint64 (nonneg_den))) {
        /* bail on overflow. */
        qr.quo = _cairo_uint32s_to_uint64 (0x7FFFFFFF, UINT_MAX);  //PDB cast
        qr.rem = den;
        return qr;
    }

    if (num_neg)
        qr.rem = _cairo_int64_negate ((cairo_int64_t)uqr.rem);  //PDB cast
    else
        qr.rem = uqr.rem;
    if (num_neg != den_neg)
        qr.quo = _cairo_int64_negate ((cairo_int64_t)uqr.quo);  //PDB cast
    else
        qr.quo = uqr.quo;
    return qr;
}