xref: /linux/arch/powerpc/math-emu/udivmodti4.c (revision b2441318)
// SPDX-License-Identifier: GPL-2.0
/* This has so very few changes over libgcc2's __udivmoddi4 it isn't funny.  */

#include <math-emu/soft-fp.h>

#undef count_leading_zeros
#define count_leading_zeros  __FP_CLZ
85cd27208SKumar Gala 
95cd27208SKumar Gala void
_fp_udivmodti4(_FP_W_TYPE q[2],_FP_W_TYPE r[2],_FP_W_TYPE n1,_FP_W_TYPE n0,_FP_W_TYPE d1,_FP_W_TYPE d0)105cd27208SKumar Gala _fp_udivmodti4(_FP_W_TYPE q[2], _FP_W_TYPE r[2],
115cd27208SKumar Gala 	       _FP_W_TYPE n1, _FP_W_TYPE n0,
125cd27208SKumar Gala 	       _FP_W_TYPE d1, _FP_W_TYPE d0)
135cd27208SKumar Gala {
145cd27208SKumar Gala   _FP_W_TYPE q0, q1, r0, r1;
155cd27208SKumar Gala   _FP_I_TYPE b, bm;
165cd27208SKumar Gala 
175cd27208SKumar Gala   if (d1 == 0)
185cd27208SKumar Gala     {
195cd27208SKumar Gala #if !UDIV_NEEDS_NORMALIZATION
205cd27208SKumar Gala       if (d0 > n1)
215cd27208SKumar Gala 	{
225cd27208SKumar Gala 	  /* 0q = nn / 0D */
235cd27208SKumar Gala 
245cd27208SKumar Gala 	  udiv_qrnnd (q0, n0, n1, n0, d0);
255cd27208SKumar Gala 	  q1 = 0;
265cd27208SKumar Gala 
275cd27208SKumar Gala 	  /* Remainder in n0.  */
285cd27208SKumar Gala 	}
295cd27208SKumar Gala       else
305cd27208SKumar Gala 	{
315cd27208SKumar Gala 	  /* qq = NN / 0d */
325cd27208SKumar Gala 
335cd27208SKumar Gala 	  if (d0 == 0)
345cd27208SKumar Gala 	    d0 = 1 / d0;	/* Divide intentionally by zero.  */
355cd27208SKumar Gala 
365cd27208SKumar Gala 	  udiv_qrnnd (q1, n1, 0, n1, d0);
375cd27208SKumar Gala 	  udiv_qrnnd (q0, n0, n1, n0, d0);
385cd27208SKumar Gala 
395cd27208SKumar Gala 	  /* Remainder in n0.  */
405cd27208SKumar Gala 	}
415cd27208SKumar Gala 
425cd27208SKumar Gala       r0 = n0;
435cd27208SKumar Gala       r1 = 0;
445cd27208SKumar Gala 
455cd27208SKumar Gala #else /* UDIV_NEEDS_NORMALIZATION */
465cd27208SKumar Gala 
475cd27208SKumar Gala       if (d0 > n1)
485cd27208SKumar Gala 	{
495cd27208SKumar Gala 	  /* 0q = nn / 0D */
505cd27208SKumar Gala 
515cd27208SKumar Gala 	  count_leading_zeros (bm, d0);
525cd27208SKumar Gala 
535cd27208SKumar Gala 	  if (bm != 0)
545cd27208SKumar Gala 	    {
555cd27208SKumar Gala 	      /* Normalize, i.e. make the most significant bit of the
565cd27208SKumar Gala 		 denominator set.  */
575cd27208SKumar Gala 
585cd27208SKumar Gala 	      d0 = d0 << bm;
595cd27208SKumar Gala 	      n1 = (n1 << bm) | (n0 >> (_FP_W_TYPE_SIZE - bm));
605cd27208SKumar Gala 	      n0 = n0 << bm;
615cd27208SKumar Gala 	    }
625cd27208SKumar Gala 
635cd27208SKumar Gala 	  udiv_qrnnd (q0, n0, n1, n0, d0);
645cd27208SKumar Gala 	  q1 = 0;
655cd27208SKumar Gala 
665cd27208SKumar Gala 	  /* Remainder in n0 >> bm.  */
675cd27208SKumar Gala 	}
685cd27208SKumar Gala       else
695cd27208SKumar Gala 	{
705cd27208SKumar Gala 	  /* qq = NN / 0d */
715cd27208SKumar Gala 
725cd27208SKumar Gala 	  if (d0 == 0)
735cd27208SKumar Gala 	    d0 = 1 / d0;	/* Divide intentionally by zero.  */
745cd27208SKumar Gala 
755cd27208SKumar Gala 	  count_leading_zeros (bm, d0);
765cd27208SKumar Gala 
775cd27208SKumar Gala 	  if (bm == 0)
785cd27208SKumar Gala 	    {
795cd27208SKumar Gala 	      /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
805cd27208SKumar Gala 		 conclude (the most significant bit of n1 is set) /\ (the
815cd27208SKumar Gala 		 leading quotient digit q1 = 1).
825cd27208SKumar Gala 
835cd27208SKumar Gala 		 This special case is necessary, not an optimization.
845cd27208SKumar Gala 		 (Shifts counts of SI_TYPE_SIZE are undefined.)  */
855cd27208SKumar Gala 
865cd27208SKumar Gala 	      n1 -= d0;
875cd27208SKumar Gala 	      q1 = 1;
885cd27208SKumar Gala 	    }
895cd27208SKumar Gala 	  else
905cd27208SKumar Gala 	    {
915cd27208SKumar Gala 	      _FP_W_TYPE n2;
925cd27208SKumar Gala 
935cd27208SKumar Gala 	      /* Normalize.  */
945cd27208SKumar Gala 
955cd27208SKumar Gala 	      b = _FP_W_TYPE_SIZE - bm;
965cd27208SKumar Gala 
975cd27208SKumar Gala 	      d0 = d0 << bm;
985cd27208SKumar Gala 	      n2 = n1 >> b;
995cd27208SKumar Gala 	      n1 = (n1 << bm) | (n0 >> b);
1005cd27208SKumar Gala 	      n0 = n0 << bm;
1015cd27208SKumar Gala 
1025cd27208SKumar Gala 	      udiv_qrnnd (q1, n1, n2, n1, d0);
1035cd27208SKumar Gala 	    }
1045cd27208SKumar Gala 
1055cd27208SKumar Gala 	  /* n1 != d0...  */
1065cd27208SKumar Gala 
1075cd27208SKumar Gala 	  udiv_qrnnd (q0, n0, n1, n0, d0);
1085cd27208SKumar Gala 
1095cd27208SKumar Gala 	  /* Remainder in n0 >> bm.  */
1105cd27208SKumar Gala 	}
1115cd27208SKumar Gala 
1125cd27208SKumar Gala       r0 = n0 >> bm;
1135cd27208SKumar Gala       r1 = 0;
1145cd27208SKumar Gala #endif /* UDIV_NEEDS_NORMALIZATION */
1155cd27208SKumar Gala     }
1165cd27208SKumar Gala   else
1175cd27208SKumar Gala     {
1185cd27208SKumar Gala       if (d1 > n1)
1195cd27208SKumar Gala 	{
1205cd27208SKumar Gala 	  /* 00 = nn / DD */
1215cd27208SKumar Gala 
1225cd27208SKumar Gala 	  q0 = 0;
1235cd27208SKumar Gala 	  q1 = 0;
1245cd27208SKumar Gala 
1255cd27208SKumar Gala 	  /* Remainder in n1n0.  */
1265cd27208SKumar Gala 	  r0 = n0;
1275cd27208SKumar Gala 	  r1 = n1;
1285cd27208SKumar Gala 	}
1295cd27208SKumar Gala       else
1305cd27208SKumar Gala 	{
1315cd27208SKumar Gala 	  /* 0q = NN / dd */
1325cd27208SKumar Gala 
1335cd27208SKumar Gala 	  count_leading_zeros (bm, d1);
1345cd27208SKumar Gala 	  if (bm == 0)
1355cd27208SKumar Gala 	    {
1365cd27208SKumar Gala 	      /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
1375cd27208SKumar Gala 		 conclude (the most significant bit of n1 is set) /\ (the
1385cd27208SKumar Gala 		 quotient digit q0 = 0 or 1).
1395cd27208SKumar Gala 
1405cd27208SKumar Gala 		 This special case is necessary, not an optimization.  */
1415cd27208SKumar Gala 
1425cd27208SKumar Gala 	      /* The condition on the next line takes advantage of that
1435cd27208SKumar Gala 		 n1 >= d1 (true due to program flow).  */
1445cd27208SKumar Gala 	      if (n1 > d1 || n0 >= d0)
1455cd27208SKumar Gala 		{
1465cd27208SKumar Gala 		  q0 = 1;
1475cd27208SKumar Gala 		  sub_ddmmss (n1, n0, n1, n0, d1, d0);
1485cd27208SKumar Gala 		}
1495cd27208SKumar Gala 	      else
1505cd27208SKumar Gala 		q0 = 0;
1515cd27208SKumar Gala 
1525cd27208SKumar Gala 	      q1 = 0;
1535cd27208SKumar Gala 
1545cd27208SKumar Gala 	      r0 = n0;
1555cd27208SKumar Gala 	      r1 = n1;
1565cd27208SKumar Gala 	    }
1575cd27208SKumar Gala 	  else
1585cd27208SKumar Gala 	    {
1595cd27208SKumar Gala 	      _FP_W_TYPE m1, m0, n2;
1605cd27208SKumar Gala 
1615cd27208SKumar Gala 	      /* Normalize.  */
1625cd27208SKumar Gala 
1635cd27208SKumar Gala 	      b = _FP_W_TYPE_SIZE - bm;
1645cd27208SKumar Gala 
1655cd27208SKumar Gala 	      d1 = (d1 << bm) | (d0 >> b);
1665cd27208SKumar Gala 	      d0 = d0 << bm;
1675cd27208SKumar Gala 	      n2 = n1 >> b;
1685cd27208SKumar Gala 	      n1 = (n1 << bm) | (n0 >> b);
1695cd27208SKumar Gala 	      n0 = n0 << bm;
1705cd27208SKumar Gala 
1715cd27208SKumar Gala 	      udiv_qrnnd (q0, n1, n2, n1, d1);
1725cd27208SKumar Gala 	      umul_ppmm (m1, m0, q0, d0);
1735cd27208SKumar Gala 
1745cd27208SKumar Gala 	      if (m1 > n1 || (m1 == n1 && m0 > n0))
1755cd27208SKumar Gala 		{
1765cd27208SKumar Gala 		  q0--;
1775cd27208SKumar Gala 		  sub_ddmmss (m1, m0, m1, m0, d1, d0);
1785cd27208SKumar Gala 		}
1795cd27208SKumar Gala 
1805cd27208SKumar Gala 	      q1 = 0;
1815cd27208SKumar Gala 
1825cd27208SKumar Gala 	      /* Remainder in (n1n0 - m1m0) >> bm.  */
1835cd27208SKumar Gala 	      sub_ddmmss (n1, n0, n1, n0, m1, m0);
1845cd27208SKumar Gala 	      r0 = (n1 << b) | (n0 >> bm);
1855cd27208SKumar Gala 	      r1 = n1 >> bm;
1865cd27208SKumar Gala 	    }
1875cd27208SKumar Gala 	}
1885cd27208SKumar Gala     }
1895cd27208SKumar Gala 
1905cd27208SKumar Gala   q[0] = q0; q[1] = q1;
1915cd27208SKumar Gala   r[0] = r0, r[1] = r1;
1925cd27208SKumar Gala }
193