Lines Matching refs:w

48 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
49 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
50 C1.w[1] = x.w[1] & MASK_COEFF;
51 C1.w[0] = x.w[0];
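
The four lines above (48-51) unpack the sign, biased exponent, and coefficient from the 128-bit BID encoding. A minimal sketch of the same unpacking, assuming the standard BID128 field layout; the mask names mirror the listing's MASK_* macros, but the values written out below are assumptions for illustration, not taken from this listing:

#include <stdint.h>

typedef struct { uint64_t w[2]; } UINT128; // w[0] = low limb, w[1] = high limb

// Assumed BID128 field masks: sign bit 127, exponent bits 113..126
// (the "small coefficient" form), coefficient bits 0..112.
#define MASK_SIGN  0x8000000000000000ull
#define MASK_EXP   0x7ffe000000000000ull
#define MASK_COEFF 0x0001ffffffffffffull

static void unpack_bid128_sketch (UINT128 x, uint64_t *x_sign,
                                  uint64_t *x_exp, UINT128 *C1) {
  *x_sign = x.w[1] & MASK_SIGN;  // 0 for positive, MASK_SIGN for negative
  *x_exp  = x.w[1] & MASK_EXP;   // biased, still shifted left 49 positions
  C1->w[1] = x.w[1] & MASK_COEFF;
  C1->w[0] = x.w[0];
}
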
54 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
56 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
57 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
85 if ((C1.w[1] > 0x0001ed09bead87c0ull) ||
86 (C1.w[1] == 0x0001ed09bead87c0ull
87 && (C1.w[0] > 0x378d8e63ffffffffull))
88 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
91 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
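
The three-limb test at 85-88 asks whether the coefficient exceeds 10^34 - 1 (0x0001ed09bead87c0 in the high limb, 0x378d8e63ffffffff in the low limb) or whether the two steering bits 0x6000... are set; for BID128 either case is a non-canonical encoding, which the standard defines to represent zero. The comparison is the usual high-limb-first idiom, sketched here as a helper (gt128 is a hypothetical name):

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t w[2]; } UINT128;

// a > b for 128-bit unsigned values: compare high limbs, fall back to low.
static bool gt128 (UINT128 a, UINT128 b) {
  return a.w[1] > b.w[1] || (a.w[1] == b.w[1] && a.w[0] > b.w[0]);
}

// Mirroring lines 85-87: non-canonical if C1 > 10^34 - 1.
static const UINT128 MAX_COEFF =
  {{ 0x378d8e63ffffffffull, 0x0001ed09bead87c0ull }};
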
99 if (C1.w[1] == 0) {
100 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
102 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
103 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
107 tmp1.d = (double) (C1.w[0]); // exact conversion
112 tmp1.d = (double) C1.w[0]; // exact conversion
117 tmp1.d = (double) C1.w[1]; // exact conversion
124 if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
125 || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
126 && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
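
Lines 99-126 count the decimal digits q in C1. Each chunk converted to double is below 2^53, so the conversion is exact and the double's binary exponent field yields the bit length x_nr_bits (plus 32 or 64 when the chunk was shifted down or taken from the high limb); nr_digits[] then maps bit length to digit count, with threshold_hi/threshold_lo resolving the one ambiguous case per bit length. A sketch of the exponent extraction, assuming a double/uint64_t union like the tmp1 used above:

#include <stdint.h>

typedef union { double d; uint64_t ui64; } BID_UI64DOUBLE; // assumed layout of tmp1

// For a value stored exactly in tmp.d, the biased exponent field gives
// floor(log2(value)) + 1023; the bit length is one more than floor(log2).
static int bit_length_sketch (BID_UI64DOUBLE tmp) {
  return (int) ((tmp.ui64 >> 52) & 0x7ff) - 0x3ff + 1;
}
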
147 C.w[1] = 0x0000000000000005ull;
148 C.w[0] = 0x0000000000000005ull;
151 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
157 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
171 C.w[1] = 0x0000000000000004ull;
172 C.w[0] = 0xfffffffffffffffbull;
175 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
181 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
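
When q < 20, lines 151/175 scale C1 by 10^(20-q) with a full 64x64 -> 128-bit multiply so that the overflow tests can compare against the bounds used here, C = 5*2^64 + 5 = 10*(2^63 + 1/2) and 5*2^64 - 5 = 10*(2^63 - 1/2), at equal scale. __mul_64x64_to_128MACH presumably maps to a native widening multiply where available; a portable sketch using 32-bit partial products:

#include <stdint.h>

typedef struct { uint64_t w[2]; } UINT128;

// Full 128-bit product of two 64-bit operands via 32-bit halves.
static UINT128 mul_64x64_to_128_sketch (uint64_t a, uint64_t b) {
  uint64_t a_lo = (uint32_t) a, a_hi = a >> 32;
  uint64_t b_lo = (uint32_t) b, b_hi = b >> 32;
  uint64_t lo_lo = a_lo * b_lo;
  uint64_t hi_lo = a_hi * b_lo;
  uint64_t lo_hi = a_lo * b_hi;
  uint64_t hi_hi = a_hi * b_hi;
  // Middle column; neither sum below can overflow 64 bits.
  uint64_t mid  = hi_lo + (lo_lo >> 32);
  uint64_t mid2 = lo_hi + (uint32_t) mid;
  UINT128 r;
  r.w[0] = (mid2 << 32) | (uint32_t) lo_lo;
  r.w[1] = hi_hi + (mid >> 32) + (mid2 >> 32);
  return r;
}
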
195 C1.w[1] = x.w[1] & MASK_COEFF;
196 C1.w[0] = x.w[0];
208 if ((C1.w[1] == 0) && (C1.w[0] <= midpoint64[ind])) {
216 if ((C1.w[1] < midpoint128[ind - 19].w[1])
217 || ((C1.w[1] == midpoint128[ind - 19].w[1])
218 && (C1.w[0] <= midpoint128[ind - 19].w[0]))) {
233 tmp64 = C1.w[0];
235 C1.w[0] = C1.w[0] + midpoint64[ind - 1];
237 C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
238 C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
240 if (C1.w[0] < tmp64)
241 C1.w[1]++;
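
For rounding to nearest, lines 233-241 add 1/2 ulp (midpoint64[ind-1] or midpoint128[ind-20] holds 0.5 * 10^ind) into C1 before the truncating scale; the wraparound test C1.w[0] < tmp64 detects the carry out of the low limb. The same two-limb addition as a standalone sketch:

#include <stdint.h>

typedef struct { uint64_t w[2]; } UINT128;

// r = a + b; unsigned wraparound of the low limb signals a carry,
// because an overflowed sum is smaller than either addend.
static UINT128 add128_sketch (UINT128 a, UINT128 b) {
  UINT128 r;
  r.w[0] = a.w[0] + b.w[0];
  r.w[1] = a.w[1] + b.w[1] + (r.w[0] < a.w[0]);
  return r;
}
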
252 Cstar.w[1] = P256.w[3];
253 Cstar.w[0] = P256.w[2];
254 fstar.w[3] = 0;
255 fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
256 fstar.w[1] = P256.w[1];
257 fstar.w[0] = P256.w[0];
259 Cstar.w[1] = 0;
260 Cstar.w[0] = P256.w[3];
261 fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
262 fstar.w[2] = P256.w[2];
263 fstar.w[1] = P256.w[1];
264 fstar.w[0] = P256.w[0];
281 Cstar.w[0] =
282 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
285 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
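
Lines 252-285 take Cstar = floor(C1 * 10^-ind) from the top limbs of the 256-bit product P256 and shift out the remaining scaling bits, keeping the discarded fraction f* in fstar for the exactness tests. The two shift cases (amount below 64, or 66..102 per the comment) follow the usual pattern, sketched below; note that 64 - shift would be undefined behavior in C for shift == 0, a case the precomputed shift amounts evidently never produce:

#include <stdint.h>

typedef struct { uint64_t w[2]; } UINT128;

// Logical right shift of a 128-bit value by 1..127 bits.
static UINT128 shr128_sketch (UINT128 a, unsigned shift) {
  UINT128 r;
  if (shift < 64) {
    r.w[0] = (a.w[0] >> shift) | (a.w[1] << (64 - shift));
    r.w[1] = a.w[1] >> shift;
  } else {           // 64 <= shift <= 127; only the high limb contributes
    r.w[0] = a.w[1] >> (shift - 64);
    r.w[1] = 0;
  }
  return r;
}
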
290 if ((fstar.w[3] == 0) && (fstar.w[2] == 0) &&
291 (fstar.w[1] || fstar.w[0]) &&
292 (fstar.w[1] < ten2mk128trunc[ind - 1].w[1] ||
293 (fstar.w[1] == ten2mk128trunc[ind - 1].w[1] &&
294 fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
296 if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
298 Cstar.w[0]--; // Cstar.w[0] is now even
302 res = -Cstar.w[0];
304 res = Cstar.w[0];
309 res = -C1.w[0];
311 res = C1.w[0];
315 res = -C1.w[0] * ten2k64[exp];
317 res = C1.w[0] * ten2k64[exp];
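
The tail of each variant (290-317 here) picks the result: if f* is nonzero but below the truncation error bound ten2mk128trunc[ind-1], C1 sat exactly on a midpoint, so ties-to-even decrements an odd Cstar (296-298); when no scaling is needed C1 passes through directly (309-311), and positive exponents scale by ten2k64[exp] (315-317). In miniature, the whole add-midpoint / truncate / inspect-remainder scheme for round-to-nearest-even looks like this toy 64-bit analogue with divisor 10 (the library instead multiplies by a precomputed 10^-ind reciprocal and shifts):

#include <stdint.h>

// Round c to the nearest multiple of 10, ties to even, returning c/10 rounded.
static uint64_t round10_rne_sketch (uint64_t c) {
  uint64_t cmid  = c + 5;       // add 1/2 ulp = 0.5 * 10
  uint64_t cstar = cmid / 10;   // truncating scale
  uint64_t fstar = cmid % 10;   // discarded remainder
  if (fstar == 0)               // c ended in 5: exact midpoint
    cstar &= ~1ull;             // ties to even (decrements an odd cstar)
  return cstar;
}
// e.g. 24 -> 2, 25 -> 2, 26 -> 3, 35 -> 4.
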
347 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
348 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
349 C1.w[1] = x.w[1] & MASK_COEFF;
350 C1.w[0] = x.w[0];
353 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
355 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
356 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
384 if ((C1.w[1] > 0x0001ed09bead87c0ull)
385 || (C1.w[1] == 0x0001ed09bead87c0ull
386 && (C1.w[0] > 0x378d8e63ffffffffull))
387 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
390 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
398 if (C1.w[1] == 0) {
399 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
401 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
402 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
406 tmp1.d = (double) (C1.w[0]); // exact conversion
411 tmp1.d = (double) C1.w[0]; // exact conversion
416 tmp1.d = (double) C1.w[1]; // exact conversion
423 if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
424 || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
425 && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
446 C.w[1] = 0x0000000000000005ull;
447 C.w[0] = 0x0000000000000005ull;
450 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
456 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
470 C.w[1] = 0x0000000000000004ull;
471 C.w[0] = 0xfffffffffffffffbull;
474 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
480 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
494 C1.w[1] = x.w[1] & MASK_COEFF;
495 C1.w[0] = x.w[0];
509 if ((C1.w[1] == 0) && (C1.w[0] <= midpoint64[ind])) {
517 if ((C1.w[1] < midpoint128[ind - 19].w[1])
518 || ((C1.w[1] == midpoint128[ind - 19].w[1])
519 && (C1.w[0] <= midpoint128[ind - 19].w[0]))) {
536 tmp64 = C1.w[0];
538 C1.w[0] = C1.w[0] + midpoint64[ind - 1];
540 C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
541 C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
543 if (C1.w[0] < tmp64)
544 C1.w[1]++;
555 Cstar.w[1] = P256.w[3];
556 Cstar.w[0] = P256.w[2];
557 fstar.w[3] = 0;
558 fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
559 fstar.w[1] = P256.w[1];
560 fstar.w[0] = P256.w[0];
562 Cstar.w[1] = 0;
563 Cstar.w[0] = P256.w[3];
564 fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
565 fstar.w[2] = P256.w[2];
566 fstar.w[1] = P256.w[1];
567 fstar.w[0] = P256.w[0];
584 Cstar.w[0] =
585 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
588 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
596 if (fstar.w[1] > 0x8000000000000000ull ||
597 (fstar.w[1] == 0x8000000000000000ull
598 && fstar.w[0] > 0x0ull)) {
600 tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
601 if (tmp64 > ten2mk128trunc[ind - 1].w[1]
602 || (tmp64 == ten2mk128trunc[ind - 1].w[1]
603 && fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
612 if (fstar.w[3] > 0x0 ||
613 (fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
614 (fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
615 (fstar.w[1] || fstar.w[0]))) {
618 tmp64 = fstar.w[2] - onehalf128[ind - 1];
619 tmp64A = fstar.w[3];
620 if (tmp64 > fstar.w[2])
623 || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
624 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
625 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
634 if (fstar.w[3] > onehalf128[ind - 1] ||
635 (fstar.w[3] == onehalf128[ind - 1] &&
636 (fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
639 tmp64 = fstar.w[3] - onehalf128[ind - 1];
640 if (tmp64 || fstar.w[2]
641 || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
642 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
643 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
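
In the inexact (x-suffixed) variants, lines 596-643 grade f* against 1/2, whose representation moves with ind: 0x8000000000000000 in w[1] (1/2 scaled by 2^64) for small ind, onehalf128[ind-1] in w[2] or w[3] for larger ind. When f* exceeds 1/2, the follow-up subtraction f* - 1/2 compared against ten2mk128trunc[ind-1] checks whether the excess is larger than the stored approximation error, i.e. whether x genuinely lies above the midpoint. The first case as a sketch:

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t w[2]; } UINT128;

// f is a fraction scaled by 2^128 (w[1] = top 64 bits), so 1/2 is 2^127,
// i.e. 0x8000000000000000 in w[1]. Mirrors lines 596-598.
static bool fstar_above_half_sketch (UINT128 f) {
  return f.w[1] > 0x8000000000000000ull ||
         (f.w[1] == 0x8000000000000000ull && f.w[0] > 0);
}
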
656 if ((fstar.w[3] == 0) && (fstar.w[2] == 0) &&
657 (fstar.w[1] || fstar.w[0]) &&
658 (fstar.w[1] < ten2mk128trunc[ind - 1].w[1] ||
659 (fstar.w[1] == ten2mk128trunc[ind - 1].w[1] &&
660 fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
662 if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
664 Cstar.w[0]--; // Cstar.w[0] is now even
668 res = -Cstar.w[0];
670 res = Cstar.w[0];
675 res = -C1.w[0];
677 res = C1.w[0];
681 res = -C1.w[0] * ten2k64[exp];
683 res = C1.w[0] * ten2k64[exp];
712 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
713 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
714 C1.w[1] = x.w[1] & MASK_COEFF;
715 C1.w[0] = x.w[0];
718 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
720 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
721 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
749 if ((C1.w[1] > 0x0001ed09bead87c0ull)
750 || (C1.w[1] == 0x0001ed09bead87c0ull
751 && (C1.w[0] > 0x378d8e63ffffffffull))
752 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
755 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
763 if (C1.w[1] == 0) {
764 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
766 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
767 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
771 tmp1.d = (double) (C1.w[0]); // exact conversion
776 tmp1.d = (double) C1.w[0]; // exact conversion
781 tmp1.d = (double) C1.w[1]; // exact conversion
788 if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
789 || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
790 && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
812 C.w[1] = 0x0000000000000005ull;
813 C.w[0] = 0x0000000000000000ull;
816 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
822 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
836 C.w[1] = 0x0000000000000005ull;
837 C.w[0] = 0x0000000000000000ull;
840 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
846 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
860 C1.w[1] = x.w[1] & MASK_COEFF;
861 C1.w[0] = x.w[0];
886 Cstar.w[1] = P256.w[3];
887 Cstar.w[0] = P256.w[2];
888 fstar.w[3] = 0;
889 fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
890 fstar.w[1] = P256.w[1];
891 fstar.w[0] = P256.w[0];
893 Cstar.w[1] = 0;
894 Cstar.w[0] = P256.w[3];
895 fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
896 fstar.w[2] = P256.w[2];
897 fstar.w[1] = P256.w[1];
898 fstar.w[0] = P256.w[0];
909 Cstar.w[0] =
910 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
913 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
923 if (fstar.w[1] > ten2mk128trunc[ind - 1].w[1] ||
924 (fstar.w[1] == ten2mk128trunc[ind - 1].w[1] &&
925 fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
927 Cstar.w[0]++;
928 if (Cstar.w[0] == 0x0)
929 Cstar.w[1]++;
933 if (fstar.w[2] || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
934 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
935 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
937 Cstar.w[0]++;
938 if (Cstar.w[0] == 0x0)
939 Cstar.w[1]++;
943 if (fstar.w[3] || fstar.w[2]
944 || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
945 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
946 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
948 Cstar.w[0]++;
949 if (Cstar.w[0] == 0x0)
950 Cstar.w[1]++;
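
In this variant, lines 923-950 bump Cstar by one whenever f* exceeds the truncation error bound, i.e. whenever the discarded remainder is real rather than an artifact of the reciprocal approximation, which is what a directed (away-from-zero or toward-infinity) rounding requires; the increment propagates a carry into the high limb when the low limb wraps to zero. As a sketch:

#include <stdint.h>

typedef struct { uint64_t w[2]; } UINT128;

// 128-bit increment: a wrap of the low limb to zero carries into the high limb.
static void inc128_sketch (UINT128 *a) {
  if (++a->w[0] == 0)
    a->w[1]++;
}
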
956 res = -Cstar.w[0];
958 res = Cstar.w[0];
963 res = -C1.w[0];
965 res = C1.w[0];
969 res = -C1.w[0] * ten2k64[exp];
971 res = C1.w[0] * ten2k64[exp];
1000 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
1001 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
1002 C1.w[1] = x.w[1] & MASK_COEFF;
1003 C1.w[0] = x.w[0];
1006 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
1008 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
1009 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
1037 if ((C1.w[1] > 0x0001ed09bead87c0ull)
1038 || (C1.w[1] == 0x0001ed09bead87c0ull
1039 && (C1.w[0] > 0x378d8e63ffffffffull))
1040 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
1043 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
1051 if (C1.w[1] == 0) {
1052 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
1054 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
1055 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
1059 tmp1.d = (double) (C1.w[0]); // exact conversion
1064 tmp1.d = (double) C1.w[0]; // exact conversion
1069 tmp1.d = (double) C1.w[1]; // exact conversion
1076 if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
1077 || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
1078 && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
1099 C.w[1] = 0x0000000000000005ull;
1100 C.w[0] = 0x0000000000000000ull;
1103 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
1109 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
1123 C.w[1] = 0x0000000000000005ull;
1124 C.w[0] = 0x0000000000000000ull;
1127 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
1133 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
1147 C1.w[1] = x.w[1] & MASK_COEFF;
1148 C1.w[0] = x.w[0];
1175 Cstar.w[1] = P256.w[3];
1176 Cstar.w[0] = P256.w[2];
1177 fstar.w[3] = 0;
1178 fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
1179 fstar.w[1] = P256.w[1];
1180 fstar.w[0] = P256.w[0];
1182 Cstar.w[1] = 0;
1183 Cstar.w[0] = P256.w[3];
1184 fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
1185 fstar.w[2] = P256.w[2];
1186 fstar.w[1] = P256.w[1];
1187 fstar.w[0] = P256.w[0];
1198 Cstar.w[0] =
1199 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
1202 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
1212 if (fstar.w[1] > ten2mk128trunc[ind - 1].w[1] ||
1213 (fstar.w[1] == ten2mk128trunc[ind - 1].w[1] &&
1214 fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
1216 Cstar.w[0]++;
1217 if (Cstar.w[0] == 0x0)
1218 Cstar.w[1]++;
1224 if (fstar.w[2] || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
1225 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
1226 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
1228 Cstar.w[0]++;
1229 if (Cstar.w[0] == 0x0)
1230 Cstar.w[1]++;
1236 if (fstar.w[3] || fstar.w[2]
1237 || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
1238 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
1239 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
1241 Cstar.w[0]++;
1242 if (Cstar.w[0] == 0x0)
1243 Cstar.w[1]++;
1251 res = -Cstar.w[0];
1253 res = Cstar.w[0];
1258 res = -C1.w[0];
1260 res = C1.w[0];
1264 res = -C1.w[0] * ten2k64[exp];
1266 res = C1.w[0] * ten2k64[exp];
1295 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
1296 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
1297 C1.w[1] = x.w[1] & MASK_COEFF;
1298 C1.w[0] = x.w[0];
1301 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
1303 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
1304 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
1332 if ((C1.w[1] > 0x0001ed09bead87c0ull)
1333 || (C1.w[1] == 0x0001ed09bead87c0ull
1334 && (C1.w[0] > 0x378d8e63ffffffffull))
1335 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
1338 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
1346 if (C1.w[1] == 0) {
1347 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
1349 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
1350 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
1354 tmp1.d = (double) (C1.w[0]); // exact conversion
1359 tmp1.d = (double) C1.w[0]; // exact conversion
1364 tmp1.d = (double) C1.w[1]; // exact conversion
1371 if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
1372 || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
1373 && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
1394 C.w[1] = 0x0000000000000005ull;
1395 C.w[0] = 0x000000000000000aull;
1398 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
1404 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
1418 C.w[1] = 0x0000000000000004ull;
1419 C.w[0] = 0xfffffffffffffff6ull;
1422 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
1428 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
1442 C1.w[1] = x.w[1] & MASK_COEFF;
1443 C1.w[0] = x.w[0];
1468 Cstar.w[1] = P256.w[3];
1469 Cstar.w[0] = P256.w[2];
1470 fstar.w[3] = 0;
1471 fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
1472 fstar.w[1] = P256.w[1];
1473 fstar.w[0] = P256.w[0];
1475 Cstar.w[1] = 0;
1476 Cstar.w[0] = P256.w[3];
1477 fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
1478 fstar.w[2] = P256.w[2];
1479 fstar.w[1] = P256.w[1];
1480 fstar.w[0] = P256.w[0];
1491 Cstar.w[0] =
1492 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
1495 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
1505 if (fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
1506 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
1507 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
1509 Cstar.w[0]++;
1510 if (Cstar.w[0] == 0x0)
1511 Cstar.w[1]++;
1515 if (fstar.w[2] || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
1516 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
1517 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
1519 Cstar.w[0]++;
1520 if (Cstar.w[0] == 0x0)
1521 Cstar.w[1]++;
1525 if (fstar.w[3] || fstar.w[2]
1526 || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
1527 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
1528 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
1530 Cstar.w[0]++;
1531 if (Cstar.w[0] == 0x0)
1532 Cstar.w[1]++;
1537 res = -Cstar.w[0];
1539 res = Cstar.w[0];
1544 res = -C1.w[0];
1546 res = C1.w[0];
1550 res = -C1.w[0] * ten2k64[exp];
1552 res = C1.w[0] * ten2k64[exp];
1581 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
1582 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
1583 C1.w[1] = x.w[1] & MASK_COEFF;
1584 C1.w[0] = x.w[0];
1587 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
1589 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
1590 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
1618 if ((C1.w[1] > 0x0001ed09bead87c0ull)
1619 || (C1.w[1] == 0x0001ed09bead87c0ull
1620 && (C1.w[0] > 0x378d8e63ffffffffull))
1621 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
1624 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
1632 if (C1.w[1] == 0) {
1633 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
1635 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
1636 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
1640 tmp1.d = (double) (C1.w[0]); // exact conversion
1645 tmp1.d = (double) C1.w[0]; // exact conversion
1650 tmp1.d = (double) C1.w[1]; // exact conversion
1657 if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
1658 || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
1659 && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
1680 C.w[1] = 0x0000000000000005ull;
1681 C.w[0] = 0x000000000000000aull;
1684 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
1690 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
1704 C.w[1] = 0x0000000000000004ull;
1705 C.w[0] = 0xfffffffffffffff6ull;
1708 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
1714 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
1728 C1.w[1] = x.w[1] & MASK_COEFF;
1729 C1.w[0] = x.w[0];
1756 Cstar.w[1] = P256.w[3];
1757 Cstar.w[0] = P256.w[2];
1758 fstar.w[3] = 0;
1759 fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
1760 fstar.w[1] = P256.w[1];
1761 fstar.w[0] = P256.w[0];
1763 Cstar.w[1] = 0;
1764 Cstar.w[0] = P256.w[3];
1765 fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
1766 fstar.w[2] = P256.w[2];
1767 fstar.w[1] = P256.w[1];
1768 fstar.w[0] = P256.w[0];
1779 Cstar.w[0] =
1780 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
1783 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
1793 if (fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
1794 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
1795 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
1797 Cstar.w[0]++;
1798 if (Cstar.w[0] == 0x0)
1799 Cstar.w[1]++;
1805 if (fstar.w[2] || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
1806 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
1807 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
1809 Cstar.w[0]++;
1810 if (Cstar.w[0] == 0x0)
1811 Cstar.w[1]++;
1817 if (fstar.w[3] || fstar.w[2]
1818 || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
1819 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
1820 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
1822 Cstar.w[0]++;
1823 if (Cstar.w[0] == 0x0)
1824 Cstar.w[1]++;
1832 res = -Cstar.w[0];
1834 res = Cstar.w[0];
1839 res = -C1.w[0];
1841 res = C1.w[0];
1845 res = -C1.w[0] * ten2k64[exp];
1847 res = C1.w[0] * ten2k64[exp];
1875 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
1876 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
1877 C1.w[1] = x.w[1] & MASK_COEFF;
1878 C1.w[0] = x.w[0];
1881 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
1883 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
1884 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
1912 if ((C1.w[1] > 0x0001ed09bead87c0ull)
1913 || (C1.w[1] == 0x0001ed09bead87c0ull
1914 && (C1.w[0] > 0x378d8e63ffffffffull))
1915 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
1918 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
1926 if (C1.w[1] == 0) {
1927 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
1929 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
1930 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
1934 tmp1.d = (double) (C1.w[0]); // exact conversion
1939 tmp1.d = (double) C1.w[0]; // exact conversion
1944 tmp1.d = (double) C1.w[1]; // exact conversion
1951 if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
1952 || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
1953 && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
1974 C.w[1] = 0x0000000000000005ull;
1975 C.w[0] = 0x000000000000000aull;
1978 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
1984 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
1998 C.w[1] = 0x0000000000000005ull;
1999 C.w[0] = 0x0000000000000000ull;
2002 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
2008 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
2022 C1.w[1] = x.w[1] & MASK_COEFF;
2023 C1.w[0] = x.w[0];
2045 Cstar.w[1] = P256.w[3];
2046 Cstar.w[0] = P256.w[2];
2048 Cstar.w[1] = 0;
2049 Cstar.w[0] = P256.w[3];
2060 Cstar.w[0] =
2061 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
2064 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
2067 res = -Cstar.w[0];
2069 res = Cstar.w[0];
2074 res = -C1.w[0];
2076 res = C1.w[0];
2080 res = -C1.w[0] * ten2k64[exp];
2082 res = C1.w[0] * ten2k64[exp];
2111 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
2112 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
2113 C1.w[1] = x.w[1] & MASK_COEFF;
2114 C1.w[0] = x.w[0];
2117 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
2119 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
2120 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
2148 if ((C1.w[1] > 0x0001ed09bead87c0ull)
2149 || (C1.w[1] == 0x0001ed09bead87c0ull
2150 && (C1.w[0] > 0x378d8e63ffffffffull))
2151 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
2154 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
2162 if (C1.w[1] == 0) {
2163 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
2165 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
2166 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
2170 tmp1.d = (double) (C1.w[0]); // exact conversion
2175 tmp1.d = (double) C1.w[0]; // exact conversion
2180 tmp1.d = (double) C1.w[1]; // exact conversion
2187 if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
2188 || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
2189 && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
2210 C.w[1] = 0x0000000000000005ull;
2211 C.w[0] = 0x000000000000000aull;
2214 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
2220 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
2234 C.w[1] = 0x0000000000000005ull;
2235 C.w[0] = 0x0000000000000000ull;
2238 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
2244 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
2258 C1.w[1] = x.w[1] & MASK_COEFF;
2259 C1.w[0] = x.w[0];
2283 Cstar.w[1] = P256.w[3];
2284 Cstar.w[0] = P256.w[2];
2285 fstar.w[3] = 0;
2286 fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
2287 fstar.w[1] = P256.w[1];
2288 fstar.w[0] = P256.w[0];
2290 Cstar.w[1] = 0;
2291 Cstar.w[0] = P256.w[3];
2292 fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
2293 fstar.w[2] = P256.w[2];
2294 fstar.w[1] = P256.w[1];
2295 fstar.w[0] = P256.w[0];
2306 Cstar.w[0] =
2307 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
2310 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
2318 if (fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
2319 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
2320 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
2325 if (fstar.w[2] || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
2326 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
2327 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
2332 if (fstar.w[3] || fstar.w[2]
2333 || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
2334 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
2335 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
2342 res = -Cstar.w[0];
2344 res = Cstar.w[0];
2349 res = -C1.w[0];
2351 res = C1.w[0];
2355 res = -C1.w[0] * ten2k64[exp];
2357 res = C1.w[0] * ten2k64[exp];
2386 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
2387 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
2388 C1.w[1] = x.w[1] & MASK_COEFF;
2389 C1.w[0] = x.w[0];
2392 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
2394 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
2395 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
2423 if ((C1.w[1] > 0x0001ed09bead87c0ull)
2424 || (C1.w[1] == 0x0001ed09bead87c0ull
2425 && (C1.w[0] > 0x378d8e63ffffffffull))
2426 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
2429 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
2437 if (C1.w[1] == 0) {
2438 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
2440 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
2441 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
2445 tmp1.d = (double) (C1.w[0]); // exact conversion
2450 tmp1.d = (double) C1.w[0]; // exact conversion
2455 tmp1.d = (double) C1.w[1]; // exact conversion
2462 if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
2463 || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
2464 && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
2485 C.w[1] = 0x0000000000000005ull;
2486 C.w[0] = 0x0000000000000005ull;
2489 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
2495 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
2509 C.w[1] = 0x0000000000000004ull;
2510 C.w[0] = 0xfffffffffffffffbull;
2513 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
2519 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
2533 C1.w[1] = x.w[1] & MASK_COEFF;
2534 C1.w[0] = x.w[0];
2546 if ((C1.w[1] == 0) && (C1.w[0] < midpoint64[ind])) {
2554 if ((C1.w[1] < midpoint128[ind - 19].w[1])
2555 || ((C1.w[1] == midpoint128[ind - 19].w[1])
2556 && (C1.w[0] < midpoint128[ind - 19].w[0]))) {
2571 tmp64 = C1.w[0];
2573 C1.w[0] = C1.w[0] + midpoint64[ind - 1];
2575 C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
2576 C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
2578 if (C1.w[0] < tmp64)
2579 C1.w[1]++;
2590 Cstar.w[1] = P256.w[3];
2591 Cstar.w[0] = P256.w[2];
2593 Cstar.w[1] = 0;
2594 Cstar.w[0] = P256.w[3];
2611 Cstar.w[0] =
2612 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
2615 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
2620 res = -Cstar.w[0];
2622 res = Cstar.w[0];
2627 res = -C1.w[0];
2629 res = C1.w[0];
2633 res = -C1.w[0] * ten2k64[exp];
2635 res = C1.w[0] * ten2k64[exp];
2665 x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
2666 x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
2667 C1.w[1] = x.w[1] & MASK_COEFF;
2668 C1.w[0] = x.w[0];
2671 if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
2673 if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
2674 if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
2702 if ((C1.w[1] > 0x0001ed09bead87c0ull)
2703 || (C1.w[1] == 0x0001ed09bead87c0ull
2704 && (C1.w[0] > 0x378d8e63ffffffffull))
2705 || ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
2708 } else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
2716 if (C1.w[1] == 0) {
2717 if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
2719 if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
2720 tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
2724 tmp1.d = (double) (C1.w[0]); // exact conversion
2729 tmp1.d = (double) C1.w[0]; // exact conversion
2734 tmp1.d = (double) C1.w[1]; // exact conversion
2741 if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
2742 || (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
2743 && C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
2764 C.w[1] = 0x0000000000000005ull;
2765 C.w[0] = 0x0000000000000005ull;
2768 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
2774 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
2788 C.w[1] = 0x0000000000000004ull;
2789 C.w[0] = 0xfffffffffffffffbull;
2792 __mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
2798 if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
2812 C1.w[1] = x.w[1] & MASK_COEFF;
2813 C1.w[0] = x.w[0];
2827 if ((C1.w[1] == 0) && (C1.w[0] < midpoint64[ind])) {
2835 if ((C1.w[1] < midpoint128[ind - 19].w[1])
2836 || ((C1.w[1] == midpoint128[ind - 19].w[1])
2837 && (C1.w[0] < midpoint128[ind - 19].w[0]))) {
2854 tmp64 = C1.w[0];
2856 C1.w[0] = C1.w[0] + midpoint64[ind - 1];
2858 C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
2859 C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
2861 if (C1.w[0] < tmp64)
2862 C1.w[1]++;
2873 Cstar.w[1] = P256.w[3];
2874 Cstar.w[0] = P256.w[2];
2875 fstar.w[3] = 0;
2876 fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
2877 fstar.w[1] = P256.w[1];
2878 fstar.w[0] = P256.w[0];
2880 Cstar.w[1] = 0;
2881 Cstar.w[0] = P256.w[3];
2882 fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
2883 fstar.w[2] = P256.w[2];
2884 fstar.w[1] = P256.w[1];
2885 fstar.w[0] = P256.w[0];
2902 Cstar.w[0] =
2903 (Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
2906 Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
2914 if (fstar.w[1] > 0x8000000000000000ull ||
2915 (fstar.w[1] == 0x8000000000000000ull
2916 && fstar.w[0] > 0x0ull)) {
2918 tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
2919 if (tmp64 > ten2mk128trunc[ind - 1].w[1]
2920 || (tmp64 == ten2mk128trunc[ind - 1].w[1]
2921 && fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
2930 if (fstar.w[3] > 0x0 ||
2931 (fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
2932 (fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
2933 (fstar.w[1] || fstar.w[0]))) {
2936 tmp64 = fstar.w[2] - onehalf128[ind - 1];
2937 tmp64A = fstar.w[3];
2938 if (tmp64 > fstar.w[2])
2941 || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
2942 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
2943 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
2952 if (fstar.w[3] > onehalf128[ind - 1] ||
2953 (fstar.w[3] == onehalf128[ind - 1] &&
2954 (fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
2957 tmp64 = fstar.w[3] - onehalf128[ind - 1];
2958 if (tmp64 || fstar.w[2]
2959 || fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
2960 || (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
2961 && fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
2973 res = -Cstar.w[0];
2975 res = Cstar.w[0];
2980 res = -C1.w[0];
2982 res = C1.w[0];
2986 res = -C1.w[0] * ten2k64[exp];
2988 res = C1.w[0] * ten2k64[exp];