/*
 * PowerPC Decimal Floating Point (DFP) emulation helpers for QEMU.
 *
 * Copyright (c) 2014 IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"

#define DECNUMDIGITS 34
#include "libdecnumber/decContext.h"
#include "libdecnumber/decNumber.h"
#include "libdecnumber/dpd/decimal32.h"
#include "libdecnumber/dpd/decimal64.h"
#include "libdecnumber/dpd/decimal128.h"


static void get_dfp64(ppc_vsr_t *dst, ppc_fprp_t *dfp)
{
    dst->VsrD(1) = dfp->VsrD(0);
}

static void get_dfp128(ppc_vsr_t *dst, ppc_fprp_t *dfp)
{
    dst->VsrD(0) = dfp[0].VsrD(0);
    dst->VsrD(1) = dfp[1].VsrD(0);
}

static void set_dfp64(ppc_fprp_t *dfp, ppc_vsr_t *src)
{
    dfp->VsrD(0) = src->VsrD(1);
}

static void set_dfp128(ppc_fprp_t *dfp, ppc_vsr_t *src)
{
    dfp[0].VsrD(0) = src->VsrD(0);
    dfp[1].VsrD(0) = src->VsrD(1);
}

static void set_dfp128_to_avr(ppc_avr_t *dst, ppc_vsr_t *src)
{
    *dst = *src;
}
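
/*
 * Note on operand layout: a ppc_fprp_t carries one 64-bit doubleword in
 * VsrD(0).  A 64-bit DFP operand therefore occupies a single register,
 * while a 128-bit operand spans a pair of adjacent registers, passed to
 * the helpers as a two-element array with dfp[0] holding the most
 * significant doubleword.  The scratch ppc_vsr_t images used throughout
 * this file keep 64-bit values in VsrD(1) and 128-bit values in
 * VsrD(0):VsrD(1), which is the layout the decimal64/decimal128 casts
 * into libdecnumber expect.
 */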
struct PPC_DFP {
    CPUPPCState *env;
    ppc_vsr_t vt, va, vb;
    decNumber t, a, b;
    decContext context;
    uint8_t crbf;
};

static void dfp_prepare_rounding_mode(decContext *context, uint64_t fpscr)
{
    enum rounding rnd;

    switch ((fpscr & FP_DRN) >> FPSCR_DRN0) {
    case 0:
        rnd = DEC_ROUND_HALF_EVEN;
        break;
    case 1:
        rnd = DEC_ROUND_DOWN;
        break;
    case 2:
        rnd = DEC_ROUND_CEILING;
        break;
    case 3:
        rnd = DEC_ROUND_FLOOR;
        break;
    case 4:
        rnd = DEC_ROUND_HALF_UP;
        break;
    case 5:
        rnd = DEC_ROUND_HALF_DOWN;
        break;
    case 6:
        rnd = DEC_ROUND_UP;
        break;
    case 7:
        rnd = DEC_ROUND_05UP;
        break;
    default:
        g_assert_not_reached();
    }

    decContextSetRounding(context, rnd);
}

static void dfp_set_round_mode_from_immediate(uint8_t r, uint8_t rmc,
                                              struct PPC_DFP *dfp)
{
    enum rounding rnd;
    if (r == 0) {
        switch (rmc & 3) {
        case 0:
            rnd = DEC_ROUND_HALF_EVEN;
            break;
        case 1:
            rnd = DEC_ROUND_DOWN;
            break;
        case 2:
            rnd = DEC_ROUND_HALF_UP;
            break;
        case 3: /* use FPSCR rounding mode */
            return;
        default:
            assert(0); /* cannot get here */
        }
    } else { /* r == 1 */
        switch (rmc & 3) {
        case 0:
            rnd = DEC_ROUND_CEILING;
            break;
        case 1:
            rnd = DEC_ROUND_FLOOR;
            break;
        case 2:
            rnd = DEC_ROUND_UP;
            break;
        case 3:
            rnd = DEC_ROUND_HALF_DOWN;
            break;
        default:
            assert(0); /* cannot get here */
        }
    }
    decContextSetRounding(&dfp->context, rnd);
}

static void dfp_prepare_decimal64(struct PPC_DFP *dfp, ppc_fprp_t *a,
                                  ppc_fprp_t *b, CPUPPCState *env)
{
    decContextDefault(&dfp->context, DEC_INIT_DECIMAL64);
    dfp_prepare_rounding_mode(&dfp->context, env->fpscr);
    dfp->env = env;

    if (a) {
        get_dfp64(&dfp->va, a);
        decimal64ToNumber((decimal64 *)&dfp->va.VsrD(1), &dfp->a);
    } else {
        dfp->va.VsrD(1) = 0;
        decNumberZero(&dfp->a);
    }

    if (b) {
        get_dfp64(&dfp->vb, b);
        decimal64ToNumber((decimal64 *)&dfp->vb.VsrD(1), &dfp->b);
    } else {
        dfp->vb.VsrD(1) = 0;
        decNumberZero(&dfp->b);
    }
}

static void dfp_prepare_decimal128(struct PPC_DFP *dfp, ppc_fprp_t *a,
                                   ppc_fprp_t *b, CPUPPCState *env)
{
    decContextDefault(&dfp->context, DEC_INIT_DECIMAL128);
    dfp_prepare_rounding_mode(&dfp->context, env->fpscr);
    dfp->env = env;

    if (a) {
        get_dfp128(&dfp->va, a);
        decimal128ToNumber((decimal128 *)&dfp->va, &dfp->a);
    } else {
        dfp->va.VsrD(0) = dfp->va.VsrD(1) = 0;
        decNumberZero(&dfp->a);
    }

    if (b) {
        get_dfp128(&dfp->vb, b);
        decimal128ToNumber((decimal128 *)&dfp->vb, &dfp->b);
    } else {
        dfp->vb.VsrD(0) = dfp->vb.VsrD(1) = 0;
        decNumberZero(&dfp->b);
    }
}

static void dfp_finalize_decimal64(struct PPC_DFP *dfp)
{
    decimal64FromNumber((decimal64 *)&dfp->vt.VsrD(1), &dfp->t, &dfp->context);
}

static void dfp_finalize_decimal128(struct PPC_DFP *dfp)
{
    decimal128FromNumber((decimal128 *)&dfp->vt, &dfp->t, &dfp->context);
}

static void dfp_set_FPSCR_flag(struct PPC_DFP *dfp, uint64_t flag,
                               uint64_t enabled)
{
    dfp->env->fpscr |= (flag | FP_FX);
    if (dfp->env->fpscr & enabled) {
        dfp->env->fpscr |= FP_FEX;
    }
}
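
/*
 * For example, dfp_set_FPSCR_flag(dfp, FP_OX, FP_OE) records an overflow:
 * OX and the sticky FX summary bit are set unconditionally, while FEX is
 * raised as well only if the corresponding enable bit (here OE) is
 * already set in the FPSCR.
 */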
static void dfp_set_FPRF_from_FRT_with_context(struct PPC_DFP *dfp,
                                               decContext *context)
{
    uint64_t fprf = 0;

    /* construct FPRF */
    switch (decNumberClass(&dfp->t, context)) {
    case DEC_CLASS_SNAN:
        fprf = 0x01;
        break;
    case DEC_CLASS_QNAN:
        fprf = 0x11;
        break;
    case DEC_CLASS_NEG_INF:
        fprf = 0x09;
        break;
    case DEC_CLASS_NEG_NORMAL:
        fprf = 0x08;
        break;
    case DEC_CLASS_NEG_SUBNORMAL:
        fprf = 0x18;
        break;
    case DEC_CLASS_NEG_ZERO:
        fprf = 0x12;
        break;
    case DEC_CLASS_POS_ZERO:
        fprf = 0x02;
        break;
    case DEC_CLASS_POS_SUBNORMAL:
        fprf = 0x14;
        break;
    case DEC_CLASS_POS_NORMAL:
        fprf = 0x04;
        break;
    case DEC_CLASS_POS_INF:
        fprf = 0x05;
        break;
    default:
        assert(0); /* should never get here */
    }
    dfp->env->fpscr &= ~FP_FPRF;
    dfp->env->fpscr |= (fprf << FPSCR_FPRF);
}

static void dfp_set_FPRF_from_FRT(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT_with_context(dfp, &dfp->context);
}

static void dfp_set_FPRF_from_FRT_short(struct PPC_DFP *dfp)
{
    decContext shortContext;
    decContextDefault(&shortContext, DEC_INIT_DECIMAL32);
    dfp_set_FPRF_from_FRT_with_context(dfp, &shortContext);
}

static void dfp_set_FPRF_from_FRT_long(struct PPC_DFP *dfp)
{
    decContext longContext;
    decContextDefault(&longContext, DEC_INIT_DECIMAL64);
    dfp_set_FPRF_from_FRT_with_context(dfp, &longContext);
}
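
/*
 * The FPRF values above pack the five result-flags bits as
 * C | FL | FG | FE | FU.  For example, 0x09 = 0b01001 sets FL and FU
 * (negative infinity), 0x11 = 0b10001 sets C and FU (quiet NaN), and
 * 0x12 = 0b10010 sets C and FE (negative zero).
 */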
static void dfp_check_for_OX(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Overflow) {
        dfp_set_FPSCR_flag(dfp, FP_OX, FP_OE);
    }
}

static void dfp_check_for_UX(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Underflow) {
        dfp_set_FPSCR_flag(dfp, FP_UX, FP_UE);
    }
}

static void dfp_check_for_XX(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Inexact) {
        dfp_set_FPSCR_flag(dfp, FP_XX | FP_FI, FP_XE);
    }
}

static void dfp_check_for_ZX(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Division_by_zero) {
        dfp_set_FPSCR_flag(dfp, FP_ZX, FP_ZE);
    }
}

static void dfp_check_for_VXSNAN(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Invalid_operation) {
        if (decNumberIsSNaN(&dfp->a) || decNumberIsSNaN(&dfp->b)) {
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE);
        }
    }
}

static void dfp_check_for_VXSNAN_and_convert_to_QNaN(struct PPC_DFP *dfp)
{
    if (decNumberIsSNaN(&dfp->t)) {
        dfp->t.bits &= ~DECSNAN;
        dfp->t.bits |= DECNAN;
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE);
    }
}

static void dfp_check_for_VXISI(struct PPC_DFP *dfp, int testForSameSign)
{
    if (dfp->context.status & DEC_Invalid_operation) {
        if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) {
            int same = decNumberClass(&dfp->a, &dfp->context) ==
                       decNumberClass(&dfp->b, &dfp->context);
            if ((same && testForSameSign) || (!same && !testForSameSign)) {
                dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXISI, FP_VE);
            }
        }
    }
}

static void dfp_check_for_VXISI_add(struct PPC_DFP *dfp)
{
    dfp_check_for_VXISI(dfp, 0);
}

static void dfp_check_for_VXISI_subtract(struct PPC_DFP *dfp)
{
    dfp_check_for_VXISI(dfp, 1);
}

static void dfp_check_for_VXIMZ(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Invalid_operation) {
        if ((decNumberIsInfinite(&dfp->a) && decNumberIsZero(&dfp->b)) ||
            (decNumberIsInfinite(&dfp->b) && decNumberIsZero(&dfp->a))) {
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIMZ, FP_VE);
        }
    }
}

static void dfp_check_for_VXZDZ(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Division_undefined) {
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXZDZ, FP_VE);
    }
}

static void dfp_check_for_VXIDI(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Invalid_operation) {
        if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) {
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIDI, FP_VE);
        }
    }
}

static void dfp_check_for_VXVC(struct PPC_DFP *dfp)
{
    if (decNumberIsNaN(&dfp->a) || decNumberIsNaN(&dfp->b)) {
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXVC, FP_VE);
    }
}

static void dfp_check_for_VXCVI(struct PPC_DFP *dfp)
{
    if ((dfp->context.status & DEC_Invalid_operation) &&
        (!decNumberIsSNaN(&dfp->a)) &&
        (!decNumberIsSNaN(&dfp->b))) {
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
    }
}

static void dfp_set_CRBF_from_T(struct PPC_DFP *dfp)
{
    if (decNumberIsNaN(&dfp->t)) {
        dfp->crbf = 1;
    } else if (decNumberIsZero(&dfp->t)) {
        dfp->crbf = 2;
    } else if (decNumberIsNegative(&dfp->t)) {
        dfp->crbf = 8;
    } else {
        dfp->crbf = 4;
    }
}

static void dfp_set_FPCC_from_CRBF(struct PPC_DFP *dfp)
{
    dfp->env->fpscr &= ~FP_FPCC;
    dfp->env->fpscr |= (dfp->crbf << FPSCR_FPCC);
}

static inline void dfp_makeQNaN(decNumber *dn)
{
    dn->bits &= ~DECSPECIAL;
    dn->bits |= DECNAN;
}

static inline int dfp_get_digit(decNumber *dn, int n)
{
    assert(DECDPUN == 3);
    int unit = n / DECDPUN;
    int dig = n % DECDPUN;
    switch (dig) {
    case 0:
        return dn->lsu[unit] % 10;
    case 1:
        return (dn->lsu[unit] / 10) % 10;
    case 2:
        return dn->lsu[unit] / 100;
    }
    g_assert_not_reached();
}
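
/*
 * Worked example: with DECDPUN == 3 each lsu[] element stores three
 * decimal digits, least significant unit first.  The value 12345 is held
 * as lsu[0] = 345, lsu[1] = 12, so dfp_get_digit(dn, 0) == 5,
 * dfp_get_digit(dn, 3) == 2, and dfp_get_digit(dn, dn->digits - 1)
 * yields the most significant digit, 1.
 */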
#define DFP_HELPER_TAB(op, dnop, postprocs, size)                         \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a,          \
                 ppc_fprp_t *b)                                           \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    dfp_prepare_decimal##size(&dfp, a, b, env);                           \
    dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context);                           \
    dfp_finalize_decimal##size(&dfp);                                     \
    postprocs(&dfp);                                                      \
    set_dfp##size(t, &dfp.vt);                                            \
}

static void ADD_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXISI_add(dfp);
}

DFP_HELPER_TAB(DADD, decNumberAdd, ADD_PPs, 64)
DFP_HELPER_TAB(DADDQ, decNumberAdd, ADD_PPs, 128)

static void SUB_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXISI_subtract(dfp);
}

DFP_HELPER_TAB(DSUB, decNumberSubtract, SUB_PPs, 64)
DFP_HELPER_TAB(DSUBQ, decNumberSubtract, SUB_PPs, 128)

static void MUL_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXIMZ(dfp);
}

DFP_HELPER_TAB(DMUL, decNumberMultiply, MUL_PPs, 64)
DFP_HELPER_TAB(DMULQ, decNumberMultiply, MUL_PPs, 128)

static void DIV_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_ZX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXZDZ(dfp);
    dfp_check_for_VXIDI(dfp);
}

DFP_HELPER_TAB(DDIV, decNumberDivide, DIV_PPs, 64)
DFP_HELPER_TAB(DDIVQ, decNumberDivide, DIV_PPs, 128)

#define DFP_HELPER_BF_AB(op, dnop, postprocs, size)                       \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b)      \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    dfp_prepare_decimal##size(&dfp, a, b, env);                           \
    dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context);                           \
    dfp_finalize_decimal##size(&dfp);                                     \
    postprocs(&dfp);                                                      \
    return dfp.crbf;                                                      \
}

static void CMPU_PPs(struct PPC_DFP *dfp)
{
    dfp_set_CRBF_from_T(dfp);
    dfp_set_FPCC_from_CRBF(dfp);
    dfp_check_for_VXSNAN(dfp);
}

DFP_HELPER_BF_AB(DCMPU, decNumberCompare, CMPU_PPs, 64)
DFP_HELPER_BF_AB(DCMPUQ, decNumberCompare, CMPU_PPs, 128)

static void CMPO_PPs(struct PPC_DFP *dfp)
{
    dfp_set_CRBF_from_T(dfp);
    dfp_set_FPCC_from_CRBF(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXVC(dfp);
}

DFP_HELPER_BF_AB(DCMPO, decNumberCompare, CMPO_PPs, 64)
DFP_HELPER_BF_AB(DCMPOQ, decNumberCompare, CMPO_PPs, 128)

#define DFP_HELPER_TSTDC(op, size)                                        \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm)       \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    int match = 0;                                                        \
                                                                          \
    dfp_prepare_decimal##size(&dfp, a, 0, env);                           \
                                                                          \
    match |= (dcm & 0x20) && decNumberIsZero(&dfp.a);                     \
    match |= (dcm & 0x10) && decNumberIsSubnormal(&dfp.a, &dfp.context);  \
    match |= (dcm & 0x08) && decNumberIsNormal(&dfp.a, &dfp.context);     \
    match |= (dcm & 0x04) && decNumberIsInfinite(&dfp.a);                 \
    match |= (dcm & 0x02) && decNumberIsQNaN(&dfp.a);                     \
    match |= (dcm & 0x01) && decNumberIsSNaN(&dfp.a);                     \
                                                                          \
    if (decNumberIsNegative(&dfp.a)) {                                    \
        dfp.crbf = match ? 0xA : 0x8;                                     \
    } else {                                                              \
        dfp.crbf = match ? 0x2 : 0x0;                                     \
    }                                                                     \
                                                                          \
    dfp_set_FPCC_from_CRBF(&dfp);                                         \
    return dfp.crbf;                                                      \
}

DFP_HELPER_TSTDC(DTSTDC, 64)
DFP_HELPER_TSTDC(DTSTDCQ, 128)

#define DFP_HELPER_TSTDG(op, size)                                        \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm)       \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    int minexp, maxexp, nzero_digits, nzero_idx, is_negative, is_zero,    \
        is_extreme_exp, is_subnormal, is_normal, leftmost_is_nonzero,     \
        match;                                                            \
                                                                          \
    dfp_prepare_decimal##size(&dfp, a, 0, env);                           \
                                                                          \
    if ((size) == 64) {                                                   \
        minexp = -398;                                                    \
        maxexp = 369;                                                     \
        nzero_digits = 16;                                                \
        nzero_idx = 5;                                                    \
    } else if ((size) == 128) {                                           \
        minexp = -6176;                                                   \
        maxexp = 6111;                                                    \
        nzero_digits = 34;                                                \
        nzero_idx = 11;                                                   \
    }                                                                     \
                                                                          \
    is_negative = decNumberIsNegative(&dfp.a);                            \
    is_zero = decNumberIsZero(&dfp.a);                                    \
    is_extreme_exp = (dfp.a.exponent == maxexp) ||                        \
                     (dfp.a.exponent == minexp);                          \
    is_subnormal = decNumberIsSubnormal(&dfp.a, &dfp.context);            \
    is_normal = decNumberIsNormal(&dfp.a, &dfp.context);                  \
    leftmost_is_nonzero = (dfp.a.digits == nzero_digits) &&               \
                          (dfp.a.lsu[nzero_idx] != 0);                    \
    match = 0;                                                            \
                                                                          \
    match |= (dcm & 0x20) && is_zero && !is_extreme_exp;                  \
    match |= (dcm & 0x10) && is_zero && is_extreme_exp;                   \
    match |= (dcm & 0x08) &&                                              \
             (is_subnormal || (is_normal && is_extreme_exp));             \
    match |= (dcm & 0x04) && is_normal && !is_extreme_exp &&              \
             !leftmost_is_nonzero;                                        \
    match |= (dcm & 0x02) && is_normal && !is_extreme_exp &&              \
             leftmost_is_nonzero;                                         \
    match |= (dcm & 0x01) && decNumberIsSpecial(&dfp.a);                  \
                                                                          \
    if (is_negative) {                                                    \
        dfp.crbf = match ? 0xA : 0x8;                                     \
    } else {                                                              \
        dfp.crbf = match ? 0x2 : 0x0;                                     \
    }                                                                     \
                                                                          \
    dfp_set_FPCC_from_CRBF(&dfp);                                         \
    return dfp.crbf;                                                      \
}

DFP_HELPER_TSTDG(DTSTDG, 64)
DFP_HELPER_TSTDG(DTSTDGQ, 128)

#define DFP_HELPER_TSTEX(op, size)                                        \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b)      \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    int expa, expb, a_is_special, b_is_special;                           \
                                                                          \
    dfp_prepare_decimal##size(&dfp, a, b, env);                           \
                                                                          \
    expa = dfp.a.exponent;                                                \
    expb = dfp.b.exponent;                                                \
    a_is_special = decNumberIsSpecial(&dfp.a);                            \
    b_is_special = decNumberIsSpecial(&dfp.b);                            \
                                                                          \
    if (a_is_special || b_is_special) {                                   \
        int atype = a_is_special ? (decNumberIsNaN(&dfp.a) ? 4 : 2) : 1;  \
        int btype = b_is_special ? (decNumberIsNaN(&dfp.b) ? 4 : 2) : 1;  \
        dfp.crbf = (atype ^ btype) ? 0x1 : 0x2;                           \
    } else if (expa < expb) {                                             \
        dfp.crbf = 0x8;                                                   \
    } else if (expa > expb) {                                             \
        dfp.crbf = 0x4;                                                   \
    } else {                                                              \
        dfp.crbf = 0x2;                                                   \
    }                                                                     \
                                                                          \
    dfp_set_FPCC_from_CRBF(&dfp);                                         \
    return dfp.crbf;                                                      \
}

DFP_HELPER_TSTEX(DTSTEX, 64)
DFP_HELPER_TSTEX(DTSTEXQ, 128)

#define DFP_HELPER_TSTSF(op, size)                                        \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b)      \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    unsigned k;                                                           \
    ppc_vsr_t va;                                                         \
                                                                          \
    dfp_prepare_decimal##size(&dfp, 0, b, env);                           \
                                                                          \
    get_dfp64(&va, a);                                                    \
    k = va.VsrD(1) & 0x3F;                                                \
                                                                          \
    if (unlikely(decNumberIsSpecial(&dfp.b))) {                           \
        dfp.crbf = 1;                                                     \
    } else if (k == 0) {                                                  \
        dfp.crbf = 4;                                                     \
    } else if (unlikely(decNumberIsZero(&dfp.b))) {                       \
        /* Zero has no sig digits */                                      \
        dfp.crbf = 4;                                                     \
    } else {                                                              \
        unsigned nsd = dfp.b.digits;                                      \
        if (k < nsd) {                                                    \
            dfp.crbf = 8;                                                 \
        } else if (k > nsd) {                                             \
            dfp.crbf = 4;                                                 \
        } else {                                                          \
            dfp.crbf = 2;                                                 \
        }                                                                 \
    }                                                                     \
                                                                          \
    dfp_set_FPCC_from_CRBF(&dfp);                                         \
    return dfp.crbf;                                                      \
}

DFP_HELPER_TSTSF(DTSTSF, 64)
DFP_HELPER_TSTSF(DTSTSFQ, 128)

#define DFP_HELPER_TSTSFI(op, size)                                       \
uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b)         \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    unsigned uim;                                                         \
                                                                          \
    dfp_prepare_decimal##size(&dfp, 0, b, env);                           \
                                                                          \
    uim = a & 0x3F;                                                       \
                                                                          \
    if (unlikely(decNumberIsSpecial(&dfp.b))) {                           \
        dfp.crbf = 1;                                                     \
    } else if (uim == 0) {                                                \
        dfp.crbf = 4;                                                     \
    } else if (unlikely(decNumberIsZero(&dfp.b))) {                       \
        /* Zero has no sig digits */                                      \
        dfp.crbf = 4;                                                     \
    } else {                                                              \
        unsigned nsd = dfp.b.digits;                                      \
        if (uim < nsd) {                                                  \
            dfp.crbf = 8;                                                 \
        } else if (uim > nsd) {                                           \
            dfp.crbf = 4;                                                 \
        } else {                                                          \
            dfp.crbf = 2;                                                 \
        }                                                                 \
    }                                                                     \
                                                                          \
    dfp_set_FPCC_from_CRBF(&dfp);                                         \
    return dfp.crbf;                                                      \
}

DFP_HELPER_TSTSFI(DTSTSFI, 64)
DFP_HELPER_TSTSFI(DTSTSFIQ, 128)

static void QUA_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXCVI(dfp);
}

static void dfp_quantize(uint8_t rmc, struct PPC_DFP *dfp)
{
    dfp_set_round_mode_from_immediate(0, rmc, dfp);
    decNumberQuantize(&dfp->t, &dfp->b, &dfp->a, &dfp->context);
    if (decNumberIsSNaN(&dfp->a)) {
        dfp->t = dfp->a;
        dfp_makeQNaN(&dfp->t);
    } else if (decNumberIsSNaN(&dfp->b)) {
        dfp->t = dfp->b;
        dfp_makeQNaN(&dfp->t);
    } else if (decNumberIsQNaN(&dfp->a)) {
        dfp->t = dfp->a;
    } else if (decNumberIsQNaN(&dfp->b)) {
        dfp->t = dfp->b;
    }
}

#define DFP_HELPER_QUAI(op, size)                                         \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b,          \
                 uint32_t te, uint32_t rmc)                               \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
                                                                          \
    dfp_prepare_decimal##size(&dfp, 0, b, env);                           \
                                                                          \
    decNumberFromUInt32(&dfp.a, 1);                                       \
    dfp.a.exponent = (int32_t)((int8_t)(te << 3) >> 3);                   \
                                                                          \
    dfp_quantize(rmc, &dfp);                                              \
    dfp_finalize_decimal##size(&dfp);                                     \
    QUA_PPs(&dfp);                                                        \
                                                                          \
    set_dfp##size(t, &dfp.vt);                                            \
}

DFP_HELPER_QUAI(dquai, 64)
DFP_HELPER_QUAI(dquaiq, 128)
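
/*
 * The expression (int32_t)((int8_t)(te << 3) >> 3) above sign-extends the
 * 5-bit TE immediate: te = 0x1F becomes 0xF8 as an int8_t (-8), and the
 * arithmetic shift right by 3 yields -1, while te = 0x0F yields 15.  The
 * reference exponent therefore ranges over [-16, 15].
 */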
#define DFP_HELPER_QUA(op, size)                                          \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a,          \
                 ppc_fprp_t *b, uint32_t rmc)                             \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
                                                                          \
    dfp_prepare_decimal##size(&dfp, a, b, env);                           \
                                                                          \
    dfp_quantize(rmc, &dfp);                                              \
    dfp_finalize_decimal##size(&dfp);                                     \
    QUA_PPs(&dfp);                                                        \
                                                                          \
    set_dfp##size(t, &dfp.vt);                                            \
}

DFP_HELPER_QUA(dqua, 64)
DFP_HELPER_QUA(dquaq, 128)

static void _dfp_reround(uint8_t rmc, int32_t ref_sig, int32_t xmax,
                         struct PPC_DFP *dfp)
{
    int msd_orig, msd_rslt;

    if (unlikely((ref_sig == 0) || (dfp->b.digits <= ref_sig))) {
        dfp->t = dfp->b;
        if (decNumberIsSNaN(&dfp->b)) {
            dfp_makeQNaN(&dfp->t);
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE);
        }
        return;
    }

    /* Reround is equivalent to quantizing b with 1**E(n) where */
    /* n = exp(b) + numDigits(b) - reference_significance.      */

    decNumberFromUInt32(&dfp->a, 1);
    dfp->a.exponent = dfp->b.exponent + dfp->b.digits - ref_sig;

    if (unlikely(dfp->a.exponent > xmax)) {
        dfp->t.digits = 0;
        dfp->t.bits &= ~DECNEG;
        dfp_makeQNaN(&dfp->t);
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
        return;
    }

    dfp_quantize(rmc, dfp);

    msd_orig = dfp_get_digit(&dfp->b, dfp->b.digits - 1);
    msd_rslt = dfp_get_digit(&dfp->t, dfp->t.digits - 1);

    /* If the quantization resulted in rounding up to the next magnitude, */
    /* then we need to shift the significand and adjust the exponent.     */

    if (unlikely((msd_orig == 9) && (msd_rslt == 1))) {

        decNumber negone;

        decNumberFromInt32(&negone, -1);
        decNumberShift(&dfp->t, &dfp->t, &negone, &dfp->context);
        dfp->t.exponent++;

        if (unlikely(dfp->t.exponent > xmax)) {
            dfp_makeQNaN(&dfp->t);
            dfp->t.digits = 0;
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
            /* Inhibit XX in this case */
            decContextClearStatus(&dfp->context, DEC_Inexact);
        }
    }
}
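
/*
 * Worked example: rerounding b = 99999 (five digits, exponent 0) to a
 * reference significance of 1 quantizes with a = 1E+4 and, rounding to
 * nearest, produces 10E+4 -- two digits, because the rounding carried
 * into the next decade.  The msd_orig == 9 / msd_rslt == 1 test above
 * catches this, shifts the significand right and bumps the exponent,
 * giving the correct one-digit result 1E+5.
 */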
#define DFP_HELPER_RRND(op, size)                                         \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a,          \
                 ppc_fprp_t *b, uint32_t rmc)                             \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    ppc_vsr_t va;                                                         \
    int32_t ref_sig;                                                      \
    int32_t xmax = ((size) == 64) ? 369 : 6111;                           \
                                                                          \
    dfp_prepare_decimal##size(&dfp, 0, b, env);                           \
                                                                          \
    get_dfp64(&va, a);                                                    \
    ref_sig = va.VsrD(1) & 0x3f;                                          \
                                                                          \
    _dfp_reround(rmc, ref_sig, xmax, &dfp);                               \
    dfp_finalize_decimal##size(&dfp);                                     \
    QUA_PPs(&dfp);                                                        \
                                                                          \
    set_dfp##size(t, &dfp.vt);                                            \
}

DFP_HELPER_RRND(drrnd, 64)
DFP_HELPER_RRND(drrndq, 128)

#define DFP_HELPER_RINT(op, postprocs, size)                              \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b,          \
                 uint32_t r, uint32_t rmc)                                \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
                                                                          \
    dfp_prepare_decimal##size(&dfp, 0, b, env);                           \
                                                                          \
    dfp_set_round_mode_from_immediate(r, rmc, &dfp);                      \
    decNumberToIntegralExact(&dfp.t, &dfp.b, &dfp.context);               \
    dfp_finalize_decimal##size(&dfp);                                     \
    postprocs(&dfp);                                                      \
                                                                          \
    set_dfp##size(t, &dfp.vt);                                            \
}

static void RINTX_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
}

DFP_HELPER_RINT(drintx, RINTX_PPs, 64)
DFP_HELPER_RINT(drintxq, RINTX_PPs, 128)

static void RINTN_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_VXSNAN(dfp);
}

DFP_HELPER_RINT(drintn, RINTN_PPs, 64)
DFP_HELPER_RINT(drintnq, RINTN_PPs, 128)

void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
{
    struct PPC_DFP dfp;
    ppc_vsr_t vb;
    uint32_t b_short;

    get_dfp64(&vb, b);
    b_short = (uint32_t)vb.VsrD(1);

    dfp_prepare_decimal64(&dfp, 0, 0, env);
    decimal32ToNumber((decimal32 *)&b_short, &dfp.t);
    dfp_finalize_decimal64(&dfp);
    set_dfp64(t, &dfp.vt);
    dfp_set_FPRF_from_FRT(&dfp);
}

void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
{
    struct PPC_DFP dfp;
    ppc_vsr_t vb;
    dfp_prepare_decimal128(&dfp, 0, 0, env);
    get_dfp64(&vb, b);
    decimal64ToNumber((decimal64 *)&vb.VsrD(1), &dfp.t);

    dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp);
    dfp_set_FPRF_from_FRT(&dfp);

    dfp_finalize_decimal128(&dfp);
    set_dfp128(t, &dfp.vt);
}

void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
{
    struct PPC_DFP dfp;
    uint32_t t_short = 0;
    ppc_vsr_t vt;
    dfp_prepare_decimal64(&dfp, 0, b, env);
    decimal32FromNumber((decimal32 *)&t_short, &dfp.b, &dfp.context);
    decimal32ToNumber((decimal32 *)&t_short, &dfp.t);

    dfp_set_FPRF_from_FRT_short(&dfp);
    dfp_check_for_OX(&dfp);
    dfp_check_for_UX(&dfp);
    dfp_check_for_XX(&dfp);

    vt.VsrD(1) = (uint64_t)t_short;
    set_dfp64(t, &vt);
}
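
/*
 * helper_drsp rounds a long (64-bit) operand to short (32-bit) precision
 * by a round trip through decimal32: decimal32FromNumber applies the
 * rounding and range checks of the short format, and the reconverted
 * decNumber is used only to derive FPRF under a decimal32 context.  The
 * raw 32-bit image itself is what lands, zero-extended, in the target.
 */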
void helper_drdpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
{
    struct PPC_DFP dfp;
    dfp_prepare_decimal128(&dfp, 0, b, env);
    decimal64FromNumber((decimal64 *)&dfp.vt.VsrD(1), &dfp.b, &dfp.context);
    decimal64ToNumber((decimal64 *)&dfp.vt.VsrD(1), &dfp.t);

    dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp);
    dfp_set_FPRF_from_FRT_long(&dfp);
    dfp_check_for_OX(&dfp);
    dfp_check_for_UX(&dfp);
    dfp_check_for_XX(&dfp);

    dfp.vt.VsrD(0) = dfp.vt.VsrD(1) = 0;
    dfp_finalize_decimal64(&dfp);
    set_dfp128(t, &dfp.vt);
}

#define DFP_HELPER_CFFIX(op, size)                                        \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)          \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    ppc_vsr_t vb;                                                         \
    dfp_prepare_decimal##size(&dfp, 0, b, env);                           \
    get_dfp64(&vb, b);                                                    \
    decNumberFromInt64(&dfp.t, (int64_t)vb.VsrD(1));                      \
    dfp_finalize_decimal##size(&dfp);                                     \
    CFFIX_PPs(&dfp);                                                      \
                                                                          \
    set_dfp##size(t, &dfp.vt);                                            \
}

static void CFFIX_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_XX(dfp);
}

DFP_HELPER_CFFIX(dcffix, 64)
DFP_HELPER_CFFIX(dcffixq, 128)

void helper_DCFFIXQQ(CPUPPCState *env, ppc_fprp_t *t, ppc_avr_t *b)
{
    struct PPC_DFP dfp;

    dfp_prepare_decimal128(&dfp, NULL, NULL, env);
    decNumberFromInt128(&dfp.t, (uint64_t)b->VsrD(1), (int64_t)b->VsrD(0));
    dfp_finalize_decimal128(&dfp);
    CFFIX_PPs(&dfp);

    set_dfp128(t, &dfp.vt);
}

#define DFP_HELPER_CTFIX(op, size)                                        \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)          \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    dfp_prepare_decimal##size(&dfp, 0, b, env);                           \
                                                                          \
    if (unlikely(decNumberIsSpecial(&dfp.b))) {                           \
        uint64_t invalid_flags = FP_VX | FP_VXCVI;                        \
        if (decNumberIsInfinite(&dfp.b)) {                                \
            dfp.vt.VsrD(1) = decNumberIsNegative(&dfp.b) ? INT64_MIN :    \
                                                           INT64_MAX;     \
        } else { /* NaN */                                                \
            dfp.vt.VsrD(1) = INT64_MIN;                                   \
            if (decNumberIsSNaN(&dfp.b)) {                                \
                invalid_flags |= FP_VXSNAN;                               \
            }                                                             \
        }                                                                 \
        dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE);                   \
    } else if (unlikely(decNumberIsZero(&dfp.b))) {                       \
        dfp.vt.VsrD(1) = 0;                                               \
    } else {                                                              \
        decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context);           \
        dfp.vt.VsrD(1) = decNumberIntegralToInt64(&dfp.b, &dfp.context);  \
        if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) {  \
            dfp.vt.VsrD(1) = decNumberIsNegative(&dfp.b) ? INT64_MIN :    \
                                                           INT64_MAX;     \
            dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE);            \
        } else {                                                          \
            dfp_check_for_XX(&dfp);                                       \
        }                                                                 \
    }                                                                     \
                                                                          \
    set_dfp64(t, &dfp.vt);                                                \
}

DFP_HELPER_CTFIX(dctfix, 64)
DFP_HELPER_CTFIX(dctfixq, 128)

void helper_DCTFIXQQ(CPUPPCState *env, ppc_avr_t *t, ppc_fprp_t *b)
{
    struct PPC_DFP dfp;
    dfp_prepare_decimal128(&dfp, 0, b, env);

    if (unlikely(decNumberIsSpecial(&dfp.b))) {
        uint64_t invalid_flags = FP_VX | FP_VXCVI;
        if (decNumberIsInfinite(&dfp.b)) {
            if (decNumberIsNegative(&dfp.b)) {
                dfp.vt.VsrD(0) = INT64_MIN;
                dfp.vt.VsrD(1) = 0;
            } else {
                dfp.vt.VsrD(0) = INT64_MAX;
                dfp.vt.VsrD(1) = UINT64_MAX;
            }
        } else { /* NaN */
            dfp.vt.VsrD(0) = INT64_MIN;
            dfp.vt.VsrD(1) = 0;
            if (decNumberIsSNaN(&dfp.b)) {
                invalid_flags |= FP_VXSNAN;
            }
        }
        dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE);
    } else if (unlikely(decNumberIsZero(&dfp.b))) {
        dfp.vt.VsrD(0) = 0;
        dfp.vt.VsrD(1) = 0;
    } else {
        decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context);
        decNumberIntegralToInt128(&dfp.b, &dfp.context,
                                  &dfp.vt.VsrD(1), &dfp.vt.VsrD(0));
        if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) {
            if (decNumberIsNegative(&dfp.b)) {
                dfp.vt.VsrD(0) = INT64_MIN;
                dfp.vt.VsrD(1) = 0;
            } else {
                dfp.vt.VsrD(0) = INT64_MAX;
                dfp.vt.VsrD(1) = UINT64_MAX;
            }
            dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE);
        } else {
            dfp_check_for_XX(&dfp);
        }
    }

    set_dfp128_to_avr(t, &dfp.vt);
}

static inline void dfp_set_bcd_digit_64(ppc_vsr_t *t, uint8_t digit,
                                        unsigned n)
{
    t->VsrD(1) |= ((uint64_t)(digit & 0xF) << (n << 2));
}

static inline void dfp_set_bcd_digit_128(ppc_vsr_t *t, uint8_t digit,
                                         unsigned n)
{
    t->VsrD((n & 0x10) ? 0 : 1) |=
        ((uint64_t)(digit & 0xF) << ((n & 15) << 2));
}

static inline void dfp_set_sign_64(ppc_vsr_t *t, uint8_t sgn)
{
    t->VsrD(1) <<= 4;
    t->VsrD(1) |= (sgn & 0xF);
}

static inline void dfp_set_sign_128(ppc_vsr_t *t, uint8_t sgn)
{
    t->VsrD(0) <<= 4;
    t->VsrD(0) |= (t->VsrD(1) >> 60);
    t->VsrD(1) <<= 4;
    t->VsrD(1) |= (sgn & 0xF);
}
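
/*
 * Packing example for the DEDPD helpers below: converting the value 123
 * with sp = 2 (sign included, positive source) stores one BCD digit per
 * nibble, least significant digit in nibble 0, so the digit loop leaves
 * VsrD(1) = 0x123; dfp_set_sign_64 then shifts in the 0xC plus code,
 * giving the final image 0x123C.
 */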
#define DFP_HELPER_DEDPD(op, size)                                        \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b,          \
                 uint32_t sp)                                             \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    uint8_t digits[34];                                                   \
    int i, N;                                                             \
                                                                          \
    dfp_prepare_decimal##size(&dfp, 0, b, env);                           \
                                                                          \
    decNumberGetBCD(&dfp.b, digits);                                      \
    dfp.vt.VsrD(0) = dfp.vt.VsrD(1) = 0;                                  \
    N = dfp.b.digits;                                                     \
                                                                          \
    for (i = 0; (i < N) && (i < (size) / 4); i++) {                       \
        dfp_set_bcd_digit_##size(&dfp.vt, digits[N - i - 1], i);          \
    }                                                                     \
                                                                          \
    if (sp & 2) {                                                         \
        uint8_t sgn;                                                      \
                                                                          \
        if (decNumberIsNegative(&dfp.b)) {                                \
            sgn = 0xD;                                                    \
        } else {                                                          \
            sgn = ((sp & 1) ? 0xF : 0xC);                                 \
        }                                                                 \
        dfp_set_sign_##size(&dfp.vt, sgn);                                \
    }                                                                     \
                                                                          \
    set_dfp##size(t, &dfp.vt);                                            \
}

DFP_HELPER_DEDPD(ddedpd, 64)
DFP_HELPER_DEDPD(ddedpdq, 128)

static inline uint8_t dfp_get_bcd_digit_64(ppc_vsr_t *t, unsigned n)
{
    return t->VsrD(1) >> ((n << 2) & 63) & 15;
}

static inline uint8_t dfp_get_bcd_digit_128(ppc_vsr_t *t, unsigned n)
{
    return t->VsrD((n & 0x10) ? 0 : 1) >> ((n << 2) & 63) & 15;
}

#define DFP_HELPER_ENBCD(op, size)                                        \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b,          \
                 uint32_t s)                                              \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    uint8_t digits[32];                                                   \
    int n = 0, offset = 0, sgn = 0, nonzero = 0;                          \
                                                                          \
    dfp_prepare_decimal##size(&dfp, 0, b, env);                           \
                                                                          \
    decNumberZero(&dfp.t);                                                \
                                                                          \
    if (s) {                                                              \
        uint8_t sgnNibble = dfp_get_bcd_digit_##size(&dfp.vb, offset++);  \
        switch (sgnNibble) {                                              \
        case 0xD:                                                         \
        case 0xB:                                                         \
            sgn = 1;                                                      \
            break;                                                        \
        case 0xC:                                                         \
        case 0xF:                                                         \
        case 0xA:                                                         \
        case 0xE:                                                         \
            sgn = 0;                                                      \
            break;                                                        \
        default:                                                          \
            dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE);            \
            return;                                                       \
        }                                                                 \
    }                                                                     \
                                                                          \
    while (offset < (size) / 4) {                                         \
        n++;                                                              \
        digits[(size) / 4 - n] = dfp_get_bcd_digit_##size(&dfp.vb,        \
                                                          offset++);      \
        if (digits[(size) / 4 - n] > 10) {                                \
            dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE);            \
            return;                                                       \
        } else {                                                          \
            nonzero |= (digits[(size) / 4 - n] > 0);                      \
        }                                                                 \
    }                                                                     \
                                                                          \
    if (nonzero) {                                                        \
        decNumberSetBCD(&dfp.t, digits + ((size) / 4) - n, n);            \
    }                                                                     \
                                                                          \
    if (s && sgn) {                                                       \
        dfp.t.bits |= DECNEG;                                             \
    }                                                                     \
    dfp_finalize_decimal##size(&dfp);                                     \
    dfp_set_FPRF_from_FRT(&dfp);                                          \
    set_dfp##size(t, &dfp.vt);                                            \
}

DFP_HELPER_ENBCD(denbcd, 64)
DFP_HELPER_ENBCD(denbcdq, 128)

#define DFP_HELPER_XEX(op, size)                                          \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)          \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    ppc_vsr_t vt;                                                         \
                                                                          \
    dfp_prepare_decimal##size(&dfp, 0, b, env);                           \
                                                                          \
    if (unlikely(decNumberIsSpecial(&dfp.b))) {                           \
        if (decNumberIsInfinite(&dfp.b)) {                                \
            vt.VsrD(1) = -1;                                              \
        } else if (decNumberIsSNaN(&dfp.b)) {                             \
            vt.VsrD(1) = -3;                                              \
        } else if (decNumberIsQNaN(&dfp.b)) {                             \
            vt.VsrD(1) = -2;                                              \
        } else {                                                          \
            assert(0);                                                    \
        }                                                                 \
        set_dfp64(t, &vt);                                                \
    } else {                                                              \
        if ((size) == 64) {                                               \
            vt.VsrD(1) = dfp.b.exponent + 398;                            \
        } else if ((size) == 128) {                                       \
            vt.VsrD(1) = dfp.b.exponent + 6176;                           \
        } else {                                                          \
            assert(0);                                                    \
        }                                                                 \
        set_dfp64(t, &vt);                                                \
    }                                                                     \
}

DFP_HELPER_XEX(dxex, 64)
DFP_HELPER_XEX(dxexq, 128)

static void dfp_set_raw_exp_64(ppc_vsr_t *t, uint64_t raw)
{
    t->VsrD(1) &= 0x8003ffffffffffffULL;
    t->VsrD(1) |= (raw << (63 - 13));
}

static void dfp_set_raw_exp_128(ppc_vsr_t *t, uint64_t raw)
{
    t->VsrD(0) &= 0x80003fffffffffffULL;
    t->VsrD(0) |= (raw << (63 - 17));
}
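
/*
 * These helpers write the raw combination/exponent field of the DPD
 * encoding: 13 bits at positions 62:50 for the 64-bit format and 17 bits
 * at positions 62:46 for the 128-bit format, just below the sign bit.
 * The raw_inf value 0x1E00 used by DIEX below, for instance, places
 * 11110 -- the infinity pattern -- in the five most significant bits of
 * the 64-bit field.
 */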
#define DFP_HELPER_IEX(op, size)                                          \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a,          \
                 ppc_fprp_t *b)                                           \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    uint64_t raw_qnan, raw_snan, raw_inf, max_exp;                        \
    ppc_vsr_t va;                                                         \
    int bias;                                                             \
    int64_t exp;                                                          \
                                                                          \
    get_dfp64(&va, a);                                                    \
    exp = (int64_t)va.VsrD(1);                                            \
    dfp_prepare_decimal##size(&dfp, 0, b, env);                           \
                                                                          \
    if ((size) == 64) {                                                   \
        max_exp = 767;                                                    \
        raw_qnan = 0x1F00;                                                \
        raw_snan = 0x1F80;                                                \
        raw_inf = 0x1E00;                                                 \
        bias = 398;                                                       \
    } else if ((size) == 128) {                                           \
        max_exp = 12287;                                                  \
        raw_qnan = 0x1f000;                                               \
        raw_snan = 0x1f800;                                               \
        raw_inf = 0x1e000;                                                \
        bias = 6176;                                                      \
    } else {                                                              \
        assert(0);                                                        \
    }                                                                     \
                                                                          \
    if (unlikely((exp < 0) || (exp > max_exp))) {                         \
        dfp.vt.VsrD(0) = dfp.vb.VsrD(0);                                  \
        dfp.vt.VsrD(1) = dfp.vb.VsrD(1);                                  \
        if (exp == -1) {                                                  \
            dfp_set_raw_exp_##size(&dfp.vt, raw_inf);                     \
        } else if (exp == -3) {                                           \
            dfp_set_raw_exp_##size(&dfp.vt, raw_snan);                    \
        } else {                                                          \
            dfp_set_raw_exp_##size(&dfp.vt, raw_qnan);                    \
        }                                                                 \
    } else {                                                              \
        dfp.t = dfp.b;                                                    \
        if (unlikely(decNumberIsSpecial(&dfp.t))) {                       \
            dfp.t.bits &= ~DECSPECIAL;                                    \
        }                                                                 \
        dfp.t.exponent = exp - bias;                                      \
        dfp_finalize_decimal##size(&dfp);                                 \
    }                                                                     \
    set_dfp##size(t, &dfp.vt);                                            \
}

DFP_HELPER_IEX(DIEX, 64)
DFP_HELPER_IEX(DIEXQ, 128)

static void dfp_clear_lmd_from_g5msb(uint64_t *t)
{
    /* The most significant 5 bits of the PowerPC DFP format combine bits */
    /* from the left-most decimal digit (LMD) and the biased exponent.    */
    /* This routine clears the LMD bits while preserving the exponent     */
    /* bits.  See "Figure 80: Encoding of bits 0:4 of the G field for     */
    /* Finite Numbers" in the Power ISA for additional details.           */

    uint64_t g5msb = (*t >> 58) & 0x1F;

    if ((g5msb >> 3) < 3) { /* LMD in [0-7] ? */
        *t &= ~(7ULL << 58);
    } else {
        switch (g5msb & 7) {
        case 0:
        case 1:
            g5msb = 0;
            break;
        case 2:
        case 3:
            g5msb = 0x8;
            break;
        case 4:
        case 5:
            g5msb = 0x10;
            break;
        case 6:
            g5msb = 0x1E;
            break;
        case 7:
            g5msb = 0x1F;
            break;
        }

        *t &= ~(0x1fULL << 58);
        *t |= (g5msb << 58);
    }
}
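
/*
 * Example: g5msb = 0b11010 encodes LMD = 8 with exponent top bits 01
 * (the 11-prefixed forms cover LMD 8 and 9).  Clearing the LMD while
 * keeping the exponent yields 0b01000 (0x8), which is what case 2 above
 * produces; the 11110 and 11111 patterns (cases 6 and 7) are the
 * infinity and NaN encodings and pass through unchanged.
 */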
#define DFP_HELPER_SHIFT(op, size, shift_left)                            \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a,          \
                 uint32_t sh)                                             \
{                                                                         \
    struct PPC_DFP dfp;                                                   \
    unsigned max_digits = ((size) == 64) ? 16 : 34;                       \
                                                                          \
    dfp_prepare_decimal##size(&dfp, a, 0, env);                           \
                                                                          \
    if (sh <= max_digits) {                                               \
                                                                          \
        decNumber shd;                                                    \
        unsigned special = dfp.a.bits & DECSPECIAL;                       \
                                                                          \
        if (shift_left) {                                                 \
            decNumberFromUInt32(&shd, sh);                                \
        } else {                                                          \
            decNumberFromInt32(&shd, -((int32_t)sh));                     \
        }                                                                 \
                                                                          \
        dfp.a.bits &= ~DECSPECIAL;                                        \
        decNumberShift(&dfp.t, &dfp.a, &shd, &dfp.context);               \
                                                                          \
        dfp.t.bits |= special;                                            \
        if (special && (dfp.t.digits >= max_digits)) {                    \
            dfp.t.digits = max_digits - 1;                                \
        }                                                                 \
                                                                          \
        dfp_finalize_decimal##size(&dfp);                                 \
    } else {                                                              \
        if ((size) == 64) {                                               \
            dfp.vt.VsrD(1) = dfp.va.VsrD(1) &                             \
                             0xFFFC000000000000ULL;                       \
            dfp_clear_lmd_from_g5msb(&dfp.vt.VsrD(1));                    \
        } else {                                                          \
            dfp.vt.VsrD(0) = dfp.va.VsrD(0) &                             \
                             0xFFFFC00000000000ULL;                       \
            dfp_clear_lmd_from_g5msb(&dfp.vt.VsrD(0));                    \
            dfp.vt.VsrD(1) = 0;                                           \
        }                                                                 \
    }                                                                     \
                                                                          \
    set_dfp##size(t, &dfp.vt);                                            \
}
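
/*
 * The shift operates on the significand only: dscli with SH = 2 turns
 * coefficient 5 at exponent +1 (5E+1) into 500E+1, leaving the exponent
 * alone.  Digits shifted beyond the format's precision are discarded,
 * and a shift amount larger than the precision produces a result with
 * the significand cleared, built directly from the raw sign and
 * exponent bits of the source.
 */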
DFP_HELPER_SHIFT(dscli, 64, 1)
DFP_HELPER_SHIFT(dscliq, 128, 1)
DFP_HELPER_SHIFT(dscri, 64, 0)
DFP_HELPER_SHIFT(dscriq, 128, 0)