/*
** MIPS IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Register allocator extensions --------------------------------------- */

/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}

/* Allocate a register or RID_ZERO. */
static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!(allow & RSET_FPR) && irref_isk(ref) && get_kval(as, ref) == 0)
      return RID_ZERO;
    r = ra_allocref(as, ref, allow);
  } else {
    ra_noweak(as, r);
  }
  return r;
}

/* Allocate two source registers for three-operand instructions. */
static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  Reg left = irl->r, right = irr->r;
  if (ra_hasreg(left)) {
    ra_noweak(as, left);
    if (ra_noreg(right))
      right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
    else
      ra_noweak(as, right);
  } else if (ra_hasreg(right)) {
    ra_noweak(as, right);
    left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
  } else if (ra_hashint(right)) {
    right = ra_alloc1z(as, ir->op2, allow);
    left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
  } else {
    left = ra_alloc1z(as, ir->op1, allow);
    right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
  }
  return left | (right << 8);
}

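/*
** Usage sketch for the packed return value of ra_alloc2() (this is the
** unpacking idiom used throughout this file): the low byte holds the left
** register, the next byte the right register.
**
**   Reg right, left = ra_alloc2(as, ir, RSET_GPR);
**   right = (left >> 8); left &= 255;
*/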

/* -- Guard handling ------------------------------------------------------ */

/* Need some spare long-range jump slots, for out-of-range branches. */
#define MIPS_SPAREJUMP		4

/* Setup spare long-range jump slots per mcarea. */
static void asm_sparejump_setup(ASMState *as)
{
  MCode *mxp = as->mcbot;
  if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == sizeof(MCLink)) {
    lj_assertA(MIPSI_NOP == 0, "bad NOP");
    memset(mxp, 0, MIPS_SPAREJUMP*2*sizeof(MCode));
    mxp += MIPS_SPAREJUMP*2;
    lj_assertA(mxp < as->mctop, "MIPS_SPAREJUMP too big");
    lj_mcode_sync(as->mcbot, mxp);
    lj_mcode_commitbot(as->J, mxp);
    as->mcbot = mxp;
    as->mclim = as->mcbot + MCLIM_REDZONE;
  }
}

/* Setup exit stub after the end of each trace. */
static void asm_exitstub_setup(ASMState *as)
{
  MCode *mxp = as->mctop;
  /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */
  *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno;
  *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu);
  lj_assertA(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0,
             "branch target out of range");
  *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0;
  as->mctop = mxp;
}

/* Keep this in-sync with exitstub_trace_addr(). */
#define asm_exitstub_addr(as)	((as)->mctop)

/* Emit conditional branch to exit for guard. */
static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt)
{
  MCode *target = asm_exitstub_addr(as);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->invmcp = NULL;
    as->loopinv = 1;
    as->mcp = p+1;
#if !LJ_TARGET_MIPSR6
    mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u);  /* Invert cond. */
#else
    mi = mi ^ ((mi>>28) == 1 ? 0x04000000u :
               (mi>>28) == 4 ? 0x00800000u : 0x00010000u);  /* Invert cond. */
#endif
    target = p;  /* Patch target later in asm_loop_fixup. */
  }
  emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
  emit_branch(as, mi, rs, rt, target);
}

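/*
** Sketch of the condition inversion in asm_guard() (assuming the standard
** MIPS encodings behind the MIPSI_* constants): branches that differ only
** in their condition sense differ in a single opcode bit, so one XOR
** flips the condition:
**
**   BEQ  0x10000000 ^ 0x04000000 -> BNE  0x14000000   ((mi>>28) == 1)
**   BLTZ 0x04000000 ^ 0x00010000 -> BGEZ 0x04010000
**   BC1F 0x45000000 ^ 0x00010000 -> BC1T 0x45010000   (pre-R6)
**   BC1EQZ         ^ 0x00800000 -> BC1NEZ             (R6, (mi>>28) == 4)
*/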

/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}

/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}

/* Fuse array/hash/upvalue reference into register+offset operand. */
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
        if (irref_isk(ir->op2)) {
          IRRef tab = IR(ir->op1)->op1;
          int32_t ofs = asm_fuseabase(as, tab);
          IRRef refa = ofs ? tab : ir->op1;
          ofs += 8*IR(ir->op2)->i;
          if (checki16(ofs)) {
            *ofsp = ofs;
            return ra_alloc1(as, refa, allow);
          }
        }
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
        int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        if (checki16(ofs)) {
          *ofsp = ofs;
          return ra_alloc1(as, ir->op1, allow);
        }
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        intptr_t ofs = (intptr_t)&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv;
        intptr_t jgl = (intptr_t)J2G(as->J);
        if ((uintptr_t)(ofs-jgl) < 65536) {
          *ofsp = ofs-jgl-32768;
          return RID_JGL;
        } else {
          *ofsp = (int16_t)ofs;
          return ra_allock(as, ofs-(int16_t)ofs, allow);
        }
      }
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}

/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
                         RegSet allow, int32_t ofs)
{
  IRIns *ir = IR(ref);
  Reg base;
  if (ra_noreg(ir->r) && canfuse(as, ir)) {
    if (ir->o == IR_ADD) {
      intptr_t ofs2;
      if (irref_isk(ir->op2) && (ofs2 = ofs + get_kval(as, ir->op2),
                                 checki16(ofs2))) {
        ref = ir->op1;
        ofs = (int32_t)ofs2;
      }
    } else if (ir->o == IR_STRREF) {
      intptr_t ofs2 = 65536;
      lj_assertA(ofs == 0, "bad usage");
      ofs = (int32_t)sizeof(GCstr);
      if (irref_isk(ir->op2)) {
        ofs2 = ofs + get_kval(as, ir->op2);
        ref = ir->op1;
      } else if (irref_isk(ir->op1)) {
        ofs2 = ofs + get_kval(as, ir->op1);
        ref = ir->op2;
      }
      if (!checki16(ofs2)) {
        /* NYI: Fuse ADD with constant. */
        Reg right, left = ra_alloc2(as, ir, allow);
        right = (left >> 8); left &= 255;
        emit_hsi(as, mi, rt, RID_TMP, ofs);
        emit_dst(as, MIPSI_AADDU, RID_TMP, left, right);
        return;
      }
      ofs = ofs2;
    }
  }
  base = ra_alloc1(as, ref, allow);
  emit_hsi(as, mi, rt, base, ofs);
}

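/*
** Sketch of the RID_JGL addressing trick used in asm_fuseahuref() above
** (inferred from the *ofsp = ofs-jgl-32768 computation; the register
** assignment itself lives in the MIPS target headers): RID_JGL is assumed
** to hold J2G(as->J)+32768, so any address within 64KB above the
** global_State is reachable with a signed 16-bit displacement:
**
**   addr == jgl + u             with 0 <= u < 65536
**   addr == (jgl + 32768) + (u - 32768),  and -32768 <= u-32768 < 32768
*/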

/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_XNARGS(ci);
  int32_t ofs = LJ_32 ? 16 : 0;
#if LJ_SOFTFP
  Reg gpr = REGARG_FIRSTGPR;
#else
  Reg gpr, fpr = REGARG_FIRSTFPR;
#endif
  if ((void *)ci->func)
    emit_call(as, (void *)ci->func, 1);
#if !LJ_SOFTFP
  for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
    as->cost[gpr] = REGCOST(~0u, ASMREF_L);
  gpr = REGARG_FIRSTGPR;
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    if (ref) {
      IRIns *ir = IR(ref);
#if !LJ_SOFTFP
      if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR &&
          !(ci->flags & CCI_VARARG)) {
        lj_assertA(rset_test(as->freeset, fpr),
                   "reg %d not free", fpr);  /* Already evicted. */
        ra_leftov(as, fpr, ref);
        fpr += LJ_32 ? 2 : 1;
        gpr += (LJ_32 && irt_isnum(ir->t)) ? 2 : 1;
      } else
#endif
      {
#if LJ_32 && !LJ_SOFTFP
        fpr = REGARG_LASTFPR+1;
#endif
        if (LJ_32 && irt_isnum(ir->t)) gpr = (gpr+1) & ~1;
        if (gpr <= REGARG_LASTGPR) {
          lj_assertA(rset_test(as->freeset, gpr),
                     "reg %d not free", gpr);  /* Already evicted. */
#if !LJ_SOFTFP
          if (irt_isfp(ir->t)) {
            RegSet of = as->freeset;
            Reg r;
            /* Workaround to protect argument GPRs from being used for remat. */
            as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
            r = ra_alloc1(as, ref, RSET_FPR);
            as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
            if (irt_isnum(ir->t)) {
#if LJ_32
              emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1);
              emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r);
              lj_assertA(rset_test(as->freeset, gpr+1),
                         "reg %d not free", gpr+1);  /* Already evicted. */
              gpr += 2;
#else
              emit_tg(as, MIPSI_DMFC1, gpr, r);
              gpr++; fpr++;
#endif
            } else if (irt_isfloat(ir->t)) {
              emit_tg(as, MIPSI_MFC1, gpr, r);
              gpr++;
#if LJ_64
              fpr++;
#endif
            }
          } else
#endif
          {
            ra_leftov(as, gpr, ref);
            gpr++;
#if LJ_64 && !LJ_SOFTFP
            fpr++;
#endif
          }
        } else {
          Reg r = ra_alloc1z(as, ref,
                             !LJ_SOFTFP && irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
#if LJ_32
          if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
          emit_spstore(as, ir, r, ofs);
          ofs += irt_isnum(ir->t) ? 8 : 4;
#else
          emit_spstore(as, ir, r,
                       ofs + ((LJ_BE && !irt_isfp(ir->t) && !irt_is64(ir->t)) ? 4 : 0));
          ofs += 8;
#endif
        }
      }
    } else {
#if !LJ_SOFTFP
      fpr = REGARG_LASTFPR+1;
#endif
      if (gpr <= REGARG_LASTGPR) {
        gpr++;
#if LJ_64 && !LJ_SOFTFP
        fpr++;
#endif
      } else {
        ofs += LJ_32 ? 4 : 8;
      }
    }
    checkmclim(as);
  }
}

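/*
** Summary of the argument-passing logic above (this follows from the code;
** the concrete register numbers come from REGARG_* in the target headers):
**
**   o32 (LJ_32): ofs starts at 16 (the caller-reserved argument save area),
**     doubles are aligned to even GPR pairs via gpr = (gpr+1) & ~1 and
**     consume two GPR/FPR slots; stack slots are 4 bytes (8 for doubles).
**   n64 (LJ_64): ofs starts at 0, every argument advances both the GPR and
**     the FPR counter, and stack slots are always 8 bytes.
*/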

/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
#if LJ_32
  int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
#endif
#if !LJ_SOFTFP
  if ((ci->flags & CCI_NOFPRCLOBBER))
    drop &= ~RSET_FPR;
#endif
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
#if LJ_32
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
#endif
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lj_assertA(!irt_ispri(ir->t), "PRI dest");
    if (!LJ_SOFTFP && irt_isfp(ir->t)) {
      if ((ci->flags & CCI_CASTU64)) {
        int32_t ofs = sps_scale(ir->s);
        Reg dest = ir->r;
        if (ra_hasreg(dest)) {
          ra_free(as, dest);
          ra_modified(as, dest);
#if LJ_32
          emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1);
          emit_tg(as, MIPSI_MTC1, RID_RETLO, dest);
#else
          emit_tg(as, MIPSI_DMTC1, RID_RET, dest);
#endif
        }
        if (ofs) {
#if LJ_32
          emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0));
          emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4));
#else
          emit_tsi(as, MIPSI_SD, RID_RET, RID_SP, ofs);
#endif
        }
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
#if LJ_32
    } else if (hiop) {
      ra_destpair(as, ir);
#endif
    } else {
      ra_destreg(as, ir, RID_RET);
    }
  }
}

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  if (irref_isk(func)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(void *)get_kval(as, func);
  } else {  /* Need specific register for indirect calls. */
    Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR));
    MCode *p = as->mcp;
    if (r == RID_CFUNCADDR)
      *--p = MIPSI_NOP;
    else
      *--p = MIPSI_MOVE | MIPSF_D(RID_CFUNCADDR) | MIPSF_S(r);
    *--p = MIPSI_JALR | MIPSF_S(r);
    as->mcp = p;
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}

#if !LJ_SOFTFP
static void asm_callround(ASMState *as, IRIns *ir, IRCallID id)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)|
                RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR)
#if LJ_TARGET_MIPSR6
                |RID2RSET(RID_F21)
#endif
                ;
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  ra_evictset(as, drop);
  ra_destreg(as, ir, RID_FPRET);
  emit_call(as, (void *)lj_ir_callinfo[id].func, 0);
  ra_leftov(as, REGARG_FIRSTFPR, ir->op1);
}
#endif

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guard(as, MIPSI_BNE, RID_TMP,
            ra_allock(as, igcptr(pc), rset_exclude(RSET_GPR, base)));
  emit_tsi(as, MIPSI_AL, RID_TMP, base, -8);
}

/* -- Type conversions ---------------------------------------------------- */

#if !LJ_SOFTFP
static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
#if !LJ_TARGET_MIPSR6
  asm_guard(as, MIPSI_BC1F, 0, 0);
  emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left);
#else
  asm_guard(as, MIPSI_BC1EQZ, 0, (tmp&31));
  emit_fgh(as, MIPSI_CMP_EQ_D, tmp, tmp, left);
#endif
  emit_fg(as, MIPSI_CVT_D_W, tmp, tmp);
  emit_tg(as, MIPSI_MFC1, dest, tmp);
  emit_fg(as, MIPSI_CVT_W_D, tmp, left);
}

static void asm_tobit(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_FPR;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, allow);
  Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
  Reg tmp = ra_scratch(as, rset_clear(allow, right));
  emit_tg(as, MIPSI_MFC1, dest, tmp);
  emit_fgh(as, MIPSI_ADD_D, tmp, left, right);
}
#elif LJ_64  /* && LJ_SOFTFP */
static void asm_tointg(ASMState *as, IRIns *ir, Reg r)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RID2RSET(REGARG_FIRSTGPR)|RID2RSET(RID_RET)|RID2RSET(RID_RET+1)|
                RID2RSET(RID_R1)|RID2RSET(RID_R12);
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  ra_evictset(as, drop);
  /* Return values are in RID_RET (converted value) and RID_RET+1 (status). */
  ra_destreg(as, ir, RID_RET);
  asm_guard(as, MIPSI_BNE, RID_RET+1, RID_ZERO);
  emit_call(as, (void *)lj_ir_callinfo[IRCALL_lj_vm_tointg].func, 0);
  if (r == RID_NONE)
    ra_leftov(as, REGARG_FIRSTGPR, ir->op1);
  else if (r != REGARG_FIRSTGPR)
    emit_move(as, REGARG_FIRSTGPR, r);
}

static void asm_tobit(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  emit_dta(as, MIPSI_SLL, dest, dest, 0);
  asm_callid(as, ir, IRCALL_lj_vm_tobit);
}
#endif

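/*
** Equivalent C for the hard-float asm_tointg() above (keep in mind the
** assembler emits backwards, so read the emit_* calls bottom-up):
**
**   int32_t i = (int32_t)d;      // CVT_W_D + MFC1
**   if ((double)i != d)          // CVT_D_W + C_EQ_D / CMP_EQ_D
**     guard_exit();              // the conversion was not exact
*/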
static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
#if !LJ_SOFTFP32
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
#endif
#if LJ_64
  int st64 = (st == IRT_I64 || st == IRT_U64 || st == IRT_P64);
#endif
  IRRef lref = ir->op1;
#if LJ_32
  /* 64 bit integer conversions are handled by SPLIT. */
  lj_assertA(!(irt_isint64(ir->t) || (st == IRT_I64 || st == IRT_U64)),
             "IR %04d has unsplit 64 bit type",
             (int)(ir - as->ir) - REF_BIAS);
#endif
#if LJ_SOFTFP32
  /* FP conversions are handled by SPLIT. */
  lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
             "IR %04d has FP type",
             (int)(ir - as->ir) - REF_BIAS);
  /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
#else
  lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
#if !LJ_SOFTFP
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_fg(as, st == IRT_NUM ? MIPSI_CVT_S_D : MIPSI_CVT_D_S,
              dest, ra_alloc1(as, lref, RSET_FPR));
    } else if (st == IRT_U32) {  /* U32 to FP conversion. */
      /* y = (x ^ 0x80000000) + 2147483648.0 */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      if (irt_isfloat(ir->t))
        emit_fg(as, MIPSI_CVT_S_D, dest, dest);
      /* Must perform arithmetic with doubles to keep the precision. */
      emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp);
      emit_fg(as, MIPSI_CVT_D_W, dest, dest);
      emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
                 (void *)&as->J->k64[LJ_K64_2P31], RSET_GPR);
      emit_tg(as, MIPSI_MTC1, RID_TMP, dest);
      emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left);
      emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
#if LJ_64
    } else if (st == IRT_U64) {  /* U64 to FP conversion. */
      /* if (x >= 1u<<63) y = (double)(int64_t)(x&(1u<<63)-1) + pow(2.0, 63) */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      MCLabel l_end = emit_label(as);
      if (irt_isfloat(ir->t)) {
        emit_fgh(as, MIPSI_ADD_S, dest, dest, tmp);
        emit_lsptr(as, MIPSI_LWC1, (tmp & 31), (void *)&as->J->k32[LJ_K32_2P63],
                   rset_exclude(RSET_GPR, left));
        emit_fg(as, MIPSI_CVT_S_L, dest, dest);
      } else {
        emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp);
        emit_lsptr(as, MIPSI_LDC1, (tmp & 31), (void *)&as->J->k64[LJ_K64_2P63],
                   rset_exclude(RSET_GPR, left));
        emit_fg(as, MIPSI_CVT_D_L, dest, dest);
      }
      emit_branch(as, MIPSI_BGEZ, left, RID_ZERO, l_end);
      emit_tg(as, MIPSI_DMTC1, RID_TMP, dest);
      emit_tsml(as, MIPSI_DEXTM, RID_TMP, left, 30, 0);
#endif
    } else {  /* Integer to FP conversion. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
#if LJ_32
      emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
              dest, dest);
      emit_tg(as, MIPSI_MTC1, left, dest);
#else
      MIPSIns mi = irt_isfloat(ir->t) ?
        (st64 ? MIPSI_CVT_S_L : MIPSI_CVT_S_W) :
        (st64 ? MIPSI_CVT_D_L : MIPSI_CVT_D_W);
      emit_fg(as, mi, dest, dest);
      emit_tg(as, st64 ? MIPSI_DMTC1 : MIPSI_MTC1, left, dest);
#endif
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
                 "bad type for checked CONV");
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
      if (irt_isu32(ir->t)) {  /* FP to U32 conversion. */
        /* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */
        emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP);
        emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
        emit_tg(as, MIPSI_MFC1, dest, tmp);
        emit_fg(as, st == IRT_FLOAT ? MIPSI_FLOOR_W_S : MIPSI_FLOOR_W_D,
                tmp, tmp);
        emit_fgh(as, st == IRT_FLOAT ? MIPSI_SUB_S : MIPSI_SUB_D,
                 tmp, left, tmp);
        if (st == IRT_FLOAT)
          emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
                     (void *)&as->J->k32[LJ_K32_2P31], RSET_GPR);
        else
          emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
                     (void *)&as->J->k64[LJ_K64_2P31], RSET_GPR);
#if LJ_64
      } else if (irt_isu64(ir->t)) {  /* FP to U64 conversion. */
        MCLabel l_end;
        emit_tg(as, MIPSI_DMFC1, dest, tmp);
        l_end = emit_label(as);
        /* For inputs >= 2^63 add -2^64 and convert again. */
        if (st == IRT_NUM) {
          emit_fg(as, MIPSI_TRUNC_L_D, tmp, tmp);
          emit_fgh(as, MIPSI_ADD_D, tmp, left, tmp);
          emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
                     (void *)&as->J->k64[LJ_K64_M2P64],
                     rset_exclude(RSET_GPR, dest));
          emit_fg(as, MIPSI_TRUNC_L_D, tmp, left);  /* Delay slot. */
#if !LJ_TARGET_MIPSR6
          emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
          emit_fgh(as, MIPSI_C_OLT_D, 0, left, tmp);
#else
          emit_branch(as, MIPSI_BC1NEZ, 0, (left&31), l_end);
          emit_fgh(as, MIPSI_CMP_LT_D, left, left, tmp);
#endif
          emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
                     (void *)&as->J->k64[LJ_K64_2P63],
                     rset_exclude(RSET_GPR, dest));
        } else {
          emit_fg(as, MIPSI_TRUNC_L_S, tmp, tmp);
          emit_fgh(as, MIPSI_ADD_S, tmp, left, tmp);
          emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
                     (void *)&as->J->k32[LJ_K32_M2P64],
                     rset_exclude(RSET_GPR, dest));
          emit_fg(as, MIPSI_TRUNC_L_S, tmp, left);  /* Delay slot. */
#if !LJ_TARGET_MIPSR6
          emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
          emit_fgh(as, MIPSI_C_OLT_S, 0, left, tmp);
#else
          emit_branch(as, MIPSI_BC1NEZ, 0, (left&31), l_end);
          emit_fgh(as, MIPSI_CMP_LT_S, left, left, tmp);
#endif
          emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
                     (void *)&as->J->k32[LJ_K32_2P63],
                     rset_exclude(RSET_GPR, dest));
        }
#endif
      } else {
#if LJ_32
        emit_tg(as, MIPSI_MFC1, dest, tmp);
        emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D,
                tmp, left);
#else
        MIPSIns mi = irt_is64(ir->t) ?
          (st == IRT_NUM ? MIPSI_TRUNC_L_D : MIPSI_TRUNC_L_S) :
          (st == IRT_NUM ? MIPSI_TRUNC_W_D : MIPSI_TRUNC_W_S);
        emit_tg(as, irt_is64(ir->t) ? MIPSI_DMFC1 : MIPSI_MFC1, dest, left);
        emit_fg(as, mi, left, left);
#endif
      }
    }
  } else
#else
  if (irt_isfp(ir->t)) {
#if LJ_64 && LJ_HASFFI
    if (stfp) {  /* FP to FP conversion. */
      asm_callid(as, ir, irt_isnum(ir->t) ? IRCALL_softfp_f2d :
                                            IRCALL_softfp_d2f);
    } else {  /* Integer to FP conversion. */
      IRCallID cid = ((IRT_IS64 >> st) & 1) ?
        (irt_isnum(ir->t) ?
         (st == IRT_I64 ? IRCALL_fp64_l2d : IRCALL_fp64_ul2d) :
         (st == IRT_I64 ? IRCALL_fp64_l2f : IRCALL_fp64_ul2f)) :
        (irt_isnum(ir->t) ?
         (st == IRT_INT ? IRCALL_softfp_i2d : IRCALL_softfp_ui2d) :
         (st == IRT_INT ? IRCALL_softfp_i2f : IRCALL_softfp_ui2f));
      asm_callid(as, ir, cid);
    }
#else
    asm_callid(as, ir, IRCALL_softfp_i2d);
#endif
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
                 "bad type for checked CONV");
      asm_tointg(as, ir, RID_NONE);
    } else {
      IRCallID cid = irt_is64(ir->t) ?
        ((st == IRT_NUM) ?
         (irt_isi64(ir->t) ? IRCALL_fp64_d2l : IRCALL_fp64_d2ul) :
         (irt_isi64(ir->t) ? IRCALL_fp64_f2l : IRCALL_fp64_f2ul)) :
        ((st == IRT_NUM) ?
         (irt_isint(ir->t) ? IRCALL_softfp_d2i : IRCALL_softfp_d2ui) :
         (irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui));
      asm_callid(as, ir, cid);
    }
  } else
#endif
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
      Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
      lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
      if ((ir->op2 & IRCONV_SEXT)) {
        if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) {
          emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left);
        } else {
          uint32_t shift = st == IRT_I8 ? 24 : 16;
          emit_dta(as, MIPSI_SRA, dest, dest, shift);
          emit_dta(as, MIPSI_SLL, dest, left, shift);
        }
      } else {
        emit_tsi(as, MIPSI_ANDI, dest, left,
                 (int32_t)(st == IRT_U8 ? 0xff : 0xffff));
      }
    } else {  /* 32/64 bit integer conversions. */
#if LJ_32
      /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
      ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
#else
      if (irt_is64(ir->t)) {
        if (st64) {
          /* 64/64 bit no-op (cast). */
          ra_leftov(as, dest, lref);
        } else {
          Reg left = ra_alloc1(as, lref, RSET_GPR);
          if ((ir->op2 & IRCONV_SEXT)) {  /* 32 to 64 bit sign extension. */
            emit_dta(as, MIPSI_SLL, dest, left, 0);
          } else {  /* 32 to 64 bit zero extension. */
            emit_tsml(as, MIPSI_DEXT, dest, left, 31, 0);
          }
        }
      } else {
        if (st64) {
          /* This is either a 32 bit reg/reg mov which zeroes the hiword
          ** or a load of the loword from a 64 bit address.
          */
          Reg left = ra_alloc1(as, lref, RSET_GPR);
          emit_tsml(as, MIPSI_DEXT, dest, left, 31, 0);
        } else {  /* 32/32 bit no-op (cast). */
          /* Do nothing, but may need to move regs. */
          ra_leftov(as, dest, lref);
        }
      }
#endif
    }
  }
}

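/*
** Equivalent C for the unsigned conversion tricks in asm_conv() above
** (the emitted code runs in the opposite order of the emit_* calls):
**
**   U32 to FP:  y = (double)(int32_t)(x ^ 0x80000000) + 2147483648.0;
**   FP to U32:  y = (uint32_t)(int32_t)floor(x - 2147483648.0) ^ 0x80000000;
**   U64 to FP:  y = (double)(int64_t)(x & ~(1ull<<63));
**               if ((int64_t)x < 0) y += 9223372036854775808.0;  // +2^63
**   FP to U64:  if (x < 9223372036854775808.0)
**                 y = (uint64_t)(int64_t)x;
**               else  // fold in -2^64, then convert
**                 y = (uint64_t)(int64_t)(x - 18446744073709551616.0);
**
** The first two fold the unsigned bias into the sign bit; the last two
** split the range at 2^63 so the hardware's signed conversions suffice.
*/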
static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  int32_t ofs = 0;
#if LJ_SOFTFP32
  ra_evictset(as, RSET_SCRATCH);
  if (ra_used(ir)) {
    if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
        (ir->s & 1) == LJ_BE && (ir->s ^ 1) == (ir+1)->s) {
      int i;
      for (i = 0; i < 2; i++) {
        Reg r = (ir+i)->r;
        if (ra_hasreg(r)) {
          ra_free(as, r);
          ra_modified(as, r);
          emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
        }
      }
      ofs = sps_scale(ir->s & ~1);
    } else {
      Reg rhi = ra_dest(as, ir+1, RSET_GPR);
      Reg rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
      emit_tsi(as, MIPSI_LW, rhi, RID_SP, ofs+(LJ_BE?0:4));
      emit_tsi(as, MIPSI_LW, rlo, RID_SP, ofs+(LJ_BE?4:0));
    }
  }
#else
  RegSet drop = RSET_SCRATCH;
  if (ra_hasreg(ir->r)) rset_set(drop, ir->r);  /* Spill dest reg (if any). */
  ra_evictset(as, drop);
  ofs = sps_scale(ir->s);
#endif
  asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  /* Store the result to the spill slot or temp slots. */
  emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1),
           RID_SP, ofs);
}

/* -- Memory references --------------------------------------------------- */

#if LJ_64
/* Store tagged value for ref at base+ofs. */
static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref)
{
  RegSet allow = rset_exclude(RSET_GPR, base);
  IRIns *ir = IR(ref);
  lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
             "store of IR type %d", irt_type(ir->t));
  if (irref_isk(ref)) {
    TValue k;
    lj_ir_kvalue(as->J->L, &k, ir);
    emit_tsi(as, MIPSI_SD, ra_allock(as, (int64_t)k.u64, allow), base, ofs);
  } else {
    Reg src = ra_alloc1(as, ref, allow);
    Reg type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47,
                         rset_exclude(allow, src));
    emit_tsi(as, MIPSI_SD, RID_TMP, base, ofs);
    if (irt_isinteger(ir->t)) {
      emit_dst(as, MIPSI_DADDU, RID_TMP, RID_TMP, type);
      emit_tsml(as, MIPSI_DEXT, RID_TMP, src, 31, 0);
    } else {
      emit_dst(as, MIPSI_DADDU, RID_TMP, src, type);
    }
  }
}
#endif

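/*
** Equivalent C for the tagging done by asm_tvstore64() (the 64-bit tagged
** value layout used here: a 47-bit payload plus the type tag in the upper
** bits):
**
**   uint64_t tagged_int  = (uint64_t)(uint32_t)i + ((int64_t)itype << 47);
**   uint64_t tagged_addr = (uint64_t)ptr        + ((int64_t)itype << 47);
**
** DEXT zero-extends the 32-bit integer payload before the tag is added.
*/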
/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (irt_isnum(ir->t)) {
    if (irref_isk(ref))  /* Use the number constant itself as a TValue. */
      ra_allockreg(as, igcptr(ir_knum(ir)), dest);
    else  /* Otherwise force a spill and use the spill slot. */
      emit_tsi(as, MIPSI_AADDIU, dest, RID_SP, ra_spill(as, ir));
  } else {
    /* Otherwise use g->tmptv to hold the TValue. */
#if LJ_32
    RegSet allow = rset_exclude(RSET_GPR, dest);
    Reg type;
    emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL,
             (int32_t)(offsetof(global_State, tmptv)-32768));
    if (!irt_ispri(ir->t)) {
      Reg src = ra_alloc1(as, ref, allow);
      emit_setgl(as, src, tmptv.gcr);
    }
    if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
      type = ra_alloc1(as, ref+1, allow);
    else
      type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
    emit_setgl(as, type, tmptv.it);
#else
    asm_tvstore64(as, dest, 0, ref);
    emit_tsi(as, MIPSI_DADDIU, dest, RID_JGL,
             (int32_t)(offsetof(global_State, tmptv)-32768));
#endif
  }
}

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    ofs += 8*IR(ir->op2)->i;
    if (checki16(ofs)) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_tsi(as, MIPSI_AADDIU, dest, base, ofs);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
#if !LJ_TARGET_MIPSR6
  emit_dst(as, MIPSI_AADDU, dest, RID_TMP, base);
  emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3);
#else
  emit_dst(as, MIPSI_ALSA | MIPSF_A(3-1), dest, idx, base);
#endif
}

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2;
#if LJ_64
  Reg cmp64 = RID_NONE;
#endif
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  int isk = irref_isk(refkey);
  IRType1 kt = irkey->t;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;

  rset_clear(allow, tab);
#if LJ_SOFTFP32
  if (!isk) {
    key = ra_alloc1(as, refkey, allow);
    rset_clear(allow, key);
    if (irkey[1].o == IR_HIOP) {
      if (ra_hasreg((irkey+1)->r)) {
        type = tmpnum = (irkey+1)->r;
        tmp1 = ra_scratch(as, allow);
        rset_clear(allow, tmp1);
        ra_noweak(as, tmpnum);
      } else {
        type = tmpnum = ra_allocref(as, refkey+1, allow);
      }
      rset_clear(allow, tmpnum);
    } else {
      type = ra_allock(as, (int32_t)irt_toitype(irkey->t), allow);
      rset_clear(allow, type);
    }
  }
#else
  if (!LJ_SOFTFP && irt_isnum(kt)) {
    key = ra_alloc1(as, refkey, RSET_FPR);
    tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
  } else if (!irt_ispri(kt)) {
    key = ra_alloc1(as, refkey, allow);
    rset_clear(allow, key);
#if LJ_32
    type = ra_allock(as, (int32_t)irt_toitype(irkey->t), allow);
    rset_clear(allow, type);
#endif
  }
#endif
  tmp2 = ra_scratch(as, allow);
  rset_clear(allow, tmp2);
#if LJ_64
  if (LJ_SOFTFP || !irt_isnum(kt)) {
    /* Allocate cmp64 register used for 64-bit comparisons. */
    if (LJ_SOFTFP && irt_isnum(kt)) {
      cmp64 = key;
    } else if (!isk && irt_isaddr(kt)) {
      cmp64 = tmp2;
    } else {
      int64_t k;
      if (isk && irt_isaddr(kt)) {
        k = ((int64_t)irt_toitype(irkey->t) << 47) | irkey[1].tv.u64;
      } else {
        lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
        k = ~((int64_t)~irt_toitype(ir->t) << 47);
      }
      cmp64 = ra_allock(as, k, allow);
      rset_clear(allow, cmp64);
    }
  }
#endif

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_NE)
    asm_guard(as, MIPSI_B, RID_ZERO, RID_ZERO);
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));
  /* Follow hash chain until the end. */
  emit_move(as, dest, tmp1);
  l_loop = --as->mcp;
  emit_tsi(as, MIPSI_AL, tmp1, dest, (int32_t)offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (merge == IR_EQ) {  /* Must match asm_guard(). */
    emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
    l_end = asm_exitstub_addr(as);
  }
  if (!LJ_SOFTFP && irt_isnum(kt)) {
#if !LJ_TARGET_MIPSR6
    emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
    emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key);
#else
    emit_branch(as, MIPSI_BC1NEZ, 0, (tmpnum&31), l_end);
    emit_fgh(as, MIPSI_CMP_EQ_D, tmpnum, tmpnum, key);
#endif
    *--as->mcp = MIPSI_NOP;  /* Avoid NaN comparison overhead. */
    emit_branch(as, MIPSI_BEQ, tmp1, RID_ZERO, l_next);
    emit_tsi(as, MIPSI_SLTIU, tmp1, tmp1, (int32_t)LJ_TISNUM);
#if LJ_32
    emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n));
  } else {
    if (irt_ispri(kt)) {
      emit_branch(as, MIPSI_BEQ, tmp1, type, l_end);
    } else {
      emit_branch(as, MIPSI_BEQ, tmp2, key, l_end);
      emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
      emit_branch(as, MIPSI_BNE, tmp1, type, l_next);
    }
  }
  emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.it));
  *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);
#else
    emit_dta(as, MIPSI_DSRA32, tmp1, tmp1, 15);
    emit_tg(as, MIPSI_DMTC1, tmp1, tmpnum);
    emit_tsi(as, MIPSI_LD, tmp1, dest, (int32_t)offsetof(Node, key.u64));
  } else {
    emit_branch(as, MIPSI_BEQ, tmp1, cmp64, l_end);
    emit_tsi(as, MIPSI_LD, tmp1, dest, (int32_t)offsetof(Node, key.u64));
  }
  *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);
  if (!isk && irt_isaddr(kt)) {
    type = ra_allock(as, (int64_t)irt_toitype(kt) << 47, allow);
    emit_dst(as, MIPSI_DADDU, tmp2, key, type);
    rset_clear(allow, type);
  }
#endif

  /* Load main position relative to tab->node into dest. */
  khash = isk ? ir_khash(as, irkey) : 1;
  if (khash == 0) {
    emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node));
  } else {
    Reg tmphash = tmp1;
    if (isk)
      tmphash = ra_allock(as, khash, allow);
    emit_dst(as, MIPSI_AADDU, dest, dest, tmp1);
    lj_assertA(sizeof(Node) == 24, "bad Node size");
    emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1);
    emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3);
    emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5);
    emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash);
    emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node));
    emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
    if (isk) {
      /* Nothing to do. */
    } else if (irt_isstr(kt)) {
      emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, sid));
    } else {  /* Must match with hash*() in lj_tab.c. */
      emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2);
      emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31);
      emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2);
      emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31);
      emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest);
#if LJ_32
      if (LJ_SOFTFP ? (irkey[1].o == IR_HIOP) : irt_isnum(kt)) {
        emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
        if ((as->flags & JIT_F_MIPSXXR2)) {
          emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
        } else {
          emit_dst(as, MIPSI_OR, dest, dest, tmp1);
          emit_dta(as, MIPSI_SLL, tmp1, tmp1, HASH_ROT1);
          emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31);
        }
        emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
#if LJ_SOFTFP
        emit_ds(as, MIPSI_MOVE, tmp1, type);
        emit_ds(as, MIPSI_MOVE, tmp2, key);
#else
        emit_tg(as, MIPSI_MFC1, tmp2, key);
        emit_tg(as, MIPSI_MFC1, tmp1, key+1);
#endif
      } else {
        emit_dst(as, MIPSI_XOR, tmp2, key, tmp1);
        emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31);
        emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow));
      }
#else
      emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
      emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
      if (irt_isnum(kt)) {
        emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
        emit_dta(as, MIPSI_DSRA32, tmp1, LJ_SOFTFP ? key : tmp1, 0);
        emit_dta(as, MIPSI_SLL, tmp2, LJ_SOFTFP ? key : tmp1, 0);
#if !LJ_SOFTFP
        emit_tg(as, MIPSI_DMFC1, tmp1, key);
#endif
      } else {
        checkmclim(as);
        emit_dta(as, MIPSI_DSRA32, tmp1, tmp1, 0);
        emit_dta(as, MIPSI_SLL, tmp2, key, 0);
        emit_dst(as, MIPSI_DADDU, tmp1, key, type);
      }
#endif
    }
  }
}

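/*
** Roughly equivalent C for the main-position computation in asm_href()
** (the rotate constants HASH_ROT1/2/3 and the hash itself must match
** lj_tab.c; sizeof(Node) is 24, so the node scaling below is computed as
** (h<<5) - (h<<3)):
**
**   uint32_t h = hash(key) & t->hmask;
**   Node *n = (Node *)((char *)noderef(t->node) + (h << 5) - (h << 3));
*/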

static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  RegSet allow = rset_exclude(RSET_GPR, node);
  Reg idx = node;
#if LJ_32
  Reg key = RID_NONE, type = RID_TMP;
  int32_t lo, hi;
#else
  Reg key = ra_scratch(as, allow);
  int64_t k;
#endif
  lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
  if (ofs > 32736) {
    idx = dest;
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_tsi(as, MIPSI_AADDIU, dest, node, ofs);
  }
#if LJ_32
  if (!irt_ispri(irkey->t)) {
    key = ra_scratch(as, allow);
    rset_clear(allow, key);
  }
  if (irt_isnum(irkey->t)) {
    lo = (int32_t)ir_knum(irkey)->u32.lo;
    hi = (int32_t)ir_knum(irkey)->u32.hi;
  } else {
    lo = irkey->i;
    hi = irt_toitype(irkey->t);
    if (!ra_hasreg(key))
      goto nolo;
  }
  asm_guard(as, MIPSI_BNE, key, lo ? ra_allock(as, lo, allow) : RID_ZERO);
nolo:
  asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO);
  if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0));
  emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4));
#else
  if (irt_ispri(irkey->t)) {
    lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
    k = ~((int64_t)~irt_toitype(irkey->t) << 47);
  } else if (irt_isnum(irkey->t)) {
    k = (int64_t)ir_knum(irkey)->u64;
  } else {
    k = ((int64_t)irt_toitype(irkey->t) << 47) | (int64_t)ir_kgc(irkey);
  }
  asm_guard(as, MIPSI_BNE, key, ra_allock(as, k, allow));
  emit_tsi(as, MIPSI_LD, key, idx, kofs);
#endif
  if (ofs > 32736)
    emit_dst(as, MIPSI_AADDU, dest, node, ra_allock(as, ofs, allow));
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, MIPSI_AL, dest, v, RSET_GPR);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_tsi(as, MIPSI_AADDIU, dest, uv, (int32_t)offsetof(GCupval, tv));
      emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
    } else {
      emit_tsi(as, MIPSI_AL, dest, uv, (int32_t)offsetof(GCupval, v));
    }
    emit_tsi(as, MIPSI_AL, uv, func, (int32_t)offsetof(GCfuncL, uvptr) +
             (int32_t)sizeof(MRef) * (int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lj_assertA(!ra_used(ir), "unfused FREF");
}

static void asm_strref(ASMState *as, IRIns *ir)
{
#if LJ_32
  Reg dest = ra_dest(as, ir, RSET_GPR);
  IRRef ref = ir->op2, refk = ir->op1;
  int32_t ofs = (int32_t)sizeof(GCstr);
  Reg r;
  if (irref_isk(ref)) {
    IRRef tmp = refk; refk = ref; ref = tmp;
  } else if (!irref_isk(refk)) {
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    IRIns *irr = IR(ir->op2);
    if (ra_hasreg(irr->r)) {
      ra_noweak(as, irr->r);
      right = irr->r;
    } else if (mayfuse(as, ir->op2) &&
               irr->o == IR_ADD && irref_isk(irr->op2) &&
               checki16(ofs + IR(irr->op2)->i)) {
      ofs += IR(irr->op2)->i;
      right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
    } else {
      right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
    }
    emit_tsi(as, MIPSI_ADDIU, dest, dest, ofs);
    emit_dst(as, MIPSI_ADDU, dest, left, right);
    return;
  }
  r = ra_alloc1(as, ref, RSET_GPR);
  ofs += IR(refk)->i;
  if (checki16(ofs))
    emit_tsi(as, MIPSI_ADDIU, dest, r, ofs);
  else
    emit_dst(as, MIPSI_ADDU, dest, r,
             ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
#else
  RegSet allow = RSET_GPR;
  Reg dest = ra_dest(as, ir, allow);
  Reg base = ra_alloc1(as, ir->op1, allow);
  IRIns *irr = IR(ir->op2);
  int32_t ofs = sizeof(GCstr);
  rset_clear(allow, base);
  if (irref_isk(ir->op2) && checki16(ofs + irr->i)) {
    emit_tsi(as, MIPSI_DADDIU, dest, base, ofs + irr->i);
  } else {
    emit_tsi(as, MIPSI_DADDIU, dest, dest, ofs);
    emit_dst(as, MIPSI_DADDU, dest, base, ra_alloc1(as, ir->op2, allow));
  }
#endif
}

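/*
** Equivalent C for asm_strref() above: string data is colocated right
** after the GCstr header, so the address of character i is
**
**   (char *)str + sizeof(GCstr) + i
**
** which is why every path folds sizeof(GCstr) into the offset.
*/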

/* -- Loads and stores ---------------------------------------------------- */

static MIPSIns asm_fxloadins(ASMState *as, IRIns *ir)
{
  UNUSED(as);
  switch (irt_type(ir->t)) {
  case IRT_I8: return MIPSI_LB;
  case IRT_U8: return MIPSI_LBU;
  case IRT_I16: return MIPSI_LH;
  case IRT_U16: return MIPSI_LHU;
  case IRT_NUM:
    lj_assertA(!LJ_SOFTFP32, "unsplit FP op");
    if (!LJ_SOFTFP) return MIPSI_LDC1;
    /* fallthrough */
  case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_LWC1;
    /* fallthrough */
  default: return (LJ_64 && irt_is64(ir->t)) ? MIPSI_LD : MIPSI_LW;
  }
}

static MIPSIns asm_fxstoreins(ASMState *as, IRIns *ir)
{
  UNUSED(as);
  switch (irt_type(ir->t)) {
  case IRT_I8: case IRT_U8: return MIPSI_SB;
  case IRT_I16: case IRT_U16: return MIPSI_SH;
  case IRT_NUM:
    lj_assertA(!LJ_SOFTFP32, "unsplit FP op");
    if (!LJ_SOFTFP) return MIPSI_SDC1;
    /* fallthrough */
  case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_SWC1;
    /* fallthrough */
  default: return (LJ_64 && irt_is64(ir->t)) ? MIPSI_SD : MIPSI_SW;
  }
}

static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  MIPSIns mi = asm_fxloadins(as, ir);
  Reg idx;
  int32_t ofs;
  if (ir->op1 == REF_NIL) {  /* FLOAD from GG_State with offset. */
    idx = RID_JGL;
    ofs = (ir->op2 << 2) - 32768 - GG_OFS(g);
  } else {
    idx = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->op2 == IRFL_TAB_ARRAY) {
      ofs = asm_fuseabase(as, ir->op1);
      if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
        emit_tsi(as, MIPSI_AADDIU, dest, idx, ofs);
        return;
      }
    }
    ofs = field_ofs[ir->op2];
  }
  lj_assertA(!irt_isfp(ir->t), "bad FP FLOAD");
  emit_tsi(as, mi, dest, idx, ofs);
}

static void asm_fstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1z(as, ir->op2, RSET_GPR);
    IRIns *irf = IR(ir->op1);
    Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
    int32_t ofs = field_ofs[irf->op2];
    MIPSIns mi = asm_fxstoreins(as, ir);
    lj_assertA(!irt_isfp(ir->t), "bad FP FSTORE");
    emit_tsi(as, mi, src, idx, ofs);
  }
}

static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir,
                     (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
  lj_assertA(LJ_TARGET_UNALIGNED || !(ir->op2 & IRXLOAD_UNALIGNED),
             "unaligned XLOAD");
  asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
}

static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1z(as, ir->op2,
                         (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
    asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
                 rset_exclude(RSET_GPR, src), ofs);
  }
}

#define asm_xstore(as, ir)	asm_xstore_(as, ir, 0)

static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  int hiop = (LJ_SOFTFP32 && (ir+1)->o == IR_HIOP);
  Reg dest = RID_NONE, type = RID_TMP, idx;
  RegSet allow = RSET_GPR;
  int32_t ofs = 0;
  IRType1 t = ir->t;
  if (hiop) {
    t.irt = IRT_NUM;
    if (ra_used(ir+1)) {
      type = ra_dest(as, ir+1, allow);
      rset_clear(allow, type);
    }
  }
  if (ra_used(ir)) {
    lj_assertA((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t),
               "bad load type %d", irt_type(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
    rset_clear(allow, dest);
#if LJ_64
    if (irt_isaddr(t))
      emit_tsml(as, MIPSI_DEXTM, dest, dest, 14, 0);
    else if (irt_isint(t))
      emit_dta(as, MIPSI_SLL, dest, dest, 0);
#endif
  }
  idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
  rset_clear(allow, idx);
  if (irt_isnum(t)) {
    asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
    emit_tsi(as, MIPSI_SLTIU, RID_TMP, type, (int32_t)LJ_TISNUM);
  } else {
    asm_guard(as, MIPSI_BNE, type,
              ra_allock(as, (int32_t)irt_toitype(t), allow));
  }
#if LJ_32
  if (ra_hasreg(dest)) {
    if (!LJ_SOFTFP && irt_isnum(t))
      emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
    else
      emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0));
  }
  emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4));
#else
  if (ra_hasreg(dest)) {
    if (!LJ_SOFTFP && irt_isnum(t)) {
      emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
      dest = type;
    }
  } else {
    dest = type;
  }
  emit_dta(as, MIPSI_DSRA32, type, dest, 15);
  emit_tsi(as, MIPSI_LD, dest, idx, ofs);
#endif
}

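/*
** Sketch of the 64-bit type check in asm_ahuvload() (the tag lives in the
** upper 17 bits of the TValue, cf. asm_tvstore64() above):
**
**   int64_t itype = (int64_t)tv->u64 >> 47;   // DSRA32 type, dest, 15
**   // numbers:     guard((uint64_t)itype < (uint64_t)(int64_t)LJ_TISNUM);
**   // other types: guard(itype == irt_toitype(t));
*/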

static void asm_ahustore(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg idx, src = RID_NONE, type = RID_NONE;
  int32_t ofs = 0;
  if (ir->r == RID_SINK)
    return;
  if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
    src = ra_alloc1(as, ir->op2, LJ_SOFTFP ? RSET_GPR : RSET_FPR);
    idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
    emit_hsi(as, LJ_SOFTFP ? MIPSI_SD : MIPSI_SDC1, src, idx, ofs);
  } else {
#if LJ_32
    if (!irt_ispri(ir->t)) {
      src = ra_alloc1(as, ir->op2, allow);
      rset_clear(allow, src);
    }
    if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
      type = ra_alloc1(as, (ir+1)->op2, allow);
    else
      type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
    rset_clear(allow, type);
    idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
    if (ra_hasreg(src))
      emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0));
    emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4));
#else
    Reg tmp = RID_TMP;
    if (irt_ispri(ir->t)) {
      tmp = ra_allock(as, ~((int64_t)~irt_toitype(ir->t) << 47), allow);
      rset_clear(allow, tmp);
    } else {
      src = ra_alloc1(as, ir->op2, allow);
      rset_clear(allow, src);
      type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47, allow);
      rset_clear(allow, type);
    }
    idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
    emit_tsi(as, MIPSI_SD, tmp, idx, ofs);
    if (ra_hasreg(src)) {
      if (irt_isinteger(ir->t)) {
        emit_dst(as, MIPSI_DADDU, tmp, tmp, type);
        emit_tsml(as, MIPSI_DEXT, tmp, src, 31, 0);
      } else {
        emit_dst(as, MIPSI_DADDU, tmp, src, type);
      }
    }
#endif
  }
}

static void asm_sload(ASMState *as, IRIns *ir)
{
  Reg dest = RID_NONE, type = RID_NONE, base;
  RegSet allow = RSET_GPR;
  IRType1 t = ir->t;
#if LJ_32
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  int hiop = (LJ_SOFTFP32 && (ir+1)->o == IR_HIOP);
  if (hiop)
    t.irt = IRT_NUM;
#else
  int32_t ofs = 8*((int32_t)ir->op1-2);
#endif
  lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
             "bad parent SLOAD");  /* Handled by asm_head_side(). */
  lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
             "inconsistent SLOAD variant");
#if LJ_SOFTFP32
  lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
             "unsplit SLOAD convert");  /* Handled by LJ_SOFTFP SPLIT. */
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
#else
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
    dest = ra_scratch(as, LJ_SOFTFP ? allow : RSET_FPR);
    asm_tointg(as, ir, dest);
    t.irt = IRT_NUM;  /* Continue with a regular number type check. */
  } else
#endif
  if (ra_used(ir)) {
    lj_assertA((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t),
               "bad SLOAD type %d", irt_type(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
    rset_clear(allow, dest);
    base = ra_alloc1(as, REF_BASE, allow);
    rset_clear(allow, base);
    if (!LJ_SOFTFP32 && (ir->op2 & IRSLOAD_CONVERT)) {
      if (irt_isint(t)) {
        Reg tmp = ra_scratch(as, LJ_SOFTFP ? RSET_GPR : RSET_FPR);
#if LJ_SOFTFP
        ra_evictset(as, rset_exclude(RSET_SCRATCH, dest));
        ra_destreg(as, ir, RID_RET);
        emit_call(as, (void *)lj_ir_callinfo[IRCALL_softfp_d2i].func, 0);
        if (tmp != REGARG_FIRSTGPR)
          emit_move(as, REGARG_FIRSTGPR, tmp);
#else
        emit_tg(as, MIPSI_MFC1, dest, tmp);
        emit_fg(as, MIPSI_TRUNC_W_D, tmp, tmp);
#endif
        dest = tmp;
        t.irt = IRT_NUM;  /* Check for original type. */
      } else {
        Reg tmp = ra_scratch(as, RSET_GPR);
#if LJ_SOFTFP
        ra_evictset(as, rset_exclude(RSET_SCRATCH, dest));
        ra_destreg(as, ir, RID_RET);
        emit_call(as, (void *)lj_ir_callinfo[IRCALL_softfp_i2d].func, 0);
        emit_dta(as, MIPSI_SLL, REGARG_FIRSTGPR, tmp, 0);
#else
        emit_fg(as, MIPSI_CVT_D_W, dest, dest);
        emit_tg(as, MIPSI_MTC1, tmp, dest);
#endif
        dest = tmp;
        t.irt = IRT_INT;  /* Check for original type. */
      }
    }
#if LJ_64
    else if (irt_isaddr(t)) {
      /* Clear type from pointers. */
      emit_tsml(as, MIPSI_DEXTM, dest, dest, 14, 0);
    } else if (irt_isint(t) && (ir->op2 & IRSLOAD_TYPECHECK)) {
      /* Sign-extend integers. */
      emit_dta(as, MIPSI_SLL, dest, dest, 0);
    }
#endif
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
  rset_clear(allow, base);
dotypecheck:
#if LJ_32
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    if (ra_noreg(type))
      type = RID_TMP;
    if (irt_isnum(t)) {
      asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_tsi(as, MIPSI_SLTIU, RID_TMP, type, (int32_t)LJ_TISNUM);
    } else {
      Reg ktype = ra_allock(as, irt_toitype(t), allow);
      asm_guard(as, MIPSI_BNE, type, ktype);
    }
  }
  if (ra_hasreg(dest)) {
    if (!LJ_SOFTFP && irt_isnum(t))
      emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
    else
      emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0));
  }
  if (ra_hasreg(type))
    emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4));
#else
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    type = dest < RID_MAX_GPR ? dest : RID_TMP;
    if (irt_ispri(t)) {
      asm_guard(as, MIPSI_BNE, type,
                ra_allock(as, ~((int64_t)~irt_toitype(t) << 47), allow));
    } else {
      if (irt_isnum(t)) {
        asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
        emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM);
        if (!LJ_SOFTFP && ra_hasreg(dest))
          emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
      } else {
        asm_guard(as, MIPSI_BNE, RID_TMP,
                  ra_allock(as, (int32_t)irt_toitype(t), allow));
      }
      emit_dta(as, MIPSI_DSRA32, RID_TMP, type, 15);
    }
    emit_tsi(as, MIPSI_LD, type, base, ofs);
  } else if (ra_hasreg(dest)) {
    if (!LJ_SOFTFP && irt_isnum(t))
      emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
    else
      emit_tsi(as, irt_isint(t) ? MIPSI_LW : MIPSI_LD, dest, base,
               ofs ^ ((LJ_BE && irt_isint(t)) ? 4 : 0));
  }
#endif
}

/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID id = (CTypeID)IR(ir->op1)->i;
  CTSize sz;
  CTInfo info = lj_ctype_info(cts, id, &sz);
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[4];
  RegSet drop = RSET_SCRATCH;
  lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
             "bad CNEW/CNEWI operands");

  as->gcsteps++;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  if (ra_used(ir))
    ra_destreg(as, ir, RID_RET);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
#if LJ_32
    int32_t ofs = sizeof(GCcdata);
    if (sz == 8) {
      ofs += 4;
      lj_assertA((ir+1)->o == IR_HIOP, "expected HIOP for CNEWI");
      if (LJ_LE) ir++;
    }
    for (;;) {
      Reg r = ra_alloc1z(as, ir->op2, allow);
      emit_tsi(as, MIPSI_SW, r, RID_RET, ofs);
      rset_clear(allow, r);
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; if (LJ_BE) ir++; else ir--;
    }
#else
    emit_tsi(as, sz == 8 ? MIPSI_SD : MIPSI_SW, ra_alloc1(as, ir->op2, allow),
             RID_RET, sizeof(GCcdata));
#endif
    lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
  } else if (ir->op2 != REF_NIL) {  /* Create VLA/VLS/aligned cdata. */
    ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
    args[0] = ASMREF_L;     /* lua_State *L */
    args[1] = ir->op1;      /* CTypeID id   */
    args[2] = ir->op2;      /* CTSize sz    */
    args[3] = ASMREF_TMP1;  /* CTSize align */
    asm_gencall(as, ci, args);
    emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
    return;
  }

  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
  emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
  emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA);
  emit_ti(as, MIPSI_LI, RID_TMP, id);  /* Lower 16 bit used. Sign-ext ok. */
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
               ra_releasetmp(as, ASMREF_TMP1));
}
#endif

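/*
** Equivalent C for the tail of asm_cnew() (executed top-down, i.e. in the
** reverse order of the emit_* calls):
**
**   GCcdata *cd = lj_mem_newgco(L, sizeof(GCcdata) + sz);
**   cd->gct = ~LJ_TCDATA;   // SB of RID_RET+1
**   cd->ctypeid = id;       // SH of RID_TMP; the low 16 bits suffice
*/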

/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg link = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_tsi(as, MIPSI_AS, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_setgl(as, tab, gc.grayagain);
  emit_getgl(as, link, gc.grayagain);
  emit_dst(as, MIPSI_XOR, mark, mark, RID_TMP);  /* Clear black bit. */
  emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
  emit_tsi(as, MIPSI_ANDI, RID_TMP, mark, LJ_GC_BLACK);
  emit_tsi(as, MIPSI_LBU, mark, tab, (int32_t)offsetof(GCtab, marked));
}

static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj, val, tmp;
  /* No need for other object barriers (yet). */
  lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
  obj = IR(ir->op1)->r;
  tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
  emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
  emit_tsi(as, MIPSI_ANDI, tmp, tmp, LJ_GC_BLACK);
  emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
  emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, LJ_GC_WHITES);
  val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
  emit_tsi(as, MIPSI_LBU, tmp, obj,
           (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
  emit_tsi(as, MIPSI_LBU, RID_TMP, val, (int32_t)offsetof(GChead, marked));
}

/* -- Arithmetic and logic operations ------------------------------------- */

#if !LJ_SOFTFP
static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = (left >> 8); left &= 255;
  emit_fgh(as, mi, dest, left, right);
}

static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
  emit_fg(as, mi, dest, left);
}
#endif

#if !LJ_SOFTFP32
static void asm_fpmath(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (ir->op2 <= IRFPM_TRUNC)
    asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2);
  else if (ir->op2 == IRFPM_SQRT)
    asm_fpunary(as, ir, MIPSI_SQRT_D);
  else
#endif
    asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
}
#endif

#if !LJ_SOFTFP
#define asm_fpadd(as, ir)	asm_fparith(as, ir, MIPSI_ADD_D)
#define asm_fpsub(as, ir)	asm_fparith(as, ir, MIPSI_SUB_D)
#define asm_fpmul(as, ir)	asm_fparith(as, ir, MIPSI_MUL_D)
#elif LJ_64  /* && LJ_SOFTFP */
#define asm_fpadd(as, ir)	asm_callid(as, ir, IRCALL_softfp_add)
#define asm_fpsub(as, ir)	asm_callid(as, ir, IRCALL_softfp_sub)
#define asm_fpmul(as, ir)	asm_callid(as, ir, IRCALL_softfp_mul)
#endif

static void asm_add(ASMState *as, IRIns *ir)
{
  IRType1 t = ir->t;
#if !LJ_SOFTFP32
  if (irt_isnum(t)) {
    asm_fpadd(as, ir);
  } else
#endif
  {
    /* TODO MIPSR6: Fuse ADD(BSHL(a,1-4),b) or ADD(ADD(a,a),b) to MIPSI_ALSA. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    if (irref_isk(ir->op2)) {
      intptr_t k = get_kval(as, ir->op2);
      if (checki16(k)) {
        emit_tsi(as, (LJ_64 && irt_is64(t)) ? MIPSI_DADDIU : MIPSI_ADDIU, dest,
                 left, k);
        return;
      }
    }
    right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_dst(as, (LJ_64 && irt_is64(t)) ? MIPSI_DADDU : MIPSI_ADDU, dest,
             left, right);
  }
}

static void asm_sub(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP32
  if (irt_isnum(ir->t)) {
    asm_fpsub(as, ir);
  } else
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    emit_dst(as, (LJ_64 && irt_is64(ir->t)) ? MIPSI_DSUBU : MIPSI_SUBU, dest,
             left, right);
  }
}

static void asm_mul(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP32
  if (irt_isnum(ir->t)) {
    asm_fpmul(as, ir);
  } else
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    if (LJ_64 && irt_is64(ir->t)) {
#if !LJ_TARGET_MIPSR6
      emit_dst(as, MIPSI_MFLO, dest, 0, 0);
      emit_dst(as, MIPSI_DMULT, 0, left, right);
#else
      emit_dst(as, MIPSI_DMUL, dest, left, right);
#endif
    } else {
      emit_dst(as, MIPSI_MUL, dest, left, right);
    }
  }
}

#if !LJ_SOFTFP32
static void asm_fpdiv(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  asm_fparith(as, ir, MIPSI_DIV_D);
#else
  asm_callid(as, ir, IRCALL_softfp_div);
#endif
}
#endif

static void asm_neg(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fpunary(as, ir, MIPSI_NEG_D);
  } else
#elif LJ_64  /* && LJ_SOFTFP */
  if (irt_isnum(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    emit_dst(as, MIPSI_XOR, dest, left,
             ra_allock(as, 0x8000000000000000ll, rset_exclude(RSET_GPR, dest)));
  } else
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    emit_dst(as, (LJ_64 && irt_is64(ir->t)) ? MIPSI_DSUBU : MIPSI_SUBU, dest,
             RID_ZERO, left);
  }
}

#if !LJ_SOFTFP
#define asm_abs(as, ir)		asm_fpunary(as, ir, MIPSI_ABS_D)
#elif LJ_64  /* && LJ_SOFTFP */
static void asm_abs(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  emit_tsml(as, MIPSI_DEXTM, dest, left, 30, 0);
}
#endif

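/* Emit an overflow-checked add or sub. A two's complement add overflows
** iff both operands have the same sign and the result's sign differs,
** i.e. iff ((res^a) & (res^b)) < 0. A sub uses the same test with the
** sign of the second operand flipped (NOR with zero, i.e. bitwise not).
** If dest aliases an operand, its original value is saved in RID_TMP.
*/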
static void asm_arithov(ASMState *as, IRIns *ir)
{
  /* TODO MIPSR6: bovc/bnvc. Caveat: no delay slot to load RID_TMP. */
  Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR);
  lj_assertA(!irt_is64(ir->t), "bad usage");
  if (irref_isk(ir->op2)) {
    int k = IR(ir->op2)->i;
    if (ir->o == IR_SUBOV) k = -k;
    if (checki16(k)) {  /* (dest < left) == (k >= 0 ? 1 : 0) */
      left = ra_alloc1(as, ir->op1, RSET_GPR);
      asm_guard(as, k >= 0 ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_dst(as, MIPSI_SLT, RID_TMP, dest, dest == left ? RID_TMP : left);
      emit_tsi(as, MIPSI_ADDIU, dest, left, k);
      if (dest == left) emit_move(as, RID_TMP, left);
      return;
    }
  }
  left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
                                                 right), dest));
  asm_guard(as, MIPSI_BLTZ, RID_TMP, 0);
  emit_dst(as, MIPSI_AND, RID_TMP, RID_TMP, tmp);
  if (ir->o == IR_ADDOV) {  /* ((dest^left) & (dest^right)) < 0 */
    emit_dst(as, MIPSI_XOR, RID_TMP, dest, dest == right ? RID_TMP : right);
  } else {  /* ((dest^left) & (dest^~right)) < 0 */
    emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, dest);
    emit_dst(as, MIPSI_NOR, RID_TMP, dest == right ? RID_TMP : right, RID_ZERO);
  }
  emit_dst(as, MIPSI_XOR, tmp, dest, dest == left ? RID_TMP : left);
  emit_dst(as, ir->o == IR_ADDOV ? MIPSI_ADDU : MIPSI_SUBU, dest, left, right);
  if (dest == left || dest == right)
    emit_move(as, RID_TMP, dest == left ? left : right);
}

#define asm_addov(as, ir)	asm_arithov(as, ir)
#define asm_subov(as, ir)	asm_arithov(as, ir)

static void asm_mulov(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
                                                 right), dest));
  asm_guard(as, MIPSI_BNE, RID_TMP, tmp);
  emit_dta(as, MIPSI_SRA, RID_TMP, dest, 31);
#if !LJ_TARGET_MIPSR6
  emit_dst(as, MIPSI_MFHI, tmp, 0, 0);
  emit_dst(as, MIPSI_MFLO, dest, 0, 0);
  emit_dst(as, MIPSI_MULT, 0, left, right);
#else
  emit_dst(as, MIPSI_MUL, dest, left, right);
  emit_dst(as, MIPSI_MUH, tmp, left, right);
#endif
}

#if LJ_32 && LJ_HASFFI
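/* Hiword op of a split 64 bit add: dest_hi = left_hi + right_hi + carry.
** The loword op (emitted below, but executed first) leaves the carry in
** RID_TMP, computed with an unsigned compare of the loword sum.
*/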
static void asm_add64(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
  if (irref_isk(ir->op2)) {
    int32_t k = IR(ir->op2)->i;
    if (k == 0) {
      emit_dst(as, MIPSI_ADDU, dest, left, RID_TMP);
      goto loarith;
    } else if (checki16(k)) {
      emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
      emit_tsi(as, MIPSI_ADDIU, dest, left, k);
      goto loarith;
    }
  }
  emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
  right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  emit_dst(as, MIPSI_ADDU, dest, left, right);
loarith:
  ir--;
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc1(as, ir->op1, RSET_GPR);
  if (irref_isk(ir->op2)) {
    int32_t k = IR(ir->op2)->i;
    if (k == 0) {
      if (dest != left)
        emit_move(as, dest, left);
      return;
    } else if (checki16(k)) {
      if (dest == left) {
        Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, left));
        emit_move(as, dest, tmp);
        dest = tmp;
      }
      emit_dst(as, MIPSI_SLTU, RID_TMP, dest, left);
      emit_tsi(as, MIPSI_ADDIU, dest, left, k);
      return;
    }
  }
  right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  if (dest == left && dest == right) {
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
    emit_move(as, dest, tmp);
    dest = tmp;
  }
  emit_dst(as, MIPSI_SLTU, RID_TMP, dest, dest == left ? right : left);
  emit_dst(as, MIPSI_ADDU, dest, left, right);
}

static void asm_sub64(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg right, left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
  emit_dst(as, MIPSI_SUBU, dest, left, right);
  ir--;
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  if (dest == left) {
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
    emit_move(as, dest, tmp);
    dest = tmp;
  }
  emit_dst(as, MIPSI_SLTU, RID_TMP, left, dest);
  emit_dst(as, MIPSI_SUBU, dest, left, right);
}

static void asm_neg64(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
  emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
  ir--;
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc1(as, ir->op1, RSET_GPR);
  emit_dst(as, MIPSI_SLTU, RID_TMP, RID_ZERO, dest);
  emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
}
#endif

static void asm_bnot(ASMState *as, IRIns *ir)
{
  Reg left, right, dest = ra_dest(as, ir, RSET_GPR);
  IRIns *irl = IR(ir->op1);
  if (mayfuse(as, ir->op1) && irl->o == IR_BOR) {
    left = ra_alloc2(as, irl, RSET_GPR);
    right = (left >> 8); left &= 255;
  } else {
    left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    right = RID_ZERO;
  }
  emit_dst(as, MIPSI_NOR, dest, left, right);
}

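/* Byte-swap a 32 or 64 bit value. With the R2 instructions this is just
** WSBH+ROTR (resp. DSBH+DSHD). The 32 bit fallback builds the swap from
** shifts and masks: the outer bytes are exchanged with 24 bit shifts,
** the two inner bytes with 8 bit shifts masked by 0xff00.
*/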
left, right); 2032 } 2033 2034 #define asm_band(as, ir) asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI) 2035 #define asm_bor(as, ir) asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI) 2036 #define asm_bxor(as, ir) asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI) 2037 2038 static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik) 2039 { 2040 Reg dest = ra_dest(as, ir, RSET_GPR); 2041 if (irref_isk(ir->op2)) { /* Constant shifts. */ 2042 uint32_t shift = (uint32_t)IR(ir->op2)->i; 2043 if (LJ_64 && irt_is64(ir->t)) mik |= (shift & 32) ? MIPSI_D32 : MIPSI_D; 2044 emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR), 2045 (shift & 31)); 2046 } else { 2047 Reg right, left = ra_alloc2(as, ir, RSET_GPR); 2048 right = (left >> 8); left &= 255; 2049 if (LJ_64 && irt_is64(ir->t)) mi |= MIPSI_DV; 2050 emit_dst(as, mi, dest, right, left); /* Shift amount is in rs. */ 2051 } 2052 } 2053 2054 #define asm_bshl(as, ir) asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL) 2055 #define asm_bshr(as, ir) asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL) 2056 #define asm_bsar(as, ir) asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA) 2057 #define asm_brol(as, ir) lj_assertA(0, "unexpected BROL") 2058 2059 static void asm_bror(ASMState *as, IRIns *ir) 2060 { 2061 if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) { 2062 asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR); 2063 } else { 2064 Reg dest = ra_dest(as, ir, RSET_GPR); 2065 if (irref_isk(ir->op2)) { /* Constant shifts. */ 2066 uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31); 2067 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); 2068 emit_rotr(as, dest, left, RID_TMP, shift); 2069 } else { 2070 Reg right, left = ra_alloc2(as, ir, RSET_GPR); 2071 right = (left >> 8); left &= 255; 2072 emit_dst(as, MIPSI_OR, dest, dest, RID_TMP); 2073 emit_dst(as, MIPSI_SRLV, dest, right, left); 2074 emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left); 2075 emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right); 2076 } 2077 } 2078 } 2079 2080 #if LJ_SOFTFP 2081 static void asm_sfpmin_max(ASMState *as, IRIns *ir) 2082 { 2083 CCallInfo ci = lj_ir_callinfo[(IROp)ir->o == IR_MIN ? IRCALL_lj_vm_sfmin : IRCALL_lj_vm_sfmax]; 2084 #if LJ_64 2085 IRRef args[2]; 2086 args[0] = ir->op1; 2087 args[1] = ir->op2; 2088 #else 2089 IRRef args[4]; 2090 args[0^LJ_BE] = ir->op1; 2091 args[1^LJ_BE] = (ir+1)->op1; 2092 args[2^LJ_BE] = ir->op2; 2093 args[3^LJ_BE] = (ir+1)->op2; 2094 #endif 2095 asm_setupresult(as, ir, &ci); 2096 emit_call(as, (void *)ci.func, 0); 2097 ci.func = NULL; 2098 asm_gencall(as, &ci, args); 2099 } 2100 #endif 2101 2102 static void asm_min_max(ASMState *as, IRIns *ir, int ismax) 2103 { 2104 if (!LJ_SOFTFP32 && irt_isnum(ir->t)) { 2105 #if LJ_SOFTFP 2106 asm_sfpmin_max(as, ir); 2107 #else 2108 Reg dest = ra_dest(as, ir, RSET_FPR); 2109 Reg right, left = ra_alloc2(as, ir, RSET_FPR); 2110 right = (left >> 8); left &= 255; 2111 #if !LJ_TARGET_MIPSR6 2112 if (dest == left) { 2113 emit_fg(as, MIPSI_MOVF_D, dest, right); 2114 } else { 2115 emit_fg(as, MIPSI_MOVT_D, dest, left); 2116 if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right); 2117 } 2118 emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? right : left, ismax ? left : right); 2119 #else 2120 emit_fgh(as, ismax ? 
static void asm_bror(ASMState *as, IRIns *ir)
{
  if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) {
    asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (irref_isk(ir->op2)) {  /* Constant shifts. */
      uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
      Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
      emit_rotr(as, dest, left, RID_TMP, shift);
    } else {
      Reg right, left = ra_alloc2(as, ir, RSET_GPR);
      right = (left >> 8); left &= 255;
      emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
      emit_dst(as, MIPSI_SRLV, dest, right, left);
      emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left);
      emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right);
    }
  }
}

#if LJ_SOFTFP
static void asm_sfpmin_max(ASMState *as, IRIns *ir)
{
  CCallInfo ci = lj_ir_callinfo[(IROp)ir->o == IR_MIN ?
                                IRCALL_lj_vm_sfmin : IRCALL_lj_vm_sfmax];
#if LJ_64
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
#else
  IRRef args[4];
  args[0^LJ_BE] = ir->op1;
  args[1^LJ_BE] = (ir+1)->op1;
  args[2^LJ_BE] = ir->op2;
  args[3^LJ_BE] = (ir+1)->op2;
#endif
  asm_setupresult(as, ir, &ci);
  emit_call(as, (void *)ci.func, 0);
  ci.func = NULL;
  asm_gencall(as, &ci, args);
}
#endif

static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
{
  if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
#if LJ_SOFTFP
    asm_sfpmin_max(as, ir);
#else
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg right, left = ra_alloc2(as, ir, RSET_FPR);
    right = (left >> 8); left &= 255;
#if !LJ_TARGET_MIPSR6
    if (dest == left) {
      emit_fg(as, MIPSI_MOVF_D, dest, right);
    } else {
      emit_fg(as, MIPSI_MOVT_D, dest, left);
      if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right);
    }
    emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? right : left, ismax ? left : right);
#else
    emit_fgh(as, ismax ? MIPSI_MAX_D : MIPSI_MIN_D, dest, left, right);
#endif
#endif
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    if (left == right) {
      if (dest != left) emit_move(as, dest, left);
    } else {
#if !LJ_TARGET_MIPSR6
      if (dest == left) {
        emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP);
      } else {
        emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP);
        if (dest != right) emit_move(as, dest, right);
      }
#else
      emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
      if (dest != right) {
        emit_dst(as, MIPSI_SELNEZ, RID_TMP, right, RID_TMP);
        emit_dst(as, MIPSI_SELEQZ, dest, left, RID_TMP);
      } else {
        emit_dst(as, MIPSI_SELEQZ, RID_TMP, left, RID_TMP);
        emit_dst(as, MIPSI_SELNEZ, dest, right, RID_TMP);
      }
#endif
      emit_dst(as, MIPSI_SLT, RID_TMP,
               ismax ? left : right, ismax ? right : left);
    }
  }
}

#define asm_min(as, ir)		asm_min_max(as, ir, 0)
#define asm_max(as, ir)		asm_min_max(as, ir, 1)

/* -- Comparisons --------------------------------------------------------- */

#if LJ_SOFTFP
/* SFP comparisons. */
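/* The comparison call is assumed to return an integer encoding of the
** result: negative for less, zero for equal, 1 for greater and 2 for
** unordered (NaN operand). The guards below exit on the failing cases.
*/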
static void asm_sfpcomp(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
#if LJ_64
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
#else
  IRRef args[4];
  args[LJ_LE ? 0 : 1] = ir->op1; args[LJ_LE ? 1 : 0] = (ir+1)->op1;
  args[LJ_LE ? 2 : 3] = ir->op2; args[LJ_LE ? 3 : 2] = (ir+1)->op2;
#endif

  for (r = REGARG_FIRSTGPR; r <= REGARG_FIRSTGPR+(LJ_64?1:3); r++) {
    if (!rset_test(as->freeset, r) &&
        regcost_ref(as->cost[r]) == args[r-REGARG_FIRSTGPR])
      rset_clear(drop, r);
  }
  ra_evictset(as, drop);

  asm_setupresult(as, ir, ci);

  switch ((IROp)ir->o) {
  case IR_LT:
    asm_guard(as, MIPSI_BGEZ, RID_RET, 0);
    break;
  case IR_ULT:
    asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
    emit_loadi(as, RID_TMP, 1);
    asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO);
    break;
  case IR_GE:
    asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
    emit_loadi(as, RID_TMP, 2);
    asm_guard(as, MIPSI_BLTZ, RID_RET, 0);
    break;
  case IR_LE:
    asm_guard(as, MIPSI_BGTZ, RID_RET, 0);
    break;
  case IR_GT:
    asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
    emit_loadi(as, RID_TMP, 2);
    asm_guard(as, MIPSI_BLEZ, RID_RET, 0);
    break;
  case IR_UGE:
    asm_guard(as, MIPSI_BLTZ, RID_RET, 0);
    break;
  case IR_ULE:
    asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
    emit_loadi(as, RID_TMP, 1);
    break;
  case IR_UGT: case IR_ABC:
    asm_guard(as, MIPSI_BLEZ, RID_RET, 0);
    break;
  case IR_EQ: case IR_NE:
    asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_RET, RID_ZERO);
  default:
    break;
  }
  asm_gencall(as, ci, args);
}
#endif

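/* Note the ORDER IR encoding of the comparison opcodes: bit 2 selects an
** unsigned compare, bit 1 swaps the operands and the branch polarity
** follows from bits 0 and 1 (op^(op>>1)). The guard branch is taken,
** i.e. the trace is exited, when the comparison fails.
*/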
static void asm_comp(ASMState *as, IRIns *ir)
{
  /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
  IROp op = ir->o;
  if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
#if LJ_SOFTFP
    asm_sfpcomp(as, ir);
#else
#if !LJ_TARGET_MIPSR6
    Reg right, left = ra_alloc2(as, ir, RSET_FPR);
    right = (left >> 8); left &= 255;
    asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
    emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right);
#else
    Reg tmp, right, left = ra_alloc2(as, ir, RSET_FPR);
    right = (left >> 8); left &= 255;
    tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_FPR, left), right));
    asm_guard(as, (op&1) ? MIPSI_BC1NEZ : MIPSI_BC1EQZ, 0, (tmp&31));
    emit_fgh(as, MIPSI_CMP_LT_D + ((op&3) ^ ((op>>2)&1)), tmp, left, right);
#endif
#endif
  } else {
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    if (op == IR_ABC) op = IR_UGT;
    if ((op&4) == 0 && irref_isk(ir->op2) && get_kval(as, ir->op2) == 0) {
      MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) :
                            ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ);
      asm_guard(as, mi, left, 0);
    } else {
      if (irref_isk(ir->op2)) {
        intptr_t k = get_kval(as, ir->op2);
        if ((op&2)) k++;
        if (checki16(k)) {
          asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
          emit_tsi(as, (op&4) ? MIPSI_SLTIU : MIPSI_SLTI,
                   RID_TMP, left, k);
          return;
        }
      }
      right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
      asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT,
               RID_TMP, (op&2) ? right : left, (op&2) ? left : right);
    }
  }
}

static void asm_equal(ASMState *as, IRIns *ir)
{
  Reg right, left = ra_alloc2(as, ir, (!LJ_SOFTFP && irt_isnum(ir->t)) ?
                                      RSET_FPR : RSET_GPR);
  right = (left >> 8); left &= 255;
  if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
#if LJ_SOFTFP
    asm_sfpcomp(as, ir);
#elif !LJ_TARGET_MIPSR6
    asm_guard(as, (ir->o & 1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
    emit_fgh(as, MIPSI_C_EQ_D, 0, left, right);
#else
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_FPR, left), right));
    asm_guard(as, (ir->o & 1) ? MIPSI_BC1NEZ : MIPSI_BC1EQZ, 0, (tmp&31));
    emit_fgh(as, MIPSI_CMP_EQ_D, tmp, left, right);
#endif
  } else {
    asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right);
  }
}

#if LJ_32 && LJ_HASFFI
/* 64 bit integer comparisons. */
static void asm_comp64(ASMState *as, IRIns *ir)
{
  /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
  IROp op = (ir-1)->o;
  MCLabel l_end;
  Reg rightlo, leftlo, righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
  righthi = (lefthi >> 8); lefthi &= 255;
  leftlo = ra_alloc2(as, ir-1,
                     rset_exclude(rset_exclude(RSET_GPR, lefthi), righthi));
  rightlo = (leftlo >> 8); leftlo &= 255;
  asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
  l_end = emit_label(as);
  if (lefthi != righthi)
    emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP,
             (op&2) ? righthi : lefthi, (op&2) ? lefthi : righthi);
  emit_dst(as, MIPSI_SLTU, RID_TMP,
           (op&2) ? rightlo : leftlo, (op&2) ? leftlo : rightlo);
  if (lefthi != righthi)
    emit_branch(as, MIPSI_BEQ, lefthi, righthi, l_end);
}

static void asm_comp64eq(ASMState *as, IRIns *ir)
{
  Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  asm_guard(as, ((ir-1)->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_TMP, RID_ZERO);
  tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
  emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp);
  emit_dst(as, MIPSI_XOR, tmp, left, right);
  left = ra_alloc2(as, ir-1, RSET_GPR);
  right = (left >> 8); left &= 255;
  emit_dst(as, MIPSI_XOR, RID_TMP, left, right);
}
#endif

/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */

/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
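/* E.g. a 64 bit add is split into a loword IR_ADD with an IR_HIOP on top.
** asm_add64() then emits both halves at once and the loword op itself is
** skipped by decrementing as->curins.
*/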
static void asm_hiop(ASMState *as, IRIns *ir)
{
#if LJ_32 && (LJ_HASFFI || LJ_SOFTFP)
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if ((ir-1)->o == IR_CONV) {  /* Conversions to/from 64 bit. */
    as->curins--;  /* Always skip the CONV. */
#if LJ_HASFFI && !LJ_SOFTFP
    if (usehi || uselo)
      asm_conv64(as, ir);
    return;
#endif
  } else if ((ir-1)->o < IR_EQ) {  /* 64 bit integer comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
#if LJ_SOFTFP
    if (!irt_isint(ir->t)) {
      asm_sfpcomp(as, ir-1);
      return;
    }
#endif
#if LJ_HASFFI
    asm_comp64(as, ir);
#endif
    return;
  } else if ((ir-1)->o <= IR_NE) {  /* 64 bit integer comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
#if LJ_SOFTFP
    if (!irt_isint(ir->t)) {
      asm_sfpcomp(as, ir-1);
      return;
    }
#endif
#if LJ_HASFFI
    asm_comp64eq(as, ir);
#endif
    return;
#if LJ_SOFTFP
  } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
    as->curins--;  /* Always skip the loword min/max. */
    if (uselo || usehi)
      asm_sfpmin_max(as, ir-1);
    return;
#endif
  } else if ((ir-1)->o == IR_XSTORE) {
    as->curins--;  /* Handle both stores here. */
    if ((ir-1)->r != RID_SINK) {
      asm_xstore_(as, ir, LJ_LE ? 4 : 0);
      asm_xstore_(as, ir-1, LJ_LE ? 0 : 4);
    }
    return;
  }
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
#if LJ_HASFFI
  case IR_ADD: as->curins--; asm_add64(as, ir); break;
  case IR_SUB: as->curins--; asm_sub64(as, ir); break;
  case IR_NEG: as->curins--; asm_neg64(as, ir); break;
#endif
#if LJ_SOFTFP
  case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  case IR_STRTO:
    if (!uselo)
      ra_allocref(as, ir->op1, RSET_GPR);  /* Mark lo op as used. */
    break;
#endif
  case IR_CALLN:
  case IR_CALLS:
  case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
#if LJ_SOFTFP
  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR:
#endif
  case IR_CNEWI:
    /* Nothing to do here. Handled by lo op itself. */
    break;
  default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
  }
#else
  /* Unused on MIPS64 or without SOFTFP or FFI. */
  UNUSED(as); UNUSED(ir); lj_assertA(0, "unexpected HIOP");
#endif
}

/* -- Profiling ----------------------------------------------------------- */

static void asm_prof(ASMState *as, IRIns *ir)
{
  UNUSED(ir);
  asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
  emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, HOOK_PROFILE);
  emit_lsglptr(as, MIPSI_LBU, RID_TMP,
               (int32_t)offsetof(global_State, hookmask));
}

/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
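/* At runtime this computes L->maxstack - BASE and exits the trace if
** less than topslot*8 bytes of Lua stack space remain. If no free
** register is available, the temp. register is spilled to sp[0] first
** and restored afterwards.
*/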
static void asm_stack_check(ASMState *as, BCReg topslot,
                            IRIns *irp, RegSet allow, ExitNo exitno)
{
  /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
  Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
  ExitNo oldsnap = as->snapno;
  rset_clear(allow, pbase);
#if LJ_32
  tmp = allow ? rset_pickbot(allow) :
                (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
#else
  tmp = allow ? rset_pickbot(allow) : RID_RET;
#endif
  as->snapno = exitno;
  asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
  as->snapno = oldsnap;
  if (allow == RSET_EMPTY)  /* Restore temp. register. */
    emit_tsi(as, MIPSI_AL, tmp, RID_SP, 0);
  else
    ra_modified(as, tmp);
  emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot));
  emit_dst(as, MIPSI_ASUBU, RID_TMP, tmp, pbase);
  emit_tsi(as, MIPSI_AL, tmp, tmp, offsetof(lua_State, maxstack));
  if (pbase == RID_TMP)
    emit_getgl(as, RID_TMP, jit_base);
  emit_getgl(as, tmp, cur_L);
  if (allow == RSET_EMPTY)  /* Spill temp. register. */
    emit_tsi(as, MIPSI_AS, tmp, RID_SP, 0);
}

/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
#if LJ_32 || defined(LUA_USE_ASSERT)
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
#endif
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1-LJ_FR2);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
#if LJ_SOFTFP32
      Reg tmp;
      RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
      /* LJ_SOFTFP: must be a number constant. */
      lj_assertA(irref_isk(ref), "unsplit FP op");
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow);
      emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?4:0));
      if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1);
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, allow);
      emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?0:4));
#elif LJ_SOFTFP  /* && LJ_64 */
      Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
      emit_tsi(as, MIPSI_SD, src, RID_BASE, ofs);
#else
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs);
#endif
    } else {
#if LJ_32
      RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
      Reg type;
      lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
                 "restore of IR type %d", irt_type(ir->t));
      if (!irt_ispri(ir->t)) {
        Reg src = ra_alloc1(as, ref, allow);
        rset_clear(allow, src);
        emit_tsi(as, MIPSI_SW, src, RID_BASE, ofs+(LJ_BE?4:0));
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
        if (s == 0) continue;  /* Do not overwrite link to previous frame. */
        type = ra_allock(as, (int32_t)(*flinks--), allow);
#if LJ_SOFTFP
      } else if ((sn & SNAP_SOFTFPNUM)) {
        type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPR, RID_BASE));
#endif
      } else {
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      }
      emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4));
#else
      asm_tvstore64(as, RID_BASE, ofs, ref);
#endif
    }
    checkmclim(as);
  }
  lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
}

/* -- GC handling --------------------------------------------------------- */

/* Marker to prevent patching the GC check exit. */
#define MIPS_NOPATCH_GC_CHECK	MIPSI_OR

/* Check GC threshold and do one or more GC steps. */
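/* The exit branch emitted by asm_guard() here must never be patched by
** lj_asm_patchexit(). This is ensured by overwriting the nop after the
** call with the (otherwise harmless) MIPS_NOPATCH_GC_CHECK marker, which
** the patch loop checks for.
*/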
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  /* Assumes asm_snap_prep() already done. */
  asm_guard(as, MIPSI_BNE, RID_RET, RID_ZERO);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  l_end[-3] = MIPS_NOPATCH_GC_CHECK;  /* Replace the nop after the call. */
  emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
  tmp = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_branch(as, MIPSI_BNE, RID_TMP, RID_ZERO, l_end);
  emit_dst(as, MIPSI_SLTU, RID_TMP, RID_TMP, tmp);
  emit_getgl(as, tmp, gc.threshold);
  emit_getgl(as, RID_TMP, gc.total);
  as->gcsteps = 0;
  checkmclim(as);
}

/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  p[-1] = MIPSI_NOP;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* asm_guard already inverted the cond branch. Only patch the target. */
    p[-3] |= ((target-p+2) & 0x0000ffffu);
  } else {
    p[-2] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
  }
}

/* -- Head of trace ------------------------------------------------------- */

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (as->loopinv) as->mctop--;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (r != RID_BASE)
      emit_move(as, r, RID_BASE);
  }
}

/* Coalesce BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (as->loopinv) as->mctop--;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (irp->r == r) {
      rset_clear(allow, r);  /* Mark same BASE register as coalesced. */
    } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
      rset_clear(allow, irp->r);
      emit_move(as, r, irp->r);  /* Move from coalesced parent reg. */
    } else {
      emit_getgl(as, r, jit_base);  /* Otherwise reload BASE. */
    }
  }
  return allow;
}

/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp;
  int32_t spadj = as->T->spadjust;
  MCode *p = as->mctop-1;
  *p = spadj ? (MIPSI_AADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP;
  p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  as->mcp = as->mctop-2;  /* Leave room for branch plus nop or stack adj. */
  as->invmcp = as->loopref ? as->mcp : NULL;
}

/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  uint32_t i, nargs = CCI_XNARGS(ci);
#if LJ_32
  int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
#else
  int nslots = 0, ngpr = REGARG_NUMGPR;
#endif
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++) {
#if LJ_32
    if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t) &&
        nfpr > 0 && !(ci->flags & CCI_VARARG)) {
      nfpr--;
      ngpr -= irt_isnum(IR(args[i])->t) ? 2 : 1;
    } else if (!LJ_SOFTFP && args[i] && irt_isnum(IR(args[i])->t)) {
      nfpr = 0;
      ngpr = ngpr & ~1;
      if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1;
    } else {
      nfpr = 0;
      if (ngpr > 0) ngpr--; else nslots++;
    }
#else
    if (ngpr > 0) ngpr--; else nslots += 2;
#endif
  }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
}

static void asm_setup_target(ASMState *as)
{
  asm_sparejump_setup(as);
  asm_exitstub_setup(as);
}

/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
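/* If the new target is out of range of the original 16 bit branch offset,
** the branch is redirected to one of the spare long-range jump slots that
** asm_sparejump_setup() reserved at the start of the mcarea.
*/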
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *px = exitstub_trace_addr(T, exitno);
  MCode *cstart = NULL, *cstop = NULL;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode exitload = MIPSI_LI | MIPSF_T(RID_TMP) | exitno;
  MCode tjump = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
  for (p++; p < pe; p++) {
    if (*p == exitload) {  /* Look for load of exit number. */
      /* Look for exitstub branch. Yes, this covers all used branch variants. */
      if (((p[-1] ^ (px-p)) & 0xffffu) == 0 &&
          ((p[-1] & 0xf0000000u) == MIPSI_BEQ ||
           (p[-1] & 0xfc1e0000u) == MIPSI_BLTZ ||
#if !LJ_TARGET_MIPSR6
           (p[-1] & 0xffe00000u) == MIPSI_BC1F
#else
           (p[-1] & 0xff600000u) == MIPSI_BC1EQZ
#endif
          ) && p[-2] != MIPS_NOPATCH_GC_CHECK) {
        ptrdiff_t delta = target - p;
        if (((delta + 0x8000) >> 16) == 0) {  /* Patch in-range branch. */
        patchbranch:
          p[-1] = (p[-1] & 0xffff0000u) | (delta & 0xffffu);
          *p = MIPSI_NOP;  /* Replace the load of the exit number. */
          cstop = p;
          if (!cstart) cstart = p-1;
        } else {  /* Branch out of range. Use spare jump slot in mcarea. */
          int i;
          for (i = (int)(sizeof(MCLink)/sizeof(MCode));
               i < (int)(sizeof(MCLink)/sizeof(MCode)+MIPS_SPAREJUMP*2);
               i += 2) {
            if (mcarea[i] == tjump) {
              delta = mcarea+i - p;
              goto patchbranch;
            } else if (mcarea[i] == MIPSI_NOP) {
              mcarea[i] = tjump;
              cstart = mcarea+i;
              delta = mcarea+i - p;
              goto patchbranch;
            }
          }
          /* Ignore jump slot overflow. Child trace is simply not attached. */
        }
      } else if (p+1 == pe) {
        /* Patch NOP after code for inverted loop branch. Use of J is ok. */
        lj_assertJ(p[1] == MIPSI_NOP, "expected NOP");
        p[1] = tjump;
        *p = MIPSI_NOP;  /* Replace the load of the exit number. */
        cstop = p+2;
        if (!cstart) cstart = p+1;
      }
    }
  }
  if (cstart) lj_mcode_sync(cstart, cstop);
  lj_mcode_patch(J, mcarea, 1);
}